-rw-r--r--Documentation/RCU/RTFP.txt77
-rw-r--r--Documentation/RCU/UP.txt34
-rw-r--r--Documentation/RCU/checklist.txt20
-rw-r--r--Documentation/RCU/rcu.txt10
-rw-r--r--Documentation/RCU/rcubarrier.txt7
-rw-r--r--Documentation/RCU/torture.txt23
-rw-r--r--Documentation/RCU/trace.txt7
-rw-r--r--Documentation/RCU/whatisRCU.txt22
-rw-r--r--Documentation/feature-removal-schedule.txt51
-rw-r--r--Documentation/filesystems/nfs.txt98
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kernel-parameters.txt34
-rw-r--r--Documentation/keys.txt39
-rw-r--r--Documentation/kmemleak.txt31
-rw-r--r--Documentation/s390/s390dbf.txt7
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt30
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt33
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt64
-rw-r--r--Documentation/sysctl/kernel.txt16
-rw-r--r--Documentation/trace/events.txt9
-rw-r--r--Documentation/trace/ftrace.txt68
-rw-r--r--Documentation/trace/function-graph-fold.vim42
-rw-r--r--Documentation/trace/ring-buffer-design.txt955
-rw-r--r--MAINTAINERS12
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig12
-rw-r--r--arch/alpha/include/asm/thread_info.h5
-rw-r--r--arch/alpha/kernel/signal.c8
-rw-r--r--arch/arm/include/asm/thread_info.h3
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/signal.c8
-rw-r--r--arch/arm/mach-omap2/mcbsp.c5
-rw-r--r--arch/arm/mach-pxa/include/mach/audio.h3
-rw-r--r--arch/arm/plat-omap/dma.c10
-rw-r--r--arch/arm/plat-omap/include/mach/mcbsp.h51
-rw-r--r--arch/arm/plat-omap/mcbsp.c401
-rw-r--r--arch/arm/plat-s3c/include/plat/audio-simtec.h37
-rw-r--r--arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h5
-rw-r--r--arch/avr32/include/asm/thread_info.h6
-rw-r--r--arch/avr32/kernel/entry-avr32b.S2
-rw-r--r--arch/avr32/kernel/signal.c8
-rw-r--r--arch/cris/kernel/ptrace.c8
-rw-r--r--arch/frv/kernel/signal.c2
-rw-r--r--arch/h8300/include/asm/thread_info.h2
-rw-r--r--arch/h8300/kernel/signal.c8
-rw-r--r--arch/ia64/include/asm/dma-mapping.h19
-rw-r--r--arch/ia64/kernel/process.c2
-rw-r--r--arch/ia64/xen/time.c3
-rw-r--r--arch/m32r/include/asm/thread_info.h2
-rw-r--r--arch/m32r/kernel/signal.c8
-rw-r--r--arch/m68k/include/asm/entry_mm.h4
-rw-r--r--arch/m68k/include/asm/entry_no.h8
-rw-r--r--arch/m68k/include/asm/math-emu.h20
-rw-r--r--arch/m68k/include/asm/thread_info_mm.h11
-rw-r--r--arch/m68k/kernel/asm-offsets.c39
-rw-r--r--arch/m68k/kernel/entry.S22
-rw-r--r--arch/m68k/math-emu/fp_entry.S38
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/kernel/signal.c8
-rw-r--r--arch/mn10300/kernel/signal.c2
-rw-r--r--arch/parisc/include/asm/thread_info.h4
-rw-r--r--arch/parisc/kernel/entry.S2
-rw-r--r--arch/parisc/kernel/signal.c8
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h23
-rw-r--r--arch/powerpc/include/asm/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/spinlock.h20
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c48
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S19
-rw-r--r--arch/powerpc/kernel/perf_callchain.c527
-rw-r--r--arch/powerpc/mm/slb.c37
-rw-r--r--arch/powerpc/mm/stab.c11
-rw-r--r--arch/s390/Kconfig10
-rw-r--r--arch/s390/Makefile3
-rw-r--r--arch/s390/crypto/des_s390.c11
-rw-r--r--arch/s390/crypto/sha1_s390.c26
-rw-r--r--arch/s390/crypto/sha256_s390.c26
-rw-r--r--arch/s390/crypto/sha512_s390.c36
-rw-r--r--arch/s390/defconfig2
-rw-r--r--arch/s390/hypfs/inode.c6
-rw-r--r--arch/s390/include/asm/atomic.h205
-rw-r--r--arch/s390/include/asm/checksum.h25
-rw-r--r--arch/s390/include/asm/chsc.h28
-rw-r--r--arch/s390/include/asm/cio.h223
-rw-r--r--arch/s390/include/asm/cpu.h26
-rw-r--r--arch/s390/include/asm/cpuid.h25
-rw-r--r--arch/s390/include/asm/debug.h9
-rw-r--r--arch/s390/include/asm/hardirq.h7
-rw-r--r--arch/s390/include/asm/ipl.h5
-rw-r--r--arch/s390/include/asm/kvm_host.h6
-rw-r--r--arch/s390/include/asm/kvm_virtio.h10
-rw-r--r--arch/s390/include/asm/lowcore.h6
-rw-r--r--arch/s390/include/asm/mmu.h1
-rw-r--r--arch/s390/include/asm/page.h4
-rw-r--r--arch/s390/include/asm/pgalloc.h1
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/scatterlist.h20
-rw-r--r--arch/s390/include/asm/scsw.h (renamed from drivers/s390/cio/scsw.c)345
-rw-r--r--arch/s390/include/asm/setup.h2
-rw-r--r--arch/s390/include/asm/smp.h32
-rw-r--r--arch/s390/include/asm/spinlock.h29
-rw-r--r--arch/s390/include/asm/system.h4
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/asm/timex.h14
-rw-r--r--arch/s390/kernel/Makefile4
-rw-r--r--arch/s390/kernel/early.c74
-rw-r--r--arch/s390/kernel/entry.S18
-rw-r--r--arch/s390/kernel/entry64.S6
-rw-r--r--arch/s390/kernel/ftrace.c36
-rw-r--r--arch/s390/kernel/head.S1
-rw-r--r--arch/s390/kernel/head31.S1
-rw-r--r--arch/s390/kernel/head64.S9
-rw-r--r--arch/s390/kernel/ipl.c166
-rw-r--r--arch/s390/kernel/mcount.S147
-rw-r--r--arch/s390/kernel/mcount64.S78
-rw-r--r--arch/s390/kernel/ptrace.c11
-rw-r--r--arch/s390/kernel/setup.c10
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c39
-rw-r--r--arch/s390/kernel/suspend.c (renamed from arch/s390/power/swsusp.c)35
-rw-r--r--arch/s390/kernel/swsusp_asm64.S (renamed from arch/s390/power/swsusp_asm64.S)2
-rw-r--r--arch/s390/kernel/time.c3
-rw-r--r--arch/s390/kernel/vmlinux.lds.S87
-rw-r--r--arch/s390/mm/Makefile4
-rw-r--r--arch/s390/mm/fault.c13
-rw-r--r--arch/s390/mm/page-states.c6
-rw-r--r--arch/s390/mm/pgtable.c24
-rw-r--r--arch/s390/mm/vmem.c1
-rw-r--r--arch/s390/power/Makefile8
-rw-r--r--arch/s390/power/suspend.c40
-rw-r--r--arch/s390/power/swsusp_64.c17
-rw-r--r--arch/sh/kernel/signal_32.c2
-rw-r--r--arch/sh/kernel/signal_64.c2
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/dma-mapping.h145
-rw-r--r--arch/sparc/include/asm/irq_64.h4
-rw-r--r--arch/sparc/include/asm/pci.h3
-rw-r--r--arch/sparc/include/asm/pci_32.h105
-rw-r--r--arch/sparc/include/asm/pci_64.h88
-rw-r--r--arch/sparc/include/asm/spinlock_32.h12
-rw-r--r--arch/sparc/include/asm/spinlock_64.h28
-rw-r--r--arch/sparc/kernel/Makefile2
-rw-r--r--arch/sparc/kernel/dma.c175
-rw-r--r--arch/sparc/kernel/dma.h14
-rw-r--r--arch/sparc/kernel/iommu.c20
-rw-r--r--arch/sparc/kernel/ioport.c190
-rw-r--r--arch/sparc/kernel/pci.c2
-rw-r--r--arch/sparc/kernel/pci_sun4v.c30
-rw-r--r--arch/sparc/kernel/process_64.c4
-rw-r--r--arch/sparc/kernel/signal_32.c2
-rw-r--r--arch/sparc/kernel/signal_64.c3
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/Kconfig.cpu19
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/Makefile_32.cpu2
-rw-r--r--arch/x86/configs/i386_defconfig2
-rw-r--r--arch/x86/configs/x86_64_defconfig2
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c19
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/ia32/sys_ia32.c14
-rw-r--r--arch/x86/include/asm/alternative.h7
-rw-r--r--arch/x86/include/asm/amd_iommu.h1
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h50
-rw-r--r--arch/x86/include/asm/apic.h7
-rw-r--r--arch/x86/include/asm/apicdef.h3
-rw-r--r--arch/x86/include/asm/asm.h10
-rw-r--r--arch/x86/include/asm/cpufeature.h1
-rw-r--r--arch/x86/include/asm/desc.h13
-rw-r--r--arch/x86/include/asm/desc_defs.h6
-rw-r--r--arch/x86/include/asm/dma-mapping.h18
-rw-r--r--arch/x86/include/asm/dwarf2.h18
-rw-r--r--arch/x86/include/asm/ftrace.h7
-rw-r--r--arch/x86/include/asm/i387.h8
-rw-r--r--arch/x86/include/asm/io_apic.h13
-rw-r--r--arch/x86/include/asm/ioctls.h95
-rw-r--r--arch/x86/include/asm/ipcbuf.h29
-rw-r--r--arch/x86/include/asm/irqflags.h9
-rw-r--r--arch/x86/include/asm/lguest.h5
-rw-r--r--arch/x86/include/asm/mman.h14
-rw-r--r--arch/x86/include/asm/module.h15
-rw-r--r--arch/x86/include/asm/msgbuf.h40
-rw-r--r--arch/x86/include/asm/msr.h75
-rw-r--r--arch/x86/include/asm/nmi.h4
-rw-r--r--arch/x86/include/asm/param.h23
-rw-r--r--arch/x86/include/asm/paravirt.h746
-rw-r--r--arch/x86/include/asm/paravirt_types.h721
-rw-r--r--arch/x86/include/asm/perf_counter.h10
-rw-r--r--arch/x86/include/asm/processor.h28
-rw-r--r--arch/x86/include/asm/scatterlist.h27
-rw-r--r--arch/x86/include/asm/shmbuf.h52
-rw-r--r--arch/x86/include/asm/socket.h61
-rw-r--r--arch/x86/include/asm/sockios.h14
-rw-r--r--arch/x86/include/asm/stackprotector.h10
-rw-r--r--arch/x86/include/asm/system.h29
-rw-r--r--arch/x86/include/asm/termbits.h199
-rw-r--r--arch/x86/include/asm/termios.h115
-rw-r--r--arch/x86/include/asm/thread_info.h13
-rw-r--r--arch/x86/include/asm/topology.h47
-rw-r--r--arch/x86/include/asm/traps.h4
-rw-r--r--arch/x86/include/asm/types.h12
-rw-r--r--arch/x86/include/asm/ucontext.h8
-rw-r--r--arch/x86/include/asm/unistd_32.h2
-rw-r--r--arch/x86/include/asm/unistd_64.h6
-rw-r--r--arch/x86/kernel/acpi/boot.c105
-rw-r--r--arch/x86/kernel/alternative.c58
-rw-r--r--arch/x86/kernel/amd_iommu.c489
-rw-r--r--arch/x86/kernel/amd_iommu_init.c42
-rw-r--r--arch/x86/kernel/aperture_64.c6
-rw-r--r--arch/x86/kernel/apic/apic.c110
-rw-r--r--arch/x86/kernel/apic/es7000_32.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c324
-rw-r--r--arch/x86/kernel/apic/ipi.c2
-rw-r--r--arch/x86/kernel/apic/nmi.c20
-rw-r--r--arch/x86/kernel/apic/probe_64.c6
-rw-r--r--arch/x86/kernel/apm_32.c31
-rw-r--r--arch/x86/kernel/asm-offsets_64.c1
-rw-r--r--arch/x86/kernel/cpu/amd.c117
-rw-r--r--arch/x86/kernel/cpu/bugs.c10
-rw-r--r--arch/x86/kernel/cpu/bugs_64.c2
-rw-r--r--arch/x86/kernel/cpu/common.c52
-rw-r--r--arch/x86/kernel/cpu/cyrix.c19
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c5
-rw-r--r--arch/x86/kernel/cpu/intel.c11
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c148
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c8
-rw-r--r--arch/x86/kernel/cpu/mtrr/amd.c97
-rw-r--r--arch/x86/kernel/cpu/mtrr/centaur.c168
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c390
-rw-r--r--arch/x86/kernel/cpu/mtrr/cyrix.c94
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c304
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c135
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c455
-rw-r--r--arch/x86/kernel/cpu/mtrr/mtrr.h19
-rw-r--r--arch/x86/kernel/cpu/mtrr/state.c68
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c329
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c45
-rw-r--r--arch/x86/kernel/cpu/proc.c4
-rw-r--r--arch/x86/kernel/cpu/vmware.c18
-rw-r--r--arch/x86/kernel/doublefault_32.c4
-rw-r--r--arch/x86/kernel/ds.c6
-rw-r--r--arch/x86/kernel/dumpstack.c1
-rw-r--r--arch/x86/kernel/ftrace.c51
-rw-r--r--arch/x86/kernel/head_32.S1
-rw-r--r--arch/x86/kernel/irq_32.c5
-rw-r--r--arch/x86/kernel/mpparse.c10
-rw-r--r--arch/x86/kernel/msr.c61
-rw-r--r--arch/x86/kernel/paravirt.c3
-rw-r--r--arch/x86/kernel/pci-dma.c17
-rw-r--r--arch/x86/kernel/pci-gart_64.c5
-rw-r--r--arch/x86/kernel/pci-nommu.c29
-rw-r--r--arch/x86/kernel/pci-swiotlb.c25
-rw-r--r--arch/x86/kernel/ptrace.c13
-rw-r--r--arch/x86/kernel/signal.c2
-rw-r--r--arch/x86/kernel/smpboot.c3
-rw-r--r--arch/x86/kernel/step.c9
-rw-r--r--arch/x86/kernel/sys_x86_64.c8
-rw-r--r--arch/x86/kernel/tlb_uv.c4
-rw-r--r--arch/x86/kernel/traps.c23
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/lib/msr-reg-export.c5
-rw-r--r--arch/x86/lib/msr-reg.S102
-rw-r--r--arch/x86/lib/msr.c49
-rw-r--r--arch/x86/mm/kmemcheck/kmemcheck.c14
-rw-r--r--arch/x86/mm/pat.c2
-rw-r--r--arch/x86/oprofile/nmi_int.c404
-rw-r--r--arch/x86/oprofile/op_counter.h2
-rw-r--r--arch/x86/oprofile/op_model_amd.c372
-rw-r--r--arch/x86/oprofile/op_model_p4.c72
-rw-r--r--arch/x86/oprofile/op_model_ppro.c101
-rw-r--r--arch/x86/oprofile/op_x86_model.h59
-rw-r--r--arch/x86/pci/direct.c5
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--block/blk-core.c1
-rw-r--r--crypto/Kconfig30
-rw-r--r--crypto/Makefile5
-rw-r--r--crypto/ablkcipher.c29
-rw-r--r--crypto/aes_generic.c9
-rw-r--r--crypto/ahash.c336
-rw-r--r--crypto/algapi.c180
-rw-r--r--crypto/algboss.c5
-rw-r--r--crypto/ansi_cprng.c43
-rw-r--r--crypto/api.c54
-rw-r--r--crypto/authenc.c358
-rw-r--r--crypto/cryptd.c321
-rw-r--r--crypto/ctr.c2
-rw-r--r--crypto/gcm.c580
-rw-r--r--crypto/ghash-generic.c170
-rw-r--r--crypto/hmac.c302
-rw-r--r--crypto/internal.h28
-rw-r--r--crypto/pcompress.c6
-rw-r--r--crypto/rng.c2
-rw-r--r--crypto/sha1_generic.c41
-rw-r--r--crypto/sha256_generic.c100
-rw-r--r--crypto/sha512_generic.c48
-rw-r--r--crypto/shash.c270
-rw-r--r--crypto/tcrypt.c22
-rw-r--r--crypto/testmgr.c30
-rw-r--r--crypto/testmgr.h16
-rw-r--r--crypto/vmac.c678
-rw-r--r--crypto/xcbc.c370
-rw-r--r--drivers/acpi/blacklist.c5
-rw-r--r--drivers/ata/Kconfig21
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c143
-rw-r--r--drivers/ata/libata-acpi.c7
-rw-r--r--drivers/ata/libata-core.c44
-rw-r--r--drivers/ata/libata-eh.c146
-rw-r--r--drivers/ata/libata-pmp.c2
-rw-r--r--drivers/ata/libata-scsi.c159
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_atiixp.c1
-rw-r--r--drivers/ata/pata_cs5535.c3
-rw-r--r--drivers/ata/pata_octeon_cf.c4
-rw-r--r--drivers/ata/pata_platform.c8
-rw-r--r--drivers/ata/pata_rb532_cf.c2
-rw-r--r--drivers/ata/pata_rdc.c400
-rw-r--r--drivers/ata/pata_rz1000.c4
-rw-r--r--drivers/ata/sata_fsl.c1
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/ata/sata_sil.c13
-rw-r--r--drivers/ata/sata_sil24.c11
-rw-r--r--drivers/ata/sata_sis.c75
-rw-r--r--drivers/block/aoe/aoe.h2
-rw-r--r--drivers/block/aoe/aoeblk.c13
-rw-r--r--drivers/block/aoe/aoedev.c1
-rw-r--r--drivers/char/agp/intel-agp.c8
-rw-r--r--drivers/char/hvc_iucv.c2
-rw-r--r--drivers/char/hw_random/amd-rng.c4
-rw-r--r--drivers/char/hw_random/geode-rng.c3
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/random.c14
-rw-r--r--drivers/char/sysrq.c19
-rw-r--r--drivers/char/tpm/tpm_tis.c12
-rw-r--r--drivers/crypto/Kconfig15
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c3
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c73
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h25
-rw-r--r--drivers/crypto/mv_cesa.c606
-rw-r--r--drivers/crypto/mv_cesa.h119
-rw-r--r--drivers/crypto/padlock-sha.c329
-rw-r--r--drivers/crypto/talitos.c216
-rw-r--r--drivers/crypto/talitos.h1
-rw-r--r--drivers/firmware/dmi_scan.c77
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c19
-rw-r--r--drivers/gpu/drm/i915/intel_display.c16
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c1
-rw-r--r--drivers/gpu/drm/radeon/r300.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c65
-rw-r--r--drivers/gpu/drm/radeon/rs690.c64
-rw-r--r--drivers/gpu/drm/radeon/rv515.c2
-rw-r--r--drivers/ide/atiixp.c1
-rw-r--r--drivers/ide/ide-cs.c1
-rw-r--r--drivers/infiniband/core/iwcm.c1
-rw-r--r--drivers/infiniband/core/mad.c35
-rw-r--r--drivers/infiniband/core/mad_priv.h3
-rw-r--r--drivers/infiniband/core/multicast.c10
-rw-r--r--drivers/infiniband/core/sa_query.c7
-rw-r--r--drivers/infiniband/core/smi.c8
-rw-r--r--drivers/infiniband/core/uverbs_main.c10
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c6
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c24
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c5
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c37
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c68
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c21
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c52
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c8
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c47
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c12
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_config_reg.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c17
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c1
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c128
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c767
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h103
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c204
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c21
-rw-r--r--drivers/md/dm-log-userspace-base.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c6
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h8
-rw-r--r--drivers/net/gianfar.c2
-rw-r--r--drivers/net/mlx4/cq.c1
-rw-r--r--drivers/net/mlx4/eq.c77
-rw-r--r--drivers/net/mlx4/icm.c1
-rw-r--r--drivers/net/mlx4/main.c37
-rw-r--r--drivers/net/mlx4/mcg.c1
-rw-r--r--drivers/net/mlx4/mlx4.h7
-rw-r--r--drivers/net/mlx4/mr.c1
-rw-r--r--drivers/net/mlx4/pd.c1
-rw-r--r--drivers/net/mlx4/profile.c2
-rw-r--r--drivers/net/mlx4/qp.c2
-rw-r--r--drivers/net/mlx4/reset.c1
-rw-r--r--drivers/net/mlx4/srq.c2
-rw-r--r--drivers/net/tun.c22
-rw-r--r--drivers/oprofile/cpu_buffer.c16
-rw-r--r--drivers/oprofile/oprof.c71
-rw-r--r--drivers/oprofile/oprof.h3
-rw-r--r--drivers/oprofile/oprofile_files.c46
-rw-r--r--drivers/oprofile/oprofile_stats.c5
-rw-r--r--drivers/oprofile/oprofile_stats.h1
-rw-r--r--drivers/pci/intr_remapping.c14
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c25
-rw-r--r--drivers/s390/block/dasd.c26
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_alias.c5
-rw-r--r--drivers/s390/block/dasd_diag.c5
-rw-r--r--drivers/s390/block/dasd_eckd.c47
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/block/dasd_erp.c4
-rw-r--r--drivers/s390/block/dasd_fba.c9
-rw-r--r--drivers/s390/block/dasd_int.h11
-rw-r--r--drivers/s390/block/dasd_ioctl.c24
-rw-r--r--drivers/s390/block/xpram.c65
-rw-r--r--drivers/s390/char/Kconfig10
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/sclp.h4
-rw-r--r--drivers/s390/char/sclp_async.c224
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c4
-rw-r--r--drivers/s390/char/tape_block.c12
-rw-r--r--drivers/s390/char/tape_core.c18
-rw-r--r--drivers/s390/char/tape_std.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c4
-rw-r--r--drivers/s390/char/vmur.c19
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/chp.c3
-rw-r--r--drivers/s390/cio/chsc.h24
-rw-r--r--drivers/s390/cio/cio.c56
-rw-r--r--drivers/s390/cio/cio.h4
-rw-r--r--drivers/s390/cio/css.c32
-rw-r--r--drivers/s390/cio/device.c172
-rw-r--r--drivers/s390/cio/device_fsm.c22
-rw-r--r--drivers/s390/cio/qdio.h4
-rw-r--r--drivers/s390/cio/qdio_debug.c55
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c17
-rw-r--r--drivers/s390/kvm/kvm_virtio.c8
-rw-r--r--drivers/s390/net/netiucv.c9
-rw-r--r--drivers/s390/net/smsgiucv.c6
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c12
-rw-r--r--drivers/staging/comedi/comedi_fops.c8
-rw-r--r--drivers/staging/pohmelfs/inode.c9
-rw-r--r--fs/binfmt_elf.c28
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/char_dev.c40
-rw-r--r--fs/configfs/inode.c1
-rw-r--r--fs/dcache.c1
-rw-r--r--fs/ext2/acl.c8
-rw-r--r--fs/ext2/acl.h4
-rw-r--r--fs/ext2/file.c2
-rw-r--r--fs/ext2/namei.c4
-rw-r--r--fs/ext3/acl.c8
-rw-r--r--fs/ext3/acl.h4
-rw-r--r--fs/ext3/file.c2
-rw-r--r--fs/ext3/namei.c4
-rw-r--r--fs/ext4/acl.c8
-rw-r--r--fs/ext4/acl.h4
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/namei.c4
-rw-r--r--fs/fs-writeback.c1065
-rw-r--r--fs/fuse/inode.c1
-rw-r--r--fs/hugetlbfs/inode.c1
-rw-r--r--fs/jffs2/acl.c7
-rw-r--r--fs/jffs2/acl.h4
-rw-r--r--fs/jffs2/dir.c2
-rw-r--r--fs/jffs2/file.c2
-rw-r--r--fs/jffs2/symlink.c2
-rw-r--r--fs/jfs/acl.c7
-rw-r--r--fs/jfs/file.c2
-rw-r--r--fs/jfs/jfs_acl.h2
-rw-r--r--fs/jfs/namei.c2
-rw-r--r--fs/lockd/host.c14
-rw-r--r--fs/lockd/mon.c44
-rw-r--r--fs/locks.c4
-rw-r--r--fs/namei.c110
-rw-r--r--fs/nfs/Makefile3
-rw-r--r--fs/nfs/cache_lib.c140
-rw-r--r--fs/nfs/cache_lib.h27
-rw-r--r--fs/nfs/callback.c26
-rw-r--r--fs/nfs/client.c16
-rw-r--r--fs/nfs/direct.c3
-rw-r--r--fs/nfs/dns_resolve.c335
-rw-r--r--fs/nfs/dns_resolve.h14
-rw-r--r--fs/nfs/file.c49
-rw-r--r--fs/nfs/idmap.c6
-rw-r--r--fs/nfs/inode.c100
-rw-r--r--fs/nfs/internal.h39
-rw-r--r--fs/nfs/mount_clnt.c83
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs4namespace.c24
-rw-r--r--fs/nfs/nfs4proc.c40
-rw-r--r--fs/nfs/nfs4xdr.c1460
-rw-r--r--fs/nfs/super.c451
-rw-r--r--fs/nfs/write.c91
-rw-r--r--fs/nfsd/auth.c4
-rw-r--r--fs/nfsd/export.c14
-rw-r--r--fs/nfsd/nfs4idmap.c20
-rw-r--r--fs/nfsd/nfsctl.c21
-rw-r--r--fs/nfsd/nfssvc.c2
-rw-r--r--fs/nfsd/vfs.c3
-rw-r--r--fs/ocfs2/dlm/dlmfs.c1
-rw-r--r--fs/open.c12
-rw-r--r--fs/ramfs/inode.c1
-rw-r--r--fs/super.c5
-rw-r--r--fs/sync.c20
-rw-r--r--fs/sysfs/dir.c1
-rw-r--r--fs/sysfs/inode.c135
-rw-r--r--fs/sysfs/symlink.c2
-rw-r--r--fs/sysfs/sysfs.h12
-rw-r--r--fs/ubifs/budget.c16
-rw-r--r--fs/ubifs/super.c9
-rw-r--r--fs/xattr.c55
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c16
-rw-r--r--include/asm-generic/dma-mapping-common.h6
-rw-r--r--include/asm-generic/percpu.h3
-rw-r--r--include/crypto/algapi.h37
-rw-r--r--include/crypto/cryptd.h17
-rw-r--r--include/crypto/hash.h147
-rw-r--r--include/crypto/internal/hash.h147
-rw-r--r--include/crypto/sha.h20
-rw-r--r--include/crypto/vmac.h61
-rw-r--r--include/linux/ata.h36
-rw-r--r--include/linux/backing-dev.h55
-rw-r--r--include/linux/cpu.h17
-rw-r--r--include/linux/cred.h69
-rw-r--r--include/linux/crypto.h43
-rw-r--r--include/linux/dma-mapping.h5
-rw-r--r--include/linux/dmi.h13
-rw-r--r--include/linux/fips.h10
-rw-r--r--include/linux/fs.h29
-rw-r--r--include/linux/ftrace_event.h51
-rw-r--r--include/linux/hardirq.h10
-rw-r--r--include/linux/init.h12
-rw-r--r--include/linux/init_task.h11
-rw-r--r--include/linux/interrupt.h4
-rw-r--r--include/linux/irq.h18
-rw-r--r--include/linux/irqnr.h6
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/key.h8
-rw-r--r--include/linux/keyctl.h1
-rw-r--r--include/linux/kmemcheck.h7
-rw-r--r--include/linux/kmemleak.h18
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/lockdep.h18
-rw-r--r--include/linux/lsm_audit.h12
-rw-r--r--include/linux/module.h14
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/nfs_fs_sb.h9
-rw-r--r--include/linux/nmi.h19
-rw-r--r--include/linux/oprofile.h5
-rw-r--r--include/linux/pagemap.h4
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/percpu-defs.h8
-rw-r--r--include/linux/perf_counter.h7
-rw-r--r--include/linux/rcuclassic.h178
-rw-r--r--include/linux/rcupdate.h98
-rw-r--r--include/linux/rcupreempt.h127
-rw-r--r--include/linux/rcupreempt_trace.h97
-rw-r--r--include/linux/rcutree.h262
-rw-r--r--include/linux/ring_buffer.h24
-rw-r--r--include/linux/sched.h129
-rw-r--r--include/linux/security.h154
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/spinlock.h64
-rw-r--r--include/linux/spinlock_api_smp.h394
-rw-r--r--include/linux/sunrpc/cache.h40
-rw-r--r--include/linux/sunrpc/clnt.h43
-rw-r--r--include/linux/sunrpc/msg_prot.h17
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h20
-rw-r--r--include/linux/sunrpc/xdr.h10
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/swiotlb.h11
-rw-r--r--include/linux/syscalls.h131
-rw-r--r--include/linux/topology.h168
-rw-r--r--include/linux/tracepoint.h29
-rw-r--r--include/linux/tty.h4
-rw-r--r--include/linux/writeback.h23
-rw-r--r--include/linux/xattr.h1
-rw-r--r--include/sound/ac97_codec.h9
-rw-r--r--include/sound/asound.h2
-rw-r--r--include/sound/core.h51
-rw-r--r--include/sound/info.h4
-rw-r--r--include/sound/memalloc.h6
-rw-r--r--include/sound/pcm.h25
-rw-r--r--include/sound/sh_fsi.h83
-rw-r--r--include/sound/soc-dai.h40
-rw-r--r--include/sound/soc-dapm.h10
-rw-r--r--include/sound/soc.h49
-rw-r--r--include/sound/tlv.h14
-rw-r--r--include/sound/uda1380.h22
-rw-r--r--include/sound/version.h2
-rw-r--r--include/sound/wm8993.h44
-rw-r--r--include/sound/ymfpci.h1
-rw-r--r--include/trace/define_trace.h7
-rw-r--r--include/trace/events/module.h126
-rw-r--r--include/trace/events/sched.h107
-rw-r--r--include/trace/events/syscalls.h70
-rw-r--r--include/trace/ftrace.h93
-rw-r--r--include/trace/syscall.h48
-rw-r--r--init/Kconfig46
-rw-r--r--init/main.c4
-rw-r--r--kernel/Makefile4
-rw-r--r--kernel/acct.c8
-rw-r--r--kernel/cgroup.c1
-rw-r--r--kernel/cred.c293
-rw-r--r--kernel/exit.c5
-rw-r--r--kernel/fork.c11
-rw-r--r--kernel/futex.c47
-rw-r--r--kernel/irq/chip.c74
-rw-r--r--kernel/irq/handle.c5
-rw-r--r--kernel/irq/internals.h13
-rw-r--r--kernel/irq/manage.c102
-rw-r--r--kernel/irq/pm.c8
-rw-r--r--kernel/irq/resend.c3
-rw-r--r--kernel/irq/spurious.c1
-rw-r--r--kernel/kmod.c9
-rw-r--r--kernel/kprobes.c30
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/lockdep.c792
-rw-r--r--kernel/lockdep_internals.h2
-rw-r--r--kernel/lockdep_proc.c128
-rw-r--r--kernel/module.c11
-rw-r--r--kernel/perf_counter.c173
-rw-r--r--kernel/printk.c175
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/rcuclassic.c807
-rw-r--r--kernel/rcupdate.c44
-rw-r--r--kernel/rcupreempt.c1539
-rw-r--r--kernel/rcupreempt_trace.c334
-rw-r--r--kernel/rcutorture.c202
-rw-r--r--kernel/rcutree.c280
-rw-r--r--kernel/rcutree.h253
-rw-r--r--kernel/rcutree_plugin.h532
-rw-r--r--kernel/rcutree_trace.c88
-rw-r--r--kernel/sched.c1232
-rw-r--r--kernel/sched_cpupri.c30
-rw-r--r--kernel/sched_debug.c4
-rw-r--r--kernel/sched_fair.c84
-rw-r--r--kernel/sched_features.h2
-rw-r--r--kernel/sched_rt.c62
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/spinlock.c230
-rw-r--r--kernel/sysctl.c25
-rw-r--r--kernel/timer.c3
-rw-r--r--kernel/trace/Kconfig13
-rw-r--r--kernel/trace/blktrace.c12
-rw-r--r--kernel/trace/ftrace.c107
-rw-r--r--kernel/trace/kmemtrace.c149
-rw-r--r--kernel/trace/ring_buffer.c1112
-rw-r--r--kernel/trace/trace.c679
-rw-r--r--kernel/trace/trace.h76
-rw-r--r--kernel/trace/trace_boot.c16
-rw-r--r--kernel/trace/trace_events.c146
-rw-r--r--kernel/trace/trace_events_filter.c261
-rw-r--r--kernel/trace/trace_export.c28
-rw-r--r--kernel/trace/trace_functions.c4
-rw-r--r--kernel/trace/trace_functions_graph.c166
-rw-r--r--kernel/trace/trace_irqsoff.c3
-rw-r--r--kernel/trace/trace_mmiotrace.c10
-rw-r--r--kernel/trace/trace_power.c22
-rw-r--r--kernel/trace/trace_sched_switch.c59
-rw-r--r--kernel/trace/trace_sched_wakeup.c7
-rw-r--r--kernel/trace/trace_selftest.c1
-rw-r--r--kernel/trace/trace_stack.c43
-rw-r--r--kernel/trace/trace_stat.c17
-rw-r--r--kernel/trace/trace_stat.h2
-rw-r--r--kernel/trace/trace_syscalls.c471
-rw-r--r--kernel/trace/trace_workqueue.c32
-rw-r--r--kernel/tracepoint.c50
-rw-r--r--kernel/workqueue.c9
-rw-r--r--lib/Kconfig.debug17
-rw-r--r--lib/is_single_threaded.c61
-rw-r--r--lib/swiotlb.c124
-rw-r--r--mm/Makefile2
-rw-r--r--mm/backing-dev.c381
-rw-r--r--mm/bootmem.c6
-rw-r--r--mm/kmemleak.c336
-rw-r--r--mm/page-writeback.c182
-rw-r--r--mm/pdflush.c269
-rw-r--r--mm/shmem.c6
-rw-r--r--mm/shmem_acl.c11
-rw-r--r--mm/swap_state.c1
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/ipv4/tcp_cong.c4
-rw-r--r--net/sunrpc/Makefile2
-rw-r--r--net/sunrpc/addr.c364
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c12
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c7
-rw-r--r--net/sunrpc/cache.c622
-rw-r--r--net/sunrpc/clnt.c60
-rw-r--r--net/sunrpc/rpc_pipe.c685
-rw-r--r--net/sunrpc/rpcb_clnt.c420
-rw-r--r--net/sunrpc/sunrpc_syms.c2
-rw-r--r--net/sunrpc/svcauth_unix.c14
-rw-r--r--net/sunrpc/timer.c45
-rw-r--r--net/sunrpc/xdr.c12
-rw-r--r--net/sunrpc/xprtrdma/transport.c48
-rw-r--r--net/sunrpc/xprtsock.c287
-rwxr-xr-xscripts/recordmcount.pl1
-rw-r--r--security/Makefile4
-rw-r--r--security/capability.c63
-rw-r--r--security/commoncap.c4
-rw-r--r--security/integrity/ima/ima_main.c6
-rw-r--r--security/keys/Makefile1
-rw-r--r--security/keys/compat.c3
-rw-r--r--security/keys/gc.c194
-rw-r--r--security/keys/internal.h10
-rw-r--r--security/keys/key.c24
-rw-r--r--security/keys/keyctl.c161
-rw-r--r--security/keys/keyring.c85
-rw-r--r--security/keys/proc.c93
-rw-r--r--security/keys/process_keys.c69
-rw-r--r--security/keys/sysctl.c28
-rw-r--r--security/lsm_audit.c2
-rw-r--r--security/security.c62
-rw-r--r--security/selinux/avc.c205
-rw-r--r--security/selinux/hooks.c318
-rw-r--r--security/selinux/include/av_inherit.h1
-rw-r--r--security/selinux/include/av_perm_to_string.h1
-rw-r--r--security/selinux/include/av_permissions.h23
-rw-r--r--security/selinux/include/avc.h55
-rw-r--r--security/selinux/include/class_to_string.h1
-rw-r--r--security/selinux/include/flask.h1
-rw-r--r--security/selinux/include/netlabel.h4
-rw-r--r--security/selinux/include/xfrm.h8
-rw-r--r--security/selinux/netlabel.c2
-rw-r--r--security/selinux/ss/services.c142
-rw-r--r--security/selinux/xfrm.c4
-rw-r--r--security/smack/smack.h2
-rw-r--r--security/smack/smack_access.c11
-rw-r--r--security/smack/smack_lsm.c65
-rw-r--r--security/tomoyo/common.c30
-rw-r--r--security/tomoyo/common.h2
-rw-r--r--security/tomoyo/domain.c42
-rw-r--r--security/tomoyo/tomoyo.c27
-rw-r--r--security/tomoyo/tomoyo.h3
-rw-r--r--sound/Kconfig28
-rw-r--r--sound/arm/pxa2xx-ac97.c10
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c3
-rw-r--r--sound/core/Kconfig4
-rw-r--r--sound/core/Makefile2
-rw-r--r--sound/core/control.c34
-rw-r--r--sound/core/info.c8
-rw-r--r--sound/core/init.c8
-rw-r--r--sound/core/memalloc.c4
-rw-r--r--sound/core/misc.c75
-rw-r--r--sound/core/oss/mixer_oss.c3
-rw-r--r--sound/core/oss/pcm_oss.c12
-rw-r--r--sound/core/pcm.c26
-rw-r--r--sound/core/pcm_lib.c12
-rw-r--r--sound/core/pcm_memory.c2
-rw-r--r--sound/core/pcm_native.c64
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c14
-rw-r--r--sound/core/seq/seq_midi.c7
-rw-r--r--sound/core/vmaster.c8
-rw-r--r--sound/drivers/dummy.c700
-rw-r--r--sound/isa/cmi8330.c86
-rw-r--r--sound/oss/midibuf.c7
-rw-r--r--sound/oss/vwsnd.c6
-rw-r--r--sound/pci/Kconfig4
-rw-r--r--sound/pci/ali5451/ali5451.c65
-rw-r--r--sound/pci/azt3328.c1116
-rw-r--r--sound/pci/azt3328.h103
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.h2
-rw-r--r--sound/pci/ctxfi/ct20k2reg.h9
-rw-r--r--sound/pci/ctxfi/ctamixer.c20
-rw-r--r--sound/pci/ctxfi/ctatc.c77
-rw-r--r--sound/pci/ctxfi/ctdaio.c30
-rw-r--r--sound/pci/ctxfi/cthw20k1.c22
-rw-r--r--sound/pci/ctxfi/cthw20k2.c73
-rw-r--r--sound/pci/ctxfi/ctmixer.c8
-rw-r--r--sound/pci/ctxfi/ctpcm.c6
-rw-r--r--sound/pci/ctxfi/ctresource.c4
-rw-r--r--sound/pci/ctxfi/ctsrc.c10
-rw-r--r--sound/pci/ctxfi/ctvmem.c6
-rw-r--r--sound/pci/hda/Kconfig27
-rw-r--r--sound/pci/hda/Makefile4
-rw-r--r--sound/pci/hda/hda_beep.c4
-rw-r--r--sound/pci/hda/hda_codec.c68
-rw-r--r--sound/pci/hda/hda_codec.h10
-rw-r--r--sound/pci/hda/hda_generic.c18
-rw-r--r--sound/pci/hda/hda_hwdep.c236
-rw-r--r--sound/pci/hda/hda_intel.c74
-rw-r--r--sound/pci/hda/hda_local.h14
-rw-r--r--sound/pci/hda/hda_proc.c7
-rw-r--r--sound/pci/hda/patch_analog.c131
-rw-r--r--sound/pci/hda/patch_atihdmi.c3
-rw-r--r--sound/pci/hda/patch_ca0110.c3
-rw-r--r--sound/pci/hda/patch_cirrus.c1194
-rw-r--r--sound/pci/hda/patch_cmedia.c3
-rw-r--r--sound/pci/hda/patch_conexant.c479
-rw-r--r--sound/pci/hda/patch_intelhdmi.c104
-rw-r--r--sound/pci/hda/patch_nvhdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c4144
-rw-r--r--sound/pci/hda/patch_sigmatel.c1206
-rw-r--r--sound/pci/hda/patch_via.c3
-rw-r--r--sound/pci/ice1712/ice1712.h9
-rw-r--r--sound/pci/ice1712/ice1724.c112
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c46
-rw-r--r--sound/pci/oxygen/oxygen_io.c11
-rw-r--r--sound/pci/rme9652/hdsp.c39
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c20
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile3
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c138
-rw-r--r--sound/soc/au1x/psc-ac97.c129
-rw-r--r--sound/soc/au1x/psc.h1
-rw-r--r--sound/soc/blackfin/Kconfig31
-rw-r--r--sound/soc/blackfin/Makefile8
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c10
-rw-r--r--sound/soc/blackfin/bf5xx-ad1836.c128
-rw-r--r--sound/soc/blackfin/bf5xx-ad1938.c142
-rw-r--r--sound/soc/blackfin/bf5xx-ad73311.c16
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.c8
-rw-r--r--sound/soc/blackfin/bf5xx-ssm2602.c16
-rw-r--r--sound/soc/blackfin/bf5xx-tdm-pcm.c330
-rw-r--r--sound/soc/blackfin/bf5xx-tdm-pcm.h21
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.c343
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.h14
-rw-r--r--sound/soc/codecs/Kconfig44
-rw-r--r--sound/soc/codecs/Makefile26
-rw-r--r--sound/soc/codecs/ad1836.c446
-rw-r--r--sound/soc/codecs/ad1836.h64
-rw-r--r--sound/soc/codecs/ad1938.c682
-rw-r--r--sound/soc/codecs/ad1938.h100
-rw-r--r--sound/soc/codecs/ak4535.c16
-rw-r--r--sound/soc/codecs/ak4642.c502
-rw-r--r--sound/soc/codecs/ak4642.h20
-rw-r--r--sound/soc/codecs/cs4270.c27
-rw-r--r--sound/soc/codecs/cx20442.c501
-rw-r--r--sound/soc/codecs/cx20442.h20
-rw-r--r--sound/soc/codecs/max9877.c308
-rw-r--r--sound/soc/codecs/max9877.h37
-rw-r--r--sound/soc/codecs/spdif_transciever.c3
-rw-r--r--sound/soc/codecs/stac9766.c4
-rw-r--r--sound/soc/codecs/tlv320aic3x.c233
-rw-r--r--sound/soc/codecs/tlv320aic3x.h2
-rw-r--r--sound/soc/codecs/twl4030.c260
-rw-r--r--sound/soc/codecs/twl4030.h2
-rw-r--r--sound/soc/codecs/uda134x.c2
-rw-r--r--sound/soc/codecs/uda1380.c313
-rw-r--r--sound/soc/codecs/uda1380.h8
-rw-r--r--sound/soc/codecs/wm8350.c51
-rw-r--r--sound/soc/codecs/wm8400.c26
-rw-r--r--sound/soc/codecs/wm8510.c175
-rw-r--r--sound/soc/codecs/wm8523.c699
-rw-r--r--sound/soc/codecs/wm8523.h160
-rw-r--r--sound/soc/codecs/wm8580.c211
-rw-r--r--sound/soc/codecs/wm8728.c111
-rw-r--r--sound/soc/codecs/wm8731.c218
-rw-r--r--sound/soc/codecs/wm8750.c154
-rw-r--r--sound/soc/codecs/wm8753.c35
-rw-r--r--sound/soc/codecs/wm8776.c744
-rw-r--r--sound/soc/codecs/wm8776.h51
-rw-r--r--sound/soc/codecs/wm8900.c345
-rw-r--r--sound/soc/codecs/wm8903.c267
-rw-r--r--sound/soc/codecs/wm8940.c160
-rw-r--r--sound/soc/codecs/wm8960.c233
-rw-r--r--sound/soc/codecs/wm8961.c1265
-rw-r--r--sound/soc/codecs/wm8961.h866
-rw-r--r--sound/soc/codecs/wm8971.c127
-rw-r--r--sound/soc/codecs/wm8974.c808
-rw-r--r--sound/soc/codecs/wm8974.h99
-rw-r--r--sound/soc/codecs/wm8988.c180
-rw-r--r--sound/soc/codecs/wm8990.c194
-rw-r--r--sound/soc/codecs/wm8993.c1675
-rw-r--r--sound/soc/codecs/wm8993.h2132
-rw-r--r--sound/soc/codecs/wm9081.c317
-rw-r--r--sound/soc/codecs/wm9705.c2
-rw-r--r--sound/soc/codecs/wm_hubs.c743
-rw-r--r--sound/soc/codecs/wm_hubs.h24
-rw-r--r--sound/soc/davinci/Kconfig33
-rw-r--r--sound/soc/davinci/Makefile5
-rw-r--r--sound/soc/davinci/davinci-evm.c140
-rw-r--r--sound/soc/davinci/davinci-i2s.c340
-rw-r--r--sound/soc/davinci/davinci-mcasp.c973
-rw-r--r--sound/soc/davinci/davinci-mcasp.h60
-rw-r--r--sound/soc/davinci/davinci-pcm.c10
-rw-r--r--sound/soc/davinci/davinci-pcm.h19
-rw-r--r--sound/soc/fsl/mpc5200_dma.c17
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c3
-rw-r--r--sound/soc/imx/Kconfig21
-rw-r--r--sound/soc/imx/Makefile10
-rw-r--r--sound/soc/imx/mx1_mx2-pcm.c488
-rw-r--r--sound/soc/imx/mx1_mx2-pcm.h26
-rw-r--r--sound/soc/imx/mx27vis_wm8974.c317
-rw-r--r--sound/soc/imx/mxc-ssi.c868
-rw-r--r--sound/soc/imx/mxc-ssi.h238
-rw-r--r--sound/soc/omap/Kconfig15
-rw-r--r--sound/soc/omap/Makefile4
-rw-r--r--sound/soc/omap/ams-delta.c646
-rw-r--r--sound/soc/omap/n810.c12
-rw-r--r--sound/soc/omap/omap-mcbsp.c123
-rw-r--r--sound/soc/omap/omap-mcbsp.h4
-rw-r--r--sound/soc/omap/omap-pcm.c53
-rw-r--r--sound/soc/omap/omap-pcm.h2
-rw-r--r--sound/soc/omap/sdp3430.c18
-rw-r--r--sound/soc/omap/zoom2.c314
-rw-r--r--sound/soc/pxa/magician.c56
-rw-r--r--sound/soc/pxa/palm27x.c204
-rw-r--r--sound/soc/pxa/pxa-ssp.c77
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c12
-rw-r--r--sound/soc/s3c24xx/Kconfig35
-rw-r--r--sound/soc/s3c24xx/Makefile9
-rw-r--r--sound/soc/s3c24xx/neo1973_gta02_wm8753.c498
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.c17
-rw-r--r--sound/soc/s3c24xx/s3c2443-ac97.c20
-rw-r--r--sound/soc/s3c24xx/s3c24xx-i2s.c5
-rw-r--r--sound/soc/s3c24xx/s3c24xx-pcm.c2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.c394
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.h22
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec_hermes.c153
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c137
-rw-r--r--sound/soc/s6000/s6105-ipcam.c12
-rw-r--r--sound/soc/sh/Kconfig15
-rw-r--r--sound/soc/sh/Makefile4
-rw-r--r--sound/soc/sh/fsi-ak4642.c107
-rw-r--r--sound/soc/sh/fsi.c1004
-rw-r--r--sound/soc/soc-cache.c218
-rw-r--r--sound/soc/soc-core.c148
-rw-r--r--sound/soc/soc-dapm.c498
-rw-r--r--sound/soc/soc-jack.c24
-rw-r--r--sound/soc/txx9/txx9aclc.c10
-rw-r--r--sound/sound_core.c100
-rw-r--r--sound/usb/usbaudio.c6
-rw-r--r--sound/usb/usbmidi.c290
-rw-r--r--sound/usb/usbmixer.c73
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-report.txt13
-rw-r--r--tools/perf/Makefile39
-rw-r--r--tools/perf/builtin-annotate.c472
-rw-r--r--tools/perf/builtin-help.c1
-rw-r--r--tools/perf/builtin-record.c38
-rw-r--r--tools/perf/builtin-report.c719
-rw-r--r--tools/perf/builtin-stat.c239
-rw-r--r--tools/perf/builtin-top.c66
-rw-r--r--tools/perf/builtin-trace.c297
-rw-r--r--tools/perf/builtin.h1
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/util/abspath.c3
-rw-r--r--tools/perf/util/cache.h1
-rw-r--r--tools/perf/util/callchain.c2
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/color.c16
-rw-r--r--tools/perf/util/color.h3
-rw-r--r--tools/perf/util/config.c22
-rw-r--r--tools/perf/util/debug.c95
-rw-r--r--tools/perf/util/debug.h8
-rw-r--r--tools/perf/util/event.h96
-rw-r--r--tools/perf/util/exec_cmd.c1
-rw-r--r--tools/perf/util/header.c37
-rw-r--r--tools/perf/util/header.h4
-rw-r--r--tools/perf/util/map.c97
-rw-r--r--tools/perf/util/module.c4
-rw-r--r--tools/perf/util/parse-events.c147
-rw-r--r--tools/perf/util/parse-events.h17
-rw-r--r--tools/perf/util/parse-options.c22
-rw-r--r--tools/perf/util/path.c25
-rw-r--r--tools/perf/util/run-command.c6
-rw-r--r--tools/perf/util/symbol.c199
-rw-r--r--tools/perf/util/symbol.h14
-rw-r--r--tools/perf/util/thread.c175
-rw-r--r--tools/perf/util/thread.h21
-rw-r--r--tools/perf/util/trace-event-info.c539
-rw-r--r--tools/perf/util/trace-event-parse.c2942
-rw-r--r--tools/perf/util/trace-event-read.c512
-rw-r--r--tools/perf/util/trace-event.h240
-rw-r--r--tools/perf/util/util.h6
-rw-r--r--tools/perf/util/values.c230
-rw-r--r--tools/perf/util/values.h27
1005 files changed, 66767 insertions, 26279 deletions
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 9f711d2df91b..d2b85237c76e 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -743,3 +743,80 @@ Revised:
RCU, realtime RCU, sleepable RCU, performance.
"
}
+
+@article{PaulEMcKenney2008RCUOSR
+,author="Paul E. McKenney and Jonathan Walpole"
+,title="Introducing technology into the {Linux} kernel: a case study"
+,Year="2008"
+,journal="SIGOPS Oper. Syst. Rev."
+,volume="42"
+,number="5"
+,pages="4--17"
+,issn="0163-5980"
+,doi={http://doi.acm.org/10.1145/1400097.1400099}
+,publisher="ACM"
+,address="New York, NY, USA"
+,annotation={
+ Linux changed RCU to a far greater degree than RCU has changed Linux.
+}
+}
+
+@unpublished{PaulEMcKenney2008HierarchicalRCU
+,Author="Paul E. McKenney"
+,Title="Hierarchical {RCU}"
+,month="November"
+,day="3"
+,year="2008"
+,note="Available:
+\url{http://lwn.net/Articles/305782/}
+[Viewed November 6, 2008]"
+,annotation="
+ RCU with combining-tree-based grace-period detection,
+ permitting it to handle thousands of CPUs.
+"
+}
+
+@conference{PaulEMcKenney2009MaliciousURCU
+,Author="Paul E. McKenney"
+,Title="Using a Malicious User-Level {RCU} to Torture {RCU}-Based Algorithms"
+,Booktitle="linux.conf.au 2009"
+,month="January"
+,year="2009"
+,address="Hobart, Australia"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/urcutorture.2009.01.22a.pdf}
+[Viewed February 2, 2009]"
+,annotation="
+ Realtime RCU and torture-testing RCU uses.
+"
+}
+
+@unpublished{MathieuDesnoyers2009URCU
+,Author="Mathieu Desnoyers"
+,Title="[{RFC} git tree] Userspace {RCU} (urcu) for {Linux}"
+,month="February"
+,day="5"
+,year="2009"
+,note="Available:
+\url{http://lkml.org/lkml/2009/2/5/572}
+\url{git://lttng.org/userspace-rcu.git}
+[Viewed February 20, 2009]"
+,annotation="
+ Mathieu Desnoyers's user-space RCU implementation.
+ git://lttng.org/userspace-rcu.git
+"
+}
+
+@unpublished{PaulEMcKenney2009BloatWatchRCU
+,Author="Paul E. McKenney"
+,Title="{RCU}: The {Bloatwatch} Edition"
+,month="March"
+,day="17"
+,year="2009"
+,note="Available:
+\url{http://lwn.net/Articles/323929/}
+[Viewed March 20, 2009]"
+,annotation="
+ Uniprocessor assumptions allow simplified RCU implementation.
+"
+}
diff --git a/Documentation/RCU/UP.txt b/Documentation/RCU/UP.txt
index aab4a9ec3931..90ec5341ee98 100644
--- a/Documentation/RCU/UP.txt
+++ b/Documentation/RCU/UP.txt
@@ -2,14 +2,13 @@ RCU on Uniprocessor Systems
A common misconception is that, on UP systems, the call_rcu() primitive
-may immediately invoke its function, and that the synchronize_rcu()
-primitive may return immediately. The basis of this misconception
+may immediately invoke its function. The basis of this misconception
is that since there is only one CPU, it should not be necessary to
wait for anything else to get done, since there are no other CPUs for
anything else to be happening on. Although this approach will -sort- -of-
work a surprising amount of the time, it is a very bad idea in general.
-This document presents three examples that demonstrate exactly how bad an
-idea this is.
+This document presents three examples that demonstrate exactly how bad
+an idea this is.
Example 1: softirq Suicide
@@ -82,11 +81,18 @@ Quick Quiz #2: What locking restriction must RCU callbacks respect?
Summary
-Permitting call_rcu() to immediately invoke its arguments or permitting
-synchronize_rcu() to immediately return breaks RCU, even on a UP system.
-So do not do it! Even on a UP system, the RCU infrastructure -must-
-respect grace periods, and -must- invoke callbacks from a known environment
-in which no locks are held.
+Permitting call_rcu() to immediately invoke its arguments breaks RCU,
+even on a UP system. So do not do it! Even on a UP system, the RCU
+infrastructure -must- respect grace periods, and -must- invoke callbacks
+from a known environment in which no locks are held.
+
+It -is- safe for synchronize_sched() and synchronize_rcu_bh() to return
+immediately on a UP system. It is also safe for synchronize_rcu()
+to return immediately on UP systems, except when running preemptable
+RCU.
+
+Quick Quiz #3: Why can't synchronize_rcu() return immediately on
+ UP systems running preemptable RCU?
Answer to Quick Quiz #1:
@@ -117,3 +123,13 @@ Answer to Quick Quiz #2:
callbacks acquire locks directly. However, a great many RCU
callbacks do acquire locks -indirectly-, for example, via
the kfree() primitive.
+
+Answer to Quick Quiz #3:
+ Why can't synchronize_rcu() return immediately on UP systems
+ running preemptable RCU?
+
+ Because some other task might have been preempted in the middle
+ of an RCU read-side critical section. If synchronize_rcu()
+ simply immediately returned, it would prematurely signal the
+ end of the grace period, which would come as a nasty shock to
+ that other thread when it started running again.
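To make the summary above concrete, here is a minimal sketch -- not part of
the patch, with all names hypothetical -- of why an immediate callback
invocation is dangerous even on a single CPU: the updater may still hold a
lock that the callback itself needs.

	/* Illustrative sketch only; my_data, my_lock, my_reclaim and
	 * my_update are hypothetical names, not kernel APIs. */
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_data {
		struct rcu_head rcu;
		int value;
	};

	static DEFINE_SPINLOCK(my_lock);

	static void my_reclaim(struct rcu_head *head)
	{
		struct my_data *p = container_of(head, struct my_data, rcu);

		spin_lock(&my_lock);	/* Also held by my_update() below. */
		/* ... drop any bookkeeping that still references p ... */
		spin_unlock(&my_lock);
		kfree(p);
	}

	static void my_update(struct my_data *old)
	{
		spin_lock(&my_lock);
		/* ... unlink old from the data structure ... */
		call_rcu(&old->rcu, my_reclaim);	/* Invoking my_reclaim()
							 * right here would
							 * self-deadlock on
							 * my_lock, UP or not. */
		spin_unlock(&my_lock);
	}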
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index accfe2f5247d..51525a30e8b4 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -11,7 +11,10 @@ over a rather long period of time, but improvements are always welcome!
structure is updated more than about 10% of the time, then
you should strongly consider some other approach, unless
detailed performance measurements show that RCU is nonetheless
- the right tool for the job.
+ the right tool for the job. Yes, you might think of RCU
+ as simply cutting overhead off of the readers and imposing it
+ on the writers. That is exactly why normal uses of RCU will
+ do much more reading than updating.
Another exception is where performance is not an issue, and RCU
provides a simpler implementation. An example of this situation
@@ -240,10 +243,11 @@ over a rather long period of time, but improvements are always welcome!
instead need to use synchronize_irq() or synchronize_sched().
12. Any lock acquired by an RCU callback must be acquired elsewhere
- with irq disabled, e.g., via spin_lock_irqsave(). Failing to
- disable irq on a given acquisition of that lock will result in
- deadlock as soon as the RCU callback happens to interrupt that
- acquisition's critical section.
+ with softirq disabled, e.g., via spin_lock_irqsave(),
+ spin_lock_bh(), etc. Failing to disable irq on a given
+ acquisition of that lock will result in deadlock as soon as the
+ RCU callback happens to interrupt that acquisition's critical
+ section.
13. RCU callbacks can be and are executed in parallel. In many cases,
the callback code simply wrappers around kfree(), so that this
@@ -310,3 +314,9 @@ over a rather long period of time, but improvements are always welcome!
Because these primitives only wait for pre-existing readers,
it is the caller's responsibility to guarantee safety to
any subsequent readers.
+
+16. The various RCU read-side primitives do -not- contain memory
+ barriers. The CPU (and in some cases, the compiler) is free
+ to reorder code into and out of RCU read-side critical sections.
+ It is the responsibility of the RCU update-side primitives to
+ deal with this.
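A minimal sketch of checklist item 12 above (illustrative names only, not
part of the patch): because RCU callbacks run from softirq context, every
other acquisition of a lock that a callback takes must disable at least
bottom halves.

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(cb_lock);	/* Hypothetical lock shared with a callback. */

	static void my_rcu_callback(struct rcu_head *unused)
	{
		spin_lock(&cb_lock);		/* Runs in softirq context. */
		/* ... clean up ... */
		spin_unlock(&cb_lock);
	}

	static void some_process_context_path(void)
	{
		/* A plain spin_lock(&cb_lock) here could be interrupted by
		 * the callback above and self-deadlock; disabling bottom
		 * halves prevents that. */
		spin_lock_bh(&cb_lock);
		/* ... */
		spin_unlock_bh(&cb_lock);
	}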
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 7aa2002ade77..2a23523ce471 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -36,7 +36,7 @@ o How can the updater tell when a grace period has completed
executed in user mode, or executed in the idle loop, we can
safely free up that item.
- Preemptible variants of RCU (CONFIG_PREEMPT_RCU) get the
+ Preemptible variants of RCU (CONFIG_TREE_PREEMPT_RCU) get the
same effect, but require that the readers manipulate CPU-local
counters. These counters allow limited types of blocking
within RCU read-side critical sections. SRCU also uses
@@ -79,10 +79,10 @@ o I hear that RCU is patented? What is with that?
o I hear that RCU needs work in order to support realtime kernels?
This work is largely completed. Realtime-friendly RCU can be
- enabled via the CONFIG_PREEMPT_RCU kernel configuration parameter.
- However, work is in progress for enabling priority boosting of
- preempted RCU read-side critical sections. This is needed if you
- have CPU-bound realtime threads.
+ enabled via the CONFIG_TREE_PREEMPT_RCU kernel configuration
+ parameter. However, work is in progress for enabling priority
+ boosting of preempted RCU read-side critical sections. This is
+ needed if you have CPU-bound realtime threads.
o Where can I find more information on RCU?
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
index 909602d409bb..e439a0edee22 100644
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -170,6 +170,13 @@ module invokes call_rcu() from timers, you will need to first cancel all
the timers, and only then invoke rcu_barrier() to wait for any remaining
RCU callbacks to complete.
+Of course, if your module uses call_rcu_bh(), you will need to invoke
+rcu_barrier_bh() before unloading. Similarly, if your module uses
+call_rcu_sched(), you will need to invoke rcu_barrier_sched() before
+unloading. If your module uses call_rcu(), call_rcu_bh(), -and-
+call_rcu_sched(), then you will need to invoke each of rcu_barrier(),
+rcu_barrier_bh(), and rcu_barrier_sched().
+
Implementing rcu_barrier()
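As a sketch of the unload sequence described in the rcubarrier.txt hunk
above (hypothetical module, not part of the patch), a module that used all
three call_rcu() flavors would wait for each of them on exit:

	#include <linux/module.h>
	#include <linux/rcupdate.h>

	static void __exit my_module_exit(void)
	{
		/* First stop posting new callbacks (cancel timers, etc.),
		 * then wait for each flavor the module actually used. */
		rcu_barrier();		/* outstanding call_rcu() callbacks */
		rcu_barrier_bh();	/* outstanding call_rcu_bh() callbacks */
		rcu_barrier_sched();	/* outstanding call_rcu_sched() callbacks */
	}
	module_exit(my_module_exit);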
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index a342b6e1cc10..9dba3bb90e60 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -76,8 +76,10 @@ torture_type The type of RCU to test: "rcu" for the rcu_read_lock() API,
"rcu_sync" for rcu_read_lock() with synchronous reclamation,
"rcu_bh" for the rcu_read_lock_bh() API, "rcu_bh_sync" for
rcu_read_lock_bh() with synchronous reclamation, "srcu" for
- the "srcu_read_lock()" API, and "sched" for the use of
- preempt_disable() together with synchronize_sched().
+ the "srcu_read_lock()" API, "sched" for the use of
+ preempt_disable() together with synchronize_sched(),
+ and "sched_expedited" for the use of preempt_disable()
+ with synchronize_sched_expedited().
verbose Enable debug printk()s. Default is disabled.
@@ -162,6 +164,23 @@ of the "old" and "current" counters for the corresponding CPU. The
"idx" value maps the "old" and "current" values to the underlying array,
and is useful for debugging.
+Similarly, sched_expedited RCU provides the following:
+
+ sched_expedited-torture: rtc: d0000000016c1880 ver: 1090796 tfle: 0 rta: 1090796 rtaf: 0 rtf: 1090787 rtmbe: 0 nt: 27713319
+ sched_expedited-torture: Reader Pipe: 12660320201 95875 0 0 0 0 0 0 0 0 0
+ sched_expedited-torture: Reader Batch: 12660424885 0 0 0 0 0 0 0 0 0 0
+ sched_expedited-torture: Free-Block Circulation: 1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0
+ state: -1 / 0:0 3:0 4:0
+
+As before, the first four lines are similar to those for RCU.
+The last line shows the task-migration state. The first number is
+-1 if synchronize_sched_expedited() is idle, -2 if in the process of
+posting wakeups to the migration kthreads, and N when waiting on CPU N.
+Each of the colon-separated fields following the "/" is a CPU:state pair.
+Valid states are "0" for idle, "1" for waiting for quiescent state,
+"2" for passed through quiescent state, and "3" when a race with a
+CPU-hotplug event forces use of the synchronize_sched() primitive.
+
USAGE
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 02cced183b2d..187bbf10c923 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -191,8 +191,7 @@ rcu/rcuhier (which displays the struct rcu_node hierarchy).
The output of "cat rcu/rcudata" looks as follows:
-rcu:
-rcu:
+rcu_sched:
0 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=10951/1 dn=0 df=1101 of=0 ri=36 ql=0 b=10
1 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=16117/1 dn=0 df=1015 of=0 ri=0 ql=0 b=10
2 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1445/1 dn=0 df=1839 of=0 ri=0 ql=0 b=10
@@ -306,7 +305,7 @@ comma-separated-variable spreadsheet format.
The output of "cat rcu/rcugp" looks as follows:
-rcu: completed=33062 gpnum=33063
+rcu_sched: completed=33062 gpnum=33063
rcu_bh: completed=464 gpnum=464
Again, this output is for both "rcu" and "rcu_bh". The fields are
@@ -413,7 +412,7 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
The output of "cat rcu/rcu_pending" looks as follows:
-rcu:
+rcu_sched:
0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 96170824a717..e41a7fecf0d3 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -136,10 +136,10 @@ rcu_read_lock()
Used by a reader to inform the reclaimer that the reader is
entering an RCU read-side critical section. It is illegal
to block while in an RCU read-side critical section, though
- kernels built with CONFIG_PREEMPT_RCU can preempt RCU read-side
- critical sections. Any RCU-protected data structure accessed
- during an RCU read-side critical section is guaranteed to remain
- unreclaimed for the full duration of that critical section.
+ kernels built with CONFIG_TREE_PREEMPT_RCU can preempt RCU
+ read-side critical sections. Any RCU-protected data structure
+ accessed during an RCU read-side critical section is guaranteed to
+ remain unreclaimed for the full duration of that critical section.
Reference counts may be used in conjunction with RCU to maintain
longer-term references to data structures.
@@ -785,6 +785,7 @@ RCU pointer/list traversal:
rcu_dereference
list_for_each_entry_rcu
hlist_for_each_entry_rcu
+ hlist_nulls_for_each_entry_rcu
list_for_each_continue_rcu (to be deprecated in favor of new
list_for_each_entry_continue_rcu)
@@ -807,19 +808,23 @@ RCU:   Critical sections       Grace period                  Barrier
       rcu_read_lock            synchronize_net               rcu_barrier
       rcu_read_unlock          synchronize_rcu
+                               synchronize_rcu_expedited
       call_rcu
bh:    Critical sections        Grace period                  Barrier
       rcu_read_lock_bh         call_rcu_bh                   rcu_barrier_bh
-      rcu_read_unlock_bh
+      rcu_read_unlock_bh       synchronize_rcu_bh
+                               synchronize_rcu_bh_expedited
sched: Critical sections        Grace period                  Barrier
-      [preempt_disable]        synchronize_sched             rcu_barrier_sched
-      [and friends]            call_rcu_sched
+      rcu_read_lock_sched      synchronize_sched             rcu_barrier_sched
+      rcu_read_unlock_sched    call_rcu_sched
+      [preempt_disable]        synchronize_sched_expedited
+      [and friends]
SRCU:  Critical sections        Grace period                  Barrier
@@ -827,6 +832,9 @@ SRCU:   Critical sections       Grace period                  Barrier
       srcu_read_lock           synchronize_srcu              N/A
       srcu_read_unlock
+SRCU:  Initialization/cleanup
+      init_srcu_struct
+      cleanup_srcu_struct
See the comment headers in the source code (or the docbook generated
from them) for more information.
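As a minimal sketch of the "sched" row in the table above (hypothetical
data structure and single updater assumed, not part of the patch), pairing
rcu_read_lock_sched() readers with a synchronize_sched() updater:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int a;
	};

	static struct foo *global_foo;

	static int reader(void)
	{
		int val;

		rcu_read_lock_sched();		/* Disables preemption. */
		val = rcu_dereference(global_foo)->a;
		rcu_read_unlock_sched();
		return val;
	}

	static void updater(struct foo *newp)
	{
		struct foo *old = global_foo;	/* Single updater assumed. */

		rcu_assign_pointer(global_foo, newp);
		synchronize_sched();	/* Wait for all rcu_read_lock_sched()
					 * readers before freeing old. */
		kfree(old);
	}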
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 09e031c55887..bb3a53cdfbc3 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -206,24 +206,6 @@ Who: Len Brown <len.brown@intel.com>
---------------------------
-What: libata spindown skipping and warning
-When: Dec 2008
-Why: Some halt(8) implementations synchronize caches for and spin
- down libata disks because libata didn't use to spin down disk on
- system halt (only synchronized caches).
- Spin down on system halt is now implemented. sysfs node
- /sys/class/scsi_disk/h:c:i:l/manage_start_stop is present if
- spin down support is available.
- Because issuing spin down command to an already spun down disk
- makes some disks spin up just to spin down again, libata tracks
- device spindown status to skip the extra spindown command and
- warn about it.
- This is to give userspace tools the time to get updated and will
- be removed after userspace is reasonably updated.
-Who: Tejun Heo <htejun@gmail.com>
-
----------------------------
-
What: i386/x86_64 bzImage symlinks
When: April 2010
@@ -394,15 +376,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
-----------------------------
-What: obsolete generic irq defines and typedefs
-When: 2.6.30
-Why: The defines and typedefs (hw_interrupt_type, no_irq_type, irq_desc_t)
- have been kept around for migration reasons. After more than two years
- it's time to remove them finally
-Who: Thomas Gleixner <tglx@linutronix.de>
-
----------------------------
-
What: fakephp and associated sysfs files in /sys/bus/pci/slots/
When: 2011
Why: In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to
@@ -468,3 +441,27 @@ Why: cpu_policy_rwsem has a new cleaner definition making it local to
cpufreq core and contained inside cpufreq.c. Other dependent
drivers should not use it in order to safely avoid lockdep issues.
Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+
+----------------------------
+
+What: sound-slot/service-* module aliases and related clutters in
+ sound/sound_core.c
+When: August 2010
+Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
+ (14) and requests modules using custom sound-slot/service-*
+ module aliases. The only benefit of doing this is allowing
+ use of custom module aliases which might as well be considered
+ a bug at this point. This preemptive claiming prevents
+ alternative OSS implementations.
+
+ Till the feature is removed, the kernel will be requesting
+ both sound-slot/service-* and the standard char-major-* module
+ aliases and allow turning off the pre-claiming selectively via
+ CONFIG_SOUND_OSS_CORE_PRECLAIM and soundcore.preclaim_oss
+ kernel parameter.
+
+ After the transition phase is complete, both the custom module
+ aliases and switches to disable it will go away. This removal
+ will also allow making ALSA OSS emulation independent of
+ sound_core. The dependency will be broken then too.
+Who: Tejun Heo <tj@kernel.org>
diff --git a/Documentation/filesystems/nfs.txt b/Documentation/filesystems/nfs.txt
new file mode 100644
index 000000000000..f50f26ce6cd0
--- /dev/null
+++ b/Documentation/filesystems/nfs.txt
@@ -0,0 +1,98 @@
+
+The NFS client
+==============
+
+The NFS version 2 protocol was first documented in RFC1094 (March 1989).
+Since then two more major releases of NFS have been published, with NFSv3
+being documented in RFC1813 (June 1995), and NFSv4 in RFC3530 (April
+2003).
+
+The Linux NFS client currently supports all the above published versions,
+and work is in progress on adding support for minor version 1 of the NFSv4
+protocol.
+
+The purpose of this document is to provide information on some of the
+upcall interfaces that are used in order to provide the NFS client with
+some of the information that it requires in order to fully comply with
+the NFS spec.
+
+The DNS resolver
+================
+
+NFSv4 allows for one server to refer the NFS client to data that has been
+migrated onto another server by means of the special "fs_locations"
+attribute. See
+ http://tools.ietf.org/html/rfc3530#section-6
+and
+ http://tools.ietf.org/html/draft-ietf-nfsv4-referrals-00
+
+The fs_locations information can take the form of either an ip address and
+a path, or a DNS hostname and a path. The latter requires the NFS client to
+do a DNS lookup in order to mount the new volume, and hence the need for an
+upcall to allow userland to provide this service.
+
+Assuming that the user has the 'rpc_pipefs' filesystem mounted in the usual
+/var/lib/nfs/rpc_pipefs, the upcall consists of the following steps:
+
+ (1) The process checks the dns_resolve cache to see if it contains a
+ valid entry. If so, it returns that entry and exits.
+
+ (2) If no valid entry exists, the helper script '/sbin/nfs_cache_getent'
+ (may be changed using the 'nfs.cache_getent' kernel boot parameter)
+ is run, with two arguments:
+ - the cache name, "dns_resolve"
+ - the hostname to resolve
+
+ (3) After looking up the corresponding ip address, the helper script
+ writes the result into the rpc_pipefs pseudo-file
+ '/var/lib/nfs/rpc_pipefs/cache/dns_resolve/channel'
+ in the following (text) format:
+
+ "<ip address> <hostname> <ttl>\n"
+
+	Where <ip address> is in the usual IPv4 (for example, 192.168.78.90) or IPv6
+ (ffee:ddcc:bbaa:9988:7766:5544:3322:1100, ffee::1100, ...) format.
+ <hostname> is identical to the second argument of the helper
+ script, and <ttl> is the 'time to live' of this cache entry (in
+ units of seconds).
+
+ Note: If <ip address> is invalid, say the string "0", then a negative
+ entry is created, which will cause the kernel to treat the hostname
+ as having no valid DNS translation.
+
+
+
+
+A basic sample /sbin/nfs_cache_getent
+=====================================
+
+#!/bin/bash
+#
+ttl=600
+#
+cut=/usr/bin/cut
+getent=/usr/bin/getent
+rpc_pipefs=/var/lib/nfs/rpc_pipefs
+#
+die()
+{
+ echo "Usage: $0 cache_name entry_name"
+ exit 1
+}
+
+[ $# -lt 2 ] && die
+cachename="$1"
+cache_path=${rpc_pipefs}/cache/${cachename}/channel
+
+case "${cachename}" in
+ dns_resolve)
+ name="$2"
+ result="$(${getent} hosts ${name} | ${cut} -f1 -d\ )"
+ [ -z "${result}" ] && result="0"
+ ;;
+ *)
+ die
+ ;;
+esac
+echo "${result} ${name} ${ttl}" >${cache_path}
+
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index dbea4f95fc85..1c058b552e93 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -121,6 +121,7 @@ Code Seq# Include File Comments
'c' 00-7F linux/comstats.h conflict!
'c' 00-7F linux/coda.h conflict!
'c' 80-9F arch/s390/include/asm/chsc.h
+'c' A0-AF arch/x86/include/asm/msr.h
'd' 00-FF linux/char/drm/drm/h conflict!
'd' F0-FF linux/digi1.h
'e' all linux/digi1.h conflict!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7936b801fe6a..5d4427d17281 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1503,6 +1503,14 @@ and is between 256 and 4096 characters. It is defined in the file
[NFS] set the TCP port on which the NFSv4 callback
channel should listen.
+ nfs.cache_getent=
+ [NFS] sets the pathname to the program which is used
+ to update the NFS client cache entries.
+
+ nfs.cache_getent_timeout=
+ [NFS] sets the timeout after which an attempt to
+ update a cache entry is deemed to have failed.
+
nfs.idmap_cache_timeout=
[NFS] set the maximum lifetime for idmapper cache
entries.
@@ -2395,6 +2403,18 @@ and is between 256 and 4096 characters. It is defined in the file
stifb= [HW]
Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]]
+ sunrpc.min_resvport=
+ sunrpc.max_resvport=
+ [NFS,SUNRPC]
+ SunRPC servers often require that client requests
+ originate from a privileged port (i.e. a port in the
+ range 0 < portnr < 1024).
+ An administrator who wishes to reserve some of these
+ ports for other uses may adjust the range that the
+ kernel's sunrpc client considers to be privileged
+ using these two parameters to set the minimum and
+ maximum port values.
+
sunrpc.pool_mode=
[NFS]
Control how the NFS server code allocates CPUs to
@@ -2411,6 +2431,15 @@ and is between 256 and 4096 characters. It is defined in the file
pernode one pool for each NUMA node (equivalent
to global on non-NUMA machines)
+ sunrpc.tcp_slot_table_entries=
+ sunrpc.udp_slot_table_entries=
+ [NFS,SUNRPC]
+ Sets the upper limit on the number of simultaneous
+ RPC calls that can be sent from the client to a
+ server. Increasing these values may allow you to
+ improve throughput, but will also increase the
+ amount of memory reserved for use by the client.
+
swiotlb= [IA-64] Number of I/O TLB slabs
switches= [HW,M68k]
@@ -2480,6 +2509,11 @@ and is between 256 and 4096 characters. It is defined in the file
trace_buf_size=nn[KMG]
[FTRACE] will set tracing buffer size.
+ trace_event=[event-list]
+ [FTRACE] Set and start specified trace events in order
+ to facilitate early boot debugging.
+ See also Documentation/trace/events.txt
+
trix= [HW,OSS] MediaTrix AudioTrix Pro
Format:
<io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq>
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index b56aacc1fff8..e4dbbdb1bd96 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -26,7 +26,7 @@ This document has the following sections:
- Notes on accessing payload contents
- Defining a key type
- Request-key callback service
- - Key access filesystem
+ - Garbage collection
============
@@ -113,6 +113,9 @@ Each key has a number of attributes:
(*) Dead. The key's type was unregistered, and so the key is now useless.
+Keys in the last three states are subject to garbage collection. See the
+section on "Garbage collection".
+
====================
KEY SERVICE OVERVIEW
@@ -754,6 +757,26 @@ The keyctl syscall functions are:
successful.
+ (*) Install the calling process's session keyring on its parent.
+
+ long keyctl(KEYCTL_SESSION_TO_PARENT);
+
+     This function attempts to install the calling process's session keyring
+ on to the calling process's parent, replacing the parent's current session
+ keyring.
+
+ The calling process must have the same ownership as its parent, the
+ keyring must have the same ownership as the calling process, the calling
+ process must have LINK permission on the keyring and the active LSM module
+ mustn't deny permission, otherwise error EPERM will be returned.
+
+ Error ENOMEM will be returned if there was insufficient memory to complete
+ the operation, otherwise 0 will be returned to indicate success.
+
+ The keyring will be replaced next time the parent process leaves the
+ kernel and resumes executing userspace.
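+
+     A minimal userspace sketch of invoking this (illustrative only; it
+     assumes KEYCTL_SESSION_TO_PARENT is 18, as defined in linux/keyctl.h,
+     and uses the raw syscall so that no keyutils library is needed):
+
+	#include <stdio.h>
+	#include <unistd.h>
+	#include <sys/syscall.h>
+
+	#ifndef KEYCTL_SESSION_TO_PARENT
+	#define KEYCTL_SESSION_TO_PARENT 18	/* assumed, from linux/keyctl.h */
+	#endif
+
+	int main(void)
+	{
+		/* hand our session keyring to the parent process */
+		if (syscall(SYS_keyctl, KEYCTL_SESSION_TO_PARENT) < 0) {
+			perror("KEYCTL_SESSION_TO_PARENT");
+			return 1;
+		}
+		return 0;
+	}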
+
+
===============
KERNEL SERVICES
===============
@@ -1231,3 +1254,17 @@ by executing:
In this case, the program isn't required to actually attach the key to a ring;
the rings are provided for reference.
+
+
+==================
+GARBAGE COLLECTION
+==================
+
+Dead keys (for which the type has been removed) will be automatically unlinked
+from those keyrings that point to them and deleted as soon as possible by a
+background garbage collector.
+
+Similarly, revoked and expired keys will be garbage collected, but only after a
+certain amount of time has passed. This time is set as a number of seconds in:
+
+ /proc/sys/kernel/keys/gc_delay
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 89068030b01b..34f6638aa5ac 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -27,6 +27,13 @@ To trigger an intermediate memory scan:
# echo scan > /sys/kernel/debug/kmemleak
+To clear the list of all current possible memory leaks:
+
+ # echo clear > /sys/kernel/debug/kmemleak
+
+New leaks will then come up upon reading /sys/kernel/debug/kmemleak
+again.
+
Note that the orphan objects are listed in the order they were allocated
and one object at the beginning of the list may cause other subsequent
objects to be reported as orphan.
@@ -42,6 +49,9 @@ Memory scanning parameters can be modified at run-time by writing to the
scan=<secs> - set the automatic memory scanning period in seconds
(default 600, 0 to stop the automatic scanning)
scan - trigger a memory scan
+ clear - clear list of current memory leak suspects, done by
+ marking all current reported unreferenced objects grey
+ dump=<addr> - dump information about the object found at <addr>
Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
the kernel command line.
@@ -86,6 +96,27 @@ avoid this, kmemleak can also store the number of values pointing to an
address inside the block address range that need to be found so that the
block is not considered a leak. One example is __vmalloc().
+Testing specific sections with kmemleak
+---------------------------------------
+
+Upon initial bootup your /sys/kernel/debug/kmemleak output page may be
+quite extensive. This can also be the case if you have very buggy code
+when doing development. To work around these situations you can use the
+'clear' command to clear all reported unreferenced objects from the
+/sys/kernel/debug/kmemleak output. By issuing a 'scan' after a 'clear'
+you can find new unreferenced objects; this should help with testing
+specific sections of code.
+
+To test a critical section on demand with a clean kmemleak do:
+
+ # echo clear > /sys/kernel/debug/kmemleak
+ ... test your kernel or modules ...
+ # echo scan > /sys/kernel/debug/kmemleak
+
+Then, as usual, get your report with:
+
+ # cat /sys/kernel/debug/kmemleak
+
Kmemleak API
------------
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index 2d10053dd97e..ae66f9b90a25 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -495,6 +495,13 @@ and for each vararg a long value. So e.g. for a debug entry with a format
string plus two varargs one would need to allocate a (3 * sizeof(long))
byte data area in the debug_register() function.
+IMPORTANT: Using "%s" in sprintf event functions is dangerous. You can only
+use "%s" in the sprintf event functions if the memory for the passed string is
+available as long as the debug feature exists. The reason behind this is that,
+due to performance considerations, only a pointer to the string is stored in
+the debug feature. If you log a string that is freed afterwards, you will get
+an OOPS when inspecting the debug feature, because the debug feature will then
+access the already freed memory.
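+
+As an illustration (a sketch only; 'debug_info' is assumed to be the handle
+returned by debug_register() as described earlier in this document):
+
+    static debug_info_t *debug_info;	/* from debug_register() */
+
+    static void log_result(int rc)
+    {
+	char buf[32];
+
+	sprintf(buf, "operation failed: %d", rc);
+	/* BAD: only the pointer to buf is stored; buf is gone once
+	 * this function returns */
+	debug_sprintf_event(debug_info, 1, "%s\n", buf);
+
+	/* GOOD: string literals remain valid for the lifetime of the
+	 * debug feature */
+	debug_sprintf_event(debug_info, 1, "operation failed: %d\n", rc);
+    }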
NOTE: If using the sprintf view do NOT use other event/exception functions
than the sprintf-event and -exception functions.
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 4252697a95d6..1c8eb4518ce0 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -60,6 +60,12 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
slots - Reserve the slot index for the given driver.
This option takes multiple strings.
See "Module Autoloading Support" section for details.
+ debug - Specifies the debug message level
+ (0 = disable debug prints, 1 = normal debug messages,
+ 2 = verbose debug messages)
+ This option appears only when CONFIG_SND_DEBUG=y.
+		This option can be changed dynamically via the sysfs
+		file /sys/module/snd/parameters/debug.
Module snd-pcm-oss
------------------
@@ -513,6 +519,26 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
or input, but you may use this module for any application which
requires a sound card (like RealPlayer).
+ pcm_devs - Number of PCM devices assigned to each card
+ (default = 1, up to 4)
+ pcm_substreams - Number of PCM substreams assigned to each PCM
+ (default = 8, up to 16)
+ hrtimer - Use hrtimer (=1, default) or system timer (=0)
+ fake_buffer - Fake buffer allocations (default = 1)
+
+ When multiple PCM devices are created, snd-dummy gives different
+ behavior to each PCM device:
+ 0 = interleaved with mmap support
+ 1 = non-interleaved with mmap support
+ 2 = interleaved without mmap
+ 3 = non-interleaved without mmap
+
+    By default, the snd-dummy driver doesn't allocate the real buffers
+    but either ignores read/write or mmaps a single dummy page to all
+    buffer pages, in order to save resources.  If your apps need
+    the read/written buffer data to be consistent, pass the fake_buffer=0
+    option.
+
The power-management is supported.
Module snd-echo3g
@@ -768,6 +794,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
bdl_pos_adj - Specifies the DMA IRQ timing delay in samples.
Passing -1 will make the driver to choose the appropriate
value based on the controller chip.
+ patch - Specifies the early "patch" files to modify the HD-audio
+ setup before initializing the codecs. This option is
+ available only when CONFIG_SND_HDA_PATCH_LOADER=y is set.
+ See HD-Audio.txt for details.
[Single (global) options]
single_cmd - Use single immediate commands to communicate with
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 939a3dd58148..97eebd63bedc 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -114,8 +114,8 @@ ALC662/663/272
samsung-nc10 Samsung NC10 mini notebook
auto auto-config reading BIOS (default)
-ALC882/885
-==========
+ALC882/883/885/888/889
+======================
3stack-dig 3-jack with SPDIF I/O
6stack-dig 6-jack digital with SPDIF I/O
arima Arima W820Di1
@@ -127,12 +127,8 @@ ALC882/885
mbp3 Macbook Pro rev3
imac24 iMac 24'' with jack detection
w2jc ASUS W2JC
- auto auto-config reading BIOS (default)
-
-ALC883/888
-==========
- 3stack-dig 3-jack with SPDIF I/O
- 6stack-dig 6-jack digital with SPDIF I/O
+ 3stack-2ch-dig 3-jack with SPDIF I/O (ALC883)
+ alc883-6stack-dig 6-jack digital with SPDIF I/O (ALC883)
3stack-6ch 3-jack 6-channel
3stack-6ch-dig 3-jack 6-channel with SPDIF I/O
6stack-dig-demo 6-jack digital for Intel demo board
@@ -140,6 +136,7 @@ ALC883/888
acer-aspire Acer Aspire 9810
acer-aspire-4930g Acer Aspire 4930G
acer-aspire-6530g Acer Aspire 6530G
+ acer-aspire-7730g Acer Aspire 7730G
acer-aspire-8930g Acer Aspire 8930G
medion Medion Laptops
medion-md2 Medion MD2
@@ -155,10 +152,13 @@ ALC883/888
3stack-hp HP machines with 3stack (Lucknow, Samba boards)
6stack-dell Dell machines with 6stack (Inspiron 530)
mitac Mitac 8252D
+ clevo-m540r Clevo M540R (6ch + digital)
clevo-m720 Clevo M720 laptop series
fujitsu-pi2515 Fujitsu AMILO Pi2515
fujitsu-xa3530 Fujitsu AMILO XA3530
3stack-6ch-intel Intel DG33* boards
+ intel-alc889a Intel IbexPeak with ALC889A
+ intel-x58 Intel DX58 with ALC889
asus-p5q ASUS P5Q-EM boards
mb31 MacBook 3,1
sony-vaio-tt Sony VAIO TT
@@ -229,7 +229,7 @@ AD1984
======
basic default configuration
thinkpad Lenovo Thinkpad T61/X61
- dell Dell T3400
+ dell_desktop Dell T3400
AD1986A
=======
@@ -258,6 +258,7 @@ Conexant 5045
laptop-micsense Laptop with Mic sense (old model fujitsu)
laptop-hpmicsense Laptop with HP and Mic senses
benq Benq R55E
+ laptop-hp530 HP 530 laptop
test for testing/debugging purpose, almost all controls
can be adjusted. Appearing only when compiled with
$CONFIG_SND_DEBUG=y
@@ -278,9 +279,16 @@ Conexant 5051
hp-dv6736 HP dv6736
lenovo-x200 Lenovo X200 laptop
+Conexant 5066
+=============
+ laptop Basic Laptop config (default)
+ dell-laptop Dell laptops
+ olpc-xo-1_5 OLPC XO 1.5
+
STAC9200
========
ref Reference board
+ oqo OQO Model 2
dell-d21 Dell (unknown)
dell-d22 Dell (unknown)
dell-d23 Dell (unknown)
@@ -368,10 +376,12 @@ STAC92HD73*
===========
ref Reference board
no-jd BIOS setup but without jack-detection
+ intel Intel DG45* mobos
dell-m6-amic Dell desktops/laptops with analog mics
dell-m6-dmic Dell desktops/laptops with digital mics
dell-m6 Dell desktops/laptops with both type of mics
dell-eq Dell desktops/laptops
+ alienware Alienware M17x
auto BIOS setup (default)
STAC92HD83*
@@ -385,3 +395,8 @@ STAC9872
========
vaio VAIO laptop without SPDIF
auto BIOS setup (default)
+
+Cirrus Logic CS4206/4207
+========================
+ mbp55 MacBook Pro 5,5
+ auto BIOS setup (default)
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 71ac995b1915..7b8a5f947d1d 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -139,6 +139,10 @@ The driver checks PCI SSID and looks through the static configuration
table until any matching entry is found. If you have a new machine,
you may see a message like below:
------------------------------------------------------------------------
+ hda_codec: ALC880: BIOS auto-probing.
+------------------------------------------------------------------------
+Meanwhile, in the earlier versions, you would see a message like:
+------------------------------------------------------------------------
hda_codec: Unknown model for ALC880, trying auto-probe from BIOS...
------------------------------------------------------------------------
Even if you see such a message, DON'T PANIC. Take a deep breath and
@@ -403,6 +407,66 @@ re-configure based on that state, run like below:
------------------------------------------------------------------------
+Early Patching
+~~~~~~~~~~~~~~
+When CONFIG_SND_HDA_PATCH_LOADER=y is set, you can pass a "patch" as a
+firmware file for modifying the HD-audio setup before initializing the
+codec.  This works basically like the reconfiguration via sysfs described
+above, but it takes effect before the first codec configuration.
+
+A patch file is a plain text file which looks like below:
+
+------------------------------------------------------------------------
+ [codec]
+ 0x12345678 0xabcd1234 2
+
+ [model]
+ auto
+
+ [pincfg]
+ 0x12 0x411111f0
+
+ [verb]
+ 0x20 0x500 0x03
+ 0x20 0x400 0xff
+
+ [hint]
+ hp_detect = yes
+------------------------------------------------------------------------
+
+The file needs to have a line `[codec]`. The next line should contain
+three numbers indicating the codec vendor-id (0x12345678 in the
+example), the codec subsystem-id (0xabcd1234) and the address (2) of
+the codec.  The rest of the patch entries are applied to this specified
+codec until another codec entry is given.
+
+The `[model]` line allows changing the model name of each codec.
+In the example above, it will be changed to model=auto.
+Note that this overrides the module option.
+
+After the `[pincfg]` line, the contents are parsed as the initial
+default pin-configurations just like `user_pin_configs` sysfs above.
+The values can be shown in the user_pin_configs sysfs file, too.
+
+Similarly, the lines after `[verb]` are parsed as `init_verbs`
+sysfs entries, and the lines after `[hint]` are parsed as `hints`
+sysfs entries, respectively.
+
+The hd-audio driver reads the file via request_firmware(). Thus,
+a patch file has to be located on the appropriate firmware path,
+typically /lib/firmware.  For example, when you pass the option
+`patch=hda-init.fw`, the file /lib/firmware/hda-init.fw must be
+present.
+
+The patch module option is specific to each card instance, and you
+need to give one file name for each instance, separated by commas.
+For example, if you have two cards, one for an on-board analog and one
+for an HDMI video board, you may pass the patch option like below:
+------------------------------------------------------------------------
+ options snd-hda-intel patch=on-board-patch,hdmi-patch
+------------------------------------------------------------------------
+
+
Power-Saving
~~~~~~~~~~~~
The power-saving is a kind of auto-suspend of the device. When the
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 322a00bb99d9..2dbff53369d0 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -19,6 +19,7 @@ Currently, these files might (depending on your configuration)
show up in /proc/sys/kernel:
- acpi_video_flags
- acct
+- callhome [ S390 only ]
- auto_msgmni
- core_pattern
- core_uses_pid
@@ -91,6 +92,21 @@ valid for 30 seconds.
==============================================================
+callhome:
+
+Controls the kernel's callhome behavior in case of a kernel panic.
+
+The s390 hardware allows an operating system to send a notification
+to a service organization (callhome) in case of an operating system panic.
+
+When the value in this file is 0 (which is the default behavior),
+nothing happens in case of a kernel panic. If this value is set to "1",
+the complete kernel oops message is sent to the IBM customer service
+organization in case the mainframe the Linux operating system is running
+on has a service contract with IBM.
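+
+For example, on a mainframe with such a service contract, the call home
+feature can be enabled with:
+
+    # echo 1 > /proc/sys/kernel/callhome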
+
+==============================================================
+
core_pattern:
core_pattern is used to specify a core dumpfile pattern name.
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index f157d7594ea7..2bcc8d4dea29 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -83,6 +83,15 @@ When reading one of these enable files, there are four results:
X - there is a mixture of events enabled and disabled
? - this file does not affect any event
+2.3 Boot option
+---------------
+
+In order to facilitate early boot debugging, use boot option:
+
+ trace_event=[event-list]
+
+The format of this boot option is the same as described in section 2.1.
+
3. Defining an event-enabled tracepoint
=======================================
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index a39b3c749de5..355d0f1f8c50 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -85,26 +85,19 @@ of ftrace. Here is a list of some of the key files:
This file holds the output of the trace in a human
readable format (described below).
- latency_trace:
-
- This file shows the same trace but the information
- is organized more to display possible latencies
- in the system (described below).
-
trace_pipe:
The output is the same as the "trace" file but this
file is meant to be streamed with live tracing.
- Reads from this file will block until new data
- is retrieved. Unlike the "trace" and "latency_trace"
- files, this file is a consumer. This means reading
- from this file causes sequential reads to display
- more current data. Once data is read from this
- file, it is consumed, and will not be read
- again with a sequential read. The "trace" and
- "latency_trace" files are static, and if the
- tracer is not adding more data, they will display
- the same information every time they are read.
+ Reads from this file will block until new data is
+ retrieved. Unlike the "trace" file, this file is a
+ consumer. This means reading from this file causes
+ sequential reads to display more current data. Once
+ data is read from this file, it is consumed, and
+ will not be read again with a sequential read. The
+ "trace" file is static, and if the tracer is not
+	adding more data, it will display the same
+	information every time it is read.
trace_options:
@@ -117,10 +110,10 @@ of ftrace. Here is a list of some of the key files:
Some of the tracers record the max latency.
For example, the time interrupts are disabled.
This time is saved in this file. The max trace
- will also be stored, and displayed by either
- "trace" or "latency_trace". A new max trace will
- only be recorded if the latency is greater than
- the value in this file. (in microseconds)
+ will also be stored, and displayed by "trace".
+ A new max trace will only be recorded if the
+ latency is greater than the value in this
+ file. (in microseconds)
buffer_size_kb:
@@ -210,7 +203,7 @@ Here is the list of current tracers that may be configured.
the trace with the longest max latency.
See tracing_max_latency. When a new max is recorded,
it replaces the old trace. It is best to view this
- trace via the latency_trace file.
+ trace with the latency-format option enabled.
"preemptoff"
@@ -307,8 +300,8 @@ the lowest priority thread (pid 0).
Latency trace format
--------------------
-For traces that display latency times, the latency_trace file
-gives somewhat more information to see why a latency happened.
+When the latency-format option is enabled, the trace file gives
+somewhat more information to see why a latency happened.
Here is a typical trace.
# tracer: irqsoff
@@ -380,9 +373,10 @@ explains which is which.
The above is mostly meaningful for kernel developers.
- time: This differs from the trace file output. The trace file output
- includes an absolute timestamp. The timestamp used by the
- latency_trace file is relative to the start of the trace.
+ time: When the latency-format option is enabled, the trace file
+ output includes a timestamp relative to the start of the
+ trace. This differs from the output when latency-format
+ is disabled, which includes an absolute timestamp.
delay: This is just to help catch your eye a bit better. And
needs to be fixed to be only relative to the same CPU.
@@ -440,7 +434,8 @@ Here are the available options:
sym-addr:
bash-4000 [01] 1477.606694: simple_strtoul <c0339346>
- verbose - This deals with the latency_trace file.
+ verbose - This deals with the trace file when the
+ latency-format option is enabled.
bash 4000 1 0 00000000 00010a95 [58127d26] 1720.415ms \
(+0.000ms): simple_strtoul (strict_strtoul)
@@ -472,7 +467,7 @@ Here are the available options:
the app is no longer running
The lookup is performed when you read
- trace,trace_pipe,latency_trace. Example:
+ trace,trace_pipe. Example:
a.out-1623 [000] 40874.465068: /root/a.out[+0x480] <-/root/a.out[+0
x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
@@ -481,6 +476,11 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
every scheduling event. Will add overhead if
there's a lot of tasks running at once.
+ latency-format - This option changes the trace. When
+ it is enabled, the trace displays
+ additional information about the
+ latencies, as described in "Latency
+ trace format".
sched_switch
------------
@@ -596,12 +596,13 @@ To reset the maximum, echo 0 into tracing_max_latency. Here is
an example:
# echo irqsoff > current_tracer
+ # echo latency-format > trace_options
# echo 0 > tracing_max_latency
# echo 1 > tracing_enabled
# ls -ltr
[...]
# echo 0 > tracing_enabled
- # cat latency_trace
+ # cat trace
# tracer: irqsoff
#
irqsoff latency trace v1.1.5 on 2.6.26
@@ -703,12 +704,13 @@ which preemption was disabled. The control of preemptoff tracer
is much like the irqsoff tracer.
# echo preemptoff > current_tracer
+ # echo latency-format > trace_options
# echo 0 > tracing_max_latency
# echo 1 > tracing_enabled
# ls -ltr
[...]
# echo 0 > tracing_enabled
- # cat latency_trace
+ # cat trace
# tracer: preemptoff
#
preemptoff latency trace v1.1.5 on 2.6.26-rc8
@@ -850,12 +852,13 @@ Again, using this trace is much like the irqsoff and preemptoff
tracers.
# echo preemptirqsoff > current_tracer
+ # echo latency-format > trace_options
# echo 0 > tracing_max_latency
# echo 1 > tracing_enabled
# ls -ltr
[...]
# echo 0 > tracing_enabled
- # cat latency_trace
+ # cat trace
# tracer: preemptirqsoff
#
preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
@@ -1012,11 +1015,12 @@ Instead of performing an 'ls', we will run 'sleep 1' under
'chrt' which changes the priority of the task.
# echo wakeup > current_tracer
+ # echo latency-format > trace_options
# echo 0 > tracing_max_latency
# echo 1 > tracing_enabled
# chrt -f 5 sleep 1
# echo 0 > tracing_enabled
- # cat latency_trace
+ # cat trace
# tracer: wakeup
#
wakeup latency trace v1.1.5 on 2.6.26-rc8
diff --git a/Documentation/trace/function-graph-fold.vim b/Documentation/trace/function-graph-fold.vim
new file mode 100644
index 000000000000..0544b504c8b0
--- /dev/null
+++ b/Documentation/trace/function-graph-fold.vim
@@ -0,0 +1,42 @@
+" Enable folding for ftrace function_graph traces.
+"
+" To use, :source this file while viewing a function_graph trace, or use vim's
+" -S option to load from the command-line together with a trace. You can then
+" use the usual vim fold commands, such as "za", to open and close nested
+" functions. While closed, a fold will show the total time taken for a call,
+" as would normally appear on the line with the closing brace. Folded
+" functions will not include finish_task_switch(), so folding should remain
+" relatively sane even through a context switch.
+"
+" Note that this will almost certainly only work well with a
+" single-CPU trace (e.g. trace-cmd report --cpu 1).
+
+function! FunctionGraphFoldExpr(lnum)
+ let line = getline(a:lnum)
+ if line[-1:] == '{'
+ if line =~ 'finish_task_switch() {$'
+ return '>1'
+ endif
+ return 'a1'
+ elseif line[-1:] == '}'
+ return 's1'
+ else
+ return '='
+ endif
+endfunction
+
+function! FunctionGraphFoldText()
+ let s = split(getline(v:foldstart), '|', 1)
+ if getline(v:foldend+1) =~ 'finish_task_switch() {$'
+ let s[2] = ' task switch '
+ else
+ let e = split(getline(v:foldend), '|', 1)
+ let s[2] = e[2]
+ endif
+ return join(s, '|')
+endfunction
+
+setlocal foldexpr=FunctionGraphFoldExpr(v:lnum)
+setlocal foldtext=FunctionGraphFoldText()
+setlocal foldcolumn=12
+setlocal foldmethod=expr
diff --git a/Documentation/trace/ring-buffer-design.txt b/Documentation/trace/ring-buffer-design.txt
new file mode 100644
index 000000000000..5b1d23d604c5
--- /dev/null
+++ b/Documentation/trace/ring-buffer-design.txt
@@ -0,0 +1,955 @@
+ Lockless Ring Buffer Design
+ ===========================
+
+Copyright 2009 Red Hat Inc.
+ Author: Steven Rostedt <srostedt@redhat.com>
+ License: The GNU Free Documentation License, Version 1.2
+ (dual licensed under the GPL v2)
+Reviewers: Mathieu Desnoyers, Huang Ying, Hidetoshi Seto,
+ and Frederic Weisbecker.
+
+
+Written for: 2.6.31
+
+Terminology used in this Document
+---------------------------------
+
+tail - where new writes happen in the ring buffer.
+
+head - where new reads happen in the ring buffer.
+
+producer - the task that writes into the ring buffer (same as writer)
+
+writer - same as producer
+
+consumer - the task that reads from the buffer (same as reader)
+
+reader - same as consumer.
+
+reader_page - A page outside the ring buffer used solely (for the most part)
+ by the reader.
+
+head_page - a pointer to the page that the reader will use next
+
+tail_page - a pointer to the page that will be written to next
+
+commit_page - a pointer to the page with the last finished non-nested write.
+
+cmpxchg - hardware assisted atomic transaction that performs the following:
+
+ A = B iff previous A == C
+
+ R = cmpxchg(A, C, B) is saying that we replace A with B if and only if
+ current A is equal to C, and we put the old (current) A into R
+
+    R gets the previous A regardless of whether A is updated with B or not.
+
+ To see if the update was successful a compare of R == C may be used.
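+
+    Expressed as an illustrative C-like helper (the real primitive is an
+    atomic, architecture-specific instruction; this sketch only shows the
+    semantics described above):
+
+	unsigned long cmpxchg(unsigned long *A, unsigned long C, unsigned long B)
+	{
+		unsigned long R = *A;	/* R always gets the previous value */
+
+		if (R == C)		/* update only if the old value matches C */
+			*A = B;
+		return R;	/* the real primitive does all of this atomically */
+	}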
+
+The Generic Ring Buffer
+-----------------------
+
+The ring buffer can be used in either an overwrite mode or in
+producer/consumer mode.
+
+Producer/consumer mode is where, if the producer were to fill up the
+buffer before the consumer could free up anything, the producer
+will stop writing to the buffer. This will lose the most recent events.
+
+Overwrite mode is where, if the producer were to fill up the buffer
+before the consumer could free up anything, the producer will
+overwrite the older data. This will lose the oldest events.
+
+No two writers can write at the same time (on the same per-cpu buffer).
+A writer may interrupt another writer, but it must finish writing
+before the previous writer may continue. This is very important to the
+algorithm. The writers act like a "stack". The way interrupts work
+enforces this behavior.
+
+
+ writer1 start
+ <preempted> writer2 start
+ <preempted> writer3 start
+ writer3 finishes
+ writer2 finishes
+ writer1 finishes
+
+This is very much like a writer being preempted by an interrupt and
+the interrupt doing a write as well.
+
+Readers can happen at any time. But no two readers may run at the
+same time, nor can a reader preempt/interrupt another reader. A reader
+can not preempt/interrupt a writer, but it may read/consume from the
+buffer at the same time as a writer is writing, but the reader must be
+on another processor to do so. A reader may read on its own processor
+and can be preempted by a writer.
+
+A writer can preempt a reader, but a reader can not preempt a writer.
+But a reader can read the buffer at the same time (on another processor)
+as a writer.
+
+The ring buffer is made up of a list of pages held together by a linked list.
+
+At initialization a reader page is allocated for the reader that is not
+part of the ring buffer.
+
+The head_page, tail_page and commit_page are all initialized to point
+to the same page.
+
+The reader page is initialized to have its next pointer pointing to
+the head page, and its previous pointer pointing to a page before
+the head page.
+
+The reader has its own page to use. At start up time, this page is
+allocated but is not attached to the list. When the reader wants
+to read from the buffer, if its page is empty (like it is on start up)
+it will swap its page with the head_page. The old reader page will
+become part of the ring buffer and the head_page will be removed.
+The page after the inserted page (old reader_page) will become the
+new head page.
+
+Once the new page is given to the reader, the reader can do what
+it wants with it, as long as a writer has left that page.
+
+A sample of how the reader page is swapped: Note this does not
+show the head page in the buffer, it is for demonstrating a swap
+only.
+
+ +------+
+ |reader| RING BUFFER
+ |page |
+ +------+
+ +---+ +---+ +---+
+ | |-->| |-->| |
+ | |<--| |<--| |
+ +---+ +---+ +---+
+ ^ | ^ |
+ | +-------------+ |
+ +-----------------+
+
+
+ +------+
+ |reader| RING BUFFER
+ |page |-------------------+
+ +------+ v
+ | +---+ +---+ +---+
+ | | |-->| |-->| |
+ | | |<--| |<--| |<-+
+ | +---+ +---+ +---+ |
+ | ^ | ^ | |
+ | | +-------------+ | |
+ | +-----------------+ |
+ +------------------------------------+
+
+ +------+
+ |reader| RING BUFFER
+ |page |-------------------+
+ +------+ <---------------+ v
+ | ^ +---+ +---+ +---+
+ | | | |-->| |-->| |
+ | | | | | |<--| |<-+
+ | | +---+ +---+ +---+ |
+ | | | ^ | |
+ | | +-------------+ | |
+ | +-----------------------------+ |
+ +------------------------------------+
+
+ +------+
+ |buffer| RING BUFFER
+ |page |-------------------+
+ +------+ <---------------+ v
+ | ^ +---+ +---+ +---+
+ | | | | | |-->| |
+ | | New | | | |<--| |<-+
+ | | Reader +---+ +---+ +---+ |
+ | | page ----^ | |
+ | | | |
+ | +-----------------------------+ |
+ +------------------------------------+
+
+
+
+It is possible that the page swapped is the commit page and the tail page,
+if what is in the ring buffer is less than what is held in a buffer page.
+
+
+ reader page commit page tail page
+ | | |
+ v | |
+ +---+ | |
+ | |<----------+ |
+ | |<------------------------+
+ | |------+
+ +---+ |
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+This case is still valid for this algorithm.
+When the writer leaves the page, it simply goes into the ring buffer
+since the reader page still points to the next location in the ring
+buffer.
+
+
+The main pointers:
+
+ reader page - The page used solely by the reader and is not part
+ of the ring buffer (may be swapped in)
+
+ head page - the next page in the ring buffer that will be swapped
+ with the reader page.
+
+ tail page - the page where the next write will take place.
+
+ commit page - the page that last finished a write.
+
+The commit page is only updated by the outermost writer in the
+writer stack. A writer that preempts another writer will not move the
+commit page.
+
+When data is written into the ring buffer, a position is reserved
+in the ring buffer and passed back to the writer. When the writer
+is finished writing data into that position, it commits the write.
+
+Another write (or a read) may take place at any time during this
+transaction. If another write happens, it must finish before continuing
+with the previous write.
+
+
+ Write reserve:
+
+ Buffer page
+ +---------+
+ |written |
+ +---------+ <--- given back to writer (current commit)
+ |reserved |
+ +---------+ <--- tail pointer
+ | empty |
+ +---------+
+
+ Write commit:
+
+ Buffer page
+ +---------+
+ |written |
+ +---------+
+ |written |
+ +---------+ <--- next position for write (current commit)
+ | empty |
+ +---------+
+
+
+ If a write happens after the first reserve:
+
+ Buffer page
+ +---------+
+ |written |
+ +---------+ <-- current commit
+ |reserved |
+ +---------+ <--- given back to second writer
+ |reserved |
+ +---------+ <--- tail pointer
+
+ After second writer commits:
+
+
+ Buffer page
+ +---------+
+ |written |
+ +---------+ <--(last full commit)
+ |reserved |
+ +---------+
+ |pending |
+ |commit |
+ +---------+ <--- tail pointer
+
+ When the first writer commits:
+
+ Buffer page
+ +---------+
+ |written |
+ +---------+
+ |written |
+ +---------+
+ |written |
+ +---------+ <--(last full commit and tail pointer)
+
+
+The commit pointer points to the last write location that was
+committed without preempting another write. When a write that
+preempted another write is committed, it only becomes a pending commit
+and will not be a full commit until all writes have been committed.
+
+The commit page points to the page that has the last full commit.
+The tail page points to the page with the last write (before
+committing).
+
+The tail page is always equal to or after the commit page. It may
+be several pages ahead. If the tail page catches up to the commit
+page then no more writes may take place (regardless of the mode
+of the ring buffer: overwrite or producer/consumer).
+
+The order of pages is:
+
+ head page
+ commit page
+ tail page
+
+Possible scenario:
+ tail page
+ head page commit page |
+ | | |
+ v v v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+There is a special case where the head page is after either the commit page
+or the tail page (possibly both). That is when the commit (and tail) page
+has been swapped with the reader page. This is because the head page is
+always part of the ring buffer, but the reader page is not. Whenever less
+than a full page has been committed inside the ring buffer and a reader
+swaps out a page, it will be swapping out the commit page.
+
+
+ reader page commit page tail page
+ | | |
+ v | |
+ +---+ | |
+ | |<----------+ |
+ | |<------------------------+
+ | |------+
+ +---+ |
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+ ^
+ |
+ head page
+
+
+In this case, the head page will not move when the tail and commit
+move back into the ring buffer.
+
+The reader can not swap a page into the ring buffer if the commit page
+is still on that page. If the reader meets the last commit (a real commit,
+not one that is pending or reserved), then there is nothing more to read.
+The buffer is considered empty until another full commit finishes.
+
+When the tail meets the head page, if the buffer is in overwrite mode,
+the head page will be pushed ahead one. If the buffer is in producer/consumer
+mode, the write will fail.
+
+Overwrite mode:
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+ ^
+ |
+ head page
+
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+ ^
+ |
+ head page
+
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+ ^
+ |
+ head page
+
+Note, the reader page will still point to the previous head page.
+But when a swap takes place, it will use the most recent head page.
+
+
+Making the Ring Buffer Lockless:
+--------------------------------
+
+The main idea behind the lockless algorithm is to combine the moving
+of the head_page pointer with the swapping of pages with the reader.
+State flags are placed inside the pointer to the page. To do this,
+each page must be aligned in memory by 4 bytes. This will allow the 2
+least significant bits of the address to be used as flags, since
+they will always be zero for the address. To get the address,
+simply mask out the flags.
+
+ MASK = ~3
+
+ address & MASK
+
+Two flags will be kept by these two bits:
+
+ HEADER - the page being pointed to is a head page
+
+ UPDATE - the page being pointed to is being updated by a writer
+ and was or is about to be a head page.
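+
+A sketch of how such flagged pointers can be handled (illustrative only;
+the macro and flag names below are placeholders, not the kernel's):
+
+    #define PTR_MASK	(~3UL)
+    #define FLAG_HEADER	1UL
+    #define FLAG_UPDATE	2UL
+
+    /* strip the flag bits to get the real page address */
+    #define PTR_PAGE(p)	((struct buffer_page *)((unsigned long)(p) & PTR_MASK))
+
+    /* extract the flag bits of a next-page pointer */
+    #define PTR_FLAGS(p)	((unsigned long)(p) & ~PTR_MASK)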
+
+
+ reader page
+ |
+ v
+ +---+
+ | |------+
+ +---+ |
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-H->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+
+The above pointer "-H->" would have the HEADER flag set. That is,
+the page it points to is the next page to be swapped out by the reader.
+This pointer means the next page is the head page.
+
+When the tail page meets the head pointer, it will use cmpxchg to
+change the pointer to the UPDATE state:
+
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-H->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+"-U->" represents a pointer in the UPDATE state.
+
+Any access to the reader will need to take some sort of lock to serialize
+the readers. But the writers will never take a lock to write to the
+ring buffer. This means we only need to worry about a single reader,
+and writes only preempt in "stack" formation.
+
+When the reader tries to swap the page with the ring buffer, it
+will also use cmpxchg. If the flag bit in the pointer to the
+head page does not have the HEADER flag set, the compare will fail
+and the reader will need to look for the new head page and try again.
+Note, the flag UPDATE and HEADER are never set at the same time.
+
+The reader swaps the reader page as follows:
+
+ +------+
+ |reader| RING BUFFER
+ |page |
+ +------+
+ +---+ +---+ +---+
+ | |--->| |--->| |
+ | |<---| |<---| |
+ +---+ +---+ +---+
+ ^ | ^ |
+ | +---------------+ |
+ +-----H-------------+
+
+The reader sets the reader page's next pointer, with the HEADER flag set,
+to point to the page after the head page.
+
+
+ +------+
+ |reader| RING BUFFER
+ |page |-------H-----------+
+ +------+ v
+ | +---+ +---+ +---+
+ | | |--->| |--->| |
+ | | |<---| |<---| |<-+
+ | +---+ +---+ +---+ |
+ | ^ | ^ | |
+ | | +---------------+ | |
+ | +-----H-------------+ |
+ +--------------------------------------+
+
+It does a cmpxchg with the pointer to the previous head page to make it
+point to the reader page. Note that the new pointer does not have the HEADER
+flag set. This action atomically moves the head page forward.
+
+ +------+
+ |reader| RING BUFFER
+ |page |-------H-----------+
+ +------+ v
+ | ^ +---+ +---+ +---+
+ | | | |-->| |-->| |
+ | | | |<--| |<--| |<-+
+ | | +---+ +---+ +---+ |
+ | | | ^ | |
+ | | +-------------+ | |
+ | +-----------------------------+ |
+ +------------------------------------+
+
+After the new head page is set, the previous pointer of the head page is
+updated to the reader page.
+
+ +------+
+ |reader| RING BUFFER
+ |page |-------H-----------+
+ +------+ <---------------+ v
+ | ^ +---+ +---+ +---+
+ | | | |-->| |-->| |
+ | | | | | |<--| |<-+
+ | | +---+ +---+ +---+ |
+ | | | ^ | |
+ | | +-------------+ | |
+ | +-----------------------------+ |
+ +------------------------------------+
+
+ +------+
+ |buffer| RING BUFFER
+ |page |-------H-----------+ <--- New head page
+ +------+ <---------------+ v
+ | ^ +---+ +---+ +---+
+ | | | | | |-->| |
+ | | New | | | |<--| |<-+
+ | | Reader +---+ +---+ +---+ |
+ | | page ----^ | |
+ | | | |
+ | +-----------------------------+ |
+ +------------------------------------+
+
+Another important point. The page that the reader page points back to
+by its previous pointer (the one that now points to the new head page)
+never points back to the reader page. That is because the reader page is
+not part of the ring buffer. Traversing the ring buffer via the next pointers
+will always stay in the ring buffer. Traversing the ring buffer via the
+prev pointers may not.
+
+Note, the way to determine a reader page is simply by examining the previous
+pointer of the page. If the next pointer of the previous page does not
+point back to the original page, then the original page is a reader page:
+
+
+ +--------+
+ | reader | next +----+
+ | page |-------->| |<====== (buffer page)
+ +--------+ +----+
+ | | ^
+ | v | next
+ prev | +----+
+ +------------->| |
+ +----+
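+
+In code, this check could look like the following (an illustrative sketch;
+the structure and field names are placeholders, not the kernel's):
+
+    static int page_is_reader_page(struct buffer_page *page)
+    {
+	/*
+	 * Ignore the flag bits for clarity: only the reader page is
+	 * not pointed back to by its previous page's next pointer.
+	 */
+	return page->prev->next != page;
+    }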
+
+The way the head page moves forward:
+
+When the tail page meets the head page and the buffer is in overwrite mode
+and more writes take place, the head page must be moved forward before the
+writer may move the tail page. The way this is done is that the writer
+performs a cmpxchg to convert the pointer to the head page from the HEADER
+flag to have the UPDATE flag set. Once this is done, the reader will
+not be able to swap the head page from the buffer, nor will it be able to
+move the head page, until the writer is finished with the move.
+
+This eliminates any races that the reader can have on the writer. The reader
+must spin, and this is why the reader can not preempt the writer.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-H->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+The following page will be made into the new head page.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-H->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+After the new head page has been set, we can set the old head page
+pointer back to NORMAL.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |-H->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+After the head page has been moved, the tail page may now move forward.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |-H->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+
+The above are the trivial updates. Now for the more complex scenarios.
+
+
+As stated before, if enough writes preempt the first write, the
+tail page may make it all the way around the buffer and meet the commit
+page. At this time, we must start dropping writes (usually with some kind
+of warning to the user). But what happens if the commit was still on the
+reader page? The commit page is not part of the ring buffer. The tail page
+must account for this.
+
+
+ reader page commit page
+ | |
+ v |
+ +---+ |
+ | |<----------+
+ | |
+ | |------+
+ +---+ |
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-H->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+ ^
+ |
+ tail page
+
+If the tail page were to simply push the head page forward, the commit when
+leaving the reader page would not be pointing to the correct page.
+
+The solution to this is to test if the commit page is on the reader page
+before pushing the head page. If it is, then it can be assumed that the
+tail page wrapped the buffer, and we must drop new writes.
+
+This is not a race condition, because the commit page can only be moved
+by the outermost writer (the writer that was preempted).
+This means that the commit will not move while a writer is moving the
+tail page. The reader can not swap the reader page if it is also being
+used as the commit page. The reader can simply check that the commit
+is off the reader page. Once the commit page leaves the reader page
+it will never go back on it unless a reader does another swap with the
+buffer page that is also the commit page.
+
+
+Nested writes
+-------------
+
+In the pushing forward of the tail page we must first push forward
+the head page if the head page is the next page. If the head page
+is not the next page, the tail page is simply updated with a cmpxchg.
+
+Only writers move the tail page. This must be done atomically to protect
+against nested writers.
+
+ temp_page = tail_page
+ next_page = temp_page->next
+ cmpxchg(tail_page, temp_page, next_page)
+
+The above will update the tail page if it is still pointing to the expected
+page. If this fails, a nested write pushed it forward, and the current
+write does not need to push it.
+
+
+ temp page
+ |
+ v
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+Nested write comes in and moves the tail page forward:
+
+ tail page (moved by nested writer)
+ temp page |
+ | |
+ v v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+The above would fail the cmpxchg, but since the tail page has already
+been moved forward, the writer will just try again to reserve storage
+on the new tail page.
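+
+Illustratively, the tail-advance step could be written as below (a sketch
+with placeholder structure and field names, not the actual kernel code):
+
+    static void advance_tail(struct cpu_buffer *cpu_buffer)
+    {
+	struct buffer_page *temp_page = cpu_buffer->tail_page;
+	struct buffer_page *next_page = temp_page->next;
+
+	/*
+	 * If this cmpxchg fails, a nested writer already moved the
+	 * tail page forward, so there is nothing left to do here.
+	 */
+	cmpxchg(&cpu_buffer->tail_page, temp_page, next_page);
+    }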
+
+But the moving of the head page is a bit more complex.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-H->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+The write converts the head page pointer to UPDATE.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+But if a nested writer preempts here, it will see that the next
+page is a head page, but it is also nested. It will detect that
+it is nested and will save that information. The detection is the
+fact that it sees the UPDATE flag instead of a HEADER or NORMAL
+pointer.
+
+The nested writer will set the new head page pointer.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-H->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+But it will not reset the update back to normal. Only the writer
+that converted a pointer from HEAD to UPDATE will convert it back
+to NORMAL.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-H->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+After the nested writer finishes, the outer most writer will convert
+the UPDATE pointer to NORMAL.
+
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |-H->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+
+It can be even more complex if several nested writes came in and moved
+the tail page ahead several pages:
+
+
+(first writer)
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-H->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+The write converts the head page pointer to UPDATE.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |--->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+The next writer comes in, sees the update, and sets up the new
+head page.
+
+(second writer)
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-H->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+The nested writer moves the tail page forward, but does not set the old
+update page to NORMAL because it is not the outermost writer.
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-H->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+Another writer preempts and sees that the page after the tail page is a head page.
+It changes it from HEAD to UPDATE.
+
+(third writer)
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-U->| |--->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+The writer will move the head page forward:
+
+
+(third writer)
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-U->| |-H->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+But since the third writer is the one that changed the HEAD flag to
+UPDATE, it will convert it back to NORMAL:
+
+
+(third writer)
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |--->| |-H->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+
+Then it will move the tail page, and return to the second writer.
+
+
+(second writer)
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |--->| |-H->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+
+The second writer will fail to move the tail page because it was already
+moved, so it will try again and add its data to the new tail page.
+It will return to the first writer.
+
+
+(first writer)
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |--->| |-H->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+The first writer can not atomically test if the tail page moved
+while it updates the HEAD page. It will then update the head page to
+what it thinks is the new head page.
+
+
+(first writer)
+
+ tail page
+ |
+ v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-H->| |-H->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+Since the cmpxchg returns the old value of the pointer, the first writer
+will see that it succeeded in updating the pointer from NORMAL to HEAD.
+But as we can see, this is not good enough. It must also check to see
+if the tail page is either where it used to be or on the next page:
+
+
+(first writer)
+
+ A B tail page
+ | | |
+ v v v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |-H->| |-H->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+If the tail page is neither A nor B, then it must reset the
+pointer back to NORMAL. Since it only needs to worry about
+nested writers, it only needs to check this after setting the HEAD page.
+
+
+(first writer)
+
+ A B tail page
+ | | |
+ v v v
+ +---+ +---+ +---+ +---+
+<---| |--->| |-U->| |--->| |-H->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
+Now the writer can update the head page. This is also why the head page must
+remain in UPDATE and only be reset by the outermost writer. This prevents
+the reader from seeing the incorrect head page.
+
+
+(first writer)
+
+ A B tail page
+ | | |
+ v v v
+ +---+ +---+ +---+ +---+
+<---| |--->| |--->| |--->| |-H->
+--->| |<---| |<---| |<---| |<---
+ +---+ +---+ +---+ +---+
+
diff --git a/MAINTAINERS b/MAINTAINERS
index 8dca9d89c6c1..989ff1149390 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -439,7 +439,7 @@ F: drivers/hwmon/ams/
AMSO1100 RNIC DRIVER
M: Tom Tucker <tom@opengridcomputing.com>
M: Steve Wise <swise@opengridcomputing.com>
-L: general@lists.openfabrics.org
+L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/hw/amso1100/
@@ -1494,7 +1494,7 @@ F: drivers/net/cxgb3/
CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
M: Steve Wise <swise@chelsio.com>
-L: general@lists.openfabrics.org
+L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org
S: Supported
F: drivers/infiniband/hw/cxgb3/
@@ -1868,7 +1868,7 @@ F: fs/efs/
EHCA (IBM GX bus InfiniBand adapter) DRIVER
M: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
M: Christoph Raisch <raisch@de.ibm.com>
-L: general@lists.openfabrics.org
+L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/ehca/
@@ -2552,7 +2552,7 @@ INFINIBAND SUBSYSTEM
M: Roland Dreier <rolandd@cisco.com>
M: Sean Hefty <sean.hefty@intel.com>
M: Hal Rosenstock <hal.rosenstock@gmail.com>
-L: general@lists.openfabrics.org (moderated for non-subscribers)
+L: linux-rdma@vger.kernel.org
W: http://www.openib.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
S: Supported
@@ -2729,7 +2729,7 @@ F: drivers/net/ipg.c
IPATH DRIVER
M: Ralph Campbell <infinipath@qlogic.com>
-L: general@lists.openfabrics.org
+L: linux-rdma@vger.kernel.org
T: git git://git.qlogic.com/ipath-linux-2.6
S: Supported
F: drivers/infiniband/hw/ipath/
@@ -3485,7 +3485,7 @@ F: drivers/scsi/NCR_D700.*
NETEFFECT IWARP RNIC DRIVER (IW_NES)
M: Faisal Latif <faisal.latif@intel.com>
M: Chien Tung <chien.tin.tung@intel.com>
-L: general@lists.openfabrics.org
+L: linux-rdma@vger.kernel.org
W: http://www.neteffect.com
S: Supported
F: drivers/infiniband/hw/nes/
diff --git a/Makefile b/Makefile
index 7d3415c0709c..60de4ef31254 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 31
-EXTRAVERSION = -rc9
+EXTRAVERSION =
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 99193b160232..beea3ccebb5e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,18 @@ config OPROFILE_IBS
If unsure, say N.
+config OPROFILE_EVENT_MULTIPLEX
+ bool "OProfile multiplexing support (EXPERIMENTAL)"
+ default n
+ depends on OPROFILE && X86
+ help
+ The number of hardware counters is limited. The multiplexing
+ feature enables OProfile to gather more events than counters
+ are provided by the hardware. This is realized by switching
+ between events at a user-specified time interval.
+
+ If unsure, say N.
+
config HAVE_OPROFILE
bool
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 60c83abfde70..5076a8860b18 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -75,6 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TIF_UAC_SIGBUS 7
#define TIF_MEMDIE 8
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
+#define TIF_NOTIFY_RESUME 10 /* callback before returning to user */
#define TIF_FREEZE 16 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -82,10 +83,12 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_FREEZE (1<<TIF_FREEZE)
/* Work to do on interrupt/exception return. */
-#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
+#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME)
/* Work to do on any return to userspace. */
#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index df65eaa84c4c..0932dbb1ef8e 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -20,6 +20,7 @@
#include <linux/binfmts.h>
#include <linux/bitops.h>
#include <linux/syscalls.h>
+#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/sigcontext.h>
@@ -683,4 +684,11 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
{
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs, sw, r0, r19);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
}
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 73394e50cbca..d3a39b1e6c0f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -130,11 +130,13 @@ extern void vfp_sync_state(struct thread_info *thread);
* TIF_SYSCALL_TRACE - syscall trace active
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
+ * TIF_NOTIFY_RESUME - callback before returning to user
* TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
+#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
@@ -143,6 +145,7 @@ extern void vfp_sync_state(struct thread_info *thread);
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8c3de1a350b5..7813ab782fda 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -51,7 +51,7 @@ fast_work_pending:
work_pending:
tst r1, #_TIF_NEED_RESCHED
bne work_resched
- tst r1, #_TIF_SIGPENDING
+ tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f6bc5d442782..b76fe06d92e7 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -12,6 +12,7 @@
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
+#include <linux/tracehook.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -707,4 +708,11 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
if (thread_flags & _TIF_SIGPENDING)
do_signal(&current->blocked, regs, syscall);
+
+ if (thread_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
}
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 99b6e1546311..0447d26d454b 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -128,6 +128,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.rx_irq = INT_24XX_MCBSP1_IRQ_RX,
.tx_irq = INT_24XX_MCBSP1_IRQ_TX,
.ops = &omap2_mcbsp_ops,
+ .buffer_size = 0x6F,
},
{
.phys_base = OMAP34XX_MCBSP2_BASE,
@@ -136,6 +137,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.rx_irq = INT_24XX_MCBSP2_IRQ_RX,
.tx_irq = INT_24XX_MCBSP2_IRQ_TX,
.ops = &omap2_mcbsp_ops,
+ .buffer_size = 0x3FF,
},
{
.phys_base = OMAP34XX_MCBSP3_BASE,
@@ -144,6 +146,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.rx_irq = INT_24XX_MCBSP3_IRQ_RX,
.tx_irq = INT_24XX_MCBSP3_IRQ_TX,
.ops = &omap2_mcbsp_ops,
+ .buffer_size = 0x6F,
},
{
.phys_base = OMAP34XX_MCBSP4_BASE,
@@ -152,6 +155,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.rx_irq = INT_24XX_MCBSP4_IRQ_RX,
.tx_irq = INT_24XX_MCBSP4_IRQ_TX,
.ops = &omap2_mcbsp_ops,
+ .buffer_size = 0x6F,
},
{
.phys_base = OMAP34XX_MCBSP5_BASE,
@@ -160,6 +164,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
.rx_irq = INT_24XX_MCBSP5_IRQ_RX,
.tx_irq = INT_24XX_MCBSP5_IRQ_TX,
.ops = &omap2_mcbsp_ops,
+ .buffer_size = 0x6F,
},
};
#define OMAP34XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap34xx_mcbsp_pdata)
diff --git a/arch/arm/mach-pxa/include/mach/audio.h b/arch/arm/mach-pxa/include/mach/audio.h
index 16eb02552d5d..a3449e35a6f5 100644
--- a/arch/arm/mach-pxa/include/mach/audio.h
+++ b/arch/arm/mach-pxa/include/mach/audio.h
@@ -3,10 +3,12 @@
#include <sound/core.h>
#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
/*
* @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
* a -1 value means no gpio will be used for reset
+ * @codec_pdata: AC97 codec platform_data
* reset_gpio should only be specified for pxa27x CPUs where a silicon
* bug prevents correct operation of the reset line. If not specified,
@@ -20,6 +22,7 @@ typedef struct {
void (*resume)(void *);
void *priv;
int reset_gpio;
+ void *codec_pdata[AC97_BUS_MAX_DEVICES];
} pxa2xx_audio_ops_t;
extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index e3ac94f09006..9b00f4cbc903 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1127,6 +1127,11 @@ int omap_dma_running(void)
void omap_dma_link_lch(int lch_head, int lch_queue)
{
if (omap_dma_in_1510_mode()) {
+ if (lch_head == lch_queue) {
+ dma_write(dma_read(CCR(lch_head)) | (3 << 8),
+ CCR(lch_head));
+ return;
+ }
printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
BUG();
return;
@@ -1149,6 +1154,11 @@ EXPORT_SYMBOL(omap_dma_link_lch);
void omap_dma_unlink_lch(int lch_head, int lch_queue)
{
if (omap_dma_in_1510_mode()) {
+ if (lch_head == lch_queue) {
+ dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
+ CCR(lch_head));
+ return;
+ }
printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
BUG();
return;
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index bb154ea76769..63a3f254af7b 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -134,6 +134,11 @@
#define OMAP_MCBSP_REG_XCERG 0x74
#define OMAP_MCBSP_REG_XCERH 0x78
#define OMAP_MCBSP_REG_SYSCON 0x8C
+#define OMAP_MCBSP_REG_THRSH2 0x90
+#define OMAP_MCBSP_REG_THRSH1 0x94
+#define OMAP_MCBSP_REG_IRQST 0xA0
+#define OMAP_MCBSP_REG_IRQEN 0xA4
+#define OMAP_MCBSP_REG_WAKEUPEN 0xA8
#define OMAP_MCBSP_REG_XCCR 0xAC
#define OMAP_MCBSP_REG_RCCR 0xB0
@@ -249,8 +254,27 @@
#define RDISABLE 0x0001
/********************** McBSP SYSCONFIG bit definitions ********************/
+#define CLOCKACTIVITY(value) ((value)<<8)
+#define SIDLEMODE(value) ((value)<<3)
+#define ENAWAKEUP 0x0004
#define SOFTRST 0x0002
+/********************** McBSP DMA operating modes **************************/
+#define MCBSP_DMA_MODE_ELEMENT 0
+#define MCBSP_DMA_MODE_THRESHOLD 1
+#define MCBSP_DMA_MODE_FRAME 2
+
+/********************** McBSP WAKEUPEN bit definitions *********************/
+#define XEMPTYEOFEN 0x4000
+#define XRDYEN 0x0400
+#define XEOFEN 0x0200
+#define XFSXEN 0x0100
+#define XSYNCERREN 0x0080
+#define RRDYEN 0x0008
+#define REOFEN 0x0004
+#define RFSREN 0x0002
+#define RSYNCERREN 0x0001
+
/* we don't do multichannel for now */
struct omap_mcbsp_reg_cfg {
u16 spcr2;
@@ -344,6 +368,9 @@ struct omap_mcbsp_platform_data {
u8 dma_rx_sync, dma_tx_sync;
u16 rx_irq, tx_irq;
struct omap_mcbsp_ops *ops;
+#ifdef CONFIG_ARCH_OMAP34XX
+ u16 buffer_size;
+#endif
};
struct omap_mcbsp {
@@ -377,6 +404,11 @@ struct omap_mcbsp {
struct omap_mcbsp_platform_data *pdata;
struct clk *iclk;
struct clk *fclk;
+#ifdef CONFIG_ARCH_OMAP34XX
+ int dma_op_mode;
+ u16 max_tx_thres;
+ u16 max_rx_thres;
+#endif
};
extern struct omap_mcbsp **mcbsp_ptr;
extern int omap_mcbsp_count;
@@ -385,10 +417,25 @@ int omap_mcbsp_init(void);
void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
int size);
void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config);
+#ifdef CONFIG_ARCH_OMAP34XX
+void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
+void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
+u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
+u16 omap_mcbsp_get_max_rx_threshold(unsigned int id);
+int omap_mcbsp_get_dma_op_mode(unsigned int id);
+#else
+static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
+{ }
+static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
+{ }
+static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; }
+static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; }
+static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; }
+#endif
int omap_mcbsp_request(unsigned int id);
void omap_mcbsp_free(unsigned int id);
-void omap_mcbsp_start(unsigned int id);
-void omap_mcbsp_stop(unsigned int id);
+void omap_mcbsp_start(unsigned int id, int tx, int rx);
+void omap_mcbsp_stop(unsigned int id, int tx, int rx);
void omap_mcbsp_xmit_word(unsigned int id, u32 word);
u32 omap_mcbsp_recv_word(unsigned int id);
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index efa0e0111f38..8dc7927906f1 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -198,6 +198,170 @@ void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg *config)
}
EXPORT_SYMBOL(omap_mcbsp_config);
+#ifdef CONFIG_ARCH_OMAP34XX
+/*
+ * omap_mcbsp_set_tx_threshold configures how to deal
+ * with the transmit threshold. The threshold value and handler can be
+ * configured here.
+ */
+void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
+{
+ struct omap_mcbsp *mcbsp;
+ void __iomem *io_base;
+
+ if (!cpu_is_omap34xx())
+ return;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+ io_base = mcbsp->io_base;
+
+ OMAP_MCBSP_WRITE(io_base, THRSH2, threshold);
+}
+EXPORT_SYMBOL(omap_mcbsp_set_tx_threshold);
+
+/*
+ * omap_mcbsp_set_rx_threshold configures how to deal
+ * with the receive threshold. The threshold value and handler can be
+ * configured here.
+ */
+void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
+{
+ struct omap_mcbsp *mcbsp;
+ void __iomem *io_base;
+
+ if (!cpu_is_omap34xx())
+ return;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+ io_base = mcbsp->io_base;
+
+ OMAP_MCBSP_WRITE(io_base, THRSH1, threshold);
+}
+EXPORT_SYMBOL(omap_mcbsp_set_rx_threshold);
+
+/*
+ * omap_mcbsp_get_max_tx_thres just returns the currently configured
+ * maximum threshold for transmission
+ */
+u16 omap_mcbsp_get_max_tx_threshold(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ return mcbsp->max_tx_thres;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_max_tx_threshold);
+
+/*
+ * omap_mcbsp_get_max_rx_thres just returns the currently configured
+ * maximum threshold for reception
+ */
+u16 omap_mcbsp_get_max_rx_threshold(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ return mcbsp->max_rx_thres;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_max_rx_threshold);
+
+/*
+ * omap_mcbsp_get_dma_op_mode just returns the currently configured
+ * operating mode for the mcbsp channel
+ */
+int omap_mcbsp_get_dma_op_mode(unsigned int id)
+{
+ struct omap_mcbsp *mcbsp;
+ int dma_op_mode;
+
+ if (!omap_mcbsp_check_valid_id(id)) {
+ printk(KERN_ERR "%s: Invalid id (%u)\n", __func__, id + 1);
+ return -ENODEV;
+ }
+ mcbsp = id_to_mcbsp_ptr(id);
+
+ spin_lock_irq(&mcbsp->lock);
+ dma_op_mode = mcbsp->dma_op_mode;
+ spin_unlock_irq(&mcbsp->lock);
+
+ return dma_op_mode;
+}
+EXPORT_SYMBOL(omap_mcbsp_get_dma_op_mode);
+
+static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp)
+{
+ /*
+ * Enable wakeup behavior, smart idle and all wakeups
+ * REVISIT: some wakeups may be unnecessary
+ */
+ if (cpu_is_omap34xx()) {
+ u16 syscon;
+
+ syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
+ syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
+
+ spin_lock_irq(&mcbsp->lock);
+ if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
+ syscon |= (ENAWAKEUP | SIDLEMODE(0x02) |
+ CLOCKACTIVITY(0x02));
+ OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN,
+ XRDYEN | RRDYEN);
+ } else {
+ syscon |= SIDLEMODE(0x01);
+ }
+ spin_unlock_irq(&mcbsp->lock);
+
+ OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+ }
+}
+
+static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp)
+{
+ /*
+ * Disable wakeup behavior, smart idle and all wakeups
+ */
+ if (cpu_is_omap34xx()) {
+ u16 syscon;
+
+ syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
+ syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
+ /*
+ * HW bug workaround - If no_idle mode is taken, we need to
+ * go to smart_idle before going to always_idle, or the
+ * device will not hit retention anymore.
+ */
+ syscon |= SIDLEMODE(0x02);
+ OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+
+ syscon &= ~(SIDLEMODE(0x03));
+ OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
+
+ OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN, 0);
+ }
+}
+#else
+static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) {}
+static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) {}
+#endif
+
/*
* We can choose between IRQ based or polled IO.
* This needs to be called before omap_mcbsp_request().
@@ -257,6 +421,9 @@ int omap_mcbsp_request(unsigned int id)
clk_enable(mcbsp->iclk);
clk_enable(mcbsp->fclk);
+ /* Do procedure specific to omap34xx arch, if applicable */
+ omap34xx_mcbsp_request(mcbsp);
+
/*
* Make sure that transmitter, receiver and sample-rate generator are
* not running before activating IRQs.
@@ -305,6 +472,9 @@ void omap_mcbsp_free(unsigned int id)
if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free)
mcbsp->pdata->ops->free(id);
+ /* Do procedure specific to omap34xx arch, if applicable */
+ omap34xx_mcbsp_free(mcbsp);
+
clk_disable(mcbsp->fclk);
clk_disable(mcbsp->iclk);
@@ -328,14 +498,15 @@ void omap_mcbsp_free(unsigned int id)
EXPORT_SYMBOL(omap_mcbsp_free);
/*
- * Here we start the McBSP, by enabling the sample
- * generator, both transmitter and receivers,
- * and the frame sync.
+ * Here we start the McBSP by enabling the transmitter, the receiver or both.
+ * If no transmitter or receiver is active prior to calling, then the
+ * sample-rate generator and frame sync are started.
*/
-void omap_mcbsp_start(unsigned int id)
+void omap_mcbsp_start(unsigned int id, int tx, int rx)
{
struct omap_mcbsp *mcbsp;
void __iomem *io_base;
+ int idle;
u16 w;
if (!omap_mcbsp_check_valid_id(id)) {
@@ -348,32 +519,58 @@ void omap_mcbsp_start(unsigned int id)
mcbsp->rx_word_length = (OMAP_MCBSP_READ(io_base, RCR1) >> 5) & 0x7;
mcbsp->tx_word_length = (OMAP_MCBSP_READ(io_base, XCR1) >> 5) & 0x7;
- /* Start the sample generator */
- w = OMAP_MCBSP_READ(io_base, SPCR2);
- OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
+ idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
+ OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
+
+ if (idle) {
+ /* Start the sample generator */
+ w = OMAP_MCBSP_READ(io_base, SPCR2);
+ OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
+ }
/* Enable transmitter and receiver */
+ tx &= 1;
w = OMAP_MCBSP_READ(io_base, SPCR2);
- OMAP_MCBSP_WRITE(io_base, SPCR2, w | 1);
+ OMAP_MCBSP_WRITE(io_base, SPCR2, w | tx);
+ rx &= 1;
w = OMAP_MCBSP_READ(io_base, SPCR1);
- OMAP_MCBSP_WRITE(io_base, SPCR1, w | 1);
+ OMAP_MCBSP_WRITE(io_base, SPCR1, w | rx);
- udelay(100);
+ /*
+ * Worst case: CLKSRG*2 = 8000khz: (1/8000) * 2 * 2 usec
+ * REVISIT: 100us may give enough time for two CLKSRG cycles, however
+ * due to some unknown PM-related or clock-gating reason it
+ * is now at 500us.
+ */
+ udelay(500);
- /* Start frame sync */
- w = OMAP_MCBSP_READ(io_base, SPCR2);
- OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
+ if (idle) {
+ /* Start frame sync */
+ w = OMAP_MCBSP_READ(io_base, SPCR2);
+ OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
+ }
+
+ if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ /* Release the transmitter and receiver */
+ w = OMAP_MCBSP_READ(io_base, XCCR);
+ w &= ~(tx ? XDISABLE : 0);
+ OMAP_MCBSP_WRITE(io_base, XCCR, w);
+ w = OMAP_MCBSP_READ(io_base, RCCR);
+ w &= ~(rx ? RDISABLE : 0);
+ OMAP_MCBSP_WRITE(io_base, RCCR, w);
+ }
/* Dump McBSP Regs */
omap_mcbsp_dump_reg(id);
}
EXPORT_SYMBOL(omap_mcbsp_start);
-void omap_mcbsp_stop(unsigned int id)
+void omap_mcbsp_stop(unsigned int id, int tx, int rx)
{
struct omap_mcbsp *mcbsp;
void __iomem *io_base;
+ int idle;
u16 w;
if (!omap_mcbsp_check_valid_id(id)) {
@@ -385,16 +582,33 @@ void omap_mcbsp_stop(unsigned int id)
io_base = mcbsp->io_base;
/* Reset transmitter */
+ tx &= 1;
+ if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ w = OMAP_MCBSP_READ(io_base, XCCR);
+ w |= (tx ? XDISABLE : 0);
+ OMAP_MCBSP_WRITE(io_base, XCCR, w);
+ }
w = OMAP_MCBSP_READ(io_base, SPCR2);
- OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1));
+ OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~tx);
/* Reset receiver */
+ rx &= 1;
+ if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ w = OMAP_MCBSP_READ(io_base, RCCR);
+ w |= (rx ? RDISABLE : 0);
+ OMAP_MCBSP_WRITE(io_base, RCCR, w);
+ }
w = OMAP_MCBSP_READ(io_base, SPCR1);
- OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~(1));
+ OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~rx);
- /* Reset the sample rate generator */
- w = OMAP_MCBSP_READ(io_base, SPCR2);
- OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
+ idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
+ OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
+
+ if (idle) {
+ /* Reset the sample rate generator */
+ w = OMAP_MCBSP_READ(io_base, SPCR2);
+ OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
+ }
}
EXPORT_SYMBOL(omap_mcbsp_stop);
@@ -883,6 +1097,149 @@ void omap_mcbsp_set_spi_mode(unsigned int id,
}
EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
+#ifdef CONFIG_ARCH_OMAP34XX
+#define max_thres(m) (mcbsp->pdata->buffer_size)
+#define valid_threshold(m, val) ((val) <= max_thres(m))
+#define THRESHOLD_PROP_BUILDER(prop) \
+static ssize_t prop##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
+ \
+ return sprintf(buf, "%u\n", mcbsp->prop); \
+} \
+ \
+static ssize_t prop##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
+ unsigned long val; \
+ int status; \
+ \
+ status = strict_strtoul(buf, 0, &val); \
+ if (status) \
+ return status; \
+ \
+ if (!valid_threshold(mcbsp, val)) \
+ return -EDOM; \
+ \
+ mcbsp->prop = val; \
+ return size; \
+} \
+ \
+static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store);
+
+THRESHOLD_PROP_BUILDER(max_tx_thres);
+THRESHOLD_PROP_BUILDER(max_rx_thres);
+
+static const char *dma_op_modes[] = {
+ "element", "threshold", "frame",
+};
+
+static ssize_t dma_op_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+ int dma_op_mode, i = 0;
+ ssize_t len = 0;
+ const char * const *s;
+
+ spin_lock_irq(&mcbsp->lock);
+ dma_op_mode = mcbsp->dma_op_mode;
+ spin_unlock_irq(&mcbsp->lock);
+
+ for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++) {
+ if (dma_op_mode == i)
+ len += sprintf(buf + len, "[%s] ", *s);
+ else
+ len += sprintf(buf + len, "%s ", *s);
+ }
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static ssize_t dma_op_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+ const char * const *s;
+ int i = 0;
+
+ for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++)
+ if (sysfs_streq(buf, *s))
+ break;
+
+ if (i == ARRAY_SIZE(dma_op_modes))
+ return -EINVAL;
+
+ spin_lock_irq(&mcbsp->lock);
+ if (!mcbsp->free) {
+ size = -EBUSY;
+ goto unlock;
+ }
+ mcbsp->dma_op_mode = i;
+
+unlock:
+ spin_unlock_irq(&mcbsp->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store);
+
+static const struct attribute *additional_attrs[] = {
+ &dev_attr_max_tx_thres.attr,
+ &dev_attr_max_rx_thres.attr,
+ &dev_attr_dma_op_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group additional_attr_group = {
+ .attrs = (struct attribute **)additional_attrs,
+};
+
+static inline int __devinit omap_additional_add(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devexit omap_additional_remove(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &additional_attr_group);
+}
+
+static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp)
+{
+ mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
+ if (cpu_is_omap34xx()) {
+ mcbsp->max_tx_thres = max_thres(mcbsp);
+ mcbsp->max_rx_thres = max_thres(mcbsp);
+ /*
+ * REVISIT: Set dmap_op_mode to THRESHOLD as default
+ * for mcbsp2 instances.
+ */
+ if (omap_additional_add(mcbsp->dev))
+ dev_warn(mcbsp->dev,
+ "Unable to create additional controls\n");
+ } else {
+ mcbsp->max_tx_thres = -EINVAL;
+ mcbsp->max_rx_thres = -EINVAL;
+ }
+}
+
+static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp)
+{
+ if (cpu_is_omap34xx())
+ omap_additional_remove(mcbsp->dev);
+}
+#else
+static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) {}
+static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp) {}
+#endif /* CONFIG_ARCH_OMAP34XX */
+
/*
* McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
* 730 has only 2 McBSP, and both of them are MPU peripherals.
@@ -953,6 +1310,10 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
mcbsp->dev = &pdev->dev;
mcbsp_ptr[id] = mcbsp;
platform_set_drvdata(pdev, mcbsp);
+
+ /* Initialize mcbsp properties for OMAP34XX if needed / applicable */
+ omap34xx_device_init(mcbsp);
+
return 0;
err_fclk:
@@ -976,6 +1337,8 @@ static int __devexit omap_mcbsp_remove(struct platform_device *pdev)
mcbsp->pdata->ops->free)
mcbsp->pdata->ops->free(mcbsp->id);
+ omap34xx_device_exit(mcbsp);
+
clk_disable(mcbsp->fclk);
clk_disable(mcbsp->iclk);
clk_put(mcbsp->fclk);
diff --git a/arch/arm/plat-s3c/include/plat/audio-simtec.h b/arch/arm/plat-s3c/include/plat/audio-simtec.h
new file mode 100644
index 000000000000..0f440b9168db
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/audio-simtec.h
@@ -0,0 +1,37 @@
+/* arch/arm/plat-s3c/include/plat/audio-simtec.h
+ *
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simtec Audio support.
+*/
+
+/**
+ * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
+ * @use_mpllin: Select codec clock from MPLLin
+ * @output_cdclk: Need to output CDCLK to the codec
+ * @have_mic: Set if we have a MIC socket
+ * @have_lout: Set if we have a LineOut socket
+ * @amp_gpio: GPIO pin to enable the AMP
+ * @amp_gain: Optional GPIO to control AMP gain
+ */
+struct s3c24xx_audio_simtec_pdata {
+ unsigned int use_mpllin:1;
+ unsigned int output_cdclk:1;
+
+ unsigned int have_mic:1;
+ unsigned int have_lout:1;
+
+ int amp_gpio;
+ int amp_gain[2];
+
+ void (*startup)(void);
+};
+
+extern int simtec_audio_add(const char *codec_name,
+ struct s3c24xx_audio_simtec_pdata *pdata);
diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
index 0fad7571030e..07659dad1748 100644
--- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
+++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
@@ -33,6 +33,11 @@
#define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1)
#define S3C2412_IISCON_IIS_ACTIVE (1 << 0)
+#define S3C64XX_IISMOD_BLC_16BIT (0 << 13)
+#define S3C64XX_IISMOD_BLC_8BIT (1 << 13)
+#define S3C64XX_IISMOD_BLC_24BIT (2 << 13)
+#define S3C64XX_IISMOD_BLC_MASK (3 << 13)
+
#define S3C64XX_IISMOD_IMS_PCLK (0 << 10)
#define S3C64XX_IISMOD_IMS_SYSMUX (1 << 10)
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index fc42de5ca209..fd0c5d7e9337 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -84,6 +84,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 6
#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
+#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
#define TIF_FREEZE 29
#define TIF_DEBUG 30 /* debugging enabled */
#define TIF_USERSPACE 31 /* true if FS sets userspace */
@@ -96,6 +97,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FREEZE (1 << TIF_FREEZE)
/* Note: The masks below must never span more than 16 bits! */
@@ -103,13 +105,15 @@ static inline struct thread_info *current_thread_info(void)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
((1 << TIF_SIGPENDING) \
+ | _TIF_NOTIFY_RESUME \
| (1 << TIF_NEED_RESCHED) \
| (1 << TIF_POLLING_NRFLAG) \
| (1 << TIF_BREAKPOINT) \
| (1 << TIF_RESTORE_SIGMASK))
/* work to do on any return to userspace */
-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE))
+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE) | \
+ _TIF_NOTIFY_RESUME)
/* work to do on return from debug mode */
#define _TIF_DBGWORK_MASK (_TIF_WORK_MASK & ~(1 << TIF_BREAKPOINT))
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 009a80155d67..169268c40ae2 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -281,7 +281,7 @@ syscall_exit_work:
ld.w r1, r0[TI_flags]
rjmp 1b
-2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
+2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
tst r1, r2
breq 3f
unmask_interrupts
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 27227561bad6..64f886fac2ef 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -16,6 +16,7 @@
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/freezer.h>
+#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
@@ -322,4 +323,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs, &current->blocked, syscall);
+
+ if (ti->flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
}
diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c
index b326023baab2..48b0f3912632 100644
--- a/arch/cris/kernel/ptrace.c
+++ b/arch/cris/kernel/ptrace.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
+#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -36,4 +37,11 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(canrestart,regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
}
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 4a7a62c6e783..6b0a2b6fed6a 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -572,6 +572,8 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(__frame);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
} /* end do_notify_resume() */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index 8bbc8b0ee45d..70e67e47d020 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -89,6 +89,7 @@ static inline struct thread_info *current_thread_info(void)
TIF_NEED_RESCHED */
#define TIF_MEMDIE 4
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
+#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
#define TIF_FREEZE 16 /* is freezing for suspend */
/* as above, but as bit values */
@@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index cf3472f7389b..af842c369d24 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -39,6 +39,7 @@
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/freezer.h>
+#include <linux/tracehook.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
@@ -552,4 +553,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
{
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs, NULL);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
}
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5a61b5c2e18f..8d3c79cd81e7 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -44,7 +44,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define get_dma_ops(dev) platform_dma_get_ops(dev)
-#define flush_write_buffers()
#include <asm-generic/dma-mapping-common.h>
@@ -69,6 +68,24 @@ dma_set_mask (struct device *dev, u64 mask)
return 0;
}
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+
+ return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr;
+}
+
extern int dma_get_cache_alignment(void);
static inline void
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 5d7c0e5b9e76..89969e950045 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -192,6 +192,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
if (test_thread_flag(TIF_NOTIFY_RESUME)) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(&scr->pt);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
/* copy user rbs to kernel rbs */
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index fb8332690179..dbeadb9c8e20 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -133,8 +133,7 @@ consider_steal_time(unsigned long new_itm)
account_idle_ticks(blocked);
run_local_timers();
- if (rcu_pending(cpu))
- rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
+ rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
scheduler_tick();
run_posix_cpu_timers(p);
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 07bb5bd00e2a..71578151a403 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -149,6 +149,7 @@ static inline unsigned int get_thread_fault_code(void)
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
#define TIF_IRET 4 /* return with iret */
+#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
@@ -160,6 +161,7 @@ static inline unsigned int get_thread_fault_code(void)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_IRET (1<<TIF_IRET)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 18124542a6eb..144b0f124fc7 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -21,6 +21,7 @@
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/freezer.h>
+#include <linux/tracehook.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
@@ -408,5 +409,12 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs,oldset);
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+
clear_thread_flag(TIF_IRET);
}
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
index 5202f5a5b420..474125886218 100644
--- a/arch/m68k/include/asm/entry_mm.h
+++ b/arch/m68k/include/asm/entry_mm.h
@@ -46,7 +46,6 @@
#define curptr a2
LFLUSH_I_AND_D = 0x00000808
-LSIGTRAP = 5
/* process bits for task_struct.ptrace */
PT_TRACESYS_OFF = 3
@@ -118,9 +117,6 @@ PT_DTRACE_BIT = 2
#define STR(X) STR1(X)
#define STR1(X) #X
-#define PT_OFF_ORIG_D0 0x24
-#define PT_OFF_FORMATVEC 0x32
-#define PT_OFF_SR 0x2C
#define SAVE_ALL_INT \
"clrl %%sp@-;" /* stk_adj */ \
"pea -1:w;" /* orig d0 = -1 */ \
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index c2553d26273d..907ed03d792f 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -72,8 +72,8 @@ LENOSYS = 38
lea %sp@(-32),%sp /* space for 8 regs */
moveml %d1-%d5/%a0-%a2,%sp@
movel sw_usp,%a0 /* get usp */
- movel %a0@-,%sp@(PT_PC) /* copy exception program counter */
- movel %a0@-,%sp@(PT_FORMATVEC)/* copy exception format/vector/sr */
+ movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */
+ movel %a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */
bra 7f
6:
clrl %sp@- /* stkadj */
@@ -89,8 +89,8 @@ LENOSYS = 38
bnes 8f /* no, skip */
move #0x2700,%sr /* disable intrs */
movel sw_usp,%a0 /* get usp */
- movel %sp@(PT_PC),%a0@- /* copy exception program counter */
- movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */
+ movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
+ movel %sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */
moveml %sp@,%d1-%d5/%a0-%a2
lea %sp@(32),%sp /* space for 8 regs */
movel %sp@+,%d0
diff --git a/arch/m68k/include/asm/math-emu.h b/arch/m68k/include/asm/math-emu.h
index ddfab96403cb..5e9249b0014c 100644
--- a/arch/m68k/include/asm/math-emu.h
+++ b/arch/m68k/include/asm/math-emu.h
@@ -145,16 +145,16 @@ extern unsigned int fp_debugprint;
* these are only used during instruction decoding
* where we always know how deep we're on the stack.
*/
-#define FPS_DO (PT_D0)
-#define FPS_D1 (PT_D1)
-#define FPS_D2 (PT_D2)
-#define FPS_A0 (PT_A0)
-#define FPS_A1 (PT_A1)
-#define FPS_A2 (PT_A2)
-#define FPS_SR (PT_SR)
-#define FPS_PC (PT_PC)
-#define FPS_EA (PT_PC+6)
-#define FPS_PC2 (PT_PC+10)
+#define FPS_DO (PT_OFF_D0)
+#define FPS_D1 (PT_OFF_D1)
+#define FPS_D2 (PT_OFF_D2)
+#define FPS_A0 (PT_OFF_A0)
+#define FPS_A1 (PT_OFF_A1)
+#define FPS_A2 (PT_OFF_A2)
+#define FPS_SR (PT_OFF_SR)
+#define FPS_PC (PT_OFF_PC)
+#define FPS_EA (PT_OFF_PC+6)
+#define FPS_PC2 (PT_OFF_PC+10)
.macro fp_get_fp_reg
lea (FPD_FPREG,FPDATA,%d0.w*4),%a0
diff --git a/arch/m68k/include/asm/thread_info_mm.h b/arch/m68k/include/asm/thread_info_mm.h
index 6ea5c33b3c56..b6da3882be9b 100644
--- a/arch/m68k/include/asm/thread_info_mm.h
+++ b/arch/m68k/include/asm/thread_info_mm.h
@@ -1,6 +1,10 @@
#ifndef _ASM_M68K_THREAD_INFO_H
#define _ASM_M68K_THREAD_INFO_H
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
+#include <asm/current.h>
#include <asm/types.h>
#include <asm/page.h>
@@ -31,7 +35,12 @@ struct thread_info {
#define init_thread_info (init_task.thread.info)
#define init_stack (init_thread_union.stack)
-#define task_thread_info(tsk) (&(tsk)->thread.info)
+#ifdef ASM_OFFSETS_C
+#define task_thread_info(tsk) ((struct thread_info *) NULL)
+#else
+#define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO))
+#endif
+
#define task_stack_page(tsk) ((tsk)->stack)
#define current_thread_info() task_thread_info(current)
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index b1f012f6c493..73e5e581245b 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -8,6 +8,8 @@
* #defines from the assembly-language output.
*/
+#define ASM_OFFSETS_C
+
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
@@ -27,6 +29,9 @@ int main(void)
DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+#ifdef CONFIG_MMU
+ DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
+#endif
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
@@ -44,20 +49,20 @@ int main(void)
DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
/* offsets into the pt_regs */
- DEFINE(PT_D0, offsetof(struct pt_regs, d0));
- DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0));
- DEFINE(PT_D1, offsetof(struct pt_regs, d1));
- DEFINE(PT_D2, offsetof(struct pt_regs, d2));
- DEFINE(PT_D3, offsetof(struct pt_regs, d3));
- DEFINE(PT_D4, offsetof(struct pt_regs, d4));
- DEFINE(PT_D5, offsetof(struct pt_regs, d5));
- DEFINE(PT_A0, offsetof(struct pt_regs, a0));
- DEFINE(PT_A1, offsetof(struct pt_regs, a1));
- DEFINE(PT_A2, offsetof(struct pt_regs, a2));
- DEFINE(PT_PC, offsetof(struct pt_regs, pc));
- DEFINE(PT_SR, offsetof(struct pt_regs, sr));
+ DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+ DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+ DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
+ DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
+ DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
+ DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
+ DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
+ DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
+ DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
+ DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+ DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+ DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
/* bitfields are a bit difficult */
- DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
+ DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
/* offsets into the irq_handler struct */
DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
@@ -84,10 +89,10 @@ int main(void)
DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
/* signal defines */
- DEFINE(SIGSEGV, SIGSEGV);
- DEFINE(SEGV_MAPERR, SEGV_MAPERR);
- DEFINE(SIGTRAP, SIGTRAP);
- DEFINE(TRAP_TRACE, TRAP_TRACE);
+ DEFINE(LSIGSEGV, SIGSEGV);
+ DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
+ DEFINE(LSIGTRAP, SIGTRAP);
+ DEFINE(LTRAP_TRACE, TRAP_TRACE);
/* offsets into the custom struct */
DEFINE(CUSTOMBASE, &amiga_custom);
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index c3735cd6207e..922f52e7ed1a 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -77,17 +77,17 @@ ENTRY(ret_from_fork)
jra .Lret_from_exception
do_trace_entry:
- movel #-ENOSYS,%sp@(PT_D0) | needed for strace
+ movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
- movel %sp@(PT_ORIG_D0),%d0
+ movel %sp@(PT_OFF_ORIG_D0),%d0
cmpl #NR_syscalls,%d0
jcs syscall
badsys:
- movel #-ENOSYS,%sp@(PT_D0)
+ movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_syscall
do_trace_exit:
@@ -103,7 +103,7 @@ ENTRY(ret_from_signal)
addql #4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
- bfextu %sp@(PT_VECTOR){#0,#4},%d0
+ bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
subql #7,%d0 | bus error frame ?
jbne 1f
movel %sp,%sp@-
@@ -127,7 +127,7 @@ ENTRY(system_call)
jcc badsys
syscall:
jbsr @(sys_call_table,%d0:l:4)@(0)
- movel %d0,%sp@(PT_D0) | save the return value
+ movel %d0,%sp@(PT_OFF_D0) | save the return value
ret_from_syscall:
|oriw #0x0700,%sr
movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
@@ -135,7 +135,7 @@ ret_from_syscall:
1: RESTORE_ALL
syscall_exit_work:
- btst #5,%sp@(PT_SR) | check if returning to kernel
+ btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1b | if so, skip resched, signals
lslw #1,%d0
jcs do_trace_exit
@@ -148,7 +148,7 @@ syscall_exit_work:
ENTRY(ret_from_exception)
.Lret_from_exception:
- btst #5,%sp@(PT_SR) | check if returning to kernel
+ btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1f | if so, skip resched, signals
| only allow interrupts when we are really the last one on the
| kernel stack, otherwise stack overflow can occur during
@@ -182,7 +182,7 @@ do_signal_return:
jbra resume_userspace
do_delayed_trace:
- bclr #7,%sp@(PT_SR) | clear trace bit in SR
+ bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
pea 1 | send SIGTRAP
movel %curptr,%sp@-
pea LSIGTRAP
@@ -199,7 +199,7 @@ ENTRY(auto_inthandler)
GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
- bfextu %sp@(PT_VECTOR){#4,#10},%d0
+ bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
subw #VEC_SPUR,%d0
movel %sp,%sp@-
@@ -216,7 +216,7 @@ ret_from_interrupt:
ALIGN
ret_from_last_interrupt:
moveq #(~ALLOWINT>>8)&0xff,%d0
- andb %sp@(PT_SR),%d0
+ andb %sp@(PT_OFF_SR),%d0
jne 2b
/* check if we need to do software interrupts */
@@ -232,7 +232,7 @@ ENTRY(user_inthandler)
GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
- bfextu %sp@(PT_VECTOR){#4,#10},%d0
+ bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
subw #VEC_USER,%d0
diff --git a/arch/m68k/math-emu/fp_entry.S b/arch/m68k/math-emu/fp_entry.S
index 954b4f304a7d..a3fe1f348dfe 100644
--- a/arch/m68k/math-emu/fp_entry.S
+++ b/arch/m68k/math-emu/fp_entry.S
@@ -85,8 +85,8 @@ fp_err_ua2:
fp_err_ua1:
addq.l #4,%sp
move.l %a0,-(%sp)
- pea SEGV_MAPERR
- pea SIGSEGV
+ pea LSEGV_MAPERR
+ pea LSIGSEGV
jsr fpemu_signal
add.w #12,%sp
jra ret_from_exception
@@ -96,8 +96,8 @@ fp_err_ua1:
| it does not really belong here, but...
fp_sendtrace060:
move.l (FPS_PC,%sp),-(%sp)
- pea TRAP_TRACE
- pea SIGTRAP
+ pea LTRAP_TRACE
+ pea LSIGTRAP
jsr fpemu_signal
add.w #12,%sp
jra ret_from_exception
@@ -122,17 +122,17 @@ fp_get_data_reg:
.long fp_get_d6, fp_get_d7
fp_get_d0:
- move.l (PT_D0+8,%sp),%d0
+ move.l (PT_OFF_D0+8,%sp),%d0
printf PREGISTER,"{d0->%08x}",1,%d0
rts
fp_get_d1:
- move.l (PT_D1+8,%sp),%d0
+ move.l (PT_OFF_D1+8,%sp),%d0
printf PREGISTER,"{d1->%08x}",1,%d0
rts
fp_get_d2:
- move.l (PT_D2+8,%sp),%d0
+ move.l (PT_OFF_D2+8,%sp),%d0
printf PREGISTER,"{d2->%08x}",1,%d0
rts
@@ -173,35 +173,35 @@ fp_put_data_reg:
fp_put_d0:
printf PREGISTER,"{d0<-%08x}",1,%d0
- move.l %d0,(PT_D0+8,%sp)
+ move.l %d0,(PT_OFF_D0+8,%sp)
rts
fp_put_d1:
printf PREGISTER,"{d1<-%08x}",1,%d0
- move.l %d0,(PT_D1+8,%sp)
+ move.l %d0,(PT_OFF_D1+8,%sp)
rts
fp_put_d2:
printf PREGISTER,"{d2<-%08x}",1,%d0
- move.l %d0,(PT_D2+8,%sp)
+ move.l %d0,(PT_OFF_D2+8,%sp)
rts
fp_put_d3:
printf PREGISTER,"{d3<-%08x}",1,%d0
| move.l %d0,%d3
- move.l %d0,(PT_D3+8,%sp)
+ move.l %d0,(PT_OFF_D3+8,%sp)
rts
fp_put_d4:
printf PREGISTER,"{d4<-%08x}",1,%d0
| move.l %d0,%d4
- move.l %d0,(PT_D4+8,%sp)
+ move.l %d0,(PT_OFF_D4+8,%sp)
rts
fp_put_d5:
printf PREGISTER,"{d5<-%08x}",1,%d0
| move.l %d0,%d5
- move.l %d0,(PT_D5+8,%sp)
+ move.l %d0,(PT_OFF_D5+8,%sp)
rts
fp_put_d6:
@@ -225,17 +225,17 @@ fp_get_addr_reg:
.long fp_get_a6, fp_get_a7
fp_get_a0:
- move.l (PT_A0+8,%sp),%a0
+ move.l (PT_OFF_A0+8,%sp),%a0
printf PREGISTER,"{a0->%08x}",1,%a0
rts
fp_get_a1:
- move.l (PT_A1+8,%sp),%a0
+ move.l (PT_OFF_A1+8,%sp),%a0
printf PREGISTER,"{a1->%08x}",1,%a0
rts
fp_get_a2:
- move.l (PT_A2+8,%sp),%a0
+ move.l (PT_OFF_A2+8,%sp),%a0
printf PREGISTER,"{a2->%08x}",1,%a0
rts
@@ -276,17 +276,17 @@ fp_put_addr_reg:
fp_put_a0:
printf PREGISTER,"{a0<-%08x}",1,%a0
- move.l %a0,(PT_A0+8,%sp)
+ move.l %a0,(PT_OFF_A0+8,%sp)
rts
fp_put_a1:
printf PREGISTER,"{a1<-%08x}",1,%a0
- move.l %a0,(PT_A1+8,%sp)
+ move.l %a0,(PT_OFF_A1+8,%sp)
rts
fp_put_a2:
printf PREGISTER,"{a2<-%08x}",1,%a0
- move.l %a0,(PT_A2+8,%sp)
+ move.l %a0,(PT_OFF_A2+8,%sp)
rts
fp_put_a3:
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index f9df720d2e40..01cc1630b66c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -115,6 +115,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */
#define TIF_SECCOMP 4 /* secure computing */
+#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
@@ -139,6 +140,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 830c5ef9932b..6254041b942f 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -21,6 +21,7 @@
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <linux/tracehook.h>
#include <asm/abi.h>
#include <asm/asm.h>
@@ -700,4 +701,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
}
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index feb2f2e810db..a21f43bc68e2 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -568,5 +568,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(__frame);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
}
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 4ce0edfbe969..ac775a76bff7 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -59,6 +59,7 @@ struct thread_info {
#define TIF_MEMDIE 5
#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
#define TIF_FREEZE 7 /* is freezing for suspend */
+#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -67,8 +68,9 @@ struct thread_info {
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | \
+#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
_TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
#endif /* __KERNEL__ */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index e552e547cb93..8c4712b74dc1 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -948,7 +948,7 @@ intr_check_sig:
/* As above */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19
- ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
+ ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
and,COND(<>) %r19, %r20, %r0
b,n intr_restore /* skip past if we've nothing to do */
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index f82544225e8e..8eb3c63c407a 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -25,6 +25,7 @@
#include <linux/stddef.h>
#include <linux/compat.h>
#include <linux/elf.h>
+#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <asm/uaccess.h>
@@ -645,4 +646,11 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall)
if (test_thread_flag(TIF_SIGPENDING) ||
test_thread_flag(TIF_RESTORE_SIGMASK))
do_signal(regs, in_syscall);
+
+ if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
}
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index b44aaabdd1a6..0c34371ec49c 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#endif
}
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+ if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
+ return 0;
+
+ if (!dev->dma_mask)
+ return 0;
+
+ return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + get_dma_direct_offset(dev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr - get_dma_direct_offset(dev);
+}
+
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index eb17da781128..2a5da069714e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
else
pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
- /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+ /* Second case is 32-bit with 64-bit PTE. In this case, we
* can just store as long as we do the two halves in the right order
* with a barrier in between. This is possible because we take care,
* in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
#else
/* Anything else just stores the PTE normally. That covers all 64-bit
- * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+ * cases, and 32-bit non-hash with 32-bit PTEs.
*/
*ptep = pte;
#endif
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f81..198266cf9e2d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
-static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
{
unsigned long tmp, token;
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
CLEAR_IO_SYNC;
- return __spin_trylock(lock) == 0;
+ return arch_spin_trylock(lock) == 0;
}
/*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
CLEAR_IO_SYNC;
while (1) {
- if (likely(__spin_trylock(lock) == 0))
+ if (likely(arch_spin_trylock(lock) == 0))
break;
do {
HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
CLEAR_IO_SYNC;
while (1) {
- if (likely(__spin_trylock(lock) == 0))
+ if (likely(arch_spin_trylock(lock) == 0))
break;
local_save_flags(flags_dis);
local_irq_restore(flags);
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long __read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(raw_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long __write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(raw_rwlock_t *rw)
{
long tmp, token;
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
while (1) {
- if (likely(__read_trylock(rw) > 0))
+ if (likely(arch_read_trylock(rw) > 0))
break;
do {
HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
while (1) {
- if (likely(__write_trylock(rw) == 0))
+ if (likely(arch_write_trylock(rw) == 0))
break;
do {
HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
- return __read_trylock(rw) > 0;
+ return arch_read_trylock(rw) > 0;
}
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
- return __write_trylock(rw) == 0;
+ return arch_write_trylock(rw) == 0;
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b73396b93905..9619285f64e8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o
obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 561b64652311..197b15646eeb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -67,6 +67,8 @@ int main(void)
DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
#ifdef CONFIG_PPC64
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
+ DEFINE(SIGSEGV, SIGSEGV);
+ DEFINE(NMI_MASK, NMI_MASK);
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 68ccf11e4f19..e8a57de85bcf 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -24,50 +24,12 @@
int swiotlb __read_mostly;
unsigned int ppc_swiotlb_enable;
-void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
-{
- unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
- void *pageaddr = page_address(pfn_to_page(pfn));
-
- if (pageaddr != NULL)
- return pageaddr + (addr % PAGE_SIZE);
- return NULL;
-}
-
-dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
- return paddr + get_dma_direct_offset(hwdev);
-}
-
-phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-
-{
- return baddr - get_dma_direct_offset(hwdev);
-}
-
-/*
- * Determine if an address needs bounce buffering via swiotlb.
- * Going forward I expect the swiotlb code to generalize on using
- * a dma_ops->addr_needs_map, and this function will move from here to the
- * generic swiotlb code.
- */
-int
-swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
- size_t size)
-{
- struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
-
- BUG_ON(!dma_ops);
- return dma_ops->addr_needs_map(hwdev, addr, size);
-}
-
/*
* Determine if an address is reachable by a pci device, or if we must bounce.
*/
static int
swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
- u64 mask = dma_get_mask(hwdev);
dma_addr_t max;
struct pci_controller *hose;
struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
if ((addr + size > max) | (addr < hose->dma_window_base_cur))
return 1;
- return !is_buffer_dma_capable(mask, addr, size);
-}
-
-static int
-swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
-{
- return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+ return 0;
}
-
/*
* At the moment, all platforms that use this code only require
* swiotlb to be used if we're operating on HIGHMEM. Since
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
.dma_supported = swiotlb_dma_supported,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
- .addr_needs_map = swiotlb_addr_needs_map,
.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index eb898112e577..8ac85e08ffae 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION
bne- do_ste_alloc /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+ clrrdi r11,r1,THREAD_SHIFT
+ lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
+ andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
+ bne 77f /* then don't call hash_page now */
+
/*
* On iSeries, we soft-disable interrupts here, then
* hard-enable interrupts so that the hash_page code can spin on
@@ -833,6 +838,20 @@ handle_page_fault:
bl .low_hash_fault
b .ret_from_except
+/*
+ * We come here as a result of a DSI at a point where we don't want
+ * to call hash_page, such as when we are accessing memory (possibly
+ * user memory) inside a PMU interrupt that occurred while interrupts
+ * were soft-disabled. We want to invoke the exception handler for
+ * the access, or panic if there isn't a handler.
+ */
+77: bl .save_nvgprs
+ mr r4,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r5,SIGSEGV
+ bl .bad_page_fault
+ b .ret_from_except
+
/* here we have a segment miss */
do_ste_alloc:
bl .ste_allocate /* try to insert stab entry */
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
new file mode 100644
index 000000000000..f74b62c67511
--- /dev/null
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -0,0 +1,527 @@
+/*
+ * Performance counter callchain support - powerpc architecture code
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_counter.h>
+#include <linux/percpu.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/pgtable.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+#ifdef CONFIG_PPC64
+#include "ppc32.h"
+#endif
+
+/*
+ * Store another value in a callchain_entry.
+ */
+static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+ unsigned int nr = entry->nr;
+
+ if (nr < PERF_MAX_STACK_DEPTH) {
+ entry->ip[nr] = ip;
+ entry->nr = nr + 1;
+ }
+}
+
+/*
+ * Is sp valid as the address of the next kernel stack frame after prev_sp?
+ * The next frame may be in a different stack area but should not go
+ * back down in the same stack area.
+ */
+static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
+{
+ if (sp & 0xf)
+ return 0; /* must be 16-byte aligned */
+ if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
+ return 0;
+ if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+ return 1;
+ /*
+ * sp could decrease when we jump off an interrupt stack
+ * back to the regular process stack.
+ */
+ if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
+ return 1;
+ return 0;
+}
+
+static void perf_callchain_kernel(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ unsigned long sp, next_sp;
+ unsigned long next_ip;
+ unsigned long lr;
+ long level = 0;
+ unsigned long *fp;
+
+ lr = regs->link;
+ sp = regs->gpr[1];
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ callchain_store(entry, regs->nip);
+
+ if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
+ return;
+
+ for (;;) {
+ fp = (unsigned long *) sp;
+ next_sp = fp[0];
+
+ if (next_sp == sp + STACK_INT_FRAME_SIZE &&
+ fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+ /*
+ * This looks like an interrupt frame for an
+ * interrupt that occurred in the kernel
+ */
+ regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
+ next_ip = regs->nip;
+ lr = regs->link;
+ level = 0;
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+
+ } else {
+ if (level == 0)
+ next_ip = lr;
+ else
+ next_ip = fp[STACK_FRAME_LR_SAVE];
+
+ /*
+ * We can't tell which of the first two addresses
+ * we get are valid, but we can filter out the
+ * obviously bogus ones here. We replace them
+ * with 0 rather than removing them entirely so
+ * that userspace can tell which is which.
+ */
+ if ((level == 1 && next_ip == lr) ||
+ (level <= 1 && !kernel_text_address(next_ip)))
+ next_ip = 0;
+
+ ++level;
+ }
+
+ callchain_store(entry, next_ip);
+ if (!valid_next_sp(next_sp, sp))
+ return;
+ sp = next_sp;
+ }
+}
+
+#ifdef CONFIG_PPC64
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize])
+#else
+#define is_huge_psize(pagesize) 0
+#endif
+
+/*
+ * On 64-bit we don't want to invoke hash_page on user addresses from
+ * interrupt context, so if the access faults, we read the page tables
+ * to find which page (if any) is mapped and access it directly.
+ */
+static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
+{
+ pgd_t *pgdir;
+ pte_t *ptep, pte;
+ int pagesize;
+ unsigned long addr = (unsigned long) ptr;
+ unsigned long offset;
+ unsigned long pfn;
+ void *kaddr;
+
+ pgdir = current->mm->pgd;
+ if (!pgdir)
+ return -EFAULT;
+
+ pagesize = get_slice_psize(current->mm, addr);
+
+ /* align address to page boundary */
+ offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
+ addr -= offset;
+
+ if (is_huge_psize(pagesize))
+ ptep = huge_pte_offset(current->mm, addr);
+ else
+ ptep = find_linux_pte(pgdir, addr);
+
+ if (ptep == NULL)
+ return -EFAULT;
+ pte = *ptep;
+ if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
+ return -EFAULT;
+ pfn = pte_pfn(pte);
+ if (!page_is_ram(pfn))
+ return -EFAULT;
+
+ /* no highmem to worry about here */
+ kaddr = pfn_to_kaddr(pfn);
+ memcpy(ret, kaddr + offset, nb);
+ return 0;
+}
+
+static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
+{
+ if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
+ ((unsigned long)ptr & 7))
+ return -EFAULT;
+
+ if (!__get_user_inatomic(*ret, ptr))
+ return 0;
+
+ return read_user_stack_slow(ptr, ret, 8);
+}
+
+static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+{
+ if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
+ ((unsigned long)ptr & 3))
+ return -EFAULT;
+
+ if (!__get_user_inatomic(*ret, ptr))
+ return 0;
+
+ return read_user_stack_slow(ptr, ret, 4);
+}
+
+static inline int valid_user_sp(unsigned long sp, int is_64)
+{
+ if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
+ return 0;
+ return 1;
+}
+
+/*
+ * 64-bit user processes use the same stack frame for RT and non-RT signals.
+ */
+struct signal_frame_64 {
+ char dummy[__SIGNAL_FRAMESIZE];
+ struct ucontext uc;
+ unsigned long unused[2];
+ unsigned int tramp[6];
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ char abigap[288];
+};
+
+static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
+{
+ if (nip == fp + offsetof(struct signal_frame_64, tramp))
+ return 1;
+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
+ nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
+ return 1;
+ return 0;
+}
+
+/*
+ * Do some sanity checking on the signal frame pointed to by sp.
+ * We check the pinfo and puc pointers in the frame.
+ */
+static int sane_signal_64_frame(unsigned long sp)
+{
+ struct signal_frame_64 __user *sf;
+ unsigned long pinfo, puc;
+
+ sf = (struct signal_frame_64 __user *) sp;
+ if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
+ read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
+ return 0;
+ return pinfo == (unsigned long) &sf->info &&
+ puc == (unsigned long) &sf->uc;
+}
+
+static void perf_callchain_user_64(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ unsigned long sp, next_sp;
+ unsigned long next_ip;
+ unsigned long lr;
+ long level = 0;
+ struct signal_frame_64 __user *sigframe;
+ unsigned long __user *fp, *uregs;
+
+ next_ip = regs->nip;
+ lr = regs->link;
+ sp = regs->gpr[1];
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, next_ip);
+
+ for (;;) {
+ fp = (unsigned long __user *) sp;
+ if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
+ return;
+ if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
+ return;
+
+ /*
+ * Note: the next_sp - sp >= signal frame size check
+ * is true when next_sp < sp, which can happen when
+ * transitioning from an alternate signal stack to the
+ * normal stack.
+ */
+ if (next_sp - sp >= sizeof(struct signal_frame_64) &&
+ (is_sigreturn_64_address(next_ip, sp) ||
+ (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
+ sane_signal_64_frame(sp)) {
+ /*
+ * This looks like a signal frame
+ */
+ sigframe = (struct signal_frame_64 __user *) sp;
+ uregs = sigframe->uc.uc_mcontext.gp_regs;
+ if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
+ read_user_stack_64(&uregs[PT_LNK], &lr) ||
+ read_user_stack_64(&uregs[PT_R1], &sp))
+ return;
+ level = 0;
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, next_ip);
+ continue;
+ }
+
+ if (level == 0)
+ next_ip = lr;
+ callchain_store(entry, next_ip);
+ ++level;
+ sp = next_sp;
+ }
+}
+
+static inline int current_is_64bit(void)
+{
+ /*
+ * We can't use test_thread_flag() here because we may be on an
+ * interrupt stack, and the thread flags don't get copied over
+ * from the thread_info on the main stack to the interrupt stack.
+ */
+ return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
+}
+
+#else /* CONFIG_PPC64 */
+/*
+ * On 32-bit we just access the address and let hash_page create a
+ * HPTE if necessary, so there is no need to fall back to reading
+ * the page tables. Since this is called at interrupt level,
+ * do_page_fault() won't treat a DSI as a page fault.
+ */
+static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
+{
+ if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
+ ((unsigned long)ptr & 3))
+ return -EFAULT;
+
+ return __get_user_inatomic(*ret, ptr);
+}
+
+static inline void perf_callchain_user_64(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+}
+
+static inline int current_is_64bit(void)
+{
+ return 0;
+}
+
+static inline int valid_user_sp(unsigned long sp, int is_64)
+{
+ if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
+ return 0;
+ return 1;
+}
+
+#define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE
+#define sigcontext32 sigcontext
+#define mcontext32 mcontext
+#define ucontext32 ucontext
+#define compat_siginfo_t struct siginfo
+
+#endif /* CONFIG_PPC64 */
+
+/*
+ * Layout for non-RT signal frames
+ */
+struct signal_frame_32 {
+ char dummy[__SIGNAL_FRAMESIZE32];
+ struct sigcontext32 sctx;
+ struct mcontext32 mctx;
+ int abigap[56];
+};
+
+/*
+ * Layout for RT signal frames
+ */
+struct rt_signal_frame_32 {
+ char dummy[__SIGNAL_FRAMESIZE32 + 16];
+ compat_siginfo_t info;
+ struct ucontext32 uc;
+ int abigap[56];
+};
+
+static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
+{
+ if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
+ return 1;
+ if (vdso32_sigtramp && current->mm->context.vdso_base &&
+ nip == current->mm->context.vdso_base + vdso32_sigtramp)
+ return 1;
+ return 0;
+}
+
+static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
+{
+ if (nip == fp + offsetof(struct rt_signal_frame_32,
+ uc.uc_mcontext.mc_pad))
+ return 1;
+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
+ nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
+ return 1;
+ return 0;
+}
+
+static int sane_signal_32_frame(unsigned int sp)
+{
+ struct signal_frame_32 __user *sf;
+ unsigned int regs;
+
+ sf = (struct signal_frame_32 __user *) (unsigned long) sp;
+ if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
+ return 0;
+ return regs == (unsigned long) &sf->mctx;
+}
+
+static int sane_rt_signal_32_frame(unsigned int sp)
+{
+ struct rt_signal_frame_32 __user *sf;
+ unsigned int regs;
+
+ sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
+ if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
+ return 0;
+ return regs == (unsigned long) &sf->uc.uc_mcontext;
+}
+
+static unsigned int __user *signal_frame_32_regs(unsigned int sp,
+ unsigned int next_sp, unsigned int next_ip)
+{
+ struct mcontext32 __user *mctx = NULL;
+ struct signal_frame_32 __user *sf;
+ struct rt_signal_frame_32 __user *rt_sf;
+
+ /*
+ * Note: the next_sp - sp >= signal frame size check
+ * is true when next_sp < sp, for example, when
+ * transitioning from an alternate signal stack to the
+ * normal stack.
+ */
+ if (next_sp - sp >= sizeof(struct signal_frame_32) &&
+ is_sigreturn_32_address(next_ip, sp) &&
+ sane_signal_32_frame(sp)) {
+ sf = (struct signal_frame_32 __user *) (unsigned long) sp;
+ mctx = &sf->mctx;
+ }
+
+ if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
+ is_rt_sigreturn_32_address(next_ip, sp) &&
+ sane_rt_signal_32_frame(sp)) {
+ rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
+ mctx = &rt_sf->uc.uc_mcontext;
+ }
+
+ if (!mctx)
+ return NULL;
+ return mctx->mc_gregs;
+}
+
+static void perf_callchain_user_32(struct pt_regs *regs,
+ struct perf_callchain_entry *entry)
+{
+ unsigned int sp, next_sp;
+ unsigned int next_ip;
+ unsigned int lr;
+ long level = 0;
+ unsigned int __user *fp, *uregs;
+
+ next_ip = regs->nip;
+ lr = regs->link;
+ sp = regs->gpr[1];
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, next_ip);
+
+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ fp = (unsigned int __user *) (unsigned long) sp;
+ if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
+ return;
+ if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
+ return;
+
+ uregs = signal_frame_32_regs(sp, next_sp, next_ip);
+ if (!uregs && level <= 1)
+ uregs = signal_frame_32_regs(sp, next_sp, lr);
+ if (uregs) {
+ /*
+ * This looks like a signal frame, so restart
+ * the stack trace with the values in it.
+ */
+ if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
+ read_user_stack_32(&uregs[PT_LNK], &lr) ||
+ read_user_stack_32(&uregs[PT_R1], &sp))
+ return;
+ level = 0;
+ callchain_store(entry, PERF_CONTEXT_USER);
+ callchain_store(entry, next_ip);
+ continue;
+ }
+
+ if (level == 0)
+ next_ip = lr;
+ callchain_store(entry, next_ip);
+ ++level;
+ sp = next_sp;
+ }
+}
+
+/*
+ * Since we can't get PMU interrupts inside a PMU interrupt handler,
+ * we don't need separate irq and nmi entries here.
+ */
+static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+
+ entry->nr = 0;
+
+ if (current->pid == 0) /* idle task? */
+ return entry;
+
+ if (!user_mode(regs)) {
+ perf_callchain_kernel(regs, entry);
+ if (current->mm)
+ regs = task_pt_regs(current);
+ else
+ regs = NULL;
+ }
+
+ if (regs) {
+ if (current_is_64bit())
+ perf_callchain_user_64(regs, entry);
+ else
+ perf_callchain_user_32(regs, entry);
+ }
+
+ return entry;
+}
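A note on the walkers above: every address is appended through callchain_store(), which silently caps the trace at PERF_MAX_STACK_DEPTH, and each walker resets level to 0 whenever it recognises an interrupt or signal frame. A hedged, stand-alone illustration of the bounded-append pattern (hypothetical types, not the perf_counter structures):

#include <stdint.h>

#define MAX_STACK_DEPTH 255		/* stand-in for PERF_MAX_STACK_DEPTH */

struct callchain {
	uint64_t nr;			/* entries used so far */
	uint64_t ip[MAX_STACK_DEPTH];	/* recorded instruction pointers */
};

/* Append one instruction pointer; entries past the depth limit are dropped. */
static void callchain_push(struct callchain *entry, uint64_t ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

int main(void)
{
	struct callchain entry = { .nr = 0 };

	callchain_push(&entry, 0xc000000000001234ull);
	return entry.nr == 1 ? 0 : 1;
}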
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5b7038f248b6..a685652effeb 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
: "memory" );
}
-void slb_flush_and_rebolt(void)
+static void __slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* appropriately too. */
unsigned long linear_llp, vmalloc_llp, lflags, vflags;
unsigned long ksp_esid_data, ksp_vsid_data;
- WARN_ON(!irqs_disabled());
-
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
lflags = SLB_VSID_KERNEL | linear_llp;
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void)
ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
}
- /*
- * We can't take a PMU exception in the following code, so hard
- * disable interrupts.
- */
- hard_irq_disable();
-
/* We need to do this all in asm, so we're sure we don't touch
* the stack between the slbia and rebolting it. */
asm volatile("isync\n"
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void)
: "memory");
}
+void slb_flush_and_rebolt(void)
+{
+
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * We can't take a PMU exception in the following code, so hard
+ * disable interrupts.
+ */
+ hard_irq_disable();
+
+ __slb_flush_and_rebolt();
+ get_paca()->slb_cache_ptr = 0;
+}
+
void slb_vmalloc_update(void)
{
unsigned long vflags;
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
- unsigned long offset = get_paca()->slb_cache_ptr;
+ unsigned long offset;
unsigned long slbie_data = 0;
unsigned long pc = KSTK_EIP(tsk);
unsigned long stack = KSTK_ESP(tsk);
unsigned long unmapped_base;
+ /*
+ * We need interrupts hard-disabled here, not just soft-disabled,
+ * so that a PMU interrupt can't occur, which might try to access
+ * user memory (to get a stack trace) and possibly cause an SLB miss
+ * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+ */
+ hard_irq_disable();
+ offset = get_paca()->slb_cache_ptr;
if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
offset <= SLB_CACHE_ENTRIES) {
int i;
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
}
asm volatile("isync" : : : "memory");
} else {
- slb_flush_and_rebolt();
+ __slb_flush_and_rebolt();
}
/* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 98cd1dc2ae75..ab5fb48b3e90 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
struct stab_entry *ste;
- unsigned long offset = __get_cpu_var(stab_cache_ptr);
+ unsigned long offset;
unsigned long pc = KSTK_EIP(tsk);
unsigned long stack = KSTK_ESP(tsk);
unsigned long unmapped_base;
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
/* Force previous translations to complete. DRENG */
asm volatile("isync" : : : "memory");
+ /*
+ * We need interrupts hard-disabled here, not just soft-disabled,
+ * so that a PMU interrupt can't occur, which might try to access
+ * user memory (to get a stack trace) and possibly cause an STAB miss
+ * which would update the stab_cache/stab_cache_ptr per-cpu variables.
+ */
+ hard_irq_disable();
+
+ offset = __get_cpu_var(stab_cache_ptr);
if (offset <= NR_STAB_CACHE_ENTRIES) {
int i;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2ae5d72f47ed..1c866efd217d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -84,7 +84,7 @@ config S390
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_FTRACE_SYSCALLS
+ select HAVE_SYSCALL_TRACEPOINTS
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_DEFAULT_NO_SPIN_MUTEXES
@@ -95,7 +95,6 @@ config S390
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
select HAVE_PERF_COUNTERS
- select GENERIC_ATOMIC64 if !64BIT
config SCHED_OMIT_FRAME_POINTER
bool
@@ -481,13 +480,6 @@ config CMM_IUCV
Select this option to enable the special message interface to
the cooperative memory management.
-config PAGE_STATES
- bool "Unused page notification"
- help
- This enables the notification of unused pages to the
- hypervisor. The ESSA instruction is used to do the states
- changes between a page that has content and the unused state.
-
config APPLDATA_BASE
bool "Linux - VM Monitor Stream, base infrastructure"
depends on PROC_FS
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0ff387cebf88..fc8fb20e7fc0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -88,8 +88,7 @@ LDFLAGS_vmlinux := -e start
head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
- arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \
- arch/s390/power/
+ arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 4aba83b31596..2bc479ab3a66 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
const u8 *temp_key = key;
u32 *flags = &tfm->crt_flags;
- if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {
@@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
- DES_KEY_SIZE))) {
-
- *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ DES_KEY_SIZE)) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index e85ba348722a..f6de7826c979 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -46,12 +46,38 @@ static int sha1_init(struct shash_desc *desc)
return 0;
}
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *octx = out;
+
+ octx->count = sctx->count;
+ memcpy(octx->state, sctx->state, sizeof(octx->state));
+ memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
+ return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ const struct sha1_state *ictx = in;
+
+ sctx->count = ictx->count;
+ memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
+ sctx->func = KIMD_SHA_1;
+ return 0;
+}
+
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = s390_sha_update,
.final = s390_sha_final,
+ .export = sha1_export,
+ .import = sha1_import,
.descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index f9fefc569632..61a7db372121 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -42,12 +42,38 @@ static int sha256_init(struct shash_desc *desc)
return 0;
}
+static int sha256_export(struct shash_desc *desc, void *out)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *octx = out;
+
+ octx->count = sctx->count;
+ memcpy(octx->state, sctx->state, sizeof(octx->state));
+ memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+ return 0;
+}
+
+static int sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ const struct sha256_state *ictx = in;
+
+ sctx->count = ictx->count;
+ memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->func = KIMD_SHA_256;
+ return 0;
+}
+
static struct shash_alg alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init,
.update = s390_sha_update,
.final = s390_sha_final,
+ .export = sha256_export,
+ .import = sha256_import,
.descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-s390",
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 83192bfc8048..4bf73d0dc525 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -13,7 +13,10 @@
*
*/
#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include "sha.h"
@@ -37,12 +40,42 @@ static int sha512_init(struct shash_desc *desc)
return 0;
}
+static int sha512_export(struct shash_desc *desc, void *out)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *octx = out;
+
+ octx->count[0] = sctx->count;
+ octx->count[1] = 0;
+ memcpy(octx->state, sctx->state, sizeof(octx->state));
+ memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+ return 0;
+}
+
+static int sha512_import(struct shash_desc *desc, const void *in)
+{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+ const struct sha512_state *ictx = in;
+
+ if (unlikely(ictx->count[1]))
+ return -ERANGE;
+ sctx->count = ictx->count[0];
+
+ memcpy(sctx->state, ictx->state, sizeof(ictx->state));
+ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->func = KIMD_SHA_512;
+ return 0;
+}
+
static struct shash_alg sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_init,
.update = s390_sha_update,
.final = s390_sha_final,
+ .export = sha512_export,
+ .import = sha512_import,
.descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
@@ -78,7 +111,10 @@ static struct shash_alg sha384_alg = {
.init = sha384_init,
.update = s390_sha_update,
.final = s390_sha_final,
+ .export = sha512_export,
+ .import = sha512_import,
.descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index fcba206529f3..4e91a2573cc4 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -900,7 +900,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 5a805df216bb..bd9914b89488 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -355,11 +355,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb,
{
struct dentry *dentry;
struct inode *inode;
- struct qstr qname;
- qname.name = name;
- qname.len = strlen(name);
- qname.hash = full_name_hash(name, qname.len);
mutex_lock(&parent->d_inode->i_mutex);
dentry = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(dentry)) {
@@ -426,7 +422,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
char tmp[TMP_SIZE];
struct dentry *dentry;
- snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value);
+ snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value);
buffer = kstrdup(tmp, GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c7d0abfb0f00..ae7c8f9f94a5 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,33 +1,23 @@
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__
-#include <linux/compiler.h>
-#include <linux/types.h>
-
/*
- * include/asm-s390/atomic.h
+ * Copyright 1999,2009 IBM Corp.
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Denis Joseph Barrow,
+ * Arnd Bergmann <arndb@de.ibm.com>,
*
- * S390 version
- * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Denis Joseph Barrow,
- * Arnd Bergmann (arndb@de.ibm.com)
- *
- * Derived from "include/asm-i386/bitops.h"
- * Copyright (C) 1992, Linus Torvalds
+ * Atomic operations that C can't guarantee us.
+ * Useful for resource counting etc.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
*
*/
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- * S390 uses 'Compare And Swap' for atomicity in SMP enviroment
- */
+#include <linux/compiler.h>
+#include <linux/types.h>
#define ATOMIC_INIT(i) { (i) }
-#ifdef __KERNEL__
-
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CS_LOOP(ptr, op_val, op_string) ({ \
@@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i)
barrier();
}
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
return __CS_LOOP(v, i, "ar");
}
@@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
#define atomic_inc_return(_v) atomic_add_return(1, _v)
#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
return __CS_LOOP(v, i, "sr");
}
@@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
- __CS_LOOP(v, ~mask, "nr");
+ __CS_LOOP(v, ~mask, "nr");
}
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
- __CS_LOOP(v, mask, "or");
+ __CS_LOOP(v, mask, "or");
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile(
@@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
return old;
}
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#undef __CS_LOOP
-#ifdef __s390x__
#define ATOMIC64_INIT(i) { (i) }
+#ifdef CONFIG_64BIT
+
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
@@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
: "=&d" (old_val), "=&d" (new_val), \
"=Q" (((atomic_t *)(ptr))->counter) \
: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
- : "cc", "memory" ); \
+ : "cc", "memory"); \
new_val; \
})
@@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
"=m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val), \
"m" (((atomic_t *)(ptr))->counter) \
- : "cc", "memory" ); \
+ : "cc", "memory"); \
new_val; \
})
@@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i)
barrier();
}
-static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
return __CSG_LOOP(v, i, "agr");
}
-#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
-#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
-static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
return __CSG_LOOP(v, i, "sgr");
}
-#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
-#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
-static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, ~mask, "ngr");
+ __CSG_LOOP(v, ~mask, "ngr");
}
-static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
- __CSG_LOOP(v, mask, "ogr");
+ __CSG_LOOP(v, mask, "ogr");
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+static inline long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
@@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
return old;
}
-static __inline__ int atomic64_add_unless(atomic64_t *v,
- long long a, long long u)
+#undef __CSG_LOOP
+
+#else /* CONFIG_64BIT */
+
+typedef struct {
+ long long counter;
+} atomic64_t;
+
+static inline long long atomic64_read(const atomic64_t *v)
+{
+ register_pair rp;
+
+ asm volatile(
+ " lm %0,%N0,0(%1)"
+ : "=&d" (rp)
+ : "a" (&v->counter), "m" (v->counter)
+ );
+ return rp.pair;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ register_pair rp = {.pair = i};
+
+ asm volatile(
+ " stm %1,%N1,0(%2)"
+ : "=m" (v->counter)
+ : "d" (rp), "a" (&v->counter)
+ );
+}
+
+static inline long long atomic64_xchg(atomic64_t *v, long long new)
+{
+ register_pair rp_new = {.pair = new};
+ register_pair rp_old;
+
+ asm volatile(
+ " lm %0,%N0,0(%2)\n"
+ "0: cds %0,%3,0(%2)\n"
+ " jl 0b\n"
+ : "=&d" (rp_old), "+m" (v->counter)
+ : "a" (&v->counter), "d" (rp_new)
+ : "cc");
+ return rp_old.pair;
+}
+
+static inline long long atomic64_cmpxchg(atomic64_t *v,
+ long long old, long long new)
+{
+ register_pair rp_old = {.pair = old};
+ register_pair rp_new = {.pair = new};
+
+ asm volatile(
+ " cds %0,%3,0(%2)"
+ : "+&d" (rp_old), "+m" (v->counter)
+ : "a" (&v->counter), "d" (rp_new)
+ : "cc");
+ return rp_old.pair;
+}
+
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old + i;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+ return new;
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old - i;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+ return new;
+}
+
+static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old | mask;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
+{
+ long long old, new;
+
+ do {
+ old = atomic64_read(v);
+ new = old & mask;
+ } while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+#endif /* CONFIG_64BIT */
+
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
long long c, old;
c = atomic64_read(v);
@@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
return c != u;
}
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#undef __CSG_LOOP
-
-#else /* __s390x__ */
-
-#include <asm-generic/atomic64.h>
-
-#endif /* __s390x__ */
+#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
+#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
@@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic-long.h>
-#endif /* __KERNEL__ */
+
#endif /* __ARCH_S390_ATOMIC__ */
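On 31-bit kernels the patch derives atomic64_add_return(), atomic64_sub_return() and the mask operations from an atomic64_cmpxchg() retry loop. The same pattern in portable C11, offered only as an illustration (the kernel code uses the s390 CDS instruction, not <stdatomic.h>):

#include <stdatomic.h>
#include <stdint.h>

/* Add 'i' to a 64-bit counter with a compare-and-swap retry loop. */
static int64_t add_return_via_cas(_Atomic int64_t *v, int64_t i)
{
	int64_t old, new;

	do {
		old = atomic_load(v);
		new = old + i;
		/* Retry if another thread updated *v between the load and the CAS. */
	} while (!atomic_compare_exchange_weak(v, &old, new));

	return new;
}

int main(void)
{
	_Atomic int64_t v = 40;

	return add_return_via_cas(&v, 2) == 42 ? 0 : 1;
}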
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index d5a8e7c1477c..6c00f6800a34 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -78,28 +78,11 @@ csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
*/
static inline __sum16 csum_fold(__wsum sum)
{
-#ifndef __s390x__
- register_pair rp;
+ u32 csum = (__force u32) sum;
- asm volatile(
- " slr %N1,%N1\n" /* %0 = H L */
- " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */
- " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */
- " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */
- " alr %0,%1\n" /* %0 = H+L+C L+H */
- " srl %0,16\n" /* %0 = H+L+C */
- : "+&d" (sum), "=d" (rp) : : "cc");
-#else /* __s390x__ */
- asm volatile(
- " sr 3,3\n" /* %0 = H*65536 + L */
- " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */
- " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */
- " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */
- " alr %0,2\n" /* %0 = H+L+C L+H */
- " srl %0,16\n" /* %0 = H+L+C */
- : "+&d" (sum) : : "cc", "2", "3");
-#endif /* __s390x__ */
- return (__force __sum16) ~sum;
+ csum += (csum >> 16) + (csum << 16);
+ csum >>= 16;
+ return (__force __sum16) ~csum;
}
/*
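The branch-free csum_fold() above adds the high and low 16-bit halves (carry included) and returns the one's complement of the result. A small self-check of the arithmetic in plain C, an illustration rather than part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the new csum_fold(), written as ordinary C. */
static uint16_t fold32_to_16(uint32_t csum)
{
	csum += (csum >> 16) + (csum << 16);	/* high+low ends up in the upper half */
	csum >>= 16;				/* keep the folded 16 bits */
	return (uint16_t)~csum;
}

int main(void)
{
	/* high half 0x0001 + low half 0xfffe = 0xffff, so the fold yields ~0xffff = 0 */
	printf("%#x\n", (unsigned int)fold32_to_16(0x0001fffeu));
	return 0;
}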
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index 807997f7414b..4943654ed7fd 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -125,4 +125,32 @@ struct chsc_cpd_info {
#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
+#ifdef __KERNEL__
+
+struct css_general_char {
+ u64 : 12;
+ u32 dynio : 1; /* bit 12 */
+ u32 : 28;
+ u32 aif : 1; /* bit 41 */
+ u32 : 3;
+ u32 mcss : 1; /* bit 45 */
+ u32 fcs : 1; /* bit 46 */
+ u32 : 1;
+ u32 ext_mb : 1; /* bit 48 */
+ u32 : 7;
+ u32 aif_tdd : 1; /* bit 56 */
+ u32 : 1;
+ u32 qebsm : 1; /* bit 58 */
+ u32 : 8;
+ u32 aif_osa : 1; /* bit 67 */
+ u32 : 14;
+ u32 cib : 1; /* bit 82 */
+ u32 : 5;
+ u32 fcx : 1; /* bit 88 */
+ u32 : 7;
+}__attribute__((packed));
+
+extern struct css_general_char css_general_characteristics;
+
+#endif /* __KERNEL__ */
#endif
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 619bf94b11f1..e85679af54dd 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -15,228 +15,7 @@
#define LPM_ANYPATH 0xff
#define __MAX_CSSID 0
-/**
- * struct cmd_scsw - command-mode subchannel status word
- * @key: subchannel key
- * @sctl: suspend control
- * @eswf: esw format
- * @cc: deferred condition code
- * @fmt: format
- * @pfch: prefetch
- * @isic: initial-status interruption control
- * @alcc: address-limit checking control
- * @ssi: suppress-suspended interruption
- * @zcc: zero condition code
- * @ectl: extended control
- * @pno: path not operational
- * @res: reserved
- * @fctl: function control
- * @actl: activity control
- * @stctl: status control
- * @cpa: channel program address
- * @dstat: device status
- * @cstat: subchannel status
- * @count: residual count
- */
-struct cmd_scsw {
- __u32 key : 4;
- __u32 sctl : 1;
- __u32 eswf : 1;
- __u32 cc : 2;
- __u32 fmt : 1;
- __u32 pfch : 1;
- __u32 isic : 1;
- __u32 alcc : 1;
- __u32 ssi : 1;
- __u32 zcc : 1;
- __u32 ectl : 1;
- __u32 pno : 1;
- __u32 res : 1;
- __u32 fctl : 3;
- __u32 actl : 7;
- __u32 stctl : 5;
- __u32 cpa;
- __u32 dstat : 8;
- __u32 cstat : 8;
- __u32 count : 16;
-} __attribute__ ((packed));
-
-/**
- * struct tm_scsw - transport-mode subchannel status word
- * @key: subchannel key
- * @eswf: esw format
- * @cc: deferred condition code
- * @fmt: format
- * @x: IRB-format control
- * @q: interrogate-complete
- * @ectl: extended control
- * @pno: path not operational
- * @fctl: function control
- * @actl: activity control
- * @stctl: status control
- * @tcw: TCW address
- * @dstat: device status
- * @cstat: subchannel status
- * @fcxs: FCX status
- * @schxs: subchannel-extended status
- */
-struct tm_scsw {
- u32 key:4;
- u32 :1;
- u32 eswf:1;
- u32 cc:2;
- u32 fmt:3;
- u32 x:1;
- u32 q:1;
- u32 :1;
- u32 ectl:1;
- u32 pno:1;
- u32 :1;
- u32 fctl:3;
- u32 actl:7;
- u32 stctl:5;
- u32 tcw;
- u32 dstat:8;
- u32 cstat:8;
- u32 fcxs:8;
- u32 schxs:8;
-} __attribute__ ((packed));
-
-/**
- * union scsw - subchannel status word
- * @cmd: command-mode SCSW
- * @tm: transport-mode SCSW
- */
-union scsw {
- struct cmd_scsw cmd;
- struct tm_scsw tm;
-} __attribute__ ((packed));
-
-int scsw_is_tm(union scsw *scsw);
-u32 scsw_key(union scsw *scsw);
-u32 scsw_eswf(union scsw *scsw);
-u32 scsw_cc(union scsw *scsw);
-u32 scsw_ectl(union scsw *scsw);
-u32 scsw_pno(union scsw *scsw);
-u32 scsw_fctl(union scsw *scsw);
-u32 scsw_actl(union scsw *scsw);
-u32 scsw_stctl(union scsw *scsw);
-u32 scsw_dstat(union scsw *scsw);
-u32 scsw_cstat(union scsw *scsw);
-int scsw_is_solicited(union scsw *scsw);
-int scsw_is_valid_key(union scsw *scsw);
-int scsw_is_valid_eswf(union scsw *scsw);
-int scsw_is_valid_cc(union scsw *scsw);
-int scsw_is_valid_ectl(union scsw *scsw);
-int scsw_is_valid_pno(union scsw *scsw);
-int scsw_is_valid_fctl(union scsw *scsw);
-int scsw_is_valid_actl(union scsw *scsw);
-int scsw_is_valid_stctl(union scsw *scsw);
-int scsw_is_valid_dstat(union scsw *scsw);
-int scsw_is_valid_cstat(union scsw *scsw);
-int scsw_cmd_is_valid_key(union scsw *scsw);
-int scsw_cmd_is_valid_sctl(union scsw *scsw);
-int scsw_cmd_is_valid_eswf(union scsw *scsw);
-int scsw_cmd_is_valid_cc(union scsw *scsw);
-int scsw_cmd_is_valid_fmt(union scsw *scsw);
-int scsw_cmd_is_valid_pfch(union scsw *scsw);
-int scsw_cmd_is_valid_isic(union scsw *scsw);
-int scsw_cmd_is_valid_alcc(union scsw *scsw);
-int scsw_cmd_is_valid_ssi(union scsw *scsw);
-int scsw_cmd_is_valid_zcc(union scsw *scsw);
-int scsw_cmd_is_valid_ectl(union scsw *scsw);
-int scsw_cmd_is_valid_pno(union scsw *scsw);
-int scsw_cmd_is_valid_fctl(union scsw *scsw);
-int scsw_cmd_is_valid_actl(union scsw *scsw);
-int scsw_cmd_is_valid_stctl(union scsw *scsw);
-int scsw_cmd_is_valid_dstat(union scsw *scsw);
-int scsw_cmd_is_valid_cstat(union scsw *scsw);
-int scsw_cmd_is_solicited(union scsw *scsw);
-int scsw_tm_is_valid_key(union scsw *scsw);
-int scsw_tm_is_valid_eswf(union scsw *scsw);
-int scsw_tm_is_valid_cc(union scsw *scsw);
-int scsw_tm_is_valid_fmt(union scsw *scsw);
-int scsw_tm_is_valid_x(union scsw *scsw);
-int scsw_tm_is_valid_q(union scsw *scsw);
-int scsw_tm_is_valid_ectl(union scsw *scsw);
-int scsw_tm_is_valid_pno(union scsw *scsw);
-int scsw_tm_is_valid_fctl(union scsw *scsw);
-int scsw_tm_is_valid_actl(union scsw *scsw);
-int scsw_tm_is_valid_stctl(union scsw *scsw);
-int scsw_tm_is_valid_dstat(union scsw *scsw);
-int scsw_tm_is_valid_cstat(union scsw *scsw);
-int scsw_tm_is_valid_fcxs(union scsw *scsw);
-int scsw_tm_is_valid_schxs(union scsw *scsw);
-int scsw_tm_is_solicited(union scsw *scsw);
-
-#define SCSW_FCTL_CLEAR_FUNC 0x1
-#define SCSW_FCTL_HALT_FUNC 0x2
-#define SCSW_FCTL_START_FUNC 0x4
-
-#define SCSW_ACTL_SUSPENDED 0x1
-#define SCSW_ACTL_DEVACT 0x2
-#define SCSW_ACTL_SCHACT 0x4
-#define SCSW_ACTL_CLEAR_PEND 0x8
-#define SCSW_ACTL_HALT_PEND 0x10
-#define SCSW_ACTL_START_PEND 0x20
-#define SCSW_ACTL_RESUME_PEND 0x40
-
-#define SCSW_STCTL_STATUS_PEND 0x1
-#define SCSW_STCTL_SEC_STATUS 0x2
-#define SCSW_STCTL_PRIM_STATUS 0x4
-#define SCSW_STCTL_INTER_STATUS 0x8
-#define SCSW_STCTL_ALERT_STATUS 0x10
-
-#define DEV_STAT_ATTENTION 0x80
-#define DEV_STAT_STAT_MOD 0x40
-#define DEV_STAT_CU_END 0x20
-#define DEV_STAT_BUSY 0x10
-#define DEV_STAT_CHN_END 0x08
-#define DEV_STAT_DEV_END 0x04
-#define DEV_STAT_UNIT_CHECK 0x02
-#define DEV_STAT_UNIT_EXCEP 0x01
-
-#define SCHN_STAT_PCI 0x80
-#define SCHN_STAT_INCORR_LEN 0x40
-#define SCHN_STAT_PROG_CHECK 0x20
-#define SCHN_STAT_PROT_CHECK 0x10
-#define SCHN_STAT_CHN_DATA_CHK 0x08
-#define SCHN_STAT_CHN_CTRL_CHK 0x04
-#define SCHN_STAT_INTF_CTRL_CHK 0x02
-#define SCHN_STAT_CHAIN_CHECK 0x01
-
-/*
- * architectured values for first sense byte
- */
-#define SNS0_CMD_REJECT 0x80
-#define SNS_CMD_REJECT SNS0_CMD_REJEC
-#define SNS0_INTERVENTION_REQ 0x40
-#define SNS0_BUS_OUT_CHECK 0x20
-#define SNS0_EQUIPMENT_CHECK 0x10
-#define SNS0_DATA_CHECK 0x08
-#define SNS0_OVERRUN 0x04
-#define SNS0_INCOMPL_DOMAIN 0x01
-
-/*
- * architectured values for second sense byte
- */
-#define SNS1_PERM_ERR 0x80
-#define SNS1_INV_TRACK_FORMAT 0x40
-#define SNS1_EOC 0x20
-#define SNS1_MESSAGE_TO_OPER 0x10
-#define SNS1_NO_REC_FOUND 0x08
-#define SNS1_FILE_PROTECTED 0x04
-#define SNS1_WRITE_INHIBITED 0x02
-#define SNS1_INPRECISE_END 0x01
-
-/*
- * architectured values for third sense byte
- */
-#define SNS2_REQ_INH_WRITE 0x80
-#define SNS2_CORRECTABLE 0x40
-#define SNS2_FIRST_LOG_ERR 0x20
-#define SNS2_ENV_DATA_PRESENT 0x10
-#define SNS2_INPRECISE_END 0x04
+#include <asm/scsw.h>
/**
* struct ccw1 - channel command word
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
new file mode 100644
index 000000000000..471234b90574
--- /dev/null
+++ b/arch/s390/include/asm/cpu.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright IBM Corp. 2000,2009
+ * Author(s): Hartmut Penner <hp@de.ibm.com>,
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ * Christian Ehrhardt <ehrhardt@de.ibm.com>,
+ */
+
+#ifndef _ASM_S390_CPU_H
+#define _ASM_S390_CPU_H
+
+#define MAX_CPU_ADDRESS 255
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+struct cpuid
+{
+ unsigned int version : 8;
+ unsigned int ident : 24;
+ unsigned int machine : 16;
+ unsigned int unused : 16;
+} __packed;
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_S390_CPU_H */
diff --git a/arch/s390/include/asm/cpuid.h b/arch/s390/include/asm/cpuid.h
deleted file mode 100644
index 07836a2e5222..000000000000
--- a/arch/s390/include/asm/cpuid.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright IBM Corp. 2000,2009
- * Author(s): Hartmut Penner <hp@de.ibm.com>,
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Christian Ehrhardt <ehrhardt@de.ibm.com>
- */
-
-#ifndef _ASM_S390_CPUID_H_
-#define _ASM_S390_CPUID_H_
-
-/*
- * CPU type and hardware bug flags. Kept separately for each CPU.
- * Members of this structure are referenced in head.S, so think twice
- * before touching them. [mj]
- */
-
-typedef struct
-{
- unsigned int version : 8;
- unsigned int ident : 24;
- unsigned int machine : 16;
- unsigned int unused : 16;
-} __attribute__ ((packed)) cpuid_t;
-
-#endif /* _ASM_S390_CPUID_H_ */
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 31ed5686a968..18124b75a7ab 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -167,6 +167,10 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
return debug_event_common(id,level,txt,strlen(txt));
}
+/*
+ * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
+ * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
+ */
extern debug_entry_t *
debug_sprintf_event(debug_info_t* id,int level,char *string,...)
__attribute__ ((format(printf, 3, 4)));
@@ -206,7 +210,10 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
return debug_exception_common(id,level,txt,strlen(txt));
}
-
+/*
+ * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
+ * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
+ */
extern debug_entry_t *
debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
__attribute__ ((format(printf, 3, 4)));
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 89ec7056da28..498bc3892385 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -18,13 +18,6 @@
#include <linux/interrupt.h>
#include <asm/lowcore.h>
-/* irq_cpustat_t is unused currently, but could be converted
- * into a percpu variable instead of storing softirq_pending
- * on the lowcore */
-typedef struct {
- unsigned int __softirq_pending;
-} irq_cpustat_t;
-
#define local_softirq_pending() (S390_lowcore.softirq_pending)
#define __ARCH_IRQ_STAT
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 1171e6d144a3..5e95d95450b3 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -57,6 +57,8 @@ struct ipl_block_fcp {
} __attribute__((packed));
#define DIAG308_VMPARM_SIZE 64
+#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \
+ offsetof(struct ipl_block_fcp, scp_data)))
struct ipl_block_ccw {
u8 load_parm[8];
@@ -91,7 +93,8 @@ extern void do_halt(void);
extern void do_poff(void);
extern void ipl_save_parameters(void);
extern void ipl_update_parameters(void);
-extern void get_ipl_vmparm(char *);
+extern size_t append_ipl_vmparm(char *, size_t);
+extern size_t append_ipl_scpdata(char *, size_t);
enum {
IPL_DEVNO_VALID = 1,
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 1cd02f6073a0..698988f69403 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -17,7 +17,7 @@
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <asm/debug.h>
-#include <asm/cpuid.h>
+#include <asm/cpu.h>
#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
@@ -217,8 +217,8 @@ struct kvm_vcpu_arch {
struct hrtimer ckc_timer;
struct tasklet_struct tasklet;
union {
- cpuid_t cpu_id;
- u64 stidp_data;
+ struct cpuid cpu_id;
+ u64 stidp_data;
};
};
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h
index 0503936f101f..acdfdff26611 100644
--- a/arch/s390/include/asm/kvm_virtio.h
+++ b/arch/s390/include/asm/kvm_virtio.h
@@ -54,14 +54,4 @@ struct kvm_vqconfig {
* This is pagesize for historical reasons. */
#define KVM_S390_VIRTIO_RING_ALIGN 4096
-#ifdef __KERNEL__
-/* early virtio console setup */
-#ifdef CONFIG_S390_GUEST
-extern void s390_virtio_console_init(void);
-#else
-static inline void s390_virtio_console_init(void)
-{
-}
-#endif /* CONFIG_VIRTIO_CONSOLE */
-#endif /* __KERNEL__ */
#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 5046ad6b7a63..6bc9426a6fbf 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -132,7 +132,7 @@
#ifndef __ASSEMBLY__
-#include <asm/cpuid.h>
+#include <asm/cpu.h>
#include <asm/ptrace.h>
#include <linux/types.h>
@@ -275,7 +275,7 @@ struct _lowcore
__u32 user_exec_asce; /* 0x02ac */
/* SMP info area */
- cpuid_t cpu_id; /* 0x02b0 */
+ struct cpuid cpu_id; /* 0x02b0 */
__u32 cpu_nr; /* 0x02b8 */
__u32 softirq_pending; /* 0x02bc */
__u32 percpu_offset; /* 0x02c0 */
@@ -380,7 +380,7 @@ struct _lowcore
__u64 user_exec_asce; /* 0x0318 */
/* SMP info area */
- cpuid_t cpu_id; /* 0x0320 */
+ struct cpuid cpu_id; /* 0x0320 */
__u32 cpu_nr; /* 0x0328 */
__u32 softirq_pending; /* 0x032c */
__u64 percpu_offset; /* 0x0330 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 3b59216e6284..03be99919d62 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -2,6 +2,7 @@
#define __MMU_H
typedef struct {
+ spinlock_t list_lock;
struct list_head crst_list;
struct list_head pgtable_list;
unsigned long asce_bits;
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 3e3594d01f83..5e9daf5d7f22 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -125,8 +125,6 @@ page_get_storage_key(unsigned long addr)
return skey;
}
-#ifdef CONFIG_PAGE_STATES
-
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
@@ -134,8 +132,6 @@ void arch_alloc_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE
-#endif
-
#endif /* !__ASSEMBLY__ */
#define __PAGE_OFFSET 0x0UL
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index b2658b9220fe..ddad5903341c 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -140,6 +140,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
+ spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.crst_list);
INIT_LIST_HEAD(&mm->context.pgtable_list);
return (pgd_t *) crst_table_alloc(mm, s390_noexec);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c139fa7b8e89..cf8eed3fa779 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,7 +14,7 @@
#define __ASM_S390_PROCESSOR_H
#include <linux/linkage.h>
-#include <asm/cpuid.h>
+#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
@@ -26,7 +26,7 @@
*/
#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
-static inline void get_cpu_id(cpuid_t *ptr)
+static inline void get_cpu_id(struct cpuid *ptr)
{
asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr));
}
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 29ec8e28c8df..35d786fe93ae 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1,19 +1 @@
-#ifndef _ASMS390_SCATTERLIST_H
-#define _ASMS390_SCATTERLIST_H
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
-};
-
-#ifdef __s390x__
-#define ISA_DMA_THRESHOLD (0xffffffffffffffffUL)
-#else
-#define ISA_DMA_THRESHOLD (0xffffffffUL)
-#endif
-
-#endif /* _ASMS390X_SCATTERLIST_H */
+#include <asm-generic/scatterlist.h>
diff --git a/drivers/s390/cio/scsw.c b/arch/s390/include/asm/scsw.h
index f8da25ab576d..de389cb54d28 100644
--- a/drivers/s390/cio/scsw.c
+++ b/arch/s390/include/asm/scsw.h
@@ -1,15 +1,182 @@
/*
* Helper functions for scsw access.
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
+#ifndef _ASM_S390_SCSW_H_
+#define _ASM_S390_SCSW_H_
+
#include <linux/types.h>
-#include <linux/module.h>
+#include <asm/chsc.h>
#include <asm/cio.h>
-#include "css.h"
-#include "chsc.h"
+
+/**
+ * struct cmd_scsw - command-mode subchannel status word
+ * @key: subchannel key
+ * @sctl: suspend control
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @fmt: format
+ * @pfch: prefetch
+ * @isic: initial-status interruption control
+ * @alcc: address-limit checking control
+ * @ssi: suppress-suspended interruption
+ * @zcc: zero condition code
+ * @ectl: extended control
+ * @pno: path not operational
+ * @res: reserved
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @cpa: channel program address
+ * @dstat: device status
+ * @cstat: subchannel status
+ * @count: residual count
+ */
+struct cmd_scsw {
+ __u32 key : 4;
+ __u32 sctl : 1;
+ __u32 eswf : 1;
+ __u32 cc : 2;
+ __u32 fmt : 1;
+ __u32 pfch : 1;
+ __u32 isic : 1;
+ __u32 alcc : 1;
+ __u32 ssi : 1;
+ __u32 zcc : 1;
+ __u32 ectl : 1;
+ __u32 pno : 1;
+ __u32 res : 1;
+ __u32 fctl : 3;
+ __u32 actl : 7;
+ __u32 stctl : 5;
+ __u32 cpa;
+ __u32 dstat : 8;
+ __u32 cstat : 8;
+ __u32 count : 16;
+} __attribute__ ((packed));
+
+/**
+ * struct tm_scsw - transport-mode subchannel status word
+ * @key: subchannel key
+ * @eswf: esw format
+ * @cc: deferred condition code
+ * @fmt: format
+ * @x: IRB-format control
+ * @q: interrogate-complete
+ * @ectl: extended control
+ * @pno: path not operational
+ * @fctl: function control
+ * @actl: activity control
+ * @stctl: status control
+ * @tcw: TCW address
+ * @dstat: device status
+ * @cstat: subchannel status
+ * @fcxs: FCX status
+ * @schxs: subchannel-extended status
+ */
+struct tm_scsw {
+ u32 key:4;
+ u32 :1;
+ u32 eswf:1;
+ u32 cc:2;
+ u32 fmt:3;
+ u32 x:1;
+ u32 q:1;
+ u32 :1;
+ u32 ectl:1;
+ u32 pno:1;
+ u32 :1;
+ u32 fctl:3;
+ u32 actl:7;
+ u32 stctl:5;
+ u32 tcw;
+ u32 dstat:8;
+ u32 cstat:8;
+ u32 fcxs:8;
+ u32 schxs:8;
+} __attribute__ ((packed));
+
+/**
+ * union scsw - subchannel status word
+ * @cmd: command-mode SCSW
+ * @tm: transport-mode SCSW
+ */
+union scsw {
+ struct cmd_scsw cmd;
+ struct tm_scsw tm;
+} __attribute__ ((packed));
+
+#define SCSW_FCTL_CLEAR_FUNC 0x1
+#define SCSW_FCTL_HALT_FUNC 0x2
+#define SCSW_FCTL_START_FUNC 0x4
+
+#define SCSW_ACTL_SUSPENDED 0x1
+#define SCSW_ACTL_DEVACT 0x2
+#define SCSW_ACTL_SCHACT 0x4
+#define SCSW_ACTL_CLEAR_PEND 0x8
+#define SCSW_ACTL_HALT_PEND 0x10
+#define SCSW_ACTL_START_PEND 0x20
+#define SCSW_ACTL_RESUME_PEND 0x40
+
+#define SCSW_STCTL_STATUS_PEND 0x1
+#define SCSW_STCTL_SEC_STATUS 0x2
+#define SCSW_STCTL_PRIM_STATUS 0x4
+#define SCSW_STCTL_INTER_STATUS 0x8
+#define SCSW_STCTL_ALERT_STATUS 0x10
+
+#define DEV_STAT_ATTENTION 0x80
+#define DEV_STAT_STAT_MOD 0x40
+#define DEV_STAT_CU_END 0x20
+#define DEV_STAT_BUSY 0x10
+#define DEV_STAT_CHN_END 0x08
+#define DEV_STAT_DEV_END 0x04
+#define DEV_STAT_UNIT_CHECK 0x02
+#define DEV_STAT_UNIT_EXCEP 0x01
+
+#define SCHN_STAT_PCI 0x80
+#define SCHN_STAT_INCORR_LEN 0x40
+#define SCHN_STAT_PROG_CHECK 0x20
+#define SCHN_STAT_PROT_CHECK 0x10
+#define SCHN_STAT_CHN_DATA_CHK 0x08
+#define SCHN_STAT_CHN_CTRL_CHK 0x04
+#define SCHN_STAT_INTF_CTRL_CHK 0x02
+#define SCHN_STAT_CHAIN_CHECK 0x01
+
+/*
+ * architectured values for first sense byte
+ */
+#define SNS0_CMD_REJECT 0x80
+#define SNS_CMD_REJECT SNS0_CMD_REJECT
+#define SNS0_INTERVENTION_REQ 0x40
+#define SNS0_BUS_OUT_CHECK 0x20
+#define SNS0_EQUIPMENT_CHECK 0x10
+#define SNS0_DATA_CHECK 0x08
+#define SNS0_OVERRUN 0x04
+#define SNS0_INCOMPL_DOMAIN 0x01
+
+/*
+ * architectured values for second sense byte
+ */
+#define SNS1_PERM_ERR 0x80
+#define SNS1_INV_TRACK_FORMAT 0x40
+#define SNS1_EOC 0x20
+#define SNS1_MESSAGE_TO_OPER 0x10
+#define SNS1_NO_REC_FOUND 0x08
+#define SNS1_FILE_PROTECTED 0x04
+#define SNS1_WRITE_INHIBITED 0x02
+#define SNS1_INPRECISE_END 0x01
+
+/*
+ * architectured values for third sense byte
+ */
+#define SNS2_REQ_INH_WRITE 0x80
+#define SNS2_CORRECTABLE 0x40
+#define SNS2_FIRST_LOG_ERR 0x20
+#define SNS2_ENV_DATA_PRESENT 0x10
+#define SNS2_INPRECISE_END 0x04
/**
* scsw_is_tm - check for transport mode scsw
@@ -18,11 +185,10 @@
* Return non-zero if the specified scsw is a transport mode scsw, zero
* otherwise.
*/
-int scsw_is_tm(union scsw *scsw)
+static inline int scsw_is_tm(union scsw *scsw)
{
return css_general_characteristics.fcx && (scsw->tm.x == 1);
}
-EXPORT_SYMBOL(scsw_is_tm);
/**
* scsw_key - return scsw key field
@@ -31,14 +197,13 @@ EXPORT_SYMBOL(scsw_is_tm);
* Return the value of the key field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_key(union scsw *scsw)
+static inline u32 scsw_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.key;
else
return scsw->cmd.key;
}
-EXPORT_SYMBOL(scsw_key);
/**
* scsw_eswf - return scsw eswf field
@@ -47,14 +212,13 @@ EXPORT_SYMBOL(scsw_key);
* Return the value of the eswf field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_eswf(union scsw *scsw)
+static inline u32 scsw_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.eswf;
else
return scsw->cmd.eswf;
}
-EXPORT_SYMBOL(scsw_eswf);
/**
* scsw_cc - return scsw cc field
@@ -63,14 +227,13 @@ EXPORT_SYMBOL(scsw_eswf);
* Return the value of the cc field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_cc(union scsw *scsw)
+static inline u32 scsw_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cc;
else
return scsw->cmd.cc;
}
-EXPORT_SYMBOL(scsw_cc);
/**
* scsw_ectl - return scsw ectl field
@@ -79,14 +242,13 @@ EXPORT_SYMBOL(scsw_cc);
* Return the value of the ectl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_ectl(union scsw *scsw)
+static inline u32 scsw_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.ectl;
else
return scsw->cmd.ectl;
}
-EXPORT_SYMBOL(scsw_ectl);
/**
* scsw_pno - return scsw pno field
@@ -95,14 +257,13 @@ EXPORT_SYMBOL(scsw_ectl);
* Return the value of the pno field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_pno(union scsw *scsw)
+static inline u32 scsw_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.pno;
else
return scsw->cmd.pno;
}
-EXPORT_SYMBOL(scsw_pno);
/**
* scsw_fctl - return scsw fctl field
@@ -111,14 +272,13 @@ EXPORT_SYMBOL(scsw_pno);
* Return the value of the fctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_fctl(union scsw *scsw)
+static inline u32 scsw_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.fctl;
else
return scsw->cmd.fctl;
}
-EXPORT_SYMBOL(scsw_fctl);
/**
* scsw_actl - return scsw actl field
@@ -127,14 +287,13 @@ EXPORT_SYMBOL(scsw_fctl);
* Return the value of the actl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_actl(union scsw *scsw)
+static inline u32 scsw_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.actl;
else
return scsw->cmd.actl;
}
-EXPORT_SYMBOL(scsw_actl);
/**
* scsw_stctl - return scsw stctl field
@@ -143,14 +302,13 @@ EXPORT_SYMBOL(scsw_actl);
* Return the value of the stctl field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_stctl(union scsw *scsw)
+static inline u32 scsw_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.stctl;
else
return scsw->cmd.stctl;
}
-EXPORT_SYMBOL(scsw_stctl);
/**
* scsw_dstat - return scsw dstat field
@@ -159,14 +317,13 @@ EXPORT_SYMBOL(scsw_stctl);
* Return the value of the dstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_dstat(union scsw *scsw)
+static inline u32 scsw_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.dstat;
else
return scsw->cmd.dstat;
}
-EXPORT_SYMBOL(scsw_dstat);
/**
* scsw_cstat - return scsw cstat field
@@ -175,14 +332,13 @@ EXPORT_SYMBOL(scsw_dstat);
* Return the value of the cstat field of the specified scsw, regardless of
* whether it is a transport mode or command mode scsw.
*/
-u32 scsw_cstat(union scsw *scsw)
+static inline u32 scsw_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw->tm.cstat;
else
return scsw->cmd.cstat;
}
-EXPORT_SYMBOL(scsw_cstat);
/**
* scsw_cmd_is_valid_key - check key field validity
@@ -191,11 +347,10 @@ EXPORT_SYMBOL(scsw_cstat);
* Return non-zero if the key field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_key(union scsw *scsw)
+static inline int scsw_cmd_is_valid_key(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_key);
/**
* scsw_cmd_is_valid_sctl - check fctl field validity
@@ -204,11 +359,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_key);
* Return non-zero if the fctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_sctl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_sctl(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
/**
* scsw_cmd_is_valid_eswf - check eswf field validity
@@ -217,11 +371,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
* Return non-zero if the eswf field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_eswf(union scsw *scsw)
+static inline int scsw_cmd_is_valid_eswf(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
/**
* scsw_cmd_is_valid_cc - check cc field validity
@@ -230,12 +383,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
* Return non-zero if the cc field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_cc(union scsw *scsw)
+static inline int scsw_cmd_is_valid_cc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
/**
* scsw_cmd_is_valid_fmt - check fmt field validity
@@ -244,11 +396,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
* Return non-zero if the fmt field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_fmt(union scsw *scsw)
+static inline int scsw_cmd_is_valid_fmt(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
/**
* scsw_cmd_is_valid_pfch - check pfch field validity
@@ -257,11 +408,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
* Return non-zero if the pfch field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_pfch(union scsw *scsw)
+static inline int scsw_cmd_is_valid_pfch(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
/**
* scsw_cmd_is_valid_isic - check isic field validity
@@ -270,11 +420,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
* Return non-zero if the isic field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_isic(union scsw *scsw)
+static inline int scsw_cmd_is_valid_isic(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
/**
* scsw_cmd_is_valid_alcc - check alcc field validity
@@ -283,11 +432,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
* Return non-zero if the alcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_alcc(union scsw *scsw)
+static inline int scsw_cmd_is_valid_alcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
/**
* scsw_cmd_is_valid_ssi - check ssi field validity
@@ -296,11 +444,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
* Return non-zero if the ssi field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_ssi(union scsw *scsw)
+static inline int scsw_cmd_is_valid_ssi(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
/**
* scsw_cmd_is_valid_zcc - check zcc field validity
@@ -309,12 +456,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
* Return non-zero if the zcc field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_zcc(union scsw *scsw)
+static inline int scsw_cmd_is_valid_zcc(union scsw *scsw)
{
return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
/**
* scsw_cmd_is_valid_ectl - check ectl field validity
@@ -323,13 +469,12 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
* Return non-zero if the ectl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_ectl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
/**
* scsw_cmd_is_valid_pno - check pno field validity
@@ -338,7 +483,7 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
* Return non-zero if the pno field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_pno(union scsw *scsw)
+static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
{
return (scsw->cmd.fctl != 0) &&
(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
@@ -346,7 +491,6 @@ int scsw_cmd_is_valid_pno(union scsw *scsw)
((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
/**
* scsw_cmd_is_valid_fctl - check fctl field validity
@@ -355,12 +499,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
* Return non-zero if the fctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_fctl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_fctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
/**
* scsw_cmd_is_valid_actl - check actl field validity
@@ -369,12 +512,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
* Return non-zero if the actl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_actl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_actl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
/**
* scsw_cmd_is_valid_stctl - check stctl field validity
@@ -383,12 +525,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
* Return non-zero if the stctl field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_stctl(union scsw *scsw)
+static inline int scsw_cmd_is_valid_stctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
/**
* scsw_cmd_is_valid_dstat - check dstat field validity
@@ -397,12 +538,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
* Return non-zero if the dstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_dstat(union scsw *scsw)
+static inline int scsw_cmd_is_valid_dstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
/**
* scsw_cmd_is_valid_cstat - check cstat field validity
@@ -411,12 +551,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
* Return non-zero if the cstat field of the specified command mode scsw is
* valid, zero otherwise.
*/
-int scsw_cmd_is_valid_cstat(union scsw *scsw)
+static inline int scsw_cmd_is_valid_cstat(union scsw *scsw)
{
return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->cmd.cc != 3);
}
-EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
/**
* scsw_tm_is_valid_key - check key field validity
@@ -425,11 +564,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
* Return non-zero if the key field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_key(union scsw *scsw)
+static inline int scsw_tm_is_valid_key(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_key);
/**
* scsw_tm_is_valid_eswf - check eswf field validity
@@ -438,11 +576,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_key);
* Return non-zero if the eswf field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_eswf(union scsw *scsw)
+static inline int scsw_tm_is_valid_eswf(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
/**
* scsw_tm_is_valid_cc - check cc field validity
@@ -451,12 +588,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
* Return non-zero if the cc field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_cc(union scsw *scsw)
+static inline int scsw_tm_is_valid_cc(union scsw *scsw)
{
return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_cc);
/**
* scsw_tm_is_valid_fmt - check fmt field validity
@@ -465,11 +601,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_cc);
* Return non-zero if the fmt field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_fmt(union scsw *scsw)
+static inline int scsw_tm_is_valid_fmt(union scsw *scsw)
{
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
/**
* scsw_tm_is_valid_x - check x field validity
@@ -478,11 +613,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
* Return non-zero if the x field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_x(union scsw *scsw)
+static inline int scsw_tm_is_valid_x(union scsw *scsw)
{
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_x);
/**
* scsw_tm_is_valid_q - check q field validity
@@ -491,11 +625,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_x);
* Return non-zero if the q field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_q(union scsw *scsw)
+static inline int scsw_tm_is_valid_q(union scsw *scsw)
{
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_q);
/**
* scsw_tm_is_valid_ectl - check ectl field validity
@@ -504,13 +637,12 @@ EXPORT_SYMBOL(scsw_tm_is_valid_q);
* Return non-zero if the ectl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_ectl(union scsw *scsw)
+static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
/**
* scsw_tm_is_valid_pno - check pno field validity
@@ -519,7 +651,7 @@ EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
* Return non-zero if the pno field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_pno(union scsw *scsw)
+static inline int scsw_tm_is_valid_pno(union scsw *scsw)
{
return (scsw->tm.fctl != 0) &&
(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
@@ -527,7 +659,6 @@ int scsw_tm_is_valid_pno(union scsw *scsw)
((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
(scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
}
-EXPORT_SYMBOL(scsw_tm_is_valid_pno);
/**
* scsw_tm_is_valid_fctl - check fctl field validity
@@ -536,12 +667,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_pno);
* Return non-zero if the fctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_fctl(union scsw *scsw)
+static inline int scsw_tm_is_valid_fctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
/**
* scsw_tm_is_valid_actl - check actl field validity
@@ -550,12 +680,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
* Return non-zero if the actl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_actl(union scsw *scsw)
+static inline int scsw_tm_is_valid_actl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_actl);
/**
* scsw_tm_is_valid_stctl - check stctl field validity
@@ -564,12 +693,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_actl);
* Return non-zero if the stctl field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_stctl(union scsw *scsw)
+static inline int scsw_tm_is_valid_stctl(union scsw *scsw)
{
/* Only valid if pmcw.dnv == 1*/
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
/**
* scsw_tm_is_valid_dstat - check dstat field validity
@@ -578,12 +706,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
* Return non-zero if the dstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_dstat(union scsw *scsw)
+static inline int scsw_tm_is_valid_dstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
/**
* scsw_tm_is_valid_cstat - check cstat field validity
@@ -592,12 +719,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
* Return non-zero if the cstat field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_cstat(union scsw *scsw)
+static inline int scsw_tm_is_valid_cstat(union scsw *scsw)
{
return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
(scsw->tm.cc != 3);
}
-EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
/**
* scsw_tm_is_valid_fcxs - check fcxs field validity
@@ -606,11 +732,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
* Return non-zero if the fcxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_fcxs(union scsw *scsw)
+static inline int scsw_tm_is_valid_fcxs(union scsw *scsw)
{
return 1;
}
-EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
/**
* scsw_tm_is_valid_schxs - check schxs field validity
@@ -619,14 +744,13 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
* Return non-zero if the schxs field of the specified transport mode scsw is
* valid, zero otherwise.
*/
-int scsw_tm_is_valid_schxs(union scsw *scsw)
+static inline int scsw_tm_is_valid_schxs(union scsw *scsw)
{
return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_PROT_CHECK |
SCHN_STAT_CHN_DATA_CHK));
}
-EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
/**
* scsw_is_valid_actl - check actl field validity
@@ -636,14 +760,13 @@ EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_actl(union scsw *scsw)
+static inline int scsw_is_valid_actl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_actl(scsw);
else
return scsw_cmd_is_valid_actl(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_actl);
/**
* scsw_is_valid_cc - check cc field validity
@@ -653,14 +776,13 @@ EXPORT_SYMBOL(scsw_is_valid_actl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_cc(union scsw *scsw)
+static inline int scsw_is_valid_cc(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cc(scsw);
else
return scsw_cmd_is_valid_cc(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_cc);
/**
* scsw_is_valid_cstat - check cstat field validity
@@ -670,14 +792,13 @@ EXPORT_SYMBOL(scsw_is_valid_cc);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_cstat(union scsw *scsw)
+static inline int scsw_is_valid_cstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_cstat(scsw);
else
return scsw_cmd_is_valid_cstat(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_cstat);
/**
* scsw_is_valid_dstat - check dstat field validity
@@ -687,14 +808,13 @@ EXPORT_SYMBOL(scsw_is_valid_cstat);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_dstat(union scsw *scsw)
+static inline int scsw_is_valid_dstat(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_dstat(scsw);
else
return scsw_cmd_is_valid_dstat(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_dstat);
/**
* scsw_is_valid_ectl - check ectl field validity
@@ -704,14 +824,13 @@ EXPORT_SYMBOL(scsw_is_valid_dstat);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_ectl(union scsw *scsw)
+static inline int scsw_is_valid_ectl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_ectl(scsw);
else
return scsw_cmd_is_valid_ectl(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_ectl);
/**
* scsw_is_valid_eswf - check eswf field validity
@@ -721,14 +840,13 @@ EXPORT_SYMBOL(scsw_is_valid_ectl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_eswf(union scsw *scsw)
+static inline int scsw_is_valid_eswf(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_eswf(scsw);
else
return scsw_cmd_is_valid_eswf(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_eswf);
/**
* scsw_is_valid_fctl - check fctl field validity
@@ -738,14 +856,13 @@ EXPORT_SYMBOL(scsw_is_valid_eswf);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_fctl(union scsw *scsw)
+static inline int scsw_is_valid_fctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_fctl(scsw);
else
return scsw_cmd_is_valid_fctl(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_fctl);
/**
* scsw_is_valid_key - check key field validity
@@ -755,14 +872,13 @@ EXPORT_SYMBOL(scsw_is_valid_fctl);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_key(union scsw *scsw)
+static inline int scsw_is_valid_key(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_key(scsw);
else
return scsw_cmd_is_valid_key(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_key);
/**
* scsw_is_valid_pno - check pno field validity
@@ -772,14 +888,13 @@ EXPORT_SYMBOL(scsw_is_valid_key);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_pno(union scsw *scsw)
+static inline int scsw_is_valid_pno(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_pno(scsw);
else
return scsw_cmd_is_valid_pno(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_pno);
/**
* scsw_is_valid_stctl - check stctl field validity
@@ -789,14 +904,13 @@ EXPORT_SYMBOL(scsw_is_valid_pno);
* regardless of whether it is a transport mode or command mode scsw.
* Return zero if the field does not contain a valid value.
*/
-int scsw_is_valid_stctl(union scsw *scsw)
+static inline int scsw_is_valid_stctl(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_valid_stctl(scsw);
else
return scsw_cmd_is_valid_stctl(scsw);
}
-EXPORT_SYMBOL(scsw_is_valid_stctl);
/**
* scsw_cmd_is_solicited - check for solicited scsw
@@ -805,12 +919,11 @@ EXPORT_SYMBOL(scsw_is_valid_stctl);
* Return non-zero if the command mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
-int scsw_cmd_is_solicited(union scsw *scsw)
+static inline int scsw_cmd_is_solicited(union scsw *scsw)
{
return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
-EXPORT_SYMBOL(scsw_cmd_is_solicited);
/**
* scsw_tm_is_solicited - check for solicited scsw
@@ -819,12 +932,11 @@ EXPORT_SYMBOL(scsw_cmd_is_solicited);
* Return non-zero if the transport mode scsw indicates that the associated
* status condition is solicited, zero if it is unsolicited.
*/
-int scsw_tm_is_solicited(union scsw *scsw)
+static inline int scsw_tm_is_solicited(union scsw *scsw)
{
return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
-EXPORT_SYMBOL(scsw_tm_is_solicited);
/**
* scsw_is_solicited - check for solicited scsw
@@ -833,11 +945,12 @@ EXPORT_SYMBOL(scsw_tm_is_solicited);
* Return non-zero if the transport or command mode scsw indicates that the
* associated status condition is solicited, zero if it is unsolicited.
*/
-int scsw_is_solicited(union scsw *scsw)
+static inline int scsw_is_solicited(union scsw *scsw)
{
if (scsw_is_tm(scsw))
return scsw_tm_is_solicited(scsw);
else
return scsw_cmd_is_solicited(scsw);
}
-EXPORT_SYMBOL(scsw_is_solicited);
+
+#endif /* _ASM_S390_SCSW_H_ */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 38b0fc221ed7..e37478e87286 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -8,7 +8,7 @@
#ifndef _ASM_S390_SETUP_H
#define _ASM_S390_SETUP_H
-#define COMMAND_LINE_SIZE 1024
+#define COMMAND_LINE_SIZE 4096
#define ARCH_COMMAND_LINE_SIZE 896
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 72137bc907ac..c991fe6473c9 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -51,32 +51,7 @@ extern void machine_power_off_smp(void);
#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
-
-/*
- * returns 1 if cpu is in stopped/check stopped state or not operational
- * returns 0 otherwise
- */
-static inline int
-smp_cpu_not_running(int cpu)
-{
- __u32 status;
-
- switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
- case sigp_order_code_accepted:
- case sigp_status_stored:
- /* Check for stopped and check stop state */
- if (status & 0x50)
- return 1;
- break;
- case sigp_not_operational:
- return 1;
- default:
- break;
- }
- return 0;
-}
-
-#define cpu_logical_map(cpu) (cpu)
+#define cpu_logical_map(cpu) (cpu)
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
@@ -91,11 +66,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask);
#endif
-#ifndef CONFIG_SMP
-#define hard_smp_processor_id() 0
-#define smp_cpu_not_running(cpu) 1
-#endif
-
#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
#else
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..41ce6861174e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -191,4 +191,33 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
+#define __always_inline__spin_lock
+#define __always_inline__read_lock
+#define __always_inline__write_lock
+#define __always_inline__spin_lock_bh
+#define __always_inline__read_lock_bh
+#define __always_inline__write_lock_bh
+#define __always_inline__spin_lock_irq
+#define __always_inline__read_lock_irq
+#define __always_inline__write_lock_irq
+#define __always_inline__spin_lock_irqsave
+#define __always_inline__read_lock_irqsave
+#define __always_inline__write_lock_irqsave
+#define __always_inline__spin_trylock
+#define __always_inline__read_trylock
+#define __always_inline__write_trylock
+#define __always_inline__spin_trylock_bh
+#define __always_inline__spin_unlock
+#define __always_inline__read_unlock
+#define __always_inline__write_unlock
+#define __always_inline__spin_unlock_bh
+#define __always_inline__read_unlock_bh
+#define __always_inline__write_unlock_bh
+#define __always_inline__spin_unlock_irq
+#define __always_inline__read_unlock_irq
+#define __always_inline__write_unlock_irq
+#define __always_inline__spin_unlock_irqrestore
+#define __always_inline__read_unlock_irqrestore
+#define __always_inline__write_unlock_irqrestore
+
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 4fb83c1cdb77..379661d2f81a 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -109,11 +109,7 @@ extern void pfault_fini(void);
#define pfault_fini() do { } while (0)
#endif /* CONFIG_PFAULT */
-#ifdef CONFIG_PAGE_STATES
extern void cmma_init(void);
-#else
-static inline void cmma_init(void) { }
-#endif
#define finish_arch_switch(prev) do { \
set_fs(current->thread.mm_segment); \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ba1cab9fc1f9..07eb61b2fb3a 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -92,7 +92,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
-#define TIF_SYSCALL_FTRACE 11 /* ftrace syscall instrumentation */
+#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
@@ -111,7 +111,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
-#define _TIF_SYSCALL_FTRACE (1<<TIF_SYSCALL_FTRACE)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index cc21e3e20fd7..24aa1cda20ad 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -90,4 +90,18 @@ unsigned long long monotonic_clock(void);
extern u64 sched_clock_base_cc;
+/**
+ * get_clock_monotonic - returns current time in clock rate units
+ *
+ * The clock and sched_clock_base_cc get changed via stop_machine.
+ * The caller must therefore ensure that preemption is disabled
+ * when calling this function, otherwise the returned value is
+ * not guaranteed to be monotonic.
+ */
+static inline unsigned long long get_clock_monotonic(void)
+{
+ return get_clock_xt() - sched_clock_base_cc;
+}
+
#endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c75ed43b1a18..c7be8e10b87e 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o topology.o
-
+obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
+obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cae14c499511..bf8b4ae7ff2d 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -6,6 +6,9 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
+#define KMSG_COMPONENT "setup"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -16,6 +19,7 @@
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
+#include <linux/kernel.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
@@ -35,8 +39,6 @@
char kernel_nss_name[NSS_NAME_SIZE + 1];
-static unsigned long machine_flags;
-
static void __init setup_boot_command_line(void);
/*
@@ -81,6 +83,8 @@ asm(
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n");
+static __initdata char upper_command_line[COMMAND_LINE_SIZE];
+
static noinline __init void create_kernel_nss(void)
{
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@@ -90,7 +94,6 @@ static noinline __init void create_kernel_nss(void)
int response;
size_t len;
char *savesys_ptr;
- char upper_command_line[COMMAND_LINE_SIZE];
char defsys_cmd[DEFSYS_CMD_SIZE];
char savesys_cmd[SAVESYS_CMD_SIZE];
@@ -141,6 +144,8 @@ static noinline __init void create_kernel_nss(void)
__cpcmd(defsys_cmd, NULL, 0, &response);
if (response != 0) {
+ pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
+ response);
kernel_nss_name[0] = '\0';
return;
}
@@ -153,8 +158,11 @@ static noinline __init void create_kernel_nss(void)
* max SAVESYS_CMD_SIZE
* On error: response contains the numeric portion of cp error message.
* for SAVESYS it will be >= 263
+ * for missing privilege class, it will be 1
*/
- if (response > SAVESYS_CMD_SIZE) {
+ if (response > SAVESYS_CMD_SIZE || response == 1) {
+ pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
+ response);
kernel_nss_name[0] = '\0';
return;
}
@@ -205,12 +213,9 @@ static noinline __init void detect_machine_type(void)
/* Running under KVM? If not we assume z/VM */
if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
- machine_flags |= MACHINE_FLAG_KVM;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
else
- machine_flags |= MACHINE_FLAG_VM;
-
- /* Store machine flags for setting up lowcore early */
- S390_lowcore.machine_flags = machine_flags;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
static __init void early_pgm_check_handler(void)
@@ -245,7 +250,7 @@ static noinline __init void setup_hpage(void)
facilities = stfl();
if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29)))
return;
- machine_flags |= MACHINE_FLAG_HPAGE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
__ctl_set_bit(0, 23);
#endif
}
@@ -263,7 +268,7 @@ static __init void detect_mvpg(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
if (!rc)
- machine_flags |= MACHINE_FLAG_MVPG;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
#endif
}
@@ -279,7 +284,7 @@ static __init void detect_ieee(void)
EX_TABLE(0b,1b)
: "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_IEEE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
#endif
}
@@ -298,7 +303,7 @@ static __init void detect_csp(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
if (!rc)
- machine_flags |= MACHINE_FLAG_CSP;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
#endif
}
@@ -315,7 +320,7 @@ static __init void detect_diag9c(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_DIAG9C;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}
static __init void detect_diag44(void)
@@ -330,7 +335,7 @@ static __init void detect_diag44(void)
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
if (!rc)
- machine_flags |= MACHINE_FLAG_DIAG44;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
#endif
}
@@ -341,11 +346,11 @@ static __init void detect_machine_facilities(void)
facilities = stfl();
if (facilities & (1 << 28))
- machine_flags |= MACHINE_FLAG_IDTE;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (facilities & (1 << 23))
- machine_flags |= MACHINE_FLAG_PFMF;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
if (facilities & (1 << 4))
- machine_flags |= MACHINE_FLAG_MVCOS;
+ S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
#endif
}
@@ -367,21 +372,35 @@ static __init void rescue_initrd(void)
}
/* Set up boot command line */
-static void __init setup_boot_command_line(void)
+static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
- char *parm = NULL;
+ char *parm, *delim;
+ size_t rc, len;
+
+ len = strlen(boot_command_line);
+
+ delim = boot_command_line + len; /* '\0' character position */
+ parm = boot_command_line + len + 1; /* append right after '\0' */
+ rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
+ if (rc) {
+ if (*parm == '=')
+ memmove(boot_command_line, parm + 1, rc);
+ else
+ *delim = ' '; /* replace '\0' with space */
+ }
+}
+
+static void __init setup_boot_command_line(void)
+{
/* copy arch command line */
strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
/* append IPL PARM data to the boot command line */
- if (MACHINE_IS_VM) {
- parm = boot_command_line + strlen(boot_command_line);
- *parm++ = ' ';
- get_ipl_vmparm(parm);
- if (parm[0] == '=')
- memmove(boot_command_line, parm + 1, strlen(parm));
- }
+ if (MACHINE_IS_VM)
+ append_to_cmdline(append_ipl_vmparm);
+
+ append_to_cmdline(append_ipl_scpdata);
}
@@ -413,7 +432,6 @@ void __init startup_init(void)
setup_hpage();
sclp_facilities_detect();
detect_memory_layout(memory_chunk);
- S390_lowcore.machine_flags = machine_flags;
#ifdef CONFIG_DYNAMIC_FTRACE
S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
#endif
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c4c80a22bc1f..f43d2ee54464 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -54,7 +54,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
- _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8)
+ _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
@@ -278,7 +278,8 @@ sysc_return:
bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(sysc_restore_trace_psw)
+ la %r1,BASED(sysc_restore_trace_psw_addr)
+ l %r1,0(%r1)
lpsw 0(%r1)
sysc_restore_trace:
TRACE_IRQS_CHECK
@@ -289,10 +290,15 @@ sysc_leave:
sysc_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+sysc_restore_trace_psw_addr:
+ .long sysc_restore_trace_psw
+
+ .section .data,"aw",@progbits
.align 8
.globl sysc_restore_trace_psw
sysc_restore_trace_psw:
.long 0, sysc_restore_trace + 0x80000000
+ .previous
#endif
#
@@ -606,7 +612,8 @@ io_return:
bnz BASED(io_work) # there is work to do (signals etc.)
io_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
- la %r1,BASED(io_restore_trace_psw)
+ la %r1,BASED(io_restore_trace_psw_addr)
+ l %r1,0(%r1)
lpsw 0(%r1)
io_restore_trace:
TRACE_IRQS_CHECK
@@ -617,10 +624,15 @@ io_leave:
io_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+io_restore_trace_psw_addr:
+ .long io_restore_trace_psw
+
+ .section .data,"aw",@progbits
.align 8
.globl io_restore_trace_psw
io_restore_trace_psw:
.long 0, io_restore_trace + 0x80000000
+ .previous
#endif
#
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f6618e9e15ef..a6f7b20df616 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -57,7 +57,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
- _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8)
+ _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
#define BASED(name) name-system_call(%r13)
@@ -284,10 +284,12 @@ sysc_leave:
sysc_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+ .section .data,"aw",@progbits
.align 8
.globl sysc_restore_trace_psw
sysc_restore_trace_psw:
.quad 0, sysc_restore_trace
+ .previous
#endif
#
@@ -595,10 +597,12 @@ io_leave:
io_done:
#ifdef CONFIG_TRACE_IRQFLAGS
+ .section .data,"aw",@progbits
.align 8
.globl io_restore_trace_psw
io_restore_trace_psw:
.quad 0, io_restore_trace
+ .previous
#endif
#
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 3e298e64f0db..57bdcb1e3cdf 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -220,6 +220,29 @@ struct syscall_metadata *syscall_nr_to_meta(int nr)
return syscalls_metadata[nr];
}
+int syscall_name_to_nr(char *name)
+{
+ int i;
+
+ if (!syscalls_metadata)
+ return -1;
+ for (i = 0; i < NR_syscalls; i++)
+ if (syscalls_metadata[i])
+ if (!strcmp(syscalls_metadata[i]->name, name))
+ return i;
+ return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+ syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+ syscalls_metadata[num]->exit_id = id;
+}
+
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
struct syscall_metadata *start;
@@ -237,24 +260,19 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
return NULL;
}
-void arch_init_ftrace_syscalls(void)
+static int __init arch_init_ftrace_syscalls(void)
{
struct syscall_metadata *meta;
int i;
- static atomic_t refs;
-
- if (atomic_inc_return(&refs) != 1)
- goto out;
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
GFP_KERNEL);
if (!syscalls_metadata)
- goto out;
+ return -ENOMEM;
for (i = 0; i < NR_syscalls; i++) {
meta = find_syscall_meta((unsigned long)sys_call_table[i]);
syscalls_metadata[i] = meta;
}
- return;
-out:
- atomic_dec(&refs);
+ return 0;
}
+arch_initcall(arch_init_ftrace_syscalls);
#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ec6882348520..c52b4f7742fa 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,6 +27,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/cpu.h>
#ifdef CONFIG_64BIT
#define ARCH_OFFSET 4
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 2ced846065b7..602b508cd4c4 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -24,6 +24,7 @@ startup_continue:
# Setup stack
#
l %r15,.Linittu-.LPG1(%r13)
+ st %r15,__LC_THREAD_INFO # cache thread info in lowcore
mvc __LC_CURRENT(4),__TI_task(%r15)
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
st %r15,__LC_KERNEL_STACK # set end of kernel stack
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 65667b2e65ce..6a250808092b 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -62,9 +62,9 @@ startup_continue:
clr %r11,%r12
je 5f # no more space in prefix array
4:
- ahi %r8,1 # next cpu (r8 += 1)
- cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ?
- jl 1b # jump if not last cpu
+ ahi %r8,1 # next cpu (r8 += 1)
+ chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
+ jle 1b # jump if not last cpu
5:
lhi %r1,2 # mode 2 = esame (dump)
j 6f
@@ -92,6 +92,7 @@ startup_continue:
# Setup stack
#
larl %r15,init_thread_union
+ stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
lg %r14,__TI_task(%r15) # cache current in lowcore
stg %r14,__LC_CURRENT
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
@@ -129,8 +130,6 @@ startup_continue:
#ifdef CONFIG_ZFCPDUMP
.Lcurrent_cpu:
.long 0x0
-.Llast_cpu:
- .long 0x0000ffff
.Lpref_arr_ptr:
.long zfcpdump_prefix_array
#endif /* CONFIG_ZFCPDUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 371a2d88f4ac..ee57a42e6e93 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -272,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
/* VM IPL PARM routines */
-static void reipl_get_ascii_vmparm(char *dest,
+size_t reipl_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb)
{
int i;
- int len = 0;
+ size_t len;
char has_lowercase = 0;
+ len = 0;
if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
(ipb->ipl_info.ccw.vm_parm_len > 0)) {
- len = ipb->ipl_info.ccw.vm_parm_len;
+ len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
@@ -299,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest,
EBCASC(dest, len);
}
dest[len] = 0;
+
+ return len;
}
-void get_ipl_vmparm(char *dest)
+size_t append_ipl_vmparm(char *dest, size_t size)
{
+ size_t rc;
+
+ rc = 0;
if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
- reipl_get_ascii_vmparm(dest, &ipl_block);
+ rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
else
dest[0] = 0;
+ return rc;
}
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
@@ -314,10 +321,65 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj,
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
- get_ipl_vmparm(parm);
+ append_ipl_vmparm(parm, sizeof(parm));
return sprintf(page, "%s\n", parm);
}
+static size_t scpdata_length(const char *buf, size_t count)
+{
+ while (count) {
+ if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
+ break;
+ count--;
+ }
+ return count;
+}
+
+size_t reipl_append_ascii_scpdata(char *dest, size_t size,
+ const struct ipl_parameter_block *ipb)
+{
+ size_t count;
+ size_t i;
+ int has_lowercase;
+
+ count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
+ ipb->ipl_info.fcp.scp_data_len));
+ if (!count)
+ goto out;
+
+ has_lowercase = 0;
+ for (i = 0; i < count; i++) {
+ if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
+ count = 0;
+ goto out;
+ }
+ if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
+ has_lowercase = 1;
+ }
+
+ if (has_lowercase)
+ memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
+ else
+ for (i = 0; i < count; i++)
+ dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
+out:
+ dest[count] = '\0';
+ return count;
+}
+
+size_t append_ipl_scpdata(char *dest, size_t len)
+{
+ size_t rc;
+
+ rc = 0;
+ if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
+ rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
+ else
+ dest[0] = 0;
+ return rc;
+}
+
+
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
@@ -553,7 +615,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
{
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
- reipl_get_ascii_vmparm(vmparm, ipb);
+ reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
return sprintf(page, "%s\n", vmparm);
}
@@ -626,6 +688,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
/* FCP reipl device attributes */
+static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
+ void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;
+
+ return memory_read_from_buffer(buf, count, &off, scp_data, size);
+}
+
+static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t padding;
+ size_t scpdata_len;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off >= DIAG308_SCPDATA_SIZE)
+ return -ENOSPC;
+
+ if (count > DIAG308_SCPDATA_SIZE - off)
+ count = DIAG308_SCPDATA_SIZE - off;
+
+ memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
+ scpdata_len = off + count;
+
+ if (scpdata_len % 8) {
+ padding = 8 - (scpdata_len % 8);
+ memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
+ 0, padding);
+ scpdata_len += padding;
+ }
+
+ reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
+ reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
+ reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;
+
+ return count;
+}
+
+static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
+ .attr = {
+ .name = "scp_data",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = PAGE_SIZE,
+ .read = reipl_fcp_scpdata_read,
+ .write = reipl_fcp_scpdata_write,
+};
+
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
reipl_block_fcp->ipl_info.fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
@@ -647,7 +762,6 @@ static struct attribute *reipl_fcp_attrs[] = {
};
static struct attribute_group reipl_fcp_attr_group = {
- .name = IPL_FCP_STR,
.attrs = reipl_fcp_attrs,
};
@@ -895,6 +1009,7 @@ static struct kobj_attribute reipl_type_attr =
__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
static struct kset *reipl_kset;
+static struct kset *reipl_fcp_kset;
static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
const enum ipl_method m)
@@ -906,7 +1021,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
reipl_get_ascii_loadparm(loadparm, ipb);
reipl_get_ascii_nss_name(nss_name, ipb);
- reipl_get_ascii_vmparm(vmparm, ipb);
+ reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
switch (m) {
case REIPL_METHOD_CCW_VM:
@@ -1076,23 +1191,44 @@ static int __init reipl_fcp_init(void)
int rc;
if (!diag308_set_works) {
- if (ipl_info.type == IPL_TYPE_FCP)
+ if (ipl_info.type == IPL_TYPE_FCP) {
make_attrs_ro(reipl_fcp_attrs);
- else
+ sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO;
+ } else
return 0;
}
reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
if (!reipl_block_fcp)
return -ENOMEM;
- rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group);
+
+ /* sysfs: create fcp kset for mixing attr group and bin attrs */
+ reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
+ &reipl_kset->kobj);
+ if (!reipl_fcp_kset) {
+ free_page((unsigned long) reipl_block_fcp);
+ return -ENOMEM;
+ }
+
+ rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
+ if (rc) {
+ kset_unregister(reipl_fcp_kset);
+ free_page((unsigned long) reipl_block_fcp);
+ return rc;
+ }
+
+ rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
+ &sys_reipl_fcp_scp_data_attr);
if (rc) {
- free_page((unsigned long)reipl_block_fcp);
+ sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
+ kset_unregister(reipl_fcp_kset);
+ free_page((unsigned long) reipl_block_fcp);
return rc;
}
- if (ipl_info.type == IPL_TYPE_FCP) {
+
+ if (ipl_info.type == IPL_TYPE_FCP)
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
- } else {
+ else {
reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 2a0a5e97ba8c..dfe015d7398c 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -11,111 +11,27 @@
ftrace_stub:
br %r14
-#ifdef CONFIG_64BIT
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
.globl _mcount
_mcount:
- br %r14
-
- .globl ftrace_caller
-ftrace_caller:
- larl %r1,function_trace_stop
- icm %r1,0xf,0(%r1)
- bnzr %r14
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- larl %r14,ftrace_dyn_func
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl ftrace_graph_caller
-ftrace_graph_caller:
- # This unconditional branch gets runtime patched. Change only if
- # you know what you are doing. See ftrace_enable_graph_caller().
- j 0f
- lg %r2,272(%r15)
- lg %r3,168(%r15)
- brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
-0:
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
+#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
.data
.globl ftrace_dyn_func
ftrace_dyn_func:
- .quad ftrace_stub
+ .long ftrace_stub
.previous
-#else /* CONFIG_DYNAMIC_FTRACE */
-
- .globl _mcount
-_mcount:
- larl %r1,function_trace_stop
- icm %r1,0xf,0(%r1)
- bnzr %r14
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- larl %r14,ftrace_trace_function
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- lg %r2,272(%r15)
- lg %r3,168(%r15)
- brasl %r14,prepare_ftrace_return
- stg %r2,168(%r15)
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
- br %r14
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
- .globl return_to_handler
-return_to_handler:
- stmg %r2,%r5,32(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- brasl %r14,ftrace_return_to_handler
- aghi %r15,160
- lgr %r14,%r2
- lmg %r2,%r5,32(%r15)
- br %r14
-
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#else /* CONFIG_64BIT */
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
- .globl _mcount
-_mcount:
- br %r14
-
.globl ftrace_caller
ftrace_caller:
+#endif
stm %r2,%r5,16(%r15)
bras %r1,2f
+#ifdef CONFIG_DYNAMIC_FTRACE
+0: .long ftrace_dyn_func
+#else
0: .long ftrace_trace_function
+#endif
1: .long function_trace_stop
2: l %r2,1b-0b(%r1)
icm %r2,0xf,0(%r2)
@@ -131,53 +47,13 @@ ftrace_caller:
l %r14,0(%r14)
basr %r14,%r14
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
.globl ftrace_graph_caller
ftrace_graph_caller:
# This unconditional branch gets runtime patched. Change only if
# you know what you are doing. See ftrace_enable_graph_caller().
j 1f
- bras %r1,0f
- .long prepare_ftrace_return
-0: l %r2,152(%r15)
- l %r4,0(%r1)
- l %r3,100(%r15)
- basr %r14,%r4
- st %r2,100(%r15)
-1:
#endif
- ahi %r15,96
- l %r14,56(%r15)
-3: lm %r2,%r5,16(%r15)
- br %r14
-
- .data
- .globl ftrace_dyn_func
-ftrace_dyn_func:
- .long ftrace_stub
- .previous
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
- .globl _mcount
-_mcount:
- stm %r2,%r5,16(%r15)
- bras %r1,2f
-0: .long ftrace_trace_function
-1: .long function_trace_stop
-2: l %r2,1b-0b(%r1)
- icm %r2,0xf,0(%r2)
- jnz 3f
- st %r14,56(%r15)
- lr %r0,%r15
- ahi %r15,-96
- l %r3,100(%r15)
- la %r2,0(%r14)
- st %r0,__SF_BACKCHAIN(%r15)
- la %r3,0(%r3)
- l %r14,0b-0b(%r1)
- l %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
bras %r1,0f
.long prepare_ftrace_return
0: l %r2,152(%r15)
@@ -185,14 +61,13 @@ _mcount:
l %r3,100(%r15)
basr %r14,%r4
st %r2,100(%r15)
+1:
#endif
ahi %r15,96
l %r14,56(%r15)
3: lm %r2,%r5,16(%r15)
br %r14
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
@@ -211,6 +86,4 @@ return_to_handler:
lm %r2,%r5,16(%r15)
br %r14
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#endif /* CONFIG_64BIT */
+#endif
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
new file mode 100644
index 000000000000..c37211c6092b
--- /dev/null
+++ b/arch/s390/kernel/mcount64.S
@@ -0,0 +1,78 @@
+/*
+ * Copyright IBM Corp. 2008,2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <asm/asm-offsets.h>
+
+ .globl ftrace_stub
+ftrace_stub:
+ br %r14
+
+ .globl _mcount
+_mcount:
+#ifdef CONFIG_DYNAMIC_FTRACE
+ br %r14
+
+ .data
+ .globl ftrace_dyn_func
+ftrace_dyn_func:
+ .quad ftrace_stub
+ .previous
+
+ .globl ftrace_caller
+ftrace_caller:
+#endif
+ larl %r1,function_trace_stop
+ icm %r1,0xf,0(%r1)
+ bnzr %r14
+ stmg %r2,%r5,32(%r15)
+ stg %r14,112(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,__SF_BACKCHAIN(%r15)
+ lgr %r2,%r14
+ lg %r3,168(%r15)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ larl %r14,ftrace_dyn_func
+#else
+ larl %r14,ftrace_trace_function
+#endif
+ lg %r14,0(%r14)
+ basr %r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+ .globl ftrace_graph_caller
+ftrace_graph_caller:
+ # This unconditional branch gets runtime patched. Change only if
+ # you know what you are doing. See ftrace_enable_graph_caller().
+ j 0f
+#endif
+ lg %r2,272(%r15)
+ lg %r3,168(%r15)
+ brasl %r14,prepare_ftrace_return
+ stg %r2,168(%r15)
+0:
+#endif
+ aghi %r15,160
+ lmg %r2,%r5,32(%r15)
+ lg %r14,112(%r15)
+ br %r14
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+ .globl return_to_handler
+return_to_handler:
+ stmg %r2,%r5,32(%r15)
+ lgr %r1,%r15
+ aghi %r15,-160
+ stg %r1,__SF_BACKCHAIN(%r15)
+ brasl %r14,ftrace_return_to_handler
+ aghi %r15,160
+ lgr %r14,%r2
+ lmg %r2,%r5,32(%r15)
+ br %r14
+
+#endif
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 43acd73105b7..f3ddd7ac06c5 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -51,6 +51,9 @@
#include "compat_ptrace.h"
#endif
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
enum s390_regset {
REGSET_GENERAL,
REGSET_FP,
@@ -661,8 +664,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
ret = -1;
}
- if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
- ftrace_syscall_enter(regs);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->gprs[2]);
if (unlikely(current->audit_context))
audit_syscall_entry(is_compat_task() ?
@@ -679,8 +682,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
regs->gprs[2]);
- if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
- ftrace_syscall_exit(regs);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->gprs[2]);
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index cbb897bc50bd..9ed13a1ed376 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -156,15 +156,11 @@ __setup("condev=", condev_setup);
static void __init set_preferred_console(void)
{
- if (MACHINE_IS_KVM) {
+ if (MACHINE_IS_KVM)
add_preferred_console("hvc", 0, NULL);
- s390_virtio_console_init();
- return;
- }
-
- if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
+ else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
add_preferred_console("ttyS", 0, NULL);
- if (CONSOLE_IS_3270)
+ else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
}
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 062bd64e65fa..6b4fef877f9d 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -536,4 +536,6 @@ void do_notify_resume(struct pt_regs *regs)
{
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index be2cae083406..56c16876b919 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -49,6 +49,7 @@
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
+#include <asm/cpu.h>
#include "entry.h"
static struct task_struct *current_set[NR_CPUS];
@@ -70,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
static void smp_ext_bitcall(int, ec_bit_sig);
+static int cpu_stopped(int cpu)
+{
+ __u32 status;
+
+ switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
+ case sigp_order_code_accepted:
+ case sigp_status_stored:
+ /* Check for stopped and check stop state */
+ if (status & 0x50)
+ return 1;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
void smp_send_stop(void)
{
int cpu, rc;
@@ -86,7 +104,7 @@ void smp_send_stop(void)
rc = signal_processor(cpu, sigp_stop);
} while (rc == sigp_busy);
- while (!smp_cpu_not_running(cpu))
+ while (!cpu_stopped(cpu))
cpu_relax();
}
}
@@ -269,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
#endif /* CONFIG_ZFCPDUMP */
-static int cpu_stopped(int cpu)
-{
- __u32 status;
-
- /* Check for stopped state */
- if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
- sigp_status_stored) {
- if (status & 0x40)
- return 1;
- }
- return 0;
-}
-
static int cpu_known(int cpu_id)
{
int cpu;
@@ -300,7 +305,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
logical_cpu = cpumask_first(&avail);
if (logical_cpu >= nr_cpu_ids)
return 0;
- for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
+ for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
@@ -379,7 +384,7 @@ static void __init smp_detect_cpus(void)
/* Use sigp detection algorithm if sclp doesn't work. */
if (sclp_get_cpu_info(info)) {
smp_use_sigp_detection = 1;
- for (cpu = 0; cpu <= 65535; cpu++) {
+ for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
if (cpu == boot_cpu_addr)
continue;
__cpu_logical_map[CPU_INIT_NO] = cpu;
@@ -635,7 +640,7 @@ int __cpu_disable(void)
void __cpu_die(unsigned int cpu)
{
/* Wait until target cpu is down */
- while (!smp_cpu_not_running(cpu))
+ while (!cpu_stopped(cpu))
cpu_relax();
smp_free_lowcore(cpu);
pr_info("Processor %d stopped\n", cpu);
diff --git a/arch/s390/power/swsusp.c b/arch/s390/kernel/suspend.c
index bd1f5c6b0b8c..086bee970cae 100644
--- a/arch/s390/power/swsusp.c
+++ b/arch/s390/kernel/suspend.c
@@ -1,13 +1,44 @@
/*
- * Support for suspend and resume on s390
+ * Suspend support specific for s390.
*
* Copyright IBM Corp. 2009
*
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
*/
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <asm/sections.h>
#include <asm/system.h>
+#include <asm/ipl.h>
+
+/*
+ * References to section boundaries
+ */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * check if given pfn is in the 'nosave' or in the read only NSS section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
+ >> PAGE_SHIFT;
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ if (pfn >= stext_pfn && pfn <= eshared_pfn) {
+ if (ipl_info.type == IPL_TYPE_NSS)
+ return 1;
+ } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
+ return 1;
+ return 0;
+}
void save_processor_state(void)
{
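pfn_is_nosave(), moved here from arch/s390/power/suspend.c, tells the hibernation core which page frames must never be written to the image: the __nosave section itself, the shared read-only text segment when the system was IPLed from an NSS, and any other frame that fails the tprot() storage-protection probe. A hedged sketch of how a snapshot loop would consult it is shown below; the demo_* helper is illustrative and simplified, not the actual mm/power code.

    /* Hedged, simplified sketch of the caller side: skip holes and
     * nosave frames when deciding whether a pfn goes into the image. */
    #include <linux/mm.h>

    extern int pfn_is_nosave(unsigned long pfn);

    static bool demo_page_is_saveable(unsigned long pfn)
    {
        if (!pfn_valid(pfn))
            return false;
        if (pfn_is_nosave(pfn))
            return false;
        return true;
    }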
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index b26df5c5933e..7cd6b096f0d1 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -21,7 +21,7 @@
* This function runs with disabled interrupts.
*/
.section .text
- .align 2
+ .align 4
.globl swsusp_arch_suspend
swsusp_arch_suspend:
stmg %r6,%r15,__SF_GPRS(%r15)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c81..54e327e9af04 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -60,6 +60,7 @@
#define TICK_SIZE tick
u64 sched_clock_base_cc = -1; /* Force to data section. */
+EXPORT_SYMBOL_GPL(sched_clock_base_cc);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
*/
unsigned long long notrace sched_clock(void)
{
- return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
+ return (get_clock_monotonic() * 125) >> 9;
}
/*
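The time.c change rebases sched_clock() on get_clock_monotonic() (TOD clock minus the now-exported sched_clock_base_cc) instead of open-coding the subtraction; the scaling is unchanged. One TOD clock unit is 1/4096 microsecond, so multiplying by 125 and shifting right by 9 is exactly * 125/512 = * 1000/4096, i.e. a conversion from TOD units to nanoseconds.

    /* The conversion used above: 125/512 == 1000/4096 ns per TOD unit. */
    static inline unsigned long long demo_tod_to_ns(unsigned long long tod)
    {
        return (tod * 125) >> 9;	/* == tod * 1000 / 4096 */
    }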
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a53db23ee092..7315f9e67e1d 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -52,55 +52,18 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_eshared = .; /* End of shareable data */
- . = ALIGN(16); /* Exception table */
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- } :data
-
- .data : { /* Data */
- DATA_DATA
- CONSTRUCTORS
- }
-
- . = ALIGN(PAGE_SIZE);
- .data_nosave : {
- __nosave_begin = .;
- *(.data.nosave)
- }
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- . = ALIGN(PAGE_SIZE);
- .data.page_aligned : {
- *(.data.idt)
- }
+ EXCEPTION_TABLE(16) :data
- . = ALIGN(0x100);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
+ RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
- . = ALIGN(0x100);
- .data.read_mostly : {
- *(.data.read_mostly)
- }
_edata = .; /* End of data section */
- . = ALIGN(THREAD_SIZE); /* init_task */
- .data.init_task : {
- *(.data.init_task)
- }
-
/* will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
+
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
/*
* .exit.text is discarded at runtime, not link time,
* to deal with references from __bug_table
@@ -111,49 +74,13 @@ SECTIONS
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
- .init.data : {
- INIT_DATA
- }
- . = ALIGN(0x100);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
- SECURITY_INIT
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(0x100);
- .init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- . = ALIGN(2);
- __initramfs_end = .;
- }
-#endif
+ INIT_DATA_SECTION(0x100)
PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
- /* BSS */
- .bss : {
- __bss_start = .;
- *(.bss)
- . = ALIGN(2);
- __bss_stop = .;
- }
+ BSS_SECTION(0, 2, 0)
_end = . ;
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index db05661ac895..eec054484419 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
-obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
+obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
+ page-states.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PAGE_STATES) += page-states.o
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e5e119fe03b2..1abbadd497e1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -10,6 +10,7 @@
* Copyright (C) 1995 Linus Torvalds
*/
+#include <linux/perf_counter.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -305,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
* interrupts again and then search the VMAs
*/
local_irq_enable();
-
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
down_read(&mm->mmap_sem);
si_code = SEGV_MAPERR;
@@ -363,11 +364,15 @@ good_area:
}
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+ if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- else
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
+ } else {
tsk->min_flt++;
-
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
up_read(&mm->mmap_sem);
/*
* The instruction that caused the program check will
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index fc0ad73ffd90..f92ec203ad92 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/mm/page-states.c
- *
* Copyright IBM Corp. 2008
*
* Guest page hinting for unused pages.
@@ -17,11 +15,12 @@
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
-static int cmma_flag;
+static int cmma_flag = 1;
static int __init cmma(char *str)
{
char *parm;
+
parm = strstrip(str);
if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
cmma_flag = 1;
@@ -32,7 +31,6 @@ static int __init cmma(char *str)
return 1;
return 0;
}
-
__setup("cmma=", cmma);
void __init cmma_init(void)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 565667207985..c70215247071 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -78,9 +78,9 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
}
page->index = page_to_phys(shadow);
}
- spin_lock(&mm->page_table_lock);
+ spin_lock(&mm->context.list_lock);
list_add(&page->lru, &mm->context.crst_list);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(&mm->context.list_lock);
return (unsigned long *) page_to_phys(page);
}
@@ -89,9 +89,9 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
unsigned long *shadow = get_shadow_table(table);
struct page *page = virt_to_page(table);
- spin_lock(&mm->page_table_lock);
+ spin_lock(&mm->context.list_lock);
list_del(&page->lru);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(&mm->context.list_lock);
if (shadow)
free_pages((unsigned long) shadow, ALLOC_ORDER);
free_pages((unsigned long) table, ALLOC_ORDER);
@@ -182,7 +182,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
unsigned long bits;
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
- spin_lock(&mm->page_table_lock);
+ spin_lock(&mm->context.list_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
@@ -191,7 +191,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
page = NULL;
}
if (!page) {
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(&mm->context.list_lock);
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
@@ -202,7 +202,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
clear_table_pgstes(table);
else
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
- spin_lock(&mm->page_table_lock);
+ spin_lock(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
}
table = (unsigned long *) page_to_phys(page);
@@ -213,7 +213,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
page->flags |= bits;
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
list_move_tail(&page->lru, &mm->context.pgtable_list);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(&mm->context.list_lock);
return table;
}
@@ -225,7 +225,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- spin_lock(&mm->page_table_lock);
+ spin_lock(&mm->context.list_lock);
page->flags ^= bits;
if (page->flags & FRAG_MASK) {
/* Page now has some free pgtable fragments. */
@@ -234,7 +234,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
} else
/* All fragments of the 4K page have been freed. */
list_del(&page->lru);
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(&mm->context.list_lock);
if (page) {
pgtable_page_dtor(page);
__free_page(page);
@@ -245,7 +245,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
struct page *page;
- spin_lock(&mm->page_table_lock);
+ spin_lock(&mm->context.list_lock);
/* Free shadow region and segment tables. */
list_for_each_entry(page, &mm->context.crst_list, lru)
if (page->index) {
@@ -255,7 +255,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
/* "Free" second halves of page tables. */
list_for_each_entry(page, &mm->context.pgtable_list, lru)
page->flags &= ~SECOND_HALVES;
- spin_unlock(&mm->page_table_lock);
+ spin_unlock(&mm->context.list_lock);
mm->context.noexec = 0;
update_mm(mm, tsk);
}
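Every list manipulation in pgtable.c now nests under a new per-mm context.list_lock rather than mm->page_table_lock, so crst/pgtable list housekeeping no longer contends with the generic PTE lock. The vmem.c hunk below initializes that lock for init_mm; for ordinary address spaces the lock and lists would be set up when the mm context is created, roughly as in the hedged sketch below (the helper name is illustrative, only the field names come from the patch).

    /* Hedged sketch: per-mm list_lock and list heads set up at
     * context-creation time.  demo_* is illustrative. */
    #include <linux/mm_types.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    static inline void demo_init_context_lists(struct mm_struct *mm)
    {
        spin_lock_init(&mm->context.list_lock);
        INIT_LIST_HEAD(&mm->context.crst_list);
        INIT_LIST_HEAD(&mm->context.pgtable_list);
    }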
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index e4868bfc672f..5f91a38d7592 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -331,6 +331,7 @@ void __init vmem_map_init(void)
unsigned long start, end;
int i;
+ spin_lock_init(&init_mm.context.list_lock);
INIT_LIST_HEAD(&init_mm.context.crst_list);
INIT_LIST_HEAD(&init_mm.context.pgtable_list);
init_mm.context.noexec = 0;
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile
deleted file mode 100644
index 973bb45a8fec..000000000000
--- a/arch/s390/power/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for s390 PM support
-#
-
-obj-$(CONFIG_HIBERNATION) += suspend.o
-obj-$(CONFIG_HIBERNATION) += swsusp.o
-obj-$(CONFIG_HIBERNATION) += swsusp_64.o
-obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c
deleted file mode 100644
index b3351eceebbe..000000000000
--- a/arch/s390/power/suspend.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Suspend support specific for s390.
- *
- * Copyright IBM Corp. 2009
- *
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- */
-
-#include <linux/mm.h>
-#include <linux/suspend.h>
-#include <linux/reboot.h>
-#include <linux/pfn.h>
-#include <asm/sections.h>
-#include <asm/ipl.h>
-
-/*
- * References to section boundaries
- */
-extern const void __nosave_begin, __nosave_end;
-
-/*
- * check if given pfn is in the 'nosave' or in the read only NSS section
- */
-int pfn_is_nosave(unsigned long pfn)
-{
- unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
- unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
- >> PAGE_SHIFT;
- unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
- unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
-
- if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
- return 1;
- if (pfn >= stext_pfn && pfn <= eshared_pfn) {
- if (ipl_info.type == IPL_TYPE_NSS)
- return 1;
- } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
- return 1;
- return 0;
-}
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c
deleted file mode 100644
index 9516a517d72f..000000000000
--- a/arch/s390/power/swsusp_64.c
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Support for suspend and resume on s390
- *
- * Copyright IBM Corp. 2009
- *
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
- */
-
-#include <asm/system.h>
-#include <linux/interrupt.h>
-
-void do_after_copyback(void)
-{
- mb();
-}
-
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index b5afbec1db59..04a21883f327 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -640,5 +640,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 0663a0ee6021..9e5c9b1d7e98 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -772,5 +772,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3f8b6a92eabd..233cff53a623 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,6 +25,8 @@ config SPARC
select ARCH_WANT_OPTIONAL_GPIOLIB
select RTC_CLASS
select RTC_DRV_M48T59
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
config SPARC32
def_bool !64BIT
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 204e4bf64438..5a8c308e2b5c 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
#include <linux/scatterlist.h>
#include <linux/mm.h>
+#include <linux/dma-debug.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
-struct dma_ops {
- void *(*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle);
- dma_addr_t (*map_page)(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
- void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nhwentries,
- enum dma_data_direction direction);
- void (*sync_single_for_cpu)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
- void (*sync_single_for_device)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
- void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
- int nelems,
- enum dma_data_direction direction);
- void (*sync_sg_for_device)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
-};
-extern const struct dma_ops *dma_ops;
+extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct bus_type pci_bus_type;
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- return dma_ops->map_page(dev, virt_to_page(cpu_addr),
- (unsigned long)cpu_addr & ~PAGE_MASK, size,
- direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->unmap_page(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- return dma_ops->map_page(dev, page, offset, size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->unmap_page(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+ if (dev->bus == &pci_bus_type)
+ return &pci32_dma_ops;
+#endif
+ return dma_ops;
}
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- if (dma_ops->sync_single_for_device)
- dma_ops->sync_single_for_device(dev, dma_handle, size,
- direction);
-}
+#include <asm-generic/dma-mapping-common.h>
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
{
- dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- if (dma_ops->sync_sg_for_device)
- dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+ cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
-}
+ struct dma_map_ops *ops = get_dma_ops(dev);
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
- dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
-
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return (dma_addr == DMA_ERROR_CODE);
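With the sparc header rebuilt on struct dma_map_ops plus asm-generic/dma-mapping-common.h, drivers keep using the ordinary DMA API; the per-bus dispatch (pci32_dma_ops for 32-bit PCI, dma_ops otherwise) and the dma-debug bookkeeping happen behind dma_alloc_coherent()/dma_free_coherent(). A minimal consumer therefore looks the same as on any other architecture; in the sketch below the struct device pointer is assumed to come from the driver's probe path.

    /* Hedged driver-side sketch: allocate and free one coherent buffer. */
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static int demo_alloc_ring(struct device *dev)
    {
        dma_addr_t ring_dma;
        void *ring;

        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
            return -ENOMEM;

        /* ... program ring_dma into the device, use ring from the CPU ... */

        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
    }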
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 1934f2cbf513..a0b443cb3c1f 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
return retval;
}
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
extern void *hardirq_stack[NR_CPUS];
extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index 6e14fd179335..d9c031f9910f 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -5,4 +5,7 @@
#else
#include <asm/pci_32.h>
#endif
+
+#include <asm-generic/pci-dma-compat.h>
+
#endif
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index b41c4c198159..ac0e8369fd97 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
*/
#define PCI_DMA_BUS_IS_PHYS (0)
-#include <asm/scatterlist.h>
-
struct pci_dev;
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings assosciated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
- */
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
-
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
-
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
-/*
- * Same as above, only with pages instead of mapped addresses.
- */
-extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction);
-extern void pci_unmap_page(struct pci_dev *hwdev,
- dma_addr_t dma_address, size_t size, int direction);
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scather-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the device
- * again owns the buffer.
- */
-extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly. For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
- return 1;
-}
-
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
}
#endif
-#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
- dma_addr_t dma_addr)
-{
- return (dma_addr == PCI_DMA_ERROR_CODE);
-}
-
struct device_node;
extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 7a1e3566e59c..5cc9f6aa5494 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
*/
#define PCI_DMA_BUS_IS_PHYS (0)
-static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
- size_t size, int direction)
-{
- return dma_map_single(&pdev->dev, ptr, size,
- (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- dma_unmap_single(&pdev->dev, dma_addr, size,
- (enum dma_data_direction) direction);
-}
-
-#define pci_map_page(dev, page, off, size, dir) \
- pci_map_single(dev, (page_address(page) + (off)), size, dir)
-#define pci_unmap_page(dev,addr,sz,dir) \
- pci_unmap_single(dev,addr,sz,dir)
-
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
-static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
- int nents, int direction)
-{
- return dma_map_sg(&pdev->dev, sg, nents,
- (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
- int nents, int direction)
-{
- dma_unmap_sg(&pdev->dev, sg, nents,
- (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
- (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
- struct scatterlist *sg,
- int nents, int direction)
-{
- dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
- (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly. For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
-
/* PCI IOMMU mapping bypass support. */
/* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
#define PCI64_ADDR_BASE 0xfffc000000000000UL
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
- dma_addr_t dma_addr)
-{
- return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 46f91ab66a50..857630cff636 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void __read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(raw_rwlock_t *rw)
{
register raw_rwlock_t *lp asm("g1");
lp = rw;
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw)
#define __raw_read_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- __read_lock(lock); \
+ arch_read_lock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void __read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(raw_rwlock_t *rw)
{
register raw_rwlock_t *lp asm("g1");
lp = rw;
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw)
#define __raw_read_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- __read_unlock(lock); \
+ arch_read_unlock(lock); \
local_irq_restore(flags); \
} while(0)
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (val == 0);
}
-static inline int __read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(raw_rwlock_t *rw)
{
register raw_rwlock_t *lp asm("g1");
register int res asm("o0");
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw)
({ unsigned long flags; \
int res; \
local_irq_save(flags); \
- res = __read_trylock(lock); \
+ res = arch_read_trylock(lock); \
local_irq_restore(flags); \
res; \
})
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index f6b2b92ad8d2..43e514783582 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline __read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(raw_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock)
: "memory");
}
-static int inline __read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(raw_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock)
return tmp1;
}
-static void inline __read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(raw_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock)
: "memory");
}
-static void inline __write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(raw_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock)
: "memory");
}
-static void inline __write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(raw_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock)
: "memory");
}
-static int inline __write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(raw_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock)
return result;
}
-#define __raw_read_lock(p) __read_lock(p)
-#define __raw_read_lock_flags(p, f) __read_lock(p)
-#define __raw_read_trylock(p) __read_trylock(p)
-#define __raw_read_unlock(p) __read_unlock(p)
-#define __raw_write_lock(p) __write_lock(p)
-#define __raw_write_lock_flags(p, f) __write_lock(p)
-#define __raw_write_unlock(p) __write_unlock(p)
-#define __raw_write_trylock(p) __write_trylock(p)
+#define __raw_read_lock(p) arch_read_lock(p)
+#define __raw_read_lock_flags(p, f) arch_read_lock(p)
+#define __raw_read_trylock(p) arch_read_trylock(p)
+#define __raw_read_unlock(p) arch_read_unlock(p)
+#define __raw_write_lock(p) arch_write_lock(p)
+#define __raw_write_lock_flags(p, f) arch_write_lock(p)
+#define __raw_write_unlock(p) arch_write_unlock(p)
+#define __raw_write_trylock(p) arch_write_trylock(p)
#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw) (!(rw)->lock)
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 475ce4696acd..29b88a580661 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -61,7 +61,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
obj-$(CONFIG_SPARC32) += devres.o
devres-y := ../../../kernel/irq/devres.o
-obj-$(CONFIG_SPARC32) += dma.o
+obj-y += dma.o
obj-$(CONFIG_SPARC32_PCI) += pcic.o
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index 524c32f97c55..e1ba8ee21b9a 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -1,178 +1,13 @@
-/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc.
- *
- * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
- */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#endif
+#include <linux/dma-debug.h>
-#include "dma.h"
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)
-int dma_supported(struct device *dev, u64 mask)
+static int __init dma_init(void)
{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_dma_supported(to_pci_dev(dev), mask);
-#endif
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
-EXPORT_SYMBOL(dma_supported);
-
-int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
- return -EOPNOTSUPP;
-}
-EXPORT_SYMBOL(dma_set_mask);
-
-static void *dma32_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
-#endif
- return sbus_alloc_consistent(dev, size, dma_handle);
-}
-
-static void dma32_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_free_consistent(to_pci_dev(dev), size,
- cpu_addr, dma_handle);
- return;
- }
-#endif
- sbus_free_consistent(dev, size, cpu_addr, dma_handle);
-}
-
-static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_map_page(to_pci_dev(dev), page, offset,
- size, (int)direction);
-#endif
- return sbus_map_single(dev, page_address(page) + offset,
- size, (int)direction);
-}
-
-static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_unmap_page(to_pci_dev(dev), dma_address,
- size, (int)direction);
- return;
- }
-#endif
- sbus_unmap_single(dev, dma_address, size, (int)direction);
-}
-
-static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
-#endif
- return sbus_map_sg(dev, sg, nents, direction);
-}
-
-void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
- return;
- }
-#endif
- sbus_unmap_sg(dev, sg, nents, (int)direction);
-}
-
-static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
- size, (int)direction);
- return;
- }
-#endif
- sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
-}
-
-static void dma32_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
- size, (int)direction);
- return;
- }
-#endif
- sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
-}
-
-static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
- nelems, (int)direction);
- return;
- }
-#endif
- BUG();
-}
-
-static void dma32_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
- nelems, (int)direction);
- return;
- }
-#endif
- BUG();
-}
-
-static const struct dma_ops dma32_dma_ops = {
- .alloc_coherent = dma32_alloc_coherent,
- .free_coherent = dma32_free_coherent,
- .map_page = dma32_map_page,
- .unmap_page = dma32_unmap_page,
- .map_sg = dma32_map_sg,
- .unmap_sg = dma32_unmap_sg,
- .sync_single_for_cpu = dma32_sync_single_for_cpu,
- .sync_single_for_device = dma32_sync_single_for_device,
- .sync_sg_for_cpu = dma32_sync_sg_for_cpu,
- .sync_sg_for_device = dma32_sync_sg_for_device,
-};
-
-const struct dma_ops *dma_ops = &dma32_dma_ops;
-EXPORT_SYMBOL(dma_ops);
+fs_initcall(dma_init);
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h
deleted file mode 100644
index f8d8951adb53..000000000000
--- a/arch/sparc/kernel/dma.h
+++ /dev/null
@@ -1,14 +0,0 @@
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp);
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba);
-dma_addr_t sbus_map_single(struct device *dev, void *va,
- size_t len, int direction);
-void sbus_unmap_single(struct device *dev, dma_addr_t ba,
- size_t n, int direction);
-int sbus_map_sg(struct device *dev, struct scatterlist *sg,
- int n, int direction);
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
- int n, int direction);
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
- size_t size, int direction);
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba,
- size_t size, int direction);
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 0aeaefe696b9..7690cc219ecc 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
- enum dma_data_direction direction)
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -474,7 +475,8 @@ do_flush_sync:
}
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
- size_t sz, enum dma_data_direction direction)
+ size_t sz, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
+ int nelems, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot, ctx;
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
}
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
+ int nelems, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
unsigned long flags, ctx;
struct scatterlist *sg;
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static const struct dma_ops sun4u_dma_ops = {
+static struct dma_map_ops sun4u_dma_ops = {
.alloc_coherent = dma_4u_alloc_coherent,
.free_coherent = dma_4u_free_coherent,
.map_page = dma_4u_map_page,
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = {
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
};
-const struct dma_ops *dma_ops = &sun4u_dma_ops;
+struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
+extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+
int dma_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask)
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
- return pci_dma_supported(to_pci_dev(dev), device_mask);
+ return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif
return 0;
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 87ea0d03d975..edbea232c617 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -48,8 +48,6 @@
#include <asm/iommu.h>
#include <asm/io-unit.h>
-#include "dma.h"
-
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
static struct resource *_sparc_find_resource(struct resource *r,
@@ -246,7 +244,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
* Typically devices use them for control blocks.
* CPU may access them without any explicit flushing.
*/
-void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
+static void *sbus_alloc_coherent(struct device *dev, size_t len,
+ dma_addr_t *dma_addrp, gfp_t gfp)
{
struct of_device *op = to_of_device(dev);
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -299,7 +298,8 @@ err_nopages:
return NULL;
}
-void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
+static void sbus_free_coherent(struct device *dev, size_t n, void *p,
+ dma_addr_t ba)
{
struct resource *res;
struct page *pgv;
@@ -317,7 +317,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
n = (n + PAGE_SIZE-1) & PAGE_MASK;
if ((res->end-res->start)+1 != n) {
- printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
+ printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
(long)((res->end-res->start)+1), n);
return;
}
@@ -337,8 +337,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
* CPU view of this memory may be inconsistent with
* a device view and explicit flushing is necessary.
*/
-dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
+static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t len,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
+ void *va = page_address(page) + offset;
+
/* XXX why are some lengths signed, others unsigned? */
if (len <= 0) {
return 0;
@@ -350,12 +355,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
return mmu_get_scsi_one(dev, va, len);
}
-void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
+static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_release_scsi_one(dev, ba, n);
}
-int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_get_scsi_sgl(dev, sg, n);
@@ -366,19 +373,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
return n;
}
-void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
+static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
mmu_release_scsi_sgl(dev, sg, n);
}
-void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int n, enum dma_data_direction dir)
{
+ BUG();
}
-void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
+static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int n, enum dma_data_direction dir)
{
+ BUG();
}
+struct dma_map_ops sbus_dma_ops = {
+ .alloc_coherent = sbus_alloc_coherent,
+ .free_coherent = sbus_free_coherent,
+ .map_page = sbus_map_page,
+ .unmap_page = sbus_unmap_page,
+ .map_sg = sbus_map_sg,
+ .unmap_sg = sbus_unmap_sg,
+ .sync_sg_for_cpu = sbus_sync_sg_for_cpu,
+ .sync_sg_for_device = sbus_sync_sg_for_device,
+};
+
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
static int __init sparc_register_ioport(void)
{
register_proc_sparc_ioport();
@@ -395,7 +421,8 @@ arch_initcall(sparc_register_ioport);
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
*/
-void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
+static void *pci32_alloc_coherent(struct device *dev, size_t len,
+ dma_addr_t *pba, gfp_t gfp)
{
unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
unsigned long va;
@@ -439,7 +466,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
}
-EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.
* cpu_addr is what was returned from pci_alloc_consistent,
@@ -449,7 +475,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
* References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
-void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
+static void pci32_free_coherent(struct device *dev, size_t n, void *p,
+ dma_addr_t ba)
{
struct resource *res;
unsigned long pgp;
@@ -481,60 +508,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
free_pages(pgp, get_order(n));
}
-EXPORT_SYMBOL(pci_free_consistent);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_* is performed.
- */
-dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
- int direction)
-{
- BUG_ON(direction == PCI_DMA_NONE);
- /* IIep is write-through, not flushing. */
- return virt_to_phys(ptr);
-}
-EXPORT_SYMBOL(pci_map_single);
-
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
- int direction)
-{
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
- }
-}
-EXPORT_SYMBOL(pci_unmap_single);
/*
* Same as pci_map_single, but with pages.
*/
-dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
+static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
- BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
return page_to_phys(page) + offset;
}
-EXPORT_SYMBOL(pci_map_page);
-
-void pci_unmap_page(struct pci_dev *hwdev,
- dma_addr_t dma_address, size_t size, int direction)
-{
- BUG_ON(direction == PCI_DMA_NONE);
- /* mmu_inval_dma_area XXX */
-}
-EXPORT_SYMBOL(pci_unmap_page);
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
@@ -551,13 +536,13 @@ EXPORT_SYMBOL(pci_unmap_page);
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
- int direction)
+static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -566,20 +551,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
}
return nents;
}
-EXPORT_SYMBOL(pci_map_sg);
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
- int direction)
+static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
@@ -588,7 +572,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
}
}
}
-EXPORT_SYMBOL(pci_unmap_sg);
/* Make physical memory consistent for a single
* streaming mode DMA translation before or after a transfer.
@@ -600,25 +583,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
* must first perform a pci_dma_sync_for_device, and then the
* device again owns the buffer.
*/
-void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
+ size_t size, enum dma_data_direction dir)
{
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
-void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
+static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
+ size_t size, enum dma_data_direction dir)
{
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
/* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
@@ -626,13 +607,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
* The same as pci_dma_sync_single_* but for a scatter-gather list,
* same rules and usage.
*/
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
@@ -641,15 +622,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
}
}
}
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
+static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int n;
- BUG_ON(direction == PCI_DMA_NONE);
- if (direction != PCI_DMA_TODEVICE) {
+ if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
@@ -658,9 +638,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
}
}
}
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
+
+struct dma_map_ops pci32_dma_ops = {
+ .alloc_coherent = pci32_alloc_coherent,
+ .free_coherent = pci32_free_coherent,
+ .map_page = pci32_map_page,
+ .map_sg = pci32_map_sg,
+ .unmap_sg = pci32_unmap_sg,
+ .sync_single_for_cpu = pci32_sync_single_for_cpu,
+ .sync_single_for_device = pci32_sync_single_for_device,
+ .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
+ .sync_sg_for_device = pci32_sync_sg_for_device,
+};
+EXPORT_SYMBOL(pci32_dma_ops);
+
#endif /* CONFIG_PCI */
+/*
+ * Return whether the given PCI device DMA address mask can be
+ * supported properly. For example, if your device can only drive the
+ * low 24-bits during PCI bus mastering, then you would pass
+ * 0x00ffffff as the mask to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PCI
+ if (dev->bus == &pci_bus_type)
+ return 1;
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+ if (dev->bus == &pci_bus_type)
+ return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
+
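As a usage illustration of the comment above, a hypothetical driver for a device that can only drive the low 24 address bits would probe roughly as follows (sketch only; example_probe and its error handling are assumptions, not part of this patch):

static int example_probe(struct device *dev)
{
	/* DMA_BIT_MASK(24) == 0x00ffffff: device drives only 24 address bits */
	if (dma_set_mask(dev, DMA_BIT_MASK(24)))
		return -EIO;	/* mask cannot be satisfied on this bus */

	/* ... dma_alloc_coherent()/dma_map_page() may now be used ... */
	return 0;
}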
#ifdef CONFIG_PROC_FS
static int
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 57859ad23547..c68648662802 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
pci_dev_put(ali_isa_bridge);
}
-int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
u64 dma_addr_mask;
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 2485eaa23101..23c33ff9c31e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
- enum dma_data_direction direction)
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct iommu *iommu;
unsigned long flags, npages, oaddr;
@@ -296,7 +297,8 @@ iommu_map_fail:
}
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
- size_t sz, enum dma_data_direction direction)
+ size_t sz, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
+ int nelems, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot;
@@ -478,7 +481,8 @@ iommu_map_failed:
}
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
+ int nelems, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct pci_pbm_info *pbm;
struct scatterlist *sg;
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void dma_4v_sync_single_for_cpu(struct device *dev,
- dma_addr_t bus_addr, size_t sz,
- enum dma_data_direction direction)
-{
- /* Nothing to do... */
-}
-
-static void dma_4v_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- /* Nothing to do... */
-}
-
-static const struct dma_ops sun4v_dma_ops = {
+static struct dma_map_ops sun4v_dma_ops = {
.alloc_coherent = dma_4v_alloc_coherent,
.free_coherent = dma_4v_free_coherent,
.map_page = dma_4v_map_page,
.unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
- .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
- .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
};
static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94e7724..18d67854a1b8 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
}
}
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
{
struct thread_info *tp = current_thread_info();
struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
static void sysrq_handle_globreg(int key, struct tty_struct *tty)
{
- __trigger_all_cpu_backtrace();
+ arch_trigger_all_cpu_backtrace();
}
static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 181d069a2d44..7ce1a1005b1d 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
}
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index ec82d76dc6f2..647afbda7ae1 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
}
+
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 13ffa5df37d7..fc20fdc0f7f2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -38,7 +38,7 @@ config X86
select HAVE_FUNCTION_GRAPH_FP_TEST
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
- select HAVE_FTRACE_SYSCALLS
+ select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KVM
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
@@ -586,7 +586,6 @@ config GART_IOMMU
bool "GART IOMMU support" if EMBEDDED
default y
select SWIOTLB
- select AGP
depends on X86_64 && PCI
---help---
Support for full DMA access of devices with 32bit memory access only
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 8130334329c0..527519b8a9f9 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -262,6 +262,15 @@ config MCORE2
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
(not a typo)
+config MATOM
+ bool "Intel Atom"
+ ---help---
+
+ Select this for the Intel Atom platform. Intel Atom CPUs have an
+ in-order pipelining architecture and thus can benefit from
+ accordingly optimized code. Use a recent GCC with specific Atom
+ support in order to fully benefit from selecting this option.
+
config GENERIC_CPU
bool "Generic-x86-64"
depends on X86_64
@@ -295,7 +304,7 @@ config X86_CPU
config X86_L1_CACHE_BYTES
int
default "128" if MPSC
- default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
+ default "64" if GENERIC_CPU || MK8 || MCORE2 || MATOM || X86_32
config X86_INTERNODE_CACHE_BYTES
int
@@ -310,7 +319,7 @@ config X86_L1_CACHE_SHIFT
default "7" if MPENTIUM4 || MPSC
default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
config X86_XADD
def_bool y
@@ -359,7 +368,7 @@ config X86_INTEL_USERCOPY
config X86_USE_PPRO_CHECKSUM
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
config X86_USE_3DNOW
def_bool y
@@ -387,7 +396,7 @@ config X86_P6_NOP
config X86_TSC
def_bool y
- depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64
+ depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64
config X86_CMPXCHG64
def_bool y
@@ -397,7 +406,7 @@ config X86_CMPXCHG64
# generates cmov.
config X86_CMOV
def_bool y
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64)
+ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
config X86_MINIMUM_CPU_FAMILY
int
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1b68659c41b4..5128b178529f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -55,6 +55,8 @@ else
cflags-$(CONFIG_MCORE2) += \
$(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+ $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
KBUILD_CFLAGS += $(cflags-y)
@@ -72,7 +74,7 @@ endif
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
- ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
+ ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
stackp-y := -fstack-protector
stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
KBUILD_CFLAGS += $(stackp-y)
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 80177ec052f0..30e9a264f69d 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -33,6 +33,8 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-f
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
+cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+ $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
# AMD Elan support
cflags-$(CONFIG_X86_ELAN) += -march=i486
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index edb992ebef92..d28fad19654a 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -2355,7 +2355,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_HW_BRANCH_TRACER=y
-CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index cee1dd2e69b2..6c86acd847a4 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -2329,7 +2329,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_HW_BRANCH_TRACER=y
-CONFIG_HAVE_FTRACE_SYSCALLS=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_RING_BUFFER=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index c580c5ec1cad..585edebe12cf 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -59,13 +59,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
-static inline int kernel_fpu_using(void)
-{
- if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
- return 1;
- return 0;
-}
-
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
unsigned long addr = (unsigned long)raw_ctx;
@@ -89,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
return -EINVAL;
}
- if (kernel_fpu_using())
+ if (irq_fpu_usable())
err = crypto_aes_expand_key(ctx, in_key, key_len);
else {
kernel_fpu_begin();
@@ -110,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
- if (kernel_fpu_using())
+ if (irq_fpu_usable())
crypto_aes_encrypt_x86(ctx, dst, src);
else {
kernel_fpu_begin();
@@ -123,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
- if (kernel_fpu_using())
+ if (irq_fpu_usable())
crypto_aes_decrypt_x86(ctx, dst, src);
else {
kernel_fpu_begin();
@@ -349,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- if (kernel_fpu_using()) {
+ if (irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
@@ -370,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- if (kernel_fpu_using()) {
+ if (irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
@@ -636,7 +629,7 @@ static int __init aesni_init(void)
int err;
if (!cpu_has_aes) {
- printk(KERN_ERR "Intel AES-NI instructions are not detected.\n");
+ printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
return -ENODEV;
}
if ((err = crypto_register_alg(&aesni_alg)))
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index e590261ba059..ba331bfd1112 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -537,7 +537,7 @@ ia32_sys_call_table:
.quad sys_mkdir
.quad sys_rmdir /* 40 */
.quad sys_dup
- .quad sys32_pipe
+ .quad sys_pipe
.quad compat_sys_times
.quad quiet_ni_syscall /* old prof syscall holder */
.quad sys_brk /* 45 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 085a8c35f149..9f5527198825 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -189,20 +189,6 @@ asmlinkage long sys32_mprotect(unsigned long start, size_t len,
return sys_mprotect(start, len, prot);
}
-asmlinkage long sys32_pipe(int __user *fd)
-{
- int retval;
- int fds[2];
-
- retval = do_pipe_flags(fds, 0);
- if (retval)
- goto out;
- if (copy_to_user(fd, fds, sizeof(fds)))
- retval = -EFAULT;
-out:
- return retval;
-}
-
asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
struct sigaction32 __user *oact,
unsigned int sigsetsize)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 1a37bcdc8606..c240efc74e00 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -73,8 +73,6 @@ static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif /* CONFIG_SMP */
-const unsigned char *const *find_nop_table(void);
-
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
\
@@ -144,8 +142,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
#define __parainstructions_end NULL
#endif
-extern void add_nops(void *insns, unsigned int len);
-
/*
* Clear and restore the kernel write-protection flag on the local CPU.
* Allows the kernel to edit read-only pages.
@@ -161,10 +157,7 @@ extern void add_nops(void *insns, unsigned int len);
* Intel's errata.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing an
* inconsistent instruction while you patch.
- * The _early version expects the memory to already be RW.
*/
-
extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index bdf96f119f06..ac95995b7bad 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -25,6 +25,7 @@
#ifdef CONFIG_AMD_IOMMU
extern int amd_iommu_init(void);
extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
extern void amd_iommu_detect(void);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_flush_all_domains(void);
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 0c878caaa0a2..2a2cc7a78a81 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -143,22 +143,29 @@
#define EVT_BUFFER_SIZE 8192 /* 512 entries */
#define EVT_LEN_MASK (0x9ULL << 56)
+#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
-
-#define IOMMU_PDE_NL_0 0x000ULL
-#define IOMMU_PDE_NL_1 0x200ULL
-#define IOMMU_PDE_NL_2 0x400ULL
-#define IOMMU_PDE_NL_3 0x600ULL
-
-#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
-#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
-#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)
-
-#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
-#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
-#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
+#define PAGE_MODE_4_LEVEL 0x04
+#define PAGE_MODE_5_LEVEL 0x05
+#define PAGE_MODE_6_LEVEL 0x06
+
+#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
+#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
+ ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
+ (0xffffffffffffffffULL))
+#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
+#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
+#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
+ IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)
+
+#define PM_MAP_4k 0
+#define PM_ADDR_MASK 0x000ffffffffff000ULL
+#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
+ (~((1ULL << (12 + ((lvl) * 9))) - 1)))
+#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
@@ -167,11 +174,6 @@
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
-#define IOMMU_L1_PDE(address) \
- ((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
-#define IOMMU_L2_PDE(address) \
- ((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
-
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
@@ -194,11 +196,14 @@
#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
domain for an IOMMU */
+#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
+ translation */
+
extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
do { \
if (amd_iommu_dump) \
- printk(KERN_INFO "AMD IOMMU: " format, ## arg); \
+ printk(KERN_INFO "AMD-Vi: " format, ## arg); \
} while(0);
/*
@@ -226,6 +231,7 @@ struct protection_domain {
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
unsigned long flags; /* flags to find out type of domain */
+ bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
void *priv; /* private data */
};
@@ -337,6 +343,9 @@ struct amd_iommu {
/* if one, we need to send a completion wait command */
bool need_sync;
+ /* becomes true if a command buffer reset is running */
+ bool reset_in_progress;
+
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
};
@@ -457,4 +466,7 @@ static inline void amd_iommu_stats_init(void) { }
#endif /* CONFIG_AMD_IOMMU_STATS */
+/* some function prototypes */
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
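A quick worked reading of the new paging-mode macros above, derived directly from their definitions (arithmetic only):

/*
 * PM_LEVEL_SHIFT(x) = 12 + 9*x, so for a level-3 entry:
 *
 *	PM_LEVEL_SHIFT(3)       = 39
 *	PM_LEVEL_SIZE(3)        = (1ULL << 39) - 1   (a 3-level table spans 512GB)
 *	PM_LEVEL_INDEX(3, addr) = (addr >> 39) & 0x1ff
 *	PM_LEVEL_ENC(3)         = 3 << 9             (next-level bits of a PDE)
 */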
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index bb7d47925847..586b7adb8e53 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -183,6 +183,10 @@ static inline int x2apic_enabled(void)
}
#define x2apic_supported() (cpu_has_x2apic)
+static inline void x2apic_force_phys(void)
+{
+ x2apic_phys = 1;
+}
#else
static inline void check_x2apic(void)
{
@@ -194,6 +198,9 @@ static inline int x2apic_enabled(void)
{
return 0;
}
+static inline void x2apic_force_phys(void)
+{
+}
#define x2apic_preenabled 0
#define x2apic_supported() 0
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 7ddb36ab933b..7386bfa4f4bc 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -8,7 +8,8 @@
* Ingo Molnar <mingo@redhat.com>, 1999, 2000
*/
-#define APIC_DEFAULT_PHYS_BASE 0xfee00000
+#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000
+#define APIC_DEFAULT_PHYS_BASE 0xfee00000
#define APIC_ID 0x20
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 56be78f582f0..b3ed1e1460ff 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,7 +3,7 @@
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
-# define __ASM_EX_SEC .section __ex_table
+# define __ASM_EX_SEC .section __ex_table, "a"
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
@@ -38,10 +38,18 @@
#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
+#ifdef __ASSEMBLY__
+# define _ASM_EXTABLE(from,to) \
+ __ASM_EX_SEC ; \
+ _ASM_ALIGN ; \
+ _ASM_PTR from , to ; \
+ .previous
+#else
# define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC \
_ASM_ALIGN "\n" \
_ASM_PTR #from "," #to "\n" \
" .previous\n"
+#endif
#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 4a28d22d4793..847fee6493a2 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -95,6 +95,7 @@
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index c993e9e0fed4..e8de2f6f5ca5 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -291,11 +291,24 @@ static inline unsigned long get_desc_base(const struct desc_struct *desc)
return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
}
+static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
+{
+ desc->base0 = base & 0xffff;
+ desc->base1 = (base >> 16) & 0xff;
+ desc->base2 = (base >> 24) & 0xff;
+}
+
static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
return desc->limit0 | (desc->limit << 16);
}
+static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+{
+ desc->limit0 = limit & 0xffff;
+ desc->limit = (limit >> 16) & 0xf;
+}
+
static inline void _set_gate(int gate, unsigned type, void *addr,
unsigned dpl, unsigned ist, unsigned seg)
{
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index a6adefa28b94..9d6684849fd9 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -34,6 +34,12 @@ struct desc_struct {
};
} __attribute__((packed));
+#define GDT_ENTRY_INIT(flags, base, limit) { { { \
+ .a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
+ .b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
+ ((limit) & 0xf0000) | ((base) & 0xff000000), \
+ } } }
+
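For illustration, GDT_ENTRY_INIT() packs a (flags, base, limit) triple into the descriptor's two 32-bit words; the flat 4G code and data segments that lguest switches to later in this patch are built exactly this way. A minimal sketch (the example_ names are assumptions):

/* exec segment: type 0x9b, 4K granularity, 32-bit, base 0, limit 4G */
static const struct desc_struct example_code_seg =
	GDT_ENTRY_INIT(0xc09b, 0, 0xfffff);

/* data segment: type 0x93, otherwise identical */
static const struct desc_struct example_data_seg =
	GDT_ENTRY_INIT(0xc093, 0, 0xfffff);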
enum {
GATE_INTERRUPT = 0xE,
GATE_TRAP = 0xF,
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 1c3f9435f1c9..0ee770d23d0e 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -55,6 +55,24 @@ extern int dma_set_mask(struct device *dev, u64 mask);
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag);
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+
+ return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr;
+}
+
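These helpers exist so address-limit checks can be shared; a hedged sketch of how a bounce-buffering layer (for example swiotlb, assumed here) would combine them:

/*
 * Sketch only, not part of this patch: decide whether a buffer at paddr
 * is directly reachable by the device or needs a bounce buffer.
 */
static bool example_needs_bounce(struct device *dev, phys_addr_t paddr,
				 size_t size)
{
	dma_addr_t dev_addr = phys_to_dma(dev, paddr);

	return !dma_capable(dev, dev_addr, size);
}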
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index 3afc5e87cfdd..ae6253ab9029 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -87,9 +87,25 @@
CFI_RESTORE \reg
.endm
#else /*!CONFIG_X86_64*/
+ .macro pushl_cfi reg
+ pushl \reg
+ CFI_ADJUST_CFA_OFFSET 4
+ .endm
- /* 32bit defenitions are missed yet */
+ .macro popl_cfi reg
+ popl \reg
+ CFI_ADJUST_CFA_OFFSET -4
+ .endm
+ .macro movl_cfi reg offset=0
+ movl %\reg, \offset(%esp)
+ CFI_REL_OFFSET \reg, \offset
+ .endm
+
+ .macro movl_cfi_restore offset reg
+ movl \offset(%esp), %\reg
+ CFI_RESTORE \reg
+ .endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index bd2c6511c887..db24c2278be0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,13 +28,6 @@
#endif
-/* FIXME: I don't want to stay hardcoded */
-#ifdef CONFIG_X86_64
-# define FTRACE_SYSCALL_MAX 296
-#else
-# define FTRACE_SYSCALL_MAX 333
-#endif
-
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((long)(mcount))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 175adf58dd4f..fb7f0d64e14f 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -301,6 +301,14 @@ static inline void kernel_fpu_end(void)
preempt_enable();
}
+static inline bool irq_fpu_usable(void)
+{
+ struct pt_regs *regs;
+
+ return !in_interrupt() || !(regs = get_irq_regs()) || \
+ user_mode(regs) || (read_cr0() & X86_CR0_TS);
+}
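Going by the definition above (a true return means the FPU may be claimed at this point), the intended calling pattern for code that can run in interrupt context looks roughly like this; example_do_simd_work and both branch bodies are illustrative assumptions:

static void example_do_simd_work(u8 *dst, const u8 *src, unsigned int len)
{
	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		/* ... SSE/AES-NI accelerated path ... */
		kernel_fpu_end();
	} else {
		/* ... plain integer fallback, no FPU/SSE state touched ... */
	}
}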
+
/*
* Some instructions like VIA's padlock instructions generate a spurious
* DNA fault but don't modify SSE registers. And these instructions
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 330ee807f89e..85232d32fcb8 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -150,11 +150,10 @@ extern int timer_through_8259;
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
-#ifdef CONFIG_ACPI
+extern u8 io_apic_unique_id(u8 id);
extern int io_apic_get_unique_id(int ioapic, int apic_id);
extern int io_apic_get_version(int ioapic);
extern int io_apic_get_redir_entries(int ioapic);
-#endif /* CONFIG_ACPI */
struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
@@ -177,6 +176,16 @@ extern int setup_ioapic_entry(int apic, int irq,
int polarity, int vector, int pin);
extern void ioapic_write_entry(int apic, int pin,
struct IO_APIC_route_entry e);
+
+struct mp_ioapic_gsi{
+ int gsi_base;
+ int gsi_end;
+};
+extern struct mp_ioapic_gsi mp_gsi_routing[];
+int mp_find_ioapic(int gsi);
+int mp_find_ioapic_pin(int ioapic, int gsi);
+void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
+
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
static const int timer_through_8259 = 0;
diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h
index 0d5b23b7b06e..ec34c760665e 100644
--- a/arch/x86/include/asm/ioctls.h
+++ b/arch/x86/include/asm/ioctls.h
@@ -1,94 +1 @@
-#ifndef _ASM_X86_IOCTLS_H
-#define _ASM_X86_IOCTLS_H
-
-#include <asm/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS 0x5401
-#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
-#define TCSETSW 0x5403
-#define TCSETSF 0x5404
-#define TCGETA 0x5405
-#define TCSETA 0x5406
-#define TCSETAW 0x5407
-#define TCSETAF 0x5408
-#define TCSBRK 0x5409
-#define TCXONC 0x540A
-#define TCFLSH 0x540B
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-#define TIOCGPGRP 0x540F
-#define TIOCSPGRP 0x5410
-#define TIOCOUTQ 0x5411
-#define TIOCSTI 0x5412
-#define TIOCGWINSZ 0x5413
-#define TIOCSWINSZ 0x5414
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define FIONREAD 0x541B
-#define TIOCINQ FIONREAD
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-#define FIONBIO 0x5421
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x5429 /* Return the session ID of FD */
-#define TCGETS2 _IOR('T', 0x2A, struct termios2)
-#define TCSETS2 _IOW('T', 0x2B, struct termios2)
-#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
-#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
-#define TIOCGRS485 0x542E
-#define TIOCSRS485 0x542F
-#define TIOCGPTN _IOR('T', 0x30, unsigned int)
- /* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
-#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
-#define TCSETX 0x5433
-#define TCSETXF 0x5434
-#define TCSETXW 0x5435
-
-#define FIONCLEX 0x5450
-#define FIOCLEX 0x5451
-#define FIOASYNC 0x5452
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
-#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
-#define FIOQSIZE 0x5460
-
-/* Used for packet mode */
-#define TIOCPKT_DATA 0
-#define TIOCPKT_FLUSHREAD 1
-#define TIOCPKT_FLUSHWRITE 2
-#define TIOCPKT_STOP 4
-#define TIOCPKT_START 8
-#define TIOCPKT_NOSTOP 16
-#define TIOCPKT_DOSTOP 32
-
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
-#endif /* _ASM_X86_IOCTLS_H */
+#include <asm-generic/ioctls.h>
diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h
index ee678fd51594..84c7e51cb6d0 100644
--- a/arch/x86/include/asm/ipcbuf.h
+++ b/arch/x86/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ASM_X86_IPCBUF_H
-#define _ASM_X86_IPCBUF_H
-
-/*
- * The ipc64_perm structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm {
- __kernel_key_t key;
- __kernel_uid32_t uid;
- __kernel_gid32_t gid;
- __kernel_uid32_t cuid;
- __kernel_gid32_t cgid;
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned short __pad2;
- unsigned long __unused1;
- unsigned long __unused2;
-};
-
-#endif /* _ASM_X86_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c6ccbe7e81ad..9e2b952f810a 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -13,14 +13,13 @@ static inline unsigned long native_save_fl(void)
unsigned long flags;
/*
- * Note: this needs to be "=r" not "=rm", because we have the
- * stack offset from what gcc expects at the time the "pop" is
- * executed, and so a memory reference with respect to the stack
- * would end up using the wrong address.
+ * "=rm" is safe here, because "pop" adjusts the stack before
+ * it evaluates its effective address -- this is part of the
+ * documented behavior of the "pop" instruction.
*/
asm volatile("# __raw_save_flags\n\t"
"pushf ; pop %0"
- : "=r" (flags)
+ : "=rm" (flags)
: /* no input */
: "memory");
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 5136dad57cbb..0d97deba1e35 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -90,8 +90,9 @@ static inline void lguest_set_ts(void)
}
/* Full 4G segment descriptors, suitable for CS and DS. */
-#define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } })
-#define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } })
+#define FULL_EXEC_SEGMENT \
+ ((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff))
+#define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff))
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
index 751af2550ed9..593e51d4643f 100644
--- a/arch/x86/include/asm/mman.h
+++ b/arch/x86/include/asm/mman.h
@@ -1,20 +1,8 @@
#ifndef _ASM_X86_MMAN_H
#define _ASM_X86_MMAN_H
-#include <asm-generic/mman-common.h>
-
#define MAP_32BIT 0x40 /* only give out 32bit addresses */
-#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
-#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MAP_LOCKED 0x2000 /* pages are locked */
-#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
+#include <asm-generic/mman.h>
#endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 47d62743c4d5..3e2ce58a31a3 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -1,18 +1,7 @@
#ifndef _ASM_X86_MODULE_H
#define _ASM_X86_MODULE_H
-/* x86_32/64 are simple */
-struct mod_arch_specific {};
-
-#ifdef CONFIG_X86_32
-# define Elf_Shdr Elf32_Shdr
-# define Elf_Sym Elf32_Sym
-# define Elf_Ehdr Elf32_Ehdr
-#else
-# define Elf_Shdr Elf64_Shdr
-# define Elf_Sym Elf64_Sym
-# define Elf_Ehdr Elf64_Ehdr
-#endif
+#include <asm-generic/module.h>
#ifdef CONFIG_X86_64
/* X86_64 does not define MODULE_PROC_FAMILY */
@@ -28,6 +17,8 @@ struct mod_arch_specific {};
#define MODULE_PROC_FAMILY "586MMX "
#elif defined CONFIG_MCORE2
#define MODULE_PROC_FAMILY "CORE2 "
+#elif defined CONFIG_MATOM
+#define MODULE_PROC_FAMILY "ATOM "
#elif defined CONFIG_M686
#define MODULE_PROC_FAMILY "686 "
#elif defined CONFIG_MPENTIUMII
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h
index 7e4e9481f51c..809134c644a6 100644
--- a/arch/x86/include/asm/msgbuf.h
+++ b/arch/x86/include/asm/msgbuf.h
@@ -1,39 +1 @@
-#ifndef _ASM_X86_MSGBUF_H
-#define _ASM_X86_MSGBUF_H
-
-/*
- * The msqid64_ds structure for i386 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space on i386 is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- *
- * Pad space on x8664 is left for:
- * - 2 miscellaneous 64-bit values
- */
-struct msqid64_ds {
- struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
-#ifdef __i386__
- unsigned long __unused1;
-#endif
- __kernel_time_t msg_rtime; /* last msgrcv time */
-#ifdef __i386__
- unsigned long __unused2;
-#endif
- __kernel_time_t msg_ctime; /* last change time */
-#ifdef __i386__
- unsigned long __unused3;
-#endif
- unsigned long msg_cbytes; /* current number of bytes on queue */
- unsigned long msg_qnum; /* number of messages in queue */
- unsigned long msg_qbytes; /* max number of bytes on queue */
- __kernel_pid_t msg_lspid; /* pid of last msgsnd */
- __kernel_pid_t msg_lrpid; /* last receive pid */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-#endif /* _ASM_X86_MSGBUF_H */
+#include <asm-generic/msgbuf.h>
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 48ad9d29484a..7e2b6ba962ff 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -3,10 +3,16 @@
#include <asm/msr-index.h>
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
+#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
+
+#ifdef __KERNEL__
+
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
@@ -67,23 +73,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
".previous\n\t"
_ASM_EXTABLE(2b, 3b)
: [err] "=r" (*err), EAX_EDX_RET(val, low, high)
- : "c" (msr), [fault] "i" (-EFAULT));
- return EAX_EDX_VAL(val, low, high);
-}
-
-static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
- int *err)
-{
- DECLARE_ARGS(val, low, high);
-
- asm volatile("2: rdmsr ; xor %0,%0\n"
- "1:\n\t"
- ".section .fixup,\"ax\"\n\t"
- "3: mov %3,%0 ; jmp 1b\n\t"
- ".previous\n\t"
- _ASM_EXTABLE(2b, 3b)
- : "=r" (*err), EAX_EDX_RET(val, low, high)
- : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+ : "c" (msr), [fault] "i" (-EIO));
return EAX_EDX_VAL(val, low, high);
}
@@ -106,13 +96,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
_ASM_EXTABLE(2b, 3b)
: [err] "=a" (err)
: "c" (msr), "0" (low), "d" (high),
- [fault] "i" (-EFAULT)
+ [fault] "i" (-EIO)
: "memory");
return err;
}
extern unsigned long long native_read_tsc(void);
+extern int native_rdmsr_safe_regs(u32 regs[8]);
+extern int native_wrmsr_safe_regs(u32 regs[8]);
+
static __always_inline unsigned long long __native_read_tsc(void)
{
DECLARE_ARGS(val, low, high);
@@ -181,14 +174,44 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
*p = native_read_msr_safe(msr, &err);
return err;
}
+
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
+ u32 gprs[8] = { 0 };
int err;
- *p = native_read_msr_amd_safe(msr, &err);
+ gprs[1] = msr;
+ gprs[7] = 0x9c5a203a;
+
+ err = native_rdmsr_safe_regs(gprs);
+
+ *p = gprs[0] | ((u64)gprs[2] << 32);
+
return err;
}
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+ u32 gprs[8] = { 0 };
+
+ gprs[0] = (u32)val;
+ gprs[1] = msr;
+ gprs[2] = val >> 32;
+ gprs[7] = 0x9c5a203a;
+
+ return native_wrmsr_safe_regs(gprs);
+}
+
+static inline int rdmsr_safe_regs(u32 regs[8])
+{
+ return native_rdmsr_safe_regs(regs);
+}
+
+static inline int wrmsr_safe_regs(u32 regs[8])
+{
+ return native_wrmsr_safe_regs(regs);
+}
+
#define rdtscl(low) \
((low) = (u32)__native_read_tsc())
@@ -228,6 +251,8 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
@@ -258,7 +283,15 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
return wrmsr_safe(msr_no, l, h);
}
+static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+{
+ return rdmsr_safe_regs(regs);
+}
+static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+{
+ return wrmsr_safe_regs(regs);
+}
#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c86e5ed4af51..e63cf7d441e1 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
static inline void localise_nmi_watchdog(void)
{
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h
index 6f0d0422f4ca..965d45427975 100644
--- a/arch/x86/include/asm/param.h
+++ b/arch/x86/include/asm/param.h
@@ -1,22 +1 @@
-#ifndef _ASM_X86_PARAM_H
-#define _ASM_X86_PARAM_H
-
-#ifdef __KERNEL__
-# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* some user interfaces are */
-# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#endif /* _ASM_X86_PARAM_H */
+#include <asm-generic/param.h>
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4fb37c8a0832..40d6586af25b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -7,689 +7,11 @@
#include <asm/pgtable_types.h>
#include <asm/asm.h>
-/* Bitmask of what can be clobbered: usually at least eax. */
-#define CLBR_NONE 0
-#define CLBR_EAX (1 << 0)
-#define CLBR_ECX (1 << 1)
-#define CLBR_EDX (1 << 2)
-#define CLBR_EDI (1 << 3)
-
-#ifdef CONFIG_X86_32
-/* CLBR_ANY should match all regs platform has. For i386, that's just it */
-#define CLBR_ANY ((1 << 4) - 1)
-
-#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
-#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
-#define CLBR_SCRATCH (0)
-#else
-#define CLBR_RAX CLBR_EAX
-#define CLBR_RCX CLBR_ECX
-#define CLBR_RDX CLBR_EDX
-#define CLBR_RDI CLBR_EDI
-#define CLBR_RSI (1 << 4)
-#define CLBR_R8 (1 << 5)
-#define CLBR_R9 (1 << 6)
-#define CLBR_R10 (1 << 7)
-#define CLBR_R11 (1 << 8)
-
-#define CLBR_ANY ((1 << 9) - 1)
-
-#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
- CLBR_RCX | CLBR_R8 | CLBR_R9)
-#define CLBR_RET_REG (CLBR_RAX)
-#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
-
-#include <asm/desc_defs.h>
-#endif /* X86_64 */
-
-#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+#include <asm/paravirt_types.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
-#include <asm/kmap_types.h>
-#include <asm/desc_defs.h>
-
-struct page;
-struct thread_struct;
-struct desc_ptr;
-struct tss_struct;
-struct mm_struct;
-struct desc_struct;
-struct task_struct;
-
-/*
- * Wrapper type for pointers to code which uses the non-standard
- * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
- */
-struct paravirt_callee_save {
- void *func;
-};
-
-/* general info */
-struct pv_info {
- unsigned int kernel_rpl;
- int shared_kernel_pmd;
- int paravirt_enabled;
- const char *name;
-};
-
-struct pv_init_ops {
- /*
- * Patch may replace one of the defined code sequences with
- * arbitrary code, subject to the same register constraints.
- * This generally means the code is not free to clobber any
- * registers other than EAX. The patch function should return
- * the number of bytes of code generated, as we nop pad the
- * rest in generic code.
- */
- unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
- unsigned long addr, unsigned len);
-
- /* Basic arch-specific setup */
- void (*arch_setup)(void);
- char *(*memory_setup)(void);
- void (*post_allocator_init)(void);
-
- /* Print a banner to identify the environment */
- void (*banner)(void);
-};
-
-
-struct pv_lazy_ops {
- /* Set deferred update mode, used for batching operations. */
- void (*enter)(void);
- void (*leave)(void);
-};
-
-struct pv_time_ops {
- void (*time_init)(void);
-
- /* Set and set time of day */
- unsigned long (*get_wallclock)(void);
- int (*set_wallclock)(unsigned long);
-
- unsigned long long (*sched_clock)(void);
- unsigned long (*get_tsc_khz)(void);
-};
-
-struct pv_cpu_ops {
- /* hooks for various privileged instructions */
- unsigned long (*get_debugreg)(int regno);
- void (*set_debugreg)(int regno, unsigned long value);
-
- void (*clts)(void);
-
- unsigned long (*read_cr0)(void);
- void (*write_cr0)(unsigned long);
-
- unsigned long (*read_cr4_safe)(void);
- unsigned long (*read_cr4)(void);
- void (*write_cr4)(unsigned long);
-
-#ifdef CONFIG_X86_64
- unsigned long (*read_cr8)(void);
- void (*write_cr8)(unsigned long);
-#endif
-
- /* Segment descriptor handling */
- void (*load_tr_desc)(void);
- void (*load_gdt)(const struct desc_ptr *);
- void (*load_idt)(const struct desc_ptr *);
- void (*store_gdt)(struct desc_ptr *);
- void (*store_idt)(struct desc_ptr *);
- void (*set_ldt)(const void *desc, unsigned entries);
- unsigned long (*store_tr)(void);
- void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-#ifdef CONFIG_X86_64
- void (*load_gs_index)(unsigned int idx);
-#endif
- void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
- const void *desc);
- void (*write_gdt_entry)(struct desc_struct *,
- int entrynum, const void *desc, int size);
- void (*write_idt_entry)(gate_desc *,
- int entrynum, const gate_desc *gate);
- void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
- void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
-
- void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
-
- void (*set_iopl_mask)(unsigned mask);
-
- void (*wbinvd)(void);
- void (*io_delay)(void);
-
- /* cpuid emulation, mostly so that caps bits can be disabled */
- void (*cpuid)(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
-
- /* MSR, PMC and TSR operations.
- err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
- u64 (*read_msr_amd)(unsigned int msr, int *err);
- u64 (*read_msr)(unsigned int msr, int *err);
- int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-
- u64 (*read_tsc)(void);
- u64 (*read_pmc)(int counter);
- unsigned long long (*read_tscp)(unsigned int *aux);
-
- /*
- * Atomically enable interrupts and return to userspace. This
- * is only ever used to return to 32-bit processes; in a
- * 64-bit kernel, it's used for 32-on-64 compat processes, but
- * never native 64-bit processes. (Jump, not call.)
- */
- void (*irq_enable_sysexit)(void);
-
- /*
- * Switch to usermode gs and return to 64-bit usermode using
- * sysret. Only used in 64-bit kernels to return to 64-bit
- * processes. Usermode register state, including %rsp, must
- * already be restored.
- */
- void (*usergs_sysret64)(void);
-
- /*
- * Switch to usermode gs and return to 32-bit usermode using
- * sysret. Used to return to 32-on-64 compat processes.
- * Other usermode register state, including %esp, must already
- * be restored.
- */
- void (*usergs_sysret32)(void);
-
- /* Normal iret. Jump to this with the standard iret stack
- frame set up. */
- void (*iret)(void);
-
- void (*swapgs)(void);
-
- void (*start_context_switch)(struct task_struct *prev);
- void (*end_context_switch)(struct task_struct *next);
-};
-
-struct pv_irq_ops {
- void (*init_IRQ)(void);
-
- /*
- * Get/set interrupt state. save_fl and restore_fl are only
- * expected to use X86_EFLAGS_IF; all other bits
- * returned from save_fl are undefined, and may be ignored by
- * restore_fl.
- *
- * NOTE: These functions callers expect the callee to preserve
- * more registers than the standard C calling convention.
- */
- struct paravirt_callee_save save_fl;
- struct paravirt_callee_save restore_fl;
- struct paravirt_callee_save irq_disable;
- struct paravirt_callee_save irq_enable;
-
- void (*safe_halt)(void);
- void (*halt)(void);
-
-#ifdef CONFIG_X86_64
- void (*adjust_exception_frame)(void);
-#endif
-};
-
-struct pv_apic_ops {
-#ifdef CONFIG_X86_LOCAL_APIC
- void (*setup_boot_clock)(void);
- void (*setup_secondary_clock)(void);
-
- void (*startup_ipi_hook)(int phys_apicid,
- unsigned long start_eip,
- unsigned long start_esp);
-#endif
-};
-
-struct pv_mmu_ops {
- /*
- * Called before/after init_mm pagetable setup. setup_start
- * may reset %cr3, and may pre-install parts of the pagetable;
- * pagetable setup is expected to preserve any existing
- * mapping.
- */
- void (*pagetable_setup_start)(pgd_t *pgd_base);
- void (*pagetable_setup_done)(pgd_t *pgd_base);
-
- unsigned long (*read_cr2)(void);
- void (*write_cr2)(unsigned long);
-
- unsigned long (*read_cr3)(void);
- void (*write_cr3)(unsigned long);
-
- /*
- * Hooks for intercepting the creation/use/destruction of an
- * mm_struct.
- */
- void (*activate_mm)(struct mm_struct *prev,
- struct mm_struct *next);
- void (*dup_mmap)(struct mm_struct *oldmm,
- struct mm_struct *mm);
- void (*exit_mmap)(struct mm_struct *mm);
-
-
- /* TLB operations */
- void (*flush_tlb_user)(void);
- void (*flush_tlb_kernel)(void);
- void (*flush_tlb_single)(unsigned long addr);
- void (*flush_tlb_others)(const struct cpumask *cpus,
- struct mm_struct *mm,
- unsigned long va);
-
- /* Hooks for allocating and freeing a pagetable top-level */
- int (*pgd_alloc)(struct mm_struct *mm);
- void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
-
- /*
- * Hooks for allocating/releasing pagetable pages when they're
- * attached to a pagetable
- */
- void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
- void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
- void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
- void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
- void (*release_pte)(unsigned long pfn);
- void (*release_pmd)(unsigned long pfn);
- void (*release_pud)(unsigned long pfn);
-
- /* Pagetable manipulation functions */
- void (*set_pte)(pte_t *ptep, pte_t pteval);
- void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval);
- void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
- void (*pte_update)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
- void (*pte_update_defer)(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
-
- pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
- void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-
- struct paravirt_callee_save pte_val;
- struct paravirt_callee_save make_pte;
-
- struct paravirt_callee_save pgd_val;
- struct paravirt_callee_save make_pgd;
-
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
- void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
- void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
- void (*pmd_clear)(pmd_t *pmdp);
-
-#endif /* CONFIG_X86_PAE */
-
- void (*set_pud)(pud_t *pudp, pud_t pudval);
-
- struct paravirt_callee_save pmd_val;
- struct paravirt_callee_save make_pmd;
-
-#if PAGETABLE_LEVELS == 4
- struct paravirt_callee_save pud_val;
- struct paravirt_callee_save make_pud;
-
- void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-#endif /* PAGETABLE_LEVELS == 4 */
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_HIGHPTE
- void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
- struct pv_lazy_ops lazy_mode;
-
- /* dom0 ops */
-
- /* Sometimes the physical address is a pfn, and sometimes it's
- an mfn. We can tell which is which from the index. */
- void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
- phys_addr_t phys, pgprot_t flags);
-};
-
-struct raw_spinlock;
-struct pv_lock_ops {
- int (*spin_is_locked)(struct raw_spinlock *lock);
- int (*spin_is_contended)(struct raw_spinlock *lock);
- void (*spin_lock)(struct raw_spinlock *lock);
- void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct raw_spinlock *lock);
- void (*spin_unlock)(struct raw_spinlock *lock);
-};
-
-/* This contains all the paravirt structures: we get a convenient
- * number for each function using the offset which we use to indicate
- * what to patch. */
-struct paravirt_patch_template {
- struct pv_init_ops pv_init_ops;
- struct pv_time_ops pv_time_ops;
- struct pv_cpu_ops pv_cpu_ops;
- struct pv_irq_ops pv_irq_ops;
- struct pv_apic_ops pv_apic_ops;
- struct pv_mmu_ops pv_mmu_ops;
- struct pv_lock_ops pv_lock_ops;
-};
-
-extern struct pv_info pv_info;
-extern struct pv_init_ops pv_init_ops;
-extern struct pv_time_ops pv_time_ops;
-extern struct pv_cpu_ops pv_cpu_ops;
-extern struct pv_irq_ops pv_irq_ops;
-extern struct pv_apic_ops pv_apic_ops;
-extern struct pv_mmu_ops pv_mmu_ops;
-extern struct pv_lock_ops pv_lock_ops;
-
-#define PARAVIRT_PATCH(x) \
- (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
-
-#define paravirt_type(op) \
- [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
- [paravirt_opptr] "i" (&(op))
-#define paravirt_clobber(clobber) \
- [paravirt_clobber] "i" (clobber)
-
-/*
- * Generate some code, and mark it as patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type, clobber) \
- "771:\n\t" insn_string "\n" "772:\n" \
- ".pushsection .parainstructions,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR " 771b\n" \
- " .byte " type "\n" \
- " .byte 772b-771b\n" \
- " .short " clobber "\n" \
- ".popsection\n"
-
-/* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string) \
- _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
-
-/* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code) \
- extern const char start_##ops##_##name[], end_##ops##_##name[]; \
- asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
-
-unsigned paravirt_patch_nop(void);
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ignore(unsigned len);
-unsigned paravirt_patch_call(void *insnbuf,
- const void *target, u16 tgt_clobbers,
- unsigned long addr, u16 site_clobbers,
- unsigned len);
-unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
- unsigned long addr, unsigned len);
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
- unsigned long addr, unsigned len);
-
-unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
- const char *start, const char *end);
-
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
- unsigned long addr, unsigned len);
-
-int paravirt_disable_iospace(void);
-
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
-#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
-
-/*
- * These macros are intended to wrap calls through one of the paravirt
- * ops structs, so that they can be later identified and patched at
- * runtime.
- *
- * Normally, a call to a pv_op function is a simple indirect call:
- * (pv_op_struct.operations)(args...).
- *
- * Unfortunately, this is a relatively slow operation for modern CPUs,
- * because it cannot necessarily determine what the destination
- * address is. In this case, the address is a runtime constant, so at
- * the very least we can patch the call to be a simple direct call, or
- * ideally, patch an inline implementation into the callsite. (Direct
- * calls are essentially free, because the call and return addresses
- * are completely predictable.)
- *
- * For i386, these macros rely on the standard gcc "regparm(3)" calling
- * convention, in which the first three arguments are placed in %eax,
- * %edx, %ecx (in that order), and the remaining arguments are placed
- * on the stack. All caller-save registers (eax,edx,ecx) are expected
- * to be modified (either clobbered or used for return values).
- * X86_64, on the other hand, already specifies a register-based calling
- * convention, returning in %rax, with parameters passed in %rdi, %rsi,
- * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
- * special handling for dealing with 4 arguments, unlike i386.
- * However, x86_64 also has to clobber all caller-saved registers, which
- * unfortunately amounts to quite a few (r8 - r11).
- *
- * The call instruction itself is marked by placing its start address
- * and size into the .parainstructions section, so that
- * apply_paravirt() in arch/i386/kernel/alternative.c can do the
- * appropriate patching under the control of the backend pv_init_ops
- * implementation.
- *
- * Unfortunately there's no way to get gcc to generate the args setup
- * for the call, and then allow the call itself to be generated by an
- * inline asm. Because of this, we must do the complete arg setup and
- * return value handling from within these macros. This is fairly
- * cumbersome.
- *
- * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
- * It could be extended to more arguments, but there would be little
- * to be gained from that. For each number of arguments, there are
- * the two VCALL and CALL variants for void and non-void functions.
- *
- * When there is a return value, the invoker of the macro must specify
- * the return type. The macro then uses sizeof() on that type to
- * determine whether it's a 32- or 64-bit value, and places the return
- * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
- * 64-bit). For x86_64 machines, it just returns in %rax regardless of
- * the return value size.
- *
- * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
- * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments,
- * in low,high order.
- *
- * Small structures are passed and returned in registers. The macro
- * calling convention can't directly deal with this, so the wrapper
- * functions must do this.
- *
- * These PVOP_* macros are only defined within this header. This
- * means that all uses must be wrapped in inline functions. This also
- * makes sure the incoming and outgoing types are always correct.
- */
-#ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS \
- unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
-
-#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
-#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
-#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
-
-#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
- "=c" (__ecx)
-#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
-
-#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
-#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
-
-#define EXTRA_CLOBBERS
-#define VEXTRA_CLOBBERS
-#else /* CONFIG_X86_64 */
-#define PVOP_VCALL_ARGS \
- unsigned long __edi = __edi, __esi = __esi, \
- __edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
-
-#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
-#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
-#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
-#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
-
-#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
- "=S" (__esi), "=d" (__edx), \
- "=c" (__ecx)
-#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
-
-#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
-#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
-
-#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
-#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
-#endif /* CONFIG_X86_32 */
-
-#ifdef CONFIG_PARAVIRT_DEBUG
-#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
-#else
-#define PVOP_TEST_NULL(op) ((void)op)
-#endif
-
-#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
- pre, post, ...) \
- ({ \
- rettype __ret; \
- PVOP_CALL_ARGS; \
- PVOP_TEST_NULL(op); \
- /* This is 32-bit specific, but is okay in 64-bit */ \
- /* since this condition will never hold */ \
- if (sizeof(rettype) > sizeof(unsigned long)) { \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
- : call_clbr \
- : paravirt_type(op), \
- paravirt_clobber(clbr), \
- ##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- __ret = (rettype)((((u64)__edx) << 32) | __eax); \
- } else { \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
- : call_clbr \
- : paravirt_type(op), \
- paravirt_clobber(clbr), \
- ##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- __ret = (rettype)__eax; \
- } \
- __ret; \
- })
-
-#define __PVOP_CALL(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
- EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
-
-#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
- PVOP_CALLEE_CLOBBERS, , \
- pre, post, ##__VA_ARGS__)
-
-
-#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
- ({ \
- PVOP_VCALL_ARGS; \
- PVOP_TEST_NULL(op); \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
- : call_clbr \
- : paravirt_type(op), \
- paravirt_clobber(clbr), \
- ##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- })
-
-#define __PVOP_VCALL(op, pre, post, ...) \
- ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
- VEXTRA_CLOBBERS, \
- pre, post, ##__VA_ARGS__)
-
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
- PVOP_VCALLEE_CLOBBERS, , \
- pre, post, ##__VA_ARGS__)
-
-
-
-#define PVOP_CALL0(rettype, op) \
- __PVOP_CALL(rettype, op, "", "")
-#define PVOP_VCALL0(op) \
- __PVOP_VCALL(op, "", "")
-
-#define PVOP_CALLEE0(rettype, op) \
- __PVOP_CALLEESAVE(rettype, op, "", "")
-#define PVOP_VCALLEE0(op) \
- __PVOP_VCALLEESAVE(op, "", "")
-
-
-#define PVOP_CALL1(rettype, op, arg1) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
-#define PVOP_VCALL1(op, arg1) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
-
-#define PVOP_CALLEE1(rettype, op, arg1) \
- __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
-#define PVOP_VCALLEE1(op, arg1) \
- __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
-
-
-#define PVOP_CALL2(rettype, op, arg1, arg2) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALL2(op, arg1, arg2) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-
-#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
- __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALLEE2(op, arg1, arg2) \
- __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-
-
-#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
-#define PVOP_VCALL3(op, arg1, arg2, arg3) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
-
-/* This is the only difference in x86_64. We can make it much simpler */
-#ifdef CONFIG_X86_32
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
- __PVOP_CALL(rettype, op, \
- "push %[_arg4];", "lea 4(%%esp),%%esp;", \
- PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
- PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
- __PVOP_VCALL(op, \
- "push %[_arg4];", "lea 4(%%esp),%%esp;", \
- "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
- "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
-#else
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
- __PVOP_CALL(rettype, op, "", "", \
- PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
- PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
- __PVOP_VCALL(op, "", "", \
- PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
- PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#endif
static inline int paravirt_enabled(void)
{
@@ -820,15 +142,22 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
-static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+
+static inline int paravirt_rdmsr_regs(u32 *regs)
{
- return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+ return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}
+
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}
+static inline int paravirt_wrmsr_regs(u32 *regs)
+{
+ return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
+}
+
/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2) \
do { \
@@ -862,6 +191,9 @@ do { \
_err; \
})
+#define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
+#define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)
+
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
int err;
@@ -871,12 +203,31 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
+ u32 gprs[8] = { 0 };
int err;
- *p = paravirt_read_msr_amd(msr, &err);
+ gprs[1] = msr;
+ gprs[7] = 0x9c5a203a;
+
+ err = paravirt_rdmsr_regs(gprs);
+
+ *p = gprs[0] | ((u64)gprs[2] << 32);
+
return err;
}
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+ u32 gprs[8] = { 0 };
+
+ gprs[0] = (u32)val;
+ gprs[1] = msr;
+ gprs[2] = val >> 32;
+ gprs[7] = 0x9c5a203a;
+
+ return paravirt_wrmsr_regs(gprs);
+}
+
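+/*
+ * Illustrative usage sketch (not part of this change): the _amd_safe
+ * helpers above pass a whole GPR block, with the MSR number in gprs[1]
+ * and the AMD magic value 0x9c5a203a in gprs[7].  A caller such as the
+ * AMD CPU setup code might use them roughly like:
+ *
+ *     u64 val;
+ *
+ *     if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+ *             val |= 1ULL << 54;
+ *             wrmsrl_amd_safe(0xc0011005, val);
+ *     }
+ *
+ * MSR 0xc0011005 is only an example here; any MSR gated by the AMD
+ * passcode is accessed the same way.
+ */
+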
static inline u64 paravirt_read_tsc(void)
{
return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
@@ -1393,20 +744,6 @@ static inline void pmd_clear(pmd_t *pmdp)
}
#endif /* CONFIG_X86_PAE */
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
-void paravirt_start_context_switch(struct task_struct *prev);
-void paravirt_end_context_switch(struct task_struct *next);
-
-void paravirt_enter_lazy_mmu(void);
-void paravirt_leave_lazy_mmu(void);
-
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
@@ -1437,12 +774,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
pv_mmu_ops.set_fixmap(idx, phys, flags);
}
-void _paravirt_nop(void);
-u32 _paravirt_ident_32(u32);
-u64 _paravirt_ident_64(u64);
-
-#define paravirt_nop ((void *)_paravirt_nop)
-
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
@@ -1479,17 +810,6 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
#endif
-/* These all sit in the .parainstructions section to tell us what to patch. */
-struct paravirt_patch_site {
- u8 *instr; /* original instructions */
- u8 instrtype; /* type of this instruction */
- u8 len; /* length of original instruction */
- u16 clobbers; /* what registers you may clobber */
-};
-
-extern struct paravirt_patch_site __parainstructions[],
- __parainstructions_end[];
-
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
new file mode 100644
index 000000000000..25402d0006e7
--- /dev/null
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -0,0 +1,721 @@
+#ifndef _ASM_X86_PARAVIRT_TYPES_H
+#define _ASM_X86_PARAVIRT_TYPES_H
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0
+#define CLBR_EAX (1 << 0)
+#define CLBR_ECX (1 << 1)
+#define CLBR_EDX (1 << 2)
+#define CLBR_EDI (1 << 3)
+
+#ifdef CONFIG_X86_32
+/* CLBR_ANY should match all regs the platform has. For i386, that's just it */
+#define CLBR_ANY ((1 << 4) - 1)
+
+#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
+#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
+#define CLBR_SCRATCH (0)
+#else
+#define CLBR_RAX CLBR_EAX
+#define CLBR_RCX CLBR_ECX
+#define CLBR_RDX CLBR_EDX
+#define CLBR_RDI CLBR_EDI
+#define CLBR_RSI (1 << 4)
+#define CLBR_R8 (1 << 5)
+#define CLBR_R9 (1 << 6)
+#define CLBR_R10 (1 << 7)
+#define CLBR_R11 (1 << 8)
+
+#define CLBR_ANY ((1 << 9) - 1)
+
+#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
+ CLBR_RCX | CLBR_R8 | CLBR_R9)
+#define CLBR_RET_REG (CLBR_RAX)
+#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
+
+#endif /* X86_64 */
+
+#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/desc_defs.h>
+#include <asm/kmap_types.h>
+
+struct page;
+struct thread_struct;
+struct desc_ptr;
+struct tss_struct;
+struct mm_struct;
+struct desc_struct;
+struct task_struct;
+struct cpumask;
+
+/*
+ * Wrapper type for pointers to code which uses the non-standard
+ * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
+ */
+struct paravirt_callee_save {
+ void *func;
+};
+
+/* general info */
+struct pv_info {
+ unsigned int kernel_rpl;
+ int shared_kernel_pmd;
+ int paravirt_enabled;
+ const char *name;
+};
+
+struct pv_init_ops {
+ /*
+ * Patch may replace one of the defined code sequences with
+ * arbitrary code, subject to the same register constraints.
+ * This generally means the code is not free to clobber any
+ * registers other than EAX. The patch function should return
+ * the number of bytes of code generated, as we nop pad the
+ * rest in generic code.
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned long addr, unsigned len);
+
+ /* Basic arch-specific setup */
+ void (*arch_setup)(void);
+ char *(*memory_setup)(void);
+ void (*post_allocator_init)(void);
+
+ /* Print a banner to identify the environment */
+ void (*banner)(void);
+};
+
+
+struct pv_lazy_ops {
+ /* Set deferred update mode, used for batching operations. */
+ void (*enter)(void);
+ void (*leave)(void);
+};
+
+struct pv_time_ops {
+ void (*time_init)(void);
+
+ /* Get and set time of day */
+ unsigned long (*get_wallclock)(void);
+ int (*set_wallclock)(unsigned long);
+
+ unsigned long long (*sched_clock)(void);
+ unsigned long (*get_tsc_khz)(void);
+};
+
+struct pv_cpu_ops {
+ /* hooks for various privileged instructions */
+ unsigned long (*get_debugreg)(int regno);
+ void (*set_debugreg)(int regno, unsigned long value);
+
+ void (*clts)(void);
+
+ unsigned long (*read_cr0)(void);
+ void (*write_cr0)(unsigned long);
+
+ unsigned long (*read_cr4_safe)(void);
+ unsigned long (*read_cr4)(void);
+ void (*write_cr4)(unsigned long);
+
+#ifdef CONFIG_X86_64
+ unsigned long (*read_cr8)(void);
+ void (*write_cr8)(unsigned long);
+#endif
+
+ /* Segment descriptor handling */
+ void (*load_tr_desc)(void);
+ void (*load_gdt)(const struct desc_ptr *);
+ void (*load_idt)(const struct desc_ptr *);
+ void (*store_gdt)(struct desc_ptr *);
+ void (*store_idt)(struct desc_ptr *);
+ void (*set_ldt)(const void *desc, unsigned entries);
+ unsigned long (*store_tr)(void);
+ void (*load_tls)(struct thread_struct *t, unsigned int cpu);
+#ifdef CONFIG_X86_64
+ void (*load_gs_index)(unsigned int idx);
+#endif
+ void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
+ const void *desc);
+ void (*write_gdt_entry)(struct desc_struct *,
+ int entrynum, const void *desc, int size);
+ void (*write_idt_entry)(gate_desc *,
+ int entrynum, const gate_desc *gate);
+ void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
+ void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
+
+ void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
+
+ void (*set_iopl_mask)(unsigned mask);
+
+ void (*wbinvd)(void);
+ void (*io_delay)(void);
+
+ /* cpuid emulation, mostly so that caps bits can be disabled */
+ void (*cpuid)(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx);
+
+ /* MSR, PMC and TSC operations.
+ err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
+ u64 (*read_msr)(unsigned int msr, int *err);
+ int (*rdmsr_regs)(u32 *regs);
+ int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+ int (*wrmsr_regs)(u32 *regs);
+
+ u64 (*read_tsc)(void);
+ u64 (*read_pmc)(int counter);
+ unsigned long long (*read_tscp)(unsigned int *aux);
+
+ /*
+ * Atomically enable interrupts and return to userspace. This
+ * is only ever used to return to 32-bit processes; in a
+ * 64-bit kernel, it's used for 32-on-64 compat processes, but
+ * never native 64-bit processes. (Jump, not call.)
+ */
+ void (*irq_enable_sysexit)(void);
+
+ /*
+ * Switch to usermode gs and return to 64-bit usermode using
+ * sysret. Only used in 64-bit kernels to return to 64-bit
+ * processes. Usermode register state, including %rsp, must
+ * already be restored.
+ */
+ void (*usergs_sysret64)(void);
+
+ /*
+ * Switch to usermode gs and return to 32-bit usermode using
+ * sysret. Used to return to 32-on-64 compat processes.
+ * Other usermode register state, including %esp, must already
+ * be restored.
+ */
+ void (*usergs_sysret32)(void);
+
+ /* Normal iret. Jump to this with the standard iret stack
+ frame set up. */
+ void (*iret)(void);
+
+ void (*swapgs)(void);
+
+ void (*start_context_switch)(struct task_struct *prev);
+ void (*end_context_switch)(struct task_struct *next);
+};
+
+struct pv_irq_ops {
+ void (*init_IRQ)(void);
+
+ /*
+ * Get/set interrupt state. save_fl and restore_fl are only
+ * expected to use X86_EFLAGS_IF; all other bits
+ * returned from save_fl are undefined, and may be ignored by
+ * restore_fl.
+ *
+ * NOTE: Callers of these functions expect the callee to preserve
+ * more registers than the standard C calling convention requires.
+ */
+ struct paravirt_callee_save save_fl;
+ struct paravirt_callee_save restore_fl;
+ struct paravirt_callee_save irq_disable;
+ struct paravirt_callee_save irq_enable;
+
+ void (*safe_halt)(void);
+ void (*halt)(void);
+
+#ifdef CONFIG_X86_64
+ void (*adjust_exception_frame)(void);
+#endif
+};
+
+struct pv_apic_ops {
+#ifdef CONFIG_X86_LOCAL_APIC
+ void (*setup_boot_clock)(void);
+ void (*setup_secondary_clock)(void);
+
+ void (*startup_ipi_hook)(int phys_apicid,
+ unsigned long start_eip,
+ unsigned long start_esp);
+#endif
+};
+
+struct pv_mmu_ops {
+ /*
+ * Called before/after init_mm pagetable setup. setup_start
+ * may reset %cr3, and may pre-install parts of the pagetable;
+ * pagetable setup is expected to preserve any existing
+ * mapping.
+ */
+ void (*pagetable_setup_start)(pgd_t *pgd_base);
+ void (*pagetable_setup_done)(pgd_t *pgd_base);
+
+ unsigned long (*read_cr2)(void);
+ void (*write_cr2)(unsigned long);
+
+ unsigned long (*read_cr3)(void);
+ void (*write_cr3)(unsigned long);
+
+ /*
+ * Hooks for intercepting the creation/use/destruction of an
+ * mm_struct.
+ */
+ void (*activate_mm)(struct mm_struct *prev,
+ struct mm_struct *next);
+ void (*dup_mmap)(struct mm_struct *oldmm,
+ struct mm_struct *mm);
+ void (*exit_mmap)(struct mm_struct *mm);
+
+
+ /* TLB operations */
+ void (*flush_tlb_user)(void);
+ void (*flush_tlb_kernel)(void);
+ void (*flush_tlb_single)(unsigned long addr);
+ void (*flush_tlb_others)(const struct cpumask *cpus,
+ struct mm_struct *mm,
+ unsigned long va);
+
+ /* Hooks for allocating and freeing a pagetable top-level */
+ int (*pgd_alloc)(struct mm_struct *mm);
+ void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
+
+ /*
+ * Hooks for allocating/releasing pagetable pages when they're
+ * attached to a pagetable
+ */
+ void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+ void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+ void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+ void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+ void (*release_pte)(unsigned long pfn);
+ void (*release_pmd)(unsigned long pfn);
+ void (*release_pud)(unsigned long pfn);
+
+ /* Pagetable manipulation functions */
+ void (*set_pte)(pte_t *ptep, pte_t pteval);
+ void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval);
+ void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+ void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*pte_update_defer)(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+ pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+ struct paravirt_callee_save pte_val;
+ struct paravirt_callee_save make_pte;
+
+ struct paravirt_callee_save pgd_val;
+ struct paravirt_callee_save make_pgd;
+
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+ void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
+ void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*pmd_clear)(pmd_t *pmdp);
+
+#endif /* CONFIG_X86_PAE */
+
+ void (*set_pud)(pud_t *pudp, pud_t pudval);
+
+ struct paravirt_callee_save pmd_val;
+ struct paravirt_callee_save make_pmd;
+
+#if PAGETABLE_LEVELS == 4
+ struct paravirt_callee_save pud_val;
+ struct paravirt_callee_save make_pud;
+
+ void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
+#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* PAGETABLE_LEVELS >= 3 */
+
+#ifdef CONFIG_HIGHPTE
+ void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
+#endif
+
+ struct pv_lazy_ops lazy_mode;
+
+ /* dom0 ops */
+
+ /* Sometimes the physical address is a pfn, and sometimes it's
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
+};
+
+struct raw_spinlock;
+struct pv_lock_ops {
+ int (*spin_is_locked)(struct raw_spinlock *lock);
+ int (*spin_is_contended)(struct raw_spinlock *lock);
+ void (*spin_lock)(struct raw_spinlock *lock);
+ void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct raw_spinlock *lock);
+ void (*spin_unlock)(struct raw_spinlock *lock);
+};
+
+/* This contains all the paravirt structures: we get a convenient
+ * number for each function using the offset which we use to indicate
+ * what to patch. */
+struct paravirt_patch_template {
+ struct pv_init_ops pv_init_ops;
+ struct pv_time_ops pv_time_ops;
+ struct pv_cpu_ops pv_cpu_ops;
+ struct pv_irq_ops pv_irq_ops;
+ struct pv_apic_ops pv_apic_ops;
+ struct pv_mmu_ops pv_mmu_ops;
+ struct pv_lock_ops pv_lock_ops;
+};
+
+extern struct pv_info pv_info;
+extern struct pv_init_ops pv_init_ops;
+extern struct pv_time_ops pv_time_ops;
+extern struct pv_cpu_ops pv_cpu_ops;
+extern struct pv_irq_ops pv_irq_ops;
+extern struct pv_apic_ops pv_apic_ops;
+extern struct pv_mmu_ops pv_mmu_ops;
+extern struct pv_lock_ops pv_lock_ops;
+
+#define PARAVIRT_PATCH(x) \
+ (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
+
+#define paravirt_type(op) \
+ [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
+ [paravirt_opptr] "i" (&(op))
+#define paravirt_clobber(clobber) \
+ [paravirt_clobber] "i" (clobber)
+
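+/*
+ * For example (sketch, not new code in this patch):
+ *
+ *     PARAVIRT_PATCH(pv_cpu_ops.iret)
+ *
+ * evaluates to the pointer-sized index of the iret slot within
+ * struct paravirt_patch_template.  The patching code can map that type
+ * number back to the current op with pointer arithmetic along the
+ * lines of *((void **)&template + type).
+ */
+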
+/*
+ * Generate some code, and mark it as patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber) \
+ "771:\n\t" insn_string "\n" "772:\n" \
+ ".pushsection .parainstructions,\"a\"\n" \
+ _ASM_ALIGN "\n" \
+ _ASM_PTR " 771b\n" \
+ " .byte " type "\n" \
+ " .byte 772b-771b\n" \
+ " .short " clobber "\n" \
+ ".popsection\n"
+
+/* Generate patchable code, with the default asm parameters. */
+#define paravirt_alt(insn_string) \
+ _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
+
+/* Simple instruction patching code. */
+#define DEF_NATIVE(ops, name, code) \
+ extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+ asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+
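+/*
+ * Illustrative sketch (not added by this patch): the native backends
+ * use DEF_NATIVE to capture the raw instruction sequence for an op,
+ * e.g.
+ *
+ *     DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
+ *
+ * which provides start_pv_irq_ops_irq_disable[] and
+ * end_pv_irq_ops_irq_disable[] so that paravirt_patch_insns() can copy
+ * the "cli" directly over a patch site when it fits.
+ */
+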
+unsigned paravirt_patch_nop(void);
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ignore(unsigned len);
+unsigned paravirt_patch_call(void *insnbuf,
+ const void *target, u16 tgt_clobbers,
+ unsigned long addr, u16 site_clobbers,
+ unsigned len);
+unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+ unsigned long addr, unsigned len);
+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+ unsigned long addr, unsigned len);
+
+unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+ const char *start, const char *end);
+
+unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+ unsigned long addr, unsigned len);
+
+int paravirt_disable_iospace(void);
+
+/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
+
+/*
+ * These macros are intended to wrap calls through one of the paravirt
+ * ops structs, so that they can be later identified and patched at
+ * runtime.
+ *
+ * Normally, a call to a pv_op function is a simple indirect call:
+ * (pv_op_struct.operations)(args...).
+ *
+ * Unfortunately, this is a relatively slow operation for modern CPUs,
+ * because it cannot necessarily determine what the destination
+ * address is. In this case, the address is a runtime constant, so at
+ * the very least we can patch the call to be a simple direct call, or
+ * ideally, patch an inline implementation into the callsite. (Direct
+ * calls are essentially free, because the call and return addresses
+ * are completely predictable.)
+ *
+ * For i386, these macros rely on the standard gcc "regparm(3)" calling
+ * convention, in which the first three arguments are placed in %eax,
+ * %edx, %ecx (in that order), and the remaining arguments are placed
+ * on the stack. All caller-save registers (eax,edx,ecx) are expected
+ * to be modified (either clobbered or used for return values).
+ * X86_64, on the other hand, already specifies a register-based calling
+ * convention, returning in %rax, with parameters passed in %rdi, %rsi,
+ * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
+ * special handling for dealing with 4 arguments, unlike i386.
+ * However, x86_64 also has to clobber all caller-saved registers, which
+ * unfortunately amounts to quite a few (r8 - r11).
+ *
+ * The call instruction itself is marked by placing its start address
+ * and size into the .parainstructions section, so that
+ * apply_paravirt() in arch/i386/kernel/alternative.c can do the
+ * appropriate patching under the control of the backend pv_init_ops
+ * implementation.
+ *
+ * Unfortunately there's no way to get gcc to generate the args setup
+ * for the call, and then allow the call itself to be generated by an
+ * inline asm. Because of this, we must do the complete arg setup and
+ * return value handling from within these macros. This is fairly
+ * cumbersome.
+ *
+ * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
+ * It could be extended to more arguments, but there would be little
+ * to be gained from that. For each number of arguments, there are
+ * the two VCALL and CALL variants for void and non-void functions.
+ *
+ * When there is a return value, the invoker of the macro must specify
+ * the return type. The macro then uses sizeof() on that type to
+ * determine whether it's a 32- or 64-bit value, and places the return
+ * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
+ * 64-bit). For x86_64 machines, it just returns in %rax regardless of
+ * the return value size.
+ *
+ * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
+ * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments,
+ * in low,high order.
+ *
+ * Small structures are passed and returned in registers. The macro
+ * calling convention can't directly deal with this, so the wrapper
+ * functions must do this.
+ *
+ * These PVOP_* macros are only defined within this header. This
+ * means that all uses must be wrapped in inline functions. This also
+ * makes sure the incoming and outgoing types are always correct.
+ */
+#ifdef CONFIG_X86_32
+#define PVOP_VCALL_ARGS \
+ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
+#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
+
+#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
+ "=c" (__ecx)
+#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
+
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
+#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
+
+#define EXTRA_CLOBBERS
+#define VEXTRA_CLOBBERS
+#else /* CONFIG_X86_64 */
+#define PVOP_VCALL_ARGS \
+ unsigned long __edi = __edi, __esi = __esi, \
+ __edx = __edx, __ecx = __ecx
+#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
+
+#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
+
+#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
+ "=S" (__esi), "=d" (__edx), \
+ "=c" (__ecx)
+#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
+
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
+#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
+
+#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
+#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_PARAVIRT_DEBUG
+#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
+#else
+#define PVOP_TEST_NULL(op) ((void)op)
+#endif
+
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
+ pre, post, ...) \
+ ({ \
+ rettype __ret; \
+ PVOP_CALL_ARGS; \
+ PVOP_TEST_NULL(op); \
+ /* This is 32-bit specific, but is okay in 64-bit */ \
+ /* since this condition will never hold */ \
+ if (sizeof(rettype) > sizeof(unsigned long)) { \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ __ret = (rettype)((((u64)__edx) << 32) | __eax); \
+ } else { \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ __ret = (rettype)__eax; \
+ } \
+ __ret; \
+ })
+
+#define __PVOP_CALL(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
+ EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
+ PVOP_CALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
+ ({ \
+ PVOP_VCALL_ARGS; \
+ PVOP_TEST_NULL(op); \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ })
+
+#define __PVOP_VCALL(op, pre, post, ...) \
+ ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
+ VEXTRA_CLOBBERS, \
+ pre, post, ##__VA_ARGS__)
+
+#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
+ PVOP_VCALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+
+
+#define PVOP_CALL0(rettype, op) \
+ __PVOP_CALL(rettype, op, "", "")
+#define PVOP_VCALL0(op) \
+ __PVOP_VCALL(op, "", "")
+
+#define PVOP_CALLEE0(rettype, op) \
+ __PVOP_CALLEESAVE(rettype, op, "", "")
+#define PVOP_VCALLEE0(op) \
+ __PVOP_VCALLEESAVE(op, "", "")
+
+
+#define PVOP_CALL1(rettype, op, arg1) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALL1(op, arg1) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+#define PVOP_CALLEE1(rettype, op, arg1) \
+ __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALLEE1(op, arg1) \
+ __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+
+
+#define PVOP_CALL2(rettype, op, arg1, arg2) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALL2(op, arg1, arg2) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
+ __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALLEE2(op, arg1, arg2) \
+ __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+
+#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
+#define PVOP_VCALL3(op, arg1, arg2, arg3) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
+
+/* This is the only difference in x86_64. We can make it much simpler */
+#ifdef CONFIG_X86_32
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
+ __PVOP_CALL(rettype, op, \
+ "push %[_arg4];", "lea 4(%%esp),%%esp;", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
+ __PVOP_VCALL(op, \
+ "push %[_arg4];", "lea 4(%%esp),%%esp;", \
+ "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
+ "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+#else
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
+ __PVOP_CALL(rettype, op, "", "", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
+ __PVOP_VCALL(op, "", "", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
+#endif
+
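+/*
+ * Illustrative sketch (not part of this patch): as noted above, the
+ * PVOP_* macros are only ever used from inline wrappers, typically in
+ * paravirt.h, along the lines of:
+ *
+ *     static inline unsigned long read_cr2(void)
+ *     {
+ *             return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
+ *     }
+ *
+ * so that the call site can later be patched into a direct call or an
+ * inlined native instruction sequence.
+ */
+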
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+ PARAVIRT_LAZY_NONE,
+ PARAVIRT_LAZY_MMU,
+ PARAVIRT_LAZY_CPU,
+};
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
+void paravirt_start_context_switch(struct task_struct *prev);
+void paravirt_end_context_switch(struct task_struct *next);
+
+void paravirt_enter_lazy_mmu(void);
+void paravirt_leave_lazy_mmu(void);
+
+void _paravirt_nop(void);
+u32 _paravirt_ident_32(u32);
+u64 _paravirt_ident_64(u64);
+
+#define paravirt_nop ((void *)_paravirt_nop)
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch_site {
+ u8 *instr; /* original instructions */
+ u8 instrtype; /* type of this instruction */
+ u8 len; /* length of original instruction */
+ u16 clobbers; /* what registers you may clobber */
+};
+
+extern struct paravirt_patch_site __parainstructions[],
+ __parainstructions_end[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_PARAVIRT_TYPES_H */
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index fa64e401589d..e7b7c938ae27 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -84,6 +84,16 @@ union cpuid10_edx {
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
+/*
+ * We model BTS tracing as another fixed-mode PMC.
+ *
+ * We choose a value in the middle of the fixed counter range, since lower
+ * values are used by actual fixed counters and higher values are used
+ * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
+ */
+#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
+
+
#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
extern void perf_counters_lapic_init(void);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c7768269b1cf..e08ea043e085 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -403,7 +403,17 @@ extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
-DECLARE_PER_CPU(unsigned long, stack_canary);
+/*
+ * Make sure the stack canary segment base is cache-aligned:
+ * "For Intel Atom processors, avoid non zero segment base address
+ * that is not aligned to cache line boundary at all cost."
+ * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
+ */
+struct stack_canary {
+ char __pad[20]; /* canary at %gs:20 */
+ unsigned long canary;
+};
+DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif /* X86_64 */
@@ -703,13 +713,23 @@ static inline void cpu_relax(void)
rep_nop();
}
-/* Stop speculative execution: */
+/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
int tmp;
- asm volatile("cpuid" : "=a" (tmp) : "0" (1)
- : "ebx", "ecx", "edx", "memory");
+#if defined(CONFIG_M386) || defined(CONFIG_M486)
+ if (boot_cpu_data.x86 < 5)
+ /* There is no speculative execution.
+ * jmp is a barrier to prefetching. */
+ asm volatile("jmp 1f\n1:\n" ::: "memory");
+ else
+#endif
+ /* cpuid is a barrier to speculative execution.
+ * Prefetched instructions are automatically
+ * invalidated when modified. */
+ asm volatile("cpuid" : "=a" (tmp) : "0" (1)
+ : "ebx", "ecx", "edx", "memory");
}
static inline void __monitor(const void *eax, unsigned long ecx,
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 263d397d2eef..75af592677ec 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,33 +1,8 @@
#ifndef _ASM_X86_SCATTERLIST_H
#define _ASM_X86_SCATTERLIST_H
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
- dma_addr_t dma_address;
- unsigned int dma_length;
-};
-
-#define ARCH_HAS_SG_CHAIN
#define ISA_DMA_THRESHOLD (0x00ffffff)
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#ifdef CONFIG_X86_32
-# define sg_dma_len(sg) ((sg)->length)
-#else
-# define sg_dma_len(sg) ((sg)->dma_length)
-#endif
+#include <asm-generic/scatterlist.h>
#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h
index b51413b74971..83c05fc2de38 100644
--- a/arch/x86/include/asm/shmbuf.h
+++ b/arch/x86/include/asm/shmbuf.h
@@ -1,51 +1 @@
-#ifndef _ASM_X86_SHMBUF_H
-#define _ASM_X86_SHMBUF_H
-
-/*
- * The shmid64_ds structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space on 32 bit is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- *
- * Pad space on 64 bit is left for:
- * - 2 miscellaneous 64-bit values
- */
-
-struct shmid64_ds {
- struct ipc64_perm shm_perm; /* operation perms */
- size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
-#ifdef __i386__
- unsigned long __unused1;
-#endif
- __kernel_time_t shm_dtime; /* last detach time */
-#ifdef __i386__
- unsigned long __unused2;
-#endif
- __kernel_time_t shm_ctime; /* last change time */
-#ifdef __i386__
- unsigned long __unused3;
-#endif
- __kernel_pid_t shm_cpid; /* pid of creator */
- __kernel_pid_t shm_lpid; /* pid of last operator */
- unsigned long shm_nattch; /* no. of current attaches */
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-struct shminfo64 {
- unsigned long shmmax;
- unsigned long shmmin;
- unsigned long shmmni;
- unsigned long shmseg;
- unsigned long shmall;
- unsigned long __unused1;
- unsigned long __unused2;
- unsigned long __unused3;
- unsigned long __unused4;
-};
-
-#endif /* _ASM_X86_SHMBUF_H */
+#include <asm-generic/shmbuf.h>
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h
index ca8bf2cd0ba9..6b71384b9d8b 100644
--- a/arch/x86/include/asm/socket.h
+++ b/arch/x86/include/asm/socket.h
@@ -1,60 +1 @@
-#ifndef _ASM_X86_SOCKET_H
-#define _ASM_X86_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockopt(2) */
-#define SOL_SOCKET 1
-
-#define SO_DEBUG 1
-#define SO_REUSEADDR 2
-#define SO_TYPE 3
-#define SO_ERROR 4
-#define SO_DONTROUTE 5
-#define SO_BROADCAST 6
-#define SO_SNDBUF 7
-#define SO_RCVBUF 8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE 9
-#define SO_OOBINLINE 10
-#define SO_NO_CHECK 11
-#define SO_PRIORITY 12
-#define SO_LINGER 13
-#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
-#define SO_PASSCRED 16
-#define SO_PEERCRED 17
-#define SO_RCVLOWAT 18
-#define SO_SNDLOWAT 19
-#define SO_RCVTIMEO 20
-#define SO_SNDTIMEO 21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION 22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
-#define SO_SECURITY_ENCRYPTION_NETWORK 24
-
-#define SO_BINDTODEVICE 25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
-
-#define SO_PEERNAME 28
-#define SO_TIMESTAMP 29
-#define SCM_TIMESTAMP SO_TIMESTAMP
-
-#define SO_ACCEPTCONN 30
-
-#define SO_PEERSEC 31
-#define SO_PASSSEC 34
-#define SO_TIMESTAMPNS 35
-#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
-
-#define SO_MARK 36
-
-#define SO_TIMESTAMPING 37
-#define SCM_TIMESTAMPING SO_TIMESTAMPING
-
-#endif /* _ASM_X86_SOCKET_H */
+#include <asm-generic/socket.h>
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h
index 49cc72b5d3c9..def6d4746ee7 100644
--- a/arch/x86/include/asm/sockios.h
+++ b/arch/x86/include/asm/sockios.h
@@ -1,13 +1 @@
-#ifndef _ASM_X86_SOCKIOS_H
-#define _ASM_X86_SOCKIOS_H
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* _ASM_X86_SOCKIOS_H */
+#include <asm-generic/sockios.h>
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index c2d742c6e15f..157517763565 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -48,7 +48,7 @@
* head_32 for boot CPU and setup_per_cpu_areas() for others.
*/
#define GDT_STACK_CANARY_INIT \
- [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
/*
* Initialize the stackprotector canary value.
@@ -78,21 +78,19 @@ static __always_inline void boot_init_stack_canary(void)
#ifdef CONFIG_X86_64
percpu_write(irq_stack_union.stack_canary, canary);
#else
- percpu_write(stack_canary, canary);
+ percpu_write(stack_canary.canary, canary);
#endif
}
static inline void setup_stack_canary_segment(int cpu)
{
#ifdef CONFIG_X86_32
- unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
+ unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
struct desc_struct desc;
desc = gdt_table[GDT_ENTRY_STACK_CANARY];
- desc.base0 = canary & 0xffff;
- desc.base1 = (canary >> 16) & 0xff;
- desc.base2 = (canary >> 24) & 0xff;
+ set_desc_base(&desc, canary);
write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
#endif
}
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 643c59b4bc6e..f08f97374892 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
"movl %P[task_canary](%[next]), %%ebx\n\t" \
"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam \
- , [stack_canary] "=m" (per_cpu_var(stack_canary))
+ , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
#define __switch_canary_iparam \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
@@ -150,33 +150,6 @@ do { \
#endif
#ifdef __KERNEL__
-#define _set_base(addr, base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %%dl,%2\n\t" \
- "movb %%dh,%3" \
- :"=&d" (__pr) \
- :"m" (*((addr)+2)), \
- "m" (*((addr)+4)), \
- "m" (*((addr)+7)), \
- "0" (base) \
- ); } while (0)
-
-#define _set_limit(addr, limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
- "rorl $16,%%edx\n\t" \
- "movb %2,%%dh\n\t" \
- "andb $0xf0,%%dh\n\t" \
- "orb %%dh,%%dl\n\t" \
- "movb %%dl,%2" \
- :"=&d" (__lr) \
- :"m" (*(addr)), \
- "m" (*((addr)+6)), \
- "0" (limit) \
- ); } while (0)
-
-#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
-#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
extern void native_load_gs_index(unsigned);
diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h
index af1b70ea440f..3935b106de79 100644
--- a/arch/x86/include/asm/termbits.h
+++ b/arch/x86/include/asm/termbits.h
@@ -1,198 +1 @@
-#ifndef _ASM_X86_TERMBITS_H
-#define _ASM_X86_TERMBITS_H
-
-#include <linux/posix_types.h>
-
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
-typedef unsigned int tcflag_t;
-
-#define NCCS 19
-struct termios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
-};
-
-struct termios2 {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-struct ktermios {
- tcflag_t c_iflag; /* input mode flags */
- tcflag_t c_oflag; /* output mode flags */
- tcflag_t c_cflag; /* control mode flags */
- tcflag_t c_lflag; /* local mode flags */
- cc_t c_line; /* line discipline */
- cc_t c_cc[NCCS]; /* control characters */
- speed_t c_ispeed; /* input speed */
- speed_t c_ospeed; /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IUCLC 0001000
-#define IXON 0002000
-#define IXANY 0004000
-#define IXOFF 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
-
-/* c_oflag bits */
-#define OPOST 0000001
-#define OLCUC 0000002
-#define ONLCR 0000004
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-#define OFILL 0000100
-#define OFDEL 0000200
-#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
-#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
-#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
-#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
-#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
-#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
-
-/* c_cflag bit meaning */
-#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE 0000060
-#define CS5 0000000
-#define CS6 0000020
-#define CS7 0000040
-#define CS8 0000060
-#define CSTOPB 0000100
-#define CREAD 0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL 0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define BOTHER 0010000 /* non standard rate */
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
-#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
-
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG 0000001
-#define ICANON 0000002
-#define XCASE 0000004
-#define ECHO 0000010
-#define ECHOE 0000020
-#define ECHOK 0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL 0001000
-#define ECHOPRT 0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-
-/* tcflow() and TCXONC use these */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
-/* tcsetattr uses these */
-#define TCSANOW 0
-#define TCSADRAIN 1
-#define TCSAFLUSH 2
-
-#endif /* _ASM_X86_TERMBITS_H */
+#include <asm-generic/termbits.h>
diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h
index c4ee8056baca..280d78a9d966 100644
--- a/arch/x86/include/asm/termios.h
+++ b/arch/x86/include/asm/termios.h
@@ -1,114 +1 @@
-#ifndef _ASM_X86_TERMIOS_H
-#define _ASM_X86_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-
-struct winsize {
- unsigned short ws_row;
- unsigned short ws_col;
- unsigned short ws_xpixel;
- unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-#ifdef __KERNEL__
-
-#include <asm/uaccess.h>
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
- unsigned short __tmp; \
- get_user(__tmp,&(termio)->x); \
- *(unsigned short *) &(termios)->x = __tmp; \
-}
-
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- struct termio __user *termio)
-{
- SET_LOW_TERMIOS_BITS(termios, termio, c_iflag);
- SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);
- SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);
- SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);
- get_user(termios->c_line, &termio->c_line);
- return copy_from_user(termios->c_cc, termio->c_cc, NCC);
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- put_user((termios)->c_iflag, &(termio)->c_iflag);
- put_user((termios)->c_oflag, &(termio)->c_oflag);
- put_user((termios)->c_cflag, &(termio)->c_cflag);
- put_user((termios)->c_lflag, &(termio)->c_lflag);
- put_user((termios)->c_line, &(termio)->c_line);
- return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);
-}
-
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios2 __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_X86_TERMIOS_H */
+#include <asm-generic/termios.h>
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index fad7d40b75f8..6f7786aea4fc 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -95,7 +95,7 @@ struct thread_info {
#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
-#define TIF_SYSCALL_FTRACE 28 /* for ftrace syscall instrumentation */
+#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -118,17 +118,17 @@ struct thread_info {
#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
-#define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
/* work to do in syscall_trace_enter() */
#define _TIF_WORK_SYSCALL_ENTRY \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE | \
- _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP)
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT \
(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
- _TIF_SYSCALL_FTRACE)
+ _TIF_SYSCALL_TRACEPOINT)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
@@ -137,7 +137,8 @@ struct thread_info {
_TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
/* work to do on any return to user space */
-#define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE)
+#define _TIF_ALLWORK_MASK \
+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK \
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 066ef590d7e0..26d06e052a18 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -129,25 +129,34 @@ extern unsigned long node_remap_size[];
#endif
/* sched_domains SD_NODE_INIT for NUMA machines */
-#define SD_NODE_INIT (struct sched_domain) { \
- .min_interval = 8, \
- .max_interval = 32, \
- .busy_factor = 32, \
- .imbalance_pct = 125, \
- .cache_nice_tries = SD_CACHE_NICE_TRIES, \
- .busy_idx = 3, \
- .idle_idx = SD_IDLE_IDX, \
- .newidle_idx = SD_NEWIDLE_IDX, \
- .wake_idx = 1, \
- .forkexec_idx = SD_FORKEXEC_IDX, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_EXEC \
- | SD_BALANCE_FORK \
- | SD_WAKE_AFFINE \
- | SD_WAKE_BALANCE \
- | SD_SERIALIZE, \
- .last_balance = jiffies, \
- .balance_interval = 1, \
+#define SD_NODE_INIT (struct sched_domain) { \
+ .min_interval = 8, \
+ .max_interval = 32, \
+ .busy_factor = 32, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = SD_CACHE_NICE_TRIES, \
+ .busy_idx = 3, \
+ .idle_idx = SD_IDLE_IDX, \
+ .newidle_idx = SD_NEWIDLE_IDX, \
+ .wake_idx = 1, \
+ .forkexec_idx = SD_FORKEXEC_IDX, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_WAKE_IDLE \
+ | 1*SD_WAKE_AFFINE \
+ | 1*SD_WAKE_BALANCE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_POWERSAVINGS_BALANCE \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 1*SD_SERIALIZE \
+ | 1*SD_WAKE_IDLE_FAR \
+ | 0*SD_PREFER_SIBLING \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
}
#ifdef CONFIG_X86_64_ACPI_NUMA
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index bfd74c032fca..4da91ad69e0d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -81,9 +81,7 @@ extern int panic_on_unrecovered_nmi;
void math_error(void __user *);
void math_emulate(struct math_emu_info *);
-#ifdef CONFIG_X86_32
-unsigned long patch_espfix_desc(unsigned long, unsigned long);
-#else
+#ifndef CONFIG_X86_32
asmlinkage void smp_thermal_interrupt(void);
asmlinkage void mce_threshold_interrupt(void);
#endif
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h
index 09b97745772f..df1da20f4534 100644
--- a/arch/x86/include/asm/types.h
+++ b/arch/x86/include/asm/types.h
@@ -1,19 +1,11 @@
#ifndef _ASM_X86_TYPES_H
#define _ASM_X86_TYPES_H
-#include <asm-generic/int-ll64.h>
+#define dma_addr_t dma_addr_t
-#ifndef __ASSEMBLY__
-
-typedef unsigned short umode_t;
+#include <asm-generic/types.h>
-#endif /* __ASSEMBLY__ */
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
#ifdef __KERNEL__
-
#ifndef __ASSEMBLY__
typedef u64 dma64_addr_t;
diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h
index 87324cf439d9..b7c29c8017f2 100644
--- a/arch/x86/include/asm/ucontext.h
+++ b/arch/x86/include/asm/ucontext.h
@@ -7,12 +7,6 @@
* sigcontext struct (uc_mcontext).
*/
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
+#include <asm-generic/ucontext.h>
#endif /* _ASM_X86_UCONTEXT_H */
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 732a30706153..8deaada61bc8 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -345,6 +345,8 @@
#ifdef __KERNEL__
+#define NR_syscalls 337
+
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 900e1617e672..b9f3c60de5f7 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -688,6 +688,12 @@ __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
#endif /* __NO_STUBS */
#ifdef __KERNEL__
+
+#ifndef COMPILE_OFFSETS
+#include <asm/asm-offsets.h>
+#define NR_syscalls (__NR_syscall_max + 1)
+#endif
+
/*
* "Conditional" syscalls
*
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6b8ca3a0285d..67e929b89875 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -833,106 +833,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
extern int es7000_plat;
#endif
-static struct {
- int gsi_base;
- int gsi_end;
-} mp_ioapic_routing[MAX_IO_APICS];
-
-int mp_find_ioapic(int gsi)
-{
- int i = 0;
-
- /* Find the IOAPIC that manages this GSI. */
- for (i = 0; i < nr_ioapics; i++) {
- if ((gsi >= mp_ioapic_routing[i].gsi_base)
- && (gsi <= mp_ioapic_routing[i].gsi_end))
- return i;
- }
-
- printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
- return -1;
-}
-
-int mp_find_ioapic_pin(int ioapic, int gsi)
-{
- if (WARN_ON(ioapic == -1))
- return -1;
- if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end))
- return -1;
-
- return gsi - mp_ioapic_routing[ioapic].gsi_base;
-}
-
-static u8 __init uniq_ioapic_id(u8 id)
-{
-#ifdef CONFIG_X86_32
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
- !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
- return io_apic_get_unique_id(nr_ioapics, id);
- else
- return id;
-#else
- int i;
- DECLARE_BITMAP(used, 256);
- bitmap_zero(used, 256);
- for (i = 0; i < nr_ioapics; i++) {
- struct mpc_ioapic *ia = &mp_ioapics[i];
- __set_bit(ia->apicid, used);
- }
- if (!test_bit(id, used))
- return id;
- return find_first_zero_bit(used, 256);
-#endif
-}
-
-static int bad_ioapic(unsigned long address)
-{
- if (nr_ioapics >= MAX_IO_APICS) {
- printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
- "(found %d)\n", MAX_IO_APICS, nr_ioapics);
- panic("Recompile kernel with bigger MAX_IO_APICS!\n");
- }
- if (!address) {
- printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
- " found in table, skipping!\n");
- return 1;
- }
- return 0;
-}
-
-void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
-{
- int idx = 0;
-
- if (bad_ioapic(address))
- return;
-
- idx = nr_ioapics;
-
- mp_ioapics[idx].type = MP_IOAPIC;
- mp_ioapics[idx].flags = MPC_APIC_USABLE;
- mp_ioapics[idx].apicaddr = address;
-
- set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
- mp_ioapics[idx].apicid = uniq_ioapic_id(id);
- mp_ioapics[idx].apicver = io_apic_get_version(idx);
-
- /*
- * Build basic GSI lookup table to facilitate gsi->io_apic lookups
- * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
- */
- mp_ioapic_routing[idx].gsi_base = gsi_base;
- mp_ioapic_routing[idx].gsi_end = gsi_base +
- io_apic_get_redir_entries(idx);
-
- printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
- "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
- mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
- mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
-
- nr_ioapics++;
-}
-
int __init acpi_probe_gsi(void)
{
int idx;
@@ -947,7 +847,7 @@ int __init acpi_probe_gsi(void)
max_gsi = 0;
for (idx = 0; idx < nr_ioapics; idx++) {
- gsi = mp_ioapic_routing[idx].gsi_end;
+ gsi = mp_gsi_routing[idx].gsi_end;
if (gsi > max_gsi)
max_gsi = gsi;
@@ -1179,9 +1079,8 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* If MPS is present, it will handle them,
* otherwise the system will stay in PIC mode
*/
- if (acpi_disabled || acpi_noirq) {
+ if (acpi_disabled || acpi_noirq)
return -ENODEV;
- }
if (!cpu_has_apic)
return -ENODEV;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f57658702571..de7353c0ce9c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -2,6 +2,7 @@
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
@@ -32,7 +33,7 @@ __setup("smp-alt-boot", bootonly);
#define smp_alt_once 1
#endif
-static int debug_alternative;
+static int __initdata_or_module debug_alternative;
static int __init debug_alt(char *str)
{
@@ -51,7 +52,7 @@ static int __init setup_noreplace_smp(char *str)
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
-static int noreplace_paravirt = 0;
+static int __initdata_or_module noreplace_paravirt = 0;
static int __init setup_noreplace_paravirt(char *str)
{
@@ -64,16 +65,17 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt);
#define DPRINTK(fmt, args...) if (debug_alternative) \
printk(KERN_DEBUG fmt, args)
-#ifdef GENERIC_NOP1
+#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
as inline assembly strings in the include files and we cannot
get them easily into strings. */
-asm("\t.section .rodata, \"a\"\nintelnops: "
+asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
GENERIC_NOP7 GENERIC_NOP8
"\t.previous");
extern const unsigned char intelnops[];
-static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
+static const unsigned char *const __initconst_or_module
+intel_nops[ASM_NOP_MAX+1] = {
NULL,
intelnops,
intelnops + 1,
@@ -87,12 +89,13 @@ static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
#endif
#ifdef K8_NOP1
-asm("\t.section .rodata, \"a\"\nk8nops: "
+asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
K8_NOP7 K8_NOP8
"\t.previous");
extern const unsigned char k8nops[];
-static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
+static const unsigned char *const __initconst_or_module
+k8_nops[ASM_NOP_MAX+1] = {
NULL,
k8nops,
k8nops + 1,
@@ -105,13 +108,14 @@ static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
};
#endif
-#ifdef K7_NOP1
-asm("\t.section .rodata, \"a\"\nk7nops: "
+#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
+asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
K7_NOP7 K7_NOP8
"\t.previous");
extern const unsigned char k7nops[];
-static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
+static const unsigned char *const __initconst_or_module
+k7_nops[ASM_NOP_MAX+1] = {
NULL,
k7nops,
k7nops + 1,
@@ -125,12 +129,13 @@ static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
#endif
#ifdef P6_NOP1
-asm("\t.section .rodata, \"a\"\np6nops: "
+asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
P6_NOP7 P6_NOP8
"\t.previous");
extern const unsigned char p6nops[];
-static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
+static const unsigned char *const __initconst_or_module
+p6_nops[ASM_NOP_MAX+1] = {
NULL,
p6nops,
p6nops + 1,
@@ -146,7 +151,7 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
#ifdef CONFIG_X86_64
extern char __vsyscall_0;
-const unsigned char *const *find_nop_table(void)
+static const unsigned char *const *__init_or_module find_nop_table(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_has(X86_FEATURE_NOPL))
@@ -157,7 +162,7 @@ const unsigned char *const *find_nop_table(void)
#else /* CONFIG_X86_64 */
-const unsigned char *const *find_nop_table(void)
+static const unsigned char *const *__init_or_module find_nop_table(void)
{
if (boot_cpu_has(X86_FEATURE_K8))
return k8_nops;
@@ -172,7 +177,7 @@ const unsigned char *const *find_nop_table(void)
#endif /* CONFIG_X86_64 */
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
-void add_nops(void *insns, unsigned int len)
+static void __init_or_module add_nops(void *insns, unsigned int len)
{
const unsigned char *const *noptable = find_nop_table();
@@ -185,10 +190,10 @@ void add_nops(void *insns, unsigned int len)
len -= noplen;
}
}
-EXPORT_SYMBOL_GPL(add_nops);
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
+static void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
@@ -196,7 +201,8 @@ extern u8 *__smp_locks[], *__smp_locks_end[];
APs have less capabilities than the boot processor are not handled.
Tough. Make sure you disable such features by hand. */
-void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+void __init_or_module apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
{
struct alt_instr *a;
char insnbuf[MAX_PATCH_LEN];
@@ -279,9 +285,10 @@ static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1; /* protected by smp_alt */
-void alternatives_smp_module_add(struct module *mod, char *name,
- void *locks, void *locks_end,
- void *text, void *text_end)
+void __init_or_module alternatives_smp_module_add(struct module *mod,
+ char *name,
+ void *locks, void *locks_end,
+ void *text, void *text_end)
{
struct smp_alt_module *smp;
@@ -317,7 +324,7 @@ void alternatives_smp_module_add(struct module *mod, char *name,
mutex_unlock(&smp_alt);
}
-void alternatives_smp_module_del(struct module *mod)
+void __init_or_module alternatives_smp_module_del(struct module *mod)
{
struct smp_alt_module *item;
@@ -386,8 +393,8 @@ void alternatives_smp_switch(int smp)
#endif
#ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch_site *start,
- struct paravirt_patch_site *end)
+void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+ struct paravirt_patch_site *end)
{
struct paravirt_patch_site *p;
char insnbuf[MAX_PATCH_LEN];
@@ -485,13 +492,14 @@ void __init alternative_instructions(void)
 * instructions. And on the local CPU you need to be protected against NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
-void *text_poke_early(void *addr, const void *opcode, size_t len)
+static void *__init_or_module text_poke_early(void *addr, const void *opcode,
+ size_t len)
{
unsigned long flags;
local_irq_save(flags);
memcpy(addr, opcode, len);
- local_irq_restore(flags);
sync_core();
+ local_irq_restore(flags);
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
return addr;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 6c99f5037801..98f230f6a28d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -41,9 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);
-#ifdef CONFIG_IOMMU_API
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
static struct iommu_ops amd_iommu_ops;
-#endif
/*
* general struct to manage commands send to an IOMMU
@@ -55,16 +59,16 @@ struct iommu_cmd {
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
struct unity_map_entry *e);
static struct dma_ops_domain *find_protection_domain(u16 devid);
-static u64* alloc_pte(struct protection_domain *dom,
- unsigned long address, u64
- **pte_page, gfp_t gfp);
+static u64 *alloc_pte(struct protection_domain *domain,
+ unsigned long address, int end_lvl,
+ u64 **pte_page, gfp_t gfp);
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
unsigned long start_page,
unsigned int pages);
-
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
+static void reset_iommu_command_buffer(struct amd_iommu *iommu);
+static u64 *fetch_pte(struct protection_domain *domain,
+ unsigned long address, int map_size);
+static void update_domain(struct protection_domain *domain);
#ifdef CONFIG_AMD_IOMMU_STATS
@@ -138,7 +142,25 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
*
****************************************************************************/
-static void iommu_print_event(void *__evt)
+static void dump_dte_entry(u16 devid)
+{
+ int i;
+
+ for (i = 0; i < 8; ++i)
+ pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+ amd_iommu_dev_table[devid].data[i]);
+}
+
+static void dump_command(unsigned long phys_addr)
+{
+ struct iommu_cmd *cmd = phys_to_virt(phys_addr);
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
+}
+
+static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
u32 *event = __evt;
int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
@@ -147,7 +169,7 @@ static void iommu_print_event(void *__evt)
int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
u64 address = (u64)(((u64)event[3]) << 32) | event[2];
- printk(KERN_ERR "AMD IOMMU: Event logged [");
+ printk(KERN_ERR "AMD-Vi: Event logged [");
switch (type) {
case EVENT_TYPE_ILL_DEV:
@@ -155,6 +177,7 @@ static void iommu_print_event(void *__evt)
"address=0x%016llx flags=0x%04x]\n",
PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
+ dump_dte_entry(devid);
break;
case EVENT_TYPE_IO_FAULT:
printk("IO_PAGE_FAULT device=%02x:%02x.%x "
@@ -176,6 +199,8 @@ static void iommu_print_event(void *__evt)
break;
case EVENT_TYPE_ILL_CMD:
printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+ reset_iommu_command_buffer(iommu);
+ dump_command(address);
break;
case EVENT_TYPE_CMD_HARD_ERR:
printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
@@ -209,7 +234,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
while (head != tail) {
- iommu_print_event(iommu->evt_buf + head);
+ iommu_print_event(iommu, iommu->evt_buf + head);
head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
}
@@ -296,8 +321,11 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu)
status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (unlikely(i == EXIT_LOOP_COUNT))
- panic("AMD IOMMU: Completion wait loop failed\n");
+ if (unlikely(i == EXIT_LOOP_COUNT)) {
+ spin_unlock(&iommu->lock);
+ reset_iommu_command_buffer(iommu);
+ spin_lock(&iommu->lock);
+ }
}
/*
@@ -445,47 +473,78 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
}
/*
+ * This function flushes one domain on one IOMMU
+ */
+static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
+{
+ struct iommu_cmd cmd;
+ unsigned long flags;
+
+ __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ domid, 1, 1);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ __iommu_queue_command(iommu, &cmd);
+ __iommu_completion_wait(iommu);
+ __iommu_wait_for_completion(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
+{
+ int i;
+
+ for (i = 1; i < MAX_DOMAIN_ID; ++i) {
+ if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+ continue;
+ flush_domain_on_iommu(iommu, i);
+ }
+
+}
+
+/*
* This function is used to flush the IO/TLB for a given protection domain
* on every IOMMU in the system
*/
static void iommu_flush_domain(u16 domid)
{
- unsigned long flags;
struct amd_iommu *iommu;
- struct iommu_cmd cmd;
INC_STATS_COUNTER(domain_flush_all);
- __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- domid, 1, 1);
-
- for_each_iommu(iommu) {
- spin_lock_irqsave(&iommu->lock, flags);
- __iommu_queue_command(iommu, &cmd);
- __iommu_completion_wait(iommu);
- __iommu_wait_for_completion(iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
- }
+ for_each_iommu(iommu)
+ flush_domain_on_iommu(iommu, domid);
}
void amd_iommu_flush_all_domains(void)
{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ flush_all_domains_on_iommu(iommu);
+}
+
+static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
+{
int i;
- for (i = 1; i < MAX_DOMAIN_ID; ++i) {
- if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+ for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+ if (iommu != amd_iommu_rlookup_table[i])
continue;
- iommu_flush_domain(i);
+
+ iommu_queue_inv_dev_entry(iommu, i);
+ iommu_completion_wait(iommu);
}
}
-void amd_iommu_flush_all_devices(void)
+static void flush_devices_by_domain(struct protection_domain *domain)
{
struct amd_iommu *iommu;
int i;
for (i = 0; i <= amd_iommu_last_bdf; ++i) {
- if (amd_iommu_pd_table[i] == NULL)
+ if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
+ (amd_iommu_pd_table[i] != domain))
continue;
iommu = amd_iommu_rlookup_table[i];
@@ -497,6 +556,27 @@ void amd_iommu_flush_all_devices(void)
}
}
+static void reset_iommu_command_buffer(struct amd_iommu *iommu)
+{
+ pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
+
+ if (iommu->reset_in_progress)
+ panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
+
+ iommu->reset_in_progress = true;
+
+ amd_iommu_reset_cmd_buffer(iommu);
+ flush_all_devices_for_iommu(iommu);
+ flush_all_domains_on_iommu(iommu);
+
+ iommu->reset_in_progress = false;
+}
+
+void amd_iommu_flush_all_devices(void)
+{
+ flush_devices_by_domain(NULL);
+}
+
/****************************************************************************
*
* The functions below are used the create the page table mappings for
@@ -514,18 +594,21 @@ void amd_iommu_flush_all_devices(void)
static int iommu_map_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long phys_addr,
- int prot)
+ int prot,
+ int map_size)
{
u64 __pte, *pte;
bus_addr = PAGE_ALIGN(bus_addr);
phys_addr = PAGE_ALIGN(phys_addr);
- /* only support 512GB address spaces for now */
- if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
+ BUG_ON(!PM_ALIGNED(map_size, bus_addr));
+ BUG_ON(!PM_ALIGNED(map_size, phys_addr));
+
+ if (!(prot & IOMMU_PROT_MASK))
return -EINVAL;
- pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);
+ pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);
if (IOMMU_PTE_PRESENT(*pte))
return -EBUSY;
@@ -538,29 +621,18 @@ static int iommu_map_page(struct protection_domain *dom,
*pte = __pte;
+ update_domain(dom);
+
return 0;
}
static void iommu_unmap_page(struct protection_domain *dom,
- unsigned long bus_addr)
+ unsigned long bus_addr, int map_size)
{
- u64 *pte;
-
- pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
-
- if (!IOMMU_PTE_PRESENT(*pte))
- return;
-
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
+ u64 *pte = fetch_pte(dom, bus_addr, map_size);
- if (!IOMMU_PTE_PRESENT(*pte))
- return;
-
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
-
- *pte = 0;
+ if (pte)
+ *pte = 0;
}
/*
@@ -615,7 +687,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
for (addr = e->address_start; addr < e->address_end;
addr += PAGE_SIZE) {
- ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot);
+ ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
+ PM_MAP_4k);
if (ret)
return ret;
/*
@@ -670,24 +743,29 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
* This function checks if there is a PTE for a given dma address. If
* there is one, it returns the pointer to it.
*/
-static u64* fetch_pte(struct protection_domain *domain,
- unsigned long address)
+static u64 *fetch_pte(struct protection_domain *domain,
+ unsigned long address, int map_size)
{
+ int level;
u64 *pte;
- pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)];
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
- if (!IOMMU_PTE_PRESENT(*pte))
- return NULL;
+ while (level > map_size) {
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+ level -= 1;
- if (!IOMMU_PTE_PRESENT(*pte))
- return NULL;
+ pte = IOMMU_PTE_PAGE(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+ if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
+ pte = NULL;
+ break;
+ }
+ }
return pte;
}
@@ -727,7 +805,7 @@ static int alloc_new_range(struct amd_iommu *iommu,
u64 *pte, *pte_page;
for (i = 0; i < num_ptes; ++i) {
- pte = alloc_pte(&dma_dom->domain, address,
+ pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
&pte_page, gfp);
if (!pte)
goto out_free;
@@ -760,16 +838,20 @@ static int alloc_new_range(struct amd_iommu *iommu,
for (i = dma_dom->aperture[index]->offset;
i < dma_dom->aperture_size;
i += PAGE_SIZE) {
- u64 *pte = fetch_pte(&dma_dom->domain, i);
+ u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
continue;
dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
}
+ update_domain(&dma_dom->domain);
+
return 0;
out_free:
+ update_domain(&dma_dom->domain);
+
free_page((unsigned long)dma_dom->aperture[index]->bitmap);
kfree(dma_dom->aperture[index]);
@@ -1009,7 +1091,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
dma_dom->domain.id = domain_id_alloc();
if (dma_dom->domain.id == 0)
goto free_dma_dom;
- dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
+ dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
dma_dom->domain.flags = PD_DMA_OPS_MASK;
dma_dom->domain.priv = dma_dom;
@@ -1063,6 +1145,41 @@ static struct protection_domain *domain_for_device(u16 devid)
return dom;
}
+static void set_dte_entry(u16 devid, struct protection_domain *domain)
+{
+ u64 pte_root = virt_to_phys(domain->pt_root);
+
+ pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+ << DEV_ENTRY_MODE_SHIFT;
+ pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
+
+ amd_iommu_dev_table[devid].data[2] = domain->id;
+ amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
+ amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+
+ amd_iommu_pd_table[devid] = domain;
+}
+
+/*
+ * If a device is not yet associated with a domain, this function
+ * assigns it to the domain and makes it visible to the hardware
+ */
+static void __attach_device(struct amd_iommu *iommu,
+ struct protection_domain *domain,
+ u16 devid)
+{
+ /* lock domain */
+ spin_lock(&domain->lock);
+
+ /* update DTE entry */
+ set_dte_entry(devid, domain);
+
+ domain->dev_cnt += 1;
+
+ /* ready */
+ spin_unlock(&domain->lock);
+}
+
/*
* If a device is not yet associated with a domain, this function does
* assigns it visible for the hardware
@@ -1072,27 +1189,16 @@ static void attach_device(struct amd_iommu *iommu,
u16 devid)
{
unsigned long flags;
- u64 pte_root = virt_to_phys(domain->pt_root);
-
- domain->dev_cnt += 1;
-
- pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
- << DEV_ENTRY_MODE_SHIFT;
- pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
- amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
- amd_iommu_dev_table[devid].data[2] = domain->id;
-
- amd_iommu_pd_table[devid] = domain;
+ __attach_device(iommu, domain, devid);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
- /*
- * We might boot into a crash-kernel here. The crashed kernel
- * left the caches in the IOMMU dirty. So we have to flush
- * here to evict all dirty stuff.
- */
+ /*
+ * We might boot into a crash-kernel here. The crashed kernel
+ * left the caches in the IOMMU dirty. So we have to flush
+ * here to evict all dirty stuff.
+ */
iommu_queue_inv_dev_entry(iommu, devid);
iommu_flush_tlb_pde(iommu, domain->id);
}
@@ -1119,6 +1225,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
/* ready */
spin_unlock(&domain->lock);
+
+ /*
+ * If we run in passthrough mode the device must be assigned to the
+ * passthrough domain if it is detached from any other domain
+ */
+ if (iommu_pass_through) {
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+ __attach_device(iommu, pt_domain, devid);
+ }
}
/*
@@ -1164,6 +1279,8 @@ static int device_change_notifier(struct notifier_block *nb,
case BUS_NOTIFY_UNBOUND_DRIVER:
if (!domain)
goto out;
+ if (iommu_pass_through)
+ break;
detach_device(domain, devid);
break;
case BUS_NOTIFY_ADD_DEVICE:
@@ -1292,39 +1409,91 @@ static int get_device_resources(struct device *dev,
return 1;
}
+static void update_device_table(struct protection_domain *domain)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+ if (amd_iommu_pd_table[i] != domain)
+ continue;
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ set_dte_entry(i, domain);
+ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ }
+}
+
+static void update_domain(struct protection_domain *domain)
+{
+ if (!domain->updated)
+ return;
+
+ update_device_table(domain);
+ flush_devices_by_domain(domain);
+ iommu_flush_domain(domain->id);
+
+ domain->updated = false;
+}
+
/*
- * If the pte_page is not yet allocated this function is called
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
*/
-static u64* alloc_pte(struct protection_domain *dom,
- unsigned long address, u64 **pte_page, gfp_t gfp)
+static bool increase_address_space(struct protection_domain *domain,
+ gfp_t gfp)
+{
+ u64 *pte;
+
+ if (domain->mode == PAGE_MODE_6_LEVEL)
+ /* address space already 64 bit large */
+ return false;
+
+ pte = (void *)get_zeroed_page(gfp);
+ if (!pte)
+ return false;
+
+ *pte = PM_LEVEL_PDE(domain->mode,
+ virt_to_phys(domain->pt_root));
+ domain->pt_root = pte;
+ domain->mode += 1;
+ domain->updated = true;
+
+ return true;
+}
+
+static u64 *alloc_pte(struct protection_domain *domain,
+ unsigned long address,
+ int end_lvl,
+ u64 **pte_page,
+ gfp_t gfp)
{
u64 *pte, *page;
+ int level;
- pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
+ while (address > PM_LEVEL_SIZE(domain->mode))
+ increase_address_space(domain, gfp);
- if (!IOMMU_PTE_PRESENT(*pte)) {
- page = (u64 *)get_zeroed_page(gfp);
- if (!page)
- return NULL;
- *pte = IOMMU_L2_PDE(virt_to_phys(page));
- }
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[IOMMU_PTE_L1_INDEX(address)];
+ while (level > end_lvl) {
+ if (!IOMMU_PTE_PRESENT(*pte)) {
+ page = (u64 *)get_zeroed_page(gfp);
+ if (!page)
+ return NULL;
+ *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
+ }
- if (!IOMMU_PTE_PRESENT(*pte)) {
- page = (u64 *)get_zeroed_page(gfp);
- if (!page)
- return NULL;
- *pte = IOMMU_L1_PDE(virt_to_phys(page));
- }
+ level -= 1;
- pte = IOMMU_PTE_PAGE(*pte);
+ pte = IOMMU_PTE_PAGE(*pte);
- if (pte_page)
- *pte_page = pte;
+ if (pte_page && level == end_lvl)
+ *pte_page = pte;
- pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ }
return pte;
}
@@ -1344,10 +1513,13 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
if (!pte) {
- pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
+ pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
+ GFP_ATOMIC);
aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
} else
- pte += IOMMU_PTE_L0_INDEX(address);
+ pte += PM_LEVEL_INDEX(0, address);
+
+ update_domain(&dom->domain);
return pte;
}
@@ -1409,7 +1581,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
if (!pte)
return;
- pte += IOMMU_PTE_L0_INDEX(address);
+ pte += PM_LEVEL_INDEX(0, address);
WARN_ON(!*pte);
@@ -1988,19 +2160,47 @@ static void cleanup_domain(struct protection_domain *domain)
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+static void protection_domain_free(struct protection_domain *domain)
+{
+ if (!domain)
+ return;
+
+ if (domain->id)
+ domain_id_free(domain->id);
+
+ kfree(domain);
+}
+
+static struct protection_domain *protection_domain_alloc(void)
{
struct protection_domain *domain;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
- return -ENOMEM;
+ return NULL;
spin_lock_init(&domain->lock);
- domain->mode = PAGE_MODE_3_LEVEL;
domain->id = domain_id_alloc();
if (!domain->id)
+ goto out_err;
+
+ return domain;
+
+out_err:
+ kfree(domain);
+
+ return NULL;
+}
+
+static int amd_iommu_domain_init(struct iommu_domain *dom)
+{
+ struct protection_domain *domain;
+
+ domain = protection_domain_alloc();
+ if (!domain)
goto out_free;
+
+ domain->mode = PAGE_MODE_3_LEVEL;
domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
if (!domain->pt_root)
goto out_free;
@@ -2010,7 +2210,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
return 0;
out_free:
- kfree(domain);
+ protection_domain_free(domain);
return -ENOMEM;
}
@@ -2115,7 +2315,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
paddr &= PAGE_MASK;
for (i = 0; i < npages; ++i) {
- ret = iommu_map_page(domain, iova, paddr, prot);
+ ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
if (ret)
return ret;
@@ -2136,7 +2336,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
iova &= PAGE_MASK;
for (i = 0; i < npages; ++i) {
- iommu_unmap_page(domain, iova);
+ iommu_unmap_page(domain, iova, PM_MAP_4k);
iova += PAGE_SIZE;
}
@@ -2151,21 +2351,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
phys_addr_t paddr;
u64 *pte;
- pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)];
-
- if (!IOMMU_PTE_PRESENT(*pte))
- return 0;
-
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[IOMMU_PTE_L1_INDEX(iova)];
-
- if (!IOMMU_PTE_PRESENT(*pte))
- return 0;
-
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[IOMMU_PTE_L0_INDEX(iova)];
+ pte = fetch_pte(domain, iova, PM_MAP_4k);
- if (!IOMMU_PTE_PRESENT(*pte))
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
return 0;
paddr = *pte & IOMMU_PAGE_MASK;
@@ -2191,3 +2379,46 @@ static struct iommu_ops amd_iommu_ops = {
.domain_has_cap = amd_iommu_domain_has_cap,
};
+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of IOMMU for pass through
+ * mode
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+int __init amd_iommu_init_passthrough(void)
+{
+ struct pci_dev *dev = NULL;
+ u16 devid, devid2;
+
+ /* allocate passthrough domain */
+ pt_domain = protection_domain_alloc();
+ if (!pt_domain)
+ return -ENOMEM;
+
+ pt_domain->mode |= PAGE_MODE_NONE;
+
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ struct amd_iommu *iommu;
+
+ devid = calc_devid(dev->bus->number, dev->devfn);
+ if (devid > amd_iommu_last_bdf)
+ continue;
+
+ devid2 = amd_iommu_alias_table[devid];
+
+ iommu = amd_iommu_rlookup_table[devid2];
+ if (!iommu)
+ continue;
+
+ __attach_device(iommu, pt_domain, devid);
+ __attach_device(iommu, pt_domain, devid2);
+ }
+
+ pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
+
+ return 0;
+}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index c1b17e97252e..b4b61d462dcc 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
- printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
+ printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
dev_name(&iommu->dev->dev), iommu->cap_ptr);
iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
@@ -435,6 +435,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
}
/*
+ * This function resets the command buffer if the IOMMU stopped fetching
+ * commands from it.
+ */
+void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+ writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+}
+
+/*
* This function writes the command buffer address to the hardware and
* enables it.
*/
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
&entry, sizeof(entry));
- /* set head and tail to zero manually */
- writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-
- iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+ amd_iommu_reset_cmd_buffer(iommu);
}
static void __init free_command_buffer(struct amd_iommu *iommu)
@@ -858,7 +868,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
switch (*p) {
case ACPI_IVHD_TYPE:
- DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x "
+ DUMP_printk("device: %02x:%02x.%01x cap: %04x "
"seg: %d flags: %01x info %04x\n",
PCI_BUS(h->devid), PCI_SLOT(h->devid),
PCI_FUNC(h->devid), h->cap_ptr,
@@ -902,7 +912,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
IRQF_SAMPLE_RANDOM,
- "AMD IOMMU",
+ "AMD-Vi",
NULL);
if (r) {
@@ -1150,7 +1160,7 @@ int __init amd_iommu_init(void)
if (no_iommu) {
- printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
+ printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
return 0;
}
@@ -1242,22 +1252,28 @@ int __init amd_iommu_init(void)
if (ret)
goto free;
- ret = amd_iommu_init_dma_ops();
+ if (iommu_pass_through)
+ ret = amd_iommu_init_passthrough();
+ else
+ ret = amd_iommu_init_dma_ops();
if (ret)
goto free;
enable_iommus();
- printk(KERN_INFO "AMD IOMMU: device isolation ");
+ if (iommu_pass_through)
+ goto out;
+
+ printk(KERN_INFO "AMD-Vi: device isolation ");
if (amd_iommu_isolate)
printk("enabled\n");
else
printk("disabled\n");
if (amd_iommu_unmap_flush)
- printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
+ printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
else
- printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
+ printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
out:
return ret;
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 676debfc1702..128111d8ffe0 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,6 +20,7 @@
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/suspend.h>
+#include <linux/kmemleak.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/iommu.h>
@@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void)
* code for safe
*/
p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
+ /*
+ * Kmemleak should not scan this block as it may not be mapped via the
+ * kernel direct mapping.
+ */
+ kmemleak_ignore(p);
if (!p || __pa(p)+aper_size > 0xffffffff) {
printk(KERN_ERR
"Cannot allocate aperture memory hole (%p,%uK)\n",
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 0a1c2830ec66..159740decc41 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -49,6 +49,7 @@
#include <asm/mtrr.h>
#include <asm/smp.h>
#include <asm/mce.h>
+#include <asm/kvm_para.h>
unsigned int num_processors;
@@ -1361,52 +1362,80 @@ void enable_x2apic(void)
}
#endif /* CONFIG_X86_X2APIC */
-void __init enable_IR_x2apic(void)
+int __init enable_IR(void)
{
#ifdef CONFIG_INTR_REMAP
- int ret;
- unsigned long flags;
- struct IO_APIC_route_entry **ioapic_entries = NULL;
-
- ret = dmar_table_init();
- if (ret) {
- pr_debug("dmar_table_init() failed with %d:\n", ret);
- goto ir_failed;
- }
-
if (!intr_remapping_supported()) {
pr_debug("intr-remapping not supported\n");
- goto ir_failed;
+ return 0;
}
-
if (!x2apic_preenabled && skip_ioapic_setup) {
pr_info("Skipped enabling intr-remap because of skipping "
"io-apic setup\n");
- return;
+ return 0;
}
+ if (enable_intr_remapping(x2apic_supported()))
+ return 0;
+
+ pr_info("Enabled Interrupt-remapping\n");
+
+ return 1;
+
+#endif
+ return 0;
+}
+
+void __init enable_IR_x2apic(void)
+{
+ unsigned long flags;
+ struct IO_APIC_route_entry **ioapic_entries = NULL;
+ int ret, x2apic_enabled = 0;
+ int dmar_table_init_ret = 0;
+
+#ifdef CONFIG_INTR_REMAP
+ dmar_table_init_ret = dmar_table_init();
+ if (dmar_table_init_ret)
+ pr_debug("dmar_table_init() failed with %d:\n",
+ dmar_table_init_ret);
+#endif
+
ioapic_entries = alloc_ioapic_entries();
if (!ioapic_entries) {
- pr_info("Allocate ioapic_entries failed: %d\n", ret);
- goto end;
+ pr_err("Allocate ioapic_entries failed\n");
+ goto out;
}
ret = save_IO_APIC_setup(ioapic_entries);
if (ret) {
pr_info("Saving IO-APIC state failed: %d\n", ret);
- goto end;
+ goto out;
}
local_irq_save(flags);
- mask_IO_APIC_setup(ioapic_entries);
mask_8259A();
+ mask_IO_APIC_setup(ioapic_entries);
- ret = enable_intr_remapping(x2apic_supported());
- if (ret)
- goto end_restore;
+ if (dmar_table_init_ret)
+ ret = 0;
+ else
+ ret = enable_IR();
- pr_info("Enabled Interrupt-remapping\n");
+ if (!ret) {
+ /* IR is required if there is APIC ID > 255 even when running
+ * under KVM
+ */
+ if (max_physical_apicid > 255 || !kvm_para_available())
+ goto nox2apic;
+ /*
+ * without IR all CPUs can be addressed by IOAPIC/MSI
+ * only in physical mode
+ */
+ x2apic_force_phys();
+ }
+
+ x2apic_enabled = 1;
if (x2apic_supported() && !x2apic_mode) {
x2apic_mode = 1;
@@ -1414,41 +1443,25 @@ void __init enable_IR_x2apic(void)
pr_info("Enabled x2apic\n");
}
-end_restore:
- if (ret)
- /*
- * IR enabling failed
- */
+nox2apic:
+ if (!ret) /* IR enabling failed */
restore_IO_APIC_setup(ioapic_entries);
-
unmask_8259A();
local_irq_restore(flags);
-end:
+out:
if (ioapic_entries)
free_ioapic_entries(ioapic_entries);
- if (!ret)
+ if (x2apic_enabled)
return;
-ir_failed:
if (x2apic_preenabled)
- panic("x2apic enabled by bios. But IR enabling failed");
+ panic("x2apic: enabled by BIOS but kernel init failed.");
else if (cpu_has_x2apic)
- pr_info("Not enabling x2apic,Intr-remapping\n");
-#else
- if (!cpu_has_x2apic)
- return;
-
- if (x2apic_preenabled)
- panic("x2apic enabled prior OS handover,"
- " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP");
-#endif
-
- return;
+ pr_info("Not enabling x2apic, Intr-remapping init failed.\n");
}
-
#ifdef CONFIG_X86_64
/*
* Detect and enable local APICs on non-SMP boards.
@@ -1549,8 +1562,6 @@ no_apic:
#ifdef CONFIG_X86_64
void __init early_init_lapic_mapping(void)
{
- unsigned long phys_addr;
-
/*
* If no local APIC can be found then go out
* : it means there is no mpatable and MADT
@@ -1558,11 +1569,9 @@ void __init early_init_lapic_mapping(void)
if (!smp_found_config)
return;
- phys_addr = mp_lapic_addr;
-
- set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
- APIC_BASE, phys_addr);
+ APIC_BASE, mp_lapic_addr);
/*
* Fetch the APIC ID of the BSP in case we have a
@@ -1651,7 +1660,6 @@ int __init APIC_init_uniprocessor(void)
APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
boot_cpu_physical_apicid);
- clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
return -1;
}
#endif
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 8952a5890281..89174f847b49 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -167,7 +167,7 @@ static int es7000_apic_is_cluster(void)
{
/* MPENTIUMIII */
if (boot_cpu_data.x86 == 6 &&
- (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11))
+ (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11))
return 1;
return 0;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d2ed6c5ddc80..3c8f9e75d038 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -66,6 +66,8 @@
#include <asm/apic.h>
#define __apicdebuginit(type) static type __init
+#define for_each_irq_pin(entry, head) \
+ for (entry = head; entry; entry = entry->next)
/*
* Is the SiS APIC rmw bug present ?
@@ -85,6 +87,9 @@ int nr_ioapic_registers[MAX_IO_APICS];
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;
+/* IO APIC gsi routing info */
+struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];
+
/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
@@ -116,15 +121,6 @@ static int __init parse_noapic(char *str)
}
early_param("noapic", parse_noapic);
-struct irq_pin_list;
-
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * the indexing order of this array favors 1:1 mappings
- * between pins and IRQs.
- */
-
struct irq_pin_list {
int apic, pin;
struct irq_pin_list *next;
@@ -139,6 +135,11 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
return pin;
}
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * Most irqs are mapped 1:1 with pins.
+ */
struct irq_cfg {
struct irq_pin_list *irq_2_pin;
cpumask_var_t domain;
@@ -414,13 +415,10 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
unsigned long flags;
spin_lock_irqsave(&ioapic_lock, flags);
- entry = cfg->irq_2_pin;
- for (;;) {
+ for_each_irq_pin(entry, cfg->irq_2_pin) {
unsigned int reg;
int pin;
- if (!entry)
- break;
pin = entry->pin;
reg = io_apic_read(entry->apic, 0x10 + pin*2);
/* Is the remote IRR bit set? */
@@ -428,9 +426,6 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
spin_unlock_irqrestore(&ioapic_lock, flags);
return true;
}
- if (!entry->next)
- break;
- entry = entry->next;
}
spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -498,72 +493,68 @@ static void ioapic_mask_entry(int apic, int pin)
* shared ISA-space IRQs, so we have to support them. We are super
* fast in the common case, and fast for shared ISA-space IRQs.
*/
-static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
+static int
+add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin)
{
- struct irq_pin_list *entry;
+ struct irq_pin_list **last, *entry;
- entry = cfg->irq_2_pin;
- if (!entry) {
- entry = get_one_free_irq_2_pin(node);
- if (!entry) {
- printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
- apic, pin);
- return;
- }
- cfg->irq_2_pin = entry;
- entry->apic = apic;
- entry->pin = pin;
- return;
- }
-
- while (entry->next) {
- /* not again, please */
+ /* don't allow duplicates */
+ last = &cfg->irq_2_pin;
+ for_each_irq_pin(entry, cfg->irq_2_pin) {
if (entry->apic == apic && entry->pin == pin)
- return;
-
- entry = entry->next;
+ return 0;
+ last = &entry->next;
}
- entry->next = get_one_free_irq_2_pin(node);
- entry = entry->next;
+ entry = get_one_free_irq_2_pin(node);
+ if (!entry) {
+ printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
+ node, apic, pin);
+ return -ENOMEM;
+ }
entry->apic = apic;
entry->pin = pin;
+
+ *last = entry;
+ return 0;
+}
+
+static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
+{
+ if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin))
+ panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}
/*
* Reroute an IRQ to a different pin.
*/
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
- int oldapic, int oldpin,
- int newapic, int newpin)
+ int oldapic, int oldpin,
+ int newapic, int newpin)
{
- struct irq_pin_list *entry = cfg->irq_2_pin;
- int replaced = 0;
+ struct irq_pin_list *entry;
- while (entry) {
+ for_each_irq_pin(entry, cfg->irq_2_pin) {
if (entry->apic == oldapic && entry->pin == oldpin) {
entry->apic = newapic;
entry->pin = newpin;
- replaced = 1;
/* every one is different, right? */
- break;
+ return;
}
- entry = entry->next;
}
- /* why? call replace before add? */
- if (!replaced)
- add_pin_to_irq_node(cfg, node, newapic, newpin);
+ /* old apic/pin didn't exist, so just add new ones */
+ add_pin_to_irq_node(cfg, node, newapic, newpin);
}
-static inline void io_apic_modify_irq(struct irq_cfg *cfg,
- int mask_and, int mask_or,
- void (*final)(struct irq_pin_list *entry))
+static void io_apic_modify_irq(struct irq_cfg *cfg,
+ int mask_and, int mask_or,
+ void (*final)(struct irq_pin_list *entry))
{
int pin;
struct irq_pin_list *entry;
- for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
+ for_each_irq_pin(entry, cfg->irq_2_pin) {
unsigned int reg;
pin = entry->pin;
reg = io_apic_read(entry->apic, 0x10 + pin * 2);
@@ -580,7 +571,6 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}
-#ifdef CONFIG_X86_64
static void io_apic_sync(struct irq_pin_list *entry)
{
/*
@@ -596,11 +586,6 @@ static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}
-#else /* CONFIG_X86_32 */
-static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
-{
- io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
-}
static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
{
@@ -613,7 +598,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}
-#endif /* CONFIG_X86_32 */
static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
{
@@ -1702,12 +1686,8 @@ __apicdebuginit(void) print_IO_APIC(void)
if (!entry)
continue;
printk(KERN_DEBUG "IRQ%d ", irq);
- for (;;) {
+ for_each_irq_pin(entry, cfg->irq_2_pin)
printk("-> %d:%d", entry->apic, entry->pin);
- if (!entry->next)
- break;
- entry = entry->next;
- }
printk("\n");
}
@@ -2211,7 +2191,6 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
return was_pending;
}
-#ifdef CONFIG_X86_64
static int ioapic_retrigger_irq(unsigned int irq)
{
@@ -2224,14 +2203,6 @@ static int ioapic_retrigger_irq(unsigned int irq)
return 1;
}
-#else
-static int ioapic_retrigger_irq(unsigned int irq)
-{
- apic->send_IPI_self(irq_cfg(irq)->vector);
-
- return 1;
-}
-#endif
/*
* Level and edge triggered IO-APIC interrupts need different handling,
@@ -2269,13 +2240,9 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
struct irq_pin_list *entry;
u8 vector = cfg->vector;
- entry = cfg->irq_2_pin;
- for (;;) {
+ for_each_irq_pin(entry, cfg->irq_2_pin) {
unsigned int reg;
- if (!entry)
- break;
-
apic = entry->apic;
pin = entry->pin;
/*
@@ -2288,9 +2255,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
io_apic_modify(apic, 0x10 + pin*2, reg);
- if (!entry->next)
- break;
- entry = entry->next;
}
}
@@ -2515,11 +2479,8 @@ atomic_t irq_mis_count;
static void ack_apic_level(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
-
-#ifdef CONFIG_X86_32
unsigned long v;
int i;
-#endif
struct irq_cfg *cfg;
int do_unmask_irq = 0;
@@ -2532,31 +2493,28 @@ static void ack_apic_level(unsigned int irq)
}
#endif
-#ifdef CONFIG_X86_32
/*
- * It appears there is an erratum which affects at least version 0x11
- * of I/O APIC (that's the 82093AA and cores integrated into various
- * chipsets). Under certain conditions a level-triggered interrupt is
- * erroneously delivered as edge-triggered one but the respective IRR
- * bit gets set nevertheless. As a result the I/O unit expects an EOI
- * message but it will never arrive and further interrupts are blocked
- * from the source. The exact reason is so far unknown, but the
- * phenomenon was observed when two consecutive interrupt requests
- * from a given source get delivered to the same CPU and the source is
- * temporarily disabled in between.
- *
- * A workaround is to simulate an EOI message manually. We achieve it
- * by setting the trigger mode to edge and then to level when the edge
- * trigger mode gets detected in the TMR of a local APIC for a
- * level-triggered interrupt. We mask the source for the time of the
- * operation to prevent an edge-triggered interrupt escaping meanwhile.
- * The idea is from Manfred Spraul. --macro
- */
+ * It appears there is an erratum which affects at least version 0x11
+ * of I/O APIC (that's the 82093AA and cores integrated into various
+ * chipsets). Under certain conditions a level-triggered interrupt is
+ * erroneously delivered as edge-triggered one but the respective IRR
+ * bit gets set nevertheless. As a result the I/O unit expects an EOI
+ * message but it will never arrive and further interrupts are blocked
+ * from the source. The exact reason is so far unknown, but the
+ * phenomenon was observed when two consecutive interrupt requests
+ * from a given source get delivered to the same CPU and the source is
+ * temporarily disabled in between.
+ *
+ * A workaround is to simulate an EOI message manually. We achieve it
+ * by setting the trigger mode to edge and then to level when the edge
+ * trigger mode gets detected in the TMR of a local APIC for a
+ * level-triggered interrupt. We mask the source for the time of the
+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
+ * The idea is from Manfred Spraul. --macro
+ */
cfg = desc->chip_data;
i = cfg->vector;
-
v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
-#endif
/*
* We must acknowledge the irq before we move it or the acknowledge will
@@ -2598,7 +2556,7 @@ static void ack_apic_level(unsigned int irq)
unmask_IO_APIC_irq_desc(desc);
}
-#ifdef CONFIG_X86_32
+ /* Tail end of version 0x11 I/O APIC bug workaround */
if (!(v & (1 << (i & 0x1f)))) {
atomic_inc(&irq_mis_count);
spin_lock(&ioapic_lock);
@@ -2606,26 +2564,15 @@ static void ack_apic_level(unsigned int irq)
__unmask_and_level_IO_APIC_irq(cfg);
spin_unlock(&ioapic_lock);
}
-#endif
}
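
The workaround in ack_apic_level() above reads the level-trigger (TMR) bit for vector i from the local APIC. The TMR is laid out as eight 32-bit registers spaced 0x10 apart, one bit per vector, which is what the (i & ~0x1f) >> 1 offset and the (1 << (i & 0x1f)) bit test encode. A small standalone worked example with a hypothetical vector number:

	#include <stdio.h>

	int main(void)
	{
		unsigned int i = 0x61;				/* hypothetical vector */
		unsigned int reg_off = (i & ~0x1f) >> 1;	/* 0x10 * (i / 32) */
		unsigned int bit = i & 0x1f;			/* bit within that register */

		printf("vector 0x%02x -> APIC_TMR + 0x%02x, bit %u\n", i, reg_off, bit);
		return 0;	/* prints: vector 0x61 -> APIC_TMR + 0x30, bit 1 */
	}
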
#ifdef CONFIG_INTR_REMAP
static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
- int apic, pin;
struct irq_pin_list *entry;
- entry = cfg->irq_2_pin;
- for (;;) {
-
- if (!entry)
- break;
-
- apic = entry->apic;
- pin = entry->pin;
- io_apic_eoi(apic, pin);
- entry = entry->next;
- }
+ for_each_irq_pin(entry, cfg->irq_2_pin)
+ io_apic_eoi(entry->apic, entry->pin);
}
static void
@@ -3241,8 +3188,7 @@ void destroy_irq(unsigned int irq)
cfg = desc->chip_data;
dynamic_irq_cleanup(irq);
/* connect back irq_cfg */
- if (desc)
- desc->chip_data = cfg;
+ desc->chip_data = cfg;
free_irte(irq);
spin_lock_irqsave(&vector_lock, flags);
@@ -3912,7 +3858,11 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
*/
if (irq >= NR_IRQS_LEGACY) {
cfg = desc->chip_data;
- add_pin_to_irq_node(cfg, node, ioapic, pin);
+ if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
+ printk(KERN_INFO "can not add pin %d for irq %d\n",
+ pin, irq);
+ return 0;
+ }
}
setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity);
@@ -3941,11 +3891,28 @@ int io_apic_set_pci_routing(struct device *dev, int irq,
return __io_apic_set_pci_routing(dev, irq, irq_attr);
}
-/* --------------------------------------------------------------------------
- ACPI-based IOAPIC Configuration
- -------------------------------------------------------------------------- */
+u8 __init io_apic_unique_id(u8 id)
+{
+#ifdef CONFIG_X86_32
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+ !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+ return io_apic_get_unique_id(nr_ioapics, id);
+ else
+ return id;
+#else
+ int i;
+ DECLARE_BITMAP(used, 256);
-#ifdef CONFIG_ACPI
+ bitmap_zero(used, 256);
+ for (i = 0; i < nr_ioapics; i++) {
+ struct mpc_ioapic *ia = &mp_ioapics[i];
+ __set_bit(ia->apicid, used);
+ }
+ if (!test_bit(id, used))
+ return id;
+ return find_first_zero_bit(used, 256);
+#endif
+}
#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
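
The 64-bit branch of the io_apic_unique_id() helper added above keeps the requested id when it is still unused and otherwise falls back to the first free one, tracked in a 256-bit bitmap. A standalone illustration of the same idea (pick_unique_id and its inputs are hypothetical; the kernel bitmap helpers are replaced by a plain array):

	#include <stdio.h>

	static unsigned int pick_unique_id(unsigned int wanted,
					   const unsigned int *in_use, int n)
	{
		unsigned char used[256] = { 0 };
		int i;

		for (i = 0; i < n; i++)
			used[in_use[i] & 0xff] = 1;
		if (!used[wanted & 0xff])
			return wanted;
		for (i = 0; i < 256; i++)
			if (!used[i])
				return i;
		return wanted;			/* every id taken; keep the request */
	}

	int main(void)
	{
		unsigned int taken[] = { 0, 1, 2 };

		printf("%u\n", pick_unique_id(2, taken, 3));	/* prints 3 */
		return 0;
	}
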
@@ -4054,8 +4021,6 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
return 0;
}
-#endif /* CONFIG_ACPI */
-
/*
* This function currently is only a helper for the i386 smp boot process where
* we need to reprogram the ioredtbls to cater for the cpus which have come online
@@ -4109,7 +4074,7 @@ void __init setup_ioapic_dest(void)
static struct resource *ioapic_resources;
-static struct resource * __init ioapic_setup_resources(void)
+static struct resource * __init ioapic_setup_resources(int nr_ioapics)
{
unsigned long n;
struct resource *res;
@@ -4125,15 +4090,13 @@ static struct resource * __init ioapic_setup_resources(void)
mem = alloc_bootmem(n);
res = (void *)mem;
- if (mem != NULL) {
- mem += sizeof(struct resource) * nr_ioapics;
+ mem += sizeof(struct resource) * nr_ioapics;
- for (i = 0; i < nr_ioapics; i++) {
- res[i].name = mem;
- res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- sprintf(mem, "IOAPIC %u", i);
- mem += IOAPIC_RESOURCE_NAME_SIZE;
- }
+ for (i = 0; i < nr_ioapics; i++) {
+ res[i].name = mem;
+ res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ sprintf(mem, "IOAPIC %u", i);
+ mem += IOAPIC_RESOURCE_NAME_SIZE;
}
ioapic_resources = res;
@@ -4147,7 +4110,7 @@ void __init ioapic_init_mappings(void)
struct resource *ioapic_res;
int i;
- ioapic_res = ioapic_setup_resources();
+ ioapic_res = ioapic_setup_resources(nr_ioapics);
for (i = 0; i < nr_ioapics; i++) {
if (smp_found_config) {
ioapic_phys = mp_ioapics[i].apicaddr;
@@ -4176,11 +4139,9 @@ fake_ioapic_page:
__fix_to_virt(idx), ioapic_phys);
idx++;
- if (ioapic_res != NULL) {
- ioapic_res->start = ioapic_phys;
- ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
- ioapic_res++;
- }
+ ioapic_res->start = ioapic_phys;
+ ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
+ ioapic_res++;
}
}
@@ -4201,3 +4162,76 @@ void __init ioapic_insert_resources(void)
r++;
}
}
+
+int mp_find_ioapic(int gsi)
+{
+ int i = 0;
+
+ /* Find the IOAPIC that manages this GSI. */
+ for (i = 0; i < nr_ioapics; i++) {
+ if ((gsi >= mp_gsi_routing[i].gsi_base)
+ && (gsi <= mp_gsi_routing[i].gsi_end))
+ return i;
+ }
+
+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
+ return -1;
+}
+
+int mp_find_ioapic_pin(int ioapic, int gsi)
+{
+ if (WARN_ON(ioapic == -1))
+ return -1;
+ if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
+ return -1;
+
+ return gsi - mp_gsi_routing[ioapic].gsi_base;
+}
+
+static int bad_ioapic(unsigned long address)
+{
+ if (nr_ioapics >= MAX_IO_APICS) {
+ printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded "
+ "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
+ return 1;
+ }
+ if (!address) {
+ printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
+ " found in table, skipping!\n");
+ return 1;
+ }
+ return 0;
+}
+
+void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
+{
+ int idx = 0;
+
+ if (bad_ioapic(address))
+ return;
+
+ idx = nr_ioapics;
+
+ mp_ioapics[idx].type = MP_IOAPIC;
+ mp_ioapics[idx].flags = MPC_APIC_USABLE;
+ mp_ioapics[idx].apicaddr = address;
+
+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
+ mp_ioapics[idx].apicid = io_apic_unique_id(id);
+ mp_ioapics[idx].apicver = io_apic_get_version(idx);
+
+ /*
+ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
+ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
+ */
+ mp_gsi_routing[idx].gsi_base = gsi_base;
+ mp_gsi_routing[idx].gsi_end = gsi_base +
+ io_apic_get_redir_entries(idx);
+
+ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+ "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+ mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
+ mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);
+
+ nr_ioapics++;
+}
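
mp_register_ioapic() above records a gsi_base..gsi_end window per I/O APIC precisely so that mp_find_ioapic()/mp_find_ioapic_pin() can later turn a GSI back into an (apic, pin) pair. An illustrative helper built only on those two new functions (gsi_to_ioapic_pin is hypothetical and not part of the patch):

	/*
	 * Illustrative only: resolve a GSI to its I/O APIC index and pin using
	 * the helpers added above.  Returns the ioapic index, or -1 on failure.
	 */
	static int gsi_to_ioapic_pin(int gsi, int *pin)
	{
		int ioapic = mp_find_ioapic(gsi);

		if (ioapic < 0)
			return -1;

		*pin = mp_find_ioapic_pin(ioapic, gsi);
		return ioapic;
	}
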
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 6ef00ba4c886..08385e090a6f 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -153,7 +153,7 @@ int safe_smp_processor_id(void)
{
int apicid, cpuid;
- if (!boot_cpu_has(X86_FEATURE_APIC))
+ if (!cpu_has_apic)
return 0;
apicid = hard_smp_processor_id();
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index b3025b43b63a..db7220220d09 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -39,7 +39,7 @@
int unknown_nmi_panic;
int nmi_watchdog_enabled;
-static cpumask_var_t backtrace_mask;
+static cpumask_t backtrace_mask __read_mostly;
/* nmi_active:
* >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void)
if (!prev_nmi_count)
goto error;
- alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
printk(KERN_INFO "Testing NMI watchdog ... ");
#ifdef CONFIG_SMP
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
}
/* We can be called before check_nmi_watchdog, hence NULL check. */
- if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
+ if (cpumask_test_cpu(cpu, &backtrace_mask)) {
static DEFINE_SPINLOCK(lock); /* Serialise the printks */
spin_lock(&lock);
printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
dump_stack();
spin_unlock(&lock);
- cpumask_clear_cpu(cpu, backtrace_mask);
+ cpumask_clear_cpu(cpu, &backtrace_mask);
+
+ rc = 1;
}
/* Could check oops_in_progress here too, but it's safer not to */
@@ -552,14 +554,18 @@ int do_nmi_callback(struct pt_regs *regs, int cpu)
return 0;
}
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
{
int i;
- cpumask_copy(backtrace_mask, cpu_online_mask);
+ cpumask_copy(&backtrace_mask, cpu_online_mask);
+
+ printk(KERN_INFO "sending NMI to all CPUs:\n");
+ apic->send_IPI_all(NMI_VECTOR);
+
/* Wait for up to 10 seconds for all CPUs to do the backtrace */
for (i = 0; i < 10 * 1000; i++) {
- if (cpumask_empty(backtrace_mask))
+ if (cpumask_empty(&backtrace_mask))
break;
mdelay(1);
}
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index fcec2f1d34a1..65edc180fc82 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -55,11 +55,11 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
void __init default_setup_apic_routing(void)
{
#ifdef CONFIG_X86_X2APIC
- if (x2apic_mode && (apic != &apic_x2apic_phys &&
+ if (x2apic_mode
#ifdef CONFIG_X86_UV
- apic != &apic_x2apic_uv_x &&
+ && apic != &apic_x2apic_uv_x
#endif
- apic != &apic_x2apic_cluster)) {
+ ) {
if (x2apic_phys)
apic = &apic_x2apic_phys;
else
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 442b5508893f..151ace69a5aa 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -403,7 +403,15 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
static struct apm_user *user_list;
static DEFINE_SPINLOCK(user_list_lock);
-static const struct desc_struct bad_bios_desc = { { { 0, 0x00409200 } } };
+
+/*
+ * Set up a segment that references the real mode segment 0x40
+ * that extends up to the end of page zero (that we have reserved).
+ * This is for buggy BIOSes that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
+static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
static const char driver_version[] = "1.16ac"; /* no spaces */
@@ -2332,15 +2340,6 @@ static int __init apm_init(void)
pm_flags |= PM_APM;
/*
- * Set up a segment that references the real mode segment 0x40
- * that extends up to the end of page zero (that we have reserved).
- * This is for buggy BIOS's that refer to (real mode) segment 0x40
- * even though they are called in protected mode.
- */
- set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
- _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
-
- /*
* Set up the long jump entry point to the APM BIOS, which is called
* from inline assembly.
*/
@@ -2358,12 +2357,12 @@ static int __init apm_init(void)
* code to that CPU.
*/
gdt = get_cpu_gdt_table(0);
- set_base(gdt[APM_CS >> 3],
- __va((unsigned long)apm_info.bios.cseg << 4));
- set_base(gdt[APM_CS_16 >> 3],
- __va((unsigned long)apm_info.bios.cseg_16 << 4));
- set_base(gdt[APM_DS >> 3],
- __va((unsigned long)apm_info.bios.dseg << 4));
+ set_desc_base(&gdt[APM_CS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+ set_desc_base(&gdt[APM_CS_16 >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_desc_base(&gdt[APM_DS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
proc_create("apm", 0, NULL, &apm_file_ops);
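
The apm_32.c change converts the bad_bios_desc setup from runtime set_base()/_set_limit() calls to a build-time GDT_ENTRY_INIT(). A quick standalone check (illustrative; it reproduces only the limit arithmetic, not the kernel macro) that the two encodings describe the same segment:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096;			/* PAGE_SIZE on x86 */
		unsigned long old_limit = 4095 - (0x40 << 4);	/* old _set_limit() value */
		unsigned long new_limit = page_size - 0x400 - 1; /* new GDT_ENTRY_INIT() value */

		assert(old_limit == new_limit);			/* both 0xbff */
		printf("limit = 0x%lx\n", new_limit);
		/* The base is likewise unchanged: __va(0x40 << 4) == __va(0x400UL). */
		return 0;
	}
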
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 898ecc47e129..4a6aeedcd965 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -3,6 +3,7 @@
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*/
+#define COMPILE_OFFSETS
#include <linux/crypto.h>
#include <linux/sched.h>
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 63fddcd082cd..22a47c82f3c0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -2,7 +2,7 @@
#include <linux/bitops.h>
#include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
@@ -45,8 +45,8 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
#define CBAR_ENB (0x80000000)
#define CBAR_KEY (0X000000CB)
if (c->x86_model == 9 || c->x86_model == 10) {
- if (inl (CBAR) & CBAR_ENB)
- outl (0 | CBAR_KEY, CBAR);
+ if (inl(CBAR) & CBAR_ENB)
+ outl(0 | CBAR_KEY, CBAR);
}
}
@@ -87,9 +87,10 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
d = d2-d;
if (d > 20*K6_BUG_LOOP)
- printk("system stability may be impaired when more than 32 MB are used.\n");
+ printk(KERN_CONT
+ "system stability may be impaired when more than 32 MB are used.\n");
else
- printk("probably OK (after B9730xxxx).\n");
+ printk(KERN_CONT "probably OK (after B9730xxxx).\n");
printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
}
@@ -219,8 +220,9 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
rdmsr(MSR_K7_CLK_CTL, l, h);
if ((l & 0xfff00000) != 0x20000000) {
- printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
- ((l & 0x000fffff)|0x20000000));
+ printk(KERN_INFO
+ "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+ l, ((l & 0x000fffff)|0x20000000));
wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
}
}
@@ -251,6 +253,64 @@ static int __cpuinit nearby_node(int apicid)
#endif
/*
+ * Fixup core topology information for AMD multi-node processors.
+ * Assumption 1: Number of cores in each internal node is the same.
+ * Assumption 2: Mixed systems with both single-node and dual-node
+ * processors are not supported.
+ */
+#ifdef CONFIG_X86_HT
+static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_PCI
+ u32 t, cpn;
+ u8 n, n_id;
+ int cpu = smp_processor_id();
+
+ /* fixup topology information only once for a core */
+ if (cpu_has(c, X86_FEATURE_AMD_DCM))
+ return;
+
+ /* check for multi-node processor on boot cpu */
+ t = read_pci_config(0, 24, 3, 0xe8);
+ if (!(t & (1 << 29)))
+ return;
+
+ set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+
+ /* cores per node: each internal node has half the number of cores */
+ cpn = c->x86_max_cores >> 1;
+
+ /* even-numbered NB_id of this dual-node processor */
+ n = c->phys_proc_id << 1;
+
+ /*
+ * determine internal node id and assign cores fifty-fifty to
+ * each node of the dual-node processor
+ */
+ t = read_pci_config(0, 24 + n, 3, 0xe8);
+ n = (t>>30) & 0x3;
+ if (n == 0) {
+ if (c->cpu_core_id < cpn)
+ n_id = 0;
+ else
+ n_id = 1;
+ } else {
+ if (c->cpu_core_id < cpn)
+ n_id = 1;
+ else
+ n_id = 0;
+ }
+
+ /* compute entire NodeID, use llc_shared_map to store sibling info */
+ per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;
+
+ /* fixup core id to be in the range 0 to cpn-1 */
+ c->cpu_core_id = c->cpu_core_id % cpn;
+#endif
+}
+#endif
+
+/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
* Assumes number of cores is a power of two.
*/
@@ -267,6 +327,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
c->phys_proc_id = c->initial_apicid >> bits;
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+ /* fixup topology information on multi-node processors */
+ if ((c->x86 == 0x10) && (c->x86_model == 9))
+ amd_fixup_dcm(c);
#endif
}
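
amd_fixup_dcm() above splits a dual-node package into two last-level-cache domains and renumbers the cores within each internal node. With hypothetical numbers (an 8-core dual-node part, so cpn = 4), the mapping works out as in this standalone sketch; the n_id value stands in for the result of the northbridge config read and core-id comparison done in the real code:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cpn = 8 >> 1;	/* x86_max_cores = 8 -> 4 cores per node */
		unsigned int phys_proc_id = 0;	/* socket 0 */
		unsigned int cpu_core_id = 5;	/* sixth core in the package */
		unsigned int n_id = 1;		/* internal node this core lands on */

		printf("llc_id = %u, core_id = %u\n",
		       (phys_proc_id << 1) + n_id, cpu_core_id % cpn);
		return 0;	/* prints: llc_id = 1, core_id = 1 */
	}
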
@@ -275,9 +338,10 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
int cpu = smp_processor_id();
int node;
- unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
+ unsigned apicid = c->apicid;
+
+ node = per_cpu(cpu_llc_id, cpu);
- node = c->phys_proc_id;
if (apicid_to_node[apicid] != NUMA_NO_NODE)
node = apicid_to_node[apicid];
if (!node_online(node)) {
@@ -398,18 +462,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
u32 level;
level = cpuid_eax(1);
- if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+ if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
/*
* Some BIOSes incorrectly force this feature, but only K8
* revision D (model = 0x14) and later actually support it.
+ * (AMD Erratum #110, docId: 25759).
*/
- if (c->x86_model < 0x14)
+ if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
+ u64 val;
+
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
+ if (!rdmsrl_amd_safe(0xc001100d, &val)) {
+ val &= ~(1ULL << 32);
+ wrmsrl_amd_safe(0xc001100d, val);
+ }
+ }
+
}
if (c->x86 == 0x10 || c->x86 == 0x11)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+ /* get apicid instead of initial apic id from cpuid */
+ c->apicid = hard_smp_processor_id();
#else
/*
@@ -494,27 +570,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
* benefit in doing so.
*/
if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
- printk(KERN_DEBUG "tseg: %010llx\n", tseg);
- if ((tseg>>PMD_SHIFT) <
+ printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+ if ((tseg>>PMD_SHIFT) <
(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
- ((tseg>>PMD_SHIFT) <
+ ((tseg>>PMD_SHIFT) <
(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
- (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
- set_memory_4k((unsigned long)__va(tseg), 1);
+ (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
+ set_memory_4k((unsigned long)__va(tseg), 1);
}
}
#endif
}
#ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
+ unsigned int size)
{
/* AMD errata T13 (order #21922) */
if ((c->x86 == 6)) {
- if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */
+ /* Duron Rev A0 */
+ if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
+ /* Tbird rev A1/A2 */
if (c->x86_model == 4 &&
- (c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */
+ (c->x86_mask == 0 || c->x86_mask == 1))
size = 256;
}
return size;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c8e315f1aa83..01a265212395 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -81,7 +81,7 @@ static void __init check_fpu(void)
boot_cpu_data.fdiv_bug = fdiv_bug;
if (boot_cpu_data.fdiv_bug)
- printk("Hmm, FPU with FDIV bug.\n");
+ printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
}
static void __init check_hlt(void)
@@ -98,7 +98,7 @@ static void __init check_hlt(void)
halt();
halt();
halt();
- printk("OK.\n");
+ printk(KERN_CONT "OK.\n");
}
/*
@@ -122,9 +122,9 @@ static void __init check_popad(void)
* CPU hard. Too bad.
*/
if (res != 12345678)
- printk("Buggy.\n");
+ printk(KERN_CONT "Buggy.\n");
else
- printk("OK.\n");
+ printk(KERN_CONT "OK.\n");
#endif
}
@@ -156,7 +156,7 @@ void __init check_bugs(void)
{
identify_boot_cpu();
#ifndef CONFIG_SMP
- printk("CPU: ");
+ printk(KERN_INFO "CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
check_config();
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 9a3ed0649d4e..04f0fe5af83e 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -15,7 +15,7 @@ void __init check_bugs(void)
{
identify_boot_cpu();
#if !defined(CONFIG_SMP)
- printk("CPU: ");
+ printk(KERN_INFO "CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
alternative_instructions();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5ce60a88027b..55a6abe40394 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -18,8 +18,8 @@
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/sections.h>
-#include <asm/topology.h>
-#include <asm/cpumask.h>
+#include <linux/topology.h>
+#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/proto.h>
@@ -28,13 +28,13 @@
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mtrr.h>
-#include <asm/numa.h>
+#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
-#include <asm/smp.h>
+#include <linux/smp.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
@@ -94,45 +94,45 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
* TLS descriptors are currently at a different place compared to i386.
* Hopefully nobody expects them at a fixed place (Wine?)
*/
- [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
- [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
+ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
- [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
- [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
- [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
- [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+ [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
/*
* Segments used for calling PnP BIOS have byte granularity.
 * Their code and data segments have fixed 64k limits,
* the transfer segment sizes are set at run time.
*/
/* 32-bit code */
- [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
+ [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
/* 16-bit code */
- [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
+ [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
/* 16-bit data */
- [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
+ [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
/* 16-bit data */
- [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
+ [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
/* 16-bit data */
- [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
+ [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
/*
* The APM segments have byte granularity and their bases
* are set at run time. All have 64k limits.
*/
/* 32-bit code */
- [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+ [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
/* 16-bit code */
- [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+ [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
/* data */
- [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+ [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
- [GDT_ENTRY_ESPFIX_SS] = { { { 0x0000ffff, 0x00cf9200 } } },
- [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+ [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+ [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
GDT_STACK_CANARY_INIT
#endif
} };
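
The GDT conversion above replaces raw { low, high } dword pairs with GDT_ENTRY_INIT(flags, base, limit). As a sanity check, this standalone program re-derives the packing (field positions follow the usual x86 descriptor layout; the in-kernel macro may differ in spelling) and reproduces the old 0x0000ffff/0x00cf9b00 pair for the kernel code segment:

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t gdt_entry(uint16_t flags, uint32_t base, uint32_t limit)
	{
		return	((uint64_t)(base  & 0xff000000) << 32) |	/* base 31:24     */
			((uint64_t)(flags & 0x0000f0ff) << 40) |	/* access + flags */
			((uint64_t)(limit & 0x000f0000) << 32) |	/* limit 19:16    */
			((uint64_t)(base  & 0x00ffffff) << 16) |	/* base 23:0      */
			((uint64_t)(limit & 0x0000ffff));		/* limit 15:0     */
	}

	int main(void)
	{
		uint64_t d = gdt_entry(0xc09b, 0, 0xfffff);	/* GDT_ENTRY_KERNEL_CS */

		printf("0x%08x 0x%08x\n", (uint32_t)d, (uint32_t)(d >> 32));
		return 0;	/* prints: 0x0000ffff 0x00cf9b00 */
	}
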
@@ -982,7 +982,7 @@ static __init int setup_disablecpuid(char *arg)
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
-struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
+struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE);
@@ -1043,7 +1043,7 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist);
#else /* CONFIG_X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
-DEFINE_PER_CPU(unsigned long, stack_canary);
+DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/* Make sure %fs and %gs are initialized properly in idle threads */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 593171e967ef..19807b89f058 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -3,10 +3,10 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/dma.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
-#include <asm/timer.h>
+#include <linux/timer.h>
#include <asm/pci-direct.h>
#include <asm/tsc.h>
@@ -282,7 +282,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
* The 5510/5520 companion chips have a funky PIT.
*/
if (vendor == PCI_VENDOR_ID_CYRIX &&
- (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520))
+ (device == PCI_DEVICE_ID_CYRIX_5510 ||
+ device == PCI_DEVICE_ID_CYRIX_5520))
mark_tsc_unstable("cyrix 5510/5520 detected");
}
#endif
@@ -299,7 +300,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
* ? : 0x7x
* GX1 : 0x8x GX1 datasheet 56
*/
- if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f))
+ if ((0x30 <= dir1 && dir1 <= 0x6f) ||
+ (0x80 <= dir1 && dir1 <= 0x8f))
geode_configure();
return;
} else { /* MediaGX */
@@ -427,9 +429,12 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
local_irq_save(flags);
ccr3 = getCx86(CX86_CCR3);
- setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
+ /* enable MAPEN */
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
+ /* enable cpuid */
+ setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
+ /* disable MAPEN */
+ setCx86(CX86_CCR3, ccr3);
local_irq_restore(flags);
}
}
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index fb5b86af0b01..93ba8eeb100a 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -28,11 +28,10 @@
static inline void __cpuinit
detect_hypervisor_vendor(struct cpuinfo_x86 *c)
{
- if (vmware_platform()) {
+ if (vmware_platform())
c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
- } else {
+ else
c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
- }
}
unsigned long get_hypervisor_tsc_freq(void)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3260ab044996..80a722a071b5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -7,17 +7,17 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
-#include <asm/uaccess.h>
#include <asm/ds.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#ifdef CONFIG_X86_64
-#include <asm/topology.h>
+#include <linux/topology.h>
#include <asm/numa_64.h>
#endif
@@ -174,7 +174,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_F00F_BUG
/*
* All current models of Pentium and Pentium with MMX technology CPUs
- * have the F0 0F bug, which lets nonprivileged users lock up the system.
+ * have the F0 0F bug, which lets nonprivileged users lock up the
+ * system.
* Note that the workaround only should be initialized once...
*/
c->f00f_bug = 0;
@@ -207,7 +208,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
- wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+ wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
}
}
@@ -283,7 +284,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
/* Intel has a non-standard dependency on %ecx for this CPUID level. */
cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
if (eax & 0x1f)
- return ((eax >> 26) + 1);
+ return (eax >> 26) + 1;
else
return 1;
}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 789efe217e1a..804c40e2bc3e 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -3,7 +3,7 @@
*
* Changes:
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
- * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
* Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
*/
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <asm/processor.h>
-#include <asm/smp.h>
+#include <linux/smp.h>
#include <asm/k8.h>
#define LVL_1_INST 1
@@ -25,14 +25,15 @@
#define LVL_3 4
#define LVL_TRACE 5
-struct _cache_table
-{
+struct _cache_table {
unsigned char descriptor;
char cache_type;
short size;
};
-/* all the cache descriptor types we care about (no TLB or trace cache entries) */
+/* All the cache descriptor types we care about (no TLB or
+ trace cache entries) */
+
static const struct _cache_table __cpuinitconst cache_table[] =
{
{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
@@ -105,8 +106,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
};
-enum _cache_type
-{
+enum _cache_type {
CACHE_TYPE_NULL = 0,
CACHE_TYPE_DATA = 1,
CACHE_TYPE_INST = 2,
@@ -170,31 +170,31 @@ unsigned short num_cache_leaves;
Maybe later */
union l1_cache {
struct {
- unsigned line_size : 8;
- unsigned lines_per_tag : 8;
- unsigned assoc : 8;
- unsigned size_in_kb : 8;
+ unsigned line_size:8;
+ unsigned lines_per_tag:8;
+ unsigned assoc:8;
+ unsigned size_in_kb:8;
};
unsigned val;
};
union l2_cache {
struct {
- unsigned line_size : 8;
- unsigned lines_per_tag : 4;
- unsigned assoc : 4;
- unsigned size_in_kb : 16;
+ unsigned line_size:8;
+ unsigned lines_per_tag:4;
+ unsigned assoc:4;
+ unsigned size_in_kb:16;
};
unsigned val;
};
union l3_cache {
struct {
- unsigned line_size : 8;
- unsigned lines_per_tag : 4;
- unsigned assoc : 4;
- unsigned res : 2;
- unsigned size_encoded : 14;
+ unsigned line_size:8;
+ unsigned lines_per_tag:4;
+ unsigned assoc:4;
+ unsigned res:2;
+ unsigned size_encoded:14;
};
unsigned val;
};
@@ -241,7 +241,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
case 0:
if (!l1->val)
return;
- assoc = l1->assoc;
+ assoc = assocs[l1->assoc];
line_size = l1->line_size;
lines_per_tag = l1->lines_per_tag;
size_in_kb = l1->size_in_kb;
@@ -249,7 +249,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
case 2:
if (!l2.val)
return;
- assoc = l2.assoc;
+ assoc = assocs[l2.assoc];
line_size = l2.line_size;
lines_per_tag = l2.lines_per_tag;
/* cpu_data has errata corrections for K7 applied */
@@ -258,10 +258,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
case 3:
if (!l3.val)
return;
- assoc = l3.assoc;
+ assoc = assocs[l3.assoc];
line_size = l3.line_size;
lines_per_tag = l3.lines_per_tag;
size_in_kb = l3.size_encoded * 512;
+ if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
+ size_in_kb = size_in_kb >> 1;
+ assoc = assoc >> 1;
+ }
break;
default:
return;
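
For multi-node (AMD_DCM) parts, the L3 figures computed above (size_in_kb = size_encoded * 512, plus the associativity lookup) are halved so that each internal node reports only its own share of the cache. A worked example with hypothetical values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int size_encoded = 24;			/* hypothetical CPUID field   */
		unsigned int size_in_kb = size_encoded * 512;	/* 12288 KB = 12 MB shared L3 */
		unsigned int assoc = 48;			/* hypothetical ways          */

		/* Multi-node case: each node advertises half of each figure. */
		size_in_kb >>= 1;
		assoc >>= 1;
		printf("%u KB, %u-way\n", size_in_kb, assoc);	/* prints: 6144 KB, 24-way */
		return 0;
	}
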
@@ -270,18 +274,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
eax->split.is_self_initializing = 1;
eax->split.type = types[leaf];
eax->split.level = levels[leaf];
- if (leaf == 3)
- eax->split.num_threads_sharing =
- current_cpu_data.x86_max_cores - 1;
- else
- eax->split.num_threads_sharing = 0;
+ eax->split.num_threads_sharing = 0;
eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
- if (assoc == 0xf)
+ if (assoc == 0xffff)
eax->split.is_fully_associative = 1;
ebx->split.coherency_line_size = line_size - 1;
- ebx->split.ways_of_associativity = assocs[assoc] - 1;
+ ebx->split.ways_of_associativity = assoc - 1;
ebx->split.physical_line_partition = lines_per_tag - 1;
ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
(ebx->split.ways_of_associativity + 1) - 1;
@@ -350,7 +350,8 @@ static int __cpuinit find_num_cache_leaves(void)
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
- unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+ /* Cache sizes */
+ unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
@@ -377,8 +378,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
retval = cpuid4_cache_lookup_regs(i, &this_leaf);
if (retval >= 0) {
- switch(this_leaf.eax.split.level) {
- case 1:
+ switch (this_leaf.eax.split.level) {
+ case 1:
if (this_leaf.eax.split.type ==
CACHE_TYPE_DATA)
new_l1d = this_leaf.size/1024;
@@ -386,19 +387,20 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
CACHE_TYPE_INST)
new_l1i = this_leaf.size/1024;
break;
- case 2:
+ case 2:
new_l2 = this_leaf.size/1024;
num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
index_msb = get_count_order(num_threads_sharing);
l2_id = c->apicid >> index_msb;
break;
- case 3:
+ case 3:
new_l3 = this_leaf.size/1024;
num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
- index_msb = get_count_order(num_threads_sharing);
+ index_msb = get_count_order(
+ num_threads_sharing);
l3_id = c->apicid >> index_msb;
break;
- default:
+ default:
break;
}
}
@@ -421,22 +423,21 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
/* Number of times to iterate */
n = cpuid_eax(2) & 0xFF;
- for ( i = 0 ; i < n ; i++ ) {
+ for (i = 0 ; i < n ; i++) {
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
/* If bit 31 is set, this is an unknown format */
- for ( j = 0 ; j < 3 ; j++ ) {
- if (regs[j] & (1 << 31)) regs[j] = 0;
- }
+ for (j = 0 ; j < 3 ; j++)
+ if (regs[j] & (1 << 31))
+ regs[j] = 0;
/* Byte 0 is level count, not a descriptor */
- for ( j = 1 ; j < 16 ; j++ ) {
+ for (j = 1 ; j < 16 ; j++) {
unsigned char des = dp[j];
unsigned char k = 0;
/* look up this descriptor in the table */
- while (cache_table[k].descriptor != 0)
- {
+ while (cache_table[k].descriptor != 0) {
if (cache_table[k].descriptor == des) {
if (only_trace && cache_table[k].cache_type != LVL_TRACE)
break;
@@ -488,14 +489,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
}
if (trace)
- printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
- else if ( l1i )
- printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+ printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
+ else if (l1i)
+ printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
if (l1d)
- printk(", L1 D cache: %dK\n", l1d);
+ printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
else
- printk("\n");
+ printk(KERN_CONT "\n");
if (l2)
printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
@@ -522,6 +523,18 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
int index_msb, i;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
+ struct cpuinfo_x86 *d;
+ for_each_online_cpu(i) {
+ if (!per_cpu(cpuid4_info, i))
+ continue;
+ d = &cpu_data(i);
+ this_leaf = CPUID4_INFO_IDX(i, index);
+ cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
+ d->llc_shared_map);
+ }
+ return;
+ }
this_leaf = CPUID4_INFO_IDX(cpu, index);
num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
@@ -558,8 +571,13 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
}
}
#else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+}
+
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+{
+}
#endif
static void __cpuinit free_cache_attributes(unsigned int cpu)
@@ -645,7 +663,7 @@ static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
static ssize_t show_##file_name \
(struct _cpuid4_info *this_leaf, char *buf) \
{ \
- return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
+ return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
show_one_plus(level, eax.split.level, 0);
@@ -656,7 +674,7 @@ show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
- return sprintf (buf, "%luK\n", this_leaf->size / 1024);
+ return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
@@ -669,7 +687,7 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
const struct cpumask *mask;
mask = to_cpumask(this_leaf->shared_cpu_map);
- n = type?
+ n = type ?
cpulist_scnprintf(buf, len-2, mask) :
cpumask_scnprintf(buf, len-2, mask);
buf[n++] = '\n';
@@ -800,7 +818,7 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
show_cache_disable_1, store_cache_disable_1);
-static struct attribute * default_attrs[] = {
+static struct attribute *default_attrs[] = {
&type.attr,
&level.attr,
&coherency_line_size.attr,
@@ -815,7 +833,7 @@ static struct attribute * default_attrs[] = {
NULL
};
-static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct _cache_attr *fattr = to_attr(attr);
struct _index_kobject *this_leaf = to_object(kobj);
@@ -828,8 +846,8 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
return ret;
}
-static ssize_t store(struct kobject * kobj, struct attribute * attr,
- const char * buf, size_t count)
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
struct _cache_attr *fattr = to_attr(attr);
struct _index_kobject *this_leaf = to_object(kobj);
@@ -883,7 +901,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
goto err_out;
per_cpu(index_kobject, cpu) = kzalloc(
- sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
+ sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
if (unlikely(per_cpu(index_kobject, cpu) == NULL))
goto err_out;
@@ -917,7 +935,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
}
for (i = 0; i < num_cache_leaves; i++) {
- this_object = INDEX_KOBJECT_PTR(cpu,i);
+ this_object = INDEX_KOBJECT_PTR(cpu, i);
this_object->cpu = cpu;
this_object->index = i;
retval = kobject_init_and_add(&(this_object->kobj),
@@ -925,9 +943,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
per_cpu(cache_kobject, cpu),
"index%1lu", i);
if (unlikely(retval)) {
- for (j = 0; j < i; j++) {
- kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
- }
+ for (j = 0; j < i; j++)
+ kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
kobject_put(per_cpu(cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
return retval;
@@ -952,7 +969,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
for (i = 0; i < num_cache_leaves; i++)
- kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+ kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
kobject_put(per_cpu(cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
}
@@ -977,8 +994,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
-{
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
.notifier_call = cacheinfo_cpu_callback,
};
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ddae21620bda..1fecba404fd8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -489,12 +489,14 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
int i, err = 0;
struct threshold_bank *b = NULL;
char name[32];
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
sprintf(name, "threshold_bank%i", bank);
#ifdef CONFIG_SMP
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
- i = cpumask_first(cpu_core_mask(cpu));
+ i = cpumask_first(c->llc_shared_map);
/* first core not up yet */
if (cpu_data(i).cpu_core_id)
@@ -514,7 +516,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
- cpumask_copy(b->cpus, cpu_core_mask(cpu));
+ cpumask_copy(b->cpus, c->llc_shared_map);
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
@@ -539,7 +541,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
cpumask_setall(b->cpus);
#else
- cpumask_copy(b->cpus, cpu_core_mask(cpu));
+ cpumask_copy(b->cpus, c->llc_shared_map);
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
index ee2331b0e58f..33af14110dfd 100644
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -7,15 +7,15 @@
static void
amd_get_mtrr(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type * type)
+ unsigned long *size, mtrr_type *type)
{
unsigned long low, high;
rdmsr(MSR_K6_UWCCR, low, high);
- /* Upper dword is region 1, lower is region 0 */
+ /* Upper dword is region 1, lower is region 0 */
if (reg == 1)
low = high;
- /* The base masks off on the right alignment */
+ /* The base masks off on the right alignment */
*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
*type = 0;
if (low & 1)
@@ -27,74 +27,81 @@ amd_get_mtrr(unsigned int reg, unsigned long *base,
return;
}
/*
- * This needs a little explaining. The size is stored as an
- * inverted mask of bits of 128K granularity 15 bits long offset
- * 2 bits
+ * This needs a little explaining. The size is stored as an
+ * inverted mask of bits, of 128K granularity, 15 bits long, offset
+ * by 2 bits.
*
- * So to get a size we do invert the mask and add 1 to the lowest
- * mask bit (4 as its 2 bits in). This gives us a size we then shift
- * to turn into 128K blocks
+ * So to get a size we invert the mask and add 1 at the lowest
+ * mask bit (4, as it's 2 bits in). This gives us a size we then
+ * shift to turn into 128K blocks.
*
- * eg 111 1111 1111 1100 is 512K
+ * eg 111 1111 1111 1100 is 512K
*
- * invert 000 0000 0000 0011
- * +1 000 0000 0000 0100
- * *128K ...
+ * invert 000 0000 0000 0011
+ * +1 000 0000 0000 0100
+ * *128K ...
*/
low = (~low) & 0x1FFFC;
*size = (low + 4) << (15 - PAGE_SHIFT);
- return;
}
-static void amd_set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
-/* [SUMMARY] Set variable MTRR register on the local CPU.
- <reg> The register to set.
- <base> The base address of the region.
- <size> The size of the region. If this is 0 the region is disabled.
- <type> The type of the region.
- [RETURNS] Nothing.
-*/
+/**
+ * amd_set_mtrr - Set variable MTRR register on the local CPU.
+ *
+ * @reg The register to set.
+ * @base The base address of the region.
+ * @size The size of the region. If this is 0 the region is disabled.
+ * @type The type of the region.
+ *
+ * Returns nothing.
+ */
+static void
+amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
u32 regs[2];
/*
- * Low is MTRR0 , High MTRR 1
+ * Low is MTRR0, High MTRR 1
*/
rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
/*
- * Blank to disable
+ * Blank to disable
*/
- if (size == 0)
+ if (size == 0) {
regs[reg] = 0;
- else
- /* Set the register to the base, the type (off by one) and an
- inverted bitmask of the size The size is the only odd
- bit. We are fed say 512K We invert this and we get 111 1111
- 1111 1011 but if you subtract one and invert you get the
- desired 111 1111 1111 1100 mask
-
- But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */
+ } else {
+ /*
+		 * Set the register to the base, the type (off by one) and an
+		 * inverted bitmask of the size. The size is the only odd
+		 * bit. We are fed, say, 512K; we invert this and we get 111 1111
+		 * 1111 1011, but if you subtract one and invert you get the
+		 * desired 111 1111 1111 1100 mask.
+ *
+ * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
+ */
regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
| (base << PAGE_SHIFT) | (type + 1);
+ }
/*
- * The writeback rule is quite specific. See the manual. Its
- * disable local interrupts, write back the cache, set the mtrr
+	 * The writeback rule is quite specific. See the manual. It is:
+	 * disable local interrupts, write back the cache, set the mtrr.
*/
wbinvd();
wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
}
-static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+static int
+amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
- /* Apply the K6 block alignment and size rules
- In order
- o Uncached or gathering only
- o 128K or bigger block
- o Power of 2 block
- o base suitably aligned to the power
- */
+ /*
+ * Apply the K6 block alignment and size rules
+ * In order
+ * o Uncached or gathering only
+ * o 128K or bigger block
+ * o Power of 2 block
+ * o base suitably aligned to the power
+ */
if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
|| (size & ~(size - 1)) - size || (base & (size - 1)))
return -EINVAL;
@@ -115,5 +122,3 @@ int __init amd_init_mtrr(void)
set_mtrr_ops(&amd_mtrr_ops);
return 0;
}
-
-//arch_initcall(amd_mtrr_init);
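
The comments reflowed above describe the K6 UWCCR size field as an inverted mask of 128K blocks, 15 bits wide at bit offset 2. A standalone decode of the 512K example from the comment, using a hypothetical register value:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical UWCCR low dword: size field 111 1111 1111 1100 at bit 2. */
		unsigned long low = 0x1fff0;
		unsigned long inv, size_kb;

		inv = (~low) & 0x1fffc;			/* invert, keep bits 2..16 -> 0xc */
		size_kb = ((inv + 4) >> 2) * 128;	/* +1 at bit 2, count 128K blocks */

		printf("%lu KB\n", size_kb);		/* prints: 512 KB */
		return 0;
	}
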
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
index cb9aa3a7a7ab..de89f14eff3a 100644
--- a/arch/x86/kernel/cpu/mtrr/centaur.c
+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
@@ -1,7 +1,9 @@
#include <linux/init.h>
#include <linux/mm.h>
+
#include <asm/mtrr.h>
#include <asm/msr.h>
+
#include "mtrr.h"
static struct {
@@ -12,25 +14,25 @@ static struct {
static u8 centaur_mcr_reserved;
static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */
-/*
- * Report boot time MCR setups
+/**
+ * centaur_get_free_region - Get a free MTRR.
+ *
+ * @base: The starting (base) address of the region.
+ * @size: The size (in bytes) of the region.
+ *
+ * Returns: the index of the region on success, else -1 on error.
*/
-
static int
centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-/* [SUMMARY] Get a free MTRR.
- <base> The starting (base) address of the region.
- <size> The size (in bytes) of the region.
- [RETURNS] The index of the region on success, else -1 on error.
-*/
{
- int i, max;
- mtrr_type ltype;
unsigned long lbase, lsize;
+ mtrr_type ltype;
+ int i, max;
max = num_var_ranges;
if (replace_reg >= 0 && replace_reg < max)
return replace_reg;
+
for (i = 0; i < max; ++i) {
if (centaur_mcr_reserved & (1 << i))
continue;
@@ -38,11 +40,14 @@ centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
if (lsize == 0)
return i;
}
+
return -ENOSPC;
}
-void
-mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
+/*
+ * Report boot time MCR setups
+ */
+void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
centaur_mcr[mcr].low = lo;
centaur_mcr[mcr].high = hi;
@@ -54,33 +59,35 @@ centaur_get_mcr(unsigned int reg, unsigned long *base,
{
*base = centaur_mcr[reg].high >> PAGE_SHIFT;
*size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
- *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */
+ *type = MTRR_TYPE_WRCOMB; /* write-combining */
+
if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
*type = MTRR_TYPE_UNCACHABLE;
if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
*type = MTRR_TYPE_WRBACK;
if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
*type = MTRR_TYPE_WRBACK;
-
}
-static void centaur_set_mcr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
+static void
+centaur_set_mcr(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type)
{
unsigned long low, high;
if (size == 0) {
- /* Disable */
+ /* Disable */
high = low = 0;
} else {
high = base << PAGE_SHIFT;
- if (centaur_mcr_type == 0)
- low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */
- else {
+ if (centaur_mcr_type == 0) {
+ /* Only support write-combining... */
+ low = -size << PAGE_SHIFT | 0x1f;
+ } else {
if (type == MTRR_TYPE_UNCACHABLE)
- low = -size << PAGE_SHIFT | 0x02; /* NC */
+ low = -size << PAGE_SHIFT | 0x02; /* NC */
else
- low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */
+ low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */
}
}
centaur_mcr[reg].high = high;
@@ -88,118 +95,16 @@ static void centaur_set_mcr(unsigned int reg, unsigned long base,
wrmsr(MSR_IDT_MCR0 + reg, low, high);
}
-#if 0
-/*
- * Initialise the later (saner) Winchip MCR variant. In this version
- * the BIOS can pass us the registers it has used (but not their values)
- * and the control register is read/write
- */
-
-static void __init
-centaur_mcr1_init(void)
-{
- unsigned i;
- u32 lo, hi;
-
- /* Unfortunately, MCR's are read-only, so there is no way to
- * find out what the bios might have done.
- */
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */
- lo &= ~0x1C0; /* clear key */
- lo |= 0x040; /* set key to 1 */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */
- }
-
- centaur_mcr_type = 1;
-
- /*
- * Clear any unconfigured MCR's.
- */
-
- for (i = 0; i < 8; ++i) {
- if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
- if (!(lo & (1 << (9 + i))))
- wrmsr(MSR_IDT_MCR0 + i, 0, 0);
- else
- /*
- * If the BIOS set up an MCR we cannot see it
- * but we don't wish to obliterate it
- */
- centaur_mcr_reserved |= (1 << i);
- }
- }
- /*
- * Throw the main write-combining switch...
- * However if OOSTORE is enabled then people have already done far
- * cleverer things and we should behave.
- */
-
- lo |= 15; /* Write combine enables */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-/*
- * Initialise the original winchip with read only MCR registers
- * no used bitmask for the BIOS to pass on and write only control
- */
-
-static void __init
-centaur_mcr0_init(void)
-{
- unsigned i;
-
- /* Unfortunately, MCR's are read-only, so there is no way to
- * find out what the bios might have done.
- */
-
- /* Clear any unconfigured MCR's.
- * This way we are sure that the centaur_mcr array contains the actual
- * values. The disadvantage is that any BIOS tweaks are thus undone.
- *
- */
- for (i = 0; i < 8; ++i) {
- if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
- wrmsr(MSR_IDT_MCR0 + i, 0, 0);
- }
-
- wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */
-}
-
-/*
- * Initialise Winchip series MCR registers
- */
-
-static void __init
-centaur_mcr_init(void)
-{
- struct set_mtrr_context ctxt;
-
- set_mtrr_prepare_save(&ctxt);
- set_mtrr_cache_disable(&ctxt);
-
- if (boot_cpu_data.x86_model == 4)
- centaur_mcr0_init();
- else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
- centaur_mcr1_init();
-
- set_mtrr_done(&ctxt);
-}
-#endif
-
-static int centaur_validate_add_page(unsigned long base,
- unsigned long size, unsigned int type)
+static int
+centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
/*
- * FIXME: Winchip2 supports uncached
+ * FIXME: Winchip2 supports uncached
*/
- if (type != MTRR_TYPE_WRCOMB &&
+ if (type != MTRR_TYPE_WRCOMB &&
(centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
- printk(KERN_WARNING
- "mtrr: only write-combining%s supported\n",
- centaur_mcr_type ? " and uncacheable are"
- : " is");
+ pr_warning("mtrr: only write-combining%s supported\n",
+ centaur_mcr_type ? " and uncacheable are" : " is");
return -EINVAL;
}
return 0;
@@ -207,7 +112,6 @@ static int centaur_validate_add_page(unsigned long base,
static struct mtrr_ops centaur_mtrr_ops = {
.vendor = X86_VENDOR_CENTAUR,
-// .init = centaur_mcr_init,
.set = centaur_set_mcr,
.get = centaur_get_mcr,
.get_free_region = centaur_get_free_region,
@@ -220,5 +124,3 @@ int __init centaur_init_mtrr(void)
set_mtrr_ops(&centaur_mtrr_ops);
return 0;
}
-
-//arch_initcall(centaur_init_mtrr);
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 1d584a18a50d..315738c74aad 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -1,51 +1,75 @@
-/* MTRR (Memory Type Range Register) cleanup
-
- Copyright (C) 2009 Yinghai Lu
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
+/*
+ * MTRR (Memory Type Range Register) cleanup
+ *
+ * Copyright (C) 2009 Yinghai Lu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
-#include <linux/mutex.h>
#include <linux/sort.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/kvm_para.h>
+#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
#include <asm/msr.h>
-#include <asm/kvm_para.h>
-#include "mtrr.h"
-/* should be related to MTRR_VAR_RANGES nums */
-#define RANGE_NUM 256
+#include "mtrr.h"
struct res_range {
- unsigned long start;
- unsigned long end;
+ unsigned long start;
+ unsigned long end;
+};
+
+struct var_mtrr_range_state {
+ unsigned long base_pfn;
+ unsigned long size_pfn;
+ mtrr_type type;
+};
+
+struct var_mtrr_state {
+ unsigned long range_startk;
+ unsigned long range_sizek;
+ unsigned long chunk_sizek;
+ unsigned long gran_sizek;
+ unsigned int reg;
};
+/* Should be related to MTRR_VAR_RANGES nums */
+#define RANGE_NUM 256
+
+static struct res_range __initdata range[RANGE_NUM];
+static int __initdata nr_range;
+
+static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
+
+static int __initdata debug_print;
+#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
+
+
static int __init
-add_range(struct res_range *range, int nr_range, unsigned long start,
- unsigned long end)
+add_range(struct res_range *range, int nr_range,
+ unsigned long start, unsigned long end)
{
- /* out of slots */
+ /* Out of slots: */
if (nr_range >= RANGE_NUM)
return nr_range;
@@ -58,12 +82,12 @@ add_range(struct res_range *range, int nr_range, unsigned long start,
}
static int __init
-add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
- unsigned long end)
+add_range_with_merge(struct res_range *range, int nr_range,
+ unsigned long start, unsigned long end)
{
int i;
- /* try to merge it with old one */
+ /* Try to merge it with old one: */
for (i = 0; i < nr_range; i++) {
unsigned long final_start, final_end;
unsigned long common_start, common_end;
@@ -84,7 +108,7 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
return nr_range;
}
- /* need to add that */
+ /* Need to add it: */
return add_range(range, nr_range, start, end);
}
@@ -117,7 +141,7 @@ subtract_range(struct res_range *range, unsigned long start, unsigned long end)
}
if (start > range[j].start && end < range[j].end) {
- /* find the new spare */
+ /* Find the new spare: */
for (i = 0; i < RANGE_NUM; i++) {
if (range[i].end == 0)
break;
@@ -146,14 +170,8 @@ static int __init cmp_range(const void *x1, const void *x2)
return start1 - start2;
}
-struct var_mtrr_range_state {
- unsigned long base_pfn;
- unsigned long size_pfn;
- mtrr_type type;
-};
-
-static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
-static int __initdata debug_print;
+#define BIOS_BUG_MSG KERN_WARNING \
+ "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
static int __init
x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
@@ -180,7 +198,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
range[i].start, range[i].end + 1);
}
- /* take out UC ranges */
+ /* Take out UC ranges: */
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
if (type != MTRR_TYPE_UNCACHABLE &&
@@ -193,9 +211,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
(mtrr_state.enabled & 1)) {
/* Var MTRR contains UC entry below 1M? Skip it: */
- printk(KERN_WARNING "WARNING: BIOS bug: VAR MTRR %d "
- "contains strange UC entry under 1M, check "
- "with your system vendor!\n", i);
+ printk(BIOS_BUG_MSG, i);
if (base + size <= (1<<(20-PAGE_SHIFT)))
continue;
size -= (1<<(20-PAGE_SHIFT)) - base;
@@ -237,17 +253,13 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
return nr_range;
}
-static struct res_range __initdata range[RANGE_NUM];
-static int __initdata nr_range;
-
#ifdef CONFIG_MTRR_SANITIZER
static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
{
- unsigned long sum;
+ unsigned long sum = 0;
int i;
- sum = 0;
for (i = 0; i < nr_range; i++)
sum += range[i].end + 1 - range[i].start;
@@ -278,17 +290,9 @@ static int __init mtrr_cleanup_debug_setup(char *str)
}
early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
-struct var_mtrr_state {
- unsigned long range_startk;
- unsigned long range_sizek;
- unsigned long chunk_sizek;
- unsigned long gran_sizek;
- unsigned int reg;
-};
-
static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
- unsigned char type, unsigned int address_bits)
+ unsigned char type, unsigned int address_bits)
{
u32 base_lo, base_hi, mask_lo, mask_hi;
u64 base, mask;
@@ -301,7 +305,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
mask = (1ULL << address_bits) - 1;
mask &= ~((((u64)sizek) << 10) - 1);
- base = ((u64)basek) << 10;
+ base = ((u64)basek) << 10;
base |= type;
mask |= 0x800;
@@ -317,15 +321,14 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
static void __init
save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
- unsigned char type)
+ unsigned char type)
{
range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
range_state[reg].type = type;
}
-static void __init
-set_var_mtrr_all(unsigned int address_bits)
+static void __init set_var_mtrr_all(unsigned int address_bits)
{
unsigned long basek, sizek;
unsigned char type;
@@ -342,11 +345,11 @@ set_var_mtrr_all(unsigned int address_bits)
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
- char factor;
unsigned long base = sizek;
+ char factor;
if (base & ((1<<10) - 1)) {
- /* not MB alignment */
+ /* Not MB-aligned: */
factor = 'K';
} else if (base & ((1<<20) - 1)) {
factor = 'M';
@@ -372,11 +375,12 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
unsigned long max_align, align;
unsigned long sizek;
- /* Compute the maximum size I can make a range */
+ /* Compute the maximum size with which we can make a range: */
if (range_startk)
max_align = ffs(range_startk) - 1;
else
max_align = 32;
+
align = fls(range_sizek) - 1;
if (align > max_align)
align = max_align;
@@ -386,11 +390,10 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
char start_factor = 'K', size_factor = 'K';
unsigned long start_base, size_base;
- start_base = to_size_factor(range_startk,
- &start_factor),
- size_base = to_size_factor(sizek, &size_factor),
+ start_base = to_size_factor(range_startk, &start_factor);
+ size_base = to_size_factor(sizek, &size_factor);
- printk(KERN_DEBUG "Setting variable MTRR %d, "
+ Dprintk("Setting variable MTRR %d, "
"base: %ld%cB, range: %ld%cB, type %s\n",
reg, start_base, start_factor,
size_base, size_factor,
@@ -425,10 +428,11 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
chunk_sizek = state->chunk_sizek;
gran_sizek = state->gran_sizek;
- /* align with gran size, prevent small block used up MTRRs */
+ /* Align with gran size, prevent a small block from using up MTRRs: */
range_basek = ALIGN(state->range_startk, gran_sizek);
if ((range_basek > basek) && basek)
return second_sizek;
+
state->range_sizek -= (range_basek - state->range_startk);
range_sizek = ALIGN(state->range_sizek, gran_sizek);
@@ -439,22 +443,21 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
}
state->range_sizek = range_sizek;
- /* try to append some small hole */
+ /* Try to append some small hole: */
range0_basek = state->range_startk;
range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
- /* no increase */
+ /* No increase: */
if (range0_sizek == state->range_sizek) {
- if (debug_print)
- printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
- range0_basek<<10,
- (range0_basek + state->range_sizek)<<10);
+ Dprintk("rangeX: %016lx - %016lx\n",
+ range0_basek<<10,
+ (range0_basek + state->range_sizek)<<10);
state->reg = range_to_mtrr(state->reg, range0_basek,
state->range_sizek, MTRR_TYPE_WRBACK);
return 0;
}
- /* only cut back, when it is not the last */
+ /* Only cut back when it is not the last: */
if (sizek) {
while (range0_basek + range0_sizek > (basek + sizek)) {
if (range0_sizek >= chunk_sizek)
@@ -470,16 +473,16 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
second_try:
range_basek = range0_basek + range0_sizek;
- /* one hole in the middle */
+ /* One hole in the middle: */
if (range_basek > basek && range_basek <= (basek + sizek))
second_sizek = range_basek - basek;
if (range0_sizek > state->range_sizek) {
- /* one hole in middle or at end */
+ /* One hole in middle or at the end: */
hole_sizek = range0_sizek - state->range_sizek - second_sizek;
- /* hole size should be less than half of range0 size */
+ /* Hole size should be less than half of range0 size: */
if (hole_sizek >= (range0_sizek >> 1) &&
range0_sizek >= chunk_sizek) {
range0_sizek -= chunk_sizek;
@@ -491,32 +494,30 @@ second_try:
}
if (range0_sizek) {
- if (debug_print)
- printk(KERN_DEBUG "range0: %016lx - %016lx\n",
- range0_basek<<10,
- (range0_basek + range0_sizek)<<10);
+ Dprintk("range0: %016lx - %016lx\n",
+ range0_basek<<10,
+ (range0_basek + range0_sizek)<<10);
state->reg = range_to_mtrr(state->reg, range0_basek,
range0_sizek, MTRR_TYPE_WRBACK);
}
if (range0_sizek < state->range_sizek) {
- /* need to handle left over */
+ /* Need to handle the left-over range: */
range_sizek = state->range_sizek - range0_sizek;
- if (debug_print)
- printk(KERN_DEBUG "range: %016lx - %016lx\n",
- range_basek<<10,
- (range_basek + range_sizek)<<10);
+ Dprintk("range: %016lx - %016lx\n",
+ range_basek<<10,
+ (range_basek + range_sizek)<<10);
+
state->reg = range_to_mtrr(state->reg, range_basek,
range_sizek, MTRR_TYPE_WRBACK);
}
if (hole_sizek) {
hole_basek = range_basek - hole_sizek - second_sizek;
- if (debug_print)
- printk(KERN_DEBUG "hole: %016lx - %016lx\n",
- hole_basek<<10,
- (hole_basek + hole_sizek)<<10);
+ Dprintk("hole: %016lx - %016lx\n",
+ hole_basek<<10,
+ (hole_basek + hole_sizek)<<10);
state->reg = range_to_mtrr(state->reg, hole_basek,
hole_sizek, MTRR_TYPE_UNCACHABLE);
}
@@ -537,23 +538,23 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
basek = base_pfn << (PAGE_SHIFT - 10);
sizek = size_pfn << (PAGE_SHIFT - 10);
- /* See if I can merge with the last range */
+ /* See if I can merge with the last range: */
if ((basek <= 1024) ||
(state->range_startk + state->range_sizek == basek)) {
unsigned long endk = basek + sizek;
state->range_sizek = endk - state->range_startk;
return;
}
- /* Write the range mtrrs */
+ /* Write the range mtrrs: */
if (state->range_sizek != 0)
second_sizek = range_to_mtrr_with_hole(state, basek, sizek);
- /* Allocate an msr */
+ /* Allocate an msr: */
state->range_startk = basek + second_sizek;
state->range_sizek = sizek - second_sizek;
}
-/* mininum size of mtrr block that can take hole */
+/* Minimum size of mtrr block that can take a hole: */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);
static int __init parse_mtrr_chunk_size_opt(char *p)
@@ -565,7 +566,7 @@ static int __init parse_mtrr_chunk_size_opt(char *p)
}
early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);
-/* granity of mtrr of block */
+/* Granularity of an mtrr block: */
static u64 mtrr_gran_size __initdata;
static int __init parse_mtrr_gran_size_opt(char *p)
@@ -577,7 +578,7 @@ static int __init parse_mtrr_gran_size_opt(char *p)
}
early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);
-static int nr_mtrr_spare_reg __initdata =
+static unsigned long nr_mtrr_spare_reg __initdata =
CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;
static int __init parse_mtrr_spare_reg(char *arg)
@@ -586,7 +587,6 @@ static int __init parse_mtrr_spare_reg(char *arg)
nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
return 0;
}
-
early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
static int __init
@@ -594,8 +594,8 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
u64 chunk_size, u64 gran_size)
{
struct var_mtrr_state var_state;
- int i;
int num_reg;
+ int i;
var_state.range_startk = 0;
var_state.range_sizek = 0;
@@ -605,17 +605,18 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
memset(range_state, 0, sizeof(range_state));
- /* Write the range etc */
- for (i = 0; i < nr_range; i++)
+ /* Write the range: */
+ for (i = 0; i < nr_range; i++) {
set_var_mtrr_range(&var_state, range[i].start,
range[i].end - range[i].start + 1);
+ }
- /* Write the last range */
+ /* Write the last range: */
if (var_state.range_sizek != 0)
range_to_mtrr_with_hole(&var_state, 0, 0);
num_reg = var_state.reg;
- /* Clear out the extra MTRR's */
+ /* Clear out the extra MTRR's: */
while (var_state.reg < num_var_ranges) {
save_var_mtrr(var_state.reg, 0, 0, 0);
var_state.reg++;
@@ -625,11 +626,11 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
}
struct mtrr_cleanup_result {
- unsigned long gran_sizek;
- unsigned long chunk_sizek;
- unsigned long lose_cover_sizek;
- unsigned int num_reg;
- int bad;
+ unsigned long gran_sizek;
+ unsigned long chunk_sizek;
+ unsigned long lose_cover_sizek;
+ unsigned int num_reg;
+ int bad;
};
/*
@@ -645,10 +646,10 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM];
static void __init print_out_mtrr_range_state(void)
{
- int i;
char start_factor = 'K', size_factor = 'K';
unsigned long start_base, size_base;
mtrr_type type;
+ int i;
for (i = 0; i < num_var_ranges; i++) {
@@ -676,10 +677,10 @@ static int __init mtrr_need_cleanup(void)
int i;
mtrr_type type;
unsigned long size;
- /* extra one for all 0 */
+ /* Extra one for all 0: */
int num[MTRR_NUM_TYPES + 1];
- /* check entries number */
+ /* Check entries number: */
memset(num, 0, sizeof(num));
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
@@ -693,88 +694,86 @@ static int __init mtrr_need_cleanup(void)
num[type]++;
}
- /* check if we got UC entries */
+ /* Check if we got UC entries: */
if (!num[MTRR_TYPE_UNCACHABLE])
return 0;
- /* check if we only had WB and UC */
+ /* Check if we only had WB and UC: */
if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
- num_var_ranges - num[MTRR_NUM_TYPES])
+ num_var_ranges - num[MTRR_NUM_TYPES])
return 0;
return 1;
}
static unsigned long __initdata range_sums;
-static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
- unsigned long extra_remove_base,
- unsigned long extra_remove_size,
- int i)
+
+static void __init
+mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
+ unsigned long x_remove_base,
+ unsigned long x_remove_size, int i)
{
- int num_reg;
static struct res_range range_new[RANGE_NUM];
- static int nr_range_new;
unsigned long range_sums_new;
+ static int nr_range_new;
+ int num_reg;
- /* convert ranges to var ranges state */
- num_reg = x86_setup_var_mtrrs(range, nr_range,
- chunk_size, gran_size);
+ /* Convert ranges to var ranges state: */
+ num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
- /* we got new setting in range_state, check it */
+ /* We got new setting in range_state, check it: */
memset(range_new, 0, sizeof(range_new));
nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
- extra_remove_base, extra_remove_size);
+ x_remove_base, x_remove_size);
range_sums_new = sum_ranges(range_new, nr_range_new);
result[i].chunk_sizek = chunk_size >> 10;
result[i].gran_sizek = gran_size >> 10;
result[i].num_reg = num_reg;
+
if (range_sums < range_sums_new) {
- result[i].lose_cover_sizek =
- (range_sums_new - range_sums) << PSHIFT;
+ result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT;
result[i].bad = 1;
- } else
- result[i].lose_cover_sizek =
- (range_sums - range_sums_new) << PSHIFT;
+ } else {
+ result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT;
+ }
- /* double check it */
+ /* Double check it: */
if (!result[i].bad && !result[i].lose_cover_sizek) {
- if (nr_range_new != nr_range ||
- memcmp(range, range_new, sizeof(range)))
- result[i].bad = 1;
+ if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
+ result[i].bad = 1;
}
- if (!result[i].bad && (range_sums - range_sums_new <
- min_loss_pfn[num_reg])) {
- min_loss_pfn[num_reg] =
- range_sums - range_sums_new;
- }
+ if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg]))
+ min_loss_pfn[num_reg] = range_sums - range_sums_new;
}
static void __init mtrr_print_out_one_result(int i)
{
- char gran_factor, chunk_factor, lose_factor;
unsigned long gran_base, chunk_base, lose_base;
+ char gran_factor, chunk_factor, lose_factor;
gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
- printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
- result[i].bad ? "*BAD*" : " ",
- gran_base, gran_factor, chunk_base, chunk_factor);
- printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
- result[i].num_reg, result[i].bad ? "-" : "",
- lose_base, lose_factor);
+
+ pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
+ result[i].bad ? "*BAD*" : " ",
+ gran_base, gran_factor, chunk_base, chunk_factor);
+ pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n",
+ result[i].num_reg, result[i].bad ? "-" : "",
+ lose_base, lose_factor);
}
static int __init mtrr_search_optimal_index(void)
{
- int i;
int num_reg_good;
int index_good;
+ int i;
if (nr_mtrr_spare_reg >= num_var_ranges)
nr_mtrr_spare_reg = num_var_ranges - 1;
+
num_reg_good = -1;
for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
if (!min_loss_pfn[i])
@@ -796,24 +795,24 @@ static int __init mtrr_search_optimal_index(void)
return index_good;
}
-
int __init mtrr_cleanup(unsigned address_bits)
{
- unsigned long extra_remove_base, extra_remove_size;
+ unsigned long x_remove_base, x_remove_size;
unsigned long base, size, def, dummy;
- mtrr_type type;
u64 chunk_size, gran_size;
+ mtrr_type type;
int index_good;
int i;
if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
return 0;
+
rdmsr(MSR_MTRRdefType, def, dummy);
def &= 0xff;
if (def != MTRR_TYPE_UNCACHABLE)
return 0;
- /* get it and store it aside */
+ /* Get it and store it aside: */
memset(range_state, 0, sizeof(range_state));
for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &base, &size, &type);
@@ -822,29 +821,28 @@ int __init mtrr_cleanup(unsigned address_bits)
range_state[i].type = type;
}
- /* check if we need handle it and can handle it */
+ /* Check if we need handle it and can handle it: */
if (!mtrr_need_cleanup())
return 0;
- /* print original var MTRRs at first, for debugging: */
+ /* Print original var MTRRs at first, for debugging: */
printk(KERN_DEBUG "original variable MTRRs\n");
print_out_mtrr_range_state();
memset(range, 0, sizeof(range));
- extra_remove_size = 0;
- extra_remove_base = 1 << (32 - PAGE_SHIFT);
+ x_remove_size = 0;
+ x_remove_base = 1 << (32 - PAGE_SHIFT);
if (mtrr_tom2)
- extra_remove_size =
- (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
- nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
- extra_remove_size);
+ x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
+
+ nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
/*
- * [0, 1M) should always be coverred by var mtrr with WB
- * and fixed mtrrs should take effective before var mtrr for it
+ * [0, 1M) should always be covered by var mtrr with WB
+ * and fixed mtrrs should take effect before var mtrr for it:
*/
nr_range = add_range_with_merge(range, nr_range, 0,
(1ULL<<(20 - PAGE_SHIFT)) - 1);
- /* sort the ranges */
+ /* Sort the ranges: */
sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
range_sums = sum_ranges(range, nr_range);
@@ -854,7 +852,7 @@ int __init mtrr_cleanup(unsigned address_bits)
if (mtrr_chunk_size && mtrr_gran_size) {
i = 0;
mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
- extra_remove_base, extra_remove_size, i);
+ x_remove_base, x_remove_size, i);
mtrr_print_out_one_result(i);
@@ -880,7 +878,7 @@ int __init mtrr_cleanup(unsigned address_bits)
continue;
mtrr_calc_range_state(chunk_size, gran_size,
- extra_remove_base, extra_remove_size, i);
+ x_remove_base, x_remove_size, i);
if (debug_print) {
mtrr_print_out_one_result(i);
printk(KERN_INFO "\n");
@@ -890,7 +888,7 @@ int __init mtrr_cleanup(unsigned address_bits)
}
}
- /* try to find the optimal index */
+ /* Try to find the optimal index: */
index_good = mtrr_search_optimal_index();
if (index_good != -1) {
@@ -898,7 +896,7 @@ int __init mtrr_cleanup(unsigned address_bits)
i = index_good;
mtrr_print_out_one_result(i);
- /* convert ranges to var ranges state */
+ /* Convert ranges to var ranges state: */
chunk_size = result[i].chunk_sizek;
chunk_size <<= 10;
gran_size = result[i].gran_sizek;
@@ -941,8 +939,8 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
* Note this won't check if the MTRRs < 4GB where the magic bit doesn't
* apply to are wrong, but so far we don't know of any such case in the wild.
*/
-#define Tom2Enabled (1U << 21)
-#define Tom2ForceMemTypeWB (1U << 22)
+#define Tom2Enabled (1U << 21)
+#define Tom2ForceMemTypeWB (1U << 22)
int __init amd_special_default_mtrr(void)
{
@@ -952,7 +950,7 @@ int __init amd_special_default_mtrr(void)
return 0;
if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
return 0;
- /* In case some hypervisor doesn't pass SYSCFG through */
+ /* In case some hypervisor doesn't pass SYSCFG through: */
if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
return 0;
/*
@@ -965,19 +963,21 @@ int __init amd_special_default_mtrr(void)
return 0;
}
-static u64 __init real_trim_memory(unsigned long start_pfn,
- unsigned long limit_pfn)
+static u64 __init
+real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
{
u64 trim_start, trim_size;
+
trim_start = start_pfn;
trim_start <<= PAGE_SHIFT;
+
trim_size = limit_pfn;
trim_size <<= PAGE_SHIFT;
trim_size -= trim_start;
- return e820_update_range(trim_start, trim_size, E820_RAM,
- E820_RESERVED);
+ return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED);
}
+
/**
* mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
* @end_pfn: ending page frame number
@@ -985,7 +985,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn,
* Some buggy BIOSes don't setup the MTRRs properly for systems with certain
* memory configurations. This routine checks that the highest MTRR matches
* the end of memory, to make sure the MTRRs having a write back type cover
- * all of the memory the kernel is intending to use. If not, it'll trim any
+ * all of the memory the kernel is intending to use. If not, it'll trim any
* memory off the end by adjusting end_pfn, removing it from the kernel's
* allocation pools, warning the user with an obnoxious message.
*/
@@ -994,21 +994,22 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
unsigned long i, base, size, highest_pfn = 0, def, dummy;
mtrr_type type;
u64 total_trim_size;
-
/* extra one for all 0 */
int num[MTRR_NUM_TYPES + 1];
+
/*
* Make sure we only trim uncachable memory on machines that
* support the Intel MTRR architecture:
*/
if (!is_cpu(INTEL) || disable_mtrr_trim)
return 0;
+
rdmsr(MSR_MTRRdefType, def, dummy);
def &= 0xff;
if (def != MTRR_TYPE_UNCACHABLE)
return 0;
- /* get it and store it aside */
+ /* Get it and store it aside: */
memset(range_state, 0, sizeof(range_state));
for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &base, &size, &type);
@@ -1017,7 +1018,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
range_state[i].type = type;
}
- /* Find highest cached pfn */
+ /* Find highest cached pfn: */
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
if (type != MTRR_TYPE_WRBACK)
@@ -1028,13 +1029,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
highest_pfn = base + size;
}
- /* kvm/qemu doesn't have mtrr set right, don't trim them all */
+ /* kvm/qemu doesn't have mtrr set right, don't trim them all: */
if (!highest_pfn) {
printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
return 0;
}
- /* check entries number */
+ /* Check entries number: */
memset(num, 0, sizeof(num));
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type;
@@ -1046,11 +1047,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
num[type]++;
}
- /* no entry for WB? */
+ /* No entry for WB? */
if (!num[MTRR_TYPE_WRBACK])
return 0;
- /* check if we only had WB and UC */
+ /* Check if we only had WB and UC: */
if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
num_var_ranges - num[MTRR_NUM_TYPES])
return 0;
@@ -1066,31 +1067,31 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
}
nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
+ /* Check the head: */
total_trim_size = 0;
- /* check the head */
if (range[0].start)
total_trim_size += real_trim_memory(0, range[0].start);
- /* check the holes */
+
+ /* Check the holes: */
for (i = 0; i < nr_range - 1; i++) {
if (range[i].end + 1 < range[i+1].start)
total_trim_size += real_trim_memory(range[i].end + 1,
range[i+1].start);
}
- /* check the top */
+
+ /* Check the top: */
i = nr_range - 1;
if (range[i].end + 1 < end_pfn)
total_trim_size += real_trim_memory(range[i].end + 1,
end_pfn);
if (total_trim_size) {
- printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
- " all of memory, losing %lluMB of RAM.\n",
- total_trim_size >> 20);
+ pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
if (!changed_by_mtrr_cleanup)
WARN_ON(1);
- printk(KERN_INFO "update e820 for mtrr\n");
+ pr_info("update e820 for mtrr\n");
update_e820();
return 1;
@@ -1098,4 +1099,3 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
return 0;
}
-
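As background (a sketch, not part of the patch itself): cleanup.c builds its candidate MTRR layouts from a flat table of inclusive {start, end} page ranges, merging and sorting them before scoring each chunk/gran combination. The minimal user-space re-implementation below uses names that mirror the kernel helpers (res_range, add_range, add_range_with_merge) purely for illustration of the merge idea; it is not the kernel code.

#include <stdio.h>

#define RANGE_NUM 256

struct res_range {
	unsigned long start;
	unsigned long end;
};

/* Append a new [start, end] entry if there is a free slot. */
static int add_range(struct res_range *range, int nr_range,
		     unsigned long start, unsigned long end)
{
	if (nr_range >= RANGE_NUM)	/* out of slots */
		return nr_range;

	range[nr_range].start = start;
	range[nr_range].end = end;

	return nr_range + 1;
}

/* Merge with an overlapping or adjacent entry if possible, else append. */
static int add_range_with_merge(struct res_range *range, int nr_range,
				unsigned long start, unsigned long end)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if (range[i].end + 1 < start || end + 1 < range[i].start)
			continue;

		range[i].start = range[i].start < start ? range[i].start : start;
		range[i].end   = range[i].end   > end   ? range[i].end   : end;
		return nr_range;
	}

	return add_range(range, nr_range, start, end);
}

int main(void)
{
	struct res_range range[RANGE_NUM] = { { 0, 0 } };
	int nr = 0;

	nr = add_range_with_merge(range, nr, 0x000, 0x0ff);
	nr = add_range_with_merge(range, nr, 0x100, 0x1ff);	/* adjacent, merges */

	printf("%d range(s), first: %#lx-%#lx\n", nr, range[0].start, range[0].end);
	return 0;
}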
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index ff14c320040c..228d982ce09c 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -1,38 +1,40 @@
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mm.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include <asm/io.h>
+
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
+#include <asm/mtrr.h>
+#include <asm/msr.h>
+
#include "mtrr.h"
static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type * type)
{
- unsigned long flags;
unsigned char arr, ccr3, rcr, shift;
+ unsigned long flags;
arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
- /* Save flags and disable interrupts */
local_irq_save(flags);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
- ((unsigned char *) base)[3] = getCx86(arr);
- ((unsigned char *) base)[2] = getCx86(arr + 1);
- ((unsigned char *) base)[1] = getCx86(arr + 2);
+ ((unsigned char *)base)[3] = getCx86(arr);
+ ((unsigned char *)base)[2] = getCx86(arr + 1);
+ ((unsigned char *)base)[1] = getCx86(arr + 2);
rcr = getCx86(CX86_RCR_BASE + reg);
- setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
+ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
- /* Enable interrupts if it was enabled previously */
local_irq_restore(flags);
+
shift = ((unsigned char *) base)[1] & 0x0f;
*base >>= PAGE_SHIFT;
- /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
+ /*
+ * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7.
* Note: shift==0xf means 4G, this is unsupported.
*/
if (shift)
@@ -76,17 +78,20 @@ cyrix_get_arr(unsigned int reg, unsigned long *base,
}
}
+/*
+ * cyrix_get_free_region - get a free ARR.
+ *
+ * @base: the starting (base) address of the region.
+ * @size: the size (in bytes) of the region.
+ *
+ * Returns: the index of the region on success, else negative on error.
+ */
static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
-/* [SUMMARY] Get a free ARR.
- <base> The starting (base) address of the region.
- <size> The size (in bytes) of the region.
- [RETURNS] The index of the region on success, else -1 on error.
-*/
{
- int i;
- mtrr_type ltype;
unsigned long lbase, lsize;
+ mtrr_type ltype;
+ int i;
switch (replace_reg) {
case 7:
@@ -107,14 +112,17 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
cyrix_get_arr(7, &lbase, &lsize, &ltype);
if (lsize == 0)
return 7;
- /* Else try ARR0-ARR6 first */
+ /* Else try ARR0-ARR6 first */
} else {
for (i = 0; i < 7; i++) {
cyrix_get_arr(i, &lbase, &lsize, &ltype);
if (lsize == 0)
return i;
}
- /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
+ /*
+ * ARR0-ARR6 aren't free;
+ * try ARR7, but its size must be at least 256K
+ */
cyrix_get_arr(i, &lbase, &lsize, &ltype);
if ((lsize == 0) && (size >= 0x40))
return i;
@@ -122,21 +130,22 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
return -ENOSPC;
}
-static u32 cr4 = 0;
-static u32 ccr3;
+static u32 cr4, ccr3;
static void prepare_set(void)
{
u32 cr0;
/* Save value of CR4 and clear Page Global Enable (bit 7) */
- if ( cpu_has_pge ) {
+ if (cpu_has_pge) {
cr4 = read_cr4();
write_cr4(cr4 & ~X86_CR4_PGE);
}
- /* Disable and flush caches. Note that wbinvd flushes the TLBs as
- a side-effect */
+ /*
+ * Disable and flush caches.
+ * Note that wbinvd flushes the TLBs as a side-effect.
+ */
cr0 = read_cr0() | X86_CR0_CD;
wbinvd();
write_cr0(cr0);
@@ -147,22 +156,21 @@ static void prepare_set(void)
/* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
-
}
static void post_set(void)
{
- /* Flush caches and TLBs */
+ /* Flush caches and TLBs */
wbinvd();
/* Cyrix ARRs - everything else was excluded at the top */
setCx86(CX86_CCR3, ccr3);
-
- /* Enable caches */
+
+ /* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
- /* Restore value of CR4 */
- if ( cpu_has_pge )
+ /* Restore value of CR4 */
+ if (cpu_has_pge)
write_cr4(cr4);
}
@@ -178,7 +186,8 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
size >>= 6;
size &= 0x7fff; /* make sure arr_size <= 14 */
- for (arr_size = 0; size; arr_size++, size >>= 1) ;
+ for (arr_size = 0; size; arr_size++, size >>= 1)
+ ;
if (reg < 7) {
switch (type) {
@@ -215,18 +224,18 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
prepare_set();
base <<= PAGE_SHIFT;
- setCx86(arr, ((unsigned char *) &base)[3]);
- setCx86(arr + 1, ((unsigned char *) &base)[2]);
- setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
+ setCx86(arr + 0, ((unsigned char *)&base)[3]);
+ setCx86(arr + 1, ((unsigned char *)&base)[2]);
+ setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size);
setCx86(CX86_RCR_BASE + reg, arr_type);
post_set();
}
typedef struct {
- unsigned long base;
- unsigned long size;
- mtrr_type type;
+ unsigned long base;
+ unsigned long size;
+ mtrr_type type;
} arr_state_t;
static arr_state_t arr_state[8] = {
@@ -247,16 +256,17 @@ static void cyrix_set_all(void)
setCx86(CX86_CCR0 + i, ccr_state[i]);
for (; i < 7; i++)
setCx86(CX86_CCR4 + i, ccr_state[i]);
- for (i = 0; i < 8; i++)
- cyrix_set_arr(i, arr_state[i].base,
+
+ for (i = 0; i < 8; i++) {
+ cyrix_set_arr(i, arr_state[i].base,
arr_state[i].size, arr_state[i].type);
+ }
post_set();
}
static struct mtrr_ops cyrix_mtrr_ops = {
.vendor = X86_VENDOR_CYRIX,
-// .init = cyrix_arr_init,
.set_all = cyrix_set_all,
.set = cyrix_set_arr,
.get = cyrix_get_arr,
@@ -270,5 +280,3 @@ int __init cyrix_init_mtrr(void)
set_mtrr_ops(&cyrix_mtrr_ops);
return 0;
}
-
-//arch_initcall(cyrix_init_mtrr);
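For reference, the size-encoding loop that the cyrix.c hunk above only reformats (for (arr_size = 0; size; arr_size++, size >>= 1);) counts the significant bits left in size, much like fls(). A standalone sketch with made-up input values, shown only to make the loop's behaviour explicit:

#include <stdio.h>

/* Count how many right shifts it takes for `size` to reach zero,
 * i.e. the position of the highest set bit plus one. */
static unsigned int count_size_bits(unsigned long size)
{
	unsigned int arr_size;

	for (arr_size = 0; size; arr_size++, size >>= 1)
		;
	return arr_size;
}

int main(void)
{
	printf("%u %u %u\n",
	       count_size_bits(1),		/* -> 1  */
	       count_size_bits(0x40),		/* -> 7  */
	       count_size_bits(0x7fff));	/* -> 15 */
	return 0;
}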
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0543f69f0b27..55da0c5f68dd 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -1,28 +1,34 @@
-/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
- because MTRRs can span upto 40 bits (36bits on most modern x86) */
+/*
+ * This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
+ * because MTRRs can span up to 40 bits (36 bits on most modern x86).
+ */
+#define DEBUG
+
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/mm.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-#include <asm/system.h>
-#include <asm/cpufeature.h>
+
#include <asm/processor-flags.h>
+#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/mtrr.h>
+#include <asm/msr.h>
#include <asm/pat.h>
+
#include "mtrr.h"
struct fixed_range_block {
- int base_msr; /* start address of an MTRR block */
- int ranges; /* number of MTRRs in this block */
+ int base_msr; /* start address of an MTRR block */
+ int ranges; /* number of MTRRs in this block */
};
static struct fixed_range_block fixed_range_blocks[] = {
- { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
- { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
- { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
+ { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
+ { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
+ { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
{}
};
@@ -30,10 +36,10 @@ static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;
-struct mtrr_state_type mtrr_state = {};
+struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);
-/**
+/*
* BIOS is expected to clear MtrrFixDramModEn bit, see for example
* "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
* Opteron Processors" (26094 Rev. 3.30 February 2006), section
@@ -104,9 +110,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
* Look of multiple ranges matching this address and pick type
* as per MTRR precedence
*/
- if (!(mtrr_state.enabled & 2)) {
+ if (!(mtrr_state.enabled & 2))
return mtrr_state.def_type;
- }
prev_match = 0xFF;
for (i = 0; i < num_var_ranges; ++i) {
@@ -125,9 +130,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
if (start_state != end_state)
return 0xFE;
- if ((start & mask) != (base & mask)) {
+ if ((start & mask) != (base & mask))
continue;
- }
curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
if (prev_match == 0xFF) {
@@ -148,9 +152,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
curr_match = MTRR_TYPE_WRTHROUGH;
}
- if (prev_match != curr_match) {
+ if (prev_match != curr_match)
return MTRR_TYPE_UNCACHABLE;
- }
}
if (mtrr_tom2) {
@@ -164,7 +167,7 @@ u8 mtrr_type_lookup(u64 start, u64 end)
return mtrr_state.def_type;
}
-/* Get the MSR pair relating to a var range */
+/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
@@ -172,7 +175,7 @@ get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
-/* fill the MSR pair relating to a var range */
+/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
@@ -186,10 +189,9 @@ void fill_mtrr_var_range(unsigned int index,
vr[index].mask_hi = mask_hi;
}
-static void
-get_fixed_ranges(mtrr_type * frs)
+static void get_fixed_ranges(mtrr_type *frs)
{
- unsigned int *p = (unsigned int *) frs;
+ unsigned int *p = (unsigned int *)frs;
int i;
k8_check_syscfg_dram_mod_en();
@@ -217,22 +219,22 @@ static void __init print_fixed_last(void)
if (!last_fixed_end)
return;
- printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start,
- last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
+ pr_debug(" %05X-%05X %s\n", last_fixed_start,
+ last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
last_fixed_end = 0;
}
static void __init update_fixed_last(unsigned base, unsigned end,
- mtrr_type type)
+ mtrr_type type)
{
last_fixed_start = base;
last_fixed_end = end;
last_fixed_type = type;
}
-static void __init print_fixed(unsigned base, unsigned step,
- const mtrr_type *types)
+static void __init
+print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
unsigned i;
@@ -259,54 +261,55 @@ static void __init print_mtrr_state(void)
unsigned int i;
int high_width;
- printk(KERN_DEBUG "MTRR default type: %s\n",
- mtrr_attrib_to_str(mtrr_state.def_type));
+ pr_debug("MTRR default type: %s\n",
+ mtrr_attrib_to_str(mtrr_state.def_type));
if (mtrr_state.have_fixed) {
- printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n",
- mtrr_state.enabled & 1 ? "en" : "dis");
+ pr_debug("MTRR fixed ranges %sabled:\n",
+ mtrr_state.enabled & 1 ? "en" : "dis");
print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
for (i = 0; i < 2; ++i)
- print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
+ print_fixed(0x80000 + i * 0x20000, 0x04000,
+ mtrr_state.fixed_ranges + (i + 1) * 8);
for (i = 0; i < 8; ++i)
- print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
+ print_fixed(0xC0000 + i * 0x08000, 0x01000,
+ mtrr_state.fixed_ranges + (i + 3) * 8);
/* tail */
print_fixed_last();
}
- printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
- mtrr_state.enabled & 2 ? "en" : "dis");
+ pr_debug("MTRR variable ranges %sabled:\n",
+ mtrr_state.enabled & 2 ? "en" : "dis");
if (size_or_mask & 0xffffffffUL)
high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
else
high_width = ffs(size_or_mask>>32) + 32 - 1;
high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
+
for (i = 0; i < num_var_ranges; ++i) {
if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
- printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n",
- i,
- high_width,
- mtrr_state.var_ranges[i].base_hi,
- mtrr_state.var_ranges[i].base_lo >> 12,
- high_width,
- mtrr_state.var_ranges[i].mask_hi,
- mtrr_state.var_ranges[i].mask_lo >> 12,
- mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
+ pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
+ i,
+ high_width,
+ mtrr_state.var_ranges[i].base_hi,
+ mtrr_state.var_ranges[i].base_lo >> 12,
+ high_width,
+ mtrr_state.var_ranges[i].mask_hi,
+ mtrr_state.var_ranges[i].mask_lo >> 12,
+ mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
else
- printk(KERN_DEBUG " %u disabled\n", i);
- }
- if (mtrr_tom2) {
- printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
- mtrr_tom2, mtrr_tom2>>20);
+ pr_debug(" %u disabled\n", i);
}
+ if (mtrr_tom2)
+ pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}
-/* Grab all of the MTRR state for this CPU into *state */
+/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
- unsigned int i;
struct mtrr_var_range *vrs;
- unsigned lo, dummy;
unsigned long flags;
+ unsigned lo, dummy;
+ unsigned int i;
vrs = mtrr_state.var_ranges;
@@ -324,6 +327,7 @@ void __init get_mtrr_state(void)
if (amd_special_default_mtrr()) {
unsigned low, high;
+
/* TOP_MEM2 */
rdmsr(MSR_K8_TOP_MEM2, low, high);
mtrr_tom2 = high;
@@ -344,10 +348,9 @@ void __init get_mtrr_state(void)
post_set();
local_irq_restore(flags);
-
}
-/* Some BIOS's are fucked and don't set all MTRRs the same! */
+/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
unsigned long mask = smp_changes_mask;
@@ -355,28 +358,33 @@ void __init mtrr_state_warn(void)
if (!mask)
return;
if (mask & MTRR_CHANGE_MASK_FIXED)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
+ pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_VARIABLE)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
+ pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
if (mask & MTRR_CHANGE_MASK_DEFTYPE)
- printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+ pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+
printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
printk(KERN_INFO "mtrr: corrected configuration.\n");
}
-/* Doesn't attempt to pass an error out to MTRR users
- because it's quite complicated in some cases and probably not
- worth it because the best error handling is to ignore it. */
+/*
+ * Doesn't attempt to pass an error out to MTRR users
+ * because it's quite complicated in some cases and probably not
+ * worth it because the best error handling is to ignore it.
+ */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
- if (wrmsr_safe(msr, a, b) < 0)
+ if (wrmsr_safe(msr, a, b) < 0) {
printk(KERN_ERR
"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
smp_processor_id(), msr, a, b);
+ }
}
/**
- * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
+ * set_fixed_range - checks & updates a fixed-range MTRR if it
+ * differs from the value it should have
* @msr: MSR address of the MTTR which should be checked and updated
* @changed: pointer which indicates whether the MTRR needed to be changed
* @msrwords: pointer to the MSR values which the MSR should have
@@ -401,20 +409,23 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
*
* Returns: The index of the region on success, else negative on error.
*/
-int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
+int
+generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
- int i, max;
- mtrr_type ltype;
unsigned long lbase, lsize;
+ mtrr_type ltype;
+ int i, max;
max = num_var_ranges;
if (replace_reg >= 0 && replace_reg < max)
return replace_reg;
+
for (i = 0; i < max; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
if (lsize == 0)
return i;
}
+
return -ENOSPC;
}
@@ -434,7 +445,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
if ((mask_lo & 0x800) == 0) {
- /* Invalid (i.e. free) range */
+ /* Invalid (i.e. free) range */
*base = 0;
*size = 0;
*type = 0;
@@ -471,27 +482,31 @@ out_put_cpu:
}
/**
- * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
+ * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
+ * differ from the saved set
* @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
*/
-static int set_fixed_ranges(mtrr_type * frs)
+static int set_fixed_ranges(mtrr_type *frs)
{
- unsigned long long *saved = (unsigned long long *) frs;
+ unsigned long long *saved = (unsigned long long *)frs;
bool changed = false;
- int block=-1, range;
+ int block = -1, range;
k8_check_syscfg_dram_mod_en();
- while (fixed_range_blocks[++block].ranges)
- for (range=0; range < fixed_range_blocks[block].ranges; range++)
- set_fixed_range(fixed_range_blocks[block].base_msr + range,
- &changed, (unsigned int *) saved++);
+ while (fixed_range_blocks[++block].ranges) {
+ for (range = 0; range < fixed_range_blocks[block].ranges; range++)
+ set_fixed_range(fixed_range_blocks[block].base_msr + range,
+ &changed, (unsigned int *)saved++);
+ }
return changed;
}
-/* Set the MSR pair relating to a var range. Returns TRUE if
- changes are made */
+/*
+ * Set the MSR pair relating to a var range.
+ * Returns true if changes are made.
+ */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
unsigned int lo, hi;
@@ -501,6 +516,7 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
|| (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
+
mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
changed = true;
}
@@ -526,21 +542,26 @@ static u32 deftype_lo, deftype_hi;
*/
static unsigned long set_mtrr_state(void)
{
- unsigned int i;
unsigned long change_mask = 0;
+ unsigned int i;
- for (i = 0; i < num_var_ranges; i++)
+ for (i = 0; i < num_var_ranges; i++) {
if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
+ }
if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
change_mask |= MTRR_CHANGE_MASK_FIXED;
- /* Set_mtrr_restore restores the old value of MTRRdefType,
- so to set it we fiddle with the saved value */
+ /*
+ * Set_mtrr_restore restores the old value of MTRRdefType,
+ * so to set it we fiddle with the saved value:
+ */
if ((deftype_lo & 0xff) != mtrr_state.def_type
|| ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
- deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
+
+ deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
+ (mtrr_state.enabled << 10);
change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
}
@@ -548,33 +569,36 @@ static unsigned long set_mtrr_state(void)
}
-static unsigned long cr4 = 0;
+static unsigned long cr4;
static DEFINE_SPINLOCK(set_atomicity_lock);
/*
- * Since we are disabling the cache don't allow any interrupts - they
- * would run extremely slow and would only increase the pain. The caller must
- * ensure that local interrupts are disabled and are reenabled after post_set()
- * has been called.
+ * Since we are disabling the cache, don't allow any interrupts;
+ * they would run extremely slowly and would only increase the pain.
+ *
+ * The caller must ensure that local interrupts are disabled and
+ * are reenabled after post_set() has been called.
*/
-
static void prepare_set(void) __acquires(set_atomicity_lock)
{
unsigned long cr0;
- /* Note that this is not ideal, since the cache is only flushed/disabled
- for this CPU while the MTRRs are changed, but changing this requires
- more invasive changes to the way the kernel boots */
+ /*
+ * Note that this is not ideal,
+ * since the cache is only flushed/disabled for this CPU while the
+ * MTRRs are changed, but changing this requires more invasive
+ * changes to the way the kernel boots.
+ */
spin_lock(&set_atomicity_lock);
- /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+ /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
cr0 = read_cr0() | X86_CR0_CD;
write_cr0(cr0);
wbinvd();
- /* Save value of CR4 and clear Page Global Enable (bit 7) */
- if ( cpu_has_pge ) {
+ /* Save value of CR4 and clear Page Global Enable (bit 7) */
+ if (cpu_has_pge) {
cr4 = read_cr4();
write_cr4(cr4 & ~X86_CR4_PGE);
}
@@ -582,26 +606,26 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
__flush_tlb();
- /* Save MTRR state */
+ /* Save MTRR state */
rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
- /* Disable MTRRs, and set the default type to uncached */
+ /* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
{
- /* Flush TLBs (no need to flush caches - they are disabled) */
+ /* Flush TLBs (no need to flush caches - they are disabled) */
__flush_tlb();
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
- /* Enable caches */
+
+ /* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
- /* Restore value of CR4 */
- if ( cpu_has_pge )
+ /* Restore value of CR4 */
+ if (cpu_has_pge)
write_cr4(cr4);
spin_unlock(&set_atomicity_lock);
}
@@ -623,24 +647,27 @@ static void generic_set_all(void)
post_set();
local_irq_restore(flags);
- /* Use the atomic bitops to update the global mask */
+ /* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof mask * 8; ++count) {
if (mask & 0x01)
set_bit(count, &smp_changes_mask);
mask >>= 1;
}
-
+
}
+/**
+ * generic_set_mtrr - set variable MTRR register on the local CPU.
+ *
+ * @reg: The register to set.
+ * @base: The base address of the region.
+ * @size: The size of the region. If this is 0 the region is disabled.
+ * @type: The type of the region.
+ *
+ * Returns nothing.
+ */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
-/* [SUMMARY] Set variable MTRR register on the local CPU.
- <reg> The register to set.
- <base> The base address of the region.
- <size> The size of the region. If this is 0 the region is disabled.
- <type> The type of the region.
- [RETURNS] Nothing.
-*/
{
unsigned long flags;
struct mtrr_var_range *vr;
@@ -651,8 +678,10 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
prepare_set();
if (size == 0) {
- /* The invalid bit is kept in the mask, so we simply clear the
- relevant mask register to disable a range. */
+ /*
+ * The invalid bit is kept in the mask, so we simply
+ * clear the relevant mask register to disable a range.
+ */
mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
memset(vr, 0, sizeof(struct mtrr_var_range));
} else {
@@ -669,46 +698,50 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
local_irq_restore(flags);
}
-int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+int generic_validate_add_page(unsigned long base, unsigned long size,
+ unsigned int type)
{
unsigned long lbase, last;
- /* For Intel PPro stepping <= 7, must be 4 MiB aligned
- and not touch 0x70000000->0x7003FFFF */
+ /*
+ * For Intel PPro stepping <= 7,
+ * the base must be 4 MiB aligned and must not touch 0x70000000 -> 0x7003FFFF.
+ */
if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
boot_cpu_data.x86_mask <= 7) {
if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
- printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+ pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
return -EINVAL;
}
if (!(base + size < 0x70000 || base > 0x7003F) &&
(type == MTRR_TYPE_WRCOMB
|| type == MTRR_TYPE_WRBACK)) {
- printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
+ pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
return -EINVAL;
}
}
- /* Check upper bits of base and last are equal and lower bits are 0
- for base and 1 for last */
+ /*
+ * Check that the upper bits of base and last are equal, and that the
+ * lower bits are 0 for base and 1 for last
+ */
last = base + size - 1;
for (lbase = base; !(lbase & 1) && (last & 1);
- lbase = lbase >> 1, last = last >> 1) ;
+ lbase = lbase >> 1, last = last >> 1)
+ ;
if (lbase != last) {
- printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
- base, size);
+ pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
return -EINVAL;
}
return 0;
}
-
static int generic_have_wrcomb(void)
{
unsigned long config, dummy;
rdmsr(MSR_MTRRcap, config, dummy);
- return (config & (1 << 10));
+ return config & (1 << 10);
}
int positive_have_wrcomb(void)
@@ -716,14 +749,15 @@ int positive_have_wrcomb(void)
return 1;
}
-/* generic structure...
+/*
+ * Generic structure...
*/
struct mtrr_ops generic_mtrr_ops = {
- .use_intel_if = 1,
- .set_all = generic_set_all,
- .get = generic_get_mtrr,
- .get_free_region = generic_get_free_region,
- .set = generic_set_mtrr,
- .validate_add_page = generic_validate_add_page,
- .have_wrcomb = generic_have_wrcomb,
+ .use_intel_if = 1,
+ .set_all = generic_set_all,
+ .get = generic_get_mtrr,
+ .get_free_region = generic_get_free_region,
+ .set = generic_set_mtrr,
+ .validate_add_page = generic_validate_add_page,
+ .have_wrcomb = generic_have_wrcomb,
};
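The generic.c changes above repeatedly rely on the variable-MTRR base/mask convention: an address belongs to a range when the masked address equals the masked base. A standalone sketch of that test follows; the base and mask values are made up, and the real code additionally folds in the valid bit and the physical-address width, so treat this only as an illustration of the comparison:

#include <stdio.h>
#include <stdint.h>

/* An address is covered by a variable MTRR when masking it with the
 * range's mask yields the (masked) base, as in mtrr_type_lookup(). */
static int mtrr_matches(uint64_t addr, uint64_t base, uint64_t mask)
{
	return (addr & mask) == (base & mask);
}

int main(void)
{
	/* Hypothetical 64 MB range starting at 0xd0000000: */
	uint64_t base = 0xd0000000ULL;
	uint64_t mask = ~(0x4000000ULL - 1);	/* masks off the low 64 MB */

	printf("%d %d\n",
	       mtrr_matches(0xd2345678ULL, base, mask),	/* inside  -> 1 */
	       mtrr_matches(0xc0000000ULL, base, mask));	/* outside -> 0 */
	return 0;
}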
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index fb73a52913a4..08b6ea4c62b4 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -1,27 +1,28 @@
-#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <linux/capability.h>
-#include <linux/ctype.h>
-#include <linux/module.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
#define LINE_SIZE 80
#include <asm/mtrr.h>
+
#include "mtrr.h"
#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
static const char *const mtrr_strings[MTRR_NUM_TYPES] =
{
- "uncachable", /* 0 */
- "write-combining", /* 1 */
- "?", /* 2 */
- "?", /* 3 */
- "write-through", /* 4 */
- "write-protect", /* 5 */
- "write-back", /* 6 */
+ "uncachable", /* 0 */
+ "write-combining", /* 1 */
+ "?", /* 2 */
+ "?", /* 3 */
+ "write-through", /* 4 */
+ "write-protect", /* 5 */
+ "write-back", /* 6 */
};
const char *mtrr_attrib_to_str(int x)
@@ -35,8 +36,8 @@ static int
mtrr_file_add(unsigned long base, unsigned long size,
unsigned int type, bool increment, struct file *file, int page)
{
+ unsigned int *fcount = FILE_FCOUNT(file);
int reg, max;
- unsigned int *fcount = FILE_FCOUNT(file);
max = num_var_ranges;
if (fcount == NULL) {
@@ -61,8 +62,8 @@ static int
mtrr_file_del(unsigned long base, unsigned long size,
struct file *file, int page)
{
- int reg;
unsigned int *fcount = FILE_FCOUNT(file);
+ int reg;
if (!page) {
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
@@ -81,13 +82,14 @@ mtrr_file_del(unsigned long base, unsigned long size,
return reg;
}
-/* RED-PEN: seq_file can seek now. this is ignored. */
+/*
+ * seq_file can seek but we ignore it.
+ *
+ * Format of control line:
+ * "base=%Lx size=%Lx type=%s" or "disable=%d"
+ */
static ssize_t
mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
-/* Format of control line:
- "base=%Lx size=%Lx type=%s" OR:
- "disable=%d"
-*/
{
int i, err;
unsigned long reg;
@@ -100,15 +102,18 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
return -EPERM;
if (!len)
return -EINVAL;
+
memset(line, 0, LINE_SIZE);
if (len > LINE_SIZE)
len = LINE_SIZE;
if (copy_from_user(line, buf, len - 1))
return -EFAULT;
+
linelen = strlen(line);
ptr = line + linelen - 1;
if (linelen && *ptr == '\n')
*ptr = '\0';
+
if (!strncmp(line, "disable=", 8)) {
reg = simple_strtoul(line + 8, &ptr, 0);
err = mtrr_del_page(reg, 0, 0);
@@ -116,28 +121,35 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
return err;
return len;
}
+
if (strncmp(line, "base=", 5))
return -EINVAL;
+
base = simple_strtoull(line + 5, &ptr, 0);
- for (; isspace(*ptr); ++ptr) ;
+ for (; isspace(*ptr); ++ptr)
+ ;
+
if (strncmp(ptr, "size=", 5))
return -EINVAL;
+
size = simple_strtoull(ptr + 5, &ptr, 0);
if ((base & 0xfff) || (size & 0xfff))
return -EINVAL;
- for (; isspace(*ptr); ++ptr) ;
+ for (; isspace(*ptr); ++ptr)
+ ;
+
if (strncmp(ptr, "type=", 5))
return -EINVAL;
ptr += 5;
- for (; isspace(*ptr); ++ptr) ;
+ for (; isspace(*ptr); ++ptr)
+ ;
+
for (i = 0; i < MTRR_NUM_TYPES; ++i) {
if (strcmp(ptr, mtrr_strings[i]))
continue;
base >>= PAGE_SHIFT;
size >>= PAGE_SHIFT;
- err =
- mtrr_add_page((unsigned long) base, (unsigned long) size, i,
- true);
+ err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
if (err < 0)
return err;
return len;
@@ -181,7 +193,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
case MTRRIOC32_SET_PAGE_ENTRY:
case MTRRIOC32_DEL_PAGE_ENTRY:
case MTRRIOC32_KILL_PAGE_ENTRY: {
- struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg;
+ struct mtrr_sentry32 __user *s32;
+
+ s32 = (struct mtrr_sentry32 __user *)__arg;
err = get_user(sentry.base, &s32->base);
err |= get_user(sentry.size, &s32->size);
err |= get_user(sentry.type, &s32->type);
@@ -191,7 +205,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
}
case MTRRIOC32_GET_ENTRY:
case MTRRIOC32_GET_PAGE_ENTRY: {
- struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
+ struct mtrr_gentry32 __user *g32;
+
+ g32 = (struct mtrr_gentry32 __user *)__arg;
err = get_user(gentry.regnum, &g32->regnum);
err |= get_user(gentry.base, &g32->base);
err |= get_user(gentry.size, &g32->size);
@@ -314,7 +330,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
if (err)
return err;
- switch(cmd) {
+ switch (cmd) {
case MTRRIOC_GET_ENTRY:
case MTRRIOC_GET_PAGE_ENTRY:
if (copy_to_user(arg, &gentry, sizeof gentry))
@@ -323,7 +339,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
#ifdef CONFIG_COMPAT
case MTRRIOC32_GET_ENTRY:
case MTRRIOC32_GET_PAGE_ENTRY: {
- struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
+ struct mtrr_gentry32 __user *g32;
+
+ g32 = (struct mtrr_gentry32 __user *)__arg;
err = put_user(gentry.base, &g32->base);
err |= put_user(gentry.size, &g32->size);
err |= put_user(gentry.regnum, &g32->regnum);
@@ -335,11 +353,10 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
return err;
}
-static int
-mtrr_close(struct inode *ino, struct file *file)
+static int mtrr_close(struct inode *ino, struct file *file)
{
- int i, max;
unsigned int *fcount = FILE_FCOUNT(file);
+ int i, max;
if (fcount != NULL) {
max = num_var_ranges;
@@ -359,22 +376,22 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset);
static int mtrr_open(struct inode *inode, struct file *file)
{
- if (!mtrr_if)
+ if (!mtrr_if)
return -EIO;
- if (!mtrr_if->get)
- return -ENXIO;
+ if (!mtrr_if->get)
+ return -ENXIO;
return single_open(file, mtrr_seq_show, NULL);
}
static const struct file_operations mtrr_fops = {
- .owner = THIS_MODULE,
- .open = mtrr_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = mtrr_write,
- .unlocked_ioctl = mtrr_ioctl,
- .compat_ioctl = mtrr_ioctl,
- .release = mtrr_close,
+ .owner = THIS_MODULE,
+ .open = mtrr_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = mtrr_write,
+ .unlocked_ioctl = mtrr_ioctl,
+ .compat_ioctl = mtrr_ioctl,
+ .release = mtrr_close,
};
static int mtrr_seq_show(struct seq_file *seq, void *offset)
@@ -388,23 +405,24 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
max = num_var_ranges;
for (i = 0; i < max; i++) {
mtrr_if->get(i, &base, &size, &type);
- if (size == 0)
+ if (size == 0) {
mtrr_usage_table[i] = 0;
- else {
- if (size < (0x100000 >> PAGE_SHIFT)) {
- /* less than 1MB */
- factor = 'K';
- size <<= PAGE_SHIFT - 10;
- } else {
- factor = 'M';
- size >>= 20 - PAGE_SHIFT;
- }
- /* RED-PEN: base can be > 32bit */
- len += seq_printf(seq,
- "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n",
- i, base, base >> (20 - PAGE_SHIFT), size, factor,
- mtrr_usage_table[i], mtrr_attrib_to_str(type));
+ continue;
}
+ if (size < (0x100000 >> PAGE_SHIFT)) {
+ /* less than 1MB */
+ factor = 'K';
+ size <<= PAGE_SHIFT - 10;
+ } else {
+ factor = 'M';
+ size >>= 20 - PAGE_SHIFT;
+ }
+ /* Base can be > 32bit */
+ len += seq_printf(seq, "reg%02i: base=0x%06lx000 "
+ "(%5luMB), size=%5lu%cB, count=%d: %s\n",
+ i, base, base >> (20 - PAGE_SHIFT), size,
+ factor, mtrr_usage_table[i],
+ mtrr_attrib_to_str(type));
}
return 0;
}
@@ -422,6 +440,5 @@ static int __init mtrr_if_init(void)
proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
return 0;
}
-
arch_initcall(mtrr_if_init);
#endif /* CONFIG_PROC_FS */
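The comment block moved above mtrr_write() documents the /proc/mtrr control-line format ("base=%Lx size=%Lx type=%s" or "disable=%d"). Below is a user-space sketch of a write in that format; the base and size values are hypothetical, and a real request needs root plus an address range the MTRR layer is willing to accept:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/mtrr", "w");

	if (!f) {
		perror("/proc/mtrr");
		return EXIT_FAILURE;
	}

	/* Request a hypothetical 16 MB write-combining window: */
	fprintf(f, "base=0xd0000000 size=0x1000000 type=write-combining\n");

	if (fclose(f) != 0) {		/* fclose flushes and reports the write error */
		perror("write to /proc/mtrr");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}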
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 8fc248b5aeaf..7af0f88a4163 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -25,43 +25,48 @@
Operating System Writer's Guide" (Intel document number 242692),
section 11.11.7
- This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
- on 6-7 March 2002.
- Source: Intel Architecture Software Developers Manual, Volume 3:
+ This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
+ on 6-7 March 2002.
+ Source: Intel Architecture Software Developers Manual, Volume 3:
System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
+#define DEBUG
+
+#include <linux/types.h> /* FIXME: kvm_para.h needs this */
+
+#include <linux/kvm_para.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/init.h>
+#include <linux/sort.h>
+#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/sort.h>
+#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
#include <asm/msr.h>
-#include <asm/kvm_para.h>
+
#include "mtrr.h"
-u32 num_var_ranges = 0;
+u32 num_var_ranges;
unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
-static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
+static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
-struct mtrr_ops * mtrr_if = NULL;
+struct mtrr_ops *mtrr_if;
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
-void set_mtrr_ops(struct mtrr_ops * ops)
+void set_mtrr_ops(struct mtrr_ops *ops)
{
if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
mtrr_ops[ops->vendor] = ops;
@@ -72,30 +77,36 @@ static int have_wrcomb(void)
{
struct pci_dev *dev;
u8 rev;
-
- if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
- /* ServerWorks LE chipsets < rev 6 have problems with write-combining
- Don't allow it and leave room for other chipsets to be tagged */
+
+ dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
+ if (dev != NULL) {
+ /*
+ * ServerWorks LE chipsets < rev 6 have problems with
+ * write-combining. Don't allow it and leave room for other
+ * chipsets to be tagged
+ */
if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
if (rev <= 5) {
- printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
+ pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
pci_dev_put(dev);
return 0;
}
}
- /* Intel 450NX errata # 23. Non ascending cacheline evictions to
- write combining memory may resulting in data corruption */
+ /*
+	 * Intel 450NX errata # 23. Non-ascending cacheline evictions to
+	 * write combining memory may result in data corruption
+ */
if (dev->vendor == PCI_VENDOR_ID_INTEL &&
dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
- printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
+ pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
pci_dev_put(dev);
return 0;
}
pci_dev_put(dev);
- }
- return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
+ }
+ return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}
/* This function returns the number of variable MTRRs */
@@ -103,12 +114,13 @@ static void __init set_num_var_ranges(void)
{
unsigned long config = 0, dummy;
- if (use_intel()) {
+ if (use_intel())
rdmsr(MSR_MTRRcap, config, dummy);
- } else if (is_cpu(AMD))
+ else if (is_cpu(AMD))
config = 2;
else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
config = 8;
+
num_var_ranges = config & 0xff;
}
@@ -130,10 +142,12 @@ struct set_mtrr_data {
mtrr_type smp_type;
};
+/**
+ * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
+ *
+ * Returns nothing.
+ */
static void ipi_handler(void *info)
-/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
- [RETURNS] Nothing.
-*/
{
#ifdef CONFIG_SMP
struct set_mtrr_data *data = info;
@@ -142,18 +156,19 @@ static void ipi_handler(void *info)
local_irq_save(flags);
atomic_dec(&data->count);
- while(!atomic_read(&data->gate))
+ while (!atomic_read(&data->gate))
cpu_relax();
/* The master has cleared me to execute */
- if (data->smp_reg != ~0U)
- mtrr_if->set(data->smp_reg, data->smp_base,
+ if (data->smp_reg != ~0U) {
+ mtrr_if->set(data->smp_reg, data->smp_base,
data->smp_size, data->smp_type);
- else
+ } else {
mtrr_if->set_all();
+ }
atomic_dec(&data->count);
- while(atomic_read(&data->gate))
+ while (atomic_read(&data->gate))
cpu_relax();
atomic_dec(&data->count);
@@ -161,7 +176,8 @@ static void ipi_handler(void *info)
#endif
}
-static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
+static inline int types_compatible(mtrr_type type1, mtrr_type type2)
+{
return type1 == MTRR_TYPE_UNCACHABLE ||
type2 == MTRR_TYPE_UNCACHABLE ||
(type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
@@ -176,10 +192,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
* @type: mtrr type
*
* This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
- *
+ *
* 1. Send IPI to do the following:
* 2. Disable Interrupts
- * 3. Wait for all procs to do so
+ * 3. Wait for all procs to do so
* 4. Enter no-fill cache mode
* 5. Flush caches
* 6. Clear PGE bit
@@ -189,26 +205,27 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
* 10. Enable all range registers
* 11. Flush all TLBs and caches again
* 12. Enter normal cache mode and reenable caching
- * 13. Set PGE
+ * 13. Set PGE
* 14. Wait for buddies to catch up
* 15. Enable interrupts.
- *
+ *
* What does that mean for us? Well, first we set data.count to the number
* of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
* until it hits 0 and proceed. We set the data.gate flag and reset data.count.
- * Meanwhile, they are waiting for that flag to be set. Once it's set, each
- * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
- * differently, so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate to
- * be reset.
- * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
+ * Meanwhile, they are waiting for that flag to be set. Once it's set, each
+ * CPU goes through the transition of updating MTRRs.
+ * The CPU vendors may each do it differently,
+ * so we call mtrr_if->set() callback and let them take care of it.
+ * When they're done, they again decrement data->count and wait for data.gate
+ * to be reset.
+ * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
* Everyone then enables interrupts and we all continue on.
*
* Note that the mechanism is the same for UP systems, too; all the SMP stuff
* becomes nops.
*/
-static void set_mtrr(unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
+static void
+set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
struct set_mtrr_data data;
unsigned long flags;
@@ -218,121 +235,122 @@ static void set_mtrr(unsigned int reg, unsigned long base,
data.smp_size = size;
data.smp_type = type;
atomic_set(&data.count, num_booting_cpus() - 1);
- /* make sure data.count is visible before unleashing other CPUs */
+
+ /* Make sure data.count is visible before unleashing other CPUs */
smp_wmb();
- atomic_set(&data.gate,0);
+ atomic_set(&data.gate, 0);
- /* Start the ball rolling on other CPUs */
+ /* Start the ball rolling on other CPUs */
if (smp_call_function(ipi_handler, &data, 0) != 0)
panic("mtrr: timed out waiting for other CPUs\n");
local_irq_save(flags);
- while(atomic_read(&data.count))
+ while (atomic_read(&data.count))
cpu_relax();
- /* ok, reset count and toggle gate */
+ /* Ok, reset count and toggle gate */
atomic_set(&data.count, num_booting_cpus() - 1);
smp_wmb();
- atomic_set(&data.gate,1);
+ atomic_set(&data.gate, 1);
- /* do our MTRR business */
+ /* Do our MTRR business */
- /* HACK!
+ /*
+ * HACK!
* We use this same function to initialize the mtrrs on boot.
* The state of the boot cpu's mtrrs has been saved, and we want
- * to replicate across all the APs.
+ * to replicate across all the APs.
* If we're doing that @reg is set to something special...
*/
- if (reg != ~0U)
- mtrr_if->set(reg,base,size,type);
+ if (reg != ~0U)
+ mtrr_if->set(reg, base, size, type);
- /* wait for the others */
- while(atomic_read(&data.count))
+ /* Wait for the others */
+ while (atomic_read(&data.count))
cpu_relax();
atomic_set(&data.count, num_booting_cpus() - 1);
smp_wmb();
- atomic_set(&data.gate,0);
+ atomic_set(&data.gate, 0);
/*
* Wait here for everyone to have seen the gate change
* So we're the last ones to touch 'data'
*/
- while(atomic_read(&data.count))
+ while (atomic_read(&data.count))
cpu_relax();
local_irq_restore(flags);
}
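
The count/gate handshake that the comment above describes can be illustrated with a rough userspace analogue. The sketch below is not the kernel code; it merely mimics the same handshake with C11 atomics and pthreads, with the main thread playing the role of set_mtrr() and the workers playing ipi_handler(). All names are invented for illustration, and the spin loops stand in for cpu_relax() waits.

/* Illustrative userspace analogue of the set_mtrr() count/gate rendezvous. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 3

static atomic_int count;
static atomic_int gate;

static void *worker(void *arg)
{
	(void)arg;
	atomic_fetch_sub(&count, 1);            /* "I have parked" */
	while (!atomic_load(&gate))             /* wait for the master's go-ahead */
		;
	/* ... each CPU would reprogram its MTRRs here ... */
	atomic_fetch_sub(&count, 1);            /* "I am done" */
	while (atomic_load(&gate))              /* wait for the master to drop the gate */
		;
	atomic_fetch_sub(&count, 1);            /* "I have seen the gate drop" */
	return NULL;
}

int main(void)
{
	pthread_t tids[NWORKERS];
	int i;

	atomic_store(&count, NWORKERS);
	atomic_store(&gate, 0);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tids[i], NULL, worker, NULL);

	while (atomic_load(&count))             /* everyone parked? */
		;
	atomic_store(&count, NWORKERS);
	atomic_store(&gate, 1);                 /* open the gate: do the update */
	while (atomic_load(&count))             /* everyone done? */
		;
	atomic_store(&count, NWORKERS);
	atomic_store(&gate, 0);                 /* drop the gate */
	while (atomic_load(&count))             /* everyone saw it drop */
		;
	for (i = 0; i < NWORKERS; i++)
		pthread_join(tids[i], NULL);
	puts("rendezvous complete");
	return 0;
}
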
/**
- * mtrr_add_page - Add a memory type region
- * @base: Physical base address of region in pages (in units of 4 kB!)
- * @size: Physical size of region in pages (4 kB)
- * @type: Type of MTRR desired
- * @increment: If this is true do usage counting on the region
+ * mtrr_add_page - Add a memory type region
+ * @base: Physical base address of region in pages (in units of 4 kB!)
+ * @size: Physical size of region in pages (4 kB)
+ * @type: Type of MTRR desired
+ * @increment: If this is true do usage counting on the region
*
- * Memory type region registers control the caching on newer Intel and
- * non Intel processors. This function allows drivers to request an
- * MTRR is added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
- * caller should expect to need to provide a power of two size on an
- * equivalent power of two boundary.
+ * Memory type region registers control the caching on newer Intel and
+ * non Intel processors. This function allows drivers to request an
+ * MTRR is added. The details and hardware specifics of each processor's
+ * implementation are hidden from the caller, but nevertheless the
+ * caller should expect to need to provide a power of two size on an
+ * equivalent power of two boundary.
*
- * If the region cannot be added either because all regions are in use
- * or the CPU cannot support it a negative value is returned. On success
- * the register number for this entry is returned, but should be treated
- * as a cookie only.
+ * If the region cannot be added either because all regions are in use
+ * or the CPU cannot support it a negative value is returned. On success
+ * the register number for this entry is returned, but should be treated
+ * as a cookie only.
*
- * On a multiprocessor machine the changes are made to all processors.
- * This is required on x86 by the Intel processors.
+ * On a multiprocessor machine the changes are made to all processors.
+ * This is required on x86 by the Intel processors.
*
- * The available types are
+ * The available types are
*
- * %MTRR_TYPE_UNCACHABLE - No caching
+ * %MTRR_TYPE_UNCACHABLE - No caching
*
- * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
+ * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
*
- * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
+ * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
*
- * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
+ * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
*
- * BUGS: Needs a quiet flag for the cases where drivers do not mind
- * failures and do not wish system log messages to be sent.
+ * BUGS: Needs a quiet flag for the cases where drivers do not mind
+ * failures and do not wish system log messages to be sent.
*/
-
-int mtrr_add_page(unsigned long base, unsigned long size,
+int mtrr_add_page(unsigned long base, unsigned long size,
unsigned int type, bool increment)
{
+ unsigned long lbase, lsize;
int i, replace, error;
mtrr_type ltype;
- unsigned long lbase, lsize;
if (!mtrr_if)
return -ENXIO;
-
- if ((error = mtrr_if->validate_add_page(base,size,type)))
+
+ error = mtrr_if->validate_add_page(base, size, type);
+ if (error)
return error;
if (type >= MTRR_NUM_TYPES) {
- printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
+ pr_warning("mtrr: type: %u invalid\n", type);
return -EINVAL;
}
- /* If the type is WC, check that this processor supports it */
+ /* If the type is WC, check that this processor supports it */
if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
- printk(KERN_WARNING
- "mtrr: your processor doesn't support write-combining\n");
+ pr_warning("mtrr: your processor doesn't support write-combining\n");
return -ENOSYS;
}
if (!size) {
- printk(KERN_WARNING "mtrr: zero sized request\n");
+ pr_warning("mtrr: zero sized request\n");
return -EINVAL;
}
if (base & size_or_mask || size & size_or_mask) {
- printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
+ pr_warning("mtrr: base or size exceeds the MTRR width\n");
return -EINVAL;
}
@@ -341,36 +359,40 @@ int mtrr_add_page(unsigned long base, unsigned long size,
/* No CPU hotplug when we change MTRR entries */
get_online_cpus();
- /* Search for existing MTRR */
+
+ /* Search for existing MTRR */
mutex_lock(&mtrr_mutex);
for (i = 0; i < num_var_ranges; ++i) {
mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
+ if (!lsize || base > lbase + lsize - 1 ||
+ base + size - 1 < lbase)
continue;
- /* At this point we know there is some kind of overlap/enclosure */
+ /*
+ * At this point we know there is some kind of
+ * overlap/enclosure
+ */
if (base < lbase || base + size - 1 > lbase + lsize - 1) {
- if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
+ if (base <= lbase &&
+ base + size - 1 >= lbase + lsize - 1) {
/* New region encloses an existing region */
if (type == ltype) {
replace = replace == -1 ? i : -2;
continue;
- }
- else if (types_compatible(type, ltype))
+ } else if (types_compatible(type, ltype))
continue;
}
- printk(KERN_WARNING
- "mtrr: 0x%lx000,0x%lx000 overlaps existing"
- " 0x%lx000,0x%lx000\n", base, size, lbase,
- lsize);
+ pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
+ " 0x%lx000,0x%lx000\n", base, size, lbase,
+ lsize);
goto out;
}
- /* New region is enclosed by an existing region */
+ /* New region is enclosed by an existing region */
if (ltype != type) {
if (types_compatible(type, ltype))
continue;
- printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
- base, size, mtrr_attrib_to_str(ltype),
- mtrr_attrib_to_str(type));
+ pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
+ base, size, mtrr_attrib_to_str(ltype),
+ mtrr_attrib_to_str(type));
goto out;
}
if (increment)
@@ -378,7 +400,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
error = i;
goto out;
}
- /* Search for an empty MTRR */
+ /* Search for an empty MTRR */
i = mtrr_if->get_free_region(base, size, replace);
if (i >= 0) {
set_mtrr(i, base, size, type);
@@ -393,8 +415,9 @@ int mtrr_add_page(unsigned long base, unsigned long size,
mtrr_usage_table[replace] = 0;
}
}
- } else
- printk(KERN_INFO "mtrr: no more MTRRs available\n");
+ } else {
+ pr_info("mtrr: no more MTRRs available\n");
+ }
error = i;
out:
mutex_unlock(&mtrr_mutex);
@@ -405,10 +428,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
static int mtrr_check(unsigned long base, unsigned long size)
{
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_WARNING
- "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG
- "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+ pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
+ pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
dump_stack();
return -1;
}
@@ -416,66 +437,64 @@ static int mtrr_check(unsigned long base, unsigned long size)
}
/**
- * mtrr_add - Add a memory type region
- * @base: Physical base address of region
- * @size: Physical size of region
- * @type: Type of MTRR desired
- * @increment: If this is true do usage counting on the region
+ * mtrr_add - Add a memory type region
+ * @base: Physical base address of region
+ * @size: Physical size of region
+ * @type: Type of MTRR desired
+ * @increment: If this is true do usage counting on the region
*
- * Memory type region registers control the caching on newer Intel and
- * non Intel processors. This function allows drivers to request an
- * MTRR is added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
- * caller should expect to need to provide a power of two size on an
- * equivalent power of two boundary.
+ * Memory type region registers control the caching on newer Intel and
+ * non Intel processors. This function allows drivers to request an
+ * MTRR is added. The details and hardware specifics of each processor's
+ * implementation are hidden from the caller, but nevertheless the
+ * caller should expect to need to provide a power of two size on an
+ * equivalent power of two boundary.
*
- * If the region cannot be added either because all regions are in use
- * or the CPU cannot support it a negative value is returned. On success
- * the register number for this entry is returned, but should be treated
- * as a cookie only.
+ * If the region cannot be added either because all regions are in use
+ * or the CPU cannot support it a negative value is returned. On success
+ * the register number for this entry is returned, but should be treated
+ * as a cookie only.
*
- * On a multiprocessor machine the changes are made to all processors.
- * This is required on x86 by the Intel processors.
+ * On a multiprocessor machine the changes are made to all processors.
+ * This is required on x86 by the Intel processors.
*
- * The available types are
+ * The available types are
*
- * %MTRR_TYPE_UNCACHABLE - No caching
+ * %MTRR_TYPE_UNCACHABLE - No caching
*
- * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
+ * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
*
- * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
+ * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
*
- * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
+ * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
*
- * BUGS: Needs a quiet flag for the cases where drivers do not mind
- * failures and do not wish system log messages to be sent.
+ * BUGS: Needs a quiet flag for the cases where drivers do not mind
+ * failures and do not wish system log messages to be sent.
*/
-
-int
-mtrr_add(unsigned long base, unsigned long size, unsigned int type,
- bool increment)
+int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
+ bool increment)
{
if (mtrr_check(base, size))
return -EINVAL;
return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
increment);
}
+EXPORT_SYMBOL(mtrr_add);
/**
- * mtrr_del_page - delete a memory type region
- * @reg: Register returned by mtrr_add
- * @base: Physical base address
- * @size: Size of region
+ * mtrr_del_page - delete a memory type region
+ * @reg: Register returned by mtrr_add
+ * @base: Physical base address
+ * @size: Size of region
*
- * If register is supplied then base and size are ignored. This is
- * how drivers should call it.
+ * If register is supplied then base and size are ignored. This is
+ * how drivers should call it.
*
- * Releases an MTRR region. If the usage count drops to zero the
- * register is freed and the region returns to default state.
- * On success the register is returned, on failure a negative error
- * code.
+ * Releases an MTRR region. If the usage count drops to zero the
+ * register is freed and the region returns to default state.
+ * On success the register is returned, on failure a negative error
+ * code.
*/
-
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
int i, max;
@@ -500,22 +519,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
}
}
if (reg < 0) {
- printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
- size);
+ pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
+ base, size);
goto out;
}
}
if (reg >= max) {
- printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
+ pr_warning("mtrr: register: %d too big\n", reg);
goto out;
}
mtrr_if->get(reg, &lbase, &lsize, &ltype);
if (lsize < 1) {
- printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
+ pr_warning("mtrr: MTRR %d not used\n", reg);
goto out;
}
if (mtrr_usage_table[reg] < 1) {
- printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
+ pr_warning("mtrr: reg: %d has count=0\n", reg);
goto out;
}
if (--mtrr_usage_table[reg] < 1)
@@ -526,33 +545,31 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
put_online_cpus();
return error;
}
+
/**
- * mtrr_del - delete a memory type region
- * @reg: Register returned by mtrr_add
- * @base: Physical base address
- * @size: Size of region
+ * mtrr_del - delete a memory type region
+ * @reg: Register returned by mtrr_add
+ * @base: Physical base address
+ * @size: Size of region
*
- * If register is supplied then base and size are ignored. This is
- * how drivers should call it.
+ * If register is supplied then base and size are ignored. This is
+ * how drivers should call it.
*
- * Releases an MTRR region. If the usage count drops to zero the
- * register is freed and the region returns to default state.
- * On success the register is returned, on failure a negative error
- * code.
+ * Releases an MTRR region. If the usage count drops to zero the
+ * register is freed and the region returns to default state.
+ * On success the register is returned, on failure a negative error
+ * code.
*/
-
-int
-mtrr_del(int reg, unsigned long base, unsigned long size)
+int mtrr_del(int reg, unsigned long base, unsigned long size)
{
if (mtrr_check(base, size))
return -EINVAL;
return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
-
-EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);
-/* HACK ALERT!
+/*
+ * HACK ALERT!
* These should be called implicitly, but we can't yet until all the initcall
* stuff is done...
*/
@@ -576,29 +593,28 @@ struct mtrr_value {
static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
-static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
+static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
int i;
for (i = 0; i < num_var_ranges; i++) {
- mtrr_if->get(i,
- &mtrr_value[i].lbase,
- &mtrr_value[i].lsize,
- &mtrr_value[i].ltype);
+ mtrr_if->get(i, &mtrr_value[i].lbase,
+ &mtrr_value[i].lsize,
+ &mtrr_value[i].ltype);
}
return 0;
}
-static int mtrr_restore(struct sys_device * sysdev)
+static int mtrr_restore(struct sys_device *sysdev)
{
int i;
for (i = 0; i < num_var_ranges; i++) {
- if (mtrr_value[i].lsize)
- set_mtrr(i,
- mtrr_value[i].lbase,
- mtrr_value[i].lsize,
- mtrr_value[i].ltype);
+ if (mtrr_value[i].lsize) {
+ set_mtrr(i, mtrr_value[i].lbase,
+ mtrr_value[i].lsize,
+ mtrr_value[i].ltype);
+ }
}
return 0;
}
@@ -615,26 +631,29 @@ int __initdata changed_by_mtrr_cleanup;
/**
* mtrr_bp_init - initialize mtrrs on the boot CPU
*
- * This needs to be called early; before any of the other CPUs are
+ * This needs to be called early; before any of the other CPUs are
* initialized (i.e. before smp_init()).
- *
+ *
*/
void __init mtrr_bp_init(void)
{
u32 phys_addr;
+
init_ifs();
phys_addr = 32;
if (cpu_has_mtrr) {
mtrr_if = &generic_mtrr_ops;
- size_or_mask = 0xff000000; /* 36 bits */
+ size_or_mask = 0xff000000; /* 36 bits */
size_and_mask = 0x00f00000;
phys_addr = 36;
- /* This is an AMD specific MSR, but we assume(hope?) that
- Intel will implement it to when they extend the address
- bus of the Xeon. */
+ /*
+	 * This is an AMD specific MSR, but we assume (hope?) that
+	 * Intel will implement it too when they extend the address
+ * bus of the Xeon.
+ */
if (cpuid_eax(0x80000000) >= 0x80000008) {
phys_addr = cpuid_eax(0x80000008) & 0xff;
/* CPUID workaround for Intel 0F33/0F34 CPU */
@@ -649,9 +668,11 @@ void __init mtrr_bp_init(void)
size_and_mask = ~size_or_mask & 0xfffff00000ULL;
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
boot_cpu_data.x86 == 6) {
- /* VIA C* family have Intel style MTRRs, but
- don't support PAE */
- size_or_mask = 0xfff00000; /* 32 bits */
+ /*
+ * VIA C* family have Intel style MTRRs,
+ * but don't support PAE
+ */
+ size_or_mask = 0xfff00000; /* 32 bits */
size_and_mask = 0;
phys_addr = 32;
}
@@ -694,7 +715,6 @@ void __init mtrr_bp_init(void)
changed_by_mtrr_cleanup = 1;
mtrr_if->set_all();
}
-
}
}
}
@@ -706,12 +726,17 @@ void mtrr_ap_init(void)
if (!mtrr_if || !use_intel())
return;
/*
- * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
- * but this routine will be called in cpu boot time, holding the lock
- * breaks it. This routine is called in two cases: 1.very earily time
- * of software resume, when there absolutely isn't mtrr entry changes;
- * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
- * prevent mtrr entry changes
+	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
+	 * being changed, but this routine will be called at CPU boot time,
+	 * and holding the lock there breaks it.
+	 *
+	 * This routine is called in two cases:
+	 *
+	 * 1. very early in software resume, when there absolutely are
+	 *    no MTRR entry changes;
+	 *
+	 * 2. CPU hotadd time. We let mtrr_add/del_page hold the cpuhotplug
+	 *    lock to prevent MTRR entry changes.
*/
local_irq_save(flags);
@@ -732,19 +757,23 @@ static int __init mtrr_init_finialize(void)
{
if (!mtrr_if)
return 0;
+
if (use_intel()) {
if (!changed_by_mtrr_cleanup)
mtrr_state_warn();
- } else {
- /* The CPUs haven't MTRR and seem to not support SMP. They have
- * specific drivers, we use a tricky method to support
- * suspend/resume for them.
- * TBD: is there any system with such CPU which supports
- * suspend/resume? if no, we should remove the code.
- */
- sysdev_driver_register(&cpu_sysdev_class,
- &mtrr_sysdev_driver);
+ return 0;
}
+
+ /*
+	 * The CPU has no MTRR and seems not to support SMP. Such CPUs have
+	 * specific drivers; we use a tricky method to support
+	 * suspend/resume for them.
+	 *
+	 * TBD: is there any system with such a CPU which supports
+	 * suspend/resume? If not, we should remove the code.
+ */
+ sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);
+
return 0;
}
subsys_initcall(mtrr_init_finialize);
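
As a reminder of how drivers consume this API, here is a hedged in-kernel sketch (hypothetical function and variable names, invented framebuffer values) that follows the kernel-doc above: base and size are in bytes, the region is a power of two in size and aligned to it, and the return value is treated purely as a cookie to hand back to mtrr_del().

/* Hypothetical driver fragment: map a 16 MB framebuffer write-combined. */
#include <linux/kernel.h>
#include <asm/mtrr.h>

static int fb_mtrr_cookie = -1;

static void example_enable_wc(unsigned long fb_base, unsigned long fb_len)
{
	/* base/len are in bytes here; mtrr_add() does the page conversion. */
	fb_mtrr_cookie = mtrr_add(fb_base, fb_len, MTRR_TYPE_WRCOMB, true);
	if (fb_mtrr_cookie < 0)
		pr_info("example: write-combining unavailable (%d)\n",
			fb_mtrr_cookie);
}

static void example_disable_wc(unsigned long fb_base, unsigned long fb_len)
{
	if (fb_mtrr_cookie >= 0)
		mtrr_del(fb_mtrr_cookie, fb_base, fb_len);
}
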
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 7538b767f206..a501dee9a87a 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -1,5 +1,5 @@
/*
- * local mtrr defines.
+ * local MTRR defines.
*/
#include <linux/types.h>
@@ -14,13 +14,12 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
struct mtrr_ops {
u32 vendor;
u32 use_intel_if;
-// void (*init)(void);
void (*set)(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
void (*set_all)(void);
void (*get)(unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type * type);
+ unsigned long *size, mtrr_type *type);
int (*get_free_region)(unsigned long base, unsigned long size,
int replace_reg);
int (*validate_add_page)(unsigned long base, unsigned long size,
@@ -39,11 +38,11 @@ extern int positive_have_wrcomb(void);
/* library functions for processor-specific routines */
struct set_mtrr_context {
- unsigned long flags;
- unsigned long cr4val;
- u32 deftype_lo;
- u32 deftype_hi;
- u32 ccr3;
+ unsigned long flags;
+ unsigned long cr4val;
+ u32 deftype_lo;
+ u32 deftype_hi;
+ u32 ccr3;
};
void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -54,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
void get_mtrr_state(void);
-extern void set_mtrr_ops(struct mtrr_ops * ops);
+extern void set_mtrr_ops(struct mtrr_ops *ops);
extern u64 size_or_mask, size_and_mask;
-extern struct mtrr_ops * mtrr_if;
+extern struct mtrr_ops *mtrr_if;
#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
index 1f5fb1588d1f..dfc80b4e6b0d 100644
--- a/arch/x86/kernel/cpu/mtrr/state.c
+++ b/arch/x86/kernel/cpu/mtrr/state.c
@@ -1,24 +1,25 @@
-#include <linux/mm.h>
#include <linux/init.h>
-#include <asm/io.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
-#include "mtrr.h"
+#include <asm/mtrr.h>
+#include <asm/msr.h>
+#include "mtrr.h"
-/* Put the processor into a state where MTRRs can be safely set */
+/* Put the processor into a state where MTRRs can be safely set */
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
{
unsigned int cr0;
- /* Disable interrupts locally */
+ /* Disable interrupts locally */
local_irq_save(ctxt->flags);
if (use_intel() || is_cpu(CYRIX)) {
- /* Save value of CR4 and clear Page Global Enable (bit 7) */
+ /* Save value of CR4 and clear Page Global Enable (bit 7) */
if (cpu_has_pge) {
ctxt->cr4val = read_cr4();
write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
@@ -33,50 +34,61 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
write_cr0(cr0);
wbinvd();
- if (use_intel())
- /* Save MTRR state */
+ if (use_intel()) {
+ /* Save MTRR state */
rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
- else
- /* Cyrix ARRs - everything else were excluded at the top */
+ } else {
+ /*
+ * Cyrix ARRs -
+			 * everything else was excluded at the top
+ */
ctxt->ccr3 = getCx86(CX86_CCR3);
+ }
}
}
void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
{
- if (use_intel())
- /* Disable MTRRs, and set the default type to uncached */
+ if (use_intel()) {
+ /* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL,
ctxt->deftype_hi);
- else if (is_cpu(CYRIX))
- /* Cyrix ARRs - everything else were excluded at the top */
- setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
+ } else {
+ if (is_cpu(CYRIX)) {
+			/* Cyrix ARRs - everything else was excluded at the top */
+ setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
+ }
+ }
}
-/* Restore the processor after a set_mtrr_prepare */
+/* Restore the processor after a set_mtrr_prepare */
void set_mtrr_done(struct set_mtrr_context *ctxt)
{
if (use_intel() || is_cpu(CYRIX)) {
- /* Flush caches and TLBs */
+ /* Flush caches and TLBs */
wbinvd();
- /* Restore MTRRdefType */
- if (use_intel())
+ /* Restore MTRRdefType */
+ if (use_intel()) {
/* Intel (P6) standard MTRRs */
- mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
- else
- /* Cyrix ARRs - everything else was excluded at the top */
+ mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo,
+ ctxt->deftype_hi);
+ } else {
+ /*
+ * Cyrix ARRs -
+ * everything else was excluded at the top
+ */
setCx86(CX86_CCR3, ctxt->ccr3);
+ }
- /* Enable caches */
+ /* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
- /* Restore value of CR4 */
+ /* Restore value of CR4 */
if (cpu_has_pge)
write_cr4(ctxt->cr4val);
}
- /* Re-enable interrupts locally (if enabled previously) */
+ /* Re-enable interrupts locally (if enabled previously) */
local_irq_restore(ctxt->flags);
}
-
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 900332b800f8..f9cd0849bd42 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -6,6 +6,7 @@
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* For licencing details see kernel-base/COPYING
*/
@@ -20,6 +21,7 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
+#include <linux/cpu.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@@ -27,12 +29,52 @@
static u64 perf_counter_mask __read_mostly;
+/* The maximal number of PEBS counters: */
+#define MAX_PEBS_COUNTERS 4
+
+/* The size of a BTS record in bytes: */
+#define BTS_RECORD_SIZE 24
+
+/* The size of a per-cpu BTS buffer in bytes: */
+#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024)
+
+/* The BTS overflow threshold in bytes from the end of the buffer: */
+#define BTS_OVFL_TH (BTS_RECORD_SIZE * 64)
+
+
+/*
+ * Bits in the debugctlmsr controlling branch tracing.
+ */
+#define X86_DEBUGCTL_TR (1 << 6)
+#define X86_DEBUGCTL_BTS (1 << 7)
+#define X86_DEBUGCTL_BTINT (1 << 8)
+#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
+#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+ u64 bts_buffer_base;
+ u64 bts_index;
+ u64 bts_absolute_maximum;
+ u64 bts_interrupt_threshold;
+ u64 pebs_buffer_base;
+ u64 pebs_index;
+ u64 pebs_absolute_maximum;
+ u64 pebs_interrupt_threshold;
+ u64 pebs_counter_reset[MAX_PEBS_COUNTERS];
+};
+
struct cpu_hw_counters {
struct perf_counter *counters[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long interrupts;
int enabled;
+ struct debug_store *ds;
};
/*
@@ -58,6 +100,8 @@ struct x86_pmu {
int apic;
u64 max_period;
u64 intel_ctrl;
+ void (*enable_bts)(u64 config);
+ void (*disable_bts)(void);
};
static struct x86_pmu x86_pmu __read_mostly;
@@ -577,6 +621,9 @@ x86_perf_counter_update(struct perf_counter *counter,
u64 prev_raw_count, new_raw_count;
s64 delta;
+ if (idx == X86_PMC_IDX_FIXED_BTS)
+ return 0;
+
/*
* Careful: an NMI might modify the previous counter value.
*
@@ -666,10 +713,110 @@ static void release_pmc_hardware(void)
#endif
}
+static inline bool bts_available(void)
+{
+ return x86_pmu.enable_bts != NULL;
+}
+
+static inline void init_debug_store_on_cpu(int cpu)
+{
+ struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+
+ if (!ds)
+ return;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
+ (u32)((u64)(unsigned long)ds),
+ (u32)((u64)(unsigned long)ds >> 32));
+}
+
+static inline void fini_debug_store_on_cpu(int cpu)
+{
+ if (!per_cpu(cpu_hw_counters, cpu).ds)
+ return;
+
+ wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
+}
+
+static void release_bts_hardware(void)
+{
+ int cpu;
+
+ if (!bts_available())
+ return;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu)
+ fini_debug_store_on_cpu(cpu);
+
+ for_each_possible_cpu(cpu) {
+ struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+
+ if (!ds)
+ continue;
+
+ per_cpu(cpu_hw_counters, cpu).ds = NULL;
+
+ kfree((void *)(unsigned long)ds->bts_buffer_base);
+ kfree(ds);
+ }
+
+ put_online_cpus();
+}
+
+static int reserve_bts_hardware(void)
+{
+ int cpu, err = 0;
+
+ if (!bts_available())
+ return 0;
+
+ get_online_cpus();
+
+ for_each_possible_cpu(cpu) {
+ struct debug_store *ds;
+ void *buffer;
+
+ err = -ENOMEM;
+ buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
+ if (unlikely(!buffer))
+ break;
+
+ ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+ if (unlikely(!ds)) {
+ kfree(buffer);
+ break;
+ }
+
+ ds->bts_buffer_base = (u64)(unsigned long)buffer;
+ ds->bts_index = ds->bts_buffer_base;
+ ds->bts_absolute_maximum =
+ ds->bts_buffer_base + BTS_BUFFER_SIZE;
+ ds->bts_interrupt_threshold =
+ ds->bts_absolute_maximum - BTS_OVFL_TH;
+
+ per_cpu(cpu_hw_counters, cpu).ds = ds;
+ err = 0;
+ }
+
+ if (err)
+ release_bts_hardware();
+ else {
+ for_each_online_cpu(cpu)
+ init_debug_store_on_cpu(cpu);
+ }
+
+ put_online_cpus();
+
+ return err;
+}
+
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
release_pmc_hardware();
+ release_bts_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
@@ -712,6 +859,42 @@ set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
return 0;
}
+static void intel_pmu_enable_bts(u64 config)
+{
+ unsigned long debugctlmsr;
+
+ debugctlmsr = get_debugctlmsr();
+
+ debugctlmsr |= X86_DEBUGCTL_TR;
+ debugctlmsr |= X86_DEBUGCTL_BTS;
+ debugctlmsr |= X86_DEBUGCTL_BTINT;
+
+ if (!(config & ARCH_PERFMON_EVENTSEL_OS))
+ debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
+
+ if (!(config & ARCH_PERFMON_EVENTSEL_USR))
+ debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
+
+ update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_disable_bts(void)
+{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ unsigned long debugctlmsr;
+
+ if (!cpuc->ds)
+ return;
+
+ debugctlmsr = get_debugctlmsr();
+
+ debugctlmsr &=
+ ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
+ X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
+
+ update_debugctlmsr(debugctlmsr);
+}
+
/*
* Setup the hardware configuration for a given attr_type
*/
@@ -728,9 +911,13 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
err = 0;
if (!atomic_inc_not_zero(&active_counters)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
- err = -EBUSY;
- else
+ if (atomic_read(&active_counters) == 0) {
+ if (!reserve_pmc_hardware())
+ err = -EBUSY;
+ else
+ err = reserve_bts_hardware();
+ }
+ if (!err)
atomic_inc(&active_counters);
mutex_unlock(&pmc_reserve_mutex);
}
@@ -793,6 +980,20 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
if (config == -1LL)
return -EINVAL;
+ /*
+ * Branch tracing:
+ */
+ if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
+ (hwc->sample_period == 1)) {
+ /* BTS is not supported by this architecture. */
+ if (!bts_available())
+ return -EOPNOTSUPP;
+
+ /* BTS is currently only allowed for user-mode. */
+ if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+ return -EOPNOTSUPP;
+ }
+
hwc->config |= config;
return 0;
@@ -817,7 +1018,18 @@ static void p6_pmu_disable_all(void)
static void intel_pmu_disable_all(void)
{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+ if (!cpuc->enabled)
+ return;
+
+ cpuc->enabled = 0;
+ barrier();
+
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+ if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+ intel_pmu_disable_bts();
}
static void amd_pmu_disable_all(void)
@@ -875,7 +1087,25 @@ static void p6_pmu_enable_all(void)
static void intel_pmu_enable_all(void)
{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+ if (cpuc->enabled)
+ return;
+
+ cpuc->enabled = 1;
+ barrier();
+
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+
+ if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+ struct perf_counter *counter =
+ cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+
+ if (WARN_ON_ONCE(!counter))
+ return;
+
+ intel_pmu_enable_bts(counter->hw.config);
+ }
}
static void amd_pmu_enable_all(void)
@@ -962,6 +1192,11 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
+ if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+ intel_pmu_disable_bts();
+ return;
+ }
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc, idx);
return;
@@ -990,6 +1225,9 @@ x86_perf_counter_set_period(struct perf_counter *counter,
s64 period = hwc->sample_period;
int err, ret = 0;
+ if (idx == X86_PMC_IDX_FIXED_BTS)
+ return 0;
+
/*
* If we are way outside a reasoable range then just skip forward:
*/
@@ -1072,6 +1310,14 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
+ if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+ if (!__get_cpu_var(cpu_hw_counters).enabled)
+ return;
+
+ intel_pmu_enable_bts(hwc->config);
+ return;
+ }
+
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_enable_fixed(hwc, idx);
return;
@@ -1093,11 +1339,16 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
unsigned int event;
+ event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+
+ if (unlikely((event ==
+ x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
+ (hwc->sample_period == 1)))
+ return X86_PMC_IDX_FIXED_BTS;
+
if (!x86_pmu.num_counters_fixed)
return -1;
- event = hwc->config & ARCH_PERFMON_EVENT_MASK;
-
if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
return X86_PMC_IDX_FIXED_INSTRUCTIONS;
if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
@@ -1118,7 +1369,15 @@ static int x86_pmu_enable(struct perf_counter *counter)
int idx;
idx = fixed_mode_idx(counter, hwc);
- if (idx >= 0) {
+ if (idx == X86_PMC_IDX_FIXED_BTS) {
+ /* BTS is already occupied. */
+ if (test_and_set_bit(idx, cpuc->used_mask))
+ return -EAGAIN;
+
+ hwc->config_base = 0;
+ hwc->counter_base = 0;
+ hwc->idx = idx;
+ } else if (idx >= 0) {
/*
* Try to get the fixed counter, if that is already taken
* then try to get a generic counter:
@@ -1229,6 +1488,44 @@ void perf_counter_print_debug(void)
local_irq_restore(flags);
}
+static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc,
+ struct perf_sample_data *data)
+{
+ struct debug_store *ds = cpuc->ds;
+ struct bts_record {
+ u64 from;
+ u64 to;
+ u64 flags;
+ };
+ struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+ unsigned long orig_ip = data->regs->ip;
+ struct bts_record *at, *top;
+
+ if (!counter)
+ return;
+
+ if (!ds)
+ return;
+
+ at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
+ top = (struct bts_record *)(unsigned long)ds->bts_index;
+
+ ds->bts_index = ds->bts_buffer_base;
+
+ for (; at < top; at++) {
+ data->regs->ip = at->from;
+ data->addr = at->to;
+
+ perf_counter_output(counter, 1, data);
+ }
+
+ data->regs->ip = orig_ip;
+ data->addr = 0;
+
+ /* There's new data available. */
+ counter->pending_kill = POLL_IN;
+}
+
static void x86_pmu_disable(struct perf_counter *counter)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
@@ -1253,6 +1550,15 @@ static void x86_pmu_disable(struct perf_counter *counter)
* that we are disabling:
*/
x86_perf_counter_update(counter, hwc, idx);
+
+ /* Drain the remaining BTS records. */
+ if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+ struct perf_sample_data data;
+ struct pt_regs regs;
+
+ data.regs = &regs;
+ intel_pmu_drain_bts_buffer(cpuc, &data);
+ }
cpuc->counters[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
@@ -1280,6 +1586,7 @@ static int intel_pmu_save_and_restart(struct perf_counter *counter)
static void intel_pmu_reset(void)
{
+ struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds;
unsigned long flags;
int idx;
@@ -1297,6 +1604,8 @@ static void intel_pmu_reset(void)
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
}
+ if (ds)
+ ds->bts_index = ds->bts_buffer_base;
local_irq_restore(flags);
}
@@ -1362,6 +1671,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
cpuc = &__get_cpu_var(cpu_hw_counters);
perf_disable();
+ intel_pmu_drain_bts_buffer(cpuc, &data);
status = intel_pmu_get_status();
if (!status) {
perf_enable();
@@ -1571,6 +1881,8 @@ static struct x86_pmu intel_pmu = {
* the generic counter period:
*/
.max_period = (1ULL << 31) - 1,
+ .enable_bts = intel_pmu_enable_bts,
+ .disable_bts = intel_pmu_disable_bts,
};
static struct x86_pmu amd_pmu = {
@@ -1962,3 +2274,8 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
return entry;
}
+
+void hw_perf_counter_setup_online(int cpu)
+{
+ init_debug_store_on_cpu(cpu);
+}
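
The drain routine above walks fixed-size records between bts_buffer_base and bts_index. The standalone sketch below replays that walk in userspace with the same assumed 24-byte from/to/flags record layout (matching BTS_RECORD_SIZE); it is illustrative only and uses made-up addresses.

/* Illustrative walk over a BTS buffer using the layout assumed above. */
#include <stdint.h>
#include <stdio.h>

struct bts_record {
	uint64_t from;      /* branch source address */
	uint64_t to;        /* branch target address */
	uint64_t flags;     /* e.g. "predicted" bit; format is CPU-specific */
};

/* base..index delimit the valid records, exactly as ds->bts_buffer_base
 * and ds->bts_index do in the kernel code above. */
static void drain_bts(uint64_t base, uint64_t index)
{
	struct bts_record *at  = (struct bts_record *)(unsigned long)base;
	struct bts_record *top = (struct bts_record *)(unsigned long)index;

	for (; at < top; at++)
		printf("branch %#llx -> %#llx\n",
		       (unsigned long long)at->from,
		       (unsigned long long)at->to);
}

int main(void)
{
	struct bts_record demo[2] = {
		{ 0x400123, 0x400456, 0 },
		{ 0x400789, 0x4007aa, 0 },
	};

	drain_bts((uint64_t)(unsigned long)&demo[0],
		  (uint64_t)(unsigned long)&demo[2]);
	return 0;
}
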
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index e60ed740d2b3..392bea43b890 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -68,16 +68,16 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
/* returns the bit offset of the performance counter register */
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
- return (msr - MSR_K7_PERFCTR0);
+ return msr - MSR_K7_PERFCTR0;
case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return (msr - MSR_ARCH_PERFMON_PERFCTR0);
+ return msr - MSR_ARCH_PERFMON_PERFCTR0;
switch (boot_cpu_data.x86) {
case 6:
- return (msr - MSR_P6_PERFCTR0);
+ return msr - MSR_P6_PERFCTR0;
case 15:
- return (msr - MSR_P4_BPU_PERFCTR0);
+ return msr - MSR_P4_BPU_PERFCTR0;
}
}
return 0;
@@ -92,16 +92,16 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* returns the bit offset of the event selection register */
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
- return (msr - MSR_K7_EVNTSEL0);
+ return msr - MSR_K7_EVNTSEL0;
case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
+ return msr - MSR_ARCH_PERFMON_EVENTSEL0;
switch (boot_cpu_data.x86) {
case 6:
- return (msr - MSR_P6_EVNTSEL0);
+ return msr - MSR_P6_EVNTSEL0;
case 15:
- return (msr - MSR_P4_BSU_ESCR0);
+ return msr - MSR_P4_BSU_ESCR0;
}
}
return 0;
@@ -113,7 +113,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- return (!test_bit(counter, perfctr_nmi_owner));
+ return !test_bit(counter, perfctr_nmi_owner);
}
/* checks the an msr for availability */
@@ -124,7 +124,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr)
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
- return (!test_bit(counter, perfctr_nmi_owner));
+ return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
@@ -237,7 +237,7 @@ static unsigned int adjust_for_32bit_ctr(unsigned int hz)
*/
counter_val = (u64)cpu_khz * 1000;
do_div(counter_val, retval);
- if (counter_val > 0x7fffffffULL) {
+ if (counter_val > 0x7fffffffULL) {
u64 count = (u64)cpu_khz * 1000;
do_div(count, 0x7fffffffUL);
retval = count + 1;
@@ -251,7 +251,7 @@ static void write_watchdog_counter(unsigned int perfctr_msr,
u64 count = (u64)cpu_khz * 1000;
do_div(count, nmi_hz);
- if(descr)
+ if (descr)
pr_debug("setting %s to -0x%08Lx\n", descr, count);
wrmsrl(perfctr_msr, 0 - count);
}
@@ -262,7 +262,7 @@ static void write_watchdog_counter32(unsigned int perfctr_msr,
u64 count = (u64)cpu_khz * 1000;
do_div(count, nmi_hz);
- if(descr)
+ if (descr)
pr_debug("setting %s to -0x%08Lx\n", descr, count);
wrmsr(perfctr_msr, (u32)(-count), 0);
}
@@ -296,7 +296,7 @@ static int setup_k7_watchdog(unsigned nmi_hz)
/* setup the timer */
wrmsr(evntsel_msr, evntsel, 0);
- write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
+ write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
/* initialize the wd struct before enabling */
wd->perfctr_msr = perfctr_msr;
@@ -387,7 +387,7 @@ static int setup_p6_watchdog(unsigned nmi_hz)
/* setup the timer */
wrmsr(evntsel_msr, evntsel, 0);
nmi_hz = adjust_for_32bit_ctr(nmi_hz);
- write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
+ write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
/* initialize the wd struct before enabling */
wd->perfctr_msr = perfctr_msr;
@@ -415,7 +415,7 @@ static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
apic_write(APIC_LVTPC, APIC_DM_NMI);
/* P6/ARCH_PERFMON has 32 bit counter write */
- write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz);
+ write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
}
static const struct wd_ops p6_wd_ops = {
@@ -490,9 +490,9 @@ static int setup_p4_watchdog(unsigned nmi_hz)
if (smp_num_siblings == 2) {
unsigned int ebx, apicid;
- ebx = cpuid_ebx(1);
- apicid = (ebx >> 24) & 0xff;
- ht_num = apicid & 1;
+ ebx = cpuid_ebx(1);
+ apicid = (ebx >> 24) & 0xff;
+ ht_num = apicid & 1;
} else
#endif
ht_num = 0;
@@ -544,7 +544,7 @@ static int setup_p4_watchdog(unsigned nmi_hz)
}
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
- | P4_ESCR_OS
+ | P4_ESCR_OS
| P4_ESCR_USR;
cccr_val |= P4_CCCR_THRESHOLD(15)
@@ -612,7 +612,7 @@ static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
unsigned dummy;
/*
- * P4 quirks:
+ * P4 quirks:
* - An overflown perfctr will assert its interrupt
* until the OVF flag in its CCCR is cleared.
* - LVTPC is masked on interrupt and must be
@@ -662,7 +662,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
* NOTE: Corresponding bit = 0 in ebx indicates event present.
*/
cpuid(10, &(eax.full), &ebx, &unused, &unused);
- if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
+ if ((eax.split.mask_length <
+ (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
return 0;
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index d5e30397246b..62ac8cb6ba27 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -116,11 +116,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
#endif
seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
-#ifdef CONFIG_X86_64
seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
c->x86_phys_bits, c->x86_virt_bits);
-#endif
seq_printf(m, "power management:");
for (i = 0; i < 32; i++) {
@@ -128,7 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (i < ARRAY_SIZE(x86_power_flags) &&
x86_power_flags[i])
seq_printf(m, "%s%s",
- x86_power_flags[i][0]?" ":"",
+ x86_power_flags[i][0] ? " " : "",
x86_power_flags[i]);
else
seq_printf(m, " [%d]", i);
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 284c399e3234..bc24f514ec93 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -49,17 +49,17 @@ static inline int __vmware_platform(void)
static unsigned long __vmware_get_tsc_khz(void)
{
- uint64_t tsc_hz;
- uint32_t eax, ebx, ecx, edx;
+ uint64_t tsc_hz;
+ uint32_t eax, ebx, ecx, edx;
- VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+ VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
- if (ebx == UINT_MAX)
- return 0;
- tsc_hz = eax | (((uint64_t)ebx) << 32);
- do_div(tsc_hz, 1000);
- BUG_ON(tsc_hz >> 32);
- return tsc_hz;
+ if (ebx == UINT_MAX)
+ return 0;
+ tsc_hz = eax | (((uint64_t)ebx) << 32);
+ do_div(tsc_hz, 1000);
+ BUG_ON(tsc_hz >> 32);
+ return tsc_hz;
}
/*
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index b4f14c6c09d9..37250fe490b1 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -27,9 +27,7 @@ static void doublefault_fn(void)
if (ptr_ok(gdt)) {
gdt += GDT_ENTRY_TSS << 3;
- tss = *(u16 *)(gdt+2);
- tss += *(u8 *)(gdt+4) << 16;
- tss += *(u8 *)(gdt+7) << 24;
+ tss = get_desc_base((struct desc_struct *)gdt);
printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
if (ptr_ok(tss)) {
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 48bfe1386038..ef42a038f1a6 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -509,15 +509,15 @@ enum bts_field {
bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
};
-static inline unsigned long bts_get(const char *base, enum bts_field field)
+static inline unsigned long bts_get(const char *base, unsigned long field)
{
base += (ds_cfg.sizeof_ptr_field * field);
return *(unsigned long *)base;
}
-static inline void bts_set(char *base, enum bts_field field, unsigned long val)
+static inline void bts_set(char *base, unsigned long field, unsigned long val)
{
- base += (ds_cfg.sizeof_ptr_field * field);;
+ base += (ds_cfg.sizeof_ptr_field * field);
(*(unsigned long *)base) = val;
}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index c8405718a4c3..2d8a371d4339 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -15,7 +15,6 @@
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
-#include <linux/ftrace.h>
#include <asm/stacktrace.h>
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d94e1ea3b9fe..9dbb527e1652 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -417,10 +417,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long return_hooker = (unsigned long)
&return_to_handler;
- /* Nmi's are currently unsupported */
- if (unlikely(in_nmi()))
- return;
-
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
@@ -498,37 +494,56 @@ static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
struct syscall_metadata *syscall_nr_to_meta(int nr)
{
- if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
+ if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
return NULL;
return syscalls_metadata[nr];
}
-void arch_init_ftrace_syscalls(void)
+int syscall_name_to_nr(char *name)
+{
+ int i;
+
+ if (!syscalls_metadata)
+ return -1;
+
+ for (i = 0; i < NR_syscalls; i++) {
+ if (syscalls_metadata[i]) {
+ if (!strcmp(syscalls_metadata[i]->name, name))
+ return i;
+ }
+ }
+ return -1;
+}
+
+void set_syscall_enter_id(int num, int id)
+{
+ syscalls_metadata[num]->enter_id = id;
+}
+
+void set_syscall_exit_id(int num, int id)
+{
+ syscalls_metadata[num]->exit_id = id;
+}
+
+static int __init arch_init_ftrace_syscalls(void)
{
int i;
struct syscall_metadata *meta;
unsigned long **psys_syscall_table = &sys_call_table;
- static atomic_t refs;
-
- if (atomic_inc_return(&refs) != 1)
- goto end;
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
- FTRACE_SYSCALL_MAX, GFP_KERNEL);
+ NR_syscalls, GFP_KERNEL);
if (!syscalls_metadata) {
WARN_ON(1);
- return;
+ return -ENOMEM;
}
- for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
+ for (i = 0; i < NR_syscalls; i++) {
meta = find_syscall_meta(psys_syscall_table[i]);
syscalls_metadata[i] = meta;
}
- return;
-
- /* Paranoid: avoid overflow */
-end:
- atomic_dec(&refs);
+ return 0;
}
+arch_initcall(arch_init_ftrace_syscalls);
#endif
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index cc827ac9e8d3..7ffec6b3b331 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -439,7 +439,6 @@ is386: movl $2,%ecx # set MP
jne 1f
movl $per_cpu__gdt_page,%eax
movl $per_cpu__stack_canary,%ecx
- subl $20, %ecx
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 3b09634a5153..7d35d0fe2329 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -218,7 +218,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
void fixup_irqs(void)
{
unsigned int irq;
- static int warned;
struct irq_desc *desc;
for_each_irq_desc(irq, desc) {
@@ -236,8 +235,8 @@ void fixup_irqs(void)
}
if (desc->chip->set_affinity)
desc->chip->set_affinity(irq, affinity);
- else if (desc->action && !(warned++))
- printk("Cannot set affinity for irq %i\n", irq);
+ else if (desc->action)
+ printk_once("Cannot set affinity for irq %i\n", irq);
}
#if 0
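
The open-coded 'warned' flag is replaced by printk_once(). As a rough userspace stand-in, simplified from include/linux/kernel.h (details of the real macro may differ), the behaviour is:

#include <stdbool.h>
#include <stdio.h>

/* Simplified userspace stand-in for the kernel's printk_once(). */
#define printk_once(fmt...)			\
({						\
	static bool __print_once;		\
						\
	if (!__print_once) {			\
		__print_once = true;		\
		printf(fmt);			\
	}					\
})

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printk_once("printed only once despite %d iterations\n", 3);
	return 0;
}
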
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 651c93b28862..fcd513bf2846 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -482,11 +482,11 @@ static void __init construct_ioapic_table(int mpc_default_type)
MP_bus_info(&bus);
}
- ioapic.type = MP_IOAPIC;
- ioapic.apicid = 2;
- ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
- ioapic.flags = MPC_APIC_USABLE;
- ioapic.apicaddr = 0xFEC00000;
+ ioapic.type = MP_IOAPIC;
+ ioapic.apicid = 2;
+ ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
+ ioapic.flags = MPC_APIC_USABLE;
+ ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
MP_ioapic_info(&ioapic);
/*
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 98fd6cd4e3a4..7dd950094178 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -1,6 +1,7 @@
/* ----------------------------------------------------------------------- *
*
* Copyright 2000-2008 H. Peter Anvin - All Rights Reserved
+ * Copyright 2009 Intel Corporation; author: H. Peter Anvin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -80,11 +81,8 @@ static ssize_t msr_read(struct file *file, char __user *buf,
for (; count; count -= 8) {
err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
- if (err) {
- if (err == -EFAULT) /* Fix idiotic error code */
- err = -EIO;
+ if (err)
break;
- }
if (copy_to_user(tmp, &data, 8)) {
err = -EFAULT;
break;
@@ -115,11 +113,8 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
break;
}
err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
- if (err) {
- if (err == -EFAULT) /* Fix idiotic error code */
- err = -EIO;
+ if (err)
break;
- }
tmp += 2;
bytes += 8;
}
@@ -127,6 +122,54 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
return bytes ? bytes : err;
}
+static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
+{
+ u32 __user *uregs = (u32 __user *)arg;
+ u32 regs[8];
+ int cpu = iminor(file->f_path.dentry->d_inode);
+ int err;
+
+ switch (ioc) {
+ case X86_IOC_RDMSR_REGS:
+ if (!(file->f_mode & FMODE_READ)) {
+ err = -EBADF;
+ break;
+ }
+ if (copy_from_user(&regs, uregs, sizeof regs)) {
+ err = -EFAULT;
+ break;
+ }
+ err = rdmsr_safe_regs_on_cpu(cpu, regs);
+ if (err)
+ break;
+ if (copy_to_user(uregs, &regs, sizeof regs))
+ err = -EFAULT;
+ break;
+
+ case X86_IOC_WRMSR_REGS:
+ if (!(file->f_mode & FMODE_WRITE)) {
+ err = -EBADF;
+ break;
+ }
+ if (copy_from_user(&regs, uregs, sizeof regs)) {
+ err = -EFAULT;
+ break;
+ }
+ err = wrmsr_safe_regs_on_cpu(cpu, regs);
+ if (err)
+ break;
+ if (copy_to_user(uregs, &regs, sizeof regs))
+ err = -EFAULT;
+ break;
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ return err;
+}
+
static int msr_open(struct inode *inode, struct file *file)
{
unsigned int cpu = iminor(file->f_path.dentry->d_inode);
@@ -157,6 +200,8 @@ static const struct file_operations msr_fops = {
.read = msr_read,
.write = msr_write,
.open = msr_open,
+ .unlocked_ioctl = msr_ioctl,
+ .compat_ioctl = msr_ioctl,
};
static int __cpuinit msr_device_create(int cpu)
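
For context on the new ioctl interface above: a minimal user-space sketch (not part of the patch) of reading an MSR through X86_IOC_RDMSR_REGS, assuming the ioctl numbers are exported via <asm/msr.h> and the usual /dev/cpu/N/msr nodes exist (root required). The gprs[] layout follows the comment in msr-reg.S further down (eax, ecx, edx, ebx, esp, ebp, esi, edi), so the MSR index goes in slot 1 (ecx) and the result comes back in slots 0 and 2 (eax/edx).

    /* Sketch only: read one MSR via the register-image ioctl. */
    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/msr.h>            /* assumed to provide X86_IOC_RDMSR_REGS */

    int main(void)
    {
            uint32_t regs[8] = { 0 };
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0)
                    return 1;
            regs[1] = 0x10;         /* ecx = MSR index (TSC, as an example) */
            if (ioctl(fd, X86_IOC_RDMSR_REGS, regs) == 0)
                    printf("msr 0x10 = %#x:%#x (edx:eax)\n", regs[2], regs[0]);
            close(fd);
            return 0;
    }
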
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 70ec9b951d76..f5b0b4a01fb2 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -362,8 +362,9 @@ struct pv_cpu_ops pv_cpu_ops = {
#endif
.wbinvd = native_wbinvd,
.read_msr = native_read_msr_safe,
- .read_msr_amd = native_read_msr_amd_safe,
+ .rdmsr_regs = native_rdmsr_safe_regs,
.write_msr = native_write_msr_safe,
+ .wrmsr_regs = native_wrmsr_safe_regs,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
.read_tscp = native_read_tscp,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1a041bcf506b..d71c8655905b 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -3,6 +3,7 @@
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
+#include <linux/kmemleak.h>
#include <asm/proto.h>
#include <asm/dma.h>
@@ -32,7 +33,14 @@ int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
-int iommu_pass_through;
+/*
+ * This variable becomes 1 if iommu=pt is passed on the kernel command line.
+ * If this variable is 1, IOMMU implementations do no DMA translation for
+ * devices and allow every device to access the whole physical memory. This is
+ * useful if a user wants to use an IOMMU only for KVM device assignment to
+ * guests and not for driver dma translation.
+ */
+int iommu_pass_through __read_mostly;
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
@@ -88,6 +96,11 @@ void __init dma32_reserve_bootmem(void)
size = roundup(dma32_bootmem_size, align);
dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
512ULL<<20);
+ /*
+ * Kmemleak should not scan this block as it may not be mapped via the
+ * kernel direct mapping.
+ */
+ kmemleak_ignore(dma32_bootmem_ptr);
if (dma32_bootmem_ptr)
dma32_bootmem_size = size;
else
@@ -147,7 +160,7 @@ again:
return NULL;
addr = page_to_phys(page);
- if (!is_buffer_dma_capable(dma_mask, addr, size)) {
+ if (addr + size > dma_mask) {
__free_pages(page, get_order(size));
if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index d2e56b8f48e7..98a827ee9ed7 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -190,14 +190,13 @@ static void iommu_full(struct device *dev, size_t size, int dir)
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
- return force_iommu ||
- !is_buffer_dma_capable(*dev->dma_mask, addr, size);
+ return force_iommu || !dma_capable(dev, addr, size);
}
static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
- return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
+ return !dma_capable(dev, addr, size);
}
/* Map a single continuous physical area into the IOMMU.
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 71d412a09f30..a3933d4330cd 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -14,7 +14,7 @@
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
- if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) {
+ if (hwdev && !dma_capable(hwdev, bus, size)) {
if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
printk(KERN_ERR
"nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
@@ -79,12 +79,29 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)vaddr, get_order(size));
}
+static void nommu_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ flush_write_buffers();
+}
+
+
+static void nommu_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ flush_write_buffers();
+}
+
struct dma_map_ops nommu_dma_ops = {
- .alloc_coherent = dma_generic_alloc_coherent,
- .free_coherent = nommu_free_coherent,
- .map_sg = nommu_map_sg,
- .map_page = nommu_map_page,
- .is_phys = 1,
+ .alloc_coherent = dma_generic_alloc_coherent,
+ .free_coherent = nommu_free_coherent,
+ .map_sg = nommu_map_sg,
+ .map_page = nommu_map_page,
+ .sync_single_for_device = nommu_sync_single_for_device,
+ .sync_sg_for_device = nommu_sync_sg_for_device,
+ .is_phys = 1,
};
void __init no_iommu_init(void)
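
The hunks above replace is_buffer_dma_capable() with either dma_capable(dev, addr, size) or an open-coded range check. A hypothetical helper (not in the patch) sketching the semantics both forms rely on, namely that the whole buffer must fit below the device's DMA mask:

    /* Sketch of the check the call sites above depend on:
     * the range [addr, addr + size) must lie within dma_mask. */
    static inline bool buffer_fits_mask(u64 dma_mask, dma_addr_t addr, size_t size)
    {
            return addr + size <= dma_mask;
    }
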
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 6af96ee44200..e8a35016115f 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -13,31 +13,6 @@
int swiotlb __read_mostly;
-void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
- return alloc_bootmem_low_pages(size);
-}
-
-void *swiotlb_alloc(unsigned order, unsigned long nslabs)
-{
- return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
-}
-
-dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-{
- return baddr;
-}
-
-int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
-{
- return 0;
-}
-
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 09ecbde91c13..8d7d5c9c1be3 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -35,10 +35,11 @@
#include <asm/proto.h>
#include <asm/ds.h>
-#include <trace/syscall.h>
-
#include "tls.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
enum x86_regset {
REGSET_GENERAL,
REGSET_FP,
@@ -1497,8 +1498,8 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
tracehook_report_syscall_entry(regs))
ret = -1L;
- if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
- ftrace_syscall_enter(regs);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->orig_ax);
if (unlikely(current->audit_context)) {
if (IS_IA32)
@@ -1523,8 +1524,8 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
- if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
- ftrace_syscall_exit(regs);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->ax);
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 4c578751e94e..81e58238c4ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -869,6 +869,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2fecda69ee64..c36cc1452cdc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -434,7 +434,8 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
* For perf, we return last level cache shared map.
* And for power savings, we return cpu_core_map
*/
- if (sched_mc_power_savings || sched_smt_power_savings)
+ if ((sched_mc_power_savings || sched_smt_power_savings) &&
+ !(cpu_has(c, X86_FEATURE_AMD_DCM)))
return cpu_core_mask(cpu);
else
return c->llc_shared_map;
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index e8b9863ef8c4..3149032ff107 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
+#include <asm/desc.h>
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
@@ -23,7 +24,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
* and APM bios ones we just ignore here.
*/
if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
- u32 *desc;
+ struct desc_struct *desc;
unsigned long base;
seg &= ~7UL;
@@ -33,12 +34,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
addr = -1L; /* bogus selector, access would fault */
else {
desc = child->mm->context.ldt + seg;
- base = ((desc[0] >> 16) |
- ((desc[1] & 0xff) << 16) |
- (desc[1] & 0xff000000));
+ base = get_desc_base(desc);
/* 16-bit code segment? */
- if (!((desc[1] >> 22) & 1))
+ if (!desc->d)
addr &= 0xffff;
addr += base;
}
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 6bc211accf08..45e00eb09c3a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -18,9 +18,9 @@
#include <asm/ia32.h>
#include <asm/syscalls.h>
-asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long off)
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, off)
{
long error;
struct file *file;
@@ -226,7 +226,7 @@ bottomup:
}
-asmlinkage long sys_uname(struct new_utsname __user *name)
+SYSCALL_DEFINE1(uname, struct new_utsname __user *, name)
{
int err;
down_read(&uts_sem);
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 77b9689f8edb..503c1f2e8835 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -640,13 +640,13 @@ static int __init uv_ptc_init(void)
if (!is_uv_system())
return 0;
- proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
+ proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
+ &proc_uv_ptc_operations);
if (!proc_uv_ptc) {
printk(KERN_ERR "unable to create %s proc entry\n",
UV_PTC_BASENAME);
return -EINVAL;
}
- proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
return 0;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 5204332f475d..6fe85c272a2b 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -76,7 +76,7 @@ char ignore_fpu_irq;
* F0 0F bug workaround.. We have a special link segment
* for this.
*/
-gate_desc idt_table[256]
+gate_desc idt_table[NR_VECTORS]
__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
#endif
@@ -786,27 +786,6 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
#endif
}
-#ifdef CONFIG_X86_32
-unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
-{
- struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
- unsigned long base = (kesp - uesp) & -THREAD_SIZE;
- unsigned long new_kesp = kesp - base;
- unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
- __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
-
- /* Set up base for espfix segment */
- desc &= 0x00f0ff0000000000ULL;
- desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
- ((((__u64)base) << 32) & 0xff00000000000000ULL) |
- ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
- (lim_pages & 0xffff);
- *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
-
- return new_kesp;
-}
-#endif
-
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3d4529011828..633ccc7400a4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2297,12 +2297,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
unsigned int bytes,
struct kvm_vcpu *vcpu)
{
- static int reported;
-
- if (!reported) {
- reported = 1;
- printk(KERN_WARNING "kvm: emulating exchange as write\n");
- }
+ printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
#ifndef CONFIG_X86_64
/* guests cmpxchg8b have to be emulated atomically */
if (bytes == 8) {
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 07c31899c9c2..9e609206fac9 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -9,6 +9,8 @@ lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
+obj-y += msr-reg.o msr-reg-export.o
+
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
lib-y += checksum_32.o
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c
new file mode 100644
index 000000000000..a311cc59b65d
--- /dev/null
+++ b/arch/x86/lib/msr-reg-export.c
@@ -0,0 +1,5 @@
+#include <linux/module.h>
+#include <asm/msr.h>
+
+EXPORT_SYMBOL(native_rdmsr_safe_regs);
+EXPORT_SYMBOL(native_wrmsr_safe_regs);
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
new file mode 100644
index 000000000000..69fa10623f21
--- /dev/null
+++ b/arch/x86/lib/msr-reg.S
@@ -0,0 +1,102 @@
+#include <linux/linkage.h>
+#include <linux/errno.h>
+#include <asm/dwarf2.h>
+#include <asm/asm.h>
+#include <asm/msr.h>
+
+#ifdef CONFIG_X86_64
+/*
+ * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
+ *
+ * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
+ *
+ */
+.macro op_safe_regs op
+ENTRY(native_\op\()_safe_regs)
+ CFI_STARTPROC
+ pushq_cfi %rbx
+ pushq_cfi %rbp
+ movq %rdi, %r10 /* Save pointer */
+ xorl %r11d, %r11d /* Return value */
+ movl (%rdi), %eax
+ movl 4(%rdi), %ecx
+ movl 8(%rdi), %edx
+ movl 12(%rdi), %ebx
+ movl 20(%rdi), %ebp
+ movl 24(%rdi), %esi
+ movl 28(%rdi), %edi
+ CFI_REMEMBER_STATE
+1: \op
+2: movl %eax, (%r10)
+ movl %r11d, %eax /* Return value */
+ movl %ecx, 4(%r10)
+ movl %edx, 8(%r10)
+ movl %ebx, 12(%r10)
+ movl %ebp, 20(%r10)
+ movl %esi, 24(%r10)
+ movl %edi, 28(%r10)
+ popq_cfi %rbp
+ popq_cfi %rbx
+ ret
+3:
+ CFI_RESTORE_STATE
+ movl $-EIO, %r11d
+ jmp 2b
+
+ _ASM_EXTABLE(1b, 3b)
+ CFI_ENDPROC
+ENDPROC(native_\op\()_safe_regs)
+.endm
+
+#else /* X86_32 */
+
+.macro op_safe_regs op
+ENTRY(native_\op\()_safe_regs)
+ CFI_STARTPROC
+ pushl_cfi %ebx
+ pushl_cfi %ebp
+ pushl_cfi %esi
+ pushl_cfi %edi
+ pushl_cfi $0 /* Return value */
+ pushl_cfi %eax
+ movl 4(%eax), %ecx
+ movl 8(%eax), %edx
+ movl 12(%eax), %ebx
+ movl 20(%eax), %ebp
+ movl 24(%eax), %esi
+ movl 28(%eax), %edi
+ movl (%eax), %eax
+ CFI_REMEMBER_STATE
+1: \op
+2: pushl_cfi %eax
+ movl 4(%esp), %eax
+ popl_cfi (%eax)
+ addl $4, %esp
+ CFI_ADJUST_CFA_OFFSET -4
+ movl %ecx, 4(%eax)
+ movl %edx, 8(%eax)
+ movl %ebx, 12(%eax)
+ movl %ebp, 20(%eax)
+ movl %esi, 24(%eax)
+ movl %edi, 28(%eax)
+ popl_cfi %eax
+ popl_cfi %edi
+ popl_cfi %esi
+ popl_cfi %ebp
+ popl_cfi %ebx
+ ret
+3:
+ CFI_RESTORE_STATE
+ movl $-EIO, 4(%esp)
+ jmp 2b
+
+ _ASM_EXTABLE(1b, 3b)
+ CFI_ENDPROC
+ENDPROC(native_\op\()_safe_regs)
+.endm
+
+#endif
+
+op_safe_regs rdmsr
+op_safe_regs wrmsr
+
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index caa24aca8115..33a1e3ca22d8 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -175,3 +175,52 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+
+/*
+ * These variants are significantly slower, but allow control over
+ * the entire 32-bit GPR set.
+ */
+struct msr_regs_info {
+ u32 *regs;
+ int err;
+};
+
+static void __rdmsr_safe_regs_on_cpu(void *info)
+{
+ struct msr_regs_info *rv = info;
+
+ rv->err = rdmsr_safe_regs(rv->regs);
+}
+
+static void __wrmsr_safe_regs_on_cpu(void *info)
+{
+ struct msr_regs_info *rv = info;
+
+ rv->err = wrmsr_safe_regs(rv->regs);
+}
+
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+ int err;
+ struct msr_regs_info rv;
+
+ rv.regs = regs;
+ rv.err = -EIO;
+ err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+ int err;
+ struct msr_regs_info rv;
+
+ rv.regs = regs;
+ rv.err = -EIO;
+ err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
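
The regs[8] image used by rdmsr_safe_regs_on_cpu()/wrmsr_safe_regs_on_cpu() follows the msr-reg.S layout (eax, ecx, edx, ebx, esp, ebp, esi, edi). A sketch, with a hypothetical in-kernel caller, of reading one MSR on a remote CPU through this interface, assuming only ecx needs to be set on input:

    /* Sketch only: regs[1] (ecx) selects the MSR; regs[0]/regs[2]
     * (eax/edx) carry the value back.  Assumes <asm/msr.h>. */
    static int example_read_msr_regs(unsigned int cpu, u32 msr, u64 *value)
    {
            u32 regs[8] = { 0 };
            int err;

            regs[1] = msr;                           /* ecx = MSR index */
            err = rdmsr_safe_regs_on_cpu(cpu, regs);
            if (err)
                    return err;
            *value = ((u64)regs[2] << 32) | regs[0]; /* edx:eax */
            return 0;
    }
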
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index 2c55ed098654..528bf954eb74 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs,
kmemcheck_shadow_set(shadow, size);
}
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+ enum kmemcheck_shadow status;
+ void *shadow;
+
+ shadow = kmemcheck_shadow_lookup(addr);
+ if (!shadow)
+ return true;
+
+ status = kmemcheck_shadow_test(shadow, size);
+
+ return status == KMEMCHECK_SHADOW_INITIALIZED;
+}
+
/* Access may cross page boundary */
static void kmemcheck_read(struct pt_regs *regs,
unsigned long addr, unsigned int size)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 352aa9e927e2..b2f7d3e59b86 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -827,7 +827,7 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations memtype_seq_ops = {
+static const struct seq_operations memtype_seq_ops = {
.start = memtype_seq_start,
.next = memtype_seq_next,
.stop = memtype_seq_stop,
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 89b9a5cd63da..cb88b1a0bd5f 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,11 +1,14 @@
/**
* @file nmi_int.c
*
- * @remark Copyright 2002-2008 OProfile authors
+ * @remark Copyright 2002-2009 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
* @author Robert Richter <robert.richter@amd.com>
+ * @author Barry Kasindorf <barry.kasindorf@amd.com>
+ * @author Jason Yeh <jason.yeh@amd.com>
+ * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
*/
#include <linux/init.h>
@@ -24,13 +27,35 @@
#include "op_counter.h"
#include "op_x86_model.h"
-static struct op_x86_model_spec const *model;
+static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;
+struct op_counter_config counter_config[OP_MAX_COUNTER];
+
+/* common functions */
+
+u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
+ struct op_counter_config *counter_config)
+{
+ u64 val = 0;
+ u16 event = (u16)counter_config->event;
+
+ val |= ARCH_PERFMON_EVENTSEL_INT;
+ val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
+ val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
+ val |= (counter_config->unit_mask & 0xFF) << 8;
+ event &= model->event_mask ? model->event_mask : 0xFF;
+ val |= event & 0xFF;
+ val |= (event & 0x0F00) << 24;
+
+ return val;
+}
+
+
static int profile_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -52,36 +77,214 @@ static int profile_exceptions_notify(struct notifier_block *self,
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
- unsigned int const nr_ctrs = model->num_counters;
- unsigned int const nr_ctrls = model->num_controls;
struct op_msr *counters = msrs->counters;
struct op_msr *controls = msrs->controls;
unsigned int i;
- for (i = 0; i < nr_ctrs; ++i) {
- if (counters[i].addr) {
- rdmsr(counters[i].addr,
- counters[i].saved.low,
- counters[i].saved.high);
- }
+ for (i = 0; i < model->num_counters; ++i) {
+ if (counters[i].addr)
+ rdmsrl(counters[i].addr, counters[i].saved);
+ }
+
+ for (i = 0; i < model->num_controls; ++i) {
+ if (controls[i].addr)
+ rdmsrl(controls[i].addr, controls[i].saved);
+ }
+}
+
+static void nmi_cpu_start(void *dummy)
+{
+ struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
+ model->start(msrs);
+}
+
+static int nmi_start(void)
+{
+ on_each_cpu(nmi_cpu_start, NULL, 1);
+ return 0;
+}
+
+static void nmi_cpu_stop(void *dummy)
+{
+ struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
+ model->stop(msrs);
+}
+
+static void nmi_stop(void)
+{
+ on_each_cpu(nmi_cpu_stop, NULL, 1);
+}
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static DEFINE_PER_CPU(int, switch_index);
+
+static inline int has_mux(void)
+{
+ return !!model->switch_ctrl;
+}
+
+inline int op_x86_phys_to_virt(int phys)
+{
+ return __get_cpu_var(switch_index) + phys;
+}
+
+inline int op_x86_virt_to_phys(int virt)
+{
+ return virt % model->num_counters;
+}
+
+static void nmi_shutdown_mux(void)
+{
+ int i;
+
+ if (!has_mux())
+ return;
+
+ for_each_possible_cpu(i) {
+ kfree(per_cpu(cpu_msrs, i).multiplex);
+ per_cpu(cpu_msrs, i).multiplex = NULL;
+ per_cpu(switch_index, i) = 0;
}
+}
+
+static int nmi_setup_mux(void)
+{
+ size_t multiplex_size =
+ sizeof(struct op_msr) * model->num_virt_counters;
+ int i;
+
+ if (!has_mux())
+ return 1;
+
+ for_each_possible_cpu(i) {
+ per_cpu(cpu_msrs, i).multiplex =
+ kmalloc(multiplex_size, GFP_KERNEL);
+ if (!per_cpu(cpu_msrs, i).multiplex)
+ return 0;
+ }
+
+ return 1;
+}
+
+static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
+{
+ int i;
+ struct op_msr *multiplex = msrs->multiplex;
+
+ if (!has_mux())
+ return;
- for (i = 0; i < nr_ctrls; ++i) {
- if (controls[i].addr) {
- rdmsr(controls[i].addr,
- controls[i].saved.low,
- controls[i].saved.high);
+ for (i = 0; i < model->num_virt_counters; ++i) {
+ if (counter_config[i].enabled) {
+ multiplex[i].saved = -(u64)counter_config[i].count;
+ } else {
+ multiplex[i].addr = 0;
+ multiplex[i].saved = 0;
}
}
+
+ per_cpu(switch_index, cpu) = 0;
+}
+
+static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
+{
+ struct op_msr *multiplex = msrs->multiplex;
+ int i;
+
+ for (i = 0; i < model->num_counters; ++i) {
+ int virt = op_x86_phys_to_virt(i);
+ if (multiplex[virt].addr)
+ rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
+ }
+}
+
+static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
+{
+ struct op_msr *multiplex = msrs->multiplex;
+ int i;
+
+ for (i = 0; i < model->num_counters; ++i) {
+ int virt = op_x86_phys_to_virt(i);
+ if (multiplex[virt].addr)
+ wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
+ }
}
-static void nmi_save_registers(void *dummy)
+static void nmi_cpu_switch(void *dummy)
{
int cpu = smp_processor_id();
+ int si = per_cpu(switch_index, cpu);
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
- nmi_cpu_save_registers(msrs);
+
+ nmi_cpu_stop(NULL);
+ nmi_cpu_save_mpx_registers(msrs);
+
+ /* move to next set */
+ si += model->num_counters;
+ if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
+ per_cpu(switch_index, cpu) = 0;
+ else
+ per_cpu(switch_index, cpu) = si;
+
+ model->switch_ctrl(model, msrs);
+ nmi_cpu_restore_mpx_registers(msrs);
+
+ nmi_cpu_start(NULL);
+}
+
+
+/*
+ * Quick check to see if multiplexing is necessary.
+ * The check should be sufficient since counters are used
+ * in order.
+ */
+static int nmi_multiplex_on(void)
+{
+ return counter_config[model->num_counters].count ? 0 : -EINVAL;
+}
+
+static int nmi_switch_event(void)
+{
+ if (!has_mux())
+ return -ENOSYS; /* not implemented */
+ if (nmi_multiplex_on() < 0)
+ return -EINVAL; /* not necessary */
+
+ on_each_cpu(nmi_cpu_switch, NULL, 1);
+
+ return 0;
+}
+
+static inline void mux_init(struct oprofile_operations *ops)
+{
+ if (has_mux())
+ ops->switch_events = nmi_switch_event;
+}
+
+static void mux_clone(int cpu)
+{
+ if (!has_mux())
+ return;
+
+ memcpy(per_cpu(cpu_msrs, cpu).multiplex,
+ per_cpu(cpu_msrs, 0).multiplex,
+ sizeof(struct op_msr) * model->num_virt_counters);
}
+#else
+
+inline int op_x86_phys_to_virt(int phys) { return phys; }
+inline int op_x86_virt_to_phys(int virt) { return virt; }
+static inline void nmi_shutdown_mux(void) { }
+static inline int nmi_setup_mux(void) { return 1; }
+static inline void
+nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
+static inline void mux_init(struct oprofile_operations *ops) { }
+static void mux_clone(int cpu) { }
+
+#endif
+
static void free_msrs(void)
{
int i;
@@ -95,38 +298,32 @@ static void free_msrs(void)
static int allocate_msrs(void)
{
- int success = 1;
size_t controls_size = sizeof(struct op_msr) * model->num_controls;
size_t counters_size = sizeof(struct op_msr) * model->num_counters;
int i;
for_each_possible_cpu(i) {
per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
- GFP_KERNEL);
- if (!per_cpu(cpu_msrs, i).counters) {
- success = 0;
- break;
- }
+ GFP_KERNEL);
+ if (!per_cpu(cpu_msrs, i).counters)
+ return 0;
per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
- GFP_KERNEL);
- if (!per_cpu(cpu_msrs, i).controls) {
- success = 0;
- break;
- }
+ GFP_KERNEL);
+ if (!per_cpu(cpu_msrs, i).controls)
+ return 0;
}
- if (!success)
- free_msrs();
-
- return success;
+ return 1;
}
static void nmi_cpu_setup(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+ nmi_cpu_save_registers(msrs);
spin_lock(&oprofilefs_lock);
- model->setup_ctrs(msrs);
+ model->setup_ctrs(model, msrs);
+ nmi_cpu_setup_mux(cpu, msrs);
spin_unlock(&oprofilefs_lock);
per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, APIC_DM_NMI);
@@ -144,11 +341,15 @@ static int nmi_setup(void)
int cpu;
if (!allocate_msrs())
- return -ENOMEM;
+ err = -ENOMEM;
+ else if (!nmi_setup_mux())
+ err = -ENOMEM;
+ else
+ err = register_die_notifier(&profile_exceptions_nb);
- err = register_die_notifier(&profile_exceptions_nb);
if (err) {
free_msrs();
+ nmi_shutdown_mux();
return err;
}
@@ -159,45 +360,38 @@ static int nmi_setup(void)
/* Assume saved/restored counters are the same on all CPUs */
model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
for_each_possible_cpu(cpu) {
- if (cpu != 0) {
- memcpy(per_cpu(cpu_msrs, cpu).counters,
- per_cpu(cpu_msrs, 0).counters,
- sizeof(struct op_msr) * model->num_counters);
-
- memcpy(per_cpu(cpu_msrs, cpu).controls,
- per_cpu(cpu_msrs, 0).controls,
- sizeof(struct op_msr) * model->num_controls);
- }
+ if (!cpu)
+ continue;
+
+ memcpy(per_cpu(cpu_msrs, cpu).counters,
+ per_cpu(cpu_msrs, 0).counters,
+ sizeof(struct op_msr) * model->num_counters);
+
+ memcpy(per_cpu(cpu_msrs, cpu).controls,
+ per_cpu(cpu_msrs, 0).controls,
+ sizeof(struct op_msr) * model->num_controls);
+ mux_clone(cpu);
}
- on_each_cpu(nmi_save_registers, NULL, 1);
on_each_cpu(nmi_cpu_setup, NULL, 1);
nmi_enabled = 1;
return 0;
}
-static void nmi_restore_registers(struct op_msrs *msrs)
+static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
- unsigned int const nr_ctrs = model->num_counters;
- unsigned int const nr_ctrls = model->num_controls;
struct op_msr *counters = msrs->counters;
struct op_msr *controls = msrs->controls;
unsigned int i;
- for (i = 0; i < nr_ctrls; ++i) {
- if (controls[i].addr) {
- wrmsr(controls[i].addr,
- controls[i].saved.low,
- controls[i].saved.high);
- }
+ for (i = 0; i < model->num_controls; ++i) {
+ if (controls[i].addr)
+ wrmsrl(controls[i].addr, controls[i].saved);
}
- for (i = 0; i < nr_ctrs; ++i) {
- if (counters[i].addr) {
- wrmsr(counters[i].addr,
- counters[i].saved.low,
- counters[i].saved.high);
- }
+ for (i = 0; i < model->num_counters; ++i) {
+ if (counters[i].addr)
+ wrmsrl(counters[i].addr, counters[i].saved);
}
}
@@ -205,7 +399,7 @@ static void nmi_cpu_shutdown(void *dummy)
{
unsigned int v;
int cpu = smp_processor_id();
- struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
+ struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
/* restoring APIC_LVTPC can trigger an apic error because the delivery
* mode and vector nr combination can be illegal. That's by design: on
@@ -216,7 +410,7 @@ static void nmi_cpu_shutdown(void *dummy)
apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
apic_write(APIC_LVTERR, v);
- nmi_restore_registers(msrs);
+ nmi_cpu_restore_registers(msrs);
}
static void nmi_shutdown(void)
@@ -226,42 +420,18 @@ static void nmi_shutdown(void)
nmi_enabled = 0;
on_each_cpu(nmi_cpu_shutdown, NULL, 1);
unregister_die_notifier(&profile_exceptions_nb);
+ nmi_shutdown_mux();
msrs = &get_cpu_var(cpu_msrs);
model->shutdown(msrs);
free_msrs();
put_cpu_var(cpu_msrs);
}
-static void nmi_cpu_start(void *dummy)
-{
- struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->start(msrs);
-}
-
-static int nmi_start(void)
-{
- on_each_cpu(nmi_cpu_start, NULL, 1);
- return 0;
-}
-
-static void nmi_cpu_stop(void *dummy)
-{
- struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
- model->stop(msrs);
-}
-
-static void nmi_stop(void)
-{
- on_each_cpu(nmi_cpu_stop, NULL, 1);
-}
-
-struct op_counter_config counter_config[OP_MAX_COUNTER];
-
static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
unsigned int i;
- for (i = 0; i < model->num_counters; ++i) {
+ for (i = 0; i < model->num_virt_counters; ++i) {
struct dentry *dir;
char buf[4];
@@ -270,7 +440,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
* NOTE: assumes 1:1 mapping here (that counters are organized
* sequentially in their struct assignment).
*/
- if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
+ if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
continue;
snprintf(buf, sizeof(buf), "%d", i);
@@ -402,6 +572,7 @@ module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
static int __init ppro_init(char **cpu_type)
{
__u8 cpu_model = boot_cpu_data.x86_model;
+ struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
if (force_arch_perfmon && cpu_has_arch_perfmon)
return 0;
@@ -428,7 +599,7 @@ static int __init ppro_init(char **cpu_type)
*cpu_type = "i386/core_2";
break;
case 26:
- arch_perfmon_setup_counters();
+ spec = &op_arch_perfmon_spec;
*cpu_type = "i386/core_i7";
break;
case 28:
@@ -439,17 +610,7 @@ static int __init ppro_init(char **cpu_type)
return 0;
}
- model = &op_ppro_spec;
- return 1;
-}
-
-static int __init arch_perfmon_init(char **cpu_type)
-{
- if (!cpu_has_arch_perfmon)
- return 0;
- *cpu_type = "i386/arch_perfmon";
- model = &op_arch_perfmon_spec;
- arch_perfmon_setup_counters();
+ model = spec;
return 1;
}
@@ -471,27 +632,26 @@ int __init op_nmi_init(struct oprofile_operations *ops)
/* Needs to be at least an Athlon (or hammer in 32bit mode) */
switch (family) {
- default:
- return -ENODEV;
case 6:
- model = &op_amd_spec;
cpu_type = "i386/athlon";
break;
case 0xf:
- model = &op_amd_spec;
- /* Actually it could be i386/hammer too, but give
- user space an consistent name. */
+ /*
+ * Actually it could be i386/hammer too, but
+ * give user space a consistent name.
+ */
cpu_type = "x86-64/hammer";
break;
case 0x10:
- model = &op_amd_spec;
cpu_type = "x86-64/family10";
break;
case 0x11:
- model = &op_amd_spec;
cpu_type = "x86-64/family11h";
break;
+ default:
+ return -ENODEV;
}
+ model = &op_amd_spec;
break;
case X86_VENDOR_INTEL:
@@ -510,8 +670,15 @@ int __init op_nmi_init(struct oprofile_operations *ops)
break;
}
- if (!cpu_type && !arch_perfmon_init(&cpu_type))
+ if (cpu_type)
+ break;
+
+ if (!cpu_has_arch_perfmon)
return -ENODEV;
+
+ /* use arch perfmon as fallback */
+ cpu_type = "i386/arch_perfmon";
+ model = &op_arch_perfmon_spec;
break;
default:
@@ -522,18 +689,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
register_cpu_notifier(&oprofile_cpu_nb);
#endif
/* default values, can be overwritten by model */
- ops->create_files = nmi_create_files;
- ops->setup = nmi_setup;
- ops->shutdown = nmi_shutdown;
- ops->start = nmi_start;
- ops->stop = nmi_stop;
- ops->cpu_type = cpu_type;
+ ops->create_files = nmi_create_files;
+ ops->setup = nmi_setup;
+ ops->shutdown = nmi_shutdown;
+ ops->start = nmi_start;
+ ops->stop = nmi_stop;
+ ops->cpu_type = cpu_type;
if (model->init)
ret = model->init(ops);
if (ret)
return ret;
+ if (!model->num_virt_counters)
+ model->num_virt_counters = model->num_counters;
+
+ mux_init(ops);
+
init_sysfs();
using_nmi = 1;
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
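
For reference, the rotation performed by nmi_cpu_switch() above can be summarized by the following simplified sketch (hypothetical helper, not in the patch): each physical counter i services virtual counter switch_index + i, and the per-CPU index advances by num_counters on every switch, wrapping to zero once it passes the last configured virtual counter.

    /* Sketch of the per-CPU multiplexing rotation: returns the next
     * switch_index, wrapping when the window runs past the configured
     * virtual counters or hits an unused slot. */
    static int next_switch_index(int switch_index, int num_counters,
                                 int num_virt_counters, const int *count)
    {
            int si = switch_index + num_counters;

            if (si >= num_virt_counters || count[si] == 0)
                    return 0;       /* wrap back to the first counter set */
            return si;
    }
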
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
index 91b6a116165e..e28398df0df2 100644
--- a/arch/x86/oprofile/op_counter.h
+++ b/arch/x86/oprofile/op_counter.h
@@ -10,7 +10,7 @@
#ifndef OP_COUNTER_H
#define OP_COUNTER_H
-#define OP_MAX_COUNTER 8
+#define OP_MAX_COUNTER 32
/* Per-perfctr configuration as set via
* oprofilefs.
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 8fdf06e4edf9..39686c29f03a 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -9,12 +9,15 @@
* @author Philippe Elie
* @author Graydon Hoare
* @author Robert Richter <robert.richter@amd.com>
- * @author Barry Kasindorf
+ * @author Barry Kasindorf <barry.kasindorf@amd.com>
+ * @author Jason Yeh <jason.yeh@amd.com>
+ * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
*/
#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/percpu.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
@@ -25,43 +28,36 @@
#define NUM_COUNTERS 4
#define NUM_CONTROLS 4
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+#define NUM_VIRT_COUNTERS 32
+#define NUM_VIRT_CONTROLS 32
+#else
+#define NUM_VIRT_COUNTERS NUM_COUNTERS
+#define NUM_VIRT_CONTROLS NUM_CONTROLS
+#endif
+
+#define OP_EVENT_MASK 0x0FFF
+#define OP_CTR_OVERFLOW (1ULL<<31)
-#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
-#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0)
-#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
-
-#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
-#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
-#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
-#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR_LO(x) (x &= (1<<21))
-#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
-#define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
-#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
-#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
-#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
-#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
-
-static unsigned long reset_value[NUM_COUNTERS];
+#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
+
+static unsigned long reset_value[NUM_VIRT_COUNTERS];
#ifdef CONFIG_OPROFILE_IBS
/* IbsFetchCtl bits/masks */
-#define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */
-#define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */
-#define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */
+#define IBS_FETCH_RAND_EN (1ULL<<57)
+#define IBS_FETCH_VAL (1ULL<<49)
+#define IBS_FETCH_ENABLE (1ULL<<48)
+#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL
/*IbsOpCtl bits */
-#define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */
-#define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */
+#define IBS_OP_CNT_CTL (1ULL<<19)
+#define IBS_OP_VAL (1ULL<<18)
+#define IBS_OP_ENABLE (1ULL<<17)
-#define IBS_FETCH_SIZE 6
-#define IBS_OP_SIZE 12
+#define IBS_FETCH_SIZE 6
+#define IBS_OP_SIZE 12
static int has_ibs; /* AMD Family10h and later */
@@ -78,6 +74,45 @@ static struct op_ibs_config ibs_config;
#endif
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
+{
+ int i;
+
+ for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
+ int hw_counter = op_x86_virt_to_phys(i);
+ if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
+ msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
+ else
+ msrs->multiplex[i].addr = 0;
+ }
+}
+
+static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
+{
+ u64 val;
+ int i;
+
+ /* enable active counters */
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ int virt = op_x86_phys_to_virt(i);
+ if (!counter_config[virt].enabled)
+ continue;
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[virt]);
+ wrmsrl(msrs->controls[i].addr, val);
+ }
+}
+
+#else
+
+static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }
+
+#endif
+
/* functions for op_amd_spec */
static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
@@ -97,150 +132,174 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
else
msrs->controls[i].addr = 0;
}
-}
+ op_mux_fill_in_addresses(msrs);
+}
-static void op_amd_setup_ctrs(struct op_msrs const * const msrs)
+static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
+ /* setup reset_value */
+ for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
+ if (counter_config[i].enabled)
+ reset_value[i] = counter_config[i].count;
+ else
+ reset_value[i] = 0;
+ }
+
/* clear all counters */
- for (i = 0 ; i < NUM_CONTROLS; ++i) {
- if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+ for (i = 0; i < NUM_CONTROLS; ++i) {
+ if (unlikely(!msrs->controls[i].addr))
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR_LO(low);
- CTRL_CLEAR_HI(high);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ wrmsrl(msrs->controls[i].addr, val);
}
/* avoid a false detection of ctr overflows in NMI handler */
for (i = 0; i < NUM_COUNTERS; ++i) {
- if (unlikely(!CTR_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->counters[i].addr))
continue;
- CTR_WRITE(1, msrs, i);
+ wrmsrl(msrs->counters[i].addr, -1LL);
}
/* enable active counters */
for (i = 0; i < NUM_COUNTERS; ++i) {
- if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
- reset_value[i] = counter_config[i].count;
+ int virt = op_x86_phys_to_virt(i);
+ if (!counter_config[virt].enabled)
+ continue;
+ if (!msrs->counters[i].addr)
+ continue;
- CTR_WRITE(counter_config[i].count, msrs, i);
-
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR_LO(low);
- CTRL_CLEAR_HI(high);
- CTRL_SET_ENABLE(low);
- CTRL_SET_USR(low, counter_config[i].user);
- CTRL_SET_KERN(low, counter_config[i].kernel);
- CTRL_SET_UM(low, counter_config[i].unit_mask);
- CTRL_SET_EVENT_LOW(low, counter_config[i].event);
- CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
- CTRL_SET_HOST_ONLY(high, 0);
- CTRL_SET_GUEST_ONLY(high, 0);
-
- CTRL_WRITE(low, high, msrs, i);
- } else {
- reset_value[i] = 0;
- }
+ /* setup counter registers */
+ wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
+
+ /* setup control registers */
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[virt]);
+ wrmsrl(msrs->controls[i].addr, val);
}
}
#ifdef CONFIG_OPROFILE_IBS
-static inline int
+static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
- u32 low, high;
- u64 msr;
+ u64 val, ctl;
struct op_entry entry;
if (!has_ibs)
- return 1;
+ return;
if (ibs_config.fetch_enabled) {
- rdmsr(MSR_AMD64_IBSFETCHCTL, low, high);
- if (high & IBS_FETCH_HIGH_VALID_BIT) {
- rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr);
- oprofile_write_reserve(&entry, regs, msr,
+ rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
+ if (ctl & IBS_FETCH_VAL) {
+ rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
+ oprofile_write_reserve(&entry, regs, val,
IBS_FETCH_CODE, IBS_FETCH_SIZE);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- oprofile_add_data(&entry, low);
- oprofile_add_data(&entry, high);
- rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
+ oprofile_add_data64(&entry, val);
+ oprofile_add_data64(&entry, ctl);
+ rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
+ oprofile_add_data64(&entry, val);
oprofile_write_commit(&entry);
/* reenable the IRQ */
- high &= ~IBS_FETCH_HIGH_VALID_BIT;
- high |= IBS_FETCH_HIGH_ENABLE;
- low &= IBS_FETCH_LOW_MAX_CNT_MASK;
- wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+ ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
+ ctl |= IBS_FETCH_ENABLE;
+ wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
}
}
if (ibs_config.op_enabled) {
- rdmsr(MSR_AMD64_IBSOPCTL, low, high);
- if (low & IBS_OP_LOW_VALID_BIT) {
- rdmsrl(MSR_AMD64_IBSOPRIP, msr);
- oprofile_write_reserve(&entry, regs, msr,
+ rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
+ if (ctl & IBS_OP_VAL) {
+ rdmsrl(MSR_AMD64_IBSOPRIP, val);
+ oprofile_write_reserve(&entry, regs, val,
IBS_OP_CODE, IBS_OP_SIZE);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSOPDATA, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSOPDATA2, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSOPDATA3, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
- rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
- oprofile_add_data(&entry, (u32)msr);
- oprofile_add_data(&entry, (u32)(msr >> 32));
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSOPDATA, val);
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSOPDATA2, val);
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSOPDATA3, val);
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSDCLINAD, val);
+ oprofile_add_data64(&entry, val);
+ rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
+ oprofile_add_data64(&entry, val);
oprofile_write_commit(&entry);
/* reenable the IRQ */
- high = 0;
- low &= ~IBS_OP_LOW_VALID_BIT;
- low |= IBS_OP_LOW_ENABLE;
- wrmsr(MSR_AMD64_IBSOPCTL, low, high);
+ ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
+ ctl |= IBS_OP_ENABLE;
+ wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
}
}
+}
- return 1;
+static inline void op_amd_start_ibs(void)
+{
+ u64 val;
+ if (has_ibs && ibs_config.fetch_enabled) {
+ val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
+ val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
+ val |= IBS_FETCH_ENABLE;
+ wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
+ }
+
+ if (has_ibs && ibs_config.op_enabled) {
+ val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
+ val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
+ val |= IBS_OP_ENABLE;
+ wrmsrl(MSR_AMD64_IBSOPCTL, val);
+ }
+}
+
+static void op_amd_stop_ibs(void)
+{
+ if (has_ibs && ibs_config.fetch_enabled)
+ /* clear max count and enable */
+ wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);
+
+ if (has_ibs && ibs_config.op_enabled)
+ /* clear max count and enable */
+ wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
+#else
+
+static inline void op_amd_handle_ibs(struct pt_regs * const regs,
+ struct op_msrs const * const msrs) { }
+static inline void op_amd_start_ibs(void) { }
+static inline void op_amd_stop_ibs(void) { }
+
#endif
static int op_amd_check_ctrs(struct pt_regs * const regs,
struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
- for (i = 0 ; i < NUM_COUNTERS; ++i) {
- if (!reset_value[i])
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ int virt = op_x86_phys_to_virt(i);
+ if (!reset_value[virt])
continue;
- CTR_READ(low, high, msrs, i);
- if (CTR_OVERFLOWED(low)) {
- oprofile_add_sample(regs, i);
- CTR_WRITE(reset_value[i], msrs, i);
- }
+ rdmsrl(msrs->counters[i].addr, val);
+ /* bit is clear if overflowed: */
+ if (val & OP_CTR_OVERFLOW)
+ continue;
+ oprofile_add_sample(regs, virt);
+ wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
}
-#ifdef CONFIG_OPROFILE_IBS
op_amd_handle_ibs(regs, msrs);
-#endif
/* See op_model_ppro.c */
return 1;
@@ -248,79 +307,50 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
static void op_amd_start(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
- for (i = 0 ; i < NUM_COUNTERS ; ++i) {
- if (reset_value[i]) {
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_ACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
- }
- }
-#ifdef CONFIG_OPROFILE_IBS
- if (has_ibs && ibs_config.fetch_enabled) {
- low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
- high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
- + IBS_FETCH_HIGH_ENABLE;
- wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (!reset_value[op_x86_phys_to_virt(i)])
+ continue;
+ rdmsrl(msrs->controls[i].addr, val);
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(msrs->controls[i].addr, val);
}
- if (has_ibs && ibs_config.op_enabled) {
- low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
- + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
- + IBS_OP_LOW_ENABLE;
- high = 0;
- wrmsr(MSR_AMD64_IBSOPCTL, low, high);
- }
-#endif
+ op_amd_start_ibs();
}
-
static void op_amd_stop(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
/*
* Subtle: stop on all counters to avoid race with setting our
* pm callback
*/
- for (i = 0 ; i < NUM_COUNTERS ; ++i) {
- if (!reset_value[i])
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (!reset_value[op_x86_phys_to_virt(i)])
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_INACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
- }
-
-#ifdef CONFIG_OPROFILE_IBS
- if (has_ibs && ibs_config.fetch_enabled) {
- /* clear max count and enable */
- low = 0;
- high = 0;
- wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(msrs->controls[i].addr, val);
}
- if (has_ibs && ibs_config.op_enabled) {
- /* clear max count and enable */
- low = 0;
- high = 0;
- wrmsr(MSR_AMD64_IBSOPCTL, low, high);
- }
-#endif
+ op_amd_stop_ibs();
}
static void op_amd_shutdown(struct op_msrs const * const msrs)
{
int i;
- for (i = 0 ; i < NUM_COUNTERS ; ++i) {
- if (CTR_IS_RESERVED(msrs, i))
+ for (i = 0; i < NUM_COUNTERS; ++i) {
+ if (msrs->counters[i].addr)
release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
}
- for (i = 0 ; i < NUM_CONTROLS ; ++i) {
- if (CTRL_IS_RESERVED(msrs, i))
+ for (i = 0; i < NUM_CONTROLS; ++i) {
+ if (msrs->controls[i].addr)
release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
}
}
@@ -490,15 +520,21 @@ static void op_amd_exit(void) {}
#endif /* CONFIG_OPROFILE_IBS */
-struct op_x86_model_spec const op_amd_spec = {
- .init = op_amd_init,
- .exit = op_amd_exit,
+struct op_x86_model_spec op_amd_spec = {
.num_counters = NUM_COUNTERS,
.num_controls = NUM_CONTROLS,
+ .num_virt_counters = NUM_VIRT_COUNTERS,
+ .reserved = MSR_AMD_EVENTSEL_RESERVED,
+ .event_mask = OP_EVENT_MASK,
+ .init = op_amd_init,
+ .exit = op_amd_exit,
.fill_in_addresses = &op_amd_fill_in_addresses,
.setup_ctrs = &op_amd_setup_ctrs,
.check_ctrs = &op_amd_check_ctrs,
.start = &op_amd_start,
.stop = &op_amd_stop,
- .shutdown = &op_amd_shutdown
+ .shutdown = &op_amd_shutdown,
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+ .switch_ctrl = &op_mux_switch_ctrl,
+#endif
};
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 819b131fd752..ac6b354becdf 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -32,6 +32,8 @@
#define NUM_CCCRS_HT2 9
#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)
+#define OP_CTR_OVERFLOW (1ULL<<31)
+
static unsigned int num_counters = NUM_COUNTERS_NON_HT;
static unsigned int num_controls = NUM_CONTROLS_NON_HT;
@@ -350,8 +352,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
-#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
-#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
#define CCCR_RESERVED_BITS 0x38030FFF
#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
@@ -361,17 +361,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
-#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
-#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
-#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
-#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
-#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
-
/* this assigns a "stagger" to the current CPU, which is used throughout
the code in this module as an extra array offset, to select the "even"
@@ -515,7 +507,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
if (ev->bindings[i].virt_counter & counter_bit) {
/* modify ESCR */
- ESCR_READ(escr, high, ev, i);
+ rdmsr(ev->bindings[i].escr_address, escr, high);
ESCR_CLEAR(escr);
if (stag == 0) {
ESCR_SET_USR_0(escr, counter_config[ctr].user);
@@ -526,10 +518,11 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
}
ESCR_SET_EVENT_SELECT(escr, ev->event_select);
ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
- ESCR_WRITE(escr, high, ev, i);
+ wrmsr(ev->bindings[i].escr_address, escr, high);
/* modify CCCR */
- CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
+ rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
+ cccr, high);
CCCR_CLEAR(cccr);
CCCR_SET_REQUIRED_BITS(cccr);
CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
@@ -537,7 +530,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
CCCR_SET_PMI_OVF_0(cccr);
else
CCCR_SET_PMI_OVF_1(cccr);
- CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
+ wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
+ cccr, high);
return;
}
}
@@ -548,7 +542,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
}
-static void p4_setup_ctrs(struct op_msrs const * const msrs)
+static void p4_setup_ctrs(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
{
unsigned int i;
unsigned int low, high;
@@ -563,8 +558,8 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
}
/* clear the cccrs we will use */
- for (i = 0 ; i < num_counters ; i++) {
- if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+ for (i = 0; i < num_counters; i++) {
+ if (unlikely(!msrs->controls[i].addr))
continue;
rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
CCCR_CLEAR(low);
@@ -574,17 +569,18 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
/* clear all escrs (including those outside our concern) */
for (i = num_counters; i < num_controls; i++) {
- if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->controls[i].addr))
continue;
wrmsr(msrs->controls[i].addr, 0, 0);
}
/* setup all counters */
- for (i = 0 ; i < num_counters ; ++i) {
- if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
+ for (i = 0; i < num_counters; ++i) {
+ if (counter_config[i].enabled && msrs->controls[i].addr) {
reset_value[i] = counter_config[i].count;
pmc_setup_one_p4_counter(i);
- CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
+ wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address,
+ -(u64)counter_config[i].count);
} else {
reset_value[i] = 0;
}
@@ -624,14 +620,16 @@ static int p4_check_ctrs(struct pt_regs * const regs,
real = VIRT_CTR(stag, i);
- CCCR_READ(low, high, real);
- CTR_READ(ctr, high, real);
- if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
+ rdmsr(p4_counters[real].cccr_address, low, high);
+ rdmsr(p4_counters[real].counter_address, ctr, high);
+ if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) {
oprofile_add_sample(regs, i);
- CTR_WRITE(reset_value[i], real);
+ wrmsrl(p4_counters[real].counter_address,
+ -(u64)reset_value[i]);
CCCR_CLEAR_OVF(low);
- CCCR_WRITE(low, high, real);
- CTR_WRITE(reset_value[i], real);
+ wrmsr(p4_counters[real].cccr_address, low, high);
+ wrmsrl(p4_counters[real].counter_address,
+ -(u64)reset_value[i]);
}
}
@@ -653,9 +651,9 @@ static void p4_start(struct op_msrs const * const msrs)
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CCCR_READ(low, high, VIRT_CTR(stag, i));
+ rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
CCCR_SET_ENABLE(low);
- CCCR_WRITE(low, high, VIRT_CTR(stag, i));
+ wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
}
}
@@ -670,9 +668,9 @@ static void p4_stop(struct op_msrs const * const msrs)
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CCCR_READ(low, high, VIRT_CTR(stag, i));
+ rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
CCCR_SET_DISABLE(low);
- CCCR_WRITE(low, high, VIRT_CTR(stag, i));
+ wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
}
}
@@ -680,8 +678,8 @@ static void p4_shutdown(struct op_msrs const * const msrs)
{
int i;
- for (i = 0 ; i < num_counters ; ++i) {
- if (CTR_IS_RESERVED(msrs, i))
+ for (i = 0; i < num_counters; ++i) {
+ if (msrs->counters[i].addr)
release_perfctr_nmi(msrs->counters[i].addr);
}
/*
@@ -689,15 +687,15 @@ static void p4_shutdown(struct op_msrs const * const msrs)
* conjunction with the counter registers (hence the starting offset).
* This saves a few bits.
*/
- for (i = num_counters ; i < num_controls ; ++i) {
- if (CTRL_IS_RESERVED(msrs, i))
+ for (i = num_counters; i < num_controls; ++i) {
+ if (msrs->controls[i].addr)
release_evntsel_nmi(msrs->controls[i].addr);
}
}
#ifdef CONFIG_SMP
-struct op_x86_model_spec const op_p4_ht2_spec = {
+struct op_x86_model_spec op_p4_ht2_spec = {
.num_counters = NUM_COUNTERS_HT2,
.num_controls = NUM_CONTROLS_HT2,
.fill_in_addresses = &p4_fill_in_addresses,
@@ -709,7 +707,7 @@ struct op_x86_model_spec const op_p4_ht2_spec = {
};
#endif
-struct op_x86_model_spec const op_p4_spec = {
+struct op_x86_model_spec op_p4_spec = {
.num_counters = NUM_COUNTERS_NON_HT,
.num_controls = NUM_CONTROLS_NON_HT,
.fill_in_addresses = &p4_fill_in_addresses,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 4da7230b3d17..4899215999de 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -10,6 +10,7 @@
* @author Philippe Elie
* @author Graydon Hoare
* @author Andi Kleen
+ * @author Robert Richter <robert.richter@amd.com>
*/
#include <linux/oprofile.h>
@@ -18,7 +19,6 @@
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>
-#include <asm/perf_counter.h>
#include "op_x86_model.h"
#include "op_counter.h"
@@ -26,20 +26,7 @@
static int num_counters = 2;
static int counter_width = 32;
-#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
-#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
-
-#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
-#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
-#define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
-#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
-#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
-#define CTRL_CLEAR(x) (x &= (1<<21))
-#define CTRL_SET_ENABLE(val) (val |= 1<<20)
-#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
-#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
-#define CTRL_SET_UM(val, m) (val |= (m << 8))
-#define CTRL_SET_EVENT(val, e) (val |= e)
+#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
static u64 *reset_value;
@@ -63,9 +50,10 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
}
-static void ppro_setup_ctrs(struct op_msrs const * const msrs)
+static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
if (!reset_value) {
@@ -93,36 +81,30 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
}
/* clear all counters */
- for (i = 0 ; i < num_counters; ++i) {
- if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
+ for (i = 0; i < num_counters; ++i) {
+ if (unlikely(!msrs->controls[i].addr))
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ wrmsrl(msrs->controls[i].addr, val);
}
/* avoid a false detection of ctr overflows in NMI handler */
for (i = 0; i < num_counters; ++i) {
- if (unlikely(!CTR_IS_RESERVED(msrs, i)))
+ if (unlikely(!msrs->counters[i].addr))
continue;
wrmsrl(msrs->counters[i].addr, -1LL);
}
/* enable active counters */
for (i = 0; i < num_counters; ++i) {
- if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
+ if (counter_config[i].enabled && msrs->counters[i].addr) {
reset_value[i] = counter_config[i].count;
-
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
-
- CTRL_READ(low, high, msrs, i);
- CTRL_CLEAR(low);
- CTRL_SET_ENABLE(low);
- CTRL_SET_USR(low, counter_config[i].user);
- CTRL_SET_KERN(low, counter_config[i].kernel);
- CTRL_SET_UM(low, counter_config[i].unit_mask);
- CTRL_SET_EVENT(low, counter_config[i].event);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= model->reserved;
+ val |= op_x86_get_ctrl(model, &counter_config[i]);
+ wrmsrl(msrs->controls[i].addr, val);
} else {
reset_value[i] = 0;
}
@@ -143,14 +125,14 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
if (unlikely(!reset_value))
goto out;
- for (i = 0 ; i < num_counters; ++i) {
+ for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
rdmsrl(msrs->counters[i].addr, val);
- if (CTR_OVERFLOWED(val)) {
- oprofile_add_sample(regs, i);
- wrmsrl(msrs->counters[i].addr, -reset_value[i]);
- }
+ if (val & (1ULL << (counter_width - 1)))
+ continue;
+ oprofile_add_sample(regs, i);
+ wrmsrl(msrs->counters[i].addr, -reset_value[i]);
}
out:
@@ -171,16 +153,16 @@ out:
static void ppro_start(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
if (!reset_value)
return;
for (i = 0; i < num_counters; ++i) {
if (reset_value[i]) {
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_ACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(msrs->controls[i].addr, val);
}
}
}
@@ -188,7 +170,7 @@ static void ppro_start(struct op_msrs const * const msrs)
static void ppro_stop(struct op_msrs const * const msrs)
{
- unsigned int low, high;
+ u64 val;
int i;
if (!reset_value)
@@ -196,9 +178,9 @@ static void ppro_stop(struct op_msrs const * const msrs)
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;
- CTRL_READ(low, high, msrs, i);
- CTRL_SET_INACTIVE(low);
- CTRL_WRITE(low, high, msrs, i);
+ rdmsrl(msrs->controls[i].addr, val);
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(msrs->controls[i].addr, val);
}
}
@@ -206,12 +188,12 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
{
int i;
- for (i = 0 ; i < num_counters ; ++i) {
- if (CTR_IS_RESERVED(msrs, i))
+ for (i = 0; i < num_counters; ++i) {
+ if (msrs->counters[i].addr)
release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
}
- for (i = 0 ; i < num_counters ; ++i) {
- if (CTRL_IS_RESERVED(msrs, i))
+ for (i = 0; i < num_counters; ++i) {
+ if (msrs->controls[i].addr)
release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
}
if (reset_value) {
@@ -222,8 +204,9 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
struct op_x86_model_spec op_ppro_spec = {
- .num_counters = 2, /* can be overriden */
- .num_controls = 2, /* dito */
+ .num_counters = 2,
+ .num_controls = 2,
+ .reserved = MSR_PPRO_EVENTSEL_RESERVED,
.fill_in_addresses = &ppro_fill_in_addresses,
.setup_ctrs = &ppro_setup_ctrs,
.check_ctrs = &ppro_check_ctrs,
@@ -241,7 +224,7 @@ struct op_x86_model_spec op_ppro_spec = {
* the specific CPU.
*/
-void arch_perfmon_setup_counters(void)
+static void arch_perfmon_setup_counters(void)
{
union cpuid10_eax eax;
@@ -259,11 +242,17 @@ void arch_perfmon_setup_counters(void)
op_arch_perfmon_spec.num_counters = num_counters;
op_arch_perfmon_spec.num_controls = num_counters;
- op_ppro_spec.num_counters = num_counters;
- op_ppro_spec.num_controls = num_counters;
+}
+
+static int arch_perfmon_init(struct oprofile_operations *ignore)
+{
+ arch_perfmon_setup_counters();
+ return 0;
}
struct op_x86_model_spec op_arch_perfmon_spec = {
+ .reserved = MSR_PPRO_EVENTSEL_RESERVED,
+ .init = &arch_perfmon_init,
/* num_counters/num_controls filled in at runtime */
.fill_in_addresses = &ppro_fill_in_addresses,
/* user space does the cpuid check for available events */
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 825e79064d64..b83776180c7f 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -6,51 +6,66 @@
* @remark Read the file COPYING
*
* @author Graydon Hoare
+ * @author Robert Richter <robert.richter@amd.com>
*/
#ifndef OP_X86_MODEL_H
#define OP_X86_MODEL_H
-struct op_saved_msr {
- unsigned int high;
- unsigned int low;
-};
+#include <asm/types.h>
+#include <asm/perf_counter.h>
struct op_msr {
- unsigned long addr;
- struct op_saved_msr saved;
+ unsigned long addr;
+ u64 saved;
};
struct op_msrs {
struct op_msr *counters;
struct op_msr *controls;
+ struct op_msr *multiplex;
};
struct pt_regs;
+struct oprofile_operations;
+
/* The model vtable abstracts the differences between
* various x86 CPU models' perfctr support.
*/
struct op_x86_model_spec {
- int (*init)(struct oprofile_operations *ops);
- void (*exit)(void);
- unsigned int num_counters;
- unsigned int num_controls;
- void (*fill_in_addresses)(struct op_msrs * const msrs);
- void (*setup_ctrs)(struct op_msrs const * const msrs);
- int (*check_ctrs)(struct pt_regs * const regs,
- struct op_msrs const * const msrs);
- void (*start)(struct op_msrs const * const msrs);
- void (*stop)(struct op_msrs const * const msrs);
- void (*shutdown)(struct op_msrs const * const msrs);
+ unsigned int num_counters;
+ unsigned int num_controls;
+ unsigned int num_virt_counters;
+ u64 reserved;
+ u16 event_mask;
+ int (*init)(struct oprofile_operations *ops);
+ void (*exit)(void);
+ void (*fill_in_addresses)(struct op_msrs * const msrs);
+ void (*setup_ctrs)(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs);
+ int (*check_ctrs)(struct pt_regs * const regs,
+ struct op_msrs const * const msrs);
+ void (*start)(struct op_msrs const * const msrs);
+ void (*stop)(struct op_msrs const * const msrs);
+ void (*shutdown)(struct op_msrs const * const msrs);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+ void (*switch_ctrl)(struct op_x86_model_spec const *model,
+ struct op_msrs const * const msrs);
+#endif
};
+struct op_counter_config;
+
+extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
+ struct op_counter_config *counter_config);
+extern int op_x86_phys_to_virt(int phys);
+extern int op_x86_virt_to_phys(int virt);
+
extern struct op_x86_model_spec op_ppro_spec;
-extern struct op_x86_model_spec const op_p4_spec;
-extern struct op_x86_model_spec const op_p4_ht2_spec;
-extern struct op_x86_model_spec const op_amd_spec;
+extern struct op_x86_model_spec op_p4_spec;
+extern struct op_x86_model_spec op_p4_ht2_spec;
+extern struct op_x86_model_spec op_amd_spec;
extern struct op_x86_model_spec op_arch_perfmon_spec;
-extern void arch_perfmon_setup_counters(void);
-
#endif /* OP_X86_MODEL_H */
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
index bd13c3e4c6db..347d882b3bb3 100644
--- a/arch/x86/pci/direct.c
+++ b/arch/x86/pci/direct.c
@@ -192,13 +192,14 @@ struct pci_raw_ops pci_direct_conf2 = {
static int __init pci_sanity_check(struct pci_raw_ops *o)
{
u32 x = 0;
- int devfn;
+ int year, devfn;
if (pci_probe & PCI_NO_CHECKS)
return 1;
/* Assume Type 1 works for newer systems.
This handles machines that don't have anything on PCI Bus 0. */
- if (dmi_get_year(DMI_BIOS_DATE) >= 2001)
+ dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL);
+ if (year >= 2001)
return 1;
for (devfn = 0; devfn < 0x100; devfn++) {
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index eb33aaa8415d..b62ccb840cfb 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -714,7 +714,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
set:
base = ((u64)high << 32) | low;
if (HYPERVISOR_set_segment_base(which, base) != 0)
- ret = -EFAULT;
+ ret = -EIO;
break;
#endif
diff --git a/block/blk-core.c b/block/blk-core.c
index e3299a77a0d8..e695634882a6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -501,6 +501,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+ q->backing_dev_info.name = "block";
err = bdi_init(&q->backing_dev_info);
if (err) {
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 4dfdd03e708f..26b5dd0cb564 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,11 +23,13 @@ comment "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
+ depends on CRYPTO_ANSI_CPRNG
help
 	    This option enables the fips boot option which is
 	    required if you want the system to operate in FIPS 200
 	    certification. You should say no unless you know what
- this is.
+	    this is. Note that CRYPTO_ANSI_CPRNG is required if this
+	    option is selected.
config CRYPTO_ALGAPI
tristate
@@ -156,7 +158,7 @@ config CRYPTO_GCM
tristate "GCM/GMAC support"
select CRYPTO_CTR
select CRYPTO_AEAD
- select CRYPTO_GF128MUL
+ select CRYPTO_GHASH
help
Support for Galois/Counter Mode (GCM) and Galois Message
Authentication Code (GMAC). Required for IPSec.
@@ -267,6 +269,18 @@ config CRYPTO_XCBC
http://csrc.nist.gov/encryption/modes/proposedmodes/
xcbc-mac/xcbc-mac-spec.pdf
+config CRYPTO_VMAC
+ tristate "VMAC support"
+ depends on EXPERIMENTAL
+ select CRYPTO_HASH
+ select CRYPTO_MANAGER
+ help
+ VMAC is a message authentication algorithm designed for
+ very high speed on 64-bit architectures.
+
+ See also:
+ <http://fastcrypto.org/vmac>
+
comment "Digest"
config CRYPTO_CRC32C
@@ -289,6 +303,13 @@ config CRYPTO_CRC32C_INTEL
gain performance compared with software implementation.
Module will be crc32c-intel.
+config CRYPTO_GHASH
+ tristate "GHASH digest algorithm"
+ select CRYPTO_SHASH
+ select CRYPTO_GF128MUL
+ help
+	  GHASH is a message digest algorithm for GCM (Galois/Counter Mode).
+
config CRYPTO_MD4
tristate "MD4 digest algorithm"
select CRYPTO_HASH
@@ -780,13 +801,14 @@ comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
tristate "Pseudo Random Number Generation for Cryptographic modules"
+ default m
select CRYPTO_AES
select CRYPTO_RNG
- select CRYPTO_FIPS
help
This option enables the generic pseudo random number generator
for cryptographic modules. Uses the Algorithm specified in
- ANSI X9.31 A.2.4
+	  ANSI X9.31 A.2.4. Note that this option must be enabled if CRYPTO_FIPS
+	  is selected.
source "drivers/crypto/Kconfig"
diff --git a/crypto/Makefile b/crypto/Makefile
index 673d9f7c1bda..9e8f61908cb5 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-objs := api.o cipher.o digest.o compress.o
+crypto-objs := api.o cipher.o compress.o
obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
@@ -22,7 +22,6 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
-crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
crypto_hash-objs += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
@@ -33,6 +32,7 @@ cryptomgr-objs := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
+obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
+obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
#
# generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e11ce37c7104..f6f08336df5d 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -14,6 +14,7 @@
*/
#include <crypto/internal/skcipher.h>
+#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -25,6 +26,8 @@
#include "internal.h"
+static const char *skcipher_default_geniv __read_mostly;
+
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
@@ -180,7 +183,14 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type);
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
- return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
+ if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+ alg->cra_ablkcipher.ivsize) !=
+ alg->cra_blocksize)
+ return "chainiv";
+
+ return alg->cra_flags & CRYPTO_ALG_ASYNC ?
+ "eseqiv" : skcipher_default_geniv;
}
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -201,8 +211,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
int err;
larval = crypto_larval_lookup(alg->cra_driver_name,
+ (type & ~CRYPTO_ALG_TYPE_MASK) |
CRYPTO_ALG_TYPE_GIVCIPHER,
- CRYPTO_ALG_TYPE_MASK);
+ mask | CRYPTO_ALG_TYPE_MASK);
err = PTR_ERR(larval);
if (IS_ERR(larval))
goto out;
@@ -360,3 +371,17 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
+
+static int __init skcipher_module_init(void)
+{
+ skcipher_default_geniv = num_possible_cpus() > 1 ?
+ "eseqiv" : "chainiv";
+ return 0;
+}
+
+static void skcipher_module_exit(void)
+{
+}
+
+module_init(skcipher_module_init);
+module_exit(skcipher_module_exit);
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index b8b66ec3883b..e78b7ee44a74 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
ctx->key_enc[6 * i + 11] = t; \
} while (0)
-#define loop8(i) do { \
+#define loop8tophalf(i) do { \
t = ror32(t, 8); \
t = ls_box(t) ^ rco_tab[i]; \
t ^= ctx->key_enc[8 * i]; \
@@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
ctx->key_enc[8 * i + 10] = t; \
t ^= ctx->key_enc[8 * i + 3]; \
ctx->key_enc[8 * i + 11] = t; \
+} while (0)
+
+#define loop8(i) do { \
+ loop8tophalf(i); \
t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \
ctx->key_enc[8 * i + 12] = t; \
t ^= ctx->key_enc[8 * i + 5]; \
@@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
ctx->key_enc[5] = le32_to_cpu(key[5]);
ctx->key_enc[6] = le32_to_cpu(key[6]);
t = ctx->key_enc[7] = le32_to_cpu(key[7]);
- for (i = 0; i < 7; ++i)
+ for (i = 0; i < 6; ++i)
loop8(i);
+ loop8tophalf(i);
break;
}
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f3476374f764..33a4ff45f842 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -24,6 +24,19 @@
#include "internal.h"
+struct ahash_request_priv {
+ crypto_completion_t complete;
+ void *data;
+ u8 *result;
+ void *ubuf[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
@@ -132,36 +145,34 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
- buffer = kmalloc(absize, GFP_ATOMIC);
+ buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
- ret = ahash->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ ret = tfm->setkey(tfm, alignbuffer, keylen);
+ kzfree(buffer);
return ret;
}
-static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct ahash_alg *ahash = crypto_ahash_alg(tfm);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)key & alignmask)
return ahash_setkey_unaligned(tfm, key, keylen);
- return ahash->setkey(tfm, key, keylen);
+ return tfm->setkey(tfm, key, keylen);
}
+EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
@@ -169,44 +180,221 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
return -ENOSYS;
}
-int crypto_ahash_import(struct ahash_request *req, const u8 *in)
+static inline unsigned int ahash_align_buffer_size(unsigned len,
+ unsigned long mask)
+{
+ return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
+}
+
+static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+{
+ struct ahash_request_priv *priv = req->priv;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
+ kzfree(priv);
+}
+
+static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ ahash_op_unaligned_finish(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_op_unaligned(struct ahash_request *req,
+ int (*op)(struct ahash_request *))
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ struct ahash_request_priv *priv;
+ int err;
+
+ priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
- memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm));
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
- if (alg->reinit)
- alg->reinit(req);
+ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+ req->base.complete = ahash_op_unaligned_done;
+ req->base.data = req;
+ req->priv = priv;
- return 0;
+ err = op(req);
+ ahash_op_unaligned_finish(req, err);
+
+ return err;
}
-EXPORT_SYMBOL_GPL(crypto_ahash_import);
-static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
+static int crypto_ahash_op(struct ahash_request *req,
+ int (*op)(struct ahash_request *))
{
- return alg->cra_ctxsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+
+ if ((unsigned long)req->result & alignmask)
+ return ahash_op_unaligned(req, op);
+
+ return op(req);
}
-static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+int crypto_ahash_final(struct ahash_request *req)
{
- struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
- struct ahash_tfm *crt = &tfm->crt_ahash;
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_final);
- if (alg->digestsize > PAGE_SIZE / 8)
- return -EINVAL;
+int crypto_ahash_finup(struct ahash_request *req)
+{
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_finup);
+
+int crypto_ahash_digest(struct ahash_request *req)
+{
+ return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_digest);
+
+static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+{
+ struct ahash_request_priv *priv = req->priv;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
- crt->init = alg->init;
- crt->update = alg->update;
- crt->final = alg->final;
- crt->digest = alg->digest;
- crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey;
- crt->digestsize = alg->digestsize;
+ kzfree(priv);
+}
+
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ ahash_def_finup_finish2(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_def_finup_finish1(struct ahash_request *req, int err)
+{
+ if (err)
+ goto out;
+
+ req->base.complete = ahash_def_finup_done2;
+ req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_ahash_reqtfm(req)->final(req);
+
+out:
+ ahash_def_finup_finish2(req, err);
+ return err;
+}
+
+static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+{
+ struct ahash_request *areq = req->data;
+ struct ahash_request_priv *priv = areq->priv;
+ crypto_completion_t complete = priv->complete;
+ void *data = priv->data;
+
+ err = ahash_def_finup_finish1(areq, err);
+
+ complete(data, err);
+}
+
+static int ahash_def_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ struct ahash_request_priv *priv;
+
+ priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
+
+ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+ req->base.complete = ahash_def_finup_done1;
+ req->base.data = req;
+ req->priv = priv;
+
+ return ahash_def_finup_finish1(req, tfm->update(req));
+}
+
+static int ahash_no_export(struct ahash_request *req, void *out)
+{
+ return -ENOSYS;
+}
+
+static int ahash_no_import(struct ahash_request *req, const void *in)
+{
+ return -ENOSYS;
+}
+
+static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(hash);
+
+ hash->setkey = ahash_nosetkey;
+ hash->export = ahash_no_export;
+ hash->import = ahash_no_import;
+
+ if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
+ return crypto_init_shash_ops_async(tfm);
+
+ hash->init = alg->init;
+ hash->update = alg->update;
+ hash->final = alg->final;
+ hash->finup = alg->finup ?: ahash_def_finup;
+ hash->digest = alg->digest;
+
+ if (alg->setkey)
+ hash->setkey = alg->setkey;
+ if (alg->export)
+ hash->export = alg->export;
+ if (alg->import)
+ hash->import = alg->import;
return 0;
}
+static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
+{
+ if (alg->cra_type == &crypto_ahash_type)
+ return alg->cra_ctxsize;
+
+ return sizeof(struct crypto_shash *);
+}
+
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
@@ -215,17 +403,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize);
+ seq_printf(m, "digestsize : %u\n",
+ __crypto_hash_alg_common(alg)->digestsize);
}
const struct crypto_type crypto_ahash_type = {
- .ctxsize = crypto_ahash_ctxsize,
- .init = crypto_init_ahash_ops,
+ .extsize = crypto_ahash_extsize,
+ .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);
+struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
+
+static int ahash_prepare_alg(struct ahash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+
+ if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+ alg->halg.statesize > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ base->cra_type = &crypto_ahash_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
+
+ return 0;
+}
+
+int crypto_register_ahash(struct ahash_alg *alg)
+{
+ struct crypto_alg *base = &alg->halg.base;
+ int err;
+
+ err = ahash_prepare_alg(alg);
+ if (err)
+ return err;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_ahash);
+
+int crypto_unregister_ahash(struct ahash_alg *alg)
+{
+ return crypto_unregister_alg(&alg->halg.base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
+
+int ahash_register_instance(struct crypto_template *tmpl,
+ struct ahash_instance *inst)
+{
+ int err;
+
+ err = ahash_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(ahash_register_instance);
+
+void ahash_free_instance(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(ahash_instance(inst));
+}
+EXPORT_SYMBOL_GPL(ahash_free_instance);
+
+int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+ struct hash_alg_common *alg,
+ struct crypto_instance *inst)
+{
+ return crypto_init_spawn2(&spawn->base, &alg->base, inst,
+ &crypto_ahash_type);
+}
+EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
+
+struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+{
+ struct crypto_alg *alg;
+
+ alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
+ return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
+}
+EXPORT_SYMBOL_GPL(ahash_attr_alg);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index df0863d56995..f149b1c8b76d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -81,16 +81,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
crypto_tmpl_put(tmpl);
}
+static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
+ struct list_head *stack,
+ struct list_head *top,
+ struct list_head *secondary_spawns)
+{
+ struct crypto_spawn *spawn, *n;
+
+ if (list_empty(stack))
+ return NULL;
+
+ spawn = list_first_entry(stack, struct crypto_spawn, list);
+ n = list_entry(spawn->list.next, struct crypto_spawn, list);
+
+ if (spawn->alg && &n->list != stack && !n->alg)
+ n->alg = (n->list.next == stack) ? alg :
+ &list_entry(n->list.next, struct crypto_spawn,
+ list)->inst->alg;
+
+ list_move(&spawn->list, secondary_spawns);
+
+ return &n->list == stack ? top : &n->inst->alg.cra_users;
+}
+
static void crypto_remove_spawn(struct crypto_spawn *spawn,
- struct list_head *list,
- struct list_head *secondary_spawns)
+ struct list_head *list)
{
struct crypto_instance *inst = spawn->inst;
struct crypto_template *tmpl = inst->tmpl;
- list_del_init(&spawn->list);
- spawn->alg = NULL;
-
if (crypto_is_dead(&inst->alg))
return;
@@ -106,25 +125,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
- list_splice(&inst->alg.cra_users, secondary_spawns);
+ BUG_ON(!list_empty(&inst->alg.cra_users));
}
-static void crypto_remove_spawns(struct list_head *spawns,
- struct list_head *list, u32 new_type)
+static void crypto_remove_spawns(struct crypto_alg *alg,
+ struct list_head *list,
+ struct crypto_alg *nalg)
{
+ u32 new_type = (nalg ?: alg)->cra_flags;
struct crypto_spawn *spawn, *n;
LIST_HEAD(secondary_spawns);
+ struct list_head *spawns;
+ LIST_HEAD(stack);
+ LIST_HEAD(top);
+ spawns = &alg->cra_users;
list_for_each_entry_safe(spawn, n, spawns, list) {
if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
continue;
- crypto_remove_spawn(spawn, list, &secondary_spawns);
+ list_move(&spawn->list, &top);
}
- while (!list_empty(&secondary_spawns)) {
- list_for_each_entry_safe(spawn, n, &secondary_spawns, list)
- crypto_remove_spawn(spawn, list, &secondary_spawns);
+ spawns = &top;
+ do {
+ while (!list_empty(spawns)) {
+ struct crypto_instance *inst;
+
+ spawn = list_first_entry(spawns, struct crypto_spawn,
+ list);
+ inst = spawn->inst;
+
+ BUG_ON(&inst->alg == alg);
+
+ list_move(&spawn->list, &stack);
+
+ if (&inst->alg == nalg)
+ break;
+
+ spawn->alg = NULL;
+ spawns = &inst->alg.cra_users;
+ }
+ } while ((spawns = crypto_more_spawns(alg, &stack, &top,
+ &secondary_spawns)));
+
+ list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
+ if (spawn->alg)
+ list_move(&spawn->list, &spawn->alg->cra_users);
+ else
+ crypto_remove_spawn(spawn, list);
}
}
@@ -258,7 +307,7 @@ found:
q->cra_priority > alg->cra_priority)
continue;
- crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags);
+ crypto_remove_spawns(q, &list, alg);
}
complete:
@@ -330,7 +379,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
- crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags);
+ crypto_remove_spawns(alg, list, NULL);
return 0;
}
@@ -488,20 +537,38 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
}
EXPORT_SYMBOL_GPL(crypto_init_spawn);
+int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
+ struct crypto_instance *inst,
+ const struct crypto_type *frontend)
+{
+ int err = -EINVAL;
+
+ if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
+ goto out;
+
+ spawn->frontend = frontend;
+ err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_init_spawn2);
+
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
+ if (!spawn->alg)
+ return;
+
down_write(&crypto_alg_sem);
list_del(&spawn->list);
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
-struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
- u32 mask)
+static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;
- struct crypto_tfm *tfm;
down_read(&crypto_alg_sem);
alg = spawn->alg;
@@ -516,6 +583,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
return ERR_PTR(-EAGAIN);
}
+ return alg;
+}
+
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+ u32 mask)
+{
+ struct crypto_alg *alg;
+ struct crypto_tfm *tfm;
+
+ alg = crypto_spawn_alg(spawn);
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
tfm = ERR_PTR(-EINVAL);
if (unlikely((alg->cra_flags ^ type) & mask))
goto out_put_alg;
@@ -532,6 +612,27 @@ out_put_alg:
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
+void *crypto_spawn_tfm2(struct crypto_spawn *spawn)
+{
+ struct crypto_alg *alg;
+ struct crypto_tfm *tfm;
+
+ alg = crypto_spawn_alg(spawn);
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
+ tfm = crypto_create_tfm(alg, spawn->frontend);
+ if (IS_ERR(tfm))
+ goto out_put_alg;
+
+ return tfm;
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+
int crypto_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&crypto_chain, nb);
@@ -595,7 +696,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
-struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask)
{
const char *name;
int err;
@@ -605,9 +708,9 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
if (IS_ERR(name))
return ERR_PTR(err);
- return crypto_alg_mod_lookup(name, type, mask);
+ return crypto_find_alg(name, frontend, type, mask);
}
-EXPORT_SYMBOL_GPL(crypto_attr_alg);
+EXPORT_SYMBOL_GPL(crypto_attr_alg2);
int crypto_attr_u32(struct rtattr *rta, u32 *num)
{
@@ -627,17 +730,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
}
EXPORT_SYMBOL_GPL(crypto_attr_u32);
-struct crypto_instance *crypto_alloc_instance(const char *name,
- struct crypto_alg *alg)
+void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
+ unsigned int head)
{
struct crypto_instance *inst;
- struct crypto_spawn *spawn;
+ char *p;
int err;
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
+ p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
+ GFP_KERNEL);
+ if (!p)
return ERR_PTR(-ENOMEM);
+ inst = (void *)(p + head);
+
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
@@ -647,6 +753,25 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
+ return p;
+
+err_free_inst:
+ kfree(p);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
+
+struct crypto_instance *crypto_alloc_instance(const char *name,
+ struct crypto_alg *alg)
+{
+ struct crypto_instance *inst;
+ struct crypto_spawn *spawn;
+ int err;
+
+ inst = crypto_alloc_instance2(name, alg, 0);
+ if (IS_ERR(inst))
+ goto out;
+
spawn = crypto_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
@@ -658,7 +783,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
err_free_inst:
kfree(inst);
- return ERR_PTR(err);
+ inst = ERR_PTR(err);
+
+out:
+ return inst;
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance);
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 9908dd830c26..412241ce4cfa 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -68,6 +68,11 @@ static int cryptomgr_probe(void *data)
goto err;
do {
+ if (tmpl->create) {
+ err = tmpl->create(tmpl, param->tb);
+ continue;
+ }
+
inst = tmpl->alloc(param->tb);
if (IS_ERR(inst))
err = PTR_ERR(inst);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index d80ed4c1e009..3aa6e3834bfe 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -187,7 +187,6 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
/* Our exported functions */
static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
{
- unsigned long flags;
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
@@ -196,7 +195,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
if (nbytes < 0)
return -EINVAL;
- spin_lock_irqsave(&ctx->prng_lock, flags);
+ spin_lock_bh(&ctx->prng_lock);
err = -EINVAL;
if (ctx->flags & PRNG_NEED_RESET)
@@ -268,7 +267,7 @@ empty_rbuf:
goto remainder;
done:
- spin_unlock_irqrestore(&ctx->prng_lock, flags);
+ spin_unlock_bh(&ctx->prng_lock);
dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
err, ctx);
return err;
@@ -284,10 +283,9 @@ static int reset_prng_context(struct prng_context *ctx,
unsigned char *V, unsigned char *DT)
{
int ret;
- int rc = -EINVAL;
unsigned char *prng_key;
- spin_lock(&ctx->prng_lock);
+ spin_lock_bh(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
@@ -308,34 +306,20 @@ static int reset_prng_context(struct prng_context *ctx,
memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
- if (ctx->tfm)
- crypto_free_cipher(ctx->tfm);
-
- ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tfm)) {
- dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
- ctx);
- ctx->tfm = NULL;
- goto out;
- }
-
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
if (ret) {
dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
crypto_cipher_get_flags(ctx->tfm));
- crypto_free_cipher(ctx->tfm);
goto out;
}
- rc = 0;
+ ret = 0;
ctx->flags &= ~PRNG_NEED_RESET;
out:
- spin_unlock(&ctx->prng_lock);
-
- return rc;
-
+ spin_unlock_bh(&ctx->prng_lock);
+ return ret;
}
static int cprng_init(struct crypto_tfm *tfm)
@@ -343,6 +327,12 @@ static int cprng_init(struct crypto_tfm *tfm)
struct prng_context *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->prng_lock);
+ ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(ctx->tfm)) {
+ dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
+ ctx);
+ return PTR_ERR(ctx->tfm);
+ }
if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
return -EINVAL;
@@ -418,17 +408,10 @@ static struct crypto_alg rng_alg = {
/* Module initialization */
static int __init prng_mod_init(void)
{
- int ret = 0;
-
if (fips_enabled)
rng_alg.cra_priority += 200;
- ret = crypto_register_alg(&rng_alg);
-
- if (ret)
- goto out;
-out:
- return 0;
+ return crypto_register_alg(&rng_alg);
}
static void __exit prng_mod_fini(void)
diff --git a/crypto/api.c b/crypto/api.c
index d5944f92b416..798526d90538 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -285,13 +285,6 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
return crypto_init_cipher_ops(tfm);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
- CRYPTO_ALG_TYPE_HASH_MASK)
- return crypto_init_digest_ops_async(tfm);
- else
- return crypto_init_digest_ops(tfm);
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_ops(tfm);
@@ -318,11 +311,7 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
case CRYPTO_ALG_TYPE_CIPHER:
crypto_exit_cipher_ops(tfm);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- crypto_exit_digest_ops(tfm);
- break;
-
+
case CRYPTO_ALG_TYPE_COMPRESS:
crypto_exit_compress_ops(tfm);
break;
@@ -349,11 +338,7 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
case CRYPTO_ALG_TYPE_CIPHER:
len += crypto_cipher_ctxsize(alg);
break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- len += crypto_digest_ctxsize(alg);
- break;
-
+
case CRYPTO_ALG_TYPE_COMPRESS:
len += crypto_compress_ctxsize(alg);
break;
@@ -472,7 +457,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
int err = -ENOMEM;
tfmsize = frontend->tfmsize;
- total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend);
+ total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
mem = kzalloc(total, GFP_KERNEL);
if (mem == NULL)
@@ -481,7 +466,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
- err = frontend->init_tfm(tfm, frontend);
+ err = frontend->init_tfm(tfm);
if (err)
goto out_free_tfm;
@@ -503,6 +488,27 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_create_tfm);
+struct crypto_alg *crypto_find_alg(const char *alg_name,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask)
+{
+ struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
+ crypto_alg_mod_lookup;
+
+ if (frontend) {
+ type &= frontend->maskclear;
+ mask &= frontend->maskclear;
+ type |= frontend->type;
+ mask |= frontend->maskset;
+
+ if (frontend->lookup)
+ lookup = frontend->lookup;
+ }
+
+ return lookup(alg_name, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_find_alg);
+
/*
* crypto_alloc_tfm - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
@@ -526,21 +532,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask)
{
- struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
void *tfm;
int err;
- type &= frontend->maskclear;
- mask &= frontend->maskclear;
- type |= frontend->type;
- mask |= frontend->maskset;
-
- lookup = frontend->lookup ?: crypto_alg_mod_lookup;
-
for (;;) {
struct crypto_alg *alg;
- alg = lookup(alg_name, type, mask);
+ alg = crypto_find_alg(alg_name, frontend, type, mask);
if (IS_ERR(alg)) {
err = PTR_ERR(alg);
goto err;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 5793b64c81a8..4d6f49a5daeb 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -23,24 +23,36 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
+
struct authenc_instance_ctx {
- struct crypto_spawn auth;
+ struct crypto_ahash_spawn auth;
struct crypto_skcipher_spawn enc;
};
struct crypto_authenc_ctx {
- spinlock_t auth_lock;
- struct crypto_hash *auth;
+ unsigned int reqoff;
+ struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
};
+struct authenc_request_ctx {
+ unsigned int cryptlen;
+ struct scatterlist *sg;
+ struct scatterlist asg[2];
+ struct scatterlist cipher[2];
+ crypto_completion_t complete;
+ crypto_completion_t update_complete;
+ char tail[];
+};
+
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
unsigned int authkeylen;
unsigned int enckeylen;
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_hash *auth = ctx->auth;
+ struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
@@ -64,11 +76,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
authkeylen = keylen - enckeylen;
- crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
- crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) &
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_hash_setkey(auth, key, authkeylen);
- crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) &
+ err = crypto_ahash_setkey(auth, key, authkeylen);
+ crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
if (err)
@@ -103,40 +115,198 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
sg_mark_end(head);
}
-static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags,
- struct scatterlist *cipher,
- unsigned int cryptlen)
+static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc), 1);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
+ areq_ctx->cryptlen,
+ crypto_aead_authsize(authenc), 1);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
+ int err)
{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_hash *auth = ctx->auth;
- struct hash_desc desc = {
- .tfm = auth,
- .flags = aead_request_flags(req) & flags,
- };
- u8 *hash = aead_request_ctx(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_finup(ahreq);
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc);
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static void authenc_verify_ahash_done(struct crypto_async_request *areq,
+ int err)
+{
+ u8 *ihash;
+ unsigned int authsize;
+ struct ablkcipher_request *abreq;
+ struct aead_request *req = areq->data;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+
+ if (err)
+ goto out;
+
+ authsize = crypto_aead_authsize(authenc);
+ ihash = ahreq->result + authsize;
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
+
+ err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
+ if (err)
+ goto out;
+
+ abreq = aead_request_ctx(req);
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ err = crypto_ablkcipher_decrypt(abreq);
+
+out:
+ aead_request_complete(req, err);
+}
+
+static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
+{
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ u8 *hash = areq_ctx->tail;
int err;
- hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth),
- crypto_hash_alignmask(auth) + 1);
+ hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
+ ahash_request_set_tfm(ahreq, auth);
- spin_lock_bh(&ctx->auth_lock);
- err = crypto_hash_init(&desc);
+ err = crypto_ahash_init(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->update_complete, req);
- err = crypto_hash_update(&desc, req->assoc, req->assoclen);
+ err = crypto_ahash_update(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
+
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->complete, req);
- err = crypto_hash_update(&desc, cipher, cryptlen);
+ err = crypto_ahash_finup(ahreq);
if (err)
- goto auth_unlock;
+ return ERR_PTR(err);
- err = crypto_hash_final(&desc, hash);
-auth_unlock:
- spin_unlock_bh(&ctx->auth_lock);
+ return hash;
+}
+
+static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
+{
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+ u8 *hash = areq_ctx->tail;
+ int err;
+ hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
+ areq_ctx->cryptlen);
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ areq_ctx->complete, req);
+
+ err = crypto_ahash_digest(ahreq);
if (err)
return ERR_PTR(err);
@@ -147,11 +317,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
unsigned int flags)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *dst = req->dst;
- struct scatterlist cipher[2];
- struct page *dstp;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *asg = areq_ctx->asg;
unsigned int ivsize = crypto_aead_ivsize(authenc);
- unsigned int cryptlen;
+ unsigned int cryptlen = req->cryptlen;
+ authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
+ struct page *dstp;
u8 *vdst;
u8 *hash;
@@ -163,10 +337,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
sg_set_buf(cipher, iv, ivsize);
authenc_chain(cipher, dst, vdst == iv + ivsize);
dst = cipher;
+ cryptlen += ivsize;
}
- cryptlen = req->cryptlen + ivsize;
- hash = crypto_authenc_hash(req, flags, dst, cryptlen);
+ if (sg_is_last(assoc)) {
+ authenc_ahash_fn = crypto_authenc_ahash;
+ sg_init_table(asg, 2);
+ sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
+ authenc_chain(asg, dst, 0);
+ dst = asg;
+ cryptlen += req->assoclen;
+ }
+
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->sg = dst;
+
+ areq_ctx->complete = authenc_geniv_ahash_done;
+ areq_ctx->update_complete = authenc_geniv_ahash_update_done;
+
+ hash = authenc_ahash_fn(req, flags);
if (IS_ERR(hash))
return PTR_ERR(hash);
@@ -256,22 +445,25 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
}
static int crypto_authenc_verify(struct aead_request *req,
- struct scatterlist *cipher,
- unsigned int cryptlen)
+ authenc_ahash_t authenc_ahash_fn)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
u8 *ohash;
u8 *ihash;
unsigned int authsize;
- ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher,
- cryptlen);
+ areq_ctx->complete = authenc_verify_ahash_done;
+	areq_ctx->update_complete = authenc_verify_ahash_update_done;
+
+ ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
if (IS_ERR(ohash))
return PTR_ERR(ohash);
authsize = crypto_aead_authsize(authenc);
ihash = ohash + authsize;
- scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0);
+ scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
+ authsize, 0);
return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
}
@@ -279,10 +471,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
unsigned int cryptlen)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *src = req->src;
- struct scatterlist cipher[2];
- struct page *srcp;
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *cipher = areq_ctx->cipher;
+ struct scatterlist *asg = areq_ctx->asg;
unsigned int ivsize = crypto_aead_ivsize(authenc);
+ authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
+ struct page *srcp;
u8 *vsrc;
srcp = sg_page(src);
@@ -293,9 +489,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
sg_set_buf(cipher, iv, ivsize);
authenc_chain(cipher, src, vsrc == iv + ivsize);
src = cipher;
+ cryptlen += ivsize;
+ }
+
+ if (sg_is_last(assoc)) {
+ authenc_ahash_fn = crypto_authenc_ahash;
+ sg_init_table(asg, 2);
+ sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
+ authenc_chain(asg, src, 0);
+ src = asg;
+ cryptlen += req->assoclen;
}
- return crypto_authenc_verify(req, src, cryptlen + ivsize);
+ areq_ctx->cryptlen = cryptlen;
+ areq_ctx->sg = src;
+
+ return crypto_authenc_verify(req, authenc_ahash_fn);
}
static int crypto_authenc_decrypt(struct aead_request *req)
@@ -326,38 +535,41 @@ static int crypto_authenc_decrypt(struct aead_request *req)
static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_hash *auth;
+ struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
int err;
- auth = crypto_spawn_hash(&ictx->auth);
+ auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
+ ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
+ crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1);
+
enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
- goto err_free_hash;
+ goto err_free_ahash;
ctx->auth = auth;
ctx->enc = enc;
+
tfm->crt_aead.reqsize = max_t(unsigned int,
- (crypto_hash_alignmask(auth) &
- ~(crypto_tfm_ctx_alignment() - 1)) +
- crypto_hash_digestsize(auth) * 2,
- sizeof(struct skcipher_givcrypt_request) +
- crypto_ablkcipher_reqsize(enc) +
- crypto_ablkcipher_ivsize(enc));
-
- spin_lock_init(&ctx->auth_lock);
+ crypto_ahash_reqsize(auth) + ctx->reqoff +
+ sizeof(struct authenc_request_ctx) +
+ sizeof(struct ahash_request),
+ sizeof(struct skcipher_givcrypt_request) +
+ crypto_ablkcipher_reqsize(enc) +
+ crypto_ablkcipher_ivsize(enc));
return 0;
-err_free_hash:
- crypto_free_hash(auth);
+err_free_ahash:
+ crypto_free_ahash(auth);
return err;
}
@@ -365,7 +577,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx->auth);
+ crypto_free_ahash(ctx->auth);
crypto_free_ablkcipher(ctx->enc);
}
@@ -373,7 +585,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
- struct crypto_alg *auth;
+ struct hash_alg_common *auth;
+ struct crypto_alg *auth_base;
struct crypto_alg *enc;
struct authenc_instance_ctx *ctx;
const char *enc_name;
@@ -387,11 +600,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
- auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
+ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
if (IS_ERR(auth))
return ERR_PTR(PTR_ERR(auth));
+ auth_base = &auth->base;
+
enc_name = crypto_attr_alg_name(tb[2]);
err = PTR_ERR(enc_name);
if (IS_ERR(enc_name))
@@ -404,7 +619,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
ctx = crypto_instance_ctx(inst);
- err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK);
+ err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
if (err)
goto err_free_inst;
@@ -419,28 +634,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
- "authenc(%s,%s)", auth->cra_name, enc->cra_name) >=
+ "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "authenc(%s,%s)", auth->cra_driver_name,
+ "authenc(%s,%s)", auth_base->cra_driver_name,
enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority;
+ inst->alg.cra_priority = enc->cra_priority *
+ 10 + auth_base->cra_priority;
inst->alg.cra_blocksize = enc->cra_blocksize;
- inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask;
+ inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
inst->alg.cra_type = &crypto_aead_type;
inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
- inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ?
- auth->cra_hash.digestsize :
- auth->cra_type ?
- __crypto_shash_alg(auth)->digestsize :
- auth->cra_digest.dia_digestsize;
+ inst->alg.cra_aead.maxauthsize = auth->digestsize;
inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
@@ -453,13 +665,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
out:
- crypto_mod_put(auth);
+ crypto_mod_put(auth_base);
return inst;
err_drop_enc:
crypto_drop_skcipher(&ctx->enc);
err_drop_auth:
- crypto_drop_spawn(&ctx->auth);
+ crypto_drop_ahash(&ctx->auth);
err_free_inst:
kfree(inst);
out_put_auth:
@@ -472,7 +684,7 @@ static void crypto_authenc_free(struct crypto_instance *inst)
struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->enc);
- crypto_drop_spawn(&ctx->auth);
+ crypto_drop_ahash(&ctx->auth);
kfree(inst);
}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ae5fa99d5d36..35335825a4ef 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,11 @@ struct cryptd_instance_ctx {
struct cryptd_queue *queue;
};
+struct hashd_instance_ctx {
+ struct crypto_shash_spawn spawn;
+ struct cryptd_queue *queue;
+};
+
struct cryptd_blkcipher_ctx {
struct crypto_blkcipher *child;
};
@@ -48,11 +53,12 @@ struct cryptd_blkcipher_request_ctx {
};
struct cryptd_hash_ctx {
- struct crypto_hash *child;
+ struct crypto_shash *child;
};
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
+ struct shash_desc desc;
};
static void cryptd_queue_worker(struct work_struct *work);
@@ -249,32 +255,24 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
crypto_free_blkcipher(ctx->child);
}
-static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
- struct cryptd_queue *queue)
+static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
+ unsigned int tail)
{
+ char *p;
struct crypto_instance *inst;
- struct cryptd_instance_ctx *ctx;
int err;
- inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- if (!inst) {
- inst = ERR_PTR(-ENOMEM);
- goto out;
- }
+ p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ inst = (void *)(p + head);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_inst;
- ctx = crypto_instance_ctx(inst);
- err = crypto_init_spawn(&ctx->spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
-
- ctx->queue = queue;
-
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_priority = alg->cra_priority + 50;
@@ -282,29 +280,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
inst->alg.cra_alignmask = alg->cra_alignmask;
out:
- return inst;
+ return p;
out_free_inst:
- kfree(inst);
- inst = ERR_PTR(err);
+ kfree(p);
+ p = ERR_PTR(err);
goto out;
}
-static struct crypto_instance *cryptd_alloc_blkcipher(
- struct rtattr **tb, struct cryptd_queue *queue)
+static int cryptd_create_blkcipher(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ struct cryptd_queue *queue)
{
+ struct cryptd_instance_ctx *ctx;
struct crypto_instance *inst;
struct crypto_alg *alg;
+ int err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ return PTR_ERR(alg);
- inst = cryptd_alloc_instance(alg, queue);
+ inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
+ ctx = crypto_instance_ctx(inst);
+ ctx->queue = queue;
+
+ err = crypto_init_spawn(&ctx->spawn, alg, inst,
+ CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+ if (err)
+ goto out_free_inst;
+
inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
inst->alg.cra_type = &crypto_ablkcipher_type;
@@ -323,26 +333,34 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
+ err = crypto_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_spawn(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
+
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_spawn *spawn = &ictx->spawn;
+ struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_shash_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_hash *cipher;
+ struct crypto_shash *hash;
- cipher = crypto_spawn_hash(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
+ hash = crypto_spawn_shash(spawn);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
- ctx->child = cipher;
- tfm->crt_ahash.reqsize =
- sizeof(struct cryptd_hash_request_ctx);
+ ctx->child = hash;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct cryptd_hash_request_ctx) +
+ crypto_shash_descsize(hash));
return 0;
}
@@ -350,22 +368,22 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx->child);
+ crypto_free_shash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_hash *child = ctx->child;
+ struct crypto_shash *child = ctx->child;
int err;
- crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_hash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
+ crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_shash_setkey(child, key, keylen);
+ crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
return err;
}
@@ -385,21 +403,19 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
-
- rctx = ahash_request_ctx(req);
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_crt(child)->init(&desc);
+ err = crypto_shash_init(desc);
req->base.complete = rctx->complete;
@@ -416,23 +432,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req)
static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
+ struct ahash_request *req = ahash_request_cast(req_async);
struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_crt(child)->update(&desc,
- req->src,
- req->nbytes);
+ err = shash_ahash_update(req, &rctx->desc);
req->base.complete = rctx->complete;
@@ -449,21 +457,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req)
static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
-
- rctx = ahash_request_ctx(req);
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_crt(child)->final(&desc, req->result);
+ err = crypto_shash_final(&rctx->desc, req->result);
req->base.complete = rctx->complete;
@@ -478,26 +478,44 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
-static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_hash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- struct hash_desc desc;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- rctx = ahash_request_ctx(req);
+ if (unlikely(err == -EINPROGRESS))
+ goto out;
+
+ err = shash_ahash_finup(req, &rctx->desc);
+
+ req->base.complete = rctx->complete;
+
+out:
+ local_bh_disable();
+ rctx->complete(&req->base, err);
+ local_bh_enable();
+}
+
+static int cryptd_hash_finup_enqueue(struct ahash_request *req)
+{
+ return cryptd_hash_enqueue(req, cryptd_hash_finup);
+}
+
+static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+ struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct crypto_shash *child = ctx->child;
+ struct ahash_request *req = ahash_request_cast(req_async);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
if (unlikely(err == -EINPROGRESS))
goto out;
- desc.tfm = child;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = child;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_crt(child)->digest(&desc,
- req->src,
- req->nbytes,
- req->result);
+ err = shash_ahash_digest(req, desc);
req->base.complete = rctx->complete;
@@ -512,64 +530,108 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
-static struct crypto_instance *cryptd_alloc_hash(
- struct rtattr **tb, struct cryptd_queue *queue)
+static int cryptd_hash_export(struct ahash_request *req, void *out)
{
- struct crypto_instance *inst;
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_export(&rctx->desc, out);
+}
+
+static int cryptd_hash_import(struct ahash_request *req, const void *in)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ return crypto_shash_import(&rctx->desc, in);
+}
+
+static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ struct cryptd_queue *queue)
+{
+ struct hashd_instance_ctx *ctx;
+ struct ahash_instance *inst;
+ struct shash_alg *salg;
struct crypto_alg *alg;
+ int err;
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
- if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
- inst = cryptd_alloc_instance(alg, queue);
+ alg = &salg->base;
+ inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
+ sizeof(*ctx));
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
- inst->alg.cra_type = &crypto_ahash_type;
+ ctx = ahash_instance_ctx(inst);
+ ctx->queue = queue;
- inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
- inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+ err = crypto_init_shash_spawn(&ctx->spawn, salg,
+ ahash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
- inst->alg.cra_init = cryptd_hash_init_tfm;
- inst->alg.cra_exit = cryptd_hash_exit_tfm;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
- inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
- inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
- inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
- inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
- inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
+ inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+
+ inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
+ inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
+
+ inst->alg.init = cryptd_hash_init_enqueue;
+ inst->alg.update = cryptd_hash_update_enqueue;
+ inst->alg.final = cryptd_hash_final_enqueue;
+ inst->alg.finup = cryptd_hash_finup_enqueue;
+ inst->alg.export = cryptd_hash_export;
+ inst->alg.import = cryptd_hash_import;
+ inst->alg.setkey = cryptd_hash_setkey;
+ inst->alg.digest = cryptd_hash_digest_enqueue;
+
+ err = ahash_register_instance(tmpl, inst);
+ if (err) {
+ crypto_drop_shash(&ctx->spawn);
+out_free_inst:
+ kfree(inst);
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static struct cryptd_queue queue;
-static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
+static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- return cryptd_alloc_blkcipher(tb, &queue);
+ return cryptd_create_blkcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
- return cryptd_alloc_hash(tb, &queue);
+ return cryptd_create_hash(tmpl, tb, &queue);
}
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
static void cryptd_free(struct crypto_instance *inst)
{
struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
+ struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+
+ switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_drop_shash(&hctx->spawn);
+ kfree(ahash_instance(inst));
+ return;
+ }
crypto_drop_spawn(&ctx->spawn);
kfree(inst);
@@ -577,7 +639,7 @@ static void cryptd_free(struct crypto_instance *inst)
static struct crypto_template cryptd_tmpl = {
.name = "cryptd",
- .alloc = cryptd_alloc,
+ .create = cryptd_create,
.free = cryptd_free,
.module = THIS_MODULE,
};
@@ -620,6 +682,41 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
+struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask)
+{
+ char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+ struct crypto_ahash *tfm;
+
+ if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+ if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+ crypto_free_ahash(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return __cryptd_ahash_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
+
+struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
+{
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
+
+ return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_ahash_child);
+
+void cryptd_free_ahash(struct cryptd_ahash *tfm)
+{
+ crypto_free_ahash(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_ahash);
+
static int __init cryptd_init(void)
{
int err;
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 2d7425f0e7b8..6c3bfabb9d1d 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
+ inst->alg.cra_blkcipher.geniv = "chainiv";
+
out:
crypto_mod_put(alg);
return inst;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e70afd0c73dd..5fc3292483ef 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -11,7 +11,10 @@
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
+#include <crypto/hash.h>
+#include "internal.h"
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -21,11 +24,12 @@
struct gcm_instance_ctx {
struct crypto_skcipher_spawn ctr;
+ struct crypto_ahash_spawn ghash;
};
struct crypto_gcm_ctx {
struct crypto_ablkcipher *ctr;
- struct gf128mul_4k *gf128;
+ struct crypto_ahash *ghash;
};
struct crypto_rfc4106_ctx {
@@ -34,10 +38,9 @@ struct crypto_rfc4106_ctx {
};
struct crypto_gcm_ghash_ctx {
- u32 bytes;
- u32 flags;
- struct gf128mul_4k *gf128;
- u8 buffer[16];
+ unsigned int cryptlen;
+ struct scatterlist *src;
+ crypto_completion_t complete;
};
struct crypto_gcm_req_priv_ctx {
@@ -45,8 +48,11 @@ struct crypto_gcm_req_priv_ctx {
u8 iauth_tag[16];
struct scatterlist src[2];
struct scatterlist dst[2];
- struct crypto_gcm_ghash_ctx ghash;
- struct ablkcipher_request abreq;
+ struct crypto_gcm_ghash_ctx ghash_ctx;
+ union {
+ struct ahash_request ahreq;
+ struct ablkcipher_request abreq;
+ } u;
};
struct crypto_gcm_setkey_result {
@@ -54,6 +60,8 @@ struct crypto_gcm_setkey_result {
struct completion completion;
};
+static void *gcm_zeroes;
+
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
@@ -62,113 +70,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
-static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags,
- struct gf128mul_4k *gf128)
-{
- ctx->bytes = 0;
- ctx->flags = flags;
- ctx->gf128 = gf128;
- memset(ctx->buffer, 0, 16);
-}
-
-static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
- const u8 *src, unsigned int srclen)
-{
- u8 *dst = ctx->buffer;
-
- if (ctx->bytes) {
- int n = min(srclen, ctx->bytes);
- u8 *pos = dst + (16 - ctx->bytes);
-
- ctx->bytes -= n;
- srclen -= n;
-
- while (n--)
- *pos++ ^= *src++;
-
- if (!ctx->bytes)
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- while (srclen >= 16) {
- crypto_xor(dst, src, 16);
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- src += 16;
- srclen -= 16;
- }
-
- if (srclen) {
- ctx->bytes = 16 - srclen;
- while (srclen--)
- *dst++ ^= *src++;
- }
-}
-
-static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
- struct scatterlist *sg, int len)
-{
- struct scatter_walk walk;
- u8 *src;
- int n;
-
- if (!len)
- return;
-
- scatterwalk_start(&walk, sg);
-
- while (len) {
- n = scatterwalk_clamp(&walk, len);
-
- if (!n) {
- scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
- n = scatterwalk_clamp(&walk, len);
- }
-
- src = scatterwalk_map(&walk, 0);
-
- crypto_gcm_ghash_update(ctx, src, n);
- len -= n;
-
- scatterwalk_unmap(src, 0);
- scatterwalk_advance(&walk, n);
- scatterwalk_done(&walk, 0, len);
- if (len)
- crypto_yield(ctx->flags);
- }
-}
-
-static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx)
-{
- u8 *dst = ctx->buffer;
-
- if (ctx->bytes) {
- u8 *tmp = dst + (16 - ctx->bytes);
-
- while (ctx->bytes--)
- *tmp++ ^= 0;
-
- gf128mul_4k_lle((be128 *)dst, ctx->gf128);
- }
-
- ctx->bytes = 0;
-}
-
-static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
- unsigned int authlen,
- unsigned int cryptlen, u8 *dst)
-{
- u8 *buf = ctx->buffer;
- u128 lengths;
-
- lengths.a = cpu_to_be64(authlen * 8);
- lengths.b = cpu_to_be64(cryptlen * 8);
-
- crypto_gcm_ghash_flush(ctx);
- crypto_xor(buf, (u8 *)&lengths, 16);
- gf128mul_4k_lle((be128 *)buf, ctx->gf128);
- crypto_xor(dst, buf, 16);
-}
-
static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
{
struct crypto_gcm_setkey_result *result = req->data;
@@ -184,6 +85,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ struct crypto_ahash *ghash = ctx->ghash;
struct crypto_ablkcipher *ctr = ctx->ctr;
struct {
be128 hash;
@@ -233,13 +135,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err)
goto out;
- if (ctx->gf128 != NULL)
- gf128mul_free_4k(ctx->gf128);
-
- ctx->gf128 = gf128mul_init_4k_lle(&data->hash);
-
- if (ctx->gf128 == NULL)
- err = -ENOMEM;
+ crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
+ crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
+ CRYPTO_TFM_RES_MASK);
out:
kfree(data);
@@ -272,8 +173,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- u32 flags = req->base.tfm->crt_flags;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
struct scatterlist *dst;
__be32 counter = cpu_to_be32(1);
@@ -296,108 +195,398 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
cryptlen + sizeof(pctx->auth_tag),
req->iv);
+}
+
+static inline unsigned int gcm_remain(unsigned int len)
+{
+ len &= 0xfU;
+ return len ? 16 - len : 0;
+}
+
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err);
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
- crypto_gcm_ghash_init(ghash, flags, ctx->gf128);
+static int gcm_hash_update(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx,
+ crypto_completion_t complete,
+ struct scatterlist *src,
+ unsigned int len)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
- crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen);
- crypto_gcm_ghash_flush(ghash);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ complete, req);
+ ahash_request_set_crypt(ahreq, src, NULL, len);
+
+ return crypto_ahash_update(ahreq);
}
-static int crypto_gcm_hash(struct aead_request *req)
+static int gcm_hash_remain(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx,
+ unsigned int remain,
+ crypto_completion_t complete)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ complete, req);
+ sg_init_one(pctx->src, gcm_zeroes, remain);
+ ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
+
+ return crypto_ahash_update(ahreq);
+}
+
+static int gcm_hash_len(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ u128 lengths;
+
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64(gctx->cryptlen * 8);
+ memcpy(pctx->iauth_tag, &lengths, 16);
+ sg_init_one(pctx->src, pctx->iauth_tag, 16);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_len_done, req);
+ ahash_request_set_crypt(ahreq, pctx->src,
+ NULL, sizeof(lengths));
+
+ return crypto_ahash_update(ahreq);
+}
+
+static int gcm_hash_final(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_final_done, req);
+ ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0);
+
+ return crypto_ahash_final(ahreq);
+}
+
+static void gcm_hash_final_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- u8 *auth_tag = pctx->auth_tag;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+
+ if (!err)
+ crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
- crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen);
- crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen,
- auth_tag);
+ gctx->complete(areq, err);
+}
+
+static void gcm_hash_len_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash_final(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_hash_final_done(areq, err);
+}
+
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash_len(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_hash_len_done(areq, err);
+}
+
+static void gcm_hash_crypt_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ unsigned int remain;
+
+ if (!err) {
+ remain = gcm_remain(gctx->cryptlen);
+ BUG_ON(!remain);
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_crypt_remain_done);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_hash_crypt_remain_done(areq, err);
+}
+
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ crypto_completion_t complete;
+ unsigned int remain = 0;
+
+ if (!err && gctx->cryptlen) {
+ remain = gcm_remain(gctx->cryptlen);
+ complete = remain ? gcm_hash_crypt_done :
+ gcm_hash_crypt_remain_done;
+ err = gcm_hash_update(req, pctx, complete,
+ gctx->src, gctx->cryptlen);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ if (remain)
+ gcm_hash_crypt_done(areq, err);
+ else
+ gcm_hash_crypt_remain_done(areq, err);
+}
+
+static void gcm_hash_assoc_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ unsigned int remain;
+
+ if (!err) {
+ remain = gcm_remain(req->assoclen);
+ BUG_ON(!remain);
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_assoc_remain_done);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_hash_assoc_remain_done(areq, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ crypto_completion_t complete;
+ unsigned int remain = 0;
+
+ if (!err && req->assoclen) {
+ remain = gcm_remain(req->assoclen);
+ complete = remain ? gcm_hash_assoc_done :
+ gcm_hash_assoc_remain_done;
+ err = gcm_hash_update(req, pctx, complete,
+ req->assoc, req->assoclen);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ if (remain)
+ gcm_hash_assoc_done(areq, err);
+ else
+ gcm_hash_assoc_remain_done(areq, err);
+}
+
+static int gcm_hash(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct ahash_request *ahreq = &pctx->u.ahreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+ struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int remain;
+ crypto_completion_t complete;
+ int err;
+
+ ahash_request_set_tfm(ahreq, ctx->ghash);
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ gcm_hash_init_done, req);
+ err = crypto_ahash_init(ahreq);
+ if (err)
+ return err;
+ remain = gcm_remain(req->assoclen);
+ complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
+ err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
+ if (err)
+ return err;
+ if (remain) {
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_assoc_remain_done);
+ if (err)
+ return err;
+ }
+ remain = gcm_remain(gctx->cryptlen);
+ complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
+ err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
+ if (err)
+ return err;
+ if (remain) {
+ err = gcm_hash_remain(req, pctx, remain,
+ gcm_hash_crypt_remain_done);
+ if (err)
+ return err;
+ }
+ err = gcm_hash_len(req, pctx);
+ if (err)
+ return err;
+ err = gcm_hash_final(req, pctx);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void gcm_enc_copy_hash(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ u8 *auth_tag = pctx->auth_tag;
scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
crypto_aead_authsize(aead), 1);
- return 0;
}
-static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_enc_hash_done(struct crypto_async_request *areq,
+ int err)
{
struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
- err = crypto_gcm_hash(req);
+ gcm_enc_copy_hash(req, pctx);
aead_request_complete(req, err);
}
+static void gcm_encrypt_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+ if (!err) {
+ err = gcm_hash(req, pctx);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_enc_hash_done(areq, err);
+}
+
static int crypto_gcm_encrypt(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->abreq;
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
int err;
crypto_gcm_init_crypt(abreq, req, req->cryptlen);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- crypto_gcm_encrypt_done, req);
+ gcm_encrypt_done, req);
+
+ gctx->src = req->dst;
+ gctx->cryptlen = req->cryptlen;
+ gctx->complete = gcm_enc_hash_done;
err = crypto_ablkcipher_encrypt(abreq);
if (err)
return err;
- return crypto_gcm_hash(req);
+ err = gcm_hash(req, pctx);
+ if (err)
+ return err;
+
+ crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+ gcm_enc_copy_hash(req, pctx);
+
+ return 0;
}
-static int crypto_gcm_verify(struct aead_request *req)
+static int crypto_gcm_verify(struct aead_request *req,
+ struct crypto_gcm_req_priv_ctx *pctx)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
u8 *auth_tag = pctx->auth_tag;
u8 *iauth_tag = pctx->iauth_tag;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen - authsize;
- crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag);
-
- authsize = crypto_aead_authsize(aead);
+ crypto_xor(auth_tag, iauth_tag, 16);
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
-static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
- err = crypto_gcm_verify(req);
+ err = crypto_gcm_verify(req, pctx);
aead_request_complete(req, err);
}
+static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
+
+ if (!err) {
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ gcm_decrypt_done, req);
+ crypto_gcm_init_crypt(abreq, req, gctx->cryptlen);
+ err = crypto_ablkcipher_decrypt(abreq);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+ }
+
+ gcm_decrypt_done(areq, err);
+}
+
static int crypto_gcm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->abreq;
- struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
- unsigned int cryptlen = req->cryptlen;
+ struct ablkcipher_request *abreq = &pctx->u.abreq;
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int authsize = crypto_aead_authsize(aead);
+ unsigned int cryptlen = req->cryptlen;
int err;
if (cryptlen < authsize)
return -EINVAL;
cryptlen -= authsize;
- crypto_gcm_init_crypt(abreq, req, cryptlen);
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- crypto_gcm_decrypt_done, req);
+ gctx->src = req->src;
+ gctx->cryptlen = cryptlen;
+ gctx->complete = gcm_dec_hash_done;
- crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen);
+ err = gcm_hash(req, pctx);
+ if (err)
+ return err;
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ gcm_decrypt_done, req);
+ crypto_gcm_init_crypt(abreq, req, cryptlen);
err = crypto_ablkcipher_decrypt(abreq);
if (err)
return err;
- return crypto_gcm_verify(req);
+ return crypto_gcm_verify(req, pctx);
}
static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
@@ -406,43 +595,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ablkcipher *ctr;
+ struct crypto_ahash *ghash;
unsigned long align;
int err;
+ ghash = crypto_spawn_ahash(&ictx->ghash);
+ if (IS_ERR(ghash))
+ return PTR_ERR(ghash);
+
ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
- return err;
+ goto err_free_hash;
ctx->ctr = ctr;
- ctx->gf128 = NULL;
+ ctx->ghash = ghash;
align = crypto_tfm_alg_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
tfm->crt_aead.reqsize = align +
- sizeof(struct crypto_gcm_req_priv_ctx) +
- crypto_ablkcipher_reqsize(ctr);
+ offsetof(struct crypto_gcm_req_priv_ctx, u) +
+ max(sizeof(struct ablkcipher_request) +
+ crypto_ablkcipher_reqsize(ctr),
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(ghash));
return 0;
+
+err_free_hash:
+ crypto_free_ahash(ghash);
+ return err;
}
static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->gf128 != NULL)
- gf128mul_free_4k(ctx->gf128);
-
+ crypto_free_ahash(ctx->ghash);
crypto_free_ablkcipher(ctx->ctr);
}
static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
const char *full_name,
- const char *ctr_name)
+ const char *ctr_name,
+ const char *ghash_name)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
struct crypto_alg *ctr;
+ struct crypto_alg *ghash_alg;
+ struct ahash_alg *ghash_ahash_alg;
struct gcm_instance_ctx *ctx;
int err;
@@ -454,17 +656,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
+ ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
+ CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ err = PTR_ERR(ghash_alg);
+ if (IS_ERR(ghash_alg))
+ return ERR_PTR(err);
+
+ err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
- return ERR_PTR(-ENOMEM);
+ goto out_put_ghash;
ctx = crypto_instance_ctx(inst);
+ ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base);
+ err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg,
+ inst);
+ if (err)
+ goto err_free_inst;
+
crypto_set_skcipher_spawn(&ctx->ctr, inst);
err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
if (err)
- goto err_free_inst;
+ goto err_drop_ghash;
ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
@@ -479,7 +695,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "gcm_base(%s)", ctr->cra_driver_name) >=
+ "gcm_base(%s,%s)", ctr->cra_driver_name,
+ ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
@@ -502,12 +719,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
out:
+ crypto_mod_put(ghash_alg);
return inst;
out_put_ctr:
crypto_drop_skcipher(&ctx->ctr);
+err_drop_ghash:
+ crypto_drop_ahash(&ctx->ghash);
err_free_inst:
kfree(inst);
+out_put_ghash:
inst = ERR_PTR(err);
goto out;
}
@@ -532,7 +753,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
- return crypto_gcm_alloc_common(tb, full_name, ctr_name);
+ return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash");
}
static void crypto_gcm_free(struct crypto_instance *inst)
@@ -540,6 +761,7 @@ static void crypto_gcm_free(struct crypto_instance *inst)
struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->ctr);
+ crypto_drop_ahash(&ctx->ghash);
kfree(inst);
}
@@ -554,6 +776,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
{
int err;
const char *ctr_name;
+ const char *ghash_name;
char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
@@ -561,11 +784,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
if (IS_ERR(ctr_name))
return ERR_PTR(err);
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)",
- ctr_name) >= CRYPTO_MAX_ALG_NAME)
+ ghash_name = crypto_attr_alg_name(tb[2]);
+ err = PTR_ERR(ghash_name);
+ if (IS_ERR(ghash_name))
+ return ERR_PTR(err);
+
+ if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
+ ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
- return crypto_gcm_alloc_common(tb, full_name, ctr_name);
+ return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name);
}
static struct crypto_template crypto_gcm_base_tmpl = {
@@ -784,6 +1012,10 @@ static int __init crypto_gcm_module_init(void)
{
int err;
+ gcm_zeroes = kzalloc(16, GFP_KERNEL);
+ if (!gcm_zeroes)
+ return -ENOMEM;
+
err = crypto_register_template(&crypto_gcm_base_tmpl);
if (err)
goto out;
@@ -796,18 +1028,20 @@ static int __init crypto_gcm_module_init(void)
if (err)
goto out_undo_gcm;
-out:
- return err;
+ return 0;
out_undo_gcm:
crypto_unregister_template(&crypto_gcm_tmpl);
out_undo_base:
crypto_unregister_template(&crypto_gcm_base_tmpl);
- goto out;
+out:
+ kfree(gcm_zeroes);
+ return err;
}
static void __exit crypto_gcm_module_exit(void)
{
+ kfree(gcm_zeroes);
crypto_unregister_template(&crypto_rfc4106_tmpl);
crypto_unregister_template(&crypto_gcm_tmpl);
crypto_unregister_template(&crypto_gcm_base_tmpl);
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
new file mode 100644
index 000000000000..be4425616931
--- /dev/null
+++ b/crypto/ghash-generic.c
@@ -0,0 +1,170 @@
+/*
+ * GHASH: digest algorithm for GCM (Galois/Counter Mode).
+ *
+ * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
+ * Copyright (c) 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * The algorithm implementation is copied from gcm.c.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/hash.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_ctx {
+ struct gf128mul_4k *gf128;
+};
+
+struct ghash_desc_ctx {
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+};
+
+static int ghash_init(struct shash_desc *desc)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ memset(dctx, 0, sizeof(*dctx));
+
+ return 0;
+}
+
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx->gf128)
+ gf128mul_free_4k(ctx->gf128);
+ ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+ if (!ctx->gf128)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int ghash_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *dst = dctx->buffer;
+
+ if (dctx->bytes) {
+ int n = min(srclen, dctx->bytes);
+ u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ dctx->bytes -= n;
+ srclen -= n;
+
+ while (n--)
+ *pos++ ^= *src++;
+
+ if (!dctx->bytes)
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ }
+
+ while (srclen >= GHASH_BLOCK_SIZE) {
+ crypto_xor(dst, src, GHASH_BLOCK_SIZE);
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ src += GHASH_BLOCK_SIZE;
+ srclen -= GHASH_BLOCK_SIZE;
+ }
+
+ if (srclen) {
+ dctx->bytes = GHASH_BLOCK_SIZE - srclen;
+ while (srclen--)
+ *dst++ ^= *src++;
+ }
+
+ return 0;
+}
+
+static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+{
+ u8 *dst = dctx->buffer;
+
+ if (dctx->bytes) {
+ u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
+
+ while (dctx->bytes--)
+ *tmp++ ^= 0;
+
+ gf128mul_4k_lle((be128 *)dst, ctx->gf128);
+ }
+
+ dctx->bytes = 0;
+}
+
+static int ghash_final(struct shash_desc *desc, u8 *dst)
+{
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ u8 *buf = dctx->buffer;
+
+ ghash_flush(ctx, dctx);
+ memcpy(dst, buf, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static void ghash_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+ if (ctx->gf128)
+ gf128mul_free_4k(ctx->gf128);
+}
+
+static struct shash_alg ghash_alg = {
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+ .base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ghash_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
+ .cra_exit = ghash_exit_tfm,
+ },
+};
+
+static int __init ghash_mod_init(void)
+{
+ return crypto_register_shash(&ghash_alg);
+}
+
+static void __exit ghash_mod_exit(void)
+{
+ crypto_unregister_shash(&ghash_alg);
+}
+
+module_init(ghash_mod_init);
+module_exit(ghash_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
+MODULE_ALIAS("ghash");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 0ad39c374963..15c2eb534541 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -27,7 +27,7 @@
#include <linux/string.h>
struct hmac_ctx {
- struct crypto_hash *child;
+ struct crypto_shash *hash;
};
static inline void *align_ptr(void *p, unsigned int align)
@@ -35,65 +35,45 @@ static inline void *align_ptr(void *p, unsigned int align)
return (void *)ALIGN((unsigned long)p, align);
}
-static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm)
+static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
{
- return align_ptr(crypto_hash_ctx_aligned(tfm) +
- crypto_hash_blocksize(tfm) * 2 +
- crypto_hash_digestsize(tfm), sizeof(void *));
+ return align_ptr(crypto_shash_ctx_aligned(tfm) +
+ crypto_shash_statesize(tfm) * 2,
+ crypto_tfm_ctx_alignment());
}
-static int hmac_setkey(struct crypto_hash *parent,
+static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- char *opad = ipad + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct crypto_hash *tfm = ctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *ipad = crypto_shash_ctx_aligned(parent);
+ char *opad = ipad + ss;
+ struct hmac_ctx *ctx = align_ptr(opad + ss,
+ crypto_tfm_ctx_alignment());
+ struct crypto_shash *hash = ctx->hash;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(hash)];
+ } desc;
unsigned int i;
+ desc.shash.tfm = hash;
+ desc.shash.flags = crypto_shash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
if (keylen > bs) {
- struct hash_desc desc;
- struct scatterlist tmp;
- int tmplen;
int err;
- desc.tfm = tfm;
- desc.flags = crypto_hash_get_flags(parent);
- desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_init(&desc);
+ err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad);
if (err)
return err;
- tmplen = bs * 2 + ds;
- sg_init_one(&tmp, ipad, tmplen);
-
- for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) {
- memcpy(ipad, inkey, tmplen);
- err = crypto_hash_update(&desc, &tmp, tmplen);
- if (err)
- return err;
- }
-
- if (keylen) {
- memcpy(ipad, inkey, keylen);
- err = crypto_hash_update(&desc, &tmp, keylen);
- if (err)
- return err;
- }
-
- err = crypto_hash_final(&desc, digest);
- if (err)
- return err;
-
- inkey = digest;
keylen = ds;
- }
+ } else
+ memcpy(ipad, inkey, keylen);
- memcpy(ipad, inkey, keylen);
memset(ipad + keylen, 0, bs - keylen);
memcpy(opad, ipad, bs);
@@ -102,184 +82,178 @@ static int hmac_setkey(struct crypto_hash *parent,
opad[i] ^= 0x5c;
}
- return 0;
+ return crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, ipad, bs) ?:
+ crypto_shash_export(&desc.shash, ipad) ?:
+ crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, opad, bs) ?:
+ crypto_shash_export(&desc.shash, opad);
}
-static int hmac_init(struct hash_desc *pdesc)
+static int hmac_export(struct shash_desc *pdesc, void *out)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist tmp;
- int err;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_one(&tmp, ipad, bs);
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_init(&desc);
- if (unlikely(err))
- return err;
-
- return crypto_hash_update(&desc, &tmp, bs);
+ return crypto_shash_export(desc, out);
}
-static int hmac_update(struct hash_desc *pdesc,
- struct scatterlist *sg, unsigned int nbytes)
+static int hmac_import(struct shash_desc *pdesc, const void *in)
{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
- struct hash_desc desc;
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->tfm = ctx->hash;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_hash_update(&desc, sg, nbytes);
+ return crypto_shash_import(desc, in);
}
-static int hmac_final(struct hash_desc *pdesc, u8 *out)
+static int hmac_init(struct shash_desc *pdesc)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *opad = crypto_hash_ctx_aligned(parent) + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist tmp;
- int err;
+ return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
+}
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_one(&tmp, opad, bs + ds);
+static int hmac_update(struct shash_desc *pdesc,
+ const u8 *data, unsigned int nbytes)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- err = crypto_hash_final(&desc, digest);
- if (unlikely(err))
- return err;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- return crypto_hash_digest(&desc, &tmp, bs + ds, out);
+ return crypto_shash_update(desc, data, nbytes);
}
-static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
- unsigned int nbytes, u8 *out)
+static int hmac_final(struct shash_desc *pdesc, u8 *out)
{
- struct crypto_hash *parent = pdesc->tfm;
- int bs = crypto_hash_blocksize(parent);
- int ds = crypto_hash_digestsize(parent);
- char *ipad = crypto_hash_ctx_aligned(parent);
- char *opad = ipad + bs;
- char *digest = opad + bs;
- struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
- struct hash_desc desc;
- struct scatterlist sg1[2];
- struct scatterlist sg2[1];
- int err;
+ struct crypto_shash *parent = pdesc->tfm;
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc.tfm = ctx->child;
- desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- sg_init_table(sg1, 2);
- sg_set_buf(sg1, ipad, bs);
- scatterwalk_sg_chain(sg1, 2, sg);
+ return crypto_shash_final(desc, out) ?:
+ crypto_shash_import(desc, opad) ?:
+ crypto_shash_finup(desc, out, ds, out);
+}
- sg_init_table(sg2, 1);
- sg_set_buf(sg2, opad, bs + ds);
+static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
+ unsigned int nbytes, u8 *out)
+{
- err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest);
- if (unlikely(err))
- return err;
+ struct crypto_shash *parent = pdesc->tfm;
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
- return crypto_hash_digest(&desc, sg2, bs + ds, out);
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_finup(desc, data, nbytes, out) ?:
+ crypto_shash_import(desc, opad) ?:
+ crypto_shash_finup(desc, out, ds, out);
}
static int hmac_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_hash *hash;
+ struct crypto_shash *parent = __crypto_shash_cast(tfm);
+ struct crypto_shash *hash;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
+ struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst);
+ struct hmac_ctx *ctx = hmac_ctx(parent);
- hash = crypto_spawn_hash(spawn);
+ hash = crypto_spawn_shash(spawn);
if (IS_ERR(hash))
return PTR_ERR(hash);
- ctx->child = hash;
+ parent->descsize = sizeof(struct shash_desc) +
+ crypto_shash_descsize(hash);
+
+ ctx->hash = hash;
return 0;
}
static void hmac_exit_tfm(struct crypto_tfm *tfm)
{
- struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
- crypto_free_hash(ctx->child);
+ struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm));
+ crypto_free_shash(ctx->hash);
}
-static void hmac_free(struct crypto_instance *inst)
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
-}
-
-static struct crypto_instance *hmac_alloc(struct rtattr **tb)
-{
- struct crypto_instance *inst;
+ struct shash_instance *inst;
struct crypto_alg *alg;
+ struct shash_alg *salg;
int err;
int ds;
+ int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
- return ERR_PTR(err);
-
- alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
- CRYPTO_ALG_TYPE_HASH_MASK);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
-
- inst = ERR_PTR(-EINVAL);
- ds = alg->cra_type == &crypto_hash_type ?
- alg->cra_hash.digestsize :
- alg->cra_type ?
- __crypto_shash_alg(alg)->digestsize :
- alg->cra_digest.dia_digestsize;
- if (ds > alg->cra_blocksize)
+ return err;
+
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
+
+ err = -EINVAL;
+ ds = salg->digestsize;
+ ss = salg->statesize;
+ alg = &salg->base;
+ if (ds > alg->cra_blocksize ||
+ ss < alg->cra_blocksize)
goto out_put_alg;
- inst = crypto_alloc_instance("hmac", alg);
+ inst = shash_alloc_instance("hmac", alg);
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_hash_type;
-
- inst->alg.cra_hash.digestsize = ds;
-
- inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
- ALIGN(inst->alg.cra_blocksize * 2 + ds,
- sizeof(void *));
-
- inst->alg.cra_init = hmac_init_tfm;
- inst->alg.cra_exit = hmac_exit_tfm;
-
- inst->alg.cra_hash.init = hmac_init;
- inst->alg.cra_hash.update = hmac_update;
- inst->alg.cra_hash.final = hmac_final;
- inst->alg.cra_hash.digest = hmac_digest;
- inst->alg.cra_hash.setkey = hmac_setkey;
+ err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg,
+ shash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ ss = ALIGN(ss, alg->cra_alignmask + 1);
+ inst->alg.digestsize = ds;
+ inst->alg.statesize = ss;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
+ ALIGN(ss * 2, crypto_tfm_ctx_alignment());
+
+ inst->alg.base.cra_init = hmac_init_tfm;
+ inst->alg.base.cra_exit = hmac_exit_tfm;
+
+ inst->alg.init = hmac_init;
+ inst->alg.update = hmac_update;
+ inst->alg.final = hmac_final;
+ inst->alg.finup = hmac_finup;
+ inst->alg.export = hmac_export;
+ inst->alg.import = hmac_import;
+ inst->alg.setkey = hmac_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
+ return err;
}
static struct crypto_template hmac_tmpl = {
.name = "hmac",
- .alloc = hmac_alloc,
- .free = hmac_free,
+ .create = hmac_create,
+ .free = shash_free_instance,
.module = THIS_MODULE,
};
diff --git a/crypto/internal.h b/crypto/internal.h
index 113579a82dff..2d226362e594 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -25,12 +25,7 @@
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
-
-#ifdef CONFIG_CRYPTO_FIPS
-extern int fips_enabled;
-#else
-#define fips_enabled 0
-#endif
+#include <linux/fips.h>
/* Crypto notification events. */
enum {
@@ -65,18 +60,6 @@ static inline void crypto_exit_proc(void)
{ }
#endif
-static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg)
-{
- unsigned int len = alg->cra_ctxsize;
-
- if (alg->cra_alignmask) {
- len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
- len += alg->cra_digest.dia_digestsize;
- }
-
- return len;
-}
-
static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
@@ -91,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
-int crypto_init_digest_ops(struct crypto_tfm *tfm);
-int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
-void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);
@@ -111,12 +91,12 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend);
+struct crypto_alg *crypto_find_alg(const char *alg_name,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask);
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask);
-int crypto_register_instance(struct crypto_template *tmpl,
- struct crypto_instance *inst);
-
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index bcadc03726b7..f7c4a7d7412e 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
-static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
-static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm,
- const struct crypto_type *frontend)
+static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
{
return 0;
}
diff --git a/crypto/rng.c b/crypto/rng.c
index 6e94bc735578..ba05e7380e76 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -123,4 +123,4 @@ void crypto_put_default_rng(void)
EXPORT_SYMBOL_GPL(crypto_put_default_rng);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Random Number Genertor");
+MODULE_DESCRIPTION("Random Number Generator");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 9efef20454cb..0416091bf45a 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -25,31 +25,21 @@
#include <crypto/sha.h>
#include <asm/byteorder.h>
-struct sha1_ctx {
- u64 count;
- u32 state[5];
- u8 buffer[64];
-};
-
static int sha1_init(struct shash_desc *desc)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
- static const struct sha1_ctx initstate = {
- 0,
- { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- { 0, }
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
- *sctx = initstate;
-
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
const u8 *src;
@@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
- struct sha1_ctx *sctx = shash_desc_ctx(desc);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
u32 i, index, padlen;
__be64 bits;
@@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
return 0;
}
+static int sha1_export(struct shash_desc *desc, void *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int sha1_import(struct shash_desc *desc, const void *in)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.final = sha1_final,
- .descsize = sizeof(struct sha1_ctx),
+ .export = sha1_export,
+ .import = sha1_import,
+ .descsize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 6349d8339d37..c48459ebf05b 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -25,12 +25,6 @@
#include <crypto/sha.h>
#include <asm/byteorder.h>
-struct sha256_ctx {
- u32 count[2];
- u32 state[8];
- u8 buf[128];
-};
-
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
@@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input)
static int sha224_init(struct shash_desc *desc)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1;
sctx->state[2] = SHA224_H2;
@@ -231,15 +225,14 @@ static int sha224_init(struct shash_desc *desc)
sctx->state[5] = SHA224_H5;
sctx->state[6] = SHA224_H6;
sctx->state[7] = SHA224_H7;
- sctx->count[0] = 0;
- sctx->count[1] = 0;
+ sctx->count = 0;
return 0;
}
static int sha256_init(struct shash_desc *desc)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
sctx->state[2] = SHA256_H2;
@@ -248,7 +241,7 @@ static int sha256_init(struct shash_desc *desc)
sctx->state[5] = SHA256_H5;
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
- sctx->count[0] = sctx->count[1] = 0;
+ sctx->count = 0;
return 0;
}
@@ -256,58 +249,54 @@ static int sha256_init(struct shash_desc *desc)
static int sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
- unsigned int i, index, part_len;
-
- /* Compute number of bytes mod 128 */
- index = (unsigned int)((sctx->count[0] >> 3) & 0x3f);
-
- /* Update number of bits */
- if ((sctx->count[0] += (len << 3)) < (len << 3)) {
- sctx->count[1]++;
- sctx->count[1] += (len >> 29);
- }
-
- part_len = 64 - index;
-
- /* Transform as many times as possible. */
- if (len >= part_len) {
- memcpy(&sctx->buf[index], data, part_len);
- sha256_transform(sctx->state, sctx->buf);
-
- for (i = part_len; i + 63 < len; i += 64)
- sha256_transform(sctx->state, &data[i]);
- index = 0;
- } else {
- i = 0;
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial, done;
+ const u8 *src;
+
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
+
+ if ((partial + len) > 63) {
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buf + partial, data, done + 64);
+ src = sctx->buf;
+ }
+
+ do {
+ sha256_transform(sctx->state, src);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
+
+ partial = 0;
}
-
- /* Buffer remaining input */
- memcpy(&sctx->buf[index], &data[i], len-i);
+ memcpy(sctx->buf + partial, src, len - done);
return 0;
}
static int sha256_final(struct shash_desc *desc, u8 *out)
{
- struct sha256_ctx *sctx = shash_desc_ctx(desc);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
- __be32 bits[2];
+ __be64 bits;
unsigned int index, pad_len;
int i;
static const u8 padding[64] = { 0x80, };
/* Save number of bits */
- bits[1] = cpu_to_be32(sctx->count[0]);
- bits[0] = cpu_to_be32(sctx->count[1]);
+ bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64. */
- index = (sctx->count[0] >> 3) & 0x3f;
+ index = sctx->count & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
sha256_update(desc, padding, pad_len);
/* Append length (before padding) */
- sha256_update(desc, (const u8 *)bits, sizeof(bits));
+ sha256_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
@@ -331,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
return 0;
}
+static int sha256_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(out, sctx, sizeof(*sctx));
+ return 0;
+}
+
+static int sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ memcpy(sctx, in, sizeof(*sctx));
+ return 0;
+}
+
static struct shash_alg sha256 = {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init,
.update = sha256_update,
.final = sha256_final,
- .descsize = sizeof(struct sha256_ctx),
+ .export = sha256_export,
+ .import = sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
@@ -351,7 +359,7 @@ static struct shash_alg sha224 = {
.init = sha224_init,
.update = sha256_update,
.final = sha224_final,
- .descsize = sizeof(struct sha256_ctx),
+ .descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-generic",
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 3bea38d12242..9ed9f60316e5 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -21,12 +21,6 @@
#include <linux/percpu.h>
#include <asm/byteorder.h>
-struct sha512_ctx {
- u64 state[8];
- u32 count[4];
- u8 buf[128];
-};
-
static DEFINE_PER_CPU(u64[80], msg_schedule);
static inline u64 Ch(u64 x, u64 y, u64 z)
@@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input)
static int
sha512_init(struct shash_desc *desc)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA512_H0;
sctx->state[1] = SHA512_H1;
sctx->state[2] = SHA512_H2;
@@ -150,7 +144,7 @@ sha512_init(struct shash_desc *desc)
sctx->state[5] = SHA512_H5;
sctx->state[6] = SHA512_H6;
sctx->state[7] = SHA512_H7;
- sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
+ sctx->count[0] = sctx->count[1] = 0;
return 0;
}
@@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc)
static int
sha384_init(struct shash_desc *desc)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA384_H0;
sctx->state[1] = SHA384_H1;
sctx->state[2] = SHA384_H2;
@@ -167,7 +161,7 @@ sha384_init(struct shash_desc *desc)
sctx->state[5] = SHA384_H5;
sctx->state[6] = SHA384_H6;
sctx->state[7] = SHA384_H7;
- sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0;
+ sctx->count[0] = sctx->count[1] = 0;
return 0;
}
@@ -175,20 +169,16 @@ sha384_init(struct shash_desc *desc)
static int
sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, part_len;
/* Compute number of bytes mod 128 */
- index = (unsigned int)((sctx->count[0] >> 3) & 0x7F);
-
- /* Update number of bits */
- if ((sctx->count[0] += (len << 3)) < (len << 3)) {
- if ((sctx->count[1] += 1) < 1)
- if ((sctx->count[2] += 1) < 1)
- sctx->count[3]++;
- sctx->count[1] += (len >> 29);
- }
+ index = sctx->count[0] & 0x7f;
+
+ /* Update number of bytes */
+ if (!(sctx->count[0] += len))
+ sctx->count[1]++;
part_len = 128 - index;
@@ -214,21 +204,19 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
static int
sha512_final(struct shash_desc *desc, u8 *hash)
{
- struct sha512_ctx *sctx = shash_desc_ctx(desc);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
static u8 padding[128] = { 0x80, };
__be64 *dst = (__be64 *)hash;
- __be32 bits[4];
+ __be64 bits[2];
unsigned int index, pad_len;
int i;
/* Save number of bits */
- bits[3] = cpu_to_be32(sctx->count[0]);
- bits[2] = cpu_to_be32(sctx->count[1]);
- bits[1] = cpu_to_be32(sctx->count[2]);
- bits[0] = cpu_to_be32(sctx->count[3]);
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
/* Pad out to 112 mod 128. */
- index = (sctx->count[0] >> 3) & 0x7f;
+ index = sctx->count[0] & 0x7f;
pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
sha512_update(desc, padding, pad_len);
@@ -240,7 +228,7 @@ sha512_final(struct shash_desc *desc, u8 *hash)
dst[i] = cpu_to_be64(sctx->state[i]);
/* Zeroize sensitive information. */
- memset(sctx, 0, sizeof(struct sha512_ctx));
+ memset(sctx, 0, sizeof(struct sha512_state));
return 0;
}
@@ -262,7 +250,7 @@ static struct shash_alg sha512 = {
.init = sha512_init,
.update = sha512_update,
.final = sha512_final,
- .descsize = sizeof(struct sha512_ctx),
+ .descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
@@ -276,7 +264,7 @@ static struct shash_alg sha384 = {
.init = sha384_init,
.update = sha512_update,
.final = sha384_final,
- .descsize = sizeof(struct sha512_ctx),
+ .descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
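
sha512 now tracks the message length as two 64-bit byte counters and only converts to the
128-bit big-endian bit count in sha512_final(), via bits[1] = count[0] << 3 and
bits[0] = count[1] << 3 | count[0] >> 61. A standalone check of that conversion at the
point where the low word wraps (hypothetical demo, not kernel code):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      /* byte count just past 2^61: the low word wraps when scaled to bits */
      uint64_t count0 = ((uint64_t)1 << 61) + 1, count1 = 0;
      uint64_t bits_hi = count1 << 3 | count0 >> 61;
      uint64_t bits_lo = count0 << 3;

      printf("hi=%llu lo=%llu\n",
             (unsigned long long)bits_hi, (unsigned long long)bits_lo);
      return 0;
  }

A byte count of 2^61 + 1 is 2^64 + 8 bits, so the program prints hi=1 lo=8.
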
diff --git a/crypto/shash.c b/crypto/shash.c
index 2ccc8b0076ce..91f7b9d83881 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -22,6 +22,12 @@
static const struct crypto_type crypto_shash_type;
+static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return -ENOSYS;
+}
+
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -39,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
err = shash->setkey(tfm, alignbuffer, keylen);
- memset(alignbuffer, 0, keylen);
- kfree(buffer);
+ kzfree(buffer);
return err;
}
@@ -50,9 +55,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (!shash->setkey)
- return -ENOSYS;
-
if ((unsigned long)key & alignmask)
return shash_setkey_unaligned(tfm, key, keylen);
@@ -74,15 +76,19 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned long alignmask = crypto_shash_alignmask(tfm);
unsigned int unaligned_len = alignmask + 1 -
((unsigned long)data & alignmask);
- u8 buf[shash_align_buffer_size(unaligned_len, alignmask)]
+ u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
__attribute__ ((aligned));
+ u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
+ int err;
if (unaligned_len > len)
unaligned_len = len;
memcpy(buf, data, unaligned_len);
+ err = shash->update(desc, buf, unaligned_len);
+ memset(buf, 0, unaligned_len);
- return shash->update(desc, buf, unaligned_len) ?:
+ return err ?:
shash->update(desc, data + unaligned_len, len - unaligned_len);
}
@@ -106,12 +112,19 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
unsigned long alignmask = crypto_shash_alignmask(tfm);
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned int ds = crypto_shash_digestsize(tfm);
- u8 buf[shash_align_buffer_size(ds, alignmask)]
+ u8 ubuf[shash_align_buffer_size(ds, alignmask)]
__attribute__ ((aligned));
+ u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
err = shash->final(desc, buf);
+ if (err)
+ goto out;
+
memcpy(out, buf, ds);
+
+out:
+ memset(buf, 0, ds);
return err;
}
@@ -142,8 +155,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (((unsigned long)data | (unsigned long)out) & alignmask ||
- !shash->finup)
+ if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_finup_unaligned(desc, data, len, out);
return shash->finup(desc, data, len, out);
@@ -154,8 +166,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return crypto_shash_init(desc) ?:
- crypto_shash_update(desc, data, len) ?:
- crypto_shash_final(desc, out);
+ crypto_shash_finup(desc, data, len, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -165,27 +176,24 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
- if (((unsigned long)data | (unsigned long)out) & alignmask ||
- !shash->digest)
+ if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);
return shash->digest(desc, data, len, out);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
-int crypto_shash_import(struct shash_desc *desc, const u8 *in)
+static int shash_default_export(struct shash_desc *desc, void *out)
{
- struct crypto_shash *tfm = desc->tfm;
- struct shash_alg *alg = crypto_shash_alg(tfm);
-
- memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
-
- if (alg->reinit)
- alg->reinit(desc);
+ memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
+ return 0;
+}
+static int shash_default_import(struct shash_desc *desc, const void *in)
+{
+ memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_shash_import);
static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
@@ -206,9 +214,8 @@ static int shash_async_init(struct ahash_request *req)
return crypto_shash_init(desc);
}
-static int shash_async_update(struct ahash_request *req)
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
- struct shash_desc *desc = ahash_request_ctx(req);
struct crypto_hash_walk walk;
int nbytes;
@@ -218,13 +225,51 @@ static int shash_async_update(struct ahash_request *req)
return nbytes;
}
+EXPORT_SYMBOL_GPL(shash_ahash_update);
+
+static int shash_async_update(struct ahash_request *req)
+{
+ return shash_ahash_update(req, ahash_request_ctx(req));
+}
static int shash_async_final(struct ahash_request *req)
{
return crypto_shash_final(ahash_request_ctx(req), req->result);
}
-static int shash_async_digest(struct ahash_request *req)
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
+{
+ struct crypto_hash_walk walk;
+ int nbytes;
+
+ nbytes = crypto_hash_walk_first(req, &walk);
+ if (!nbytes)
+ return crypto_shash_final(desc, req->result);
+
+ do {
+ nbytes = crypto_hash_walk_last(&walk) ?
+ crypto_shash_finup(desc, walk.data, nbytes,
+ req->result) :
+ crypto_shash_update(desc, walk.data, nbytes);
+ nbytes = crypto_hash_walk_done(&walk, nbytes);
+ } while (nbytes > 0);
+
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_finup);
+
+static int shash_async_finup(struct ahash_request *req)
+{
+ struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct shash_desc *desc = ahash_request_ctx(req);
+
+ desc->tfm = *ctx;
+ desc->flags = req->base.flags;
+
+ return shash_ahash_finup(req, desc);
+}
+
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
struct scatterlist *sg = req->src;
unsigned int offset = sg->offset;
@@ -232,34 +277,40 @@ static int shash_async_digest(struct ahash_request *req)
int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
- struct crypto_shash **ctx =
- crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct shash_desc *desc = ahash_request_ctx(req);
void *data;
- desc->tfm = *ctx;
- desc->flags = req->base.flags;
-
data = crypto_kmap(sg_page(sg), 0);
err = crypto_shash_digest(desc, data + offset, nbytes,
req->result);
crypto_kunmap(data, 0);
crypto_yield(desc->flags);
- goto out;
- }
+ } else
+ err = crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
- err = shash_async_init(req);
- if (err)
- goto out;
+ return err;
+}
+EXPORT_SYMBOL_GPL(shash_ahash_digest);
- err = shash_async_update(req);
- if (err)
- goto out;
+static int shash_async_digest(struct ahash_request *req)
+{
+ struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct shash_desc *desc = ahash_request_ctx(req);
- err = shash_async_final(req);
+ desc->tfm = *ctx;
+ desc->flags = req->base.flags;
-out:
- return err;
+ return shash_ahash_digest(req, desc);
+}
+
+static int shash_async_export(struct ahash_request *req, void *out)
+{
+ return crypto_shash_export(ahash_request_ctx(req), out);
+}
+
+static int shash_async_import(struct ahash_request *req, const void *in)
+{
+ return crypto_shash_import(ahash_request_ctx(req), in);
}
static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
@@ -269,11 +320,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
crypto_free_shash(*ctx);
}
-static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
{
struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg);
- struct ahash_tfm *crt = &tfm->crt_ahash;
+ struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
@@ -291,11 +342,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->init = shash_async_init;
crt->update = shash_async_update;
- crt->final = shash_async_final;
+ crt->final = shash_async_final;
+ crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
- crt->setkey = shash_async_setkey;
- crt->digestsize = alg->digestsize;
+ if (alg->setkey)
+ crt->setkey = shash_async_setkey;
+ if (alg->export)
+ crt->export = shash_async_export;
+ if (alg->import)
+ crt->import = shash_async_import;
+
crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
return 0;
@@ -304,14 +361,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
unsigned int keylen)
{
- struct shash_desc *desc = crypto_hash_ctx(tfm);
+ struct shash_desc **descp = crypto_hash_ctx(tfm);
+ struct shash_desc *desc = *descp;
return crypto_shash_setkey(desc->tfm, key, keylen);
}
static int shash_compat_init(struct hash_desc *hdesc)
{
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
desc->flags = hdesc->flags;
@@ -321,7 +380,8 @@ static int shash_compat_init(struct hash_desc *hdesc)
static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
unsigned int len)
{
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
struct crypto_hash_walk walk;
int nbytes;
@@ -334,7 +394,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
{
- return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+
+ return crypto_shash_final(*descp, out);
}
static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
@@ -344,7 +406,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
int err;
if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
- struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
+ struct shash_desc *desc = *descp;
void *data;
desc->flags = hdesc->flags;
@@ -372,9 +435,11 @@ out:
static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
{
- struct shash_desc *desc= crypto_tfm_ctx(tfm);
+ struct shash_desc **descp = crypto_tfm_ctx(tfm);
+ struct shash_desc *desc = *descp;
crypto_free_shash(desc->tfm);
+ kzfree(desc);
}
static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
@@ -382,8 +447,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
struct hash_tfm *crt = &tfm->crt_hash;
struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg);
- struct shash_desc *desc = crypto_tfm_ctx(tfm);
+ struct shash_desc **descp = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
+ struct shash_desc *desc;
if (!crypto_mod_get(calg))
return -EAGAIN;
@@ -394,6 +460,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
return PTR_ERR(shash);
}
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!desc) {
+ crypto_free_shash(shash);
+ return -ENOMEM;
+ }
+
+ *descp = desc;
desc->tfm = shash;
tfm->exit = crypto_exit_shash_ops_compat;
@@ -413,8 +487,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
switch (mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_HASH_MASK:
return crypto_init_shash_ops_compat(tfm);
- case CRYPTO_ALG_TYPE_AHASH_MASK:
- return crypto_init_shash_ops_async(tfm);
}
return -EINVAL;
@@ -423,26 +495,23 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
u32 mask)
{
- struct shash_alg *salg = __crypto_shash_alg(alg);
-
switch (mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_HASH_MASK:
- return sizeof(struct shash_desc) + salg->descsize;
- case CRYPTO_ALG_TYPE_AHASH_MASK:
- return sizeof(struct crypto_shash *);
+ return sizeof(struct shash_desc *);
}
return 0;
}
-static int crypto_shash_init_tfm(struct crypto_tfm *tfm,
- const struct crypto_type *frontend)
+static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
+ struct crypto_shash *hash = __crypto_shash_cast(tfm);
+
+ hash->descsize = crypto_shash_alg(hash)->descsize;
return 0;
}
-static unsigned int crypto_shash_extsize(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
@@ -456,7 +525,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "type : shash\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", salg->digestsize);
- seq_printf(m, "descsize : %u\n", salg->descsize);
}
static const struct crypto_type crypto_shash_type = {
@@ -480,18 +548,43 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_shash);
-int crypto_register_shash(struct shash_alg *alg)
+static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->base;
if (alg->digestsize > PAGE_SIZE / 8 ||
- alg->descsize > PAGE_SIZE / 8)
+ alg->descsize > PAGE_SIZE / 8 ||
+ alg->statesize > PAGE_SIZE / 8)
return -EINVAL;
base->cra_type = &crypto_shash_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
+ if (!alg->finup)
+ alg->finup = shash_finup_unaligned;
+ if (!alg->digest)
+ alg->digest = shash_digest_unaligned;
+ if (!alg->export) {
+ alg->export = shash_default_export;
+ alg->import = shash_default_import;
+ alg->statesize = alg->descsize;
+ }
+ if (!alg->setkey)
+ alg->setkey = shash_no_setkey;
+
+ return 0;
+}
+
+int crypto_register_shash(struct shash_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = shash_prepare_alg(alg);
+ if (err)
+ return err;
+
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_shash);
@@ -502,5 +595,44 @@ int crypto_unregister_shash(struct shash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);
+int shash_register_instance(struct crypto_template *tmpl,
+ struct shash_instance *inst)
+{
+ int err;
+
+ err = shash_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, shash_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(shash_register_instance);
+
+void shash_free_instance(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(shash_instance(inst));
+}
+EXPORT_SYMBOL_GPL(shash_free_instance);
+
+int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
+ struct shash_alg *alg,
+ struct crypto_instance *inst)
+{
+ return crypto_init_spawn2(&spawn->base, &alg->base, inst,
+ &crypto_shash_type);
+}
+EXPORT_SYMBOL_GPL(crypto_init_shash_spawn);
+
+struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+{
+ struct crypto_alg *alg;
+
+ alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask);
+ return IS_ERR(alg) ? ERR_CAST(alg) :
+ container_of(alg, struct shash_alg, base);
+}
+EXPORT_SYMBOL_GPL(shash_attr_alg);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous cryptographic hash type");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d59ba5079d14..aa3f84ccc786 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -45,6 +45,9 @@
*/
static unsigned int sec;
+static char *alg = NULL;
+static u32 type;
+static u32 mask;
static int mode;
static char *tvmem[TVMEMSIZE];
@@ -716,6 +719,10 @@ static int do_test(int m)
ret += tcrypt_test("hmac(rmd160)");
break;
+ case 109:
+ ret += tcrypt_test("vmac(aes)");
+ break;
+
case 150:
ret += tcrypt_test("ansi_cprng");
break;
@@ -885,6 +892,12 @@ static int do_test(int m)
return ret;
}
+static int do_alg_test(const char *alg, u32 type, u32 mask)
+{
+ return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
+ 0 : -ENOENT;
+}
+
static int __init tcrypt_mod_init(void)
{
int err = -ENOMEM;
@@ -896,7 +909,11 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
- err = do_test(mode);
+ if (alg)
+ err = do_alg_test(alg, type, mask);
+ else
+ err = do_test(mode);
+
if (err) {
printk(KERN_ERR "tcrypt: one or more tests failed!\n");
goto err_free_tv;
@@ -928,6 +945,9 @@ static void __exit tcrypt_mod_fini(void) { }
module_init(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
+module_param(alg, charp, 0);
+module_param(type, uint, 0);
+module_param(mask, uint, 0);
module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
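
The new alg, type and mask parameters let tcrypt probe for a single named algorithm
(via crypto_has_alg()) instead of running a numeric test mode. A hedged usage sketch;
exact behaviour depends on the kernel configuration and which algorithms are built:

  # classic numeric mode, e.g. the new vmac(aes) test case
  modprobe tcrypt mode=109

  # probe for a named algorithm with the new parameters
  modprobe tcrypt alg="vmac(aes)" type=0 mask=0
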
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e9e9d84293b9..6d5b746637be 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -190,10 +190,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
hash_buff = xbuf[0];
- ret = -EINVAL;
- if (WARN_ON(template[i].psize > PAGE_SIZE))
- goto out;
-
memcpy(hash_buff, template[i].plaintext, template[i].psize);
sg_init_one(&sg[0], hash_buff, template[i].psize);
@@ -2252,6 +2248,15 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "vmac(aes)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = {
+ .vecs = aes_vmac128_tv_template,
+ .count = VMAC_AES_TEST_VECTORS
+ }
+ }
+ }, {
.alg = "wp256",
.test = alg_test_hash,
.suite = {
@@ -2348,6 +2353,7 @@ static int alg_find_test(const char *alg)
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
int i;
+ int j;
int rc;
if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
@@ -2369,14 +2375,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
}
i = alg_find_test(alg);
- if (i < 0)
+ j = alg_find_test(driver);
+ if (i < 0 && j < 0)
goto notest;
- if (fips_enabled && !alg_test_descs[i].fips_allowed)
+ if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
+ (j >= 0 && !alg_test_descs[j].fips_allowed)))
goto non_fips_alg;
- rc = alg_test_descs[i].test(alg_test_descs + i, driver,
- type, mask);
+ rc = 0;
+ if (i >= 0)
+ rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
+ type, mask);
+ if (j >= 0)
+ rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
+ type, mask);
+
test_done:
if (fips_enabled && rc)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 69316228fc19..9963b18983ab 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1654,6 +1654,22 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
}
};
+#define VMAC_AES_TEST_VECTORS 1
+static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
+ '\x02', '\x03', '\x02', '\x02',
+ '\x02', '\x04', '\x01', '\x07',
+ '\x04', '\x01', '\x04', '\x03',};
+static struct hash_testvec aes_vmac128_tv_template[] = {
+ {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .plaintext = vmac_string,
+ .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
+ .psize = 128,
+ .ksize = 16,
+ },
+};
+
/*
* SHA384 HMAC test vectors from RFC4231
*/
diff --git a/crypto/vmac.c b/crypto/vmac.c
new file mode 100644
index 000000000000..0a9468e575de
--- /dev/null
+++ b/crypto/vmac.c
@@ -0,0 +1,678 @@
+/*
+ * Modified to interface to the Linux kernel
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+/* --------------------------------------------------------------------------
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is herby placed in the public domain.
+ * The authors offers no warranty. Use at your own risk.
+ * Please send bug reports to the authors.
+ * Last modified: 17 APR 08, 1700 PDT
+ * ----------------------------------------------------------------------- */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <asm/byteorder.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/vmac.h>
+#include <crypto/internal/hash.h>
+
+/*
+ * Constants and masks
+ */
+#define UINT64_C(x) x##ULL
+const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
+const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
+const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
+const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
+const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
+
+#ifdef __LITTLE_ENDIAN
+#define INDEX_HIGH 1
+#define INDEX_LOW 0
+#else
+#define INDEX_HIGH 0
+#define INDEX_LOW 1
+#endif
+
+/*
+ * The following routines are used in this implementation. They are
+ * written via macros to simulate zero-overhead call-by-reference.
+ *
+ * MUL64: 64x64->128-bit multiplication
+ * PMUL64: assumes top bits cleared on inputs
+ * ADD128: 128x128->128-bit addition
+ */
+
+#define ADD128(rh, rl, ih, il) \
+ do { \
+ u64 _il = (il); \
+ (rl) += (_il); \
+ if ((rl) < (_il)) \
+ (rh)++; \
+ (rh) += (ih); \
+ } while (0)
+
+#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
+
+#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
+ do { \
+ u64 _i1 = (i1), _i2 = (i2); \
+ u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
+ rh = MUL32(_i1>>32, _i2>>32); \
+ rl = MUL32(_i1, _i2); \
+ ADD128(rh, rl, (m >> 32), (m << 32)); \
+ } while (0)
+
+#define MUL64(rh, rl, i1, i2) \
+ do { \
+ u64 _i1 = (i1), _i2 = (i2); \
+ u64 m1 = MUL32(_i1, _i2>>32); \
+ u64 m2 = MUL32(_i1>>32, _i2); \
+ rh = MUL32(_i1>>32, _i2>>32); \
+ rl = MUL32(_i1, _i2); \
+ ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
+ ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
+ } while (0)
+
+/*
+ * For highest performance the L1 NH and L2 polynomial hashes should be
+ * carefully implemented to take advantage of one's target architecture.
+ * Here these two hash functions are defined multiple times; once for
+ * 64-bit architectures, once for 32-bit SSE2 architectures, and once
+ * for the remaining (32-bit) architectures.
+ * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
+ * Optionally, nh_vmac_nhbytes can be defined (for multiples of
+ * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
+ * NH computations at once).
+ */
+
+#ifdef CONFIG_64BIT
+
+#define nh_16(mp, kp, nw, rh, rl) \
+ do { \
+ int i; u64 th, tl; \
+ rh = rl = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ } \
+ } while (0)
+
+#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
+ do { \
+ int i; u64 th, tl; \
+ rh1 = rl1 = rh = rl = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ ADD128(rh1, rl1, th, tl); \
+ } \
+ } while (0)
+
+#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
+#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
+ do { \
+ int i; u64 th, tl; \
+ rh = rl = 0; \
+ for (i = 0; i < nw; i += 8) { \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ ADD128(rh, rl, th, tl); \
+ } \
+ } while (0)
+
+#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
+ do { \
+ int i; u64 th, tl; \
+ rh1 = rl1 = rh = rl = 0; \
+ for (i = 0; i < nw; i += 8) { \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
+ le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
+ le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
+ le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
+ le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
+ le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
+ ADD128(rh1, rl1, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
+ le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
+ ADD128(rh, rl, th, tl); \
+ MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
+ le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
+ ADD128(rh1, rl1, th, tl); \
+ } \
+ } while (0)
+#endif
+
+#define poly_step(ah, al, kh, kl, mh, ml) \
+ do { \
+ u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
+ /* compute ab*cd, put bd into result registers */ \
+ PMUL64(t3h, t3l, al, kh); \
+ PMUL64(t2h, t2l, ah, kl); \
+ PMUL64(t1h, t1l, ah, 2*kh); \
+ PMUL64(ah, al, al, kl); \
+ /* add 2 * ac to result */ \
+ ADD128(ah, al, t1h, t1l); \
+ /* add together ad + bc */ \
+ ADD128(t2h, t2l, t3h, t3l); \
+ /* now (ah,al), (t2l,2*t2h) need summing */ \
+ /* first add the high registers, carrying into t2h */ \
+ ADD128(t2h, ah, z, t2l); \
+ /* double t2h and add top bit of ah */ \
+ t2h = 2 * t2h + (ah >> 63); \
+ ah &= m63; \
+ /* now add the low registers */ \
+ ADD128(ah, al, mh, ml); \
+ ADD128(ah, al, z, t2h); \
+ } while (0)
+
+#else /* ! CONFIG_64BIT */
+
+#ifndef nh_16
+#define nh_16(mp, kp, nw, rh, rl) \
+ do { \
+ u64 t1, t2, m1, m2, t; \
+ int i; \
+ rh = rl = t = 0; \
+ for (i = 0; i < nw; i += 2) { \
+ t1 = le64_to_cpup(mp+i) + kp[i]; \
+ t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
+ m2 = MUL32(t1 >> 32, t2); \
+ m1 = MUL32(t1, t2 >> 32); \
+ ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
+ MUL32(t1, t2)); \
+ rh += (u64)(u32)(m1 >> 32) \
+ + (u32)(m2 >> 32); \
+ t += (u64)(u32)m1 + (u32)m2; \
+ } \
+ ADD128(rh, rl, (t >> 32), (t << 32)); \
+ } while (0)
+#endif
+
+static void poly_step_func(u64 *ahi, u64 *alo,
+ const u64 *kh, const u64 *kl,
+ const u64 *mh, const u64 *ml)
+{
+#define a0 (*(((u32 *)alo)+INDEX_LOW))
+#define a1 (*(((u32 *)alo)+INDEX_HIGH))
+#define a2 (*(((u32 *)ahi)+INDEX_LOW))
+#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
+#define k0 (*(((u32 *)kl)+INDEX_LOW))
+#define k1 (*(((u32 *)kl)+INDEX_HIGH))
+#define k2 (*(((u32 *)kh)+INDEX_LOW))
+#define k3 (*(((u32 *)kh)+INDEX_HIGH))
+
+ u64 p, q, t;
+ u32 t2;
+
+ p = MUL32(a3, k3);
+ p += p;
+ p += *(u64 *)mh;
+ p += MUL32(a0, k2);
+ p += MUL32(a1, k1);
+ p += MUL32(a2, k0);
+ t = (u32)(p);
+ p >>= 32;
+ p += MUL32(a0, k3);
+ p += MUL32(a1, k2);
+ p += MUL32(a2, k1);
+ p += MUL32(a3, k0);
+ t |= ((u64)((u32)p & 0x7fffffff)) << 32;
+ p >>= 31;
+ p += (u64)(((u32 *)ml)[INDEX_LOW]);
+ p += MUL32(a0, k0);
+ q = MUL32(a1, k3);
+ q += MUL32(a2, k2);
+ q += MUL32(a3, k1);
+ q += q;
+ p += q;
+ t2 = (u32)(p);
+ p >>= 32;
+ p += (u64)(((u32 *)ml)[INDEX_HIGH]);
+ p += MUL32(a0, k1);
+ p += MUL32(a1, k0);
+ q = MUL32(a2, k3);
+ q += MUL32(a3, k2);
+ q += q;
+ p += q;
+ *(u64 *)(alo) = (p << 32) | t2;
+ p >>= 32;
+ *(u64 *)(ahi) = p + t;
+
+#undef a0
+#undef a1
+#undef a2
+#undef a3
+#undef k0
+#undef k1
+#undef k2
+#undef k3
+}
+
+#define poly_step(ah, al, kh, kl, mh, ml) \
+ poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
+
+#endif /* end of specialized NH and poly definitions */
+
+/* At least nh_16 is defined. Define the others as needed here. */
+#ifndef nh_16_2
+#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
+ do { \
+ nh_16(mp, kp, nw, rh, rl); \
+ nh_16(mp, ((kp)+2), nw, rh2, rl2); \
+ } while (0)
+#endif
+#ifndef nh_vmac_nhbytes
+#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
+ nh_16(mp, kp, nw, rh, rl)
+#endif
+#ifndef nh_vmac_nhbytes_2
+#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
+ do { \
+ nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
+ nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
+ } while (0)
+#endif
+
+static void vhash_abort(struct vmac_ctx *ctx)
+{
+ ctx->polytmp[0] = ctx->polykey[0] ;
+ ctx->polytmp[1] = ctx->polykey[1] ;
+ ctx->first_block_processed = 0;
+}
+
+static u64 l3hash(u64 p1, u64 p2,
+ u64 k1, u64 k2, u64 len)
+{
+ u64 rh, rl, t, z = 0;
+
+ /* fully reduce (p1,p2)+(len,0) mod p127 */
+ t = p1 >> 63;
+ p1 &= m63;
+ ADD128(p1, p2, len, t);
+ /* At this point, (p1,p2) is at most 2^127+(len<<64) */
+ t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
+ ADD128(p1, p2, z, t);
+ p1 &= m63;
+
+ /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
+ t = p1 + (p2 >> 32);
+ t += (t >> 32);
+ t += (u32)t > 0xfffffffeu;
+ p1 += (t >> 32);
+ p2 += (p1 << 32);
+
+ /* compute (p1+k1)%p64 and (p2+k2)%p64 */
+ p1 += k1;
+ p1 += (0 - (p1 < k1)) & 257;
+ p2 += k2;
+ p2 += (0 - (p2 < k2)) & 257;
+
+ /* compute (p1+k1)*(p2+k2)%p64 */
+ MUL64(rh, rl, p1, p2);
+ t = rh >> 56;
+ ADD128(t, rl, z, rh);
+ rh <<= 8;
+ ADD128(t, rl, z, rh);
+ t += t << 8;
+ rl += t;
+ rl += (0 - (rl < t)) & 257;
+ rl += (0 - (rl > p64-1)) & 257;
+ return rl;
+}
+
+static void vhash_update(const unsigned char *m,
+ unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */
+ struct vmac_ctx *ctx)
+{
+ u64 rh, rl, *mptr;
+ const u64 *kptr = (u64 *)ctx->nhkey;
+ int i;
+ u64 ch, cl;
+ u64 pkh = ctx->polykey[0];
+ u64 pkl = ctx->polykey[1];
+
+ mptr = (u64 *)m;
+ i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
+
+ ch = ctx->polytmp[0];
+ cl = ctx->polytmp[1];
+
+ if (!ctx->first_block_processed) {
+ ctx->first_block_processed = 1;
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ ADD128(ch, cl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ i--;
+ }
+
+ while (i--) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ }
+
+ ctx->polytmp[0] = ch;
+ ctx->polytmp[1] = cl;
+}
+
+static u64 vhash(unsigned char m[], unsigned int mbytes,
+ u64 *tagl, struct vmac_ctx *ctx)
+{
+ u64 rh, rl, *mptr;
+ const u64 *kptr = (u64 *)ctx->nhkey;
+ int i, remaining;
+ u64 ch, cl;
+ u64 pkh = ctx->polykey[0];
+ u64 pkl = ctx->polykey[1];
+
+ mptr = (u64 *)m;
+ i = mbytes / VMAC_NHBYTES;
+ remaining = mbytes % VMAC_NHBYTES;
+
+ if (ctx->first_block_processed) {
+ ch = ctx->polytmp[0];
+ cl = ctx->polytmp[1];
+ } else if (i) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
+ ch &= m62;
+ ADD128(ch, cl, pkh, pkl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ i--;
+ } else if (remaining) {
+ nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
+ ch &= m62;
+ ADD128(ch, cl, pkh, pkl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ goto do_l3;
+ } else {/* Empty String */
+ ch = pkh; cl = pkl;
+ goto do_l3;
+ }
+
+ while (i--) {
+ nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ mptr += (VMAC_NHBYTES/sizeof(u64));
+ }
+ if (remaining) {
+ nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
+ rh &= m62;
+ poly_step(ch, cl, pkh, pkl, rh, rl);
+ }
+
+do_l3:
+ vhash_abort(ctx);
+ remaining *= 8;
+ return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
+}
+
+static u64 vmac(unsigned char m[], unsigned int mbytes,
+ unsigned char n[16], u64 *tagl,
+ struct vmac_ctx_t *ctx)
+{
+ u64 *in_n, *out_p;
+ u64 p, h;
+ int i;
+
+ in_n = ctx->__vmac_ctx.cached_nonce;
+ out_p = ctx->__vmac_ctx.cached_aes;
+
+ i = n[15] & 1;
+ if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
+ in_n[0] = *(u64 *)(n);
+ in_n[1] = *(u64 *)(n+8);
+ ((unsigned char *)in_n)[15] &= 0xFE;
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out_p, (unsigned char *)in_n);
+
+ ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
+ }
+ p = be64_to_cpup(out_p + i);
+ h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
+ return p + h;
+}
+
+static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
+{
+ u64 in[2] = {0}, out[2];
+ unsigned i;
+ int err = 0;
+
+ err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+ if (err)
+ return err;
+
+ /* Fill nh key */
+ ((unsigned char *)in)[0] = 0x80;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
+ ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
+ ((unsigned char *)in)[15] += 1;
+ }
+
+ /* Fill poly key */
+ ((unsigned char *)in)[0] = 0xC0;
+ in[1] = 0;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.polytmp[i] =
+ ctx->__vmac_ctx.polykey[i] =
+ be64_to_cpup(out) & mpoly;
+ ctx->__vmac_ctx.polytmp[i+1] =
+ ctx->__vmac_ctx.polykey[i+1] =
+ be64_to_cpup(out+1) & mpoly;
+ ((unsigned char *)in)[15] += 1;
+ }
+
+ /* Fill ip key */
+ ((unsigned char *)in)[0] = 0xE0;
+ in[1] = 0;
+ for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+ do {
+ crypto_cipher_encrypt_one(ctx->child,
+ (unsigned char *)out, (unsigned char *)in);
+ ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
+ ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
+ ((unsigned char *)in)[15] += 1;
+ } while (ctx->__vmac_ctx.l3key[i] >= p64
+ || ctx->__vmac_ctx.l3key[i+1] >= p64);
+ }
+
+ /* Invalidate nonce/aes cache and reset other elements */
+ ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
+ ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
+ ctx->__vmac_ctx.first_block_processed = 0;
+
+ return err;
+}
+
+static int vmac_setkey(struct crypto_shash *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+
+ if (keylen != VMAC_KEY_LEN) {
+ crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return vmac_set_key((u8 *)key, ctx);
+}
+
+static int vmac_init(struct shash_desc *pdesc)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+
+ memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+ return 0;
+}
+
+static int vmac_update(struct shash_desc *pdesc, const u8 *p,
+ unsigned int len)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+
+ vhash_update(p, len, &ctx->__vmac_ctx);
+
+ return 0;
+}
+
+static int vmac_final(struct shash_desc *pdesc, u8 *out)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+ vmac_t mac;
+ u8 nonce[16] = {};
+
+ mac = vmac(NULL, 0, nonce, NULL, ctx);
+ memcpy(out, &mac, sizeof(vmac_t));
+ memset(&mac, 0, sizeof(vmac_t));
+ memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+ return 0;
+}
+
+static int vmac_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_cipher *cipher;
+ struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void vmac_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+ crypto_free_cipher(ctx->child);
+}
+
+static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct shash_instance *inst;
+ struct crypto_alg *alg;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ if (err)
+ return err;
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
+ CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(alg))
+ return PTR_ERR(alg);
+
+ inst = shash_alloc_instance("vmac", alg);
+ err = PTR_ERR(inst);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ err = crypto_init_spawn(shash_instance_ctx(inst), alg,
+ shash_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ inst->alg.digestsize = sizeof(vmac_t);
+ inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+ inst->alg.base.cra_init = vmac_init_tfm;
+ inst->alg.base.cra_exit = vmac_exit_tfm;
+
+ inst->alg.init = vmac_init;
+ inst->alg.update = vmac_update;
+ inst->alg.final = vmac_final;
+ inst->alg.setkey = vmac_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return err;
+}
+
+static struct crypto_template vmac_tmpl = {
+ .name = "vmac",
+ .create = vmac_create,
+ .free = shash_free_instance,
+ .module = THIS_MODULE,
+};
+
+static int __init vmac_module_init(void)
+{
+ return crypto_register_template(&vmac_tmpl);
+}
+
+static void __exit vmac_module_exit(void)
+{
+ crypto_unregister_template(&vmac_tmpl);
+}
+
+module_init(vmac_module_init);
+module_exit(vmac_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VMAC hash algorithm");
+
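
The MUL64 and ADD128 macros above build a 64x64 -> 128-bit multiply out of four 32x32
products with explicit carry handling. A small userspace check of that arithmetic, with
the two macros reproduced so the result can be compared against a value worked out by
hand (demo only, not kernel code):

  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t u64;
  typedef uint32_t u32;

  /* ADD128 and MUL64 follow the definitions in the file above */
  #define ADD128(rh, rl, ih, il)              \
      do {                                    \
          u64 _il = (il);                     \
          (rl) += (_il);                      \
          if ((rl) < (_il))                   \
              (rh)++;                         \
          (rh) += (ih);                       \
      } while (0)

  #define MUL32(i1, i2) ((u64)(u32)(i1) * (u32)(i2))

  #define MUL64(rh, rl, i1, i2)                           \
      do {                                                \
          u64 _i1 = (i1), _i2 = (i2);                     \
          u64 m1 = MUL32(_i1, _i2 >> 32);                 \
          u64 m2 = MUL32(_i1 >> 32, _i2);                 \
          rh = MUL32(_i1 >> 32, _i2 >> 32);               \
          rl = MUL32(_i1, _i2);                           \
          ADD128(rh, rl, (m1 >> 32), (m1 << 32));         \
          ADD128(rh, rl, (m2 >> 32), (m2 << 32));         \
      } while (0)

  int main(void)
  {
      u64 rh, rl;

      /* 0xffffffffffffffff * 2 = 0x1fffffffffffffffe */
      MUL64(rh, rl, 0xffffffffffffffffULL, 2);
      printf("rh=%llx rl=%llx\n",
             (unsigned long long)rh, (unsigned long long)rl);
      return 0;
  }

Since 2 * (2^64 - 1) = 2^65 - 2, the program prints rh=1 rl=fffffffffffffffe.
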
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index b63b633e549c..bb7b67fba349 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -19,211 +19,142 @@
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
*/
-#include <crypto/scatterwalk.h>
-#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
#include <linux/err.h>
-#include <linux/hardirq.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/rtnetlink.h>
-#include <linux/slab.h>
-#include <linux/scatterlist.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
0x03030303, 0x03030303, 0x03030303, 0x03030303};
+
/*
* +------------------------
* | <parent tfm>
* +------------------------
- * | crypto_xcbc_ctx
+ * | xcbc_tfm_ctx
* +------------------------
- * | odds (block size)
+ * | consts (block size * 2)
* +------------------------
- * | prev (block size)
+ */
+struct xcbc_tfm_ctx {
+ struct crypto_cipher *child;
+ u8 ctx[];
+};
+
+/*
* +------------------------
- * | key (block size)
+ * | <shash desc>
* +------------------------
- * | consts (block size * 3)
+ * | xcbc_desc_ctx
+ * +------------------------
+ * | odds (block size)
+ * +------------------------
+ * | prev (block size)
* +------------------------
*/
-struct crypto_xcbc_ctx {
- struct crypto_cipher *child;
- u8 *odds;
- u8 *prev;
- u8 *key;
- u8 *consts;
- void (*xor)(u8 *a, const u8 *b, unsigned int bs);
- unsigned int keylen;
+struct xcbc_desc_ctx {
unsigned int len;
+ u8 ctx[];
};
-static void xor_128(u8 *a, const u8 *b, unsigned int bs)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
- ((u32 *)a)[2] ^= ((u32 *)b)[2];
- ((u32 *)a)[3] ^= ((u32 *)b)[3];
-}
-
-static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
- struct crypto_xcbc_ctx *ctx)
+static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
+ const u8 *inkey, unsigned int keylen)
{
- int bs = crypto_hash_blocksize(parent);
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
+ int bs = crypto_shash_blocksize(parent);
+ u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
int err = 0;
u8 key1[bs];
- if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
- return err;
+ if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
+ return err;
- crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts);
+ crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs);
+ crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2);
+ crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks);
return crypto_cipher_setkey(ctx->child, key1, bs);
-}
-
-static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
- const u8 *inkey, unsigned int keylen)
-{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
-
- if (keylen != crypto_cipher_blocksize(ctx->child))
- return -EINVAL;
- ctx->keylen = keylen;
- memcpy(ctx->key, inkey, keylen);
- ctx->consts = (u8*)ks;
-
- return _crypto_xcbc_digest_setkey(parent, ctx);
}
-static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
+static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
- int bs = crypto_hash_blocksize(pdesc->tfm);
+ unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ int bs = crypto_shash_blocksize(pdesc->tfm);
+ u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs;
ctx->len = 0;
- memset(ctx->odds, 0, bs);
- memset(ctx->prev, 0, bs);
+ memset(prev, 0, bs);
return 0;
}
-static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
- struct scatterlist *sg,
- unsigned int nbytes)
+static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
+ unsigned int len)
{
- struct crypto_hash *parent = pdesc->tfm;
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- struct crypto_cipher *tfm = ctx->child;
- int bs = crypto_hash_blocksize(parent);
-
- for (;;) {
- struct page *pg = sg_page(sg);
- unsigned int offset = sg->offset;
- unsigned int slen = sg->length;
-
- if (unlikely(slen > nbytes))
- slen = nbytes;
-
- nbytes -= slen;
-
- while (slen > 0) {
- unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
- char *p = crypto_kmap(pg, 0) + offset;
-
- /* checking the data can fill the block */
- if ((ctx->len + len) <= bs) {
- memcpy(ctx->odds + ctx->len, p, len);
- ctx->len += len;
- slen -= len;
-
- /* checking the rest of the page */
- if (len + offset >= PAGE_SIZE) {
- offset = 0;
- pg++;
- } else
- offset += len;
-
- crypto_kunmap(p, 0);
- crypto_yield(pdesc->flags);
- continue;
- }
-
- /* filling odds with new data and encrypting it */
- memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
- len -= bs - ctx->len;
- p += bs - ctx->len;
-
- ctx->xor(ctx->prev, ctx->odds, bs);
- crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev);
-
- /* clearing the length */
- ctx->len = 0;
-
- /* encrypting the rest of data */
- while (len > bs) {
- ctx->xor(ctx->prev, p, bs);
- crypto_cipher_encrypt_one(tfm, ctx->prev,
- ctx->prev);
- p += bs;
- len -= bs;
- }
-
- /* keeping the surplus of blocksize */
- if (len) {
- memcpy(ctx->odds, p, len);
- ctx->len = len;
- }
- crypto_kunmap(p, 0);
- crypto_yield(pdesc->flags);
- slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
- offset = 0;
- pg++;
- }
-
- if (!nbytes)
- break;
- sg = scatterwalk_sg_next(sg);
+ struct crypto_shash *parent = pdesc->tfm;
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ struct crypto_cipher *tfm = tctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *prev = odds + bs;
+
+ /* check whether the data can fill the block */
+ if ((ctx->len + len) <= bs) {
+ memcpy(odds + ctx->len, p, len);
+ ctx->len += len;
+ return 0;
}
- return 0;
-}
+ /* filling odds with new data and encrypting it */
+ memcpy(odds + ctx->len, p, bs - ctx->len);
+ len -= bs - ctx->len;
+ p += bs - ctx->len;
-static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
- struct scatterlist *sg,
- unsigned int nbytes)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
- return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
-}
+ crypto_xor(prev, odds, bs);
+ crypto_cipher_encrypt_one(tfm, prev, prev);
-static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
-{
- struct crypto_hash *parent = pdesc->tfm;
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- struct crypto_cipher *tfm = ctx->child;
- int bs = crypto_hash_blocksize(parent);
- int err = 0;
-
- if (ctx->len == bs) {
- u8 key2[bs];
+ /* clearing the length */
+ ctx->len = 0;
- if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
- return err;
+ /* encrypting the rest of data */
+ while (len > bs) {
+ crypto_xor(prev, p, bs);
+ crypto_cipher_encrypt_one(tfm, prev, prev);
+ p += bs;
+ len -= bs;
+ }
- crypto_cipher_encrypt_one(tfm, key2,
- (u8 *)(ctx->consts + bs));
+ /* keeping the surplus of blocksize */
+ if (len) {
+ memcpy(odds, p, len);
+ ctx->len = len;
+ }
- ctx->xor(ctx->prev, ctx->odds, bs);
- ctx->xor(ctx->prev, key2, bs);
- _crypto_xcbc_digest_setkey(parent, ctx);
+ return 0;
+}
- crypto_cipher_encrypt_one(tfm, out, ctx->prev);
- } else {
- u8 key3[bs];
+static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ unsigned long alignmask = crypto_shash_alignmask(parent);
+ struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
+ struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
+ struct crypto_cipher *tfm = tctx->child;
+ int bs = crypto_shash_blocksize(parent);
+ u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);
+ u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
+ u8 *prev = odds + bs;
+ unsigned int offset = 0;
+
+ if (ctx->len != bs) {
unsigned int rlen;
- u8 *p = ctx->odds + ctx->len;
+ u8 *p = odds + ctx->len;
+
*p = 0x80;
p++;
@@ -231,32 +162,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
if (rlen)
memset(p, 0, rlen);
- if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
- return err;
-
- crypto_cipher_encrypt_one(tfm, key3,
- (u8 *)(ctx->consts + bs * 2));
-
- ctx->xor(ctx->prev, ctx->odds, bs);
- ctx->xor(ctx->prev, key3, bs);
-
- _crypto_xcbc_digest_setkey(parent, ctx);
-
- crypto_cipher_encrypt_one(tfm, out, ctx->prev);
+ offset += bs;
}
- return 0;
-}
+ crypto_xor(prev, odds, bs);
+ crypto_xor(prev, consts + offset, bs);
-static int crypto_xcbc_digest(struct hash_desc *pdesc,
- struct scatterlist *sg, unsigned int nbytes, u8 *out)
-{
- if (WARN_ON_ONCE(in_irq()))
- return -EDEADLK;
+ crypto_cipher_encrypt_one(tfm, out, prev);
- crypto_xcbc_digest_init(pdesc);
- crypto_xcbc_digest_update2(pdesc, sg, nbytes);
- return crypto_xcbc_digest_final(pdesc, out);
+ return 0;
}
static int xcbc_init_tfm(struct crypto_tfm *tfm)
@@ -264,95 +178,95 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm)
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
- int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
+ struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- switch(bs) {
- case 16:
- ctx->xor = xor_128;
- break;
- default:
- return -EINVAL;
- }
-
ctx->child = cipher;
- ctx->odds = (u8*)(ctx+1);
- ctx->prev = ctx->odds + bs;
- ctx->key = ctx->prev + bs;
return 0;
};
static void xcbc_exit_tfm(struct crypto_tfm *tfm)
{
- struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
+ struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
-static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
+static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_instance *inst;
+ struct shash_instance *inst;
struct crypto_alg *alg;
+ unsigned long alignmask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
if (err)
- return ERR_PTR(err);
+ return err;
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_CAST(alg);
+ return PTR_ERR(alg);
switch(alg->cra_blocksize) {
case 16:
break;
default:
- inst = ERR_PTR(-EINVAL);
goto out_put_alg;
}
- inst = crypto_alloc_instance("xcbc", alg);
+ inst = shash_alloc_instance("xcbc", alg);
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_hash_type;
-
- inst->alg.cra_hash.digestsize = alg->cra_blocksize;
- inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
- ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
- inst->alg.cra_init = xcbc_init_tfm;
- inst->alg.cra_exit = xcbc_exit_tfm;
-
- inst->alg.cra_hash.init = crypto_xcbc_digest_init;
- inst->alg.cra_hash.update = crypto_xcbc_digest_update;
- inst->alg.cra_hash.final = crypto_xcbc_digest_final;
- inst->alg.cra_hash.digest = crypto_xcbc_digest;
- inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey;
+ err = crypto_init_spawn(shash_instance_ctx(inst), alg,
+ shash_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_free_inst;
+
+ alignmask = alg->cra_alignmask | 3;
+ inst->alg.base.cra_alignmask = alignmask;
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+
+ inst->alg.digestsize = alg->cra_blocksize;
+ inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx),
+ crypto_tfm_ctx_alignment()) +
+ (alignmask &
+ ~(crypto_tfm_ctx_alignment() - 1)) +
+ alg->cra_blocksize * 2;
+
+ inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
+ alignmask + 1) +
+ alg->cra_blocksize * 2;
+ inst->alg.base.cra_init = xcbc_init_tfm;
+ inst->alg.base.cra_exit = xcbc_exit_tfm;
+
+ inst->alg.init = crypto_xcbc_digest_init;
+ inst->alg.update = crypto_xcbc_digest_update;
+ inst->alg.final = crypto_xcbc_digest_final;
+ inst->alg.setkey = crypto_xcbc_digest_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
out_put_alg:
crypto_mod_put(alg);
- return inst;
-}
-
-static void xcbc_free(struct crypto_instance *inst)
-{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
+ return err;
}
static struct crypto_template crypto_xcbc_tmpl = {
.name = "xcbc",
- .alloc = xcbc_alloc,
- .free = xcbc_free,
+ .create = xcbc_create,
+ .free = shash_free_instance,
.module = THIS_MODULE,
};
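The rewritten update path above buffers at most one full block of trailing data in "odds" so that crypto_xcbc_digest_final() always has a block left to pad and mix with the derived key. A minimal user-space sketch of the same buffering scheme follows; struct xcbc_state, block_transform() and the 16-byte block size are stand-ins for illustration only (block_transform() is a toy byte scrambler, not a real cipher).

#include <stdio.h>
#include <string.h>

#define BS 16

static void block_transform(unsigned char *b)	/* placeholder, not AES */
{
	for (int i = 0; i < BS; i++)
		b[i] = (unsigned char)(b[i] * 31 + 7);
}

struct xcbc_state {
	unsigned char odds[BS];		/* buffered partial block */
	unsigned char prev[BS];		/* running chaining value */
	unsigned int len;
};

static void xcbc_update(struct xcbc_state *s, const unsigned char *p,
			size_t len)
{
	/* not enough for a full block yet: just buffer */
	if (s->len + len <= BS) {
		memcpy(s->odds + s->len, p, len);
		s->len += len;
		return;
	}

	/* top up the buffered block and consume it */
	memcpy(s->odds + s->len, p, BS - s->len);
	p += BS - s->len;
	len -= BS - s->len;
	for (int i = 0; i < BS; i++)
		s->prev[i] ^= s->odds[i];
	block_transform(s->prev);
	s->len = 0;

	/* consume whole blocks, always keeping the last <= BS bytes */
	while (len > BS) {
		for (int i = 0; i < BS; i++)
			s->prev[i] ^= p[i];
		block_transform(s->prev);
		p += BS;
		len -= BS;
	}
	memcpy(s->odds, p, len);
	s->len = len;
}

int main(void)
{
	struct xcbc_state s = { .len = 0 };

	xcbc_update(&s, (const unsigned char *)"0123456789abcdefXYZ", 19);
	printf("buffered tail: %u bytes\n", s.len);	/* prints 3 */
	return 0;
}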
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index f6baa77deefb..0c4ca4d318b3 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -78,9 +78,10 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
static int __init blacklist_by_year(void)
{
- int year = dmi_get_year(DMI_BIOS_DATE);
+ int year;
+
/* Doesn't exist? Likely an old system */
- if (year == -1) {
+ if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
printk(KERN_ERR PREFIX "no DMI BIOS year, "
"acpi=force is required to enable ACPI\n" );
return 1;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index b17c57f85032..ab2fa4eeb364 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -26,6 +26,17 @@ config ATA_NONSTANDARD
bool
default n
+config ATA_VERBOSE_ERROR
+ bool "Verbose ATA error reporting"
+ default y
+ help
+ This option adds parsing of ATA command descriptions and error bits
+ to libata kernel output, making the messages easier to interpret.
+ This option enlarges the kernel by approx. 6KB. Disable it only
+ if kernel size is more important than ease of debugging.
+
+ If unsure, say Y.
+
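Everything this option guards is compiled out when it is disabled, which is where the quoted ~6KB saving comes from: the command-description table and the verbose status/SError decoding added to libata-eh.c later in this patch are wrapped in #ifdef CONFIG_ATA_VERBOSE_ERROR, and callers must tolerate a NULL description. A stand-alone sketch of that pattern (the opcodes and the cmd_descr() helper here are illustrative, not the real table):

#include <stdio.h>

#define CONFIG_ATA_VERBOSE_ERROR	/* comment this out to mimic =n */

static const char *cmd_descr(unsigned char command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	if (command == 0xe7)
		return "FLUSH CACHE";
	if (command == 0x25)
		return "READ DMA EXT";
#endif
	return NULL;	/* callers must cope with a NULL description */
}

int main(void)
{
	const char *d = cmd_descr(0x25);

	printf("0x25 -> %s\n", d ? d : "unknown");
	return 0;
}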
config ATA_ACPI
bool "ATA ACPI Support"
depends on ACPI && PCI
@@ -586,6 +597,16 @@ config PATA_RB532
If unsure, say N.
+config PATA_RDC
+ tristate "RDC PATA support"
+ depends on PCI
+ help
+ This option enables basic support for the later RDC PATA
+ controllers via the new ATA layer. For the RDC 1010, you need to
+ enable the IT821X driver instead.
+
+ If unsure, say N.
+
config PATA_RZ1000
tristate "PC Tech RZ1000 PATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 38906f9bbb4e..463eb52236aa 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
+obj-$(CONFIG_PATA_RDC) += pata_rdc.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fe3eba5d6b3e..d4cd9c203314 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -329,10 +329,24 @@ static ssize_t ahci_activity_store(struct ata_device *dev,
enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+
static struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
&dev_attr_em_message_type,
&dev_attr_em_message,
+ &dev_attr_ahci_host_caps,
+ &dev_attr_ahci_host_version,
+ &dev_attr_ahci_port_cmd,
NULL
};
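The new ahci_host_caps, ahci_host_version and ahci_port_cmd entries are ordinary read-only sysfs attributes; the show routines added further down in this patch print one register each as bare hex. Assuming the AHCI port is host0, they can be read from user space like any other SCSI-host attribute (the path and host number here are illustrative):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/ahci_host_caps";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("HOST_CAP: %s", buf);	/* raw hex, as sprintf'ed */
	fclose(f);
	return 0;
}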
@@ -539,6 +553,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
+ /* AMD */
+ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
+ /* AMD is using RAID class only for ahci controllers */
+ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -702,6 +722,36 @@ static void ahci_enable_ahci(void __iomem *mmio)
WARN_ON(1);
}
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap);
+}
+
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
+
+ return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+}
+
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+}
+
/**
* ahci_save_initial_config - Save and fixup initial config values
* @pdev: target PCI device
@@ -1584,7 +1634,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
-static int ahci_kick_engine(struct ata_port *ap, int force_restart)
+static int ahci_kick_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1592,18 +1642,16 @@ static int ahci_kick_engine(struct ata_port *ap, int force_restart)
u32 tmp;
int busy, rc;
- /* do we need to kick the port? */
- busy = status & (ATA_BUSY | ATA_DRQ);
- if (!busy && !force_restart)
- return 0;
-
/* stop engine */
rc = ahci_stop_engine(ap);
if (rc)
goto out_restart;
- /* need to do CLO? */
- if (!busy) {
+ /* need to do CLO?
+ * always do CLO if PMP is attached (AHCI-1.3 9.2)
+ */
+ busy = status & (ATA_BUSY | ATA_DRQ);
+ if (!busy && !sata_pmp_attached(ap)) {
rc = 0;
goto out_restart;
}
@@ -1651,7 +1699,7 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1, timeout_msec);
if (tmp & 0x1) {
- ahci_kick_engine(ap, 1);
+ ahci_kick_engine(ap);
return -EBUSY;
}
} else
@@ -1674,7 +1722,7 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
/* prepare for SRST (AHCI-1.1 10.4.1) */
- rc = ahci_kick_engine(ap, 1);
+ rc = ahci_kick_engine(ap);
if (rc && rc != -EOPNOTSUPP)
ata_link_printk(link, KERN_WARNING,
"failed to reset engine (errno=%d)\n", rc);
@@ -1890,7 +1938,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
ahci_check_ready);
if (rc)
- ahci_kick_engine(ap, 0);
+ ahci_kick_engine(ap);
}
return rc;
}
@@ -2271,7 +2319,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
/* make DMA engine forget about the failed command */
if (qc->flags & ATA_QCFLAG_FAILED)
- ahci_kick_engine(ap, 1);
+ ahci_kick_engine(ap);
}
static void ahci_pmp_attach(struct ata_port *ap)
@@ -2603,14 +2651,18 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
}
/*
- * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older
- * BIOS. The oldest version known to be broken is 0901 and working is
- * 1501 which was released on 2007-10-26. Force 32bit DMA on anything
- * older than 1501. Please read bko#9412 for more info.
+ * SB600 ahci controller on certain boards can't do 64bit DMA with
+ * older BIOS.
*/
-static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
+static bool ahci_sb600_32bit_only(struct pci_dev *pdev)
{
static const struct dmi_system_id sysids[] = {
+ /*
+ * The oldest version known to be broken is 0901 and
+ * working is 1501 which was released on 2007-10-26.
+ * Force 32bit DMA on anything older than 1501.
+ * Please read bko#9412 for more info.
+ */
{
.ident = "ASUS M2A-VM",
.matches = {
@@ -2618,31 +2670,48 @@ static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
"ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
},
+ .driver_data = "20071026", /* yyyymmdd */
+ },
+ /*
+ * It's yet unknown whether more recent BIOS fixes the
+ * problem. Blacklist the whole board for the time
+ * being. Please read the following thread for more
+ * info.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/42326
+ */
+ {
+ .ident = "Gigabyte GA-MA69VM-S2",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "GA-MA69VM-S2"),
+ },
},
{ }
};
- const char *cutoff_mmdd = "10/26";
- const char *date;
- int year;
+ const struct dmi_system_id *match;
+ match = dmi_first_match(sysids);
if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
- !dmi_check_system(sysids))
+ !match)
return false;
- /*
- * Argh.... both version and date are free form strings.
- * Let's hope they're using the same date format across
- * different versions.
- */
- date = dmi_get_system_info(DMI_BIOS_DATE);
- year = dmi_get_year(DMI_BIOS_DATE);
- if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' &&
- (year > 2007 ||
- (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0)))
- return false;
+ if (match->driver_data) {
+ int year, month, date;
+ char buf[9];
+
+ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
- dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, "
- "forcing 32bit DMA, update BIOS\n");
+ if (strcmp(buf, match->driver_data) >= 0)
+ return false;
+
+ dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
+ "forcing 32bit DMA, update BIOS\n", match->ident);
+ } else
+ dev_printk(KERN_WARNING, &pdev->dev, "%s: this board can't "
+ "do 64bit DMA, forcing 32bit\n", match->ident);
return true;
}
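dmi_first_match() hands back the whole matching entry, so per-board data can be carried in .driver_data; for the M2A-VM it holds a BIOS cutoff date normalised to "yyyymmdd", which lets a plain strcmp() order dates correctly. A stand-alone check of that comparison, with illustrative values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cutoff = "20071026";	/* ASUS M2A-VM cutoff */
	int year = 2007, month = 9, day = 4;	/* example BIOS date */
	char buf[9];

	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, day);
	if (strcmp(buf, cutoff) >= 0)
		printf("BIOS %s is new enough, 64bit DMA allowed\n", buf);
	else
		printf("BIOS %s predates %s, forcing 32bit DMA\n", buf, cutoff);
	return 0;
}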
@@ -2857,8 +2926,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
- /* apply ASUS M2A_VM quirk */
- if (ahci_asus_m2a_vm_32bit_only(pdev))
+ /* apply sb600 32bit only quirk */
+ if (ahci_sb600_32bit_only(pdev))
hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
@@ -2869,7 +2938,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ)
- pi.flags |= ATA_FLAG_NCQ;
+ pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ac176da1f94e..01964b6e6f6b 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -689,6 +689,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
struct ata_taskfile tf, ptf, rtf;
unsigned int err_mask;
const char *level;
+ const char *descr;
char msg[60];
int rc;
@@ -736,11 +737,13 @@ static int ata_acpi_run_tf(struct ata_device *dev,
snprintf(msg, sizeof(msg), "filtered out");
rc = 0;
}
+ descr = ata_get_cmd_descript(tf.command);
ata_dev_printk(dev, level,
- "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x %s\n",
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n",
tf.command, tf.feature, tf.nsect, tf.lbal,
- tf.lbam, tf.lbah, tf.device, msg);
+ tf.lbam, tf.lbah, tf.device,
+ (descr ? descr : "unknown"), msg);
return rc;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 072ba5ea138f..df31deac5c82 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -709,7 +709,13 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
head = tf->device & 0xf;
sect = tf->lbal;
- block = (cyl * dev->heads + head) * dev->sectors + sect;
+ if (!sect) {
+ ata_dev_printk(dev, KERN_WARNING, "device reported "
+ "invalid CHS sector 0\n");
+ sect = 1; /* oh well */
+ }
+
+ block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
}
return block;
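CHS sector numbers are 1-based, so C/H/S 0/0/1 must map to block 0 and a reported sector of 0 is invalid; that is what the added warning and the trailing "- 1" restore. A quick user-space check of the arithmetic with made-up geometry:

#include <stdio.h>

int main(void)
{
	unsigned int heads = 16, sectors = 63;		/* fake geometry */
	unsigned int cyl = 2, head = 5, sect = 17;	/* fake address */

	unsigned long long block =
		((unsigned long long)cyl * heads + head) * sectors + sect - 1;

	printf("C/H/S %u/%u/%u -> LBA %llu\n", cyl, head, sect, block);
	printf("C/H/S 0/0/1 -> LBA %llu\n",
	       (unsigned long long)(0 * heads + 0) * sectors + 1 - 1);
	return 0;
}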
@@ -2299,29 +2305,49 @@ static inline u8 ata_dev_knobble(struct ata_device *dev)
return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
-static void ata_dev_config_ncq(struct ata_device *dev,
+static int ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
struct ata_port *ap = dev->link->ap;
int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
+ unsigned int err_mask;
+ char *aa_desc = "";
if (!ata_id_has_ncq(dev->id)) {
desc[0] = '\0';
- return;
+ return 0;
}
if (dev->horkage & ATA_HORKAGE_NONCQ) {
snprintf(desc, desc_sz, "NCQ (not used)");
- return;
+ return 0;
}
if (ap->flags & ATA_FLAG_NCQ) {
hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
dev->flags |= ATA_DFLAG_NCQ;
}
+ if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
+ (ap->flags & ATA_FLAG_FPDMA_AA) &&
+ ata_id_has_fpdma_aa(dev->id)) {
+ err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
+ SATA_FPDMA_AA);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to enable AA "
+ "(error_mask=0x%x)\n", err_mask);
+ if (err_mask != AC_ERR_DEV) {
+ dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
+ return -EIO;
+ }
+ } else
+ aa_desc = ", AA";
+ }
+
if (hdepth >= ddepth)
- snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
+ snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
else
- snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
+ snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
+ ddepth, aa_desc);
+ return 0;
}
/**
@@ -2461,7 +2487,7 @@ int ata_dev_configure(struct ata_device *dev)
if (ata_id_has_lba(id)) {
const char *lba_desc;
- char ncq_desc[20];
+ char ncq_desc[24];
lba_desc = "LBA";
dev->flags |= ATA_DFLAG_LBA;
@@ -2475,7 +2501,9 @@ int ata_dev_configure(struct ata_device *dev)
}
/* config NCQ */
- ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+ rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+ if (rc)
+ return rc;
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info) {
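ncq_desc grows from 20 to 24 bytes because the optional ", AA" suffix added above lengthens the worst-case description. A quick stand-alone check of the sizing (the queue depths are illustrative):

#include <stdio.h>

int main(void)
{
	char ncq_desc[24];
	int n = snprintf(ncq_desc, sizeof(ncq_desc),
			 "NCQ (depth %d/%d)%s", 31, 32, ", AA");

	printf("\"%s\" needs %d characters plus a NUL\n", ncq_desc, n);
	return 0;
}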
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 79711b64054b..a04488f0de88 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -40,6 +40,7 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"
#include <linux/libata.h>
@@ -999,7 +1000,9 @@ static void __ata_port_freeze(struct ata_port *ap)
* ata_port_freeze - abort & freeze port
* @ap: ATA port to freeze
*
- * Abort and freeze @ap.
+ * Abort and freeze @ap. The freeze operation must be called
+ * first, because some hardware requires special operations
+ * before the taskfile registers are accessible.
*
* LOCKING:
* spin_lock_irqsave(host lock)
@@ -1013,8 +1016,8 @@ int ata_port_freeze(struct ata_port *ap)
WARN_ON(!ap->ops->error_handler);
- nr_aborted = ata_port_abort(ap);
__ata_port_freeze(ap);
+ nr_aborted = ata_port_abort(ap);
return nr_aborted;
}
@@ -2110,6 +2113,116 @@ void ata_eh_autopsy(struct ata_port *ap)
}
/**
+ * ata_get_cmd_descript - get description for ATA command
+ * @command: ATA command code to get description for
+ *
+ * Return a textual description of the given command, or NULL if the
+ * command is not known.
+ *
+ * LOCKING:
+ * None
+ */
+const char *ata_get_cmd_descript(u8 command)
+{
+#ifdef CONFIG_ATA_VERBOSE_ERROR
+ static const struct
+ {
+ u8 command;
+ const char *text;
+ } cmd_descr[] = {
+ { ATA_CMD_DEV_RESET, "DEVICE RESET" },
+ { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
+ { ATA_CMD_STANDBY, "STANDBY" },
+ { ATA_CMD_IDLE, "IDLE" },
+ { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
+ { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
+ { ATA_CMD_NOP, "NOP" },
+ { ATA_CMD_FLUSH, "FLUSH CACHE" },
+ { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
+ { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
+ { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
+ { ATA_CMD_SERVICE, "SERVICE" },
+ { ATA_CMD_READ, "READ DMA" },
+ { ATA_CMD_READ_EXT, "READ DMA EXT" },
+ { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
+ { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
+ { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
+ { ATA_CMD_WRITE, "WRITE DMA" },
+ { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
+ { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
+ { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
+ { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
+ { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
+ { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
+ { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
+ { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
+ { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
+ { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
+ { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
+ { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
+ { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
+ { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
+ { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
+ { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
+ { ATA_CMD_SET_FEATURES, "SET FEATURES" },
+ { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
+ { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
+ { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
+ { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
+ { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
+ { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
+ { ATA_CMD_SLEEP, "SLEEP" },
+ { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
+ { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
+ { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
+ { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
+ { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
+ { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
+ { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
+ { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
+ { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
+ { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
+ { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
+ { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
+ { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
+ { ATA_CMD_PMP_READ, "READ BUFFER" },
+ { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
+ { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
+ { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
+ { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
+ { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
+ { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
+ { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
+ { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
+ { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
+ { ATA_CMD_SMART, "SMART" },
+ { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
+ { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
+ { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
+ { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
+ { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
+ { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
+ { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
+ { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
+ { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
+ { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
+ { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
+ { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
+ { ATA_CMD_RESTORE, "RECALIBRATE" },
+ { 0, NULL } /* terminate list */
+ };
+
+ unsigned int i;
+ for (i = 0; cmd_descr[i].text; i++)
+ if (cmd_descr[i].command == command)
+ return cmd_descr[i].text;
+#endif
+
+ return NULL;
+}
+
+/**
* ata_eh_link_report - report error handling to user
* @link: ATA link EH is going on
*
@@ -2175,6 +2288,7 @@ static void ata_eh_link_report(struct ata_link *link)
ata_link_printk(link, KERN_ERR, "%s\n", desc);
}
+#ifdef CONFIG_ATA_VERBOSE_ERROR
if (ehc->i.serror)
ata_link_printk(link, KERN_ERR,
"SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
@@ -2195,6 +2309,7 @@ static void ata_eh_link_report(struct ata_link *link)
ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
+#endif
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
@@ -2226,14 +2341,23 @@ static void ata_eh_link_report(struct ata_link *link)
dma_str[qc->dma_dir]);
}
- if (ata_is_atapi(qc->tf.protocol))
- snprintf(cdb_buf, sizeof(cdb_buf),
+ if (ata_is_atapi(qc->tf.protocol)) {
+ if (qc->scsicmd)
+ scsi_print_command(qc->scsicmd);
+ else
+ snprintf(cdb_buf, sizeof(cdb_buf),
"cdb %02x %02x %02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x %02x %02x\n ",
cdb[0], cdb[1], cdb[2], cdb[3],
cdb[4], cdb[5], cdb[6], cdb[7],
cdb[8], cdb[9], cdb[10], cdb[11],
cdb[12], cdb[13], cdb[14], cdb[15]);
+ } else {
+ const char *descr = ata_get_cmd_descript(cmd->command);
+ if (descr)
+ ata_dev_printk(qc->dev, KERN_ERR,
+ "failed command: %s\n", descr);
+ }
ata_dev_printk(qc->dev, KERN_ERR,
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
@@ -2252,6 +2376,7 @@ static void ata_eh_link_report(struct ata_link *link)
res->device, qc->err_mask, ata_err_string(qc->err_mask),
qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
+#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
ATA_ERR)) {
if (res->command & ATA_BUSY)
@@ -2275,6 +2400,7 @@ static void ata_eh_link_report(struct ata_link *link)
res->feature & ATA_UNC ? "UNC " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
res->feature & ATA_ABORTED ? "ABRT " : "");
+#endif
}
}
@@ -2574,11 +2700,17 @@ int ata_eh_reset(struct ata_link *link, int classify,
postreset(slave, classes);
}
- /* clear cached SError */
+ /*
+ * Some controllers can't be frozen very well and may set
+ * spurious error conditions during reset. Clear accumulated
+ * error information. As reset is the final recovery action,
+ * nothing is lost by doing this.
+ */
spin_lock_irqsave(link->ap->lock, flags);
- link->eh_info.serror = 0;
+ memset(&link->eh_info, 0, sizeof(link->eh_info));
if (slave)
- slave->eh_info.serror = 0;
+ memset(&slave->eh_info, 0, sizeof(link->eh_info));
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
spin_unlock_irqrestore(link->ap->lock, flags);
/* Make sure onlineness and classification result correspond.
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 619f2c33950e..51f0ffb78cbd 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -221,6 +221,8 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
{
u32 rev = gscr[SATA_PMP_GSCR_REV];
+ if (rev & (1 << 3))
+ return "1.2";
if (rev & (1 << 2))
return "1.1";
if (rev & (1 << 1))
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d0dfeef55db5..b4ee28dec521 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1119,10 +1119,6 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
- if (ata_id_is_ssd(dev->id))
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
- sdev->request_queue);
-
/* ATA devices must be sector aligned */
blk_queue_update_dma_alignment(sdev->request_queue,
ATA_SECT_SIZE - 1);
@@ -1257,23 +1253,6 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
return queue_depth;
}
-/* XXX: for spindown warning */
-static void ata_delayed_done_timerfn(unsigned long arg)
-{
- struct scsi_cmnd *scmd = (void *)arg;
-
- scmd->scsi_done(scmd);
-}
-
-/* XXX: for spindown warning */
-static void ata_delayed_done(struct scsi_cmnd *scmd)
-{
- static struct timer_list timer;
-
- setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
- mod_timer(&timer, jiffies + 5 * HZ);
-}
-
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
* @qc: Storage for translated ATA taskfile
@@ -1338,32 +1317,6 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
system_entering_hibernation())
goto skip;
- /* XXX: This is for backward compatibility, will be
- * removed. Read Documentation/feature-removal-schedule.txt
- * for more info.
- */
- if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
- (system_state == SYSTEM_HALT ||
- system_state == SYSTEM_POWER_OFF)) {
- static unsigned long warned;
-
- if (!test_and_set_bit(0, &warned)) {
- ata_dev_printk(qc->dev, KERN_WARNING,
- "DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
- "UPDATE SHUTDOWN UTILITY\n");
- ata_dev_printk(qc->dev, KERN_WARNING,
- "For more info, visit "
- "http://linux-ata.org/shutdown.html\n");
-
- /* ->scsi_done is not used, use it for
- * delayed completion.
- */
- scmd->scsi_done = qc->scsidone;
- qc->scsidone = ata_delayed_done;
- }
- goto skip;
- }
-
/* Issue ATA STANDBY IMMEDIATE command */
tf->command = ATA_CMD_STANDBYNOW1;
}
@@ -1764,14 +1717,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
}
}
- /* XXX: track spindown state for spindown skipping and warning */
- if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
- qc->tf.command == ATA_CMD_STANDBYNOW1))
- qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
- else if (likely(system_state != SYSTEM_HALT &&
- system_state != SYSTEM_POWER_OFF))
- qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;
-
if (need_sense && !ap->ops->error_handler)
ata_dump_status(ap->print_id, &qc->result_tf);
@@ -2815,28 +2760,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
goto invalid_fld;
/*
- * Filter TPM commands by default. These provide an
- * essentially uncontrolled encrypted "back door" between
- * applications and the disk. Set libata.allow_tpm=1 if you
- * have a real reason for wanting to use them. This ensures
- * that installed software cannot easily mess stuff up without
- * user intent. DVR type users will probably ship with this enabled
- * for movie content management.
- *
- * Note that for ATA8 we can issue a DCS change and DCS freeze lock
- * for this and should do in future but that it is not sufficient as
- * DCS is an optional feature set. Thus we also do the software filter
- * so that we comply with the TC consortium stated goal that the user
- * can turn off TC features of their system.
- */
- if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
- goto invalid_fld;
-
- /* We may not issue DMA commands if no DMA mode is set */
- if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
- goto invalid_fld;
-
- /*
* 12 and 16 byte CDBs use different offsets to
* provide the various register values.
*/
@@ -2885,6 +2808,41 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->device = dev->devno ?
tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
+ /* READ/WRITE LONG use a non-standard sect_size */
+ qc->sect_size = ATA_SECT_SIZE;
+ switch (tf->command) {
+ case ATA_CMD_READ_LONG:
+ case ATA_CMD_READ_LONG_ONCE:
+ case ATA_CMD_WRITE_LONG:
+ case ATA_CMD_WRITE_LONG_ONCE:
+ if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
+ goto invalid_fld;
+ qc->sect_size = scsi_bufflen(scmd);
+ }
+
+ /*
+ * Set flags so that all registers will be written, pass on
+ * write indication (used for PIO/DMA setup), result TF is
+ * copied back and we don't whine too much about its failure.
+ */
+ tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ tf->flags |= ATA_TFLAG_WRITE;
+
+ qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
+
+ /*
+ * Set transfer length.
+ *
+ * TODO: find out if we need to do more here to
+ * cover scatter/gather case.
+ */
+ ata_qc_set_pc_nbytes(qc);
+
+ /* We may not issue DMA commands if no DMA mode is set */
+ if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
+ goto invalid_fld;
+
/* sanity check for pio multi commands */
if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
goto invalid_fld;
@@ -2901,18 +2859,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
multi_count);
}
- /* READ/WRITE LONG use a non-standard sect_size */
- qc->sect_size = ATA_SECT_SIZE;
- switch (tf->command) {
- case ATA_CMD_READ_LONG:
- case ATA_CMD_READ_LONG_ONCE:
- case ATA_CMD_WRITE_LONG:
- case ATA_CMD_WRITE_LONG_ONCE:
- if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
- goto invalid_fld;
- qc->sect_size = scsi_bufflen(scmd);
- }
-
/*
* Filter SET_FEATURES - XFER MODE command -- otherwise,
* SET_FEATURES - XFER MODE must be preceded/succeeded
@@ -2920,30 +2866,27 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* controller (i.e. the reason for ->set_piomode(),
* ->set_dmamode(), and ->post_set_mode() hooks).
*/
- if ((tf->command == ATA_CMD_SET_FEATURES)
- && (tf->feature == SETFEATURES_XFER))
+ if (tf->command == ATA_CMD_SET_FEATURES &&
+ tf->feature == SETFEATURES_XFER)
goto invalid_fld;
/*
- * Set flags so that all registers will be written,
- * and pass on write indication (used for PIO/DMA
- * setup.)
- */
- tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
-
- if (scmd->sc_data_direction == DMA_TO_DEVICE)
- tf->flags |= ATA_TFLAG_WRITE;
-
- /*
- * Set transfer length.
+ * Filter TPM commands by default. These provide an
+ * essentially uncontrolled encrypted "back door" between
+ * applications and the disk. Set libata.allow_tpm=1 if you
+ * have a real reason for wanting to use them. This ensures
+ * that installed software cannot easily mess stuff up without
+ * user intent. DVR type users will probably ship with this enabled
+ * for movie content management.
*
- * TODO: find out if we need to do more here to
- * cover scatter/gather case.
+ * Note that for ATA8 we can issue a DCS change and DCS freeze lock
+ * for this and should do in future but that it is not sufficient as
+ * DCS is an optional feature set. Thus we also do the software filter
+ * so that we comply with the TC consortium stated goal that the user
+ * can turn off TC features of their system.
*/
- ata_qc_set_pc_nbytes(qc);
-
- /* request result TF and be quiet about device error */
- qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
+ if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
+ goto invalid_fld;
return 0;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 89a1e0018e71..be8e2628f82c 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -164,6 +164,7 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
unsigned int action);
extern void ata_eh_autopsy(struct ata_port *ap);
+const char *ata_get_cmd_descript(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 45915566e4e9..aa4b3f6ae771 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -246,6 +246,7 @@ static const struct pci_device_id atiixp[] = {
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), },
{ },
};
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index d33aa28239a9..403f56165cec 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -202,7 +202,8 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
}
static const struct pci_device_id cs5535[] = {
- { PCI_VDEVICE(NS, 0x002D), },
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), },
{ },
};
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index abdd19fe990a..d6f69561dc86 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -213,7 +213,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
* This is tI, C.F. spec. says 0, but Sony CF card requires
* more, we use 20 nS.
*/
- dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);;
+ dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
dma_tim.s.dmarq = dma_arq;
@@ -841,7 +841,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ocd = pdev->dev.platform_data;
cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
- res_cs0->end - res_cs0->start + 1);
+ resource_size(res_cs0));
if (!cs0)
return -ENOMEM;
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index d8d743af3225..3f6ebc6c665a 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -151,14 +151,14 @@ int __devinit __pata_platform_probe(struct device *dev,
*/
if (mmio) {
ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
- io_res->end - io_res->start + 1);
+ resource_size(io_res));
ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
- ctl_res->end - ctl_res->start + 1);
+ resource_size(ctl_res));
} else {
ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
- io_res->end - io_res->start + 1);
+ resource_size(io_res));
ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
- ctl_res->end - ctl_res->start + 1);
+ resource_size(ctl_res));
}
if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
dev_err(dev, "failed to map IO/CTL base\n");
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 8e3cdef8a25f..45f1e10f917b 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -151,7 +151,7 @@ static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
info->irq = irq;
info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!info->iobase)
return -ENOMEM;
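The resource_size() conversions in the platform drivers above (and in sata_mv further down) are mechanical: the helper simply returns the inclusive length of a resource. A simplified stand-alone equivalent with a made-up resource (the kernel's helper takes a const struct resource * and returns resource_size_t):

#include <stdio.h>

struct resource { unsigned long start, end; };

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* end is inclusive */
}

int main(void)
{
	struct resource r = { .start = 0x1000, .end = 0x1fff };

	printf("size = %#lx\n", resource_size(&r));	/* prints 0x1000 */
	return 0;
}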
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
new file mode 100644
index 000000000000..c843a1e07c4f
--- /dev/null
+++ b/drivers/ata/pata_rdc.c
@@ -0,0 +1,400 @@
+/*
+ * pata_rdc - Driver for later RDC PATA controllers
+ *
+ * This is actually a driver for hardware meeting
+ * INCITS 370-2004 (1510D): ATA Host Adapter Standards
+ *
+ * Based on ata_piix.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_rdc"
+#define DRV_VERSION "0.01"
+
+struct rdc_host_priv {
+ u32 saved_iocfg;
+};
+
+/**
+ * rdc_pata_cable_detect - Probe host controller cable detect info
+ * @ap: Port for which cable detect info is desired
+ *
+ * Read 80c cable indicator from ATA PCI device's PCI config
+ * register. This register is normally set by firmware (BIOS).
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static int rdc_pata_cable_detect(struct ata_port *ap)
+{
+ struct rdc_host_priv *hpriv = ap->host->private_data;
+ u8 mask;
+
+ /* check BIOS cable detect results */
+ mask = 0x30 << (2 * ap->port_no);
+ if ((hpriv->saved_iocfg & mask) == 0)
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * rdc_pata_prereset - prereset for PATA host controller
+ * @link: Target link
+ * @deadline: deadline jiffies for the operation
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ static const struct pci_bits rdc_enable_bits[] = {
+ { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
+ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
+ };
+
+ if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
+ return -ENOENT;
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * rdc_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int is_slave = (adev->devno != 0);
+ unsigned int master_port= ap->port_no ? 0x42 : 0x40;
+ unsigned int slave_port = 0x44;
+ u16 master_data;
+ u8 slave_data;
+ u8 udma_enable;
+ int control = 0;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ if (pio >= 2)
+ control |= 1; /* TIME1 enable */
+ if (ata_pio_need_iordy(adev))
+ control |= 2; /* IE enable */
+
+ if (adev->class == ATA_DEV_ATA)
+ control |= 4; /* PPE enable */
+
+ /* PIO configuration clears DTE unconditionally. It will be
+ * programmed in set_dmamode which is guaranteed to be called
+ * after set_piomode if any DMA mode is available.
+ */
+ pci_read_config_word(dev, master_port, &master_data);
+ if (is_slave) {
+ /* clear TIME1|IE1|PPE1|DTE1 */
+ master_data &= 0xff0f;
+ /* Enable SITRE (separate slave timing register) */
+ master_data |= 0x4000;
+ /* enable PPE1, IE1 and TIME1 as needed */
+ master_data |= (control << 4);
+ pci_read_config_byte(dev, slave_port, &slave_data);
+ slave_data &= (ap->port_no ? 0x0f : 0xf0);
+ /* Load the timing nibble for this slave */
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
+ << (ap->port_no ? 4 : 0);
+ } else {
+ /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
+ master_data &= 0xccf0;
+ /* Enable PPE, IE and TIME as appropriate */
+ master_data |= control;
+ /* load ISP and RCT */
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+ pci_write_config_word(dev, master_port, master_data);
+ if (is_slave)
+ pci_write_config_byte(dev, slave_port, slave_data);
+
+ /* Ensure the UDMA bit is off - it will be turned back on if
+ UDMA is selected */
+
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+ udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
+ pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+/**
+ * rdc_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ u8 master_port = ap->port_no ? 0x42 : 0x40;
+ u16 master_data;
+ u8 speed = adev->dma_mode;
+ int devid = adev->devno + 2 * ap->port_no;
+ u8 udma_enable = 0;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ pci_read_config_word(dev, master_port, &master_data);
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+
+ if (speed >= XFER_UDMA_0) {
+ unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+ u16 udma_timing;
+ u16 ideconf;
+ int u_clock, u_speed;
+
+ /*
+ * UDMA is handled by a combination of clock switching and
+ * selection of dividers
+ *
+ * Handy rule: Odd modes are UDMATIMx 01, even are 02
+ * except UDMA0 which is 00
+ */
+ u_speed = min(2 - (udma & 1), udma);
+ if (udma == 5)
+ u_clock = 0x1000; /* 100Mhz */
+ else if (udma > 2)
+ u_clock = 1; /* 66Mhz */
+ else
+ u_clock = 0; /* 33Mhz */
+
+ udma_enable |= (1 << devid);
+
+ /* Load the CT/RP selection */
+ pci_read_config_word(dev, 0x4A, &udma_timing);
+ udma_timing &= ~(3 << (4 * devid));
+ udma_timing |= u_speed << (4 * devid);
+ pci_write_config_word(dev, 0x4A, udma_timing);
+
+ /* Select a 33/66/100Mhz clock */
+ pci_read_config_word(dev, 0x54, &ideconf);
+ ideconf &= ~(0x1001 << devid);
+ ideconf |= u_clock << devid;
+ pci_write_config_word(dev, 0x54, ideconf);
+ } else {
+ /*
+ * MWDMA is driven by the PIO timings. We must also enable
+ * IORDY unconditionally along with TIME1. PPE has already
+ * been set when the PIO timing was set.
+ */
+ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
+ unsigned int control;
+ u8 slave_data;
+ const unsigned int needed_pio[3] = {
+ XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+ };
+ int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+ control = 3; /* IORDY|TIME1 */
+
+ /* If the drive MWDMA is faster than it can do PIO then
+ we must force PIO into PIO0 */
+
+ if (adev->pio_mode < needed_pio[mwdma])
+ /* Enable DMA timing only */
+ control |= 8; /* PIO cycles in PIO0 */
+
+ if (adev->devno) { /* Slave */
+ master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
+ master_data |= control << 4;
+ pci_read_config_byte(dev, 0x44, &slave_data);
+ slave_data &= (ap->port_no ? 0x0f : 0xf0);
+ /* Load the matching timing */
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+ pci_write_config_byte(dev, 0x44, slave_data);
+ } else { /* Master */
+ master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
+ and master timing bits */
+ master_data |= control;
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+
+ udma_enable &= ~(1 << devid);
+ pci_write_config_word(dev, master_port, master_data);
+ }
+ pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+static struct ata_port_operations rdc_pata_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .cable_detect = rdc_pata_cable_detect,
+ .set_piomode = rdc_set_piomode,
+ .set_dmamode = rdc_set_dmamode,
+ .prereset = rdc_pata_prereset,
+};
+
+static struct ata_port_info rdc_port_info = {
+
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &rdc_pata_ops,
+};
+
+static struct scsi_host_template rdc_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/**
+ * rdc_init_one - Register RDC ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in rdc_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer. We save the controller's IOCFG
+ * register and then hand over control to libata, for it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int __devinit rdc_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct device *dev = &pdev->dev;
+ struct ata_port_info port_info[2];
+ const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
+ unsigned long port_flags;
+ struct ata_host *host;
+ struct rdc_host_priv *hpriv;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ port_info[0] = rdc_port_info;
+ port_info[1] = rdc_port_info;
+
+ port_flags = port_info[0].flags;
+
+ /* enable device and prepare host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+
+ /* Save IOCFG, this will be used for cable detection, quirk
+ * detection and restoration on detach.
+ */
+ pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+ host->private_data = hpriv;
+
+ pci_intx(pdev, 1);
+
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+
+ pci_set_master(pdev);
+ return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht);
+}
+
+static void rdc_remove_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct rdc_host_priv *hpriv = host->private_data;
+
+ pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);
+
+ ata_pci_remove_one(pdev);
+}
+
+static const struct pci_device_id rdc_pci_tbl[] = {
+ { PCI_DEVICE(0x17F3, 0x1011), },
+ { PCI_DEVICE(0x17F3, 0x1012), },
+ { } /* terminate list */
+};
+
+static struct pci_driver rdc_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = rdc_pci_tbl,
+ .probe = rdc_init_one,
+ .remove = rdc_remove_one,
+};
+
+
+static int __init rdc_init(void)
+{
+ return pci_register_driver(&rdc_pci_driver);
+}
+
+static void __exit rdc_exit(void)
+{
+ pci_unregister_driver(&rdc_pci_driver);
+}
+
+module_init(rdc_init);
+module_exit(rdc_exit);
+
+MODULE_AUTHOR("Alan Cox (based on ata_piix)");
+MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
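The UDMA branch of rdc_set_dmamode() encodes the divider rule quoted in its comment (odd modes use UDMATIMx value 01, even modes 02, and UDMA0 uses 00) as u_speed = min(2 - (udma & 1), udma), and derives the 33/66/100MHz clock select from the mode number. A stand-alone sanity check of that arithmetic, not driver code:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	for (int udma = 0; udma <= 5; udma++) {
		int u_speed = min_int(2 - (udma & 1), udma);
		int u_clock = (udma == 5) ? 0x1000 : (udma > 2) ? 1 : 0;

		printf("UDMA%d: UDMATIMx=%02d clock=%#x\n",
		       udma, u_speed, u_clock);
	}
	return 0;
}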
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 0c574c065c62..a5e4dfe60b41 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -85,7 +85,6 @@ static int rz1000_fifo_disable(struct pci_dev *pdev)
static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -93,8 +92,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
};
const struct ata_port_info *ppi[] = { &info, NULL };
- if (!printed_version++)
- printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+ printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
if (rz1000_fifo_disable(pdev) == 0)
return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 94eaa432c40a..d344db42a002 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1257,6 +1257,7 @@ static struct scsi_host_template sata_fsl_sht = {
static struct ata_port_operations sata_fsl_ops = {
.inherits = &sata_pmp_port_ops,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = sata_fsl_qc_prep,
.qc_issue = sata_fsl_qc_issue,
.qc_fill_rtf = sata_fsl_qc_fill_rtf,
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 8d890cc5a7ee..4406902b4293 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -405,7 +405,7 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
struct ata_host *host = dev_instance;
struct inic_host_priv *hpriv = host->private_data;
u16 host_irq_stat;
- int i, handled = 0;;
+ int i, handled = 0;
host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c19417e02208..17f9ff9067a2 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4013,7 +4013,7 @@ static int mv_platform_probe(struct platform_device *pdev)
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
hpriv->base -= SATAHC0_REG_BASE;
/*
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 35bd5cc7f285..3cb69d5fb817 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -565,6 +565,19 @@ static void sil_freeze(struct ata_port *ap)
tmp |= SIL_MASK_IDE0_INT << ap->port_no;
writel(tmp, mmio_base + SIL_SYSCFG);
readl(mmio_base + SIL_SYSCFG); /* flush */
+
+ /* Ensure DMA_ENABLE is off.
+ *
+ * This is because the controller will not give us access to the
+ * taskfile registers while a DMA is in progress
+ */
+ iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
+ ap->ioaddr.bmdma_addr);
+
+ /* According to ata_bmdma_stop, an HDMA transition requires
+ * one PIO cycle. But we can't read a taskfile register.
+ */
+ ioread8(ap->ioaddr.bmdma_addr);
}
static void sil_thaw(struct ata_port *ap)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 77aa8d7ecec4..e6946fc527d0 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -846,6 +846,17 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
if (!ata_is_atapi(qc->tf.protocol)) {
prb = &cb->ata.prb;
sge = cb->ata.sge;
+ if (ata_is_data(qc->tf.protocol)) {
+ u16 prot = 0;
+ ctrl = PRB_CTRL_PROTOCOL;
+ if (ata_is_ncq(qc->tf.protocol))
+ prot |= PRB_PROT_NCQ;
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ prot |= PRB_PROT_WRITE;
+ else
+ prot |= PRB_PROT_READ;
+ prb->prot = cpu_to_le16(prot);
+ }
} else {
prb = &cb->atapi.prb;
sge = cb->atapi.sge;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 8f9833228619..f8a91bfd66a8 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -109,8 +109,9 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
+static unsigned int get_scr_cfg_addr(struct ata_link *link, unsigned int sc_reg)
{
+ struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
u8 pmr;
@@ -131,6 +132,9 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
break;
}
}
+ if (link->pmp)
+ addr += 0x10;
+
return addr;
}
@@ -138,24 +142,12 @@ static u32 sis_scr_cfg_read(struct ata_link *link,
unsigned int sc_reg, u32 *val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
- unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
- u32 val2 = 0;
- u8 pmr;
+ unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
return -EINVAL;
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
-
pci_read_config_dword(pdev, cfg_addr, val);
-
- if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
- (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
-
- *val |= val2;
- *val &= 0xfffffffb; /* avoid problems with powerdowned ports */
-
return 0;
}
@@ -163,28 +155,16 @@ static int sis_scr_cfg_write(struct ata_link *link,
unsigned int sc_reg, u32 val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
- unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
- u8 pmr;
-
- if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
- return -EINVAL;
-
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
+ unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
pci_write_config_dword(pdev, cfg_addr, val);
-
- if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
- (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- pci_write_config_dword(pdev, cfg_addr+0x10, val);
-
return 0;
}
static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
struct ata_port *ap = link->ap;
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u8 pmr;
+ void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
if (sc_reg > SCR_CONTROL)
return -EINVAL;
@@ -192,39 +172,23 @@ static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
if (ap->flags & SIS_FLAG_CFGSCR)
return sis_scr_cfg_read(link, sc_reg, val);
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
-
- *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
-
- if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
- (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- *val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
-
- *val &= 0xfffffffb;
-
+ *val = ioread32(base + sc_reg * 4);
return 0;
}
static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
struct ata_port *ap = link->ap;
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u8 pmr;
+ void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
-
if (ap->flags & SIS_FLAG_CFGSCR)
return sis_scr_cfg_write(link, sc_reg, val);
- else {
- iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
- if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
- (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
- return 0;
- }
+
+ iowrite32(val, base + (sc_reg * 4));
+ return 0;
}
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -236,7 +200,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 genctl, val;
u8 pmr;
u8 port2_start = 0x20;
- int rc;
+ int i, rc;
if (!printed_version++)
dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
@@ -319,6 +283,17 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap->flags & ATA_FLAG_SATA &&
+ ap->flags & ATA_FLAG_SLAVE_POSS) {
+ rc = ata_slave_link_init(ap);
+ if (rc)
+ return rc;
+ }
+ }
+
if (!(pi.flags & SIS_FLAG_CFGSCR)) {
void __iomem *mmio;
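The sata_sis.c rework replaces the old combined-mode special cases with a per-link offset: each PMP link's SCR block is taken to sit 0x10 bytes past the previous one, both in PCI config space and in MMIO. A stand-alone illustration of the address arithmetic (the base value is made up):

#include <stdio.h>

int main(void)
{
	unsigned long scr_base = 0xfe000080;	/* hypothetical MMIO offset */

	for (int pmp = 0; pmp < 3; pmp++)
		for (int sc_reg = 0; sc_reg < 3; sc_reg++)	/* STATUS/ERROR/CONTROL */
			printf("pmp %d scr %d -> %#lx\n", pmp, sc_reg,
			       scr_base + pmp * 0x10 + sc_reg * 4);
	return 0;
}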
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 5e41e6dd657b..db195abad698 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -155,7 +155,7 @@ struct aoedev {
u16 fw_ver; /* version of blade's firmware */
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
- struct request_queue blkq;
+ struct request_queue *blkq;
struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2307a271bdc9..95d344971eda 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -264,9 +264,13 @@ aoeblk_gdalloc(void *vp)
goto err_disk;
}
- blk_queue_make_request(&d->blkq, aoeblk_make_request);
- if (bdi_init(&d->blkq.backing_dev_info))
+ d->blkq = blk_alloc_queue(GFP_KERNEL);
+ if (!d->blkq)
goto err_mempool;
+ blk_queue_make_request(d->blkq, aoeblk_make_request);
+ d->blkq->backing_dev_info.name = "aoe";
+ if (bdi_init(&d->blkq->backing_dev_info))
+ goto err_blkq;
spin_lock_irqsave(&d->lock, flags);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor * AOE_PARTITIONS;
@@ -276,7 +280,7 @@ aoeblk_gdalloc(void *vp)
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
- gd->queue = &d->blkq;
+ gd->queue = d->blkq;
d->gd = gd;
d->flags &= ~DEVFL_GDALLOC;
d->flags |= DEVFL_UP;
@@ -287,6 +291,9 @@ aoeblk_gdalloc(void *vp)
aoedisk_add_sysfs(d);
return;
+err_blkq:
+ blk_cleanup_queue(d->blkq);
+ d->blkq = NULL;
err_mempool:
mempool_destroy(d->bufpool);
err_disk:
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index eeea477d9601..fa67027789aa 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -113,6 +113,7 @@ aoedev_freedev(struct aoedev *d)
if (d->bufpool)
mempool_destroy(d->bufpool);
skbpoolfree(d);
+ blk_cleanup_queue(d->blkq);
kfree(d);
}
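
The aoe change above replaces the request queue that used to be embedded in struct aoedev with one allocated by blk_alloc_queue(), so the error and teardown paths now have to release it explicitly. A minimal userspace sketch of that acquire/unwind pairing follows; every name in it is an illustrative stand-in, not the block-layer API.

/* Sketch of the goto-unwind pattern the aoe error path relies on: resources
 * acquired in order are released in reverse order on failure. */
#include <stdlib.h>

struct dev {
        void *queue;
        void *pool;
};

static int dev_setup(struct dev *d)
{
        d->pool = malloc(64);              /* stands in for mempool_create() */
        if (!d->pool)
                goto err;

        d->queue = malloc(128);            /* stands in for blk_alloc_queue() */
        if (!d->queue)
                goto err_pool;

        return 0;

err_pool:
        free(d->pool);                     /* stands in for mempool_destroy() */
err:
        return -1;
}

static void dev_teardown(struct dev *d)
{
        free(d->queue);                    /* stands in for blk_cleanup_queue() */
        free(d->pool);
}

int main(void)
{
        struct dev d;

        if (dev_setup(&d))
                return 1;
        dev_teardown(&d);
        return 0;
}
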
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8c9d50db5c3a..c58557790585 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -49,6 +49,7 @@
#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
/* cover 915 and 945 variants */
@@ -81,7 +82,8 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
extern int agp_memory_reserved;
@@ -1216,6 +1218,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
case PCI_DEVICE_ID_INTEL_G41_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
+ case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
*gtt_offset = *gtt_size = MB(2);
break;
default:
@@ -2195,6 +2198,8 @@ static const struct intel_driver_description {
"IGDNG/D", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
"IGDNG/M", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
+ "IGDNG/MA", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -2398,6 +2403,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
{ }
};
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 86105efb4eb6..0ecac7e532f6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1006,7 +1006,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->dev->release = (void (*)(struct device *)) kfree;
rc = device_register(priv->dev);
if (rc) {
- kfree(priv->dev);
+ put_device(priv->dev);
goto out_error_dev;
}
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index cd0ba51f7c80..0d8c5788b8e4 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -44,8 +44,8 @@
* want to register another driver on the same PCI id.
*/
static const struct pci_device_id pci_tbl[] = {
- { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(AMD, 0x7443), 0, },
+ { PCI_VDEVICE(AMD, 0x746b), 0, },
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 64d513f68368..4c4d4e140f98 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -46,8 +46,7 @@
* want to register another driver on the same PCI id.
*/
static const struct pci_device_id pci_tbl[] = {
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index afa8813e737a..645237bda682 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -822,6 +822,7 @@ static const struct file_operations zero_fops = {
* - permits private mappings, "copies" are taken of the source of zeros
*/
static struct backing_dev_info zero_bdi = {
+ .name = "char/mem",
.capabilities = BDI_CAP_MAP_COPY,
};
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8c7444857a4b..d8a9255e1a3f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -240,6 +240,7 @@
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
+#include <linux/fips.h>
#ifdef CONFIG_GENERIC_HARDIRQS
# include <linux/irq.h>
@@ -413,6 +414,7 @@ struct entropy_store {
unsigned add_ptr;
int entropy_count;
int input_rotate;
+ __u8 *last_data;
};
static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
{
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ unsigned long flags;
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
while (nbytes) {
extract_buf(r, tmp);
+
+ if (r->last_data) {
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
i = min_t(int, nbytes, EXTRACT_SIZE);
memcpy(buf, tmp, i);
nbytes -= i;
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ /* Enable continuous test in fips mode */
+ if (fips_enabled)
+ r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
}
static int rand_initialize(void)
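
The random.c hunks above add a continuous self-test for FIPS mode: every freshly extracted block is compared with the previously extracted one, and the kernel panics if the two match, since a stuck entropy source would otherwise go unnoticed. A rough userspace sketch of the same check; the block size and the read helper below are placeholders, not taken from the patch.

/* Continuous RNG self-test sketch: abort if two consecutive blocks repeat. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXTRACT_SIZE 10                    /* placeholder block size */

static void rng_read_block(unsigned char *buf)
{
        /* stand-in for the real extraction path */
        for (int i = 0; i < EXTRACT_SIZE; i++)
                buf[i] = (unsigned char)rand();
}

int main(void)
{
        unsigned char last[EXTRACT_SIZE], cur[EXTRACT_SIZE];

        rng_read_block(last);
        for (int i = 0; i < 1000; i++) {
                rng_read_block(cur);
                if (!memcmp(cur, last, EXTRACT_SIZE)) {
                        /* the kernel calls panic() here when fips_enabled */
                        fprintf(stderr, "duplicated RNG output\n");
                        return 1;
                }
                memcpy(last, cur, EXTRACT_SIZE);
        }
        return 0;
}
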
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5d7a02f63e1c..50eecfe1d724 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -24,6 +24,7 @@
#include <linux/sysrq.h>
#include <linux/kbd_kern.h>
#include <linux/proc_fs.h>
+#include <linux/nmi.h>
#include <linux/quotaops.h>
#include <linux/perf_counter.h>
#include <linux/kernel.h>
@@ -222,12 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
{
- struct pt_regs *regs = get_irq_regs();
- if (regs) {
- printk(KERN_INFO "CPU%d:\n", smp_processor_id());
- show_regs(regs);
+ /*
+ * Fall back to the workqueue based printing if the
+ * backtrace printing did not succeed or the
+ * architecture has no support for it:
+ */
+ if (!trigger_all_cpu_backtrace()) {
+ struct pt_regs *regs = get_irq_regs();
+
+ if (regs) {
+ printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+ show_regs(regs);
+ }
+ schedule_work(&sysrq_showallcpus);
}
- schedule_work(&sysrq_showallcpus);
}
static struct sysrq_key_op sysrq_showallcpus_op = {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index aec1931608aa..0b73e4ec1add 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
goto out_err;
}
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
if (request_locality(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
@@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
- /* Default timeouts */
- chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
- chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
-
dev_info(dev,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692372bf..b08403d7d1ca 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -13,7 +13,6 @@ if CRYPTO_HW
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
depends on X86 && !UML
- select CRYPTO_ALGAPI
help
Some VIA processors come with an integrated crypto engine
(so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES
config CRYPTO_DEV_PADLOCK_SHA
tristate "PadLock driver for SHA1 and SHA256 algorithms"
depends on CRYPTO_DEV_PADLOCK
+ select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
help
@@ -157,6 +157,19 @@ config S390_PRNG
ANSI X9.17 standard. The PRNG is usable via the char device
/dev/prandom.
+config CRYPTO_DEV_MV_CESA
+ tristate "Marvell's Cryptographic Engine"
+ depends on PLAT_ORION
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
+ help
+ This driver allows you to utilize the Cryptographic Engines and
+ Security Accelerator (CESA) which can be found on the Marvell Orion
+ and Kirkwood SoCs, such as QNAP's TS-209.
+
+ Currently the driver supports AES in ECB and CBC mode without DMA.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2bc8846..6ffcb3f7f942 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 61b6e1bec8c6..a33243c17b00 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
}
}
- tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct crypto4xx_ctx));
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 4c0dfb2b872e..46e899ac924e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -31,8 +31,6 @@
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
-#include <crypto/internal/hash.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
@@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
- if (alg->cra_type == &crypto_ablkcipher_type)
+ switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ default:
tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
- else if (alg->cra_type == &crypto_ahash_type)
- tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct crypto4xx_ctx));
+ break;
+ }
return 0;
}
@@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
}
int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
- struct crypto_alg *crypto_alg, int array_size)
+ struct crypto4xx_alg_common *crypto_alg,
+ int array_size)
{
struct crypto4xx_alg *alg;
int i;
@@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
return -ENOMEM;
alg->alg = crypto_alg[i];
- INIT_LIST_HEAD(&alg->alg.cra_list);
- if (alg->alg.cra_init == NULL)
- alg->alg.cra_init = crypto4xx_alg_init;
- if (alg->alg.cra_exit == NULL)
- alg->alg.cra_exit = crypto4xx_alg_exit;
alg->dev = sec_dev;
- rc = crypto_register_alg(&alg->alg);
+
+ switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ rc = crypto_register_ahash(&alg->alg.u.hash);
+ break;
+
+ default:
+ rc = crypto_register_alg(&alg->alg.u.cipher);
+ break;
+ }
+
if (rc) {
list_del(&alg->entry);
kfree(alg);
@@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry);
- crypto_unregister_alg(&alg->alg);
+ switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&alg->alg.u.hash);
+ break;
+
+ default:
+ crypto_unregister_alg(&alg->alg.u.cipher);
+ }
kfree(alg);
}
}
@@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
/**
* Supported Crypto Algorithms
*/
-struct crypto_alg crypto4xx_alg[] = {
+struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
- {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = {
.decrypt = crypto4xx_decrypt,
}
}
- },
- /* Hash SHA1 */
- {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ahash_type,
- .cra_init = crypto4xx_sha1_alg_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ahash = {
- .digestsize = SHA1_DIGEST_SIZE,
- .init = crypto4xx_hash_init,
- .update = crypto4xx_hash_update,
- .final = crypto4xx_hash_final,
- .digest = crypto4xx_hash_digest,
- }
- }
- },
+ }},
};
/**
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 1ef103449364..da9cbe3b9fc3 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,6 +22,8 @@
#ifndef __CRYPTO4XX_CORE_H__
#define __CRYPTO4XX_CORE_H__
+#include <crypto/internal/hash.h>
+
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx {
u16 sa_len;
};
+struct crypto4xx_alg_common {
+ u32 type;
+ union {
+ struct crypto_alg cipher;
+ struct ahash_alg hash;
+ } u;
+};
+
struct crypto4xx_alg {
struct list_head entry;
- struct crypto_alg alg;
+ struct crypto4xx_alg_common alg;
struct crypto4xx_device *dev;
};
-#define crypto_alg_to_crypto4xx_alg(x) \
- container_of(x, struct crypto4xx_alg, alg)
+static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
+ struct crypto_alg *x)
+{
+ switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ return container_of(__crypto_ahash_alg(x),
+ struct crypto4xx_alg, alg.u.hash);
+ }
+
+ return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+}
extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
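
The crypto4xx conversion above wraps each algorithm in a small tagged union (crypto4xx_alg_common) so that ciphers and ahash algorithms can be registered and unregistered through their own APIs, dispatching on the type field. Below is a self-contained sketch of that dispatch pattern; the types and register functions are hypothetical stand-ins for the kernel interfaces.

/* Tagged-union registration dispatch, reduced to plain C. */
#include <stdio.h>

enum alg_type { ALG_CIPHER, ALG_HASH };

struct cipher_alg { const char *name; };
struct hash_alg   { const char *name; };

struct alg_common {
        enum alg_type type;
        union {
                struct cipher_alg cipher;
                struct hash_alg hash;
        } u;
};

static int register_cipher(struct cipher_alg *a) { printf("cipher %s\n", a->name); return 0; }
static int register_hash(struct hash_alg *a)     { printf("hash %s\n", a->name); return 0; }

static int register_common(struct alg_common *a)
{
        switch (a->type) {
        case ALG_HASH:
                return register_hash(&a->u.hash);
        default:
                return register_cipher(&a->u.cipher);
        }
}

int main(void)
{
        struct alg_common algs[] = {
                { .type = ALG_CIPHER, .u.cipher = { .name = "cbc(aes)" } },
                { .type = ALG_HASH,   .u.hash   = { .name = "sha1" } },
        };

        for (unsigned i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
                register_common(&algs[i]);
        return 0;
}
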
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 000000000000..b21ef635f352
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPLv2
+ *
+ */
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "mv_cesa.h"
+/*
+ * STM:
+ *   /---------------------------------------\
+ *   |                                       | request complete
+ *  \./                                      |
+ *  IDLE -> new request -> BUSY -> done -> DEQUEUE
+ *                         /°\               |
+ *                          |                | more scatter entries
+ *                          \________________/
+ */
+enum engine_status {
+ ENGINE_IDLE,
+ ENGINE_BUSY,
+ ENGINE_W_DEQUEUE,
+};
+
+/**
+ * struct req_progress - used for every crypt request
+ * @src_sg_it: sg iterator for src
+ * @dst_sg_it: sg iterator for dst
+ * @sg_src_left: bytes left in src to process (scatter list)
+ * @src_start: offset to add to src start position (scatter list)
+ * @crypt_len: length of current crypt process
+ * @sg_dst_left: bytes left in dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+ * @total_req_bytes: total number of bytes processed (request).
+ *
+ * The sg helpers are used to iterate over the scatterlist. Since the size of
+ * the SRAM may be less than the scatter size, this struct is used to keep
+ * track of progress within the current scatterlist.
+ */
+struct req_progress {
+ struct sg_mapping_iter src_sg_it;
+ struct sg_mapping_iter dst_sg_it;
+
+ /* src mostly */
+ int sg_src_left;
+ int src_start;
+ int crypt_len;
+ /* dst mostly */
+ int sg_dst_left;
+ int dst_start;
+ int total_req_bytes;
+};
+
+struct crypto_priv {
+ void __iomem *reg;
+ void __iomem *sram;
+ int irq;
+ struct task_struct *queue_th;
+
+ /* the lock protects queue and eng_st */
+ spinlock_t lock;
+ struct crypto_queue queue;
+ enum engine_status eng_st;
+ struct ablkcipher_request *cur_req;
+ struct req_progress p;
+ int max_req_size;
+ int sram_size;
+};
+
+static struct crypto_priv *cpg;
+
+struct mv_ctx {
+ u8 aes_enc_key[AES_KEY_LEN];
+ u32 aes_dec_key[8];
+ int key_len;
+ u32 need_calc_aes_dkey;
+};
+
+enum crypto_op {
+ COP_AES_ECB,
+ COP_AES_CBC,
+};
+
+struct mv_req_ctx {
+ enum crypto_op op;
+ int decrypt;
+};
+
+static void compute_aes_dec_key(struct mv_ctx *ctx)
+{
+ struct crypto_aes_ctx gen_aes_key;
+ int key_pos;
+
+ if (!ctx->need_calc_aes_dkey)
+ return;
+
+ crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
+
+ key_pos = ctx->key_len + 24;
+ memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_256:
+ key_pos -= 2;
+ /* fall */
+ case AES_KEYSIZE_192:
+ key_pos -= 2;
+ memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
+ 4 * 4);
+ break;
+ }
+ ctx->need_calc_aes_dkey = 0;
+}
+
+static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ switch (len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->key_len = len;
+ ctx->need_calc_aes_dkey = 1;
+
+ memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
+ return 0;
+}
+
+static void setup_data_in(struct ablkcipher_request *req)
+{
+ int ret;
+ void *buf;
+
+ if (!cpg->p.sg_src_left) {
+ ret = sg_miter_next(&cpg->p.src_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+ cpg->p.src_start = 0;
+ }
+
+ cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
+
+ buf = cpg->p.src_sg_it.addr;
+ buf += cpg->p.src_start;
+
+ memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+
+ cpg->p.sg_src_left -= cpg->p.crypt_len;
+ cpg->p.src_start += cpg->p.crypt_len;
+}
+
+static void mv_process_current_q(int first_block)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct sec_accel_config op;
+
+ switch (req_ctx->op) {
+ case COP_AES_ECB:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+ break;
+ case COP_AES_CBC:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+ op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+ ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+ if (first_block)
+ memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
+ break;
+ }
+ if (req_ctx->decrypt) {
+ op.config |= CFG_DIR_DEC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
+ AES_KEY_LEN);
+ } else {
+ op.config |= CFG_DIR_ENC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
+ AES_KEY_LEN);
+ }
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ op.config |= CFG_AES_LEN_128;
+ break;
+ case AES_KEYSIZE_192:
+ op.config |= CFG_AES_LEN_192;
+ break;
+ case AES_KEYSIZE_256:
+ op.config |= CFG_AES_LEN_256;
+ break;
+ }
+ op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+ ENC_P_DST(SRAM_DATA_OUT_START);
+ op.enc_key_p = SRAM_DATA_KEY_P;
+
+ setup_data_in(req);
+ op.enc_len = cpg->p.crypt_len;
+ memcpy(cpg->sram + SRAM_CONFIG, &op,
+ sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static void mv_crypto_algo_completion(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ if (req_ctx->op != COP_AES_CBC)
+ return;
+
+ memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+}
+
+static void dequeue_complete_req(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
+
+ cpg->p.total_req_bytes += cpg->p.crypt_len;
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
+
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
+
+ dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+
+ memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+
+ cpg->p.sg_dst_left -= dst_copy;
+ cpg->p.crypt_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (cpg->p.crypt_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.total_req_bytes < req->nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+ mv_process_current_q(0);
+ } else {
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+ mv_crypto_algo_completion();
+ cpg->eng_st = ENGINE_IDLE;
+ req->base.complete(&req->base, 0);
+ }
+}
+
+static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+{
+ int i = 0;
+
+ do {
+ total_bytes -= sl[i].length;
+ i++;
+
+ } while (total_bytes > 0);
+
+ return i;
+}
+
+static void mv_enqueue_new_req(struct ablkcipher_request *req)
+{
+ int num_sgs;
+
+ cpg->cur_req = req;
+ memset(&cpg->p, 0, sizeof(struct req_progress));
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ num_sgs = count_sgs(req->dst, req->nbytes);
+ sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ mv_process_current_q(1);
+}
+
+static int queue_manag(void *data)
+{
+ cpg->eng_st = ENGINE_IDLE;
+ do {
+ struct ablkcipher_request *req;
+ struct crypto_async_request *async_req = NULL;
+ struct crypto_async_request *backlog;
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (cpg->eng_st == ENGINE_W_DEQUEUE)
+ dequeue_complete_req();
+
+ spin_lock_irq(&cpg->lock);
+ if (cpg->eng_st == ENGINE_IDLE) {
+ backlog = crypto_get_backlog(&cpg->queue);
+ async_req = crypto_dequeue_request(&cpg->queue);
+ if (async_req) {
+ BUG_ON(cpg->eng_st != ENGINE_IDLE);
+ cpg->eng_st = ENGINE_BUSY;
+ }
+ }
+ spin_unlock_irq(&cpg->lock);
+
+ if (backlog) {
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (async_req) {
+ req = container_of(async_req,
+ struct ablkcipher_request, base);
+ mv_enqueue_new_req(req);
+ async_req = NULL;
+ }
+
+ schedule();
+
+ } while (!kthread_should_stop());
+ return 0;
+}
+
+static int mv_handle_req(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cpg->lock, flags);
+ ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ spin_unlock_irqrestore(&cpg->lock, flags);
+ wake_up_process(cpg->queue_th);
+ return ret;
+}
+
+static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
+ return 0;
+}
+
+irqreturn_t crypto_int(int irq, void *priv)
+{
+ u32 val;
+
+ val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
+ if (!(val & SEC_INT_ACCEL0_DONE))
+ return IRQ_NONE;
+
+ val &= ~SEC_INT_ACCEL0_DONE;
+ writel(val, cpg->reg + FPGA_INT_STATUS);
+ writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
+ BUG_ON(cpg->eng_st != ENGINE_BUSY);
+ cpg->eng_st = ENGINE_W_DEQUEUE;
+ wake_up_process(cpg->queue_th);
+ return IRQ_HANDLED;
+}
+
+struct crypto_alg mv_aes_alg_ecb = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_ecb,
+ .decrypt = mv_dec_aes_ecb,
+ },
+ },
+};
+
+struct crypto_alg mv_aes_alg_cbc = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_cbc,
+ .decrypt = mv_dec_aes_cbc,
+ },
+ },
+};
+
+static int mv_probe(struct platform_device *pdev)
+{
+ struct crypto_priv *cp;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ if (cpg) {
+ printk(KERN_ERR "Second crypto dev?\n");
+ return -EEXIST;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res)
+ return -ENXIO;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ spin_lock_init(&cp->lock);
+ crypto_init_queue(&cp->queue, 50);
+ cp->reg = ioremap(res->start, res->end - res->start + 1);
+ if (!cp->reg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!res) {
+ ret = -ENXIO;
+ goto err_unmap_reg;
+ }
+ cp->sram_size = res->end - res->start + 1;
+ cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
+ cp->sram = ioremap(res->start, cp->sram_size);
+ if (!cp->sram) {
+ ret = -ENOMEM;
+ goto err_unmap_reg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 || irq == NO_IRQ) {
+ ret = irq;
+ goto err_unmap_sram;
+ }
+ cp->irq = irq;
+
+ platform_set_drvdata(pdev, cp);
+ cpg = cp;
+
+ cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
+ if (IS_ERR(cp->queue_th)) {
+ ret = PTR_ERR(cp->queue_th);
+ goto err_thread;
+ }
+
+ ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ cp);
+ if (ret)
+ goto err_unmap_sram;
+
+ writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
+ writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+
+ ret = crypto_register_alg(&mv_aes_alg_ecb);
+ if (ret)
+ goto err_reg;
+
+ ret = crypto_register_alg(&mv_aes_alg_cbc);
+ if (ret)
+ goto err_unreg_ecb;
+ return 0;
+err_unreg_ecb:
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+err_thread:
+ free_irq(irq, cp);
+err_reg:
+ kthread_stop(cp->queue_th);
+err_unmap_sram:
+ iounmap(cp->sram);
+err_unmap_reg:
+ iounmap(cp->reg);
+err:
+ kfree(cp);
+ cpg = NULL;
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int mv_remove(struct platform_device *pdev)
+{
+ struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+ crypto_unregister_alg(&mv_aes_alg_cbc);
+ kthread_stop(cp->queue_th);
+ free_irq(cp->irq, cp);
+ memset(cp->sram, 0, cp->sram_size);
+ iounmap(cp->sram);
+ iounmap(cp->reg);
+ kfree(cp);
+ cpg = NULL;
+ return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+ .probe = mv_probe,
+ .remove = mv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mv_crypto",
+ },
+};
+MODULE_ALIAS("platform:mv_crypto");
+
+static int __init mv_crypto_init(void)
+{
+ return platform_driver_register(&marvell_crypto);
+}
+module_init(mv_crypto_init);
+
+static void __exit mv_crypto_exit(void)
+{
+ platform_driver_unregister(&marvell_crypto);
+}
+module_exit(mv_crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
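
mv_cesa.c stages every request through a fixed SRAM window: setup_data_in() copies at most max_req_size bytes in, the engine is kicked, and dequeue_complete_req() drains the output and restarts the engine until the whole request has been processed. The userspace sketch below mirrors that chunking loop; the window size and the memcpy stand-in for the engine pass are assumptions for illustration only.

/* SRAM-window chunking sketch: process a request in bounded passes. */
#include <stddef.h>
#include <string.h>

#define SRAM_WINDOW 1024                   /* assumed engine data window */

/* stand-in for one engine pass over the data currently in the window */
static void engine_one_pass(unsigned char *dst, const unsigned char *src,
                            size_t len)
{
        memcpy(dst, src, len);             /* a real engine would encrypt here */
}

static void process_request(unsigned char *dst, const unsigned char *src,
                            size_t total)
{
        size_t done = 0;

        while (done < total) {
                size_t chunk = total - done;

                if (chunk > SRAM_WINDOW)
                        chunk = SRAM_WINDOW;
                engine_one_pass(dst + done, src + done, chunk);
                done += chunk;             /* mirrors total_req_bytes */
        }
}

int main(void)
{
        unsigned char src[3000], dst[3000];

        memset(src, 0xab, sizeof(src));
        process_request(dst, src, sizeof(src));
        return memcmp(dst, src, sizeof(src)) != 0;
}
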
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 000000000000..c3e25d3bb171
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
+#ifndef __MV_CRYPTO_H__
+
+#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DES_CMD_REG 0xdd58
+
+#define SEC_ACCEL_CMD 0xde00
+#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
+#define SEC_CMD_DISABLE_SEC (1 << 2)
+
+#define SEC_ACCEL_DESC_P0 0xde04
+#define SEC_DESC_P0_PTR(x) (x)
+
+#define SEC_ACCEL_DESC_P1 0xde14
+#define SEC_DESC_P1_PTR(x) (x)
+
+#define SEC_ACCEL_CFG 0xde08
+#define SEC_CFG_STOP_DIG_ERR (1 << 0)
+#define SEC_CFG_CH0_W_IDMA (1 << 7)
+#define SEC_CFG_CH1_W_IDMA (1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
+
+#define SEC_ACCEL_STATUS 0xde0c
+#define SEC_ST_ACT_0 (1 << 0)
+#define SEC_ST_ACT_1 (1 << 1)
+
+/*
+ * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in Errata
+ * 4.12. It looks like it was part of an IRQ controller in the FPGA that
+ * someone forgot to remove while switching to the core and moving to
+ * SEC_ACCEL_INT_STATUS.
+ */
+#define FPGA_INT_STATUS 0xdd68
+#define SEC_ACCEL_INT_STATUS 0xde20
+#define SEC_INT_AUTH_DONE (1 << 0)
+#define SEC_INT_DES_E_DONE (1 << 1)
+#define SEC_INT_AES_E_DONE (1 << 2)
+#define SEC_INT_AES_D_DONE (1 << 3)
+#define SEC_INT_ENC_DONE (1 << 4)
+#define SEC_INT_ACCEL0_DONE (1 << 5)
+#define SEC_INT_ACCEL1_DONE (1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
+
+#define SEC_ACCEL_INT_MASK 0xde24
+
+#define AES_KEY_LEN (8 * 4)
+
+struct sec_accel_config {
+
+ u32 config;
+#define CFG_OP_MAC_ONLY 0
+#define CFG_OP_CRYPT_ONLY 1
+#define CFG_OP_MAC_CRYPT 2
+#define CFG_OP_CRYPT_MAC 3
+#define CFG_MACM_MD5 (4 << 4)
+#define CFG_MACM_SHA1 (5 << 4)
+#define CFG_MACM_HMAC_MD5 (6 << 4)
+#define CFG_MACM_HMAC_SHA1 (7 << 4)
+#define CFG_ENCM_DES (1 << 8)
+#define CFG_ENCM_3DES (2 << 8)
+#define CFG_ENCM_AES (3 << 8)
+#define CFG_DIR_ENC (0 << 12)
+#define CFG_DIR_DEC (1 << 12)
+#define CFG_ENC_MODE_ECB (0 << 16)
+#define CFG_ENC_MODE_CBC (1 << 16)
+#define CFG_3DES_EEE (0 << 20)
+#define CFG_3DES_EDE (1 << 20)
+#define CFG_AES_LEN_128 (0 << 24)
+#define CFG_AES_LEN_192 (1 << 24)
+#define CFG_AES_LEN_256 (2 << 24)
+
+ u32 enc_p;
+#define ENC_P_SRC(x) (x)
+#define ENC_P_DST(x) ((x) << 16)
+
+ u32 enc_len;
+#define ENC_LEN(x) (x)
+
+ u32 enc_key_p;
+#define ENC_KEY_P(x) (x)
+
+ u32 enc_iv;
+#define ENC_IV_POINT(x) ((x) << 0)
+#define ENC_IV_BUF_POINT(x) ((x) << 16)
+
+ u32 mac_src_p;
+#define MAC_SRC_DATA_P(x) (x)
+#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
+
+ u32 mac_digest;
+ u32 mac_iv;
+}__attribute__ ((packed));
+ /*
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY | 8 * 4
+ * |-----------| 0x40
+ * | IV IN     | 4 * 4
+ * |-----------| 0x40 (inplace)
+ * | IV BUF    | 4 * 4
+ * |-----------| 0x50
+ * | DATA IN   | 16 * x (max ->max_req_size)
+ * |-----------| 0x50 (inplace operation)
+ * | DATA OUT  | 16 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+#define SRAM_CONFIG 0x00
+#define SRAM_DATA_KEY_P 0x20
+#define SRAM_DATA_IV 0x40
+#define SRAM_DATA_IV_BUF 0x40
+#define SRAM_DATA_IN_START 0x50
+#define SRAM_DATA_OUT_START 0x50
+
+#define SRAM_CFG_SPACE 0x50
+
+#endif
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a2c8e8514b63..76cb6b345e7b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -12,81 +12,43 @@
*
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"
-#define SHA1_DEFAULT_FALLBACK "sha1-generic"
-#define SHA256_DEFAULT_FALLBACK "sha256-generic"
+struct padlock_sha_desc {
+ struct shash_desc fallback;
+};
struct padlock_sha_ctx {
- char *data;
- size_t used;
- int bypass;
- void (*f_sha_padlock)(const char *in, char *out, int count);
- struct hash_desc fallback;
+ struct crypto_shash *fallback;
};
-static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
-{
- return crypto_tfm_ctx(tfm);
-}
-
-/* We'll need aligned address on the stack */
-#define NEAREST_ALIGNED(ptr) \
- ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
-
-static struct crypto_alg sha1_alg, sha256_alg;
-
-static void padlock_sha_bypass(struct crypto_tfm *tfm)
+static int padlock_sha_init(struct shash_desc *desc)
{
- if (ctx(tfm)->bypass)
- return;
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
- crypto_hash_init(&ctx(tfm)->fallback);
- if (ctx(tfm)->data && ctx(tfm)->used) {
- struct scatterlist sg;
-
- sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
- crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
- }
-
- ctx(tfm)->used = 0;
- ctx(tfm)->bypass = 1;
-}
-
-static void padlock_sha_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->used = 0;
- ctx(tfm)->bypass = 0;
+ dctx->fallback.tfm = ctx->fallback;
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_shash_init(&dctx->fallback);
}
-static void padlock_sha_update(struct crypto_tfm *tfm,
- const uint8_t *data, unsigned int length)
+static int padlock_sha_update(struct shash_desc *desc,
+ const u8 *data, unsigned int length)
{
- /* Our buffer is always one page. */
- if (unlikely(!ctx(tfm)->bypass &&
- (ctx(tfm)->used + length > PAGE_SIZE)))
- padlock_sha_bypass(tfm);
-
- if (unlikely(ctx(tfm)->bypass)) {
- struct scatterlist sg;
- sg_init_one(&sg, (uint8_t *)data, length);
- crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
- return;
- }
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
- ctx(tfm)->used += length;
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_shash_update(&dctx->fallback, data, length);
}
static inline void padlock_output_block(uint32_t *src,
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src,
*dst++ = swab32(*src++);
}
-static void padlock_do_sha1(const char *in, char *out, int count)
+static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128+16];
- char *result = NEAREST_ALIGNED(buf);
+ char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct sha1_state state;
+ unsigned int space;
+ unsigned int leftover;
int ts_state;
+ int err;
+
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+
+ if (state.count + count > ULONG_MAX)
+ return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+ leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
+ space = SHA1_BLOCK_SIZE - leftover;
+ if (space) {
+ if (count > space) {
+ err = crypto_shash_update(&dctx->fallback, in, space) ?:
+ crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+ count -= space;
+ in += space;
+ } else {
+ memcpy(state.buffer + leftover, in, count);
+ in = state.buffer;
+ count += leftover;
+ state.count &= ~(SHA1_BLOCK_SIZE - 1);
+ }
+ }
+
+ memcpy(result, &state.state, SHA1_DIGEST_SIZE);
- ((uint32_t *)result)[0] = SHA1_H0;
- ((uint32_t *)result)[1] = SHA1_H1;
- ((uint32_t *)result)[2] = SHA1_H2;
- ((uint32_t *)result)[3] = SHA1_H3;
- ((uint32_t *)result)[4] = SHA1_H4;
-
/* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
- : "+S"(in), "+D"(result)
- : "c"(count), "a"(0));
+ : \
+ : "c"((unsigned long)state.count + count), \
+ "a"((unsigned long)state.count), \
+ "S"(in), "D"(result));
irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+
+out:
+ return err;
}
-static void padlock_do_sha256(const char *in, char *out, int count)
+static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
+{
+ u8 buf[4];
+
+ return padlock_sha1_finup(desc, buf, 0, out);
+}
+
+static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128+16];
- char *result = NEAREST_ALIGNED(buf);
+ char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct sha256_state state;
+ unsigned int space;
+ unsigned int leftover;
int ts_state;
+ int err;
+
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+
+ if (state.count + count > ULONG_MAX)
+ return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+ leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
+ space = SHA256_BLOCK_SIZE - leftover;
+ if (space) {
+ if (count > space) {
+ err = crypto_shash_update(&dctx->fallback, in, space) ?:
+ crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+ count -= space;
+ in += space;
+ } else {
+ memcpy(state.buf + leftover, in, count);
+ in = state.buf;
+ count += leftover;
+ state.count &= ~(SHA1_BLOCK_SIZE - 1);
+ }
+ }
- ((uint32_t *)result)[0] = SHA256_H0;
- ((uint32_t *)result)[1] = SHA256_H1;
- ((uint32_t *)result)[2] = SHA256_H2;
- ((uint32_t *)result)[3] = SHA256_H3;
- ((uint32_t *)result)[4] = SHA256_H4;
- ((uint32_t *)result)[5] = SHA256_H5;
- ((uint32_t *)result)[6] = SHA256_H6;
- ((uint32_t *)result)[7] = SHA256_H7;
+ memcpy(result, &state.state, SHA256_DIGEST_SIZE);
/* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
- : "+S"(in), "+D"(result)
- : "c"(count), "a"(0));
+ : \
+ : "c"((unsigned long)state.count + count), \
+ "a"((unsigned long)state.count), \
+ "S"(in), "D"(result));
irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+
+out:
+ return err;
}
-static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
+static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
- if (unlikely(ctx(tfm)->bypass)) {
- crypto_hash_final(&ctx(tfm)->fallback, out);
- ctx(tfm)->bypass = 0;
- return;
- }
+ u8 buf[4];
- /* Pass the input buffer to PadLock microcode... */
- ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
-
- ctx(tfm)->used = 0;
+ return padlock_sha256_finup(desc, buf, 0, out);
}
static int padlock_cra_init(struct crypto_tfm *tfm)
{
+ struct crypto_shash *hash = __crypto_shash_cast(tfm);
const char *fallback_driver_name = tfm->__crt_alg->cra_name;
- struct crypto_hash *fallback_tfm;
-
- /* For now we'll allocate one page. This
- * could eventually be configurable one day. */
- ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
- if (!ctx(tfm)->data)
- return -ENOMEM;
+ struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *fallback_tfm;
+ int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
- free_page((unsigned long)(ctx(tfm)->data));
- return PTR_ERR(fallback_tfm);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
}
- ctx(tfm)->fallback.tfm = fallback_tfm;
+ ctx->fallback = fallback_tfm;
+ hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
-}
-
-static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->f_sha_padlock = padlock_do_sha1;
- return padlock_cra_init(tfm);
-}
-
-static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->f_sha_padlock = padlock_do_sha256;
-
- return padlock_cra_init(tfm);
+out:
+ return err;
}
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
- if (ctx(tfm)->data) {
- free_page((unsigned long)(ctx(tfm)->data));
- ctx(tfm)->data = NULL;
- }
+ struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx(tfm)->fallback.tfm);
- ctx(tfm)->fallback.tfm = NULL;
+ crypto_free_shash(ctx->fallback);
}
-static struct crypto_alg sha1_alg = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-padlock",
- .cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct padlock_sha_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
- .cra_init = padlock_sha1_cra_init,
- .cra_exit = padlock_cra_exit,
- .cra_u = {
- .digest = {
- .dia_digestsize = SHA1_DIGEST_SIZE,
- .dia_init = padlock_sha_init,
- .dia_update = padlock_sha_update,
- .dia_final = padlock_sha_final,
- }
+static struct shash_alg sha1_alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = padlock_sha_init,
+ .update = padlock_sha_update,
+ .finup = padlock_sha1_finup,
+ .final = padlock_sha1_final,
+ .descsize = sizeof(struct padlock_sha_desc),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct padlock_sha_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = padlock_cra_init,
+ .cra_exit = padlock_cra_exit,
}
};
-static struct crypto_alg sha256_alg = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-padlock",
- .cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct padlock_sha_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
- .cra_init = padlock_sha256_cra_init,
- .cra_exit = padlock_cra_exit,
- .cra_u = {
- .digest = {
- .dia_digestsize = SHA256_DIGEST_SIZE,
- .dia_init = padlock_sha_init,
- .dia_update = padlock_sha_update,
- .dia_final = padlock_sha_final,
- }
+static struct shash_alg sha256_alg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = padlock_sha_init,
+ .update = padlock_sha_update,
+ .finup = padlock_sha256_finup,
+ .final = padlock_sha256_final,
+ .descsize = sizeof(struct padlock_sha_desc),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct padlock_sha_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = padlock_cra_init,
+ .cra_exit = padlock_cra_exit,
}
};
@@ -272,11 +275,11 @@ static int __init padlock_init(void)
return -ENODEV;
}
- rc = crypto_register_alg(&sha1_alg);
+ rc = crypto_register_shash(&sha1_alg);
if (rc)
goto out;
- rc = crypto_register_alg(&sha256_alg);
+ rc = crypto_register_shash(&sha256_alg);
if (rc)
goto out_unreg1;
@@ -285,7 +288,7 @@ static int __init padlock_init(void)
return 0;
out_unreg1:
- crypto_unregister_alg(&sha1_alg);
+ crypto_unregister_shash(&sha1_alg);
out:
printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
return rc;
@@ -293,8 +296,8 @@ out:
static void __exit padlock_fini(void)
{
- crypto_unregister_alg(&sha1_alg);
- crypto_unregister_alg(&sha256_alg);
+ crypto_unregister_shash(&sha1_alg);
+ crypto_unregister_shash(&sha256_alg);
}
module_init(padlock_init);
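
Both finup paths above need to know how many bytes of the final, possibly partial block the fallback shash has buffered. The expression ((count - 1) & (BLOCK_SIZE - 1)) + 1 computes that while mapping an exact multiple of the block size to a full block rather than zero, so a just-completed block is fed to the XSHA instruction instead of being dropped. A small runnable check of that arithmetic; the sample counts are arbitrary.

/* Verify the "leftover bytes in the last block" computation for a 64-byte
 * block size (SHA-1/SHA-256). */
#include <stdio.h>

#define BLOCK 64

static unsigned int leftover_bytes(unsigned long count)
{
        return ((count - 1) & (BLOCK - 1)) + 1;
}

int main(void)
{
        /* expected: 1 -> 1, 63 -> 63, 64 -> 64 (full block kept), 65 -> 1, 130 -> 2 */
        unsigned long samples[] = { 1, 63, 64, 65, 130 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("count=%lu leftover=%u\n",
                       samples[i], leftover_bytes(samples[i]));
        return 0;
}
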
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c70775fd3ce2..c47ffe8a73ef 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -86,6 +86,25 @@ struct talitos_request {
void *context;
};
+/* per-channel fifo management */
+struct talitos_channel {
+ /* request fifo */
+ struct talitos_request *fifo;
+
+ /* number of requests pending in channel h/w fifo */
+ atomic_t submit_count ____cacheline_aligned;
+
+ /* request submission (head) lock */
+ spinlock_t head_lock ____cacheline_aligned;
+ /* index to next free descriptor request */
+ int head;
+
+ /* request release (tail) lock */
+ spinlock_t tail_lock ____cacheline_aligned;
+ /* index to next in-progress/done descriptor request */
+ int tail;
+};
+
struct talitos_private {
struct device *dev;
struct of_device *ofdev;
@@ -101,15 +120,6 @@ struct talitos_private {
/* SEC Compatibility info */
unsigned long features;
- /* next channel to be assigned next incoming descriptor */
- atomic_t last_chan;
-
- /* per-channel number of requests pending in channel h/w fifo */
- atomic_t *submit_count;
-
- /* per-channel request fifo */
- struct talitos_request **fifo;
-
/*
* length of the request fifo
* fifo_len is chfifo_len rounded up to next power of 2
@@ -117,15 +127,10 @@ struct talitos_private {
*/
unsigned int fifo_len;
- /* per-channel index to next free descriptor request */
- int *head;
-
- /* per-channel index to next in-progress/done descriptor request */
- int *tail;
+ struct talitos_channel *chan;
- /* per-channel request submission (head) and release (tail) locks */
- spinlock_t *head_lock;
- spinlock_t *tail_lock;
+ /* next channel to be assigned next incoming descriptor */
+ atomic_t last_chan ____cacheline_aligned;
/* request callback tasklet */
struct tasklet_struct done_task;
@@ -141,6 +146,12 @@ struct talitos_private {
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
+{
+ talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
+ talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+}
+
/*
* map virtual single (contiguous) pointer to h/w descriptor pointer
*/
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
unsigned char extent,
enum dma_data_direction dir)
{
+ dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
+
talitos_ptr->len = cpu_to_be16(len);
- talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
+ to_talitos_ptr(talitos_ptr, dma_addr);
talitos_ptr->j_extent = extent;
}
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
return -EIO;
}
- /* set done writeback and IRQ */
- setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
- TALITOS_CCCR_LO_CDIE);
+ /* set 36-bit addressing, done writeback enable and done IRQ enable */
+ setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+ TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
/* emulate SEC's round-robin channel fifo polling scheme */
ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
- spin_lock_irqsave(&priv->head_lock[ch], flags);
+ spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
- if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
+ if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
/* h/w fifo is full */
- spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
return -EAGAIN;
}
- head = priv->head[ch];
- request = &priv->fifo[ch][head];
+ head = priv->chan[ch].head;
+ request = &priv->chan[ch].fifo[head];
/* map descriptor and save caller data */
request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
request->context = context;
/* increment fifo head */
- priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
+ priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
smp_wmb();
request->desc = desc;
/* GO! */
wmb();
- out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
+ out_be32(priv->reg + TALITOS_FF(ch),
+ cpu_to_be32(upper_32_bits(request->dma_desc)));
+ out_be32(priv->reg + TALITOS_FF_LO(ch),
+ cpu_to_be32(lower_32_bits(request->dma_desc)));
- spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
return -EINPROGRESS;
}
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
unsigned long flags;
int tail, status;
- spin_lock_irqsave(&priv->tail_lock[ch], flags);
+ spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
- tail = priv->tail[ch];
- while (priv->fifo[ch][tail].desc) {
- request = &priv->fifo[ch][tail];
+ tail = priv->chan[ch].tail;
+ while (priv->chan[ch].fifo[tail].desc) {
+ request = &priv->chan[ch].fifo[tail];
/* descriptors with their done bits set don't get the error */
rmb();
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
request->desc = NULL;
/* increment fifo tail */
- priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
+ priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
- spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
- atomic_dec(&priv->submit_count[ch]);
+ atomic_dec(&priv->chan[ch].submit_count);
saved_req.callback(dev, saved_req.desc, saved_req.context,
status);
/* channel may resume processing in single desc error case */
if (error && !reset_ch && status == error)
return;
- spin_lock_irqsave(&priv->tail_lock[ch], flags);
- tail = priv->tail[ch];
+ spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+ tail = priv->chan[ch].tail;
}
- spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data)
static struct talitos_desc *current_desc(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
- int tail = priv->tail[ch];
+ int tail = priv->chan[ch].tail;
dma_addr_t cur_desc;
cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
- while (priv->fifo[ch][tail].dma_desc != cur_desc) {
+ while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1);
- if (tail == priv->tail[ch]) {
+ if (tail == priv->chan[ch].tail) {
dev_err(dev, "couldn't locate current descriptor\n");
return NULL;
}
}
- return priv->fifo[ch][tail].desc;
+ return priv->chan[ch].fifo[tail].desc;
}
/*
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
int n_sg = sg_count;
while (n_sg--) {
- link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
+ to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->cryptlen;
unsigned int authsize = ctx->authsize;
- unsigned int ivsize;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
int sg_count, ret;
int sg_link_tbl_len;
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
0, DMA_TO_DEVICE);
/* hmac data */
- map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
- sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
- DMA_TO_DEVICE);
+ map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
+ sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
/* cipher iv */
- ivsize = crypto_aead_ivsize(aead);
map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
DMA_TO_DEVICE);
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->src_is_chained);
if (sg_count == 1) {
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
} else {
sg_link_tbl_len = cryptlen;
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
&edesc->link_tbl[0]);
if (sg_count > 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
- src));
+ to_talitos_ptr(&desc->ptr[4],
+ sg_dma_address(areq->src));
}
}
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dst_is_chained);
if (sg_count == 1) {
- desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1];
- desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents + 1);
+ to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+ (edesc->src_nents + 1) *
+ sizeof(struct talitos_ptr));
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
link_tbl_ptr->len = cpu_to_be16(authsize);
/* icv data follows link tables */
- link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents +
- edesc->dst_nents + 2);
-
+ to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
+ (edesc->src_nents + edesc->dst_nents + 2) *
+ sizeof(struct talitos_ptr));
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* first DWORD empty */
desc->ptr[0].len = 0;
- desc->ptr[0].ptr = 0;
+ to_talitos_ptr(&desc->ptr[0], 0);
desc->ptr[0].j_extent = 0;
/* cipher iv */
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
edesc->src_is_chained);
if (sg_count == 1) {
- desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
} else {
sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
&edesc->link_tbl[0]);
if (sg_count > 1) {
+ to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
- desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
- src));
+ to_talitos_ptr(&desc->ptr[3],
+ sg_dma_address(areq->src));
}
}
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
edesc->dst_is_chained);
if (sg_count == 1) {
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1];
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ (edesc->src_nents + 1) *
+ sizeof(struct talitos_ptr));
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents + 1);
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* last DWORD empty */
desc->ptr[6].len = 0;
- desc->ptr[6].ptr = 0;
+ to_talitos_ptr(&desc->ptr[6], 0);
desc->ptr[6].j_extent = 0;
ret = talitos_submit(dev, desc, callback, areq);
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev)
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
- kfree(priv->submit_count);
- kfree(priv->tail);
- kfree(priv->head);
-
- if (priv->fifo)
- for (i = 0; i < priv->num_channels; i++)
- kfree(priv->fifo[i]);
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->chan[i].fifo)
+ kfree(priv->chan[i].fifo);
- kfree(priv->fifo);
- kfree(priv->head_lock);
- kfree(priv->tail_lock);
+ kfree(priv->chan);
if (priv->irq != NO_IRQ) {
free_irq(priv->irq, dev);
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev,
if (of_device_is_compatible(np, "fsl,sec2.1"))
priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
- priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
- GFP_KERNEL);
- priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
- GFP_KERNEL);
- if (!priv->head_lock || !priv->tail_lock) {
- dev_err(dev, "failed to allocate fifo locks\n");
+ priv->chan = kzalloc(sizeof(struct talitos_channel) *
+ priv->num_channels, GFP_KERNEL);
+ if (!priv->chan) {
+ dev_err(dev, "failed to allocate channel management space\n");
err = -ENOMEM;
goto err_out;
}
for (i = 0; i < priv->num_channels; i++) {
- spin_lock_init(&priv->head_lock[i]);
- spin_lock_init(&priv->tail_lock[i]);
- }
-
- priv->fifo = kmalloc(sizeof(struct talitos_request *) *
- priv->num_channels, GFP_KERNEL);
- if (!priv->fifo) {
- dev_err(dev, "failed to allocate request fifo\n");
- err = -ENOMEM;
- goto err_out;
+ spin_lock_init(&priv->chan[i].head_lock);
+ spin_lock_init(&priv->chan[i].tail_lock);
}
priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
for (i = 0; i < priv->num_channels; i++) {
- priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
- priv->fifo_len, GFP_KERNEL);
- if (!priv->fifo[i]) {
+ priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
+ priv->fifo_len, GFP_KERNEL);
+ if (!priv->chan[i].fifo) {
dev_err(dev, "failed to allocate request fifo %d\n", i);
err = -ENOMEM;
goto err_out;
}
}
- priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
- GFP_KERNEL);
- if (!priv->submit_count) {
- dev_err(dev, "failed to allocate fifo submit count space\n");
- err = -ENOMEM;
- goto err_out;
- }
for (i = 0; i < priv->num_channels; i++)
- atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));
+ atomic_set(&priv->chan[i].submit_count,
+ -(priv->chfifo_len - 1));
- priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
- priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
- if (!priv->head || !priv->tail) {
- dev_err(dev, "failed to allocate request index space\n");
- err = -ENOMEM;
- goto err_out;
- }
+ dma_set_mask(dev, DMA_BIT_MASK(36));
/* reset and initialize the h/w */
err = init_device(dev);
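The talitos.c hunks above funnel every descriptor pointer store through to_talitos_ptr() and set a 36-bit DMA mask in the probe path. The helper itself is defined in an earlier hunk not shown here; the following is only a sketch of what such a helper looks like, assuming the descriptor carries an extended-address field (eptr) next to the 32-bit ptr field:

	static void to_talitos_ptr(struct talitos_ptr *tptr, dma_addr_t dma_addr)
	{
		/* low 32 bits go into the regular pointer field */
		tptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
		/* bits above 32 go into the assumed extended-address field;
		 * only meaningful once TALITOS_CCCR_LO_EAE is set on the channel */
		tptr->eptr = upper_32_bits(dma_addr);
	}

With dma_set_mask(dev, DMA_BIT_MASK(36)) in place, sg_dma_address() may legitimately return addresses above 4 GiB, which is why the plain cpu_to_be32() stores were no longer sufficient.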
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 575981f0cfda..ff5a1450e145 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -57,6 +57,7 @@
#define TALITOS_CCCR_RESET 0x1 /* channel reset */
#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
+#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 24c84ae81527..938100f14b16 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -568,35 +568,76 @@ const struct dmi_device * dmi_find_device(int type, const char *name,
EXPORT_SYMBOL(dmi_find_device);
/**
- * dmi_get_year - Return year of a DMI date
- * @field: data index (like dmi_get_system_info)
+ * dmi_get_date - parse a DMI date
+ * @field: data index (see enum dmi_field)
+ * @yearp: optional out parameter for the year
+ * @monthp: optional out parameter for the month
+ * @dayp: optional out parameter for the day
*
- * Returns -1 when the field doesn't exist. 0 when it is broken.
+ * The date field is assumed to be in the form resembling
+ * [mm[/dd]]/yy[yy] and the result is stored in the out
+ * parameters, any or all of which can be omitted.
+ *
+ * If the field doesn't exist, all out parameters are set to zero
+ * and false is returned. Otherwise, true is returned with any
+ * invalid part of the date set to zero.
+ *
+ * On return, year, month and day are guaranteed to be in the
+ * range of [0,9999], [0,12] and [0,31] respectively.
*/
-int dmi_get_year(int field)
+bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
- int year;
- const char *s = dmi_get_system_info(field);
+ int year = 0, month = 0, day = 0;
+ bool exists;
+ const char *s, *y;
+ char *e;
- if (!s)
- return -1;
- if (*s == '\0')
- return 0;
- s = strrchr(s, '/');
- if (!s)
- return 0;
+ s = dmi_get_system_info(field);
+ exists = s;
+ if (!exists)
+ goto out;
- s += 1;
- year = simple_strtoul(s, NULL, 0);
- if (year && year < 100) { /* 2-digit year */
+ /*
+ * Determine year first. We assume the date string resembles
+ * mm/dd/yy[yy] but the original code extracted only the year
+ * from the end. Keep the behavior in the spirit of no
+ * surprises.
+ */
+ y = strrchr(s, '/');
+ if (!y)
+ goto out;
+
+ y++;
+ year = simple_strtoul(y, &e, 10);
+ if (y != e && year < 100) { /* 2-digit year */
year += 1900;
if (year < 1996) /* no dates < spec 1.0 */
year += 100;
}
+ if (year > 9999) /* year should fit in %04d */
+ year = 0;
+
+ /* parse the mm and dd */
+ month = simple_strtoul(s, &e, 10);
+ if (s == e || *e != '/' || !month || month > 12) {
+ month = 0;
+ goto out;
+ }
- return year;
+ s = e + 1;
+ day = simple_strtoul(s, &e, 10);
+ if (s == y || s == e || *e != '/' || day > 31)
+ day = 0;
+out:
+ if (yearp)
+ *yearp = year;
+ if (monthp)
+ *monthp = month;
+ if (dayp)
+ *dayp = day;
+ return exists;
}
-EXPORT_SYMBOL(dmi_get_year);
+EXPORT_SYMBOL(dmi_get_date);
/**
* dmi_walk - Walk the DMI table and get called back for every record
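A minimal usage sketch for the new interface (not part of this patch): DMI_BIOS_DATE is one of the existing dmi_field indices, and the quirk logic and messages are made up for illustration.

	int year, month, day;

	if (!dmi_get_date(DMI_BIOS_DATE, &year, &month, &day)) {
		pr_info("no DMI BIOS date field\n");
	} else if (year && year < 2006) {
		/* month/day may be 0 if that part of the string was invalid */
		pr_info("BIOS dated %04d-%02d-%02d, enabling legacy quirk\n",
			year, month, day);
	}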
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c07a755b3a3..80e5ba490dc2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2267,8 +2267,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
fence_list) {
old_obj = old_obj_priv->obj;
- reg = &dev_priv->fence_regs[old_obj_priv->fence_reg];
-
if (old_obj_priv->pin_count)
continue;
@@ -2290,8 +2288,11 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
*/
i915_gem_object_flush_gpu_write_domain(old_obj);
ret = i915_gem_object_wait_rendering(old_obj);
- if (ret != 0)
+ if (ret != 0) {
+ drm_gem_object_unreference(old_obj);
return ret;
+ }
+
break;
}
@@ -2299,10 +2300,14 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
* Zap this virtual mapping so we can set up a fence again
* for this object next time we need it.
*/
- i915_gem_release_mmap(reg->obj);
+ i915_gem_release_mmap(old_obj);
+
i = old_obj_priv->fence_reg;
+ reg = &dev_priv->fence_regs[i];
+
old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
list_del_init(&old_obj_priv->fence_list);
+
drm_gem_object_unreference(old_obj);
}
@@ -4227,15 +4232,11 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- int ret;
-
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- ret = i915_gem_idle(dev);
drm_irq_uninstall(dev);
-
- return ret;
+ return i915_gem_idle(dev);
}
void
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3fadb5358858..748ed50c55ca 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2005,7 +2005,21 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
return;
}
-const static int latency_ns = 3000; /* default for non-igd platforms */
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+const static int latency_ns = 5000;
static int intel_get_fifo_size(struct drm_device *dev, int plane)
{
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f2afc4af4bc9..2b914d732076 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1263,7 +1263,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (IS_eDP(intel_output)) {
intel_output->crtc_mask = (1 << 1);
- intel_output->clone_mask = (1 << INTEL_OUTPUT_EDP);
+ intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
} else
intel_output->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25aa6facc12d..26a6227c15fe 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -74,6 +74,7 @@
#define INTEL_LVDS_CLONE_BIT 14
#define INTEL_DVO_TMDS_CLONE_BIT 15
#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2fbe13a0de81..5b1c9e9fdba0 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1730,6 +1730,7 @@ intel_tv_init(struct drm_device *dev)
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
tv_priv = (struct intel_tv_priv *)(intel_output + 1);
intel_output->type = INTEL_OUTPUT_TVOUT;
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 053f4ec397f7..051bca6e3a4f 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -995,7 +995,7 @@ static const unsigned r300_reg_safe_bm[159] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
+ 0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
};
static int r300_packet0_check(struct radeon_cs_parser *p,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 7ca6c13569b5..93d8f8889302 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -266,6 +266,7 @@ static struct radeon_asic rs400_asic = {
/*
* rs600.
*/
+int rs600_init(struct radeon_device *dev);
void rs600_errata(struct radeon_device *rdev);
void rs600_vram_info(struct radeon_device *rdev);
int rs600_mc_init(struct radeon_device *rdev);
@@ -281,7 +282,7 @@ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs600_asic = {
- .init = &r300_init,
+ .init = &rs600_init,
.errata = &rs600_errata,
.vram_info = &rs600_vram_info,
.gpu_reset = &r300_gpu_reset,
@@ -316,7 +317,6 @@ static struct radeon_asic rs600_asic = {
/*
* rs690,rs740
*/
-int rs690_init(struct radeon_device *rdev);
void rs690_errata(struct radeon_device *rdev);
void rs690_vram_info(struct radeon_device *rdev);
int rs690_mc_init(struct radeon_device *rdev);
@@ -325,7 +325,7 @@ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs690_asic = {
- .init = &rs690_init,
+ .init = &rs600_init,
.errata = &rs690_errata,
.vram_info = &rs690_vram_info,
.gpu_reset = &r300_gpu_reset,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 7e8ce983a908..02fd11aad6a2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -409,3 +409,68 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
((reg) & RS600_MC_ADDR_MASK));
WREG32(RS600_MC_DATA, v);
}
+
+static const unsigned rs600_reg_safe_bm[219] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+ 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
+ 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
+ 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
+ 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+ 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+};
+
+int rs600_init(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bc6b7c5339bc..879882533e45 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -653,67 +653,3 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
}
-static const unsigned rs690_reg_safe_bm[219] = {
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0x17FF1FFF,0xFFFFFFFC,0xFFFFFFFF,0xFF30FFBF,
- 0xFFFFFFF8,0xC3E6FFFF,0xFFFFF6DF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFF03F,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFEFCE,0xF00EBFFF,0x007C0000,
- 0xF0000078,0xFF000009,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFF7FF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFC78,0xFFFFFFFF,0xFFFFFFFE,0xFFFFFFFF,
- 0x38FF8F50,0xFFF88082,0xF000000C,0xFAE009FF,
- 0x0000FFFF,0xFFFFFFFF,0xFFFFFFFF,0x00000000,
- 0x00000000,0x0000C100,0x00000000,0x00000000,
- 0x00000000,0x00000000,0x00000000,0x00000000,
- 0x00000000,0xFFFF0000,0xFFFFFFFF,0xFF80FFFF,
- 0x00000000,0x00000000,0x00000000,0x00000000,
- 0x0003FC01,0xFFFFFFF8,0xFE800B19,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
- 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
-};
-
-int rs690_init(struct radeon_device *rdev)
-{
- rdev->config.r300.reg_safe_bm = rs690_reg_safe_bm;
- rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs690_reg_safe_bm);
- return 0;
-}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 31a7f668ae5a..0566fb67e460 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -508,7 +508,7 @@ static const unsigned r500_reg_safe_bm[219] = {
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF,
+ 0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index 923cbfe259d3..6396c3ad3252 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -177,6 +177,7 @@ static const struct pci_device_id atiixp_pci_tbl[] = {
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), 0 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 527908ff298c..063b933d864a 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = {
PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 8f9509e1ebf7..55d093a36ae4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
* In either case, must tell the provider to reject.
*/
cm_id_priv->state = IW_CM_STATE_DESTROYING;
+ cm_id->device->iwcm->reject(cm_id, NULL, 0);
break;
case IW_CM_STATE_CONN_SENT:
case IW_CM_STATE_DESTROYING:
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index de922a04ca2d..7522008fda86 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2,6 +2,7 @@
* Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
+int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+
+module_param_named(send_queue_size, mad_sendq_size, int, 0444);
+MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
+module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
+MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+
static struct kmem_cache *ib_mad_cache;
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
/* Port list lock */
-static spinlock_t ib_mad_port_list_lock;
-
+static DEFINE_SPINLOCK(ib_mad_port_list_lock);
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
unsigned long delay;
if (list_empty(&mad_agent_priv->wait_list)) {
- cancel_delayed_work(&mad_agent_priv->timed_work);
+ __cancel_delayed_work(&mad_agent_priv->timed_work);
} else {
mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
struct ib_mad_send_wr_private,
@@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
if (time_after(mad_agent_priv->timeout,
mad_send_wr->timeout)) {
mad_agent_priv->timeout = mad_send_wr->timeout;
- cancel_delayed_work(&mad_agent_priv->timed_work);
+ __cancel_delayed_work(&mad_agent_priv->timed_work);
delay = mad_send_wr->timeout - jiffies;
if ((long)delay <= 0)
delay = 1;
@@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
/* Reschedule a work item if we have a shorter timeout */
if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
- cancel_delayed_work(&mad_agent_priv->timed_work);
+ __cancel_delayed_work(&mad_agent_priv->timed_work);
queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
&mad_agent_priv->timed_work, delay);
}
@@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
qp_init_attr.send_cq = qp_info->port_priv->cq;
qp_init_attr.recv_cq = qp_info->port_priv->cq;
qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
- qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
+ qp_init_attr.cap.max_send_wr = mad_sendq_size;
+ qp_init_attr.cap.max_recv_wr = mad_recvq_size;
qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
qp_init_attr.qp_type = qp_type;
@@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
goto error;
}
/* Use minimum queue sizes unless the CQ is resized */
- qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
- qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
+ qp_info->send_queue.max_active = mad_sendq_size;
+ qp_info->recv_queue.max_active = mad_recvq_size;
return 0;
error:
@@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
init_mad_qp(port_priv, &port_priv->qp_info[0]);
init_mad_qp(port_priv, &port_priv->qp_info[1]);
- cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
+ cq_size = (mad_sendq_size + mad_recvq_size) * 2;
port_priv->cq = ib_create_cq(port_priv->device,
ib_mad_thread_completion_handler,
NULL, port_priv, cq_size, 0);
@@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void)
{
int ret;
- spin_lock_init(&ib_mad_port_list_lock);
+ mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
+ mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
+
+ mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
+ mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
ib_mad_cache = kmem_cache_create("ib_mad",
sizeof(struct ib_mad_private),
@@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)
module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);
-
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 05ce331733b0..9430ab4969c5 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -2,6 +2,7 @@
* Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -49,6 +50,8 @@
/* QP and CQ parameters */
#define IB_MAD_QP_SEND_SIZE 128
#define IB_MAD_QP_RECV_SIZE 512
+#define IB_MAD_QP_MIN_SIZE 64
+#define IB_MAD_QP_MAX_SIZE 8192
#define IB_MAD_SEND_REQ_MAX_SG 2
#define IB_MAD_RECV_REQ_MAX_SG 1
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 107f170c57cd..8d82ba171353 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,6 +106,8 @@ struct mcast_group {
struct ib_sa_query *query;
int query_id;
u16 pkey_index;
+ u8 leave_state;
+ int retries;
};
struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
rec = group->rec;
rec.join_state = leave_state;
+ group->leave_state = leave_state;
ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
{
struct mcast_group *group = context;
- mcast_work_handler(&group->work);
+ if (status && group->retries > 0 &&
+ !send_leave(group, group->leave_state))
+ group->retries--;
+ else
+ mcast_work_handler(&group->work);
}
static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
if (!group)
return NULL;
+ group->retries = 3;
group->port = port;
group->rec.mgid = *mgid;
group->pkey_index = MCAST_INVALID_PKEY_INDEX;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1865049e80f7..82543716d59e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
.remove = ib_sa_remove_one
};
-static spinlock_t idr_lock;
+static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);
-static spinlock_t tid_lock;
+static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
#define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
{
int ret;
- spin_lock_init(&idr_lock);
- spin_lock_init(&tid_lock);
-
get_random_bytes(&tid, sizeof tid);
ret = ib_register_client(&sa_client);
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 87236753bce9..5855e4405d9b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
hop_cnt = smp->hop_cnt;
/* See section 14.2.2.2, Vol 1 IB spec */
+ /* C14-6 -- valid hop_cnt values are from 0 to 63 */
+ if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+ return IB_SMI_DISCARD;
+
if (!ib_get_smp_direction(smp)) {
/* C14-9:1 */
if (hop_cnt && hop_ptr == 0) {
@@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
hop_cnt = smp->hop_cnt;
/* See section 14.2.2.2, Vol 1 IB spec */
+ /* C14-6 -- valid hop_cnt values are from 0 to 63 */
+ if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+ return IB_SMI_DISCARD;
+
if (!ib_get_smp_direction(smp)) {
/* C14-9:1 -- sender should have incremented hop_ptr */
if (hop_cnt && hop_ptr == 0)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index eb36a81dd09b..d3fff9e008a3 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
-static spinlock_t map_lock;
+static DEFINE_SPINLOCK(map_lock);
static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (hdr.command < 0 ||
hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
- !uverbs_cmd_table[hdr.command] ||
- !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+ !uverbs_cmd_table[hdr.command])
return -EINVAL;
if (!file->ucontext &&
hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
return -EINVAL;
+ if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+ return -ENOSYS;
+
return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
hdr.in_words * 4, hdr.out_words * 4);
}
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
{
int ret;
- spin_lock_init(&map_lock);
-
ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
"infiniband_verbs");
if (ret) {
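The mad.c, sa_query.c and uverbs_main.c hunks above all make the same change: a lock declared bare and initialized later with spin_lock_init() becomes a statically initialized DEFINE_SPINLOCK(), so the init call, and any window in which the lock could be taken uninitialized, disappears. A minimal before/after sketch with made-up names:

	/* before */
	static spinlock_t example_lock;

	static int __init example_init(void)
	{
		spin_lock_init(&example_lock);
		return 0;
	}

	/* after: initialized at compile time, no init step needed */
	static DEFINE_SPINLOCK(example_lock);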
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 0cfbb6d2f762..8250740c94b0 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);
static void c2_print_macaddr(struct net_device *netdev)
{
- pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
- "IRQ %u\n", netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
- netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
- netdev->irq);
+ pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
}
static void c2_set_rxbufsize(struct c2_port *c2_port)
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index f1948fad85d7..ad723bd8bf49 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
/* Register pseudo network device */
dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
if (!dev->pseudo_netdev)
- goto out3;
+ goto out;
ret = register_netdev(dev->pseudo_netdev);
if (ret)
- goto out2;
+ goto out_free_netdev;
pr_debug("%s:%u\n", __func__, __LINE__);
strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.post_recv = c2_post_receive;
dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+ if (dev->ibdev.iwcm == NULL) {
+ ret = -ENOMEM;
+ goto out_unregister_netdev;
+ }
dev->ibdev.iwcm->add_ref = c2_add_ref;
dev->ibdev.iwcm->rem_ref = c2_rem_ref;
dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)
ret = ib_register_device(&dev->ibdev);
if (ret)
- goto out1;
+ goto out_free_iwcm;
for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
ret = device_create_file(&dev->ibdev.dev,
c2_dev_attributes[i]);
if (ret)
- goto out0;
+ goto out_unregister_ibdev;
}
- goto out3;
+ goto out;
-out0:
+out_unregister_ibdev:
ib_unregister_device(&dev->ibdev);
-out1:
+out_free_iwcm:
+ kfree(dev->ibdev.iwcm);
+out_unregister_netdev:
unregister_netdev(dev->pseudo_netdev);
-out2:
+out_free_netdev:
free_netdev(dev->pseudo_netdev);
-out3:
+out:
pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 62f9cf2f94ec..72ed3396b721 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
wqe->qpcaps = attr->qpcaps;
wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
wqe->rqe_count = cpu_to_be16(attr->rqe_count);
- wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type));
+ wqe->flags_rtr_type = cpu_to_be16(attr->flags |
+ V_RTR_TYPE(attr->rtr_type) |
+ V_CHAN(attr->chan));
wqe->ord = cpu_to_be32(attr->ord);
wqe->ird = cpu_to_be32(attr->ird);
wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1032,6 +1034,7 @@ err3:
err2:
cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
+ rdev_p->t3cdev_p->ulp = NULL;
list_del(&rdev_p->entry);
return err;
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 32e3b1461d81..a197a5b7ac7f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types {
#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
+#define S_CHAN 4
+#define M_CHAN 0x3
+#define V_CHAN(x) ((x) << S_CHAN)
+#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
+
struct t3_rdma_init_attr {
u32 tid;
u32 qpid;
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr {
u16 flags;
u16 rqe_count;
u32 irs;
+ u32 chan;
};
struct t3_rdma_init_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 26fc0a4eaa74..b0ea0105ddf6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
-static void iwch_err_handler(struct t3cdev *, u32, u32);
+static void iwch_event_handler(struct t3cdev *, u32, u32);
struct cxgb3_client t3c_client = {
.name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
.remove = close_rnic_dev,
.handlers = t3c_handlers,
.redirect = iwch_ep_redirect,
- .err_handler = iwch_err_handler
+ .event_handler = iwch_event_handler
};
static LIST_HEAD(dev_list);
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
static void open_rnic_dev(struct t3cdev *tdev)
{
struct iwch_dev *rnicp;
- static int vers_printed;
PDBG("%s t3cdev %p\n", __func__, tdev);
- if (!vers_printed++)
- printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
+ printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
DRV_VERSION);
rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
if (!rnicp) {
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev)
mutex_unlock(&dev_mutex);
}
-static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
{
struct cxio_rdev *rdev = tdev->ulp;
- struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
+ struct iwch_dev *rnicp;
struct ib_event event;
+ u32 portnum = port_id + 1;
- if (status == OFFLOAD_STATUS_DOWN) {
+ if (!rdev)
+ return;
+ rnicp = rdev_to_iwch_dev(rdev);
+ switch (evt) {
+ case OFFLOAD_STATUS_DOWN: {
rdev->flags = CXIO_ERROR_FATAL;
-
- event.device = &rnicp->ibdev;
event.event = IB_EVENT_DEVICE_FATAL;
- event.element.port_num = 0;
- ib_dispatch_event(&event);
+ break;
+ }
+ case OFFLOAD_PORT_DOWN: {
+ event.event = IB_EVENT_PORT_ERR;
+ break;
+ }
+ case OFFLOAD_PORT_UP: {
+ event.event = IB_EVENT_PORT_ACTIVE;
+ break;
+ }
}
+ event.device = &rnicp->ibdev;
+ event.element.port_num = portnum;
+ ib_dispatch_event(&event);
+
return;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 52d7bb0c2a12..66b41351910a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref)
ep = container_of(container_of(kref, struct iwch_ep_common, kref),
struct iwch_ep, com);
PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
- if (ep->com.flags & RELEASE_RESOURCES) {
+ if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref)
static void release_ep_resources(struct iwch_ep *ep)
{
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- ep->com.flags |= RELEASE_RESOURCES;
+ set_bit(RELEASE_RESOURCES, &ep->com.flags);
put_ep(&ep->com);
}
@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep)
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
event.provider_data = ep;
- if (state_read(&ep->parent_ep->com) != DEAD)
+ if (state_read(&ep->parent_ep->com) != DEAD) {
+ get_ep(&ep->com);
ep->parent_ep->com.cm_id->event_handler(
ep->parent_ep->com.cm_id,
&event);
+ }
put_ep(&ep->parent_ep->com);
ep->parent_ep = NULL;
}
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
* We get 2 abort replies from the HW. The first one must
* be ignored except for scribbling that we need one more.
*/
- if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
- ep->com.flags |= ABORT_REQ_IN_PROGRESS;
+ if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
return CPL_RET_BUF_DONE;
}
@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
/*
* We're gonna mark this puppy DEAD, but keep
* the reference on it until the ULP accepts or
- * rejects the CR.
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see iwch_accept_cr()).
*/
__state_set(&ep->com, CLOSING);
- get_ep(&ep->com);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p\n", ep);
+ wake_up(&ep->com.waitq);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
* We get 2 peer aborts from the HW. The first one must
* be ignored except for scribbling that we need one more.
*/
- if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
- ep->com.flags |= PEER_ABORT_IN_PROGRESS;
+ if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
return CPL_RET_BUF_DONE;
}
@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
/*
* We're gonna mark this puppy DEAD, but keep
* the reference on it until the ULP accepts or
- * rejects the CR.
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see iwch_accept_cr()).
*/
- get_ep(&ep->com);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p\n", ep);
+ wake_up(&ep->com.waitq);
break;
case MORIBUND:
case CLOSING:
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
err = send_mpa_reject(ep, pdata, pdata_len);
err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
}
+ put_ep(&ep->com);
return 0;
}
@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD)
- return -ECONNRESET;
+ if (state_read(&ep->com) == DEAD) {
+ err = -ECONNRESET;
+ goto err;
+ }
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
BUG_ON(!qp);
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
(conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
abort_connection(ep, NULL, GFP_KERNEL);
- return -EINVAL;
+ err = -EINVAL;
+ goto err;
}
cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
ep->com.qp = qp;
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
- get_ep(&ep->com);
-
/* bind QP to EP and move to RTS */
attrs.mpa_attr = ep->mpa_attr;
attrs.max_ird = ep->ird;
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = iwch_modify_qp(ep->com.qp->rhp,
ep->com.qp, mask, &attrs, 1);
if (err)
- goto err;
+ goto err1;
/* if needed, wait for wr_ack */
if (iwch_rqes_posted(qp)) {
wait_event(ep->com.waitq, ep->com.rpl_done);
err = ep->com.rpl_err;
if (err)
- goto err;
+ goto err1;
}
err = send_mpa_reply(ep, conn_param->private_data,
conn_param->private_data_len);
if (err)
- goto err;
+ goto err1;
state_set(&ep->com, FPDU_MODE);
established_upcall(ep);
put_ep(&ep->com);
return 0;
-err:
+err1:
ep->com.cm_id = NULL;
ep->com.qp = NULL;
cm_id->rem_ref(cm_id);
+err:
put_ep(&ep->com);
return err;
}
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
ep->com.state = CLOSING;
start_ep_timer(ep);
}
+ set_bit(CLOSE_SENT, &ep->com.flags);
break;
case CLOSING:
- close = 1;
- if (abrupt) {
- stop_ep_timer(ep);
- ep->com.state = ABORTING;
- } else
- ep->com.state = MORIBUND;
+ if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+ close = 1;
+ if (abrupt) {
+ stop_ep_timer(ep);
+ ep->com.state = ABORTING;
+ } else
+ ep->com.state = MORIBUND;
+ }
break;
case MORIBUND:
case ABORTING:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 43c0aea7eadc..b9efadfffb4f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -145,9 +145,10 @@ enum iwch_ep_state {
};
enum iwch_ep_flags {
- PEER_ABORT_IN_PROGRESS = (1 << 0),
- ABORT_REQ_IN_PROGRESS = (1 << 1),
- RELEASE_RESOURCES = (1 << 2),
+ PEER_ABORT_IN_PROGRESS = 0,
+ ABORT_REQ_IN_PROGRESS = 1,
+ RELEASE_RESOURCES = 2,
+ CLOSE_SENT = 3,
};
struct iwch_ep_common {
@@ -162,7 +163,7 @@ struct iwch_ep_common {
wait_queue_head_t waitq;
int rpl_done;
int rpl_err;
- u32 flags;
+ unsigned long flags;
};
struct iwch_listen_ep {
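The iwch_cm changes above switch ep->com.flags from a u32 of mask values to an unsigned long of bit numbers because the atomic bitops take a bit index, not a mask. A minimal sketch of the pattern using the flag names from this header; the surrounding function is made up:

	static void example_close(unsigned long *flags)
	{
		/* was: *flags |= RELEASE_RESOURCES (a mask); now an atomic op */
		set_bit(RELEASE_RESOURCES, flags);

		/* was: if (*flags & RELEASE_RESOURCES) ... */
		if (test_bit(RELEASE_RESOURCES, flags))
			pr_debug("resources will be released\n");

		/* the old "check flag, then set it" pair collapses into one
		 * atomic step, which is why the duplicate abort replies and
		 * close requests can be handled without racing */
		if (!test_and_set_bit(CLOSE_SENT, flags))
			pr_debug("first close for this endpoint\n");
	}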
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index ec49a5cbdebb..e1ec65ebb016 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -39,7 +39,7 @@
#include "iwch.h"
#include "iwch_provider.h"
-static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
+static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
{
u32 mmid;
@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+ return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp, int shift)
{
u32 stag;
+ int ret;
if (cxio_register_phys_mem(&rhp->rdev,
&stag, mhp->attr.pdid,
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
mhp->attr.pbl_size, mhp->attr.pbl_addr))
return -ENOMEM;
- iwch_finish_mem_reg(mhp, stag);
-
- return 0;
+ ret = iwch_finish_mem_reg(mhp, stag);
+ if (ret)
+ cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+ return ret;
}
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
int npages)
{
u32 stag;
+ int ret;
/* We could support this... */
if (npages > mhp->attr.pbl_size)
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
mhp->attr.pbl_size, mhp->attr.pbl_addr))
return -ENOMEM;
- iwch_finish_mem_reg(mhp, stag);
+ ret = iwch_finish_mem_reg(mhp, stag);
+ if (ret)
+ cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
- return 0;
+ return ret;
}
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e2a63214008a..6895523779d0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
spin_lock_init(&chp->lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
- insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+ if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
+ cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
+ kfree(chp);
+ return ERR_PTR(-ENOMEM);
+ }
if (ucontext) {
struct iwch_mm_entry *mm;
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
mhp->attr.stag = stag;
mmid = (stag) >> 8;
mhp->ibmw.rkey = stag;
- insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
+ kfree(mhp);
+ return ERR_PTR(-ENOMEM);
+ }
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmw);
}
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
struct iwch_mr *mhp;
u32 mmid;
u32 stag = 0;
- int ret;
+ int ret = 0;
php = to_iwch_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
- return ERR_PTR(-ENOMEM);
+ goto err;
mhp->rhp = rhp;
ret = iwch_alloc_pbl(mhp, pbl_depth);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err1;
mhp->attr.pbl_size = pbl_depth;
ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
- if (ret) {
- iwch_free_pbl(mhp);
- kfree(mhp);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err2;
mhp->attr.pdid = php->pdid;
mhp->attr.type = TPT_NON_SHARED_MR;
mhp->attr.stag = stag;
mhp->attr.state = 1;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+ goto err3;
+
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmr);
+err3:
+ cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+err2:
+ iwch_free_pbl(mhp);
+err1:
+ kfree(mhp);
+err:
+ return ERR_PTR(ret);
}
static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
spin_lock_init(&qhp->lock);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
- insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);
+
+ if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+ cxio_destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ kfree(qhp);
+ return ERR_PTR(-ENOMEM);
+ }
if (udata) {
@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev)
bail2:
ib_unregister_device(&dev->ibdev);
bail1:
+ kfree(dev->ibdev.iwcm);
return ret;
}
@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev)
device_remove_file(&dev->ibdev.dev,
iwch_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
+ kfree(dev->ibdev.iwcm);
return;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 27bbdc8e773a..6e8653471941 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
init_attr.rqe_count = iwch_rqes_posted(qhp);
init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
+ init_attr.chan = qhp->ep->l2t->smt_idx;
if (peer2peer) {
init_attr.rtr_type = RTR_READ;
if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fab18a2c74a8..5b635aa5947e 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
#include "ehca_tools.h"
#include "hcp_if.h"
-#define HCAD_VERSION "0028"
+#define HCAD_VERSION "0029"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level = 0;
static int ehca_poll_all_eqs = 1;
int ehca_debug_level = 0;
-int ehca_nr_ports = 2;
+int ehca_nr_ports = -1;
int ehca_use_hp_mr = 0;
int ehca_port_act_time = 30;
int ehca_static_rate = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
"Hardware level (0: autosensing (default), "
"0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
MODULE_PARM_DESC(nr_ports,
- "number of connected ports (-1: autodetect, 1: port one only, "
- "2: two ports (default)");
+ "number of connected ports (-1: autodetect (default), "
+ "1: port one only, 2: two ports)");
MODULE_PARM_DESC(use_hp_mr,
"Use high performance MRs (default: no)");
MODULE_PARM_DESC(port_act_time,
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 5a3d96f84c79..8fd88cd828fd 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -786,7 +786,11 @@ repoll:
wc->slid = cqe->rlid;
wc->dlid_path_bits = cqe->dlid;
wc->src_qp = cqe->remote_qp_number;
- wc->wc_flags = cqe->w_completion_flags;
+ /*
+ * HW has "Immed data present" and "GRH present" in bits 6 and 5.
+ * SW defines those in bits 1 and 0, so we can just shift and mask.
+ */
+ wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
wc->sl = cqe->service_level;
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index c568b28f4e20..8c1213f8916a 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -125,14 +125,30 @@ struct ib_perf {
u8 data[192];
} __attribute__ ((packed));
+/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
+struct tcslfl {
+ u32 tc:8;
+ u32 sl:4;
+ u32 fl:20;
+} __attribute__ ((packed));
+
+/* IP Version/TC/FL packed into 32 bits, as in GRH */
+struct vertcfl {
+ u32 ver:4;
+ u32 tc:8;
+ u32 fl:20;
+} __attribute__ ((packed));
static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct ib_perf *in_perf = (struct ib_perf *)in_mad;
struct ib_perf *out_perf = (struct ib_perf *)out_mad;
struct ib_class_port_info *poi =
(struct ib_class_port_info *)out_perf->data;
+ struct tcslfl *tcslfl =
+ (struct tcslfl *)&poi->redirect_tcslfl;
struct ehca_shca *shca =
container_of(ibdev, struct ehca_shca, ib_device);
struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
poi->base_version = 1;
poi->class_version = 1;
poi->resp_time_value = 18;
- poi->redirect_lid = sport->saved_attr.lid;
- poi->redirect_qp = sport->pma_qp_nr;
+
+ /* copy local routing information from WC where applicable */
+ tcslfl->sl = in_wc->sl;
+ poi->redirect_lid =
+ sport->saved_attr.lid | in_wc->dlid_path_bits;
+ poi->redirect_qp = sport->pma_qp_nr;
poi->redirect_qkey = IB_QP1_QKEY;
- poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+ ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
+ &poi->redirect_pkey);
+
+ /* if request was globally routed, copy route info */
+ if (in_grh) {
+ struct vertcfl *vertcfl =
+ (struct vertcfl *)&in_grh->version_tclass_flow;
+ memcpy(poi->redirect_gid, in_grh->dgid.raw,
+ sizeof(poi->redirect_gid));
+ tcslfl->tc = vertcfl->tc;
+ tcslfl->fl = vertcfl->fl;
+ } else
+ /* else only fill in default GID */
+ ehca_query_gid(ibdev, port_num, 0,
+ (union ib_gid *)&poi->redirect_gid);
ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ perf_reply:
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad)
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
{
int ret;
@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS;
ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
- ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+ ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
+ in_mad, out_mad);
return ret;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 23173982b32c..38a287006612 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
pd->port_cnt = 1;
port_fp(fp) = pd;
pd->port_pid = get_pid(task_pid(current));
- strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+ strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
ipath_stats.sps_ports++;
ret = 0;
} else
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 16a702d46018..ceb98ee78666 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
if (smp->attr_mod)
smp->status |= IB_SMP_INVALID_FIELD;
- strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+ memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
return reply(smp);
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ae3d7590346e..3cb3f47a10b8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx4_ib_alloc_ucontext_resp resp;
int err;
+ if (!dev->ib_active)
+ return ERR_PTR(-EAGAIN);
+
resp.qp_tab_size = dev->dev->caps.num_qps;
resp.bf_reg_size = dev->dev->caps.bf_reg_size;
resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
- static int mlx4_ib_version_printed;
struct mlx4_ib_dev *ibdev;
int num_ports = 0;
int i;
- if (!mlx4_ib_version_printed) {
- printk(KERN_INFO "%s", mlx4_ib_version);
- ++mlx4_ib_version_printed;
- }
+ printk_once(KERN_INFO "%s", mlx4_ib_version);
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_reg;
}
+ ibdev->ib_active = true;
+
return ibdev;
err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
break;
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+ ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
break;
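
Two independent cleanups sit in the mlx4 main.c hunks above: ucontext allocation now fails with -EAGAIN once a catastrophic event has cleared ib_active, and the open-coded "printed once" flag around the version banner collapses into printk_once(). A hedged sketch of the printk_once() idea, expressed as a plain C run-once macro for illustration (the kernel macro likewise keeps a static flag per call site, which is exactly what the removed mlx4_ib_version_printed variable did by hand):

	#include <stdio.h>

	/* Sketch of the printk_once() idea: remember, per call site, that the
	 * message has already been emitted. */
	#define pr_once(...)				\
		do {					\
			static int done_once;		\
			if (!done_once) {		\
				done_once = 1;		\
				printf(__VA_ARGS__);	\
			}				\
		} while (0)

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			pr_once("driver version banner\n");	/* prints only once */
		return 0;
	}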
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 8a7dd6795fa0..3486d7675e56 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
spinlock_t sm_lock;
struct mutex cap_mask_mutex;
+ bool ib_active;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c4a02648c8af..219b10397b4d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
- if (send_cq == recv_cq)
+ if (send_cq == recv_cq) {
spin_lock_irq(&send_cq->lock);
- else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_lock_irq(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
}
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+ __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
- if (send_cq == recv_cq)
+ if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
- else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
} else {
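
The mlx4 lock/unlock helpers above take either one or two CQ locks depending on whether send and receive share a CQ, which confuses static lock-balance checking. The __acquires()/__releases() annotations on the functions, plus the no-op __acquire()/__release() calls in the shared-CQ branch, keep sparse's accounting balanced without changing runtime behaviour. Below is a hedged, non-kernel sketch of the same pattern with the annotations stubbed out so it compiles standalone; the driver orders the two locks by CQ number, the sketch simply compares addresses.

	#include <pthread.h>

	/* Outside the kernel, sparse's annotations reduce to no-ops. */
	#define __acquires(x)
	#define __releases(x)
	#define __acquire(x) (void)0
	#define __release(x) (void)0

	struct cq { pthread_mutex_t lock; };

	/* Take both locks; if the two CQs are the same object, lock it once but
	 * tell the checker that both "locks" are held. */
	static void lock_cqs(struct cq *send, struct cq *recv)
		__acquires(&send->lock) __acquires(&recv->lock)
	{
		if (send == recv) {
			pthread_mutex_lock(&send->lock);
			__acquire(&recv->lock);		/* bookkeeping only */
		} else if (send < recv) {		/* fixed order avoids deadlock */
			pthread_mutex_lock(&send->lock);
			pthread_mutex_lock(&recv->lock);
		} else {
			pthread_mutex_lock(&recv->lock);
			pthread_mutex_lock(&send->lock);
		}
	}

	static void unlock_cqs(struct cq *send, struct cq *recv)
		__releases(&send->lock) __releases(&recv->lock)
	{
		if (send == recv) {
			__release(&recv->lock);
			pthread_mutex_unlock(&send->lock);
		} else {
			pthread_mutex_unlock(&recv->lock);
			pthread_mutex_unlock(&send->lock);
		}
	}

	int main(void)
	{
		struct cq a = { PTHREAD_MUTEX_INITIALIZER };
		struct cq b = { PTHREAD_MUTEX_INITIALIZER };

		lock_cqs(&a, &b);
		unlock_cqs(&a, &b);
		lock_cqs(&a, &a);	/* shared-CQ case */
		unlock_cqs(&a, &a);
		return 0;
	}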
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 65ad359fdf16..056b2a4c6970 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
event.device = &dev->ib_dev;
event.event = IB_EVENT_DEVICE_FATAL;
event.element.port_num = 0;
+ dev->active = false;
ib_dispatch_event(&event);
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h
index 75671f75cac4..155bc66395be 100644
--- a/drivers/infiniband/hw/mthca/mthca_config_reg.h
+++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h
@@ -34,8 +34,6 @@
#ifndef MTHCA_CONFIG_REG_H
#define MTHCA_CONFIG_REG_H
-#include <asm/page.h>
-
#define MTHCA_HCR_BASE 0x80680
#define MTHCA_HCR_SIZE 0x0001c
#define MTHCA_ECR_BASE 0x80700
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 9ef611f6dd36..7e6a6d64ad4e 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -357,6 +357,7 @@ struct mthca_dev {
struct ib_ah *sm_ah[MTHCA_MAX_PORTS];
spinlock_t sm_lock;
u8 rate[MTHCA_MAX_PORTS];
+ bool active;
};
#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 90e4e450a120..8c31fa36e95e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)
if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
static const char *eq_name[] = {
- [MTHCA_EQ_COMP] = DRV_NAME " (comp)",
- [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
- [MTHCA_EQ_CMD] = DRV_NAME " (cmd)"
+ [MTHCA_EQ_COMP] = DRV_NAME "-comp",
+ [MTHCA_EQ_ASYNC] = DRV_NAME "-async",
+ [MTHCA_EQ_CMD] = DRV_NAME "-cmd"
};
for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+ snprintf(dev->eq_table.eq[i].irq_name,
+ IB_DEVICE_NAME_MAX,
+ "%s@pci:%s", eq_name[i],
+ pci_name(dev->pdev));
err = request_irq(dev->eq_table.eq[i].msi_x_vector,
mthca_is_memfree(dev) ?
mthca_arbel_msi_x_interrupt :
mthca_tavor_msi_x_interrupt,
- 0, eq_name[i], dev->eq_table.eq + i);
+ 0, dev->eq_table.eq[i].irq_name,
+ dev->eq_table.eq + i);
if (err)
goto err_out_cmd;
dev->eq_table.eq[i].have_irq = 1;
}
} else {
+ snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
+ DRV_NAME "@pci:%s", pci_name(dev->pdev));
err = request_irq(dev->pdev->irq,
mthca_is_memfree(dev) ?
mthca_arbel_interrupt :
mthca_tavor_interrupt,
- IRQF_SHARED, DRV_NAME, dev);
+ IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
if (err)
goto err_out_cmd;
dev->eq_table.have_irq = 1;
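
The mthca EQ hunk above builds each interrupt name as "<eq name>@pci:<slot>" so that several HCAs stay distinguishable in /proc/interrupts instead of all sharing the literal DRV_NAME. Because the string registered with request_irq() must outlive the IRQ, it is stored in the new irq_name[] member of struct mthca_eq rather than a stack buffer. A hedged userspace sketch of the naming scheme (names and sizes here are illustrative, not the driver's):

	#include <stdio.h>

	#define DRV_NAME "ib_mthca"
	#define NAME_MAX_LEN 64

	/* Sketch: compose a per-device, per-event-queue IRQ name. */
	static void build_irq_name(char *buf, size_t len,
				   const char *eq_kind, const char *pci_slot)
	{
		snprintf(buf, len, "%s-%s@pci:%s", DRV_NAME, eq_kind, pci_slot);
	}

	int main(void)
	{
		char name[NAME_MAX_LEN];

		build_irq_name(name, sizeof(name), "comp", "0000:04:00.0");
		printf("%s\n", name);	/* ib_mthca-comp@pci:0000:04:00.0 */
		return 0;
	}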
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 13da9f1d24c0..b01b28987874 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
pci_set_drvdata(pdev, mdev);
mdev->hca_type = hca_type;
+ mdev->active = true;
+
return 0;
err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
static int __devinit mthca_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static int mthca_version_printed = 0;
int ret;
mutex_lock(&mthca_device_mutex);
- if (!mthca_version_printed) {
- printk(KERN_INFO "%s", mthca_version);
- ++mthca_version_printed;
- }
+ printk_once(KERN_INFO "%s", mthca_version);
if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 87ad889e367b..bcf7a4014820 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
struct mthca_ucontext *context;
int err;
+ if (!(to_mdev(ibdev)->active))
+ return ERR_PTR(-EAGAIN);
+
memset(&uresp, 0, sizeof uresp);
uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index c621f8794b88..90f4c4d2e983 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -113,6 +113,7 @@ struct mthca_eq {
int nent;
struct mthca_buf_list *page_list;
struct mthca_mr mr;
+ char irq_name[IB_DEVICE_NAME_MAX];
};
struct mthca_av;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f5081bfde6db..c10576fa60c1 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
}
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
- if (send_cq == recv_cq)
+ if (send_cq == recv_cq) {
spin_lock_irq(&send_cq->lock);
- else if (send_cq->cqn < recv_cq->cqn) {
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
spin_lock_irq(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
}
static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+ __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
- if (send_cq == recv_cq)
+ if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
- else if (send_cq->cqn < recv_cq->cqn) {
+ } else if (send_cq->cqn < recv_cq->cqn) {
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
} else {
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index acb6817f6060..2a13a163d337 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bf1720f7f35f..bcc6abc4faff 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
void nes_cm_disconn_worker(void *);
/* nes_verbs.c */
-int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
+int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
struct nes_ib_device *nes_init_ofa_device(struct net_device *);
void nes_destroy_ofa_device(struct nes_ib_device *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 114b802771ad..73473db19863 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
*/
int nes_cm_disconn(struct nes_qp *nesqp)
{
- unsigned long flags;
-
- spin_lock_irqsave(&nesqp->lock, flags);
- if (nesqp->disconn_pending == 0) {
- nesqp->disconn_pending++;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- /* init our disconnect work element, to */
- INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
+ struct disconn_work *work;
- queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
- } else
- spin_unlock_irqrestore(&nesqp->lock, flags);
+ work = kzalloc(sizeof *work, GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM; /* Timer will clean up */
+ nes_add_ref(&nesqp->ibqp);
+ work->nesqp = nesqp;
+ INIT_WORK(&work->work, nes_disconnect_worker);
+ queue_work(g_cm_core->disconn_wq, &work->work);
return 0;
}
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
*/
static void nes_disconnect_worker(struct work_struct *work)
{
- struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
+ struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+ struct nes_qp *nesqp = dwork->nesqp;
+ kfree(dwork);
nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
nesqp->last_aeq, nesqp->hwqp.qp_id);
nes_cm_disconn_true(nesqp);
+ nes_rem_ref(&nesqp->ibqp);
}
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
u16 last_ae;
u8 original_hw_tcp_state;
u8 original_ibqp_state;
- u8 issued_disconnect_reset = 0;
+ enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
+ int issue_disconn = 0;
+ int issue_close = 0;
+ int issue_flush = 0;
+ u32 flush_q = NES_CQP_FLUSH_RQ;
+ struct ib_event ibevent;
if (!nesqp) {
nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
original_ibqp_state = nesqp->ibqp_state;
last_ae = nesqp->last_aeq;
+ if (nesqp->term_flags) {
+ issue_disconn = 1;
+ issue_close = 1;
+ nesqp->cm_id = NULL;
+ if (nesqp->flush_issued == 0) {
+ nesqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ issue_disconn = 1;
+ if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
+ disconn_status = IW_CM_EVENT_STATUS_RESET;
+ }
+
+ if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+ (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
+ (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
+ (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ issue_close = 1;
+ nesqp->cm_id = NULL;
+ if (nesqp->flush_issued == 0) {
+ nesqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
+ if ((issue_flush) && (nesqp->destroyed == 0)) {
+ /* Flush the queue(s) */
+ if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
+ flush_q |= NES_CQP_FLUSH_SQ;
+ flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
- if ((nesqp->cm_id) && (cm_id->event_handler)) {
- if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- ((original_ibqp_state == IB_QPS_RTS) &&
- (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ if (nesqp->term_flags) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.event = nesqp->terminate_eventtype;
+ ibevent.element.qp = &nesqp->ibqp;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ }
+
+ if ((cm_id) && (cm_id->event_handler)) {
+ if (issue_disconn) {
atomic_inc(&cm_disconnects);
cm_event.event = IW_CM_EVENT_DISCONNECT;
- if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
- cm_event.status = IW_CM_EVENT_STATUS_RESET;
- nes_debug(NES_DBG_CM, "Generating a CM "
- "Disconnect Event (status reset) for "
- "QP%u, cm_id = %p. \n",
- nesqp->hwqp.qp_id, cm_id);
- } else
- cm_event.status = IW_CM_EVENT_STATUS_OK;
-
+ cm_event.status = disconn_status;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
nesqp->hwqp.sq_tail, cm_id,
atomic_read(&nesqp->refcount));
- spin_unlock_irqrestore(&nesqp->lock, flags);
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
nes_debug(NES_DBG_CM, "OFA CM event_handler "
"returned, ret=%d\n", ret);
- spin_lock_irqsave(&nesqp->lock, flags);
}
- nesqp->disconn_pending = 0;
- /* There might have been another AE while the lock was released */
- original_hw_tcp_state = nesqp->hw_tcp_state;
- original_ibqp_state = nesqp->ibqp_state;
- last_ae = nesqp->last_aeq;
-
- if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
- ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
- (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
- (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
- (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ if (issue_close) {
atomic_inc(&cm_closes);
- nesqp->cm_id = NULL;
- nesqp->in_disconnect = 0;
- spin_unlock_irqrestore(&nesqp->lock, flags);
nes_disconnect(nesqp, 1);
cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
}
cm_id->rem_ref(cm_id);
-
- spin_lock_irqsave(&nesqp->lock, flags);
- if (nesqp->flush_issued == 0) {
- nesqp->flush_issued = 1;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- flush_wqes(nesvnic->nesdev, nesqp,
- NES_CQP_FLUSH_RQ, 1);
- } else
- spin_unlock_irqrestore(&nesqp->lock, flags);
- } else {
- cm_id = nesqp->cm_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- /* check to see if the inbound reset beat the outbound reset */
- if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
- nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
- "due to inbound reset beating the "
- "outbound reset.\n", nesqp->hwqp.qp_id);
- }
}
- } else {
- nesqp->disconn_pending = 0;
- spin_unlock_irqrestore(&nesqp->lock, flags);
}
return 0;
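
Much of the nes_cm.c rework above is lock discipline: nes_cm_disconn() now always queues a freshly allocated work item holding a QP reference, and nes_cm_disconn_true() makes all of its decisions (issue_disconn, issue_close, issue_flush, the disconnect status) in one pass under nesqp->lock, then drops the lock before calling flush_wqes(), the IB event handler, and the CM event handler, instead of repeatedly releasing and re-taking the lock around each upcall. A hedged sketch of that "decide under the lock, act after unlocking" shape, not the driver code:

	#include <pthread.h>
	#include <stdio.h>

	struct conn {
		pthread_mutex_t lock;
		int term_flags;
		int flush_issued;
	};

	static void report_disconnect(struct conn *c) { printf("disconnect %p\n", (void *)c); }
	static void flush_queues(struct conn *c)      { printf("flush %p\n", (void *)c); }

	/* Sketch: snapshot state and make all decisions under the lock, then drop
	 * it before calling out, so callbacks never run with the lock held. */
	static void handle_event(struct conn *c)
	{
		int issue_disconn = 0, issue_flush = 0;

		pthread_mutex_lock(&c->lock);
		if (c->term_flags) {
			issue_disconn = 1;
			if (!c->flush_issued) {
				c->flush_issued = 1;
				issue_flush = 1;
			}
		}
		pthread_mutex_unlock(&c->lock);

		if (issue_flush)
			flush_queues(c);
		if (issue_disconn)
			report_disconnect(c);
	}

	int main(void)
	{
		struct conn c = { .lock = PTHREAD_MUTEX_INITIALIZER, .term_flags = 1 };

		handle_event(&c);
		return 0;
	}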
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 8b7e7c0e496e..90e8e4d8a5ce 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -410,8 +410,6 @@ struct nes_cm_ops {
int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
enum nes_timer_type, int, int);
-int nes_cm_disconn(struct nes_qp *);
-
int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
int nes_reject(struct iw_cm_id *, const void *, u8);
int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 4a84d02ece06..63a1a8e1e8a3 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
static void process_critical_error(struct nes_device *nesdev);
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
+static void nes_terminate_timeout(unsigned long context);
+static void nes_terminate_start_timer(struct nes_qp *nesqp);
#ifdef CONFIG_INFINIBAND_NES_DEBUG
static unsigned char *nes_iwarp_state_str[] = {
@@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
}
+static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
+{
+ u16 pkt_len;
+
+ if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
+ /* skip over ethernet header */
+ pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
+ pkt += ETH_HLEN;
+
+ /* Skip over IP and TCP headers */
+ pkt += 4 * (pkt[0] & 0x0f);
+ pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+ }
+ return pkt;
+}
+
+/* Determine if incoming error pkt is rdma layer */
+static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
+{
+ u8 *pkt;
+ u16 *mpa;
+ u32 opcode = 0xffffffff;
+
+ if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+ pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+ mpa = (u16 *)locate_mpa(pkt, aeq_info);
+ opcode = be16_to_cpu(mpa[1]) & 0xf;
+ }
+
+ return opcode;
+}
+
+/* Build iWARP terminate header */
+static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
+{
+ u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+ u16 ddp_seg_len;
+ int copy_len = 0;
+ u8 is_tagged = 0;
+ u8 flush_code = 0;
+ struct nes_terminate_hdr *termhdr;
+
+ termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
+ memset(termhdr, 0, 64);
+
+ if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+
+ /* Use data from offending packet to fill in ddp & rdma hdrs */
+ pkt = locate_mpa(pkt, aeq_info);
+ ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
+ if (ddp_seg_len) {
+ copy_len = 2;
+ termhdr->hdrct = DDP_LEN_FLAG;
+ if (pkt[2] & 0x80) {
+ is_tagged = 1;
+ if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+ copy_len += TERM_DDP_LEN_TAGGED;
+ termhdr->hdrct |= DDP_HDR_FLAG;
+ }
+ } else {
+ if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+ copy_len += TERM_DDP_LEN_UNTAGGED;
+ termhdr->hdrct |= DDP_HDR_FLAG;
+ }
+
+ if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
+ if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
+ copy_len += TERM_RDMA_LEN;
+ termhdr->hdrct |= RDMA_HDR_FLAG;
+ }
+ }
+ }
+ }
+ }
+
+ switch (async_event_id) {
+ case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
+ switch (iwarp_opcode(nesqp, aeq_info)) {
+ case IWARP_OPCODE_WRITE:
+ flush_code = IB_WC_LOC_PROT_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+ termhdr->error_code = DDP_TAGGED_INV_STAG;
+ break;
+ default:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_INV_STAG;
+ }
+ break;
+ case NES_AEQE_AEID_AMP_INVALID_STAG:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_INV_STAG;
+ break;
+ case NES_AEQE_AEID_AMP_BAD_QP:
+ flush_code = IB_WC_LOC_QP_OP_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_QN;
+ break;
+ case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
+ case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
+ switch (iwarp_opcode(nesqp, aeq_info)) {
+ case IWARP_OPCODE_SEND_INV:
+ case IWARP_OPCODE_SEND_SE_INV:
+ flush_code = IB_WC_REM_OP_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+ termhdr->error_code = RDMAP_CANT_INV_STAG;
+ break;
+ default:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_INV_STAG;
+ }
+ break;
+ case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
+ if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
+ flush_code = IB_WC_LOC_PROT_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+ termhdr->error_code = DDP_TAGGED_BOUNDS;
+ } else {
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_INV_BOUNDS;
+ }
+ break;
+ case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
+ case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_ACCESS;
+ break;
+ case NES_AEQE_AEID_AMP_TO_WRAP:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_TO_WRAP;
+ break;
+ case NES_AEQE_AEID_AMP_BAD_PD:
+ switch (iwarp_opcode(nesqp, aeq_info)) {
+ case IWARP_OPCODE_WRITE:
+ flush_code = IB_WC_LOC_PROT_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+ termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
+ break;
+ case IWARP_OPCODE_SEND_INV:
+ case IWARP_OPCODE_SEND_SE_INV:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_CANT_INV_STAG;
+ break;
+ default:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_UNASSOC_STAG;
+ }
+ break;
+ case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+ flush_code = IB_WC_LOC_LEN_ERR;
+ termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
+ termhdr->error_code = MPA_MARKER;
+ break;
+ case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
+ termhdr->error_code = MPA_CRC;
+ break;
+ case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
+ case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
+ flush_code = IB_WC_LOC_LEN_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
+ termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
+ break;
+ case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
+ case NES_AEQE_AEID_DDP_NO_L_BIT:
+ flush_code = IB_WC_FATAL_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
+ termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
+ break;
+ case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
+ case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
+ break;
+ case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ flush_code = IB_WC_LOC_LEN_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
+ flush_code = IB_WC_GENERAL_ERR;
+ if (is_tagged) {
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+ termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
+ } else {
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
+ }
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_MO;
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ flush_code = IB_WC_REM_OP_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_QN;
+ break;
+ case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+ termhdr->error_code = RDMAP_INV_RDMAP_VER;
+ break;
+ case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
+ flush_code = IB_WC_LOC_QP_OP_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+ termhdr->error_code = RDMAP_UNEXPECTED_OP;
+ break;
+ default:
+ flush_code = IB_WC_FATAL_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+ termhdr->error_code = RDMAP_UNSPECIFIED;
+ break;
+ }
+
+ if (copy_len)
+ memcpy(termhdr + 1, pkt, copy_len);
+
+ if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
+ if (aeq_info & NES_AEQE_SQ)
+ nesqp->term_sq_flush_code = flush_code;
+ else
+ nesqp->term_rq_flush_code = flush_code;
+ }
+
+ return sizeof(struct nes_terminate_hdr) + copy_len;
+}
+
+static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
+ struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
+{
+ u64 context;
+ unsigned long flags;
+ u32 aeq_info;
+ u16 async_event_id;
+ u8 tcp_state;
+ u8 iwarp_state;
+ u32 termlen = 0;
+ u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
+ NES_CQP_QP_TERM_DONT_SEND_FIN;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+ if (nesqp->term_flags & NES_TERM_SENT)
+ return; /* Sanity check */
+
+ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+ iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+ async_event_id = (u16)aeq_info;
+
+ context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
+ aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
+ if (!context) {
+ WARN_ON(!context);
+ return;
+ }
+
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ nesqp->terminate_eventtype = eventtype;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ if (nesadapter->send_term_ok)
+ termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
+ else
+ mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
+
+ nes_terminate_start_timer(nesqp);
+ nesqp->term_flags |= NES_TERM_SENT;
+ nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
+}
+
+static void nes_terminate_send_fin(struct nes_device *nesdev,
+ struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
+{
+ u32 aeq_info;
+ u16 async_event_id;
+ u8 tcp_state;
+ u8 iwarp_state;
+ unsigned long flags;
+
+ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+ iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+ async_event_id = (u16)aeq_info;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ /* Send the fin only */
+ nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
+ NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
+}
+
+/* Cleanup after a terminate sent or received */
+static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
+{
+ u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
+ unsigned long flags;
+ struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u8 first_time = 0;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ if (nesqp->hte_added) {
+ nesqp->hte_added = 0;
+ next_iwarp_state |= NES_CQP_QP_DEL_HTE;
+ }
+
+ first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
+ nesqp->term_flags |= NES_TERM_DONE;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ /* Make sure we go through this only once */
+ if (first_time) {
+ if (timeout_occurred == 0)
+ del_timer(&nesqp->terminate_timer);
+ else
+ next_iwarp_state |= NES_CQP_QP_RESET;
+
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ nes_cm_disconn(nesqp);
+ }
+}
+
+static void nes_terminate_received(struct nes_device *nesdev,
+ struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
+{
+ u32 aeq_info;
+ u8 *pkt;
+ u32 *mpa;
+ u8 ddp_ctl;
+ u8 rdma_ctl;
+ u16 aeq_id = 0;
+
+ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+ /* Terminate is not a performance path so the silicon */
+ /* did not validate the frame - do it now */
+ pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+ mpa = (u32 *)locate_mpa(pkt, aeq_info);
+ ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
+ rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
+ if ((ddp_ctl & 0xc0) != 0x40)
+ aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
+ else if ((ddp_ctl & 0x03) != 1)
+ aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
+ else if (be32_to_cpu(mpa[2]) != 2)
+ aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
+ else if (be32_to_cpu(mpa[3]) != 1)
+ aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
+ else if (be32_to_cpu(mpa[4]) != 0)
+ aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
+ else if ((rdma_ctl & 0xc0) != 0x40)
+ aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+ if (aeq_id) {
+ /* Bad terminate recvd - send back a terminate */
+ aeq_info = (aeq_info & 0xffff0000) | aeq_id;
+ aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+ return;
+ }
+ }
+
+ nesqp->term_flags |= NES_TERM_RCVD;
+ nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
+ nes_terminate_start_timer(nesqp);
+ nes_terminate_send_fin(nesdev, nesqp, aeqe);
+}
+
+/* Timeout routine in case terminate fails to complete */
+static void nes_terminate_timeout(unsigned long context)
+{
+ struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+
+ nes_terminate_done(nesqp, 1);
+}
+
+/* Set a timer in case hw cannot complete the terminate sequence */
+static void nes_terminate_start_timer(struct nes_qp *nesqp)
+{
+ init_timer(&nesqp->terminate_timer);
+ nesqp->terminate_timer.function = nes_terminate_timeout;
+ nesqp->terminate_timer.expires = jiffies + HZ;
+ nesqp->terminate_timer.data = (unsigned long)nesqp;
+ add_timer(&nesqp->terminate_timer);
+}
+
/**
* nes_process_iwarp_aeqe
*/
@@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_hw_aeqe *aeqe)
{
u64 context;
- u64 aeqe_context = 0;
unsigned long flags;
struct nes_qp *nesqp;
+ struct nes_hw_cq *hw_cq;
+ struct nes_cq *nescq;
int resource_allocated;
- /* struct iw_cm_id *cm_id; */
struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct ib_event ibevent;
- /* struct iw_cm_event cm_event; */
u32 aeq_info;
u32 next_iwarp_state = 0;
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
+ int must_disconn = 1;
+ int must_terminate = 0;
+ struct ib_event ibevent;
nes_debug(NES_DBG_AEQ, "\n");
aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
- if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
+ if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
} else {
- aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
- aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
BUG_ON(!context);
@@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
- nesqp = *((struct nes_qp **)&context);
+ nesqp = (struct nes_qp *)(unsigned long)context;
+
+ if (nesqp->term_flags)
+ return; /* Ignore it, wait for close complete */
+
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
nesqp->cm_id->add_ref(nesqp->cm_id);
schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
@@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
async_event_id, nesqp->last_aeq, tcp_state);
}
+
if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
(nesqp->ibqp_state != IB_QPS_RTS)) {
/* FIN Received but tcp state or IB state moved on,
should expect a close complete */
return;
}
+
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ if (nesqp->term_flags) {
+ nes_terminate_done(nesqp, 0);
+ return;
+ }
+
case NES_AEQE_AEID_LLP_CONNECTION_RESET:
- case NES_AEQE_AEID_TERMINATE_SENT:
- case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
case NES_AEQE_AEID_RESET_SENT:
- nesqp = *((struct nes_qp **)&context);
+ nesqp = (struct nes_qp *)(unsigned long)context;
if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
tcp_state = NES_AEQE_TCP_STATE_CLOSED;
}
@@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
(tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
nesqp->hte_added = 0;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
- nesqp->hwqp.qp_id);
- nes_hw_modify_qp(nesdev, nesqp,
- NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
- spin_lock_irqsave(&nesqp->lock, flags);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
}
if ((nesqp->ibqp_state == IB_QPS_RTS) &&
@@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
break;
case NES_AEQE_IWARP_STATE_TERMINATE:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
- if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
- next_iwarp_state |= 0x02000000;
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- }
+ must_disconn = 0; /* terminate path takes care of disconn */
+ if (nesqp->term_flags == 0)
+ must_terminate = 1;
break;
- default:
- next_iwarp_state = 0;
- }
- spin_unlock_irqrestore(&nesqp->lock, flags);
- if (next_iwarp_state) {
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
- " also added another reference\n",
- nesqp->hwqp.qp_id, next_iwarp_state);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
}
- nes_cm_disconn(nesqp);
} else {
if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
/* FIN Received but ib state not RTS,
close complete will be on its way */
- spin_unlock_irqrestore(&nesqp->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&nesqp->lock, flags);
- if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
- " also added another reference\n",
- nesqp->hwqp.qp_id, next_iwarp_state);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+ must_disconn = 0;
}
- nes_cm_disconn(nesqp);
}
- break;
- case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED"
- " event on QP%u \n Q2 Data:\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_FATAL;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- ((nesqp->ibqp_state == IB_QPS_RTS)&&
- (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+
+ if (must_terminate)
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+ else if (must_disconn) {
+ if (next_iwarp_state) {
+ nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
+ nesqp->hwqp.qp_id, next_iwarp_state);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ }
nes_cm_disconn(nesqp);
- } else {
- nesqp->in_disconnect = 0;
- wake_up(&nesqp->kick_waitq);
}
break;
- case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- nesqp->last_aeq = async_event_id;
- if (nesqp->cm_id) {
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
- " event on QP%u, remote IP = 0x%08X \n",
- nesqp->hwqp.qp_id,
- ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
- } else {
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
- " event on QP%u \n",
- nesqp->hwqp.qp_id);
- }
- spin_unlock_irqrestore(&nesqp->lock, flags);
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_FATAL;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
+
+ case NES_AEQE_AEID_TERMINATE_SENT:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ nes_terminate_send_fin(nesdev, nesqp, aeqe);
break;
- case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
- if (NES_AEQE_INBOUND_RDMA&aeq_info) {
- nesqp = nesadapter->qp_table[le32_to_cpu(
- aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
- } else {
- /* TODO: get the actual WQE and mask off wqe index */
- context &= ~((u64)511);
- nesqp = *((struct nes_qp **)&context);
- }
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
+
+ case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ nes_terminate_received(nesdev, nesqp, aeqe);
break;
+
+ case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
+ case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- break;
+ case NES_AEQE_AEID_AMP_INVALID_STAG:
+ case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
+ case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
- nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words
- [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u,"
- " nesqp = %p, AE reported %p\n",
- nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context));
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
+ case NES_AEQE_AEID_AMP_TO_WRAP:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
+ break;
+
+ case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
+ case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
+ case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
+ aeq_info &= 0xffff0000;
+ aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
+ aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
}
+
+ case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
+ case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+ case NES_AEQE_AEID_AMP_BAD_QP:
+ case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+ case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
+ case NES_AEQE_AEID_DDP_NO_L_BIT:
+ case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
+ case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+ case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
+ case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case NES_AEQE_AEID_AMP_BAD_PD:
+ case NES_AEQE_AEID_AMP_FASTREG_SHARED:
+ case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG:
+ case NES_AEQE_AEID_AMP_FASTREG_MW_STAG:
+ case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS:
+ case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW:
+ case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH:
+ case NES_AEQE_AEID_AMP_INVALIDATE_SHARED:
+ case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS:
+ case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG:
+ case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG:
+ case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG:
+ case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG:
+ case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS:
+ case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS:
+ case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT:
+ case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED:
+ case NES_AEQE_AEID_BAD_CLOSE:
+ case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO:
+ case NES_AEQE_AEID_STAG_ZERO_INVALID:
+ case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
+ case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
break;
+
case NES_AEQE_AEID_CQ_OPERATION_ERROR:
context <<= 1;
nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
@@ -3153,83 +3525,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
if (resource_allocated) {
printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
__func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+ hw_cq = (struct nes_hw_cq *)(unsigned long)context;
+ if (hw_cq) {
+ nescq = container_of(hw_cq, struct nes_cq, hw_cq);
+ if (nescq->ibcq.event_handler) {
+ ibevent.device = nescq->ibcq.device;
+ ibevent.event = IB_EVENT_CQ_ERR;
+ ibevent.element.cq = &nescq->ibcq;
+ nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context);
+ }
+ }
}
break;
- case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- nesqp = nesadapter->qp_table[le32_to_cpu(
- aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
- "_FOR_AVAILABLE_BUFFER event on QP%u\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- /* tell cm to disconnect, cm will queue work to thread */
- nes_cm_disconn(nesqp);
- break;
- case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
- "_NO_BUFFER_AVAILABLE event on QP%u\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_FATAL;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- /* tell cm to disconnect, cm will queue work to thread */
- nes_cm_disconn(nesqp);
- break;
- case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
- " event on QP%u \n Q2 Data:\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_FATAL;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- /* tell cm to disconnect, cm will queue work to thread */
- nes_cm_disconn(nesqp);
- break;
- /* TODO: additional AEs need to be here */
- case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent,
- nesqp->ibqp.qp_context);
- }
- nes_cm_disconn(nesqp);
- break;
+
default:
nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
async_event_id);
@@ -3238,7 +3546,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
}
-
/**
* nes_iwarp_ce_handler
*/
@@ -3373,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
{
struct nes_cqp_request *cqp_request;
struct nes_hw_cqp_wqe *cqp_wqe;
+ u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
+ u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
int ret;
cqp_request = nes_get_cqp_request(nesdev);
@@ -3389,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
cqp_wqe = &cqp_request->cqp_wqe;
nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ /* If wqe in error was identified, set code to be put into cqe */
+ if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
+ which_wq |= NES_CQP_FLUSH_MAJ_MIN;
+ sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
+ nesqp->term_sq_flush_code = 0;
+ }
+
+ if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
+ which_wq |= NES_CQP_FLUSH_MAJ_MIN;
+ rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
+ nesqp->term_rq_flush_code = 0;
+ }
+
+ if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
+ cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
+ cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
+ }
+
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
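
Most of the new terminate machinery in nes_hw.c hangs off a one-second safety timer: nes_terminate_start_timer() arms it when a terminate is sent or received, nes_terminate_done() cancels it when the close completes normally, and nes_terminate_timeout() fires otherwise and forces the QP to error. The kernel-style sketch below shows only the arm/cancel shape using the timer API of that era (init_timer() plus .function/.data/.expires); the structure and function names are placeholders, not the driver's.

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct sketch_qp {
		struct timer_list terminate_timer;
		/* ... */
	};

	/* Timeout path: would force the QP to error and finish cleanup. */
	static void sketch_terminate_timeout(unsigned long context)
	{
		struct sketch_qp *qp = (struct sketch_qp *)context;

		(void)qp;
	}

	/* Arm a ~1 second watchdog in case the terminate sequence never completes. */
	static void sketch_terminate_start_timer(struct sketch_qp *qp)
	{
		init_timer(&qp->terminate_timer);
		qp->terminate_timer.function = sketch_terminate_timeout;
		qp->terminate_timer.data = (unsigned long)qp;
		qp->terminate_timer.expires = jiffies + HZ;
		add_timer(&qp->terminate_timer);
	}

	/* Normal completion: cancel the watchdog before it fires. */
	static void sketch_terminate_done(struct sketch_qp *qp, int timed_out)
	{
		if (!timed_out)
			del_timer(&qp->terminate_timer);
	}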
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index c3654c6383fe..f28a41ba9fa1 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx {
};
#define NES_CQP_OP_IWARP_STATE_SHIFT 28
+#define NES_CQP_OP_TERMLEN_SHIFT 28
enum nes_cqp_qp_bits {
NES_CQP_QP_ARP_VALID = (1<<8),
@@ -265,12 +266,16 @@ enum nes_cqp_qp_bits {
NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
+ NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
NES_CQP_QP_RESET = (1<<31),
};
enum nes_cqp_qp_wqe_word_idx {
NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
+ NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
+ NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
};
@@ -361,6 +366,7 @@ enum nes_cqp_arp_bits {
enum nes_cqp_flush_bits {
NES_CQP_FLUSH_SQ = (1<<30),
NES_CQP_FLUSH_RQ = (1<<31),
+ NES_CQP_FLUSH_MAJ_MIN = (1<<28),
};
enum nes_cqe_opcode_bits {
@@ -633,11 +639,14 @@ enum nes_aeqe_bits {
NES_AEQE_INBOUND_RDMA = (1<<19),
NES_AEQE_IWARP_STATE_MASK = (7<<20),
NES_AEQE_TCP_STATE_MASK = (0xf<<24),
+ NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
NES_AEQE_VALID = (1<<31),
};
#define NES_AEQE_IWARP_STATE_SHIFT 20
#define NES_AEQE_TCP_STATE_SHIFT 24
+#define NES_AEQE_Q2_DATA_ETHERNET (1<<28)
+#define NES_AEQE_Q2_DATA_MPA (1<<29)
enum nes_aeqe_iwarp_state {
NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
@@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
NES_IWARP_SQ_OP_NOP = 12,
};
+enum nes_iwarp_cqe_major_code {
+ NES_IWARP_CQE_MAJOR_FLUSH = 1,
+ NES_IWARP_CQE_MAJOR_DRV = 0x8000
+};
+
+enum nes_iwarp_cqe_minor_code {
+ NES_IWARP_CQE_MINOR_FLUSH = 1
+};
+
#define NES_EEPROM_READ_REQUEST (1<<16)
#define NES_MAC_ADDR_VALID (1<<20)
@@ -1119,6 +1137,7 @@ struct nes_adapter {
u8 netdev_max; /* from host nic address count in EEPROM */
u8 port_count;
u8 virtwq;
+ u8 send_term_ok;
u8 et_use_adaptive_rx_coalesce;
u8 adapter_fcn_count;
u8 pft_mcast_map[NES_PFT_SIZE];
@@ -1217,6 +1236,90 @@ struct nes_ib_device {
u32 num_pd;
};
+enum nes_hdrct_flags {
+ DDP_LEN_FLAG = 0x80,
+ DDP_HDR_FLAG = 0x40,
+ RDMA_HDR_FLAG = 0x20
+};
+
+enum nes_term_layers {
+ LAYER_RDMA = 0,
+ LAYER_DDP = 1,
+ LAYER_MPA = 2
+};
+
+enum nes_term_error_types {
+ RDMAP_CATASTROPHIC = 0,
+ RDMAP_REMOTE_PROT = 1,
+ RDMAP_REMOTE_OP = 2,
+ DDP_CATASTROPHIC = 0,
+ DDP_TAGGED_BUFFER = 1,
+ DDP_UNTAGGED_BUFFER = 2,
+ DDP_LLP = 3
+};
+
+enum nes_term_rdma_errors {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_INV_BOUNDS = 0x01,
+ RDMAP_ACCESS = 0x02,
+ RDMAP_UNASSOC_STAG = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_RDMAP_VER = 0x05,
+ RDMAP_UNEXPECTED_OP = 0x06,
+ RDMAP_CATASTROPHIC_LOCAL = 0x07,
+ RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff
+};
+
+enum nes_term_ddp_errors {
+ DDP_CATASTROPHIC_LOCAL = 0x00,
+ DDP_TAGGED_INV_STAG = 0x00,
+ DDP_TAGGED_BOUNDS = 0x01,
+ DDP_TAGGED_UNASSOC_STAG = 0x02,
+ DDP_TAGGED_TO_WRAP = 0x03,
+ DDP_TAGGED_INV_DDP_VER = 0x04,
+ DDP_UNTAGGED_INV_QN = 0x01,
+ DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+ DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+ DDP_UNTAGGED_INV_MO = 0x04,
+ DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+ DDP_UNTAGGED_INV_DDP_VER = 0x06
+};
+
+enum nes_term_mpa_errors {
+ MPA_CLOSED = 0x01,
+ MPA_CRC = 0x02,
+ MPA_MARKER = 0x03,
+ MPA_REQ_RSP = 0x04,
+};
+
+struct nes_terminate_hdr {
+ u8 layer_etype;
+ u8 error_code;
+ u8 hdrct;
+ u8 rsvd;
+};
+
+/* Used to determine how to fill in terminate error codes */
+#define IWARP_OPCODE_WRITE 0
+#define IWARP_OPCODE_READREQ 1
+#define IWARP_OPCODE_READRSP 2
+#define IWARP_OPCODE_SEND 3
+#define IWARP_OPCODE_SEND_INV 4
+#define IWARP_OPCODE_SEND_SE 5
+#define IWARP_OPCODE_SEND_SE_INV 6
+#define IWARP_OPCODE_TERM 7
+
+/* These values are used only during terminate processing */
+#define TERM_DDP_LEN_TAGGED 14
+#define TERM_DDP_LEN_UNTAGGED 18
+#define TERM_RDMA_LEN 28
+#define RDMA_OPCODE_MASK 0x0f
+#define RDMA_READ_REQ_OPCODE 1
+#define BAD_FRAME_OFFSET 64
+#define CQE_MAJOR_DRV 0x8000
+
#define nes_vlan_rx vlan_hwaccel_receive_skb
#define nes_netif_rx netif_receive_skb
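
The new struct nes_terminate_hdr and the enums above encode the iWARP terminate control information the way the RDMAP/DDP specifications lay it out: the upper nibble of layer_etype carries the layer (RDMA, DDP, or MPA), the lower nibble the error type, error_code the specific code, and hdrct flags record which of the DDP length, DDP header, and RDMA header are appended after the fixed part. A hedged userspace sketch of composing that first byte, with local enum values standing in for the driver's:

	#include <stdint.h>
	#include <stdio.h>

	enum layer { LAYER_RDMA = 0, LAYER_DDP = 1, LAYER_MPA = 2 };

	struct term_hdr {
		uint8_t layer_etype;	/* layer in bits 7..4, error type in bits 3..0 */
		uint8_t error_code;
		uint8_t hdrct;		/* which headers/length follow */
		uint8_t rsvd;
	};

	/* Sketch: build the first two bytes of a terminate header. */
	static void set_term_code(struct term_hdr *h, enum layer layer,
				  uint8_t etype, uint8_t code)
	{
		h->layer_etype = (uint8_t)((layer << 4) | (etype & 0x0f));
		h->error_code = code;
	}

	int main(void)
	{
		struct term_hdr h = { 0 };

		set_term_code(&h, LAYER_DDP, 1 /* tagged buffer */, 0x00 /* invalid STag */);
		printf("layer_etype=0x%02x error_code=0x%02x\n", h.layer_etype, h.error_code);
		return 0;
	}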
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index a282031d15c7..9687c397ce1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
} else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
nesadapter->virtwq = 1;
}
+ if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
+ nesadapter->send_term_ok = 1;
+
nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
(u32)((u8)eeprom_data);
@@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
}
if (cqp_request == NULL) {
- cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
+ cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
if (cqp_request) {
cqp_request->dynamic = 1;
INIT_LIST_HEAD(&cqp_request->list);
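
The GFP_KERNEL to GFP_ATOMIC switch above matters presumably because nes_get_cqp_request() can now be reached from contexts that must not sleep, such as the flush and terminate paths driven by asynchronous event handling; GFP_ATOMIC fails more readily, so callers still have to cope with a NULL request. A hedged kernel-style sketch of that pattern (the struct and function are placeholders):

	#include <linux/slab.h>

	struct sketch_req {
		int dynamic;
	};

	/* Sketch: an allocation reachable from non-sleeping context uses
	 * GFP_ATOMIC and simply tolerates failure. */
	static struct sketch_req *sketch_get_request_atomic(void)
	{
		struct sketch_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);

		if (req)
			req->dynamic = 1;
		return req;	/* may be NULL; caller handles the failure */
	}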
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 21e0fd336cf7..a680c42d6e8c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
*/
static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct net_device *netdev = nesvnic->netdev;
+
memset(props, 0, sizeof(*props));
- props->max_mtu = IB_MTU_2048;
- props->active_mtu = IB_MTU_2048;
+ props->max_mtu = IB_MTU_4096;
+
+ if (netdev->mtu >= 4096)
+ props->active_mtu = IB_MTU_4096;
+ else if (netdev->mtu >= 2048)
+ props->active_mtu = IB_MTU_2048;
+ else if (netdev->mtu >= 1024)
+ props->active_mtu = IB_MTU_1024;
+ else if (netdev->mtu >= 512)
+ props->active_mtu = IB_MTU_512;
+ else
+ props->active_mtu = IB_MTU_256;
+
props->lid = 1;
props->lmc = 0;
props->sm_lid = 0;
props->sm_sl = 0;
- props->state = IB_PORT_ACTIVE;
+ if (nesvnic->linkup)
+ props->state = IB_PORT_ACTIVE;
+ else
+ props->state = IB_PORT_DOWN;
props->phys_state = 0;
props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
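
The nes_query_port() hunk above stops hard-coding a 2048-byte MTU and an ACTIVE port state: active_mtu is now the netdev MTU rounded down to the nearest IB MTU enum value, and the state follows the link. A hedged userspace sketch of the same rounding, with a locally defined enum standing in for the ib_verbs.h values:

	#include <stdio.h>

	enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096 };

	/* Sketch: round a byte MTU down to the largest IB MTU that fits. */
	static enum ib_mtu mtu_to_ib_mtu(unsigned int mtu)
	{
		if (mtu >= 4096)
			return IB_MTU_4096;
		if (mtu >= 2048)
			return IB_MTU_2048;
		if (mtu >= 1024)
			return IB_MTU_1024;
		if (mtu >= 512)
			return IB_MTU_512;
		return IB_MTU_256;
	}

	int main(void)
	{
		printf("1500 -> enum %d\n", mtu_to_ib_mtu(1500));	/* IB_MTU_1024 */
		return 0;
	}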
@@ -1506,12 +1523,45 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
/**
+ * nes_clean_cq
+ */
+static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
+{
+ u32 cq_head;
+ u32 lo;
+ u32 hi;
+ u64 u64temp;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&nescq->lock, flags);
+
+ cq_head = nescq->hw_cq.cq_head;
+ while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
+ rmb();
+ lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+ hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
+ u64temp = (((u64)hi) << 32) | ((u64)lo);
+ u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
+ if (u64temp == (u64)(unsigned long)nesqp) {
+ /* Zero the context value so cqe will be ignored */
+ nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
+ nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
+ }
+
+ if (++cq_head >= nescq->hw_cq.cq_size)
+ cq_head = 0;
+ }
+
+ spin_unlock_irqrestore(&nescq->lock, flags);
+}
+
+
+/**
* nes_destroy_qp
*/
static int nes_destroy_qp(struct ib_qp *ibqp)
{
struct nes_qp *nesqp = to_nesqp(ibqp);
- /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
struct nes_ucontext *nes_ucontext;
struct ib_qp_attr attr;
struct iw_cm_id *cm_id;
@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
}
-
if (nesqp->user_mode) {
if ((ibqp->uobject)&&(ibqp->uobject->context)) {
nes_ucontext = to_nesucontext(ibqp->uobject->context);
@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
}
if (nesqp->pbl_pbase)
kunmap(nesqp->page);
+ } else {
+ /* Clean any pending completions from the cq(s) */
+ if (nesqp->nesscq)
+ nes_clean_cq(nesqp, nesqp->nesscq);
+
+ if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
+ nes_clean_cq(nesqp, nesqp->nesrcq);
}
nes_rem_ref(&nesqp->ibqp);
@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
* nes_hw_modify_qp
*/
int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
- u32 next_iwarp_state, u32 wait_completion)
+ u32 next_iwarp_state, u32 termlen, u32 wait_completion)
{
struct nes_hw_cqp_wqe *cqp_wqe;
/* struct iw_cm_id *cm_id = nesqp->cm_id; */
@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
+ /* If sending a terminate message, fill in the length (in words) */
+ if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
+ !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
+ termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
+ }
+
atomic_set(&cqp_request->refcount, 2);
nes_post_cqp_request(nesdev, cqp_request);
@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
nesqp->hwqp.qp_id);
+ if (nesqp->term_flags)
+ del_timer(&nesqp->terminate_timer);
+
next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
/* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
if (nesqp->hte_added) {
@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (issue_modify_qp) {
nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
- ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1);
+ ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
if (ret)
nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
" failed for QP%u.\n",
@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
head = nesqp->hwqp.sq_head;
while (ib_wr) {
+ /* Check for QP error */
+ if (nesqp->term_flags) {
+ err = -EINVAL;
+ break;
+ }
+
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
err = -EINVAL;
@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
head = nesqp->hwqp.rq_head;
while (ib_wr) {
+ /* Check for QP error */
+ if (nesqp->term_flags) {
+ err = -EINVAL;
+ break;
+ }
+
if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
err = -EINVAL;
break;
@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
u64 u64temp;
u64 wrid;
- /* u64 u64temp; */
unsigned long flags = 0;
struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
struct nes_qp *nesqp;
struct nes_hw_cqe cqe;
u32 head;
- u32 wq_tail;
+ u32 wq_tail = 0;
u32 cq_size;
u32 cqe_count = 0;
u32 wqe_index;
u32 u32temp;
- /* u32 counter; */
+ u32 move_cq_head = 1;
+ u32 err_code;
nes_debug(NES_DBG_CQ, "\n");
@@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
cq_size = nescq->hw_cq.cq_size;
while (cqe_count < num_entries) {
- if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
- NES_CQE_VALID) {
- /*
- * Make sure we read CQ entry contents *after*
- * we've checked the valid bit.
- */
- rmb();
-
- cqe = nescq->hw_cq.cq_vbase[head];
- nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
- u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
- wqe_index = u32temp &
- (nesdev->nesadapter->max_qp_wr - 1);
- u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
- /* parse CQE, get completion context from WQE (either rq or sq */
- u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
- ((u64)u32temp);
- nesqp = *((struct nes_qp **)&u64temp);
+ if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
+ NES_CQE_VALID) == 0)
+ break;
+
+ /*
+ * Make sure we read CQ entry contents *after*
+ * we've checked the valid bit.
+ */
+ rmb();
+
+ cqe = nescq->hw_cq.cq_vbase[head];
+ u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+ wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
+ u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
+ /* parse CQE, get completion context from WQE (either rq or sq) */
+ u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+ ((u64)u32temp);
+
+ if (u64temp) {
+ nesqp = (struct nes_qp *)(unsigned long)u64temp;
memset(entry, 0, sizeof *entry);
if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
entry->status = IB_WC_SUCCESS;
} else {
- entry->status = IB_WC_WR_FLUSH_ERR;
+ err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
+ if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
+ entry->status = err_code & 0x0000ffff;
+
+ /* The rest of the CQEs will be marked as flushed */
+ nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
+ cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
+ NES_IWARP_CQE_MINOR_FLUSH);
+ } else
+ entry->status = IB_WC_WR_FLUSH_ERR;
}
entry->qp = &nesqp->ibqp;
@@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
if (nesqp->skip_lsmm) {
nesqp->skip_lsmm = 0;
- wq_tail = nesqp->hwqp.sq_tail++;
+ nesqp->hwqp.sq_tail++;
}
/* Working on a SQ Completion*/
- wq_tail = wqe_index;
- nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
- wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
- ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
- entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
- switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
case NES_IWARP_SQ_OP_RDMAW:
nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
@@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
case NES_IWARP_SQ_OP_RDMAR:
nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
entry->opcode = IB_WC_RDMA_READ;
- entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
break;
case NES_IWARP_SQ_OP_SENDINV:
@@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
entry->opcode = IB_WC_SEND;
break;
}
+
+ nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
+ if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
+ move_cq_head = 0;
+ wq_tail = nesqp->hwqp.sq_tail;
+ }
} else {
/* Working on a RQ Completion*/
- wq_tail = wqe_index;
- nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
- wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
- ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+ wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
+ ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
entry->opcode = IB_WC_RECV;
+
+ nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
+ if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
+ move_cq_head = 0;
+ wq_tail = nesqp->hwqp.rq_tail;
+ }
}
+
entry->wr_id = wrid;
+ entry++;
+ cqe_count++;
+ }
+ if (move_cq_head) {
+ nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
if (++head >= cq_size)
head = 0;
- cqe_count++;
nescq->polled_completions++;
+
if ((nescq->polled_completions > (cq_size / 2)) ||
(nescq->polled_completions == 255)) {
nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
- " are pending %u of %u.\n",
- nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
+ " are pending %u of %u.\n",
+ nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
nes_write32(nesdev->regs+NES_CQE_ALLOC,
- nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
+ nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
nescq->polled_completions = 0;
}
- entry++;
- } else
- break;
+ } else {
+ /* Update the wqe index and set status to flush */
+ wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+ wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
+ nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
+ cpu_to_le32(wqe_index);
+ move_cq_head = 1; /* ready for next pass */
+ }
}
if (nescq->polled_completions) {
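
For reference, the new active_mtu selection in nes_query_port() above simply clamps the Ethernet MTU down to the largest IB MTU step it still covers. A minimal userspace sketch of the same ladder; the enum values here are illustrative stand-ins for the real ib_mtu definitions from <rdma/ib_verbs.h>:

#include <stdio.h>

/* Illustrative stand-ins for the IB MTU enum (assumption: real values live
 * in <rdma/ib_verbs.h>). */
enum ib_mtu_demo { MTU_256 = 256, MTU_512 = 512, MTU_1024 = 1024,
		   MTU_2048 = 2048, MTU_4096 = 4096 };

/* Pick the largest IB MTU that does not exceed the netdev MTU, mirroring the
 * if/else ladder added to nes_query_port(). */
static enum ib_mtu_demo active_mtu_from_netdev(unsigned int netdev_mtu)
{
	if (netdev_mtu >= 4096)
		return MTU_4096;
	if (netdev_mtu >= 2048)
		return MTU_2048;
	if (netdev_mtu >= 1024)
		return MTU_1024;
	if (netdev_mtu >= 512)
		return MTU_512;
	return MTU_256;
}

int main(void)
{
	printf("mtu 1500 -> IB MTU %d\n", active_mtu_from_netdev(1500)); /* 1024 */
	printf("mtu 9000 -> IB MTU %d\n", active_mtu_from_netdev(9000)); /* 4096 */
	return 0;
}
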
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 41c07f29f7c9..89822d75f82e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -40,6 +40,10 @@ struct nes_device;
#define NES_MAX_USER_DB_REGIONS 4096
#define NES_MAX_USER_WQ_REGIONS 4096
+#define NES_TERM_SENT 0x01
+#define NES_TERM_RCVD 0x02
+#define NES_TERM_DONE 0x04
+
struct nes_ucontext {
struct ib_ucontext ibucontext;
struct nes_device *nesdev;
@@ -119,6 +123,11 @@ struct nes_wq {
spinlock_t lock;
};
+struct disconn_work {
+ struct work_struct work;
+ struct nes_qp *nesqp;
+};
+
struct iw_cm_id;
struct ietf_mpa_frame;
@@ -127,7 +136,6 @@ struct nes_qp {
void *allocated_buffer;
struct iw_cm_id *cm_id;
struct workqueue_struct *wq;
- struct work_struct disconn_work;
struct nes_cq *nesscq;
struct nes_cq *nesrcq;
struct nes_pd *nespd;
@@ -155,9 +163,13 @@ struct nes_qp {
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
+ struct timer_list terminate_timer;
+ enum ib_event_type terminate_eventtype;
wait_queue_head_t kick_waitq;
u16 in_disconnect;
u16 private_data_len;
+ u16 term_sq_flush_code;
+ u16 term_rq_flush_code;
u8 active_conn;
u8 skip_lsmm;
u8 user_mode;
@@ -165,7 +177,7 @@ struct nes_qp {
u8 hw_iwarp_state;
u8 flush_issued;
u8 hw_tcp_state;
- u8 disconn_pending;
+ u8 term_flags;
u8 destroyed;
};
#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 181b1f32325f..8f4b4fca2a1d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -31,7 +31,6 @@
*/
#include <rdma/ib_cm.h>
-#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index e7e5adf84e84..e35f4a0ea9d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <rdma/ib_cache.h>
#include <linux/ip.h>
#include <linux/tcp.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e319d91f60a6..2bf5116deec4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
skb_queue_len(&neigh->queue));
goto err_drop;
}
- } else
+ } else {
+ spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
+ return;
+ }
} else {
neigh->ah = NULL;
@@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
ipoib_dbg(priv, "Send unicast ARP to %04x\n",
be16_to_cpu(path->pathrec.dlid));
+ spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+ return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
/* put pseudoheader back on for next time */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index a0e97532e714..25874fc680c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -720,7 +720,9 @@ out:
}
}
+ spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
+ return;
}
unlock:
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}
+static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
+ const u8 *broadcast)
+{
+ if (addrlen != INFINIBAND_ALEN)
+ return 0;
+ /* reserved QPN, prefix, scope */
+ if (memcmp(addr, broadcast, 6))
+ return 0;
+ /* signature lower, pkey */
+ if (memcmp(addr + 7, broadcast + 7, 3))
+ return 0;
+ return 1;
+}
+
void ipoib_mcast_restart_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work)
for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
union ib_gid mgid;
+ if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
+ mclist->dmi_addrlen,
+ dev->broadcast))
+ continue;
+
memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
mcast = __ipoib_mcast_find(dev, &mgid);
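
A standalone sketch of the address check introduced above: only the reserved-QPN/GID-prefix/scope bytes (0-5) and the signature/P_Key bytes (7-9) have to match the broadcast address, while byte 6 and the group-specific bytes may differ. The sample addresses below are made up for illustration; INFINIBAND_ALEN is assumed to be 20:

#include <stdio.h>
#include <string.h>

#define INFINIBAND_ALEN 20	/* length of an IPoIB hardware address */

/* Userspace replica of the ipoib_mcast_addr_is_valid() helper above. */
static int mcast_addr_is_valid(const unsigned char *addr, unsigned int addrlen,
			       const unsigned char *broadcast)
{
	if (addrlen != INFINIBAND_ALEN)
		return 0;
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
		return 0;
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
		return 0;
	return 1;
}

int main(void)
{
	unsigned char bcast[INFINIBAND_ALEN] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0x12, 0x40, 0x1b,
		0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0xff, 0xff };
	unsigned char mcast[INFINIBAND_ALEN];

	memcpy(mcast, bcast, sizeof(mcast));
	mcast[19] = 0x01;	/* different multicast group: still valid */
	printf("valid: %d\n", mcast_addr_is_valid(mcast, sizeof(mcast), bcast));

	mcast[3] = 0x00;	/* corrupted GID prefix: rejected */
	printf("valid: %d\n", mcast_addr_is_valid(mcast, sizeof(mcast), bcast));
	return 0;
}
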
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 6e186b1a062d..652bd33109e3 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -582,7 +582,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
break;
case STATUSTYPE_TABLE:
sz = 0;
- table_args = strstr(lc->usr_argv_str, " ");
+ table_args = strchr(lc->usr_argv_str, ' ');
BUG_ON(!table_args); /* There will always be a ' ' */
table_args++;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index fb5df5c6203e..c97ab82ec743 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1286,6 +1286,7 @@ static int cxgb_open(struct net_device *dev)
if (!other_ports)
schedule_chk_task(adapter);
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
return 0;
}
@@ -1318,6 +1319,7 @@ static int cxgb_close(struct net_device *dev)
if (!adapter->open_device_map)
cxgb_down(adapter);
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
return 0;
}
@@ -2717,7 +2719,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
if (is_offload(adapter) &&
test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
- cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
offload_close(&adapter->tdev);
}
@@ -2782,7 +2784,7 @@ static void t3_resume_ports(struct adapter *adapter)
}
if (is_offload(adapter) && !ofld_disable)
- cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}
/*
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index f9f54b57b28c..75064eea1d87 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -153,14 +153,14 @@ void cxgb3_remove_clients(struct t3cdev *tdev)
mutex_unlock(&cxgb3_db_lock);
}
-void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error)
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
struct cxgb3_client *client;
mutex_lock(&cxgb3_db_lock);
list_for_each_entry(client, &client_list, client_list) {
- if (client->err_handler)
- client->err_handler(tdev, status, error);
+ if (client->event_handler)
+ client->event_handler(tdev, event, port);
}
mutex_unlock(&cxgb3_db_lock);
}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 55945f422aec..670aa62042da 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -64,14 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client);
void cxgb3_unregister_client(struct cxgb3_client *client);
void cxgb3_add_clients(struct t3cdev *tdev);
void cxgb3_remove_clients(struct t3cdev *tdev);
-void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error);
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
struct sk_buff *skb, void *ctx);
enum {
OFFLOAD_STATUS_UP,
- OFFLOAD_STATUS_DOWN
+ OFFLOAD_STATUS_DOWN,
+ OFFLOAD_PORT_DOWN,
+ OFFLOAD_PORT_UP
};
struct cxgb3_client {
@@ -82,7 +84,7 @@ struct cxgb3_client {
int (*redirect)(void *ctx, struct dst_entry *old,
struct dst_entry *new, struct l2t_entry *l2t);
struct list_head client_list;
- void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error);
+ void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
};
/*
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 24f7ca5e17de..a00ec639c380 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -491,7 +491,7 @@ static int gfar_remove(struct of_device *ofdev)
dev_set_drvdata(&ofdev->dev, NULL);
- unregister_netdev(dev);
+ unregister_netdev(priv->ndev);
iounmap(priv->regs);
free_netdev(priv->ndev);
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index ac57b6a42c6e..ccfe276943f0 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -34,7 +34,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index b9ceddde46c0..bffb7995cb70 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
@@ -42,6 +41,10 @@
#include "fw.h"
enum {
+ MLX4_IRQNAME_SIZE = 64
+};
+
+enum {
MLX4_NUM_ASYNC_EQE = 0x100,
MLX4_NUM_SPARE_EQE = 0x80,
MLX4_EQ_ENTRY_SIZE = 0x20
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
iounmap(priv->clr_base);
}
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- int ret;
-
- /*
- * We assume that mapping one page is enough for the whole EQ
- * context table. This is fine with all current HCAs, because
- * we only use 32 EQs and each EQ uses 64 bytes of context
- * memory, or 1 KB total.
- */
- priv->eq_table.icm_virt = icm_virt;
- priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
- if (!priv->eq_table.icm_page)
- return -ENOMEM;
- priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
- __free_page(priv->eq_table.icm_page);
- return -ENOMEM;
- }
-
- ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
- if (ret) {
- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->eq_table.icm_page);
- }
-
- return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
-
- mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->eq_table.icm_page);
-}
-
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
priv->eq_table.clr_int = priv->clr_base +
(priv->eq_table.inta_pin < 32 ? 4 : 0);
- priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+ priv->eq_table.irq_names =
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+ GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
goto err_out_bitmap;
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
goto err_out_comp;
if (dev->flags & MLX4_FLAG_MSI_X) {
- static const char async_eq_name[] = "mlx4-async";
const char *eq_name;
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
if (i < dev->caps.num_comp_vectors) {
- snprintf(priv->eq_table.irq_names + i * 16, 16,
- "mlx4-comp-%d", i);
- eq_name = priv->eq_table.irq_names + i * 16;
- } else
- eq_name = async_eq_name;
+ snprintf(priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-comp-%d@pci:%s", i,
+ pci_name(dev->pdev));
+ } else {
+ snprintf(priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-async@pci:%s",
+ pci_name(dev->pdev));
+ }
+ eq_name = priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE;
err = request_irq(priv->eq_table.eq[i].irq,
mlx4_msi_x_interrupt, 0, eq_name,
priv->eq_table.eq + i);
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
priv->eq_table.eq[i].have_irq = 1;
}
} else {
+ snprintf(priv->eq_table.irq_names,
+ MLX4_IRQNAME_SIZE,
+ DRV_NAME "@pci:%s",
+ pci_name(dev->pdev));
err = request_irq(dev->pdev->irq, mlx4_interrupt,
- IRQF_SHARED, DRV_NAME, dev);
+ IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
goto err_out_async;
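
A quick userspace sketch of the irq_names layout the hunk above switches to: one flat allocation with a fixed MLX4_IRQNAME_SIZE stride per completion vector plus one trailing slot for the async EQ. The PCI name string here is a made-up example standing in for pci_name(dev->pdev):

#include <stdio.h>
#include <stdlib.h>

#define IRQNAME_SIZE 64		/* mirrors MLX4_IRQNAME_SIZE above */

int main(void)
{
	const char *pci = "0000:0b:00.0";	/* stand-in for pci_name() */
	int num_comp_vectors = 4;
	char *names;
	int i;

	/* One buffer, one fixed-size slot per vector plus one for the async
	 * EQ, exactly as the reworked mlx4_init_eq_table() sizes it. */
	names = malloc(IRQNAME_SIZE * (num_comp_vectors + 1));
	if (!names)
		return 1;

	for (i = 0; i < num_comp_vectors + 1; ++i) {
		if (i < num_comp_vectors)
			snprintf(names + i * IRQNAME_SIZE, IRQNAME_SIZE,
				 "mlx4-comp-%d@pci:%s", i, pci);
		else
			snprintf(names + i * IRQNAME_SIZE, IRQNAME_SIZE,
				 "mlx4-async@pci:%s", pci);
		printf("irq slot %d -> \"%s\"\n", i, names + i * IRQNAME_SIZE);
	}
	free(names);
	return 0;
}
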
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index baf4bf66062c..04b382fcb8c8 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index dac621b1e9fc..3dd481e77f92 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
goto err_unmap_aux;
}
- err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+ err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+ init_hca->eqc_base, dev_cap->eqc_entry_sz,
+ dev->caps.num_eqs, dev->caps.num_eqs,
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ err_unmap_mtt:
mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
err_unmap_eq:
- mlx4_unmap_eq_icm(dev);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
err_unmap_cmpt:
mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
- mlx4_unmap_eq_icm(dev);
mlx4_UNMAP_ICM_AUX(dev);
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
return 0;
err_close:
- mlx4_close_hca(dev);
+ mlx4_CLOSE_HCA(dev, 0);
err_free_icm:
mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_pdev;
}
- err = pci_request_region(pdev, 0, DRV_NAME);
+ err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
+ dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
goto err_disable_pdev;
}
- err = pci_request_region(pdev, 2, DRV_NAME);
- if (err) {
- dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
- goto err_release_bar0;
- }
-
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
- goto err_release_bar2;
+ goto err_release_regions;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (err) {
dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
"aborting.\n");
- goto err_release_bar2;
+ goto err_release_regions;
}
}
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "Device struct alloc failed, "
"aborting.\n");
err = -ENOMEM;
- goto err_release_bar2;
+ goto err_release_regions;
}
dev = &priv->dev;
@@ -1205,11 +1202,8 @@ err_cmd:
err_free_dev:
kfree(priv);
-err_release_bar2:
- pci_release_region(pdev, 2);
-
-err_release_bar0:
- pci_release_region(pdev, 0);
+err_release_regions:
+ pci_release_regions(pdev);
err_disable_pdev:
pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_disable_msix(pdev);
kfree(priv);
- pci_release_region(pdev, 2);
- pci_release_region(pdev, 0);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 6053c357a470..5ccbce9866fe 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5bd79c2b184f..bc72d6e4919b 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
void __iomem **uar_map;
u32 clr_mask;
struct mlx4_eq *eq;
- u64 icm_virt;
- struct page *icm_page;
- dma_addr_t icm_dma;
+ struct mlx4_icm_table table;
struct mlx4_icm_table cmpt_table;
int have_irq;
u8 inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
-
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index f96948be0a44..ca7ab8e7b4cc 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 26d1a7a9e375..c4988d6bd5b2 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <asm/page.h>
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index bd22df95adf9..ca25b9dc8378 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -32,8 +32,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
-
#include "mlx4.h"
#include "fw.h"
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 1c565ef8d179..42ab9fc01d3e 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -33,8 +33,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
-
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index 3951b884c0fb..e5741dab3825 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index fe9f218691f5..1377d0dc8f1f 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
-
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 42b6c6319bc2..87214a257d2a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -130,17 +130,10 @@ static inline struct tun_sock *tun_sk(struct sock *sk)
static int tun_attach(struct tun_struct *tun, struct file *file)
{
struct tun_file *tfile = file->private_data;
- const struct cred *cred = current_cred();
int err;
ASSERT_RTNL();
- /* Check permissions */
- if (((tun->owner != -1 && cred->euid != tun->owner) ||
- (tun->group != -1 && !in_egroup_p(tun->group))) &&
- !capable(CAP_NET_ADMIN))
- return -EPERM;
-
netif_tx_lock_bh(tun->dev);
err = -EINVAL;
@@ -926,6 +919,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
+ const struct cred *cred = current_cred();
+
if (ifr->ifr_flags & IFF_TUN_EXCL)
return -EBUSY;
if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -935,6 +930,14 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
return -EINVAL;
+ if (((tun->owner != -1 && cred->euid != tun->owner) ||
+ (tun->group != -1 && !in_egroup_p(tun->group))) &&
+ !capable(CAP_NET_ADMIN))
+ return -EPERM;
+ err = security_tun_dev_attach(tun->sk);
+ if (err < 0)
+ return err;
+
err = tun_attach(tun, file);
if (err < 0)
return err;
@@ -947,6 +950,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ err = security_tun_dev_create();
+ if (err < 0)
+ return err;
/* Set dev type */
if (ifr->ifr_flags & IFF_TUN) {
@@ -989,6 +995,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->sk = sk;
container_of(sk, struct tun_sock, sk)->tun = tun;
+ security_tun_dev_post_create(sk);
+
tun_net_init(dev);
if (strchr(dev->name, '%')) {
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 242257b19441..a7aae24f2889 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/oprofile.h>
-#include <linux/vmalloc.h>
#include <linux/errno.h>
#include "event_buffer.h"
@@ -407,6 +406,21 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val)
return op_cpu_buffer_add_data(entry, val);
}
+int oprofile_add_data64(struct op_entry *entry, u64 val)
+{
+ if (!entry->event)
+ return 0;
+ if (op_cpu_buffer_get_size(entry) < 2)
+ /*
+ * the function returns 0 to indicate that the buffer
+ * is too small, even if some space is left
+ */
+ return 0;
+ if (!op_cpu_buffer_add_data(entry, (u32)val))
+ return 0;
+ return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
+}
+
int oprofile_write_commit(struct op_entry *entry)
{
if (!entry->event)
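
oprofile_add_data64() above stores a 64-bit value as two 32-bit buffer entries, low word first; a trivial sketch of the split and of how a consumer would reassemble it:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;

	/* Split as oprofile_add_data64() does: low 32 bits, then high. */
	uint32_t lo = (uint32_t)val;
	uint32_t hi = (uint32_t)(val >> 32);

	/* Consumer side: recombine the two entries. */
	uint64_t back = ((uint64_t)hi << 32) | lo;
	printf("lo=0x%08x hi=0x%08x recombined=0x%016llx\n",
	       (unsigned)lo, (unsigned)hi, (unsigned long long)back);
	return 0;
}
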
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 3cffce90f82a..dc8a0428260d 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -12,6 +12,8 @@
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
#include <asm/mutex.h>
#include "oprof.h"
@@ -87,6 +89,69 @@ out:
return err;
}
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void switch_worker(struct work_struct *work);
+static DECLARE_DELAYED_WORK(switch_work, switch_worker);
+
+static void start_switch_worker(void)
+{
+ if (oprofile_ops.switch_events)
+ schedule_delayed_work(&switch_work, oprofile_time_slice);
+}
+
+static void stop_switch_worker(void)
+{
+ cancel_delayed_work_sync(&switch_work);
+}
+
+static void switch_worker(struct work_struct *work)
+{
+ if (oprofile_ops.switch_events())
+ return;
+
+ atomic_inc(&oprofile_stats.multiplex_counter);
+ start_switch_worker();
+}
+
+/* User input is in ms; convert it to jiffies */
+int oprofile_set_timeout(unsigned long val_msec)
+{
+ int err = 0;
+ unsigned long time_slice;
+
+ mutex_lock(&start_mutex);
+
+ if (oprofile_started) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (!oprofile_ops.switch_events) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ time_slice = msecs_to_jiffies(val_msec);
+ if (time_slice == MAX_JIFFY_OFFSET) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ oprofile_time_slice = time_slice;
+
+out:
+ mutex_unlock(&start_mutex);
+ return err;
+
+}
+
+#else
+
+static inline void start_switch_worker(void) { }
+static inline void stop_switch_worker(void) { }
+
+#endif
/* Actually start profiling (echo 1>/dev/oprofile/enable) */
int oprofile_start(void)
@@ -108,6 +173,8 @@ int oprofile_start(void)
if ((err = oprofile_ops.start()))
goto out;
+ start_switch_worker();
+
oprofile_started = 1;
out:
mutex_unlock(&start_mutex);
@@ -123,6 +190,9 @@ void oprofile_stop(void)
goto out;
oprofile_ops.stop();
oprofile_started = 0;
+
+ stop_switch_worker();
+
/* wake up the daemon to read what remains */
wake_up_buffer_waiter();
out:
@@ -155,7 +225,6 @@ post_sync:
mutex_unlock(&start_mutex);
}
-
int oprofile_set_backtrace(unsigned long val)
{
int err = 0;
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index c288d3c24b50..cb92f5c98c1a 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -24,6 +24,8 @@ struct oprofile_operations;
extern unsigned long oprofile_buffer_size;
extern unsigned long oprofile_cpu_buffer_size;
extern unsigned long oprofile_buffer_watershed;
+extern unsigned long oprofile_time_slice;
+
extern struct oprofile_operations oprofile_ops;
extern unsigned long oprofile_started;
extern unsigned long oprofile_backtrace_depth;
@@ -35,5 +37,6 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root);
void oprofile_timer_init(struct oprofile_operations *ops);
int oprofile_set_backtrace(unsigned long depth);
+int oprofile_set_timeout(unsigned long time);
#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 5d36ffc30dd5..bbd7516e0869 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -9,6 +9,7 @@
#include <linux/fs.h>
#include <linux/oprofile.h>
+#include <linux/jiffies.h>
#include "event_buffer.h"
#include "oprofile_stats.h"
@@ -17,10 +18,51 @@
#define BUFFER_SIZE_DEFAULT 131072
#define CPU_BUFFER_SIZE_DEFAULT 8192
#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
+#define TIME_SLICE_DEFAULT 1
unsigned long oprofile_buffer_size;
unsigned long oprofile_cpu_buffer_size;
unsigned long oprofile_buffer_watershed;
+unsigned long oprofile_time_slice;
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static ssize_t timeout_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
+ buf, count, offset);
+}
+
+
+static ssize_t timeout_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ retval = oprofile_set_timeout(val);
+
+ if (retval)
+ return retval;
+ return count;
+}
+
+
+static const struct file_operations timeout_fops = {
+ .read = timeout_read,
+ .write = timeout_write,
+};
+
+#endif
+
static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
@@ -129,6 +171,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
+ oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT);
oprofilefs_create_file(sb, root, "enable", &enable_fops);
oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -139,6 +182,9 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+ oprofilefs_create_file(sb, root, "time_slice", &timeout_fops);
+#endif
oprofile_create_stats_files(sb, root);
if (oprofile_ops.create_files)
oprofile_ops.create_files(sb, root);
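
oprofile_set_timeout() above converts the user-supplied milliseconds with msecs_to_jiffies() and rejects values that saturate to MAX_JIFFY_OFFSET. A rough userspace analog of that conversion and guard; HZ and the saturation constant here are illustrative assumptions, not the kernel's values:

#include <stdio.h>
#include <limits.h>

#define HZ 250					/* illustrative tick rate */
#define MAX_TICK_OFFSET (ULONG_MAX >> 1)	/* stand-in for MAX_JIFFY_OFFSET */

/* Round milliseconds up to whole ticks and saturate on overflow, which is
 * the condition oprofile_set_timeout() treats as -EINVAL. */
static unsigned long msec_to_ticks(unsigned long msec)
{
	unsigned long ticks;

	if (msec > (ULONG_MAX - 999) / HZ)	/* multiply would overflow */
		return MAX_TICK_OFFSET;
	ticks = (msec * HZ + 999) / 1000;
	return ticks > MAX_TICK_OFFSET ? MAX_TICK_OFFSET : ticks;
}

int main(void)
{
	printf("1 ms -> %lu ticks\n", msec_to_ticks(1));	/* TIME_SLICE_DEFAULT */
	printf("huge -> %lu ticks (saturated)\n", msec_to_ticks(ULONG_MAX));
	return 0;
}
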
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 3c2270a8300c..61689e814d46 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -34,6 +34,7 @@ void oprofile_reset_stats(void)
atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
atomic_set(&oprofile_stats.event_lost_overflow, 0);
atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+ atomic_set(&oprofile_stats.multiplex_counter, 0);
}
@@ -76,4 +77,8 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
&oprofile_stats.event_lost_overflow);
oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
&oprofile_stats.bt_lost_no_mapping);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+ oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter",
+ &oprofile_stats.multiplex_counter);
+#endif
}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 3da0d08dc1f9..0b54e46c3c14 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -17,6 +17,7 @@ struct oprofile_stat_struct {
atomic_t sample_lost_no_mapping;
atomic_t bt_lost_no_mapping;
atomic_t event_lost_overflow;
+ atomic_t multiplex_counter;
};
extern struct oprofile_stat_struct oprofile_stats;
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 4f5b8712931f..44803644ca05 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -55,15 +55,12 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
- /*
- * alloc irq desc if not allocated already.
- */
- desc = irq_to_desc_alloc_node(irq, node);
+ desc = irq_to_desc(irq);
if (!desc) {
printk(KERN_INFO "can not get irq_desc for %d\n", irq);
return NULL;
@@ -72,16 +69,11 @@ static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
- desc->irq_2_iommu = get_one_free_irq_2_iommu(node);
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
- return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
-}
-
#else /* !CONFIG_SPARSE_IRQ */
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 06b965623962..85ce23997be4 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -992,7 +992,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX,
static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
{
- /* set sb600/sb700/sb800 sata to ahci mode */
+ /* set SBX00 SATA in IDE mode to AHCI mode */
u8 tmp;
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
@@ -1011,6 +1011,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
/*
* Serverworks CSB5 IDE does not fully support native mode
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 7e6b5a3b3281..fc83783c3a96 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -55,12 +55,13 @@ __asm__(".text \n"
#define Q2_SET_SEL(cpu, selname, address, size) \
do { \
-struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
-set_base(gdt[(selname) >> 3], (u32)(address)); \
-set_limit(gdt[(selname) >> 3], size); \
+ struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
+ set_desc_base(&gdt[(selname) >> 3], (u32)(address)); \
+ set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
} while(0)
-static struct desc_struct bad_bios_desc;
+static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
/*
* At some point we want to use this stack frame pointer to unwind
@@ -476,19 +477,15 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
pnp_bios_callpoint.offset = header->fields.pm16offset;
pnp_bios_callpoint.segment = PNP_CS16;
- bad_bios_desc.a = 0;
- bad_bios_desc.b = 0x00409200;
-
- set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
- _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
for_each_possible_cpu(i) {
struct desc_struct *gdt = get_cpu_gdt_table(i);
if (!gdt)
continue;
- set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
- set_base(gdt[GDT_ENTRY_PNPBIOS_CS16],
- __va(header->fields.pm16cseg));
- set_base(gdt[GDT_ENTRY_PNPBIOS_DS],
- __va(header->fields.pm16dseg));
+ set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS32],
+ (unsigned long)&pnp_bios_callfunc);
+ set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS16],
+ (unsigned long)__va(header->fields.pm16cseg));
+ set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
+ (unsigned long)__va(header->fields.pm16dseg));
}
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 3f62dd50bbbe..e109da4583a8 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -669,14 +669,14 @@ static void dasd_profile_end(struct dasd_block *block,
* memory and 2) dasd_smalloc_request uses the static ccw memory
* that gets allocated for each device.
*/
-struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
+struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
int datasize,
struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
/* Sanity checks */
- BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
+ BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
@@ -700,14 +700,13 @@ struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
return ERR_PTR(-ENOMEM);
}
}
- strncpy((char *) &cqr->magic, magic, 4);
- ASCEBC((char *) &cqr->magic, 4);
+ cqr->magic = magic;
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
}
-struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
int datasize,
struct dasd_device *device)
{
@@ -717,7 +716,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
int size;
/* Sanity checks */
- BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
+ BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
@@ -744,8 +743,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
cqr->data = data;
memset(cqr->data, 0, datasize);
}
- strncpy((char *) &cqr->magic, magic, 4);
- ASCEBC((char *) &cqr->magic, 4);
+ cqr->magic = magic;
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
@@ -899,9 +897,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
switch (rc) {
case 0:
cqr->status = DASD_CQR_IN_IO;
- DBF_DEV_EVENT(DBF_DEBUG, device,
- "start_IO: request %p started successful",
- cqr);
break;
case -EBUSY:
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
@@ -1699,8 +1694,11 @@ static void __dasd_process_request_queue(struct dasd_block *block)
* for that. State DASD_STATE_ONLINE is normal block device
* operation.
*/
- if (basedev->state < DASD_STATE_READY)
+ if (basedev->state < DASD_STATE_READY) {
+ while ((req = blk_fetch_request(block->request_queue)))
+ __blk_end_request_all(req, -EIO);
return;
+ }
/* Now we try to fetch requests from the request queue */
while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
@@ -2530,7 +2528,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
void *rdc_buffer,
int rdc_buffer_size,
- char *magic)
+ int magic)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -2561,7 +2559,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
}
-int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
+int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
void *rdc_buffer, int rdc_buffer_size)
{
int ret;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 27991b692056..e8ff7b0c961d 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,7 +7,7 @@
*
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-eckd"
#include <linux/timer.h>
#include <linux/slab.h>
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 5b7bbc87593b..70a008c00522 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -5,7 +5,7 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-eckd"
#include <linux/list.h>
#include <asm/ebcdic.h>
@@ -379,8 +379,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
int rc;
unsigned long flags;
- cqr = dasd_kmalloc_request("ECKD",
- 1 /* PSF */ + 1 /* RSSD */ ,
+ cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data)),
device);
if (IS_ERR(cqr))
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 644086ba2ede..4e49b4a6c880 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
*
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-diag"
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -523,8 +523,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
/* Build the request */
datasize = sizeof(struct dasd_diag_req) +
count*sizeof(struct dasd_diag_bio);
- cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
- datasize, memdev);
+ cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c11770f5b368..a1ce573648a2 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -10,7 +10,7 @@
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-eckd"
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -730,7 +730,8 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
+ device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -934,8 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
struct dasd_eckd_private *private;
private = (struct dasd_eckd_private *) device->private;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1 /* PSF */ + 1 /* RSSD */ ,
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_features)),
device);
@@ -998,7 +998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
struct dasd_psf_ssc_data *psf_ssc_data;
struct ccw1 *ccw;
- cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_ssc_data),
device);
@@ -1149,8 +1149,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
goto out_err3;
/* Read Device Characteristics */
- rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data,
- 64);
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ &private->rdc_data, 64);
if (rc) {
DBF_EVENT(DBF_WARNING,
"Read device characteristics failed, rc=%d for "
@@ -1217,8 +1217,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cplength = 8;
datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- cplength, datasize, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -1499,8 +1498,7 @@ dasd_eckd_format_device(struct dasd_device * device,
return ERR_PTR(-EINVAL);
}
/* Allocate the format ccw request. */
- fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
- cplength, datasize, device);
+ fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
if (IS_ERR(fcp))
return fcp;
@@ -1783,8 +1781,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
datasize += count*sizeof(struct LO_eckd_data);
}
/* Allocate the ccw request. */
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- cplength, datasize, startdev);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+ startdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -1948,8 +1946,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cidaw * sizeof(unsigned long long);
/* Allocate the ccw request. */
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- cplength, datasize, startdev);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+ startdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -2249,8 +2247,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
/* Allocate the ccw request. */
itcw_size = itcw_calc_size(0, ctidaw, 0);
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 0, itcw_size, startdev);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
if (IS_ERR(cqr))
return cqr;
@@ -2557,8 +2554,7 @@ dasd_eckd_release(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -2600,8 +2596,7 @@ dasd_eckd_reserve(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -2642,8 +2637,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -2681,8 +2675,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
struct ccw1 *ccw;
int rc;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1 /* PSF */ + 1 /* RSSD */ ,
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_perf_stats_t)),
device);
@@ -2828,7 +2821,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
}
/* setup CCWs for PSF + RSSD */
- cqr = dasd_smalloc_request("ECKD", 2 , 0, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -3254,7 +3247,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
/* Read Device Characteristics */
memset(&private->rdc_data, 0, sizeof(private->rdc_data));
- rc = dasd_generic_read_dev_chars(device, "ECKD",
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
DBF_EVENT(DBF_WARNING,
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index c24c8c30380d..d96039eae59b 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -6,7 +6,7 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-eckd"
#include <linux/init.h>
#include <linux/fs.h>
@@ -464,7 +464,7 @@ int dasd_eer_enable(struct dasd_device *device)
if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
- cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */,
+ cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
SNSS_DATA_SIZE, device);
if (IS_ERR(cqr))
return -ENOMEM;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index cb8f9cef7429..7656384a811d 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -99,8 +99,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
cqr->lpm = LPM_ANYPATH;
cqr->status = DASD_CQR_FILLED;
} else {
- dev_err(&device->cdev->dev,
- "default ERP has run out of retries and failed\n");
+ pr_err("%s: default ERP has run out of retries and failed\n",
+ dev_name(&device->cdev->dev));
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock();
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 31849ad5e59f..f245377e8e27 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -5,7 +5,7 @@
* Copyright IBM Corp. 1999, 2009
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-fba"
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -152,8 +152,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
block->base = device;
/* Read Device Characteristics */
- rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data,
- 32);
+ rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
+ &private->rdc_data, 32);
if (rc) {
DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
"error %d for device: %s",
@@ -305,8 +305,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
datasize += (count - 1)*sizeof(struct LO_fba_data);
}
/* Allocate the ccw request. */
- cqr = dasd_smalloc_request(dasd_fba_discipline.name,
- cplength, datasize, memdev);
+ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index b699ca356ac5..5e47a1ee52b9 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -59,6 +59,11 @@
#include <asm/dasd.h>
#include <asm/idals.h>
+/* DASD discipline magic */
+#define DASD_ECKD_MAGIC 0xC5C3D2C4
+#define DASD_DIAG_MAGIC 0xC4C9C1C7
+#define DASD_FBA_MAGIC 0xC6C2C140
+
/*
* SECTION: Type definitions
*/
@@ -540,9 +545,9 @@ extern struct block_device_operations dasd_device_operations;
extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
-dasd_kmalloc_request(char *, int, int, struct dasd_device *);
+dasd_kmalloc_request(int , int, int, struct dasd_device *);
struct dasd_ccw_req *
-dasd_smalloc_request(char *, int, int, struct dasd_device *);
+dasd_smalloc_request(int , int, int, struct dasd_device *);
void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
@@ -587,7 +592,7 @@ void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
int dasd_generic_restore_device(struct ccw_device *);
-int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int);
+int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
/* externals in dasd_devmap.c */
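
The new DASD_*_MAGIC constants are just the old four-character discipline strings ("ECKD", "DIAG", "FBA ") after the strncpy()+ASCEBC() conversion the removed code performed, packed into a u32. A small check program; the EBCDIC table below covers only the characters actually needed:

#include <stdio.h>

/* Minimal ASCII->EBCDIC mapping for the DASD discipline names. */
static unsigned char to_ebcdic(char c)
{
	switch (c) {
	case 'A': return 0xC1; case 'B': return 0xC2; case 'C': return 0xC3;
	case 'D': return 0xC4; case 'E': return 0xC5; case 'F': return 0xC6;
	case 'G': return 0xC7; case 'I': return 0xC9; case 'K': return 0xD2;
	case ' ': return 0x40;
	default:  return 0x00;
	}
}

/* Build the 32-bit magic the same way the old strncpy()+ASCEBC() path
 * effectively did: four EBCDIC bytes, most significant first. */
static unsigned int magic_from_name(const char *name)
{
	unsigned int magic = 0;
	int i;

	for (i = 0; i < 4; i++)
		magic = (magic << 8) | to_ebcdic(name[i]);
	return magic;
}

int main(void)
{
	printf("ECKD -> 0x%08X\n", magic_from_name("ECKD"));	/* 0xC5C3D2C4 */
	printf("DIAG -> 0x%08X\n", magic_from_name("DIAG"));	/* 0xC4C9C1C7 */
	printf("FBA  -> 0x%08X\n", magic_from_name("FBA "));	/* 0xC6C2C140 */
	return 0;
}
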
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index df918ef27965..f756a1b0c57a 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -98,8 +98,8 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
- dev_info(&base->cdev->dev, "The DASD has been put in the quiesce "
- "state\n");
+ pr_info("%s: The DASD has been put in the quiesce "
+ "state\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped |= DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -119,8 +119,8 @@ static int dasd_ioctl_resume(struct dasd_block *block)
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
- dev_info(&base->cdev->dev, "I/O operations have been resumed "
- "on the DASD\n");
+ pr_info("%s: I/O operations have been resumed "
+ "on the DASD\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped &= ~DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -146,8 +146,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
- dev_warn(&base->cdev->dev,
- "The DASD cannot be formatted while it is enabled\n");
+ pr_warning("%s: The DASD cannot be formatted while it is "
+ "enabled\n", dev_name(&base->cdev->dev));
return -EBUSY;
}
@@ -175,9 +175,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
dasd_sfree_request(cqr, cqr->memdev);
if (rc) {
if (rc != -ERESTARTSYS)
- dev_err(&base->cdev->dev,
- "Formatting unit %d failed with "
- "rc=%d\n", fdata->start_unit, rc);
+ pr_err("%s: Formatting unit %d failed with "
+ "rc=%d\n", dev_name(&base->cdev->dev),
+ fdata->start_unit, rc);
return rc;
}
fdata->start_unit++;
@@ -204,9 +204,9 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
return -EFAULT;
if (bdev != bdev->bd_contains) {
- dev_warn(&block->base->cdev->dev,
- "The specified DASD is a partition and cannot be "
- "formatted\n");
+ pr_warning("%s: The specified DASD is a partition and cannot "
+ "be formatted\n",
+ dev_name(&block->base->cdev->dev));
return -EINVAL;
}
return dasd_format(block, &fdata);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index db442cd6621e..ee604e92a5fa 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -42,7 +42,6 @@
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
-#include <asm/checksum.h>
#define XPRAM_NAME "xpram"
#define XPRAM_DEVS 1 /* one partition */
@@ -51,7 +50,6 @@
typedef struct {
unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */
- unsigned int csum; /* partition checksum for suspend */
} xpram_device_t;
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
@@ -387,58 +385,6 @@ out:
}
/*
- * Save checksums for all partitions.
- */
-static int xpram_save_checksums(void)
-{
- unsigned long mem_page;
- int rc, i;
-
- rc = 0;
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return -ENOMEM;
- for (i = 0; i < xpram_devs; i++) {
- rc = xpram_page_in(mem_page, xpram_devices[i].offset);
- if (rc)
- goto fail;
- xpram_devices[i].csum = csum_partial((const void *) mem_page,
- PAGE_SIZE, 0);
- }
-fail:
- free_page(mem_page);
- return rc ? -ENXIO : 0;
-}
-
-/*
- * Verify checksums for all partitions.
- */
-static int xpram_validate_checksums(void)
-{
- unsigned long mem_page;
- unsigned int csum;
- int rc, i;
-
- rc = 0;
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return -ENOMEM;
- for (i = 0; i < xpram_devs; i++) {
- rc = xpram_page_in(mem_page, xpram_devices[i].offset);
- if (rc)
- goto fail;
- csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0);
- if (xpram_devices[i].csum != csum) {
- rc = -EINVAL;
- goto fail;
- }
- }
-fail:
- free_page(mem_page);
- return rc ? -ENXIO : 0;
-}
-
-/*
* Resume failed: Print error message and call panic.
*/
static void xpram_resume_error(const char *message)
@@ -458,21 +404,10 @@ static int xpram_restore(struct device *dev)
xpram_resume_error("xpram disappeared");
if (xpram_pages != xpram_highest_page_index() + 1)
xpram_resume_error("Size of xpram changed");
- if (xpram_validate_checksums())
- xpram_resume_error("Data of xpram changed");
return 0;
}
-/*
- * Save necessary state in suspend.
- */
-static int xpram_freeze(struct device *dev)
-{
- return xpram_save_checksums();
-}
-
static struct dev_pm_ops xpram_pm_ops = {
- .freeze = xpram_freeze,
.restore = xpram_restore,
};
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 0769ced52dbd..4e34d3686c23 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -82,6 +82,16 @@ config SCLP_CPI
You should only select this option if you know what you are doing,
need this feature and intend to run your kernel in LPAR.
+config SCLP_ASYNC
+ tristate "Support for Call Home via Asynchronous SCLP Records"
+ depends on S390
+ help
+ This option enables the call home function, which is able to inform
+ the service element and connected organisations about a kernel panic.
+ You should only select this option if you know what you are doing,
+ want to inform other people about your kernel panics,
+ need this feature and intend to run your kernel in LPAR.
+
config S390_TAPE
tristate "S/390 tape device support"
depends on CCW
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 7e73e39a1741..efb500ab66c0 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
+obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 3234e90bd7f9..89ece1c235aa 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -581,7 +581,7 @@ static int __init mon_init(void)
monreader_device->release = (void (*)(struct device *))kfree;
rc = device_register(monreader_device);
if (rc) {
- kfree(monreader_device);
+ put_device(monreader_device);
goto out_driver;
}
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 60e7cb07095b..6bb5a6bdfab5 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -27,6 +27,7 @@
#define EVTYP_VT220MSG 0x1A
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
+#define EVTYP_ASYNC 0x0A
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
@@ -38,6 +39,7 @@
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_CONFMGMDATA_MASK 0x10000000
#define EVTYP_SDIAS_MASK 0x00000010
+#define EVTYP_ASYNC_MASK 0x00400000
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
@@ -85,12 +87,12 @@ struct sccb_header {
} __attribute__((packed));
extern u64 sclp_facilities;
-
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
+
struct gds_subvector {
u8 length;
u8 key;
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
new file mode 100644
index 000000000000..daaec185ed36
--- /dev/null
+++ b/drivers/s390/char/sclp_async.c
@@ -0,0 +1,224 @@
+/*
+ * Enable Asynchronous Notification via SCLP.
+ *
+ * Copyright IBM Corp. 2009
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+#include <linux/utsname.h>
+#include "sclp.h"
+
+static int callhome_enabled;
+static struct sclp_req *request;
+static struct sclp_async_sccb *sccb;
+static int sclp_async_send_wait(char *message);
+static struct ctl_table_header *callhome_sysctl_header;
+static DEFINE_SPINLOCK(sclp_async_lock);
+static char nodename[64];
+#define SCLP_NORMAL_WRITE 0x00
+
+struct async_evbuf {
+ struct evbuf_header header;
+ u64 reserved;
+ u8 rflags;
+ u8 empty;
+ u8 rtype;
+ u8 otype;
+ char comp_id[12];
+ char data[3000]; /* there is still some space left */
+} __attribute__((packed));
+
+struct sclp_async_sccb {
+ struct sccb_header header;
+ struct async_evbuf evbuf;
+} __attribute__((packed));
+
+static struct sclp_register sclp_async_register = {
+ .send_mask = EVTYP_ASYNC_MASK,
+};
+
+static int call_home_on_panic(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ strncat(data, nodename, strlen(nodename));
+ sclp_async_send_wait(data);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block call_home_panic_nb = {
+ .notifier_call = call_home_on_panic,
+ .priority = INT_MAX,
+};
+
+static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp,
+ void __user *buffer, size_t *count,
+ loff_t *ppos)
+{
+ unsigned long val;
+ int len, rc;
+ char buf[2];
+
+ if (!*count | (*ppos && !write)) {
+ *count = 0;
+ return 0;
+ }
+ if (!write) {
+ len = sprintf(buf, "%d\n", callhome_enabled);
+ buf[len] = '\0';
+ rc = copy_to_user(buffer, buf, sizeof(buf));
+ if (rc != 0)
+ return -EFAULT;
+ } else {
+ len = *count;
+ rc = copy_from_user(buf, buffer, sizeof(buf));
+ if (rc != 0)
+ return -EFAULT;
+ if (strict_strtoul(buf, 0, &val) != 0)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ callhome_enabled = val;
+ }
+ *count = len;
+ *ppos += len;
+ return 0;
+}
+
+static struct ctl_table callhome_table[] = {
+ {
+ .procname = "callhome",
+ .mode = 0644,
+ .proc_handler = &proc_handler_callhome,
+ },
+ { .ctl_name = 0 }
+};
+
+static struct ctl_table kern_dir_table[] = {
+ {
+ .ctl_name = CTL_KERN,
+ .procname = "kernel",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = callhome_table,
+ },
+ { .ctl_name = 0 }
+};
+
+/*
+ * Function used to transfer asynchronous notification
+ * records and to wait for send completion
+ */
+static int sclp_async_send_wait(char *message)
+{
+ struct async_evbuf *evb;
+ int rc;
+ unsigned long flags;
+
+ if (!callhome_enabled)
+ return 0;
+ sccb->evbuf.header.type = EVTYP_ASYNC;
+ sccb->evbuf.rtype = 0xA5;
+ sccb->evbuf.otype = 0x00;
+ evb = &sccb->evbuf;
+ request->command = SCLP_CMDW_WRITE_EVENT_DATA;
+ request->sccb = sccb;
+ request->status = SCLP_REQ_FILLED;
+ strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
+ /*
+ * Retain Queue
+ * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
+ */
+ strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
+ sccb->evbuf.header.length = sizeof(sccb->evbuf);
+ sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
+ sccb->header.function_code = SCLP_NORMAL_WRITE;
+ rc = sclp_add_request(request);
+ if (rc)
+ return rc;
+ spin_lock_irqsave(&sclp_async_lock, flags);
+ while (request->status != SCLP_REQ_DONE &&
+ request->status != SCLP_REQ_FAILED) {
+ sclp_sync_wait();
+ }
+ spin_unlock_irqrestore(&sclp_async_lock, flags);
+ if (request->status != SCLP_REQ_DONE)
+ return -EIO;
+ rc = ((struct sclp_async_sccb *)
+ request->sccb)->header.response_code;
+ if (rc != 0x0020)
+ return -EIO;
+ if (evb->header.flags != 0x80)
+ return -EIO;
+ return rc;
+}
+
+static int __init sclp_async_init(void)
+{
+ int rc;
+
+ rc = sclp_register(&sclp_async_register);
+ if (rc)
+ return rc;
+ callhome_sysctl_header = register_sysctl_table(kern_dir_table);
+ if (!callhome_sysctl_header) {
+ rc = -ENOMEM;
+ goto out_sclp;
+ }
+ if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
+ rc = -EOPNOTSUPP;
+ goto out_sclp;
+ }
+ rc = -ENOMEM;
+ request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
+ if (!request)
+ goto out_sys;
+ sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ goto out_mem;
+ rc = atomic_notifier_chain_register(&panic_notifier_list,
+ &call_home_panic_nb);
+ if (rc)
+ goto out_mem;
+
+ strncpy(nodename, init_utsname()->nodename, 64);
+ return 0;
+
+out_mem:
+ kfree(request);
+ free_page((unsigned long) sccb);
+out_sys:
+ unregister_sysctl_table(callhome_sysctl_header);
+out_sclp:
+ sclp_unregister(&sclp_async_register);
+ return rc;
+
+}
+module_init(sclp_async_init);
+
+static void __exit sclp_async_exit(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &call_home_panic_nb);
+ unregister_sysctl_table(callhome_sysctl_header);
+ sclp_unregister(&sclp_async_register);
+ free_page((unsigned long) sccb);
+ kfree(request);
+}
+module_exit(sclp_async_exit);
+
+MODULE_AUTHOR("Copyright IBM Corp. 2009");
+MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5a519fac37b7..2fe45ff77b75 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -8,7 +8,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#define KMSG_COMPONENT "tape"
+#define KMSG_COMPONENT "tape_34xx"
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 418f72dd39b4..e4cc3aae9162 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -8,7 +8,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#define KMSG_COMPONENT "tape"
+#define KMSG_COMPONENT "tape_3590"
#include <linux/module.h>
#include <linux/init.h>
@@ -39,8 +39,6 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
* - Read Alternate: implemented
*******************************************************************/
-#define KMSG_COMPONENT "tape"
-
static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
[0x00] = "",
[0x10] = "Lost Sense",
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 47ff695255ea..4cb9e70507ab 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -302,8 +302,6 @@ tapeblock_revalidate_disk(struct gendisk *disk)
if (!device->blk_data.medium_changed)
return 0;
- dev_info(&device->cdev->dev, "Determining the size of the recorded "
- "area...\n");
rc = tape_mtop(device, MTFSFM, 1);
if (rc)
return rc;
@@ -312,6 +310,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
if (rc < 0)
return rc;
+ pr_info("%s: Determining the size of the recorded area...\n",
+ dev_name(&device->cdev->dev));
DBF_LH(3, "Image file ends at %d\n", rc);
nr_of_blks = rc;
@@ -330,8 +330,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
device->bof = rc;
nr_of_blks -= rc;
- dev_info(&device->cdev->dev, "The size of the recorded area is %i "
- "blocks\n", nr_of_blks);
+ pr_info("%s: The size of the recorded area is %i blocks\n",
+ dev_name(&device->cdev->dev), nr_of_blks);
set_capacity(device->blk_data.disk,
nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
@@ -366,8 +366,8 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
if (device->required_tapemarks) {
DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
- dev_warn(&device->cdev->dev, "Opening the tape failed because"
- " of missing end-of-file marks\n");
+ pr_warning("%s: Opening the tape failed because of missing "
+ "end-of-file marks\n", dev_name(&device->cdev->dev));
rc = -EPERM;
goto put_device;
}
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 1d420d947596..5cd31e071647 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -214,13 +214,15 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
switch(newstate){
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
- dev_info(&device->cdev->dev, "The tape cartridge has been "
- "successfully unloaded\n");
+ if (device->medium_state == MS_LOADED)
+ pr_info("%s: The tape cartridge has been successfully "
+ "unloaded\n", dev_name(&device->cdev->dev));
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
- dev_info(&device->cdev->dev, "A tape cartridge has been "
- "mounted\n");
+ if (device->medium_state == MS_UNLOADED)
+ pr_info("%s: A tape cartridge has been mounted\n",
+ dev_name(&device->cdev->dev));
break;
default:
// print nothing
@@ -358,11 +360,11 @@ tape_generic_online(struct tape_device *device,
out_char:
tapechar_cleanup_device(device);
+out_minor:
+ tape_remove_minor(device);
out_discipline:
device->discipline->cleanup_device(device);
device->discipline = NULL;
-out_minor:
- tape_remove_minor(device);
out:
module_put(discipline->owner);
return rc;
@@ -654,8 +656,8 @@ tape_generic_remove(struct ccw_device *cdev)
*/
DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
device->cdev_id);
- dev_warn(&device->cdev->dev, "A tape unit was detached"
- " while in use\n");
+ pr_warning("%s: A tape unit was detached while in "
+ "use\n", dev_name(&device->cdev->dev));
tape_state_set(device, TS_NOT_OPER);
__tape_discard_requests(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 1a9420ba518d..750354ad16e5 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -68,7 +68,7 @@ tape_std_assign(struct tape_device *device)
* to another host (actually this shouldn't happen but it does).
* So we set up a timeout for this call.
*/
- init_timer(&timeout);
+ init_timer_on_stack(&timeout);
timeout.function = tape_std_assign_timeout;
timeout.data = (unsigned long) request;
timeout.expires = jiffies + 2 * HZ;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c20a4fe6da51..d1a142fa3eb4 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -765,8 +765,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
} else
return -ENOMEM;
ret = device_register(dev);
- if (ret)
+ if (ret) {
+ put_device(dev);
return ret;
+ }
ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
if (ret) {
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 31b902e94f7b..77571b68539a 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -1026,9 +1026,15 @@ static int __init ur_init(void)
debug_set_level(vmur_dbf, 6);
+ vmur_class = class_create(THIS_MODULE, "vmur");
+ if (IS_ERR(vmur_class)) {
+ rc = PTR_ERR(vmur_class);
+ goto fail_free_dbf;
+ }
+
rc = ccw_driver_register(&ur_driver);
if (rc)
- goto fail_free_dbf;
+ goto fail_class_destroy;
rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
if (rc) {
@@ -1038,18 +1044,13 @@ static int __init ur_init(void)
}
ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
- vmur_class = class_create(THIS_MODULE, "vmur");
- if (IS_ERR(vmur_class)) {
- rc = PTR_ERR(vmur_class);
- goto fail_unregister_region;
- }
pr_info("%s loaded.\n", ur_banner);
return 0;
-fail_unregister_region:
- unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
fail_unregister_driver:
ccw_driver_unregister(&ur_driver);
+fail_class_destroy:
+ class_destroy(vmur_class);
fail_free_dbf:
debug_unregister(vmur_dbf);
return rc;
@@ -1057,9 +1058,9 @@ fail_free_dbf:
static void __exit ur_exit(void)
{
- class_destroy(vmur_class);
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
ccw_driver_unregister(&ur_driver);
+ class_destroy(vmur_class);
debug_unregister(vmur_dbf);
pr_info("%s unloaded.\n", ur_banner);
}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1bbae433fbd8..c431198bdbc4 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -275,7 +275,7 @@ struct zcore_header {
u32 num_pages;
u32 pad1;
u64 tod;
- cpuid_t cpu_id;
+ struct cpuid cpu_id;
u32 arch_id;
u32 volnr;
u32 build_arch;
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index adb3dd301528..fa4c9662f65e 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 common i/o drivers
#
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 3e5f304ad88f..40002830d48a 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -417,7 +417,8 @@ int chp_new(struct chp_id chpid)
if (ret) {
CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
chpid.cssid, chpid.id, ret);
- goto out_free;
+ put_device(&chp->dev);
+ goto out;
}
ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
if (ret) {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 425e8f89a6c5..37aa611d4ac5 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -37,29 +37,6 @@ struct channel_path_desc {
struct channel_path;
-struct css_general_char {
- u64 : 12;
- u32 dynio : 1; /* bit 12 */
- u32 : 28;
- u32 aif : 1; /* bit 41 */
- u32 : 3;
- u32 mcss : 1; /* bit 45 */
- u32 fcs : 1; /* bit 46 */
- u32 : 1;
- u32 ext_mb : 1; /* bit 48 */
- u32 : 7;
- u32 aif_tdd : 1; /* bit 56 */
- u32 : 1;
- u32 qebsm : 1; /* bit 58 */
- u32 : 8;
- u32 aif_osa : 1; /* bit 67 */
- u32 : 14;
- u32 cib : 1; /* bit 82 */
- u32 : 5;
- u32 fcx : 1; /* bit 88 */
- u32 : 7;
-}__attribute__((packed));
-
struct css_chsc_char {
u64 res;
u64 : 20;
@@ -72,7 +49,6 @@ struct css_chsc_char {
u32 : 19;
}__attribute__((packed));
-extern struct css_general_char css_general_characteristics;
extern struct css_chsc_char css_chsc_characteristics;
struct chsc_ssd_info {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5ec7789bd9d8..138124fcfcad 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -139,12 +139,11 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
__u8 lpm, /* logical path mask */
__u8 key) /* storage key */
{
- char dbf_txt[15];
int ccode;
union orb *orb;
- CIO_TRACE_EVENT(4, "stIO");
- CIO_TRACE_EVENT(4, dev_name(&sch->dev));
+ CIO_TRACE_EVENT(5, "stIO");
+ CIO_TRACE_EVENT(5, dev_name(&sch->dev));
orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
@@ -169,8 +168,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
ccode = ssch(sch->schid, orb);
/* process condition code */
- sprintf(dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT(4, dbf_txt);
+ CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -201,16 +199,14 @@ cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
int
cio_resume (struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
- CIO_TRACE_EVENT (4, "resIO");
+ CIO_TRACE_EVENT(4, "resIO");
CIO_TRACE_EVENT(4, dev_name(&sch->dev));
ccode = rsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (4, dbf_txt);
+ CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -235,13 +231,12 @@ cio_resume (struct subchannel *sch)
int
cio_halt(struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "haltIO");
+ CIO_TRACE_EVENT(2, "haltIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
@@ -249,8 +244,7 @@ cio_halt(struct subchannel *sch)
*/
ccode = hsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -270,13 +264,12 @@ cio_halt(struct subchannel *sch)
int
cio_clear(struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "clearIO");
+ CIO_TRACE_EVENT(2, "clearIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
@@ -284,8 +277,7 @@ cio_clear(struct subchannel *sch)
*/
ccode = csch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -306,19 +298,17 @@ cio_clear(struct subchannel *sch)
int
cio_cancel (struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "cancelIO");
+ CIO_TRACE_EVENT(2, "cancelIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
ccode = xsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0: /* success */
@@ -429,11 +419,10 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- char dbf_txt[15];
int retry;
int ret;
- CIO_TRACE_EVENT (2, "ensch");
+ CIO_TRACE_EVENT(2, "ensch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
@@ -460,8 +449,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
} else
break;
}
- sprintf (dbf_txt, "ret:%d", ret);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
@@ -472,11 +460,10 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- char dbf_txt[15];
int retry;
int ret;
- CIO_TRACE_EVENT (2, "dissch");
+ CIO_TRACE_EVENT(2, "dissch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
@@ -495,8 +482,7 @@ int cio_disable_subchannel(struct subchannel *sch)
} else
break;
}
- sprintf (dbf_txt, "ret:%d", ret);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
@@ -578,11 +564,6 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
goto out;
}
mutex_init(&sch->reg_mutex);
- /* Set a name for the subchannel */
- if (cio_is_console(schid))
- sch->dev.init_name = cio_get_console_sch_name(schid);
- else
- dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
/*
* The first subchannel that is not-operational (ccode==3)
@@ -686,7 +667,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
-static char console_sch_name[10] = "0.x.xxxx";
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;
@@ -873,12 +853,6 @@ cio_get_console_subchannel(void)
return &console_subchannel;
}
-const char *cio_get_console_sch_name(struct subchannel_id schid)
-{
- snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
- return (const char *)console_sch_name;
-}
-
#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 5150fba742ac..2e43558c704b 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -133,15 +133,11 @@ extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
extern spinlock_t * cio_get_console_lock(void);
extern void *cio_get_console_priv(void);
-extern const char *cio_get_console_sch_name(struct subchannel_id schid);
-extern const char *cio_get_console_cdev_name(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#define cio_get_console_lock() NULL
#define cio_get_console_priv() NULL
-#define cio_get_console_sch_name(schid) NULL
-#define cio_get_console_cdev_name(sch) NULL
#endif
#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 85d43c6bcb66..e995123fd805 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -152,24 +152,15 @@ css_alloc_subchannel(struct subchannel_id schid)
}
static void
-css_free_subchannel(struct subchannel *sch)
-{
- if (sch) {
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
- kfree(sch->lock);
- kfree(sch);
- }
-}
-
-static void
css_subchannel_release(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (!cio_is_console(sch->schid)) {
+ /* Reset intparm to zeroes. */
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
@@ -180,6 +171,8 @@ static int css_sch_device_register(struct subchannel *sch)
int ret;
mutex_lock(&sch->reg_mutex);
+ dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
+ sch->schid.sch_no);
ret = device_register(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
@@ -327,7 +320,7 @@ int css_probe_device(struct subchannel_id schid)
return PTR_ERR(sch);
ret = css_register_subchannel(sch);
if (ret)
- css_free_subchannel(sch);
+ put_device(&sch->dev);
return ret;
}
@@ -644,7 +637,10 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
* not working) so we do it now. This is true e.g. for the
* console subchannel.
*/
- css_register_subchannel(sch);
+ if (css_register_subchannel(sch)) {
+ if (!cio_is_console(schid))
+ put_device(&sch->dev);
+ }
return 0;
}
@@ -661,8 +657,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
- css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
+ css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
+ css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
@@ -920,8 +916,10 @@ init_channel_subsystem (void)
goto out_device;
}
ret = device_register(&css->pseudo_subchannel->dev);
- if (ret)
+ if (ret) {
+ put_device(&css->pseudo_subchannel->dev);
goto out_file;
+ }
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d593bc76afe3..0f95405c2c5e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -307,8 +307,11 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
static void ccw_device_unregister(struct ccw_device *cdev)
{
- if (test_and_clear_bit(1, &cdev->private->registered))
+ if (test_and_clear_bit(1, &cdev->private->registered)) {
device_del(&cdev->dev);
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ }
}
static void ccw_device_remove_orphan_cb(struct work_struct *work)
@@ -319,7 +322,6 @@ static void ccw_device_remove_orphan_cb(struct work_struct *work)
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
ccw_device_unregister(cdev);
- put_device(&cdev->dev);
/* Release cdev reference for workqueue processing. */
put_device(&cdev->dev);
}
@@ -333,15 +335,15 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
* Forced offline in disconnected state means
* 'throw away device'.
*/
- /* Get cdev reference for workqueue processing. */
- if (!get_device(&cdev->dev))
- return;
if (ccw_device_is_orphan(cdev)) {
/*
* Deregister ccw device.
* Unfortunately, we cannot do this directly from the
* attribute method.
*/
+ /* Get cdev reference for workqueue processing. */
+ if (!get_device(&cdev->dev))
+ return;
spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
@@ -380,30 +382,34 @@ int ccw_device_set_offline(struct ccw_device *cdev)
}
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
- ret = ccw_device_offline(cdev);
- if (ret == -ENODEV) {
- if (cdev->private->state != DEV_STATE_NOT_OPER) {
- cdev->private->state = DEV_STATE_OFFLINE;
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- }
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
- /* Give up reference from ccw_device_set_online(). */
- put_device(&cdev->dev);
- return ret;
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
}
+ ret = ccw_device_offline(cdev);
+ if (ret)
+ goto error;
spin_unlock_irq(cdev->ccwlock);
- if (ret == 0) {
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- /* Give up reference from ccw_device_set_online(). */
- put_device(&cdev->dev);
- } else {
- CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
- "device 0.%x.%04x\n",
- ret, cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- cdev->online = 1;
- }
- return ret;
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return 0;
+
+error:
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ cdev->private->state = DEV_STATE_OFFLINE;
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return -ENODEV;
}
/**
@@ -421,6 +427,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
int ccw_device_set_online(struct ccw_device *cdev)
{
int ret;
+ int ret2;
if (!cdev)
return -ENODEV;
@@ -444,28 +451,53 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- if (cdev->private->state != DEV_STATE_ONLINE) {
+ spin_lock_irq(cdev->ccwlock);
+ /* Check if online processing was successful */
+ if ((cdev->private->state != DEV_STATE_ONLINE) &&
+ (cdev->private->state != DEV_STATE_W4SENSE)) {
+ spin_unlock_irq(cdev->ccwlock);
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return -ENODEV;
}
- if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
- cdev->online = 1;
- return 0;
- }
+ spin_unlock_irq(cdev->ccwlock);
+ if (cdev->drv->set_online)
+ ret = cdev->drv->set_online(cdev);
+ if (ret)
+ goto rollback;
+ cdev->online = 1;
+ return 0;
+
+rollback:
spin_lock_irq(cdev->ccwlock);
- ret = ccw_device_offline(cdev);
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
+ }
+ ret2 = ccw_device_offline(cdev);
+ if (ret2)
+ goto error;
spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else
- CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
- "device 0.%x.%04x\n",
- ret, cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
- return (ret == 0) ? -ENODEV : ret;
+ return ret;
+
+error:
+ CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret2, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ cdev->private->state = DEV_STATE_OFFLINE;
+ spin_unlock_irq(cdev->ccwlock);
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
}
static int online_store_handle_offline(struct ccw_device *cdev)
@@ -637,8 +669,12 @@ static int ccw_device_register(struct ccw_device *cdev)
int ret;
dev->bus = &ccw_bus_type;
-
- if ((ret = device_add(dev)))
+ ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret)
+ return ret;
+ ret = device_add(dev);
+ if (ret)
return ret;
set_bit(1, &cdev->private->registered);
@@ -1024,9 +1060,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
return;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
/* Release cdev reference for workqueue processing.*/
put_device(&cdev->dev);
/* Release subchannel reference for local processing. */
@@ -1035,6 +1068,9 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
{
+ /* Get cdev reference for workqueue processing. */
+ if (!get_device(&cdev->dev))
+ return;
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister);
queue_work(slow_path_wq, &cdev->private->kick_work);
@@ -1055,9 +1091,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
/* Device did not respond in time. */
case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1;
- /* Remove device found not operational. */
- if (!get_device(&cdev->dev))
- break;
ccw_device_schedule_sch_unregister(cdev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
@@ -1095,13 +1128,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
init_waitqueue_head(&priv->wait_q);
init_timer(&priv->timer);
- /* Set an initial name for the device. */
- if (cio_is_console(sch->schid))
- cdev->dev.init_name = cio_get_console_cdev_name(sch);
- else
- dev_set_name(&cdev->dev, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
-
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
@@ -1171,8 +1197,8 @@ static void io_subchannel_irq(struct subchannel *sch)
cdev = sch_get_cdev(sch);
- CIO_TRACE_EVENT(3, "IRQ");
- CIO_TRACE_EVENT(3, dev_name(&sch->dev));
+ CIO_TRACE_EVENT(6, "IRQ");
+ CIO_TRACE_EVENT(6, dev_name(&sch->dev));
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
@@ -1210,9 +1236,6 @@ static void io_subchannel_do_unreg(struct work_struct *work)
sch = container_of(work, struct subchannel, work);
css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
put_device(&sch->dev);
}
@@ -1334,7 +1357,6 @@ io_subchannel_remove (struct subchannel *sch)
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
ccw_device_unregister(cdev);
- put_device(&cdev->dev);
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
@@ -1571,8 +1593,6 @@ static int purge_fn(struct device *dev, void *data)
spin_unlock_irq(cdev->ccwlock);
if (!unreg)
goto out;
- if (!get_device(&cdev->dev))
- goto out;
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
priv->dev_id.devno);
ccw_device_schedule_sch_unregister(cdev);
@@ -1688,10 +1708,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
spin_unlock_irqrestore(sch->lock, flags);
css_sch_device_unregister(sch);
spin_lock_irqsave(sch->lock, flags);
-
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
break;
case REPROBE:
ccw_device_trigger_reprobe(cdev);
@@ -1712,7 +1728,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
-static char console_cdev_name[10] = "0.x.xxxx";
static struct ccw_device_private console_private;
static int console_cdev_in_use;
@@ -1796,13 +1811,6 @@ int ccw_device_force_console(void)
return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
-
-const char *cio_get_console_cdev_name(struct subchannel *sch)
-{
- snprintf(console_cdev_name, 10, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
- return (const char *)console_cdev_name;
-}
#endif
/*
@@ -2020,7 +2028,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
spin_unlock_irq(sch->lock);
if (ret) {
CIO_MSG_EVENT(0, "Couldn't start recognition for device "
- "%s (ret=%d)\n", dev_name(&cdev->dev), ret);
+ "0.%x.%04x (ret=%d)\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
spin_lock_irq(sch->lock);
cdev->private->state = DEV_STATE_DISCONNECTED;
spin_unlock_irq(sch->lock);
@@ -2083,8 +2093,9 @@ static int ccw_device_pm_restore(struct device *dev)
}
/* check if the device id has changed */
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from "
- "%04x to %04x)\n", dev_name(&sch->dev),
+ CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
+ "changed from %04x to %04x)\n",
+ sch->schid.ssid, sch->schid.sch_no,
cdev->private->dev_id.devno,
sch->schib.pmcw.dev);
goto out_unreg_unlock;
@@ -2117,8 +2128,9 @@ static int ccw_device_pm_restore(struct device *dev)
if (cm_enabled) {
ret = ccw_set_cmf(cdev, 1);
if (ret) {
- CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed "
- "(rc=%d)\n", dev_name(&cdev->dev), ret);
+ CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
+ "(rc=%d)\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
ret = 0;
}
}
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 3db88c52d287..e728ce447f6e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -394,6 +394,13 @@ ccw_device_done(struct ccw_device *cdev, int state)
ccw_device_schedule_sch_unregister(cdev);
cdev->private->flags.donotify = 0;
}
+ if (state == DEV_STATE_NOT_OPER) {
+ CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
+ cdev->private->dev_id.devno, sch->schid.sch_no);
+ if (!ccw_device_notify(cdev, CIO_GONE))
+ ccw_device_schedule_sch_unregister(cdev);
+ cdev->private->flags.donotify = 0;
+ }
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
@@ -731,6 +738,17 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
}
/*
+ * Handle path verification event in offline state.
+ */
+static void ccw_device_offline_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ css_schedule_eval(sch->schid);
+}
+
+/*
* Handle path verification event.
*/
static void
@@ -887,6 +905,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
}
call_handler:
cdev->private->state = DEV_STATE_ONLINE;
+ /* In case sensing interfered with setting the device online */
+ wake_up(&cdev->private->wait_q);
/* Call the handler. */
if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
/* Start delayed path verification. */
@@ -1149,7 +1169,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_VERIFY] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b1241f8fae88..ff7748a9199d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,7 +1,7 @@
/*
* linux/drivers/s390/cio/qdio.h
*
- * Copyright 2000,2008 IBM Corp.
+ * Copyright 2000,2009 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
@@ -246,6 +246,7 @@ struct qdio_q {
atomic_t nr_buf_used;
struct qdio_irq *irq_ptr;
+ struct dentry *debugfs_q;
struct tasklet_struct tasklet;
/* error condition during a data transfer */
@@ -267,6 +268,7 @@ struct qdio_irq {
struct qib qib;
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
+ struct dentry *debugfs_dev;
unsigned long int_parm;
struct subchannel_id schid;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index b8626d4df116..1b78f639ead3 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,14 +1,12 @@
/*
* drivers/s390/cio/qdio_debug.c
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
-#include <asm/qdio.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
@@ -17,10 +15,7 @@ debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_error;
static struct dentry *debugfs_root;
-#define MAX_DEBUGFS_QUEUES 32
-static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
-static DEFINE_MUTEX(debugfs_mutex);
-#define QDIO_DEBUGFS_NAME_LEN 40
+#define QDIO_DEBUGFS_NAME_LEN 10
void qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr)
@@ -130,20 +125,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
filp->f_path.dentry->d_inode->i_private);
}
-static void remove_debugfs_entry(struct qdio_q *q)
-{
- int i;
-
- for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
- if (!debugfs_queues[i])
- continue;
- if (debugfs_queues[i]->d_inode->i_private == q) {
- debugfs_remove(debugfs_queues[i]);
- debugfs_queues[i] = NULL;
- }
- }
-}
-
static struct file_operations debugfs_fops = {
.owner = THIS_MODULE,
.open = qstat_seq_open,
@@ -155,22 +136,15 @@ static struct file_operations debugfs_fops = {
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
- int i = 0;
char name[QDIO_DEBUGFS_NAME_LEN];
- while (debugfs_queues[i] != NULL) {
- i++;
- if (i >= MAX_DEBUGFS_QUEUES)
- return;
- }
- snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
- dev_name(&cdev->dev),
+ snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
q->is_input_q ? "input" : "output",
q->nr);
- debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
- debugfs_root, q, &debugfs_fops);
- if (IS_ERR(debugfs_queues[i]))
- debugfs_queues[i] = NULL;
+ q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
+ q->irq_ptr->debugfs_dev, q, &debugfs_fops);
+ if (IS_ERR(q->debugfs_q))
+ q->debugfs_q = NULL;
}
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -178,12 +152,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
struct qdio_q *q;
int i;
- mutex_lock(&debugfs_mutex);
+ irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+ debugfs_root);
+ if (IS_ERR(irq_ptr->debugfs_dev))
+ irq_ptr->debugfs_dev = NULL;
for_each_input_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
for_each_output_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
- mutex_unlock(&debugfs_mutex);
}
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -191,17 +167,16 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
struct qdio_q *q;
int i;
- mutex_lock(&debugfs_mutex);
for_each_input_queue(irq_ptr, q, i)
- remove_debugfs_entry(q);
+ debugfs_remove(q->debugfs_q);
for_each_output_queue(irq_ptr, q, i)
- remove_debugfs_entry(q);
- mutex_unlock(&debugfs_mutex);
+ debugfs_remove(q->debugfs_q);
+ debugfs_remove(irq_ptr->debugfs_dev);
}
int __init qdio_debug_init(void)
{
- debugfs_root = debugfs_create_dir("qdio_queues", NULL);
+ debugfs_root = debugfs_create_dir("qdio", NULL);
qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 0038750ad945..9aef402a5f1b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -798,8 +798,10 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
if (!qdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
+ return;
+ }
}
qdio_stop_polling(q);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed3dcdea7fe1..090b32a339c6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -648,7 +648,9 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
/* Poll on the device until all requests are finished. */
do {
flags = 0;
+ spin_lock_bh(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
+ spin_unlock_bh(&ap_dev->lock);
} while ((flags & 1) || (flags & 2));
ap_device_remove(dev);
@@ -1109,12 +1111,15 @@ static void ap_scan_bus(struct work_struct *unused)
ap_dev->device.bus = &ap_bus_type;
ap_dev->device.parent = ap_root_device;
- dev_set_name(&ap_dev->device, "card%02x",
- AP_QID_DEVICE(ap_dev->qid));
+ if (dev_set_name(&ap_dev->device, "card%02x",
+ AP_QID_DEVICE(ap_dev->qid))) {
+ kfree(ap_dev);
+ continue;
+ }
ap_dev->device.release = ap_device_release;
rc = device_register(&ap_dev->device);
if (rc) {
- kfree(ap_dev);
+ put_device(&ap_dev->device);
continue;
}
/* Add device attributes. */
@@ -1407,14 +1412,12 @@ static void ap_reset(struct ap_device *ap_dev)
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
- spin_lock(&ap_dev->lock);
if (!ap_dev->unregistered) {
if (ap_poll_queue(ap_dev, flags))
ap_dev->unregistered = 1;
if (ap_dev->reset == AP_RESET_DO)
ap_reset(ap_dev);
}
- spin_unlock(&ap_dev->lock);
return 0;
}
@@ -1441,7 +1444,9 @@ static void ap_poll_all(unsigned long dummy)
flags = 0;
spin_lock(&ap_device_list_lock);
list_for_each_entry(ap_dev, &ap_device_list, list) {
+ spin_lock(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
+ spin_unlock(&ap_dev->lock);
}
spin_unlock(&ap_device_list_lock);
} while (flags & 1);
@@ -1487,7 +1492,9 @@ static int ap_poll_thread(void *data)
flags = 0;
spin_lock_bh(&ap_device_list_lock);
list_for_each_entry(ap_dev, &ap_device_list, list) {
+ spin_lock(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
+ spin_unlock(&ap_dev->lock);
}
spin_unlock_bh(&ap_device_list_lock);
}
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index e38e5d306faf..2930fc763ac5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -403,10 +403,14 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
return len;
}
-void __init s390_virtio_console_init(void)
+static int __init s390_virtio_console_init(void)
{
- virtio_cons_early_init(early_put_chars);
+ if (!MACHINE_IS_KVM)
+ return -ENODEV;
+ return virtio_cons_early_init(early_put_chars);
}
+console_initcall(s390_virtio_console_init);
+
/*
* We do this after core stuff, but before the drivers.
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 8c36eafcfbfe..87dff11061b0 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1839,9 +1839,10 @@ static int netiucv_register_device(struct net_device *ndev)
return -ENOMEM;
ret = device_register(dev);
-
- if (ret)
+ if (ret) {
+ put_device(dev);
return ret;
+ }
ret = netiucv_add_files(dev);
if (ret)
goto out_unreg;
@@ -2226,8 +2227,10 @@ static int __init netiucv_init(void)
netiucv_dev->release = (void (*)(struct device *))kfree;
netiucv_dev->driver = &netiucv_driver;
rc = device_register(netiucv_dev);
- if (rc)
+ if (rc) {
+ put_device(netiucv_dev);
goto out_driver;
+ }
netiucv_banner();
return rc;
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index e76a320d373b..102000d1af6f 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -219,13 +219,13 @@ static int __init smsg_init(void)
smsg_dev->driver = &smsg_driver;
rc = device_register(smsg_dev);
if (rc)
- goto out_free_dev;
+ goto out_put;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
-out_free_dev:
- kfree(smsg_dev);
+out_put:
+ put_device(smsg_dev);
out_free_path:
iucv_path_free(smsg_path);
smsg_path = NULL;
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index 042d9bce9914..d0ab23a58355 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -26,7 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
static void open_s3_dev(struct t3cdev *);
static void close_s3_dev(struct t3cdev *);
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error);
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);
static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
static struct cxgb3_client t3c_client = {
@@ -34,7 +34,7 @@ static struct cxgb3_client t3c_client = {
.handlers = cxgb3i_cpl_handlers,
.add = open_s3_dev,
.remove = close_s3_dev,
- .err_handler = s3_err_handler,
+ .event_handler = s3_event_handler,
};
/**
@@ -66,16 +66,16 @@ static void close_s3_dev(struct t3cdev *t3dev)
cxgb3i_ddp_cleanup(t3dev);
}
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
{
struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
- cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n",
- snic, tdev, status, error);
+ cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
+ snic, tdev, event, port);
if (!snic)
return;
- switch (status) {
+ switch (event) {
case OFFLOAD_STATUS_DOWN:
snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
break;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 9d7c99394ec6..640f65c6ef84 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1752,12 +1752,12 @@ static int comedi_open(struct inode *inode, struct file *file)
mutex_lock(&dev->mutex);
if (dev->attached)
goto ok;
- if (!capable(CAP_SYS_MODULE) && dev->in_request_module) {
+ if (!capable(CAP_NET_ADMIN) && dev->in_request_module) {
DPRINTK("in request module\n");
mutex_unlock(&dev->mutex);
return -ENODEV;
}
- if (capable(CAP_SYS_MODULE) && dev->in_request_module)
+ if (capable(CAP_NET_ADMIN) && dev->in_request_module)
goto ok;
dev->in_request_module = 1;
@@ -1770,8 +1770,8 @@ static int comedi_open(struct inode *inode, struct file *file)
dev->in_request_module = 0;
- if (!dev->attached && !capable(CAP_SYS_MODULE)) {
- DPRINTK("not attached and not CAP_SYS_MODULE\n");
+ if (!dev->attached && !capable(CAP_NET_ADMIN)) {
+ DPRINTK("not attached and not CAP_NET_ADMIN\n");
mutex_unlock(&dev->mutex);
return -ENODEV;
}
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 7b605795b770..e63c9bea6c54 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -1950,14 +1950,7 @@ static int pohmelfs_get_sb(struct file_system_type *fs_type,
*/
static void pohmelfs_kill_super(struct super_block *sb)
{
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .nr_to_write = LONG_MAX,
- };
- generic_sync_sb_inodes(sb, &wbc);
-
+ sync_inodes_sb(sb);
kill_anon_super(sb);
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index b7c1603cd4bd..7c1e65d54872 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -501,22 +501,22 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
}
}
- /*
- * Now fill out the bss section. First pad the last page up
- * to the page boundary, and then perform a mmap to make sure
- * that there are zero-mapped pages up to and including the
- * last bss page.
- */
- if (padzero(elf_bss)) {
- error = -EFAULT;
- goto out_close;
- }
+ if (last_bss > elf_bss) {
+ /*
+ * Now fill out the bss section. First pad the last page up
+ * to the page boundary, and then perform a mmap to make sure
+ * that there are zero-mapped pages up to and including the
+ * last bss page.
+ */
+ if (padzero(elf_bss)) {
+ error = -EFAULT;
+ goto out_close;
+ }
- /* What we have mapped so far */
- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
+ /* What we have mapped so far */
+ elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
- /* Map the last of the bss segment */
- if (last_bss > elf_bss) {
+ /* Map the last of the bss segment */
down_write(&current->mm->mmap_sem);
error = do_brk(elf_bss, last_bss - elf_bss);
up_write(&current->mm->mmap_sem);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e83be2e4602c..15831d5c7367 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1352,6 +1352,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
int err;
+ bdi->name = "btrfs";
bdi->capabilities = BDI_CAP_MAP_COPY;
err = bdi_init(bdi);
if (err)
diff --git a/fs/buffer.c b/fs/buffer.c
index 28f320fac4d4..90a98865b0cc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -281,7 +281,7 @@ static void free_more_memory(void)
struct zone *zone;
int nid;
- wakeup_pdflush(1024);
+ wakeup_flusher_threads(1024);
yield();
for_each_online_node(nid) {
diff --git a/fs/char_dev.c b/fs/char_dev.c
index a173551e19d7..3cbc57f932d2 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -31,6 +31,7 @@
* - no readahead or I/O queue unplugging required
*/
struct backing_dev_info directly_mappable_cdev_bdi = {
+ .name = "char",
.capabilities = (
#ifdef CONFIG_MMU
/* permit private copies of the data to be taken */
@@ -237,8 +238,10 @@ int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
}
/**
- * register_chrdev() - Register a major number for character devices.
+ * __register_chrdev() - create and register a cdev occupying a range of minors
* @major: major device number or 0 for dynamic allocation
+ * @baseminor: first of the requested range of minor numbers
+ * @count: the number of minor numbers required
* @name: name of this range of devices
* @fops: file operations associated with these devices
*
@@ -254,19 +257,17 @@ int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
* /dev. It only helps to keep track of the different owners of devices. If
* your module name has only one type of devices it's ok to use e.g. the name
* of the module here.
- *
- * This function registers a range of 256 minor numbers. The first minor number
- * is 0.
*/
-int register_chrdev(unsigned int major, const char *name,
- const struct file_operations *fops)
+int __register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops)
{
struct char_device_struct *cd;
struct cdev *cdev;
char *s;
int err = -ENOMEM;
- cd = __register_chrdev_region(major, 0, 256, name);
+ cd = __register_chrdev_region(major, baseminor, count, name);
if (IS_ERR(cd))
return PTR_ERR(cd);
@@ -280,7 +281,7 @@ int register_chrdev(unsigned int major, const char *name,
for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
*s = '!';
- err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
+ err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
if (err)
goto out;
@@ -290,7 +291,7 @@ int register_chrdev(unsigned int major, const char *name,
out:
kobject_put(&cdev->kobj);
out2:
- kfree(__unregister_chrdev_region(cd->major, 0, 256));
+ kfree(__unregister_chrdev_region(cd->major, baseminor, count));
return err;
}
@@ -316,10 +317,23 @@ void unregister_chrdev_region(dev_t from, unsigned count)
}
}
-void unregister_chrdev(unsigned int major, const char *name)
+/**
+ * __unregister_chrdev - unregister and destroy a cdev
+ * @major: major device number
+ * @baseminor: first of the range of minor numbers
+ * @count: the number of minor numbers this cdev is occupying
+ * @name: name of this range of devices
+ *
+ * Unregister and destroy the cdev occupying the region described by
+ * @major, @baseminor and @count. This function undoes what
+ * __register_chrdev() did.
+ */
+void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name)
{
struct char_device_struct *cd;
- cd = __unregister_chrdev_region(major, 0, 256);
+
+ cd = __unregister_chrdev_region(major, baseminor, count);
if (cd && cd->cdev)
cdev_del(cd->cdev);
kfree(cd);
@@ -568,6 +582,6 @@ EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_index);
-EXPORT_SYMBOL(register_chrdev);
-EXPORT_SYMBOL(unregister_chrdev);
+EXPORT_SYMBOL(__register_chrdev);
+EXPORT_SYMBOL(__unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);
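The kernel-doc above describes the widened chrdev interface. Below is a minimal, hedged sketch of a caller: the driver name "foo", the major number and the minor range are hypothetical, and the return-value convention (0 on success for an explicit major) is assumed rather than shown in this hunk.

#include <linux/fs.h>
#include <linux/module.h>

#define FOO_MAJOR	240	/* hypothetical, from the local/experimental range */
#define FOO_BASEMINOR	8	/* first minor this driver owns */
#define FOO_MINORS	4	/* only four minors, not the old fixed 0..255 range */

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
	.llseek	= no_llseek,
};

static int __init foo_init(void)
{
	/* claim minors 8..11 under major 240 and register foo_fops for them */
	int ret = __register_chrdev(FOO_MAJOR, FOO_BASEMINOR, FOO_MINORS,
				    "foo", &foo_fops);
	return ret < 0 ? ret : 0;
}

static void __exit foo_exit(void)
{
	/* undo exactly what __register_chrdev() did for this range */
	__unregister_chrdev(FOO_MAJOR, FOO_BASEMINOR, FOO_MINORS, "foo");
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");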
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 4921e7426d95..a2f746066c5d 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -51,6 +51,7 @@ static const struct address_space_operations configfs_aops = {
};
static struct backing_dev_info configfs_backing_dev_info = {
+ .name = "configfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
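The new .name field shows up in several hunks in this patch (btrfs, char, configfs, and more below). As a rough sketch, assuming the field is simply a label for the bdi, both initialization styles look like this; "foo" is a hypothetical device/filesystem name.

#include <linux/backing-dev.h>

/* static initializer, as in the configfs hunk above */
static struct backing_dev_info foo_backing_dev_info = {
	.name		= "foo",
	.ra_pages	= 0,	/* no readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

/* dynamic setup, as in the btrfs hunk above: name the bdi before bdi_init() */
static int foo_setup_bdi(struct backing_dev_info *bdi)
{
	bdi->name = "foo";
	return bdi_init(bdi);
}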
diff --git a/fs/dcache.c b/fs/dcache.c
index 9e5cd3c3a6ba..a100fa35a48f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -32,6 +32,7 @@
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
+#include <linux/hardirq.h>
#include "internal.h"
int sysctl_vfs_cache_pressure __read_mostly = 100;
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index d636e1297cad..a63d44256a70 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -230,7 +230,7 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return error;
}
-static int
+int
ext2_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
@@ -246,12 +246,6 @@ ext2_check_acl(struct inode *inode, int mask)
return -EAGAIN;
}
-int
-ext2_permission(struct inode *inode, int mask)
-{
- return generic_permission(inode, mask, ext2_check_acl);
-}
-
/*
* Initialize the ACLs of a new inode. Called from ext2_new_inode.
*
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index ecefe478898f..3ff6cbb9ac44 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -54,13 +54,13 @@ static inline int ext2_acl_count(size_t size)
#ifdef CONFIG_EXT2_FS_POSIX_ACL
/* acl.c */
-extern int ext2_permission (struct inode *, int);
+extern int ext2_check_acl (struct inode *, int);
extern int ext2_acl_chmod (struct inode *);
extern int ext2_init_acl (struct inode *, struct inode *);
#else
#include <linux/sched.h>
-#define ext2_permission NULL
+#define ext2_check_acl NULL
#define ext2_get_acl NULL
#define ext2_set_acl NULL
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 2b9e47dc9222..a2f3afd1a1c1 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -85,6 +85,6 @@ const struct inode_operations ext2_file_inode_operations = {
.removexattr = generic_removexattr,
#endif
.setattr = ext2_setattr,
- .permission = ext2_permission,
+ .check_acl = ext2_check_acl,
.fiemap = ext2_fiemap,
};
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 78d9b925fc94..23701f289e98 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -400,7 +400,7 @@ const struct inode_operations ext2_dir_inode_operations = {
.removexattr = generic_removexattr,
#endif
.setattr = ext2_setattr,
- .permission = ext2_permission,
+ .check_acl = ext2_check_acl,
};
const struct inode_operations ext2_special_inode_operations = {
@@ -411,5 +411,5 @@ const struct inode_operations ext2_special_inode_operations = {
.removexattr = generic_removexattr,
#endif
.setattr = ext2_setattr,
- .permission = ext2_permission,
+ .check_acl = ext2_check_acl,
};
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index e167bae37ef0..c9b0df376b5f 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -238,7 +238,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
return error;
}
-static int
+int
ext3_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
@@ -254,12 +254,6 @@ ext3_check_acl(struct inode *inode, int mask)
return -EAGAIN;
}
-int
-ext3_permission(struct inode *inode, int mask)
-{
- return generic_permission(inode, mask, ext3_check_acl);
-}
-
/*
* Initialize the ACLs of a new inode. Called from ext3_new_inode.
*
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
index 07d15a3a5969..597334626de9 100644
--- a/fs/ext3/acl.h
+++ b/fs/ext3/acl.h
@@ -54,13 +54,13 @@ static inline int ext3_acl_count(size_t size)
#ifdef CONFIG_EXT3_FS_POSIX_ACL
/* acl.c */
-extern int ext3_permission (struct inode *, int);
+extern int ext3_check_acl (struct inode *, int);
extern int ext3_acl_chmod (struct inode *);
extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
#else /* CONFIG_EXT3_FS_POSIX_ACL */
#include <linux/sched.h>
-#define ext3_permission NULL
+#define ext3_check_acl NULL
static inline int
ext3_acl_chmod(struct inode *inode)
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 5b49704b231b..299253214789 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -137,7 +137,7 @@ const struct inode_operations ext3_file_inode_operations = {
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
- .permission = ext3_permission,
+ .check_acl = ext3_check_acl,
.fiemap = ext3_fiemap,
};
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 6ff7b9730234..aad6400c9b77 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2445,7 +2445,7 @@ const struct inode_operations ext3_dir_inode_operations = {
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
- .permission = ext3_permission,
+ .check_acl = ext3_check_acl,
};
const struct inode_operations ext3_special_inode_operations = {
@@ -2456,5 +2456,5 @@ const struct inode_operations ext3_special_inode_operations = {
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
- .permission = ext3_permission,
+ .check_acl = ext3_check_acl,
};
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index f6d8967149ca..0df88b2a69b0 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -236,7 +236,7 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
return error;
}
-static int
+int
ext4_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
@@ -252,12 +252,6 @@ ext4_check_acl(struct inode *inode, int mask)
return -EAGAIN;
}
-int
-ext4_permission(struct inode *inode, int mask)
-{
- return generic_permission(inode, mask, ext4_check_acl);
-}
-
/*
* Initialize the ACLs of a new inode. Called from ext4_new_inode.
*
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index 949789d2bba6..9d843d5deac4 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -54,13 +54,13 @@ static inline int ext4_acl_count(size_t size)
#ifdef CONFIG_EXT4_FS_POSIX_ACL
/* acl.c */
-extern int ext4_permission(struct inode *, int);
+extern int ext4_check_acl(struct inode *, int);
extern int ext4_acl_chmod(struct inode *);
extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
#else /* CONFIG_EXT4_FS_POSIX_ACL */
#include <linux/sched.h>
-#define ext4_permission NULL
+#define ext4_check_acl NULL
static inline int
ext4_acl_chmod(struct inode *inode)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 3f1873fef1c6..27f3c5354c0e 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -207,7 +207,7 @@ const struct inode_operations ext4_file_inode_operations = {
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
#endif
- .permission = ext4_permission,
+ .check_acl = ext4_check_acl,
.fallocate = ext4_fallocate,
.fiemap = ext4_fiemap,
};
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index de04013d16ff..114abe5d2c1d 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2536,7 +2536,7 @@ const struct inode_operations ext4_dir_inode_operations = {
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
#endif
- .permission = ext4_permission,
+ .check_acl = ext4_check_acl,
.fiemap = ext4_fiemap,
};
@@ -2548,5 +2548,5 @@ const struct inode_operations ext4_special_inode_operations = {
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
#endif
- .permission = ext4_permission,
+ .check_acl = ext4_check_acl,
};
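The ext2/ext3/ext4 hunks above all follow the same pattern: the per-filesystem ->permission wrapper around generic_permission() goes away, and the ACL checker is exported and wired up through the new ->check_acl hook instead (the namei.c hunk later in this patch has the VFS pass inode->i_op->check_acl to generic_permission()). A hedged sketch of that pattern for a hypothetical filesystem "foofs", with the ACL lookup stubbed out:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/posix_acl.h>

/* placeholder: a real filesystem would read the ACL from its on-disk metadata */
static struct posix_acl *foofs_get_acl(struct inode *inode, int type)
{
	return NULL;
}

int foofs_check_acl(struct inode *inode, int mask)
{
	struct posix_acl *acl = foofs_get_acl(inode, ACL_TYPE_ACCESS);

	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);
		return error;
	}
	/* no ACL present: -EAGAIN tells generic_permission() to use mode bits */
	return -EAGAIN;
}

const struct inode_operations foofs_file_inode_operations = {
	/* .permission stays NULL; the VFS falls back to
	 * generic_permission(inode, mask, inode->i_op->check_acl) */
	.check_acl	= foofs_check_acl,
};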
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c54226be5294..da86ef58e427 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -19,171 +19,223 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"
+#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
-/**
- * writeback_acquire - attempt to get exclusive writeback access to a device
- * @bdi: the device's backing_dev_info structure
- *
- * It is a waste of resources to have more than one pdflush thread blocked on
- * a single request queue. Exclusion at the request_queue level is obtained
- * via a flag in the request_queue's backing_dev_info.state.
- *
- * Non-request_queue-backed address_spaces will share default_backing_dev_info,
- * unless they implement their own. Which is somewhat inefficient, as this
- * may prevent concurrent writeback against multiple devices.
+/*
+ * We don't actually have pdflush, but this one is exported through /proc...
*/
-static int writeback_acquire(struct backing_dev_info *bdi)
+int nr_pdflush_threads;
+
+/*
+ * Work items for the bdi_writeback threads
+ */
+struct bdi_work {
+ struct list_head list;
+ struct list_head wait_list;
+ struct rcu_head rcu_head;
+
+ unsigned long seen;
+ atomic_t pending;
+
+ struct super_block *sb;
+ unsigned long nr_pages;
+ enum writeback_sync_modes sync_mode;
+
+ unsigned long state;
+};
+
+enum {
+ WS_USED_B = 0,
+ WS_ONSTACK_B,
+};
+
+#define WS_USED (1 << WS_USED_B)
+#define WS_ONSTACK (1 << WS_ONSTACK_B)
+
+static inline bool bdi_work_on_stack(struct bdi_work *work)
{
- return !test_and_set_bit(BDI_pdflush, &bdi->state);
+ return test_bit(WS_ONSTACK_B, &work->state);
+}
+
+static inline void bdi_work_init(struct bdi_work *work,
+ struct writeback_control *wbc)
+{
+ INIT_RCU_HEAD(&work->rcu_head);
+ work->sb = wbc->sb;
+ work->nr_pages = wbc->nr_to_write;
+ work->sync_mode = wbc->sync_mode;
+ work->state = WS_USED;
+}
+
+static inline void bdi_work_init_on_stack(struct bdi_work *work,
+ struct writeback_control *wbc)
+{
+ bdi_work_init(work, wbc);
+ work->state |= WS_ONSTACK;
}
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
*
- * Determine whether there is writeback in progress against a backing device.
+ * Determine whether there is writeback waiting to be handled against a
+ * backing device.
*/
int writeback_in_progress(struct backing_dev_info *bdi)
{
- return test_bit(BDI_pdflush, &bdi->state);
+ return !list_empty(&bdi->work_list);
}
-/**
- * writeback_release - relinquish exclusive writeback access against a device.
- * @bdi: the device's backing_dev_info structure
- */
-static void writeback_release(struct backing_dev_info *bdi)
+static void bdi_work_clear(struct bdi_work *work)
{
- BUG_ON(!writeback_in_progress(bdi));
- clear_bit(BDI_pdflush, &bdi->state);
+ clear_bit(WS_USED_B, &work->state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&work->state, WS_USED_B);
}
-static noinline void block_dump___mark_inode_dirty(struct inode *inode)
+static void bdi_work_free(struct rcu_head *head)
{
- if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
- struct dentry *dentry;
- const char *name = "?";
+ struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
- dentry = d_find_alias(inode);
- if (dentry) {
- spin_lock(&dentry->d_lock);
- name = (const char *) dentry->d_name.name;
- }
- printk(KERN_DEBUG
- "%s(%d): dirtied inode %lu (%s) on %s\n",
- current->comm, task_pid_nr(current), inode->i_ino,
- name, inode->i_sb->s_id);
- if (dentry) {
- spin_unlock(&dentry->d_lock);
- dput(dentry);
- }
- }
+ if (!bdi_work_on_stack(work))
+ kfree(work);
+ else
+ bdi_work_clear(work);
}
-/**
- * __mark_inode_dirty - internal function
- * @inode: inode to mark
- * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
- * Mark an inode as dirty. Callers should use mark_inode_dirty or
- * mark_inode_dirty_sync.
- *
- * Put the inode on the super block's dirty list.
- *
- * CAREFUL! We mark it dirty unconditionally, but move it onto the
- * dirty list only if it is hashed or if it refers to a blockdev.
- * If it was not hashed, it will never be added to the dirty list
- * even if it is later hashed, as it will have been marked dirty already.
- *
- * In short, make sure you hash any inodes _before_ you start marking
- * them dirty.
- *
- * This function *must* be atomic for the I_DIRTY_PAGES case -
- * set_page_dirty() is called under spinlock in several places.
- *
- * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
- * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
- * the kernel-internal blockdev inode represents the dirtying time of the
- * blockdev's pages. This is why for I_DIRTY_PAGES we always use
- * page->mapping->host, so the page-dirtying time is recorded in the internal
- * blockdev inode.
- */
-void __mark_inode_dirty(struct inode *inode, int flags)
+static void wb_work_complete(struct bdi_work *work)
{
- struct super_block *sb = inode->i_sb;
+ const enum writeback_sync_modes sync_mode = work->sync_mode;
/*
- * Don't do this for I_DIRTY_PAGES - that doesn't actually
- * dirty the inode itself
+ * For allocated work, we can clear the done/seen bit right here.
+ * For on-stack work, we need to postpone both the clear and free
+ * to after the RCU grace period, since the stack could be invalidated
+ * as soon as bdi_work_clear() has done the wakeup.
*/
- if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
- if (sb->s_op->dirty_inode)
- sb->s_op->dirty_inode(inode);
- }
+ if (!bdi_work_on_stack(work))
+ bdi_work_clear(work);
+ if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
+ call_rcu(&work->rcu_head, bdi_work_free);
+}
+static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
+{
/*
- * make sure that changes are seen by all cpus before we test i_state
- * -- mikulas
+ * The caller has retrieved the work arguments from this work,
+ * drop our reference. If this is the last ref, delete and free it
*/
- smp_mb();
+ if (atomic_dec_and_test(&work->pending)) {
+ struct backing_dev_info *bdi = wb->bdi;
- /* avoid the locking if we can */
- if ((inode->i_state & flags) == flags)
- return;
+ spin_lock(&bdi->wb_lock);
+ list_del_rcu(&work->list);
+ spin_unlock(&bdi->wb_lock);
- if (unlikely(block_dump))
- block_dump___mark_inode_dirty(inode);
-
- spin_lock(&inode_lock);
- if ((inode->i_state & flags) != flags) {
- const int was_dirty = inode->i_state & I_DIRTY;
+ wb_work_complete(work);
+ }
+}
- inode->i_state |= flags;
+static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
+{
+ if (work) {
+ work->seen = bdi->wb_mask;
+ BUG_ON(!work->seen);
+ atomic_set(&work->pending, bdi->wb_cnt);
+ BUG_ON(!bdi->wb_cnt);
/*
- * If the inode is being synced, just update its dirty state.
- * The unlocker will place the inode on the appropriate
- * superblock list, based upon its state.
+ * Make sure stores are seen before it appears on the list
*/
- if (inode->i_state & I_SYNC)
- goto out;
+ smp_mb();
- /*
- * Only add valid (hashed) inodes to the superblock's
- * dirty list. Add blockdev inodes as well.
- */
- if (!S_ISBLK(inode->i_mode)) {
- if (hlist_unhashed(&inode->i_hash))
- goto out;
- }
- if (inode->i_state & (I_FREEING|I_CLEAR))
- goto out;
+ spin_lock(&bdi->wb_lock);
+ list_add_tail_rcu(&work->list, &bdi->work_list);
+ spin_unlock(&bdi->wb_lock);
+ }
+
+ /*
+ * If the default thread isn't there, make sure we add it. When
+ * it gets created and wakes up, we'll run this work.
+ */
+ if (unlikely(list_empty_careful(&bdi->wb_list)))
+ wake_up_process(default_backing_dev_info.wb.task);
+ else {
+ struct bdi_writeback *wb = &bdi->wb;
/*
- * If the inode was already on s_dirty/s_io/s_more_io, don't
- * reposition it (that would break s_dirty time-ordering).
+ * If we failed to allocate the bdi work item, always wake up the
+ * wb thread. As a safety precaution, it'll flush out everything.
*/
- if (!was_dirty) {
- inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &sb->s_dirty);
- }
+ if (!wb_has_dirty_io(wb)) {
+ if (work)
+ wb_clear_pending(wb, work);
+ } else if (wb->task)
+ wake_up_process(wb->task);
}
-out:
- spin_unlock(&inode_lock);
}
-EXPORT_SYMBOL(__mark_inode_dirty);
+/*
+ * Used for on-stack allocated work items. The caller needs to wait until
+ * the wb threads have acked the work before it's safe to continue.
+ */
+static void bdi_wait_on_work_clear(struct bdi_work *work)
+{
+ wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
+ TASK_UNINTERRUPTIBLE);
+}
-static int write_inode(struct inode *inode, int sync)
+static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc)
{
- if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
- return inode->i_sb->s_op->write_inode(inode, sync);
- return 0;
+ struct bdi_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work)
+ bdi_work_init(work, wbc);
+
+ return work;
+}
+
+void bdi_start_writeback(struct writeback_control *wbc)
+{
+ const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
+ struct bdi_work work_stack, *work = NULL;
+
+ if (!must_wait)
+ work = bdi_alloc_work(wbc);
+
+ if (!work) {
+ work = &work_stack;
+ bdi_work_init_on_stack(work, wbc);
+ }
+
+ bdi_queue_work(wbc->bdi, work);
+
+ /*
+ * If the sync mode is WB_SYNC_ALL, block waiting for the work to
+ * complete. If not, we only need to wait for the work to be started,
+ * if we allocated it on-stack. We use the same mechanism: if the
+ * wait bit is set in the bdi_work struct, threads will not clear
+ * pending until after they are done.
+ *
+ * Note that work == &work_stack if must_wait is true, so we don't
+ * need to do call_rcu() here ever, since the completion path will
+ * have done that for us.
+ */
+ if (must_wait || work == &work_stack) {
+ bdi_wait_on_work_clear(work);
+ if (work != &work_stack)
+ call_rcu(&work->rcu_head, bdi_work_free);
+ }
}
/*
@@ -191,31 +243,32 @@ static int write_inode(struct inode *inode, int sync)
* furthest end of its superblock's dirty-inode list.
*
* Before stamping the inode's ->dirtied_when, we check to see whether it is
- * already the most-recently-dirtied inode on the s_dirty list. If that is
+ * already the most-recently-dirtied inode on the b_dirty list. If that is
* the case then the inode must have been redirtied while it was being written
* out and we don't reset its dirtied_when.
*/
static void redirty_tail(struct inode *inode)
{
- struct super_block *sb = inode->i_sb;
+ struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- if (!list_empty(&sb->s_dirty)) {
- struct inode *tail_inode;
+ if (!list_empty(&wb->b_dirty)) {
+ struct inode *tail;
- tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
- if (time_before(inode->dirtied_when,
- tail_inode->dirtied_when))
+ tail = list_entry(wb->b_dirty.next, struct inode, i_list);
+ if (time_before(inode->dirtied_when, tail->dirtied_when))
inode->dirtied_when = jiffies;
}
- list_move(&inode->i_list, &sb->s_dirty);
+ list_move(&inode->i_list, &wb->b_dirty);
}
/*
- * requeue inode for re-scanning after sb->s_io list is exhausted.
+ * requeue inode for re-scanning after bdi->b_io list is exhausted.
*/
static void requeue_io(struct inode *inode)
{
- list_move(&inode->i_list, &inode->i_sb->s_more_io);
+ struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
+
+ list_move(&inode->i_list, &wb->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
@@ -262,20 +315,18 @@ static void move_expired_inodes(struct list_head *delaying_queue,
/*
* Queue all expired dirty inodes for io, eldest first.
*/
-static void queue_io(struct super_block *sb,
- unsigned long *older_than_this)
+static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
- list_splice_init(&sb->s_more_io, sb->s_io.prev);
- move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
+ list_splice_init(&wb->b_more_io, wb->b_io.prev);
+ move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
-int sb_has_dirty_inodes(struct super_block *sb)
+static int write_inode(struct inode *inode, int sync)
{
- return !list_empty(&sb->s_dirty) ||
- !list_empty(&sb->s_io) ||
- !list_empty(&sb->s_more_io);
+ if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
+ return inode->i_sb->s_op->write_inode(inode, sync);
+ return 0;
}
-EXPORT_SYMBOL(sb_has_dirty_inodes);
/*
* Wait for writeback on an inode to complete.
@@ -322,11 +373,11 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (inode->i_state & I_SYNC) {
/*
* If this inode is locked for writeback and we are not doing
- * writeback-for-data-integrity, move it to s_more_io so that
+ * writeback-for-data-integrity, move it to b_more_io so that
* writeback can proceed with the other inodes on s_io.
*
* We'll have another go at writing back this inode when we
- * completed a full scan of s_io.
+ * completed a full scan of b_io.
*/
if (!wait) {
requeue_io(inode);
@@ -371,11 +422,11 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
/*
* We didn't write back all the pages. nfs_writepages()
 * sometimes bails out without doing anything. Redirty
- * the inode; Move it from s_io onto s_more_io/s_dirty.
+ * the inode; Move it from b_io onto b_more_io/b_dirty.
*/
/*
* akpm: if the caller was the kupdate function we put
- * this inode at the head of s_dirty so it gets first
+ * this inode at the head of b_dirty so it gets first
* consideration. Otherwise, move it to the tail, for
* the reasons described there. I'm not really sure
* how much sense this makes. Presumably I had a good
@@ -385,7 +436,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->for_kupdate) {
/*
* For the kupdate function we move the inode
- * to s_more_io so it will get more writeout as
+ * to b_more_io so it will get more writeout as
* soon as the queue becomes uncongested.
*/
inode->i_state |= I_DIRTY_PAGES;
@@ -434,50 +485,84 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
- * Write out a superblock's list of dirty inodes. A wait will be performed
- * upon no inodes, all inodes or the final one, depending upon sync_mode.
- *
- * If older_than_this is non-NULL, then only write out inodes which
- * had their first dirtying at a time earlier than *older_than_this.
- *
- * If we're a pdflush thread, then implement pdflush collision avoidance
- * against the entire list.
+ * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
+ * before calling writeback. So make sure that we do pin it, so it doesn't
+ * go away while we are writing inodes from it.
*
- * If `bdi' is non-zero then we're being asked to writeback a specific queue.
- * This function assumes that the blockdev superblock's inodes are backed by
- * a variety of queues, so all inodes are searched. For other superblocks,
- * assume that all inodes are backed by the same queue.
- *
- * FIXME: this linear search could get expensive with many fileystems. But
- * how to fix? We need to go from an address_space to all inodes which share
- * a queue with that address_space. (Easy: have a global "dirty superblocks"
- * list).
- *
- * The inodes to be written are parked on sb->s_io. They are moved back onto
- * sb->s_dirty as they are selected for writing. This way, none can be missed
- * on the writer throttling path, and we get decent balancing between many
- * throttled threads: we don't want them all piling up on inode_sync_wait.
+ * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
+ * 1 if we failed.
*/
-void generic_sync_sb_inodes(struct super_block *sb,
+static int pin_sb_for_writeback(struct writeback_control *wbc,
+ struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ /*
+ * Caller must already hold the ref for this
+ */
+ if (wbc->sync_mode == WB_SYNC_ALL) {
+ WARN_ON(!rwsem_is_locked(&sb->s_umount));
+ return 0;
+ }
+
+ spin_lock(&sb_lock);
+ sb->s_count++;
+ if (down_read_trylock(&sb->s_umount)) {
+ if (sb->s_root) {
+ spin_unlock(&sb_lock);
+ return 0;
+ }
+ /*
+ * umounted, drop rwsem again and fall through to failure
+ */
+ up_read(&sb->s_umount);
+ }
+
+ sb->s_count--;
+ spin_unlock(&sb_lock);
+ return 1;
+}
+
+static void unpin_sb_for_writeback(struct writeback_control *wbc,
+ struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ return;
+
+ up_read(&sb->s_umount);
+ put_super(sb);
+}
+
+static void writeback_inodes_wb(struct bdi_writeback *wb,
struct writeback_control *wbc)
{
+ struct super_block *sb = wbc->sb;
+ const int is_blkdev_sb = sb_is_blkdev_sb(sb);
const unsigned long start = jiffies; /* livelock avoidance */
- int sync = wbc->sync_mode == WB_SYNC_ALL;
spin_lock(&inode_lock);
- if (!wbc->for_kupdate || list_empty(&sb->s_io))
- queue_io(sb, wbc->older_than_this);
- while (!list_empty(&sb->s_io)) {
- struct inode *inode = list_entry(sb->s_io.prev,
+ if (!wbc->for_kupdate || list_empty(&wb->b_io))
+ queue_io(wb, wbc->older_than_this);
+
+ while (!list_empty(&wb->b_io)) {
+ struct inode *inode = list_entry(wb->b_io.prev,
struct inode, i_list);
- struct address_space *mapping = inode->i_mapping;
- struct backing_dev_info *bdi = mapping->backing_dev_info;
long pages_skipped;
- if (!bdi_cap_writeback_dirty(bdi)) {
+ /*
+ * super block given and doesn't match, skip this inode
+ */
+ if (sb && sb != inode->i_sb) {
+ redirty_tail(inode);
+ continue;
+ }
+
+ if (!bdi_cap_writeback_dirty(wb->bdi)) {
redirty_tail(inode);
- if (sb_is_blkdev_sb(sb)) {
+ if (is_blkdev_sb) {
/*
* Dirty memory-backed blockdev: the ramdisk
* driver does this. Skip just this inode
@@ -497,21 +582,14 @@ void generic_sync_sb_inodes(struct super_block *sb,
continue;
}
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
+ if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
wbc->encountered_congestion = 1;
- if (!sb_is_blkdev_sb(sb))
+ if (!is_blkdev_sb)
break; /* Skip a congested fs */
requeue_io(inode);
continue; /* Skip a congested blockdev */
}
- if (wbc->bdi && bdi != wbc->bdi) {
- if (!sb_is_blkdev_sb(sb))
- break; /* fs has the wrong queue */
- requeue_io(inode);
- continue; /* blockdev has wrong queue */
- }
-
/*
* Was this inode dirtied after sync_sb_inodes was called?
* This keeps sync from extra jobs and livelock.
@@ -519,16 +597,16 @@ void generic_sync_sb_inodes(struct super_block *sb,
if (inode_dirtied_after(inode, start))
break;
- /* Is another pdflush already flushing this queue? */
- if (current_is_pdflush() && !writeback_acquire(bdi))
- break;
+ if (pin_sb_for_writeback(wbc, inode)) {
+ requeue_io(inode);
+ continue;
+ }
BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
__iget(inode);
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
- if (current_is_pdflush())
- writeback_release(bdi);
+ unpin_sb_for_writeback(wbc, inode);
if (wbc->pages_skipped != pages_skipped) {
/*
* writeback is not making progress due to locked
@@ -544,144 +622,571 @@ void generic_sync_sb_inodes(struct super_block *sb,
wbc->more_io = 1;
break;
}
- if (!list_empty(&sb->s_more_io))
+ if (!list_empty(&wb->b_more_io))
wbc->more_io = 1;
}
- if (sync) {
- struct inode *inode, *old_inode = NULL;
+ spin_unlock(&inode_lock);
+ /* Leave any unwritten inodes on b_io */
+}
+
+void writeback_inodes_wbc(struct writeback_control *wbc)
+{
+ struct backing_dev_info *bdi = wbc->bdi;
+ writeback_inodes_wb(&bdi->wb, wbc);
+}
+
+/*
+ * The maximum number of pages to writeout in a single bdi flush/kupdate
+ * operation. We do this so we don't hold I_SYNC against an inode for
+ * enormous amounts of time, which would block a userspace task which has
+ * been forced to throttle against that inode. Also, the code reevaluates
+ * its dirty state each time it has written this many pages.
+ */
+#define MAX_WRITEBACK_PAGES 1024
+
+static inline bool over_bground_thresh(void)
+{
+ unsigned long background_thresh, dirty_thresh;
+
+ get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+
+ return (global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
+}
+
+/*
+ * Explicit flushing or periodic writeback of "old" data.
+ *
+ * Define "old": the first time one of an inode's pages is dirtied, we mark the
+ * dirtying-time in the inode's address_space. So this periodic writeback code
+ * just walks the superblock inode list, writing back any inodes which are
+ * older than a specific point in time.
+ *
+ * Try to run once per dirty_writeback_interval. But if a writeback event
+ * takes longer than a dirty_writeback_interval interval, then leave a
+ * one-second gap.
+ *
+ * older_than_this takes precedence over nr_to_write. So we'll only write back
+ * all dirty pages if they are all attached to "old" mappings.
+ */
+static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
+ struct super_block *sb,
+ enum writeback_sync_modes sync_mode, int for_kupdate)
+{
+ struct writeback_control wbc = {
+ .bdi = wb->bdi,
+ .sb = sb,
+ .sync_mode = sync_mode,
+ .older_than_this = NULL,
+ .for_kupdate = for_kupdate,
+ .range_cyclic = 1,
+ };
+ unsigned long oldest_jif;
+ long wrote = 0;
+
+ if (wbc.for_kupdate) {
+ wbc.older_than_this = &oldest_jif;
+ oldest_jif = jiffies -
+ msecs_to_jiffies(dirty_expire_interval * 10);
+ }
+
+ for (;;) {
/*
- * Data integrity sync. Must wait for all pages under writeback,
- * because there may have been pages dirtied before our sync
- * call, but which had writeout started before we write it out.
- * In which case, the inode may not be on the dirty list, but
- * we still have to wait for that writeout.
+ * Don't flush anything for non-integrity writeback where
+ * no nr_pages was given
*/
- list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- struct address_space *mapping;
+ if (!for_kupdate && nr_pages <= 0 && sync_mode == WB_SYNC_NONE)
+ break;
- if (inode->i_state &
- (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
- continue;
- mapping = inode->i_mapping;
- if (mapping->nrpages == 0)
+ /*
+ * If no specific pages were given and this is just a
+ * periodic background writeout and we are below the
+ * background dirty threshold, don't do anything
+ */
+ if (for_kupdate && nr_pages <= 0 && !over_bground_thresh())
+ break;
+
+ wbc.more_io = 0;
+ wbc.encountered_congestion = 0;
+ wbc.nr_to_write = MAX_WRITEBACK_PAGES;
+ wbc.pages_skipped = 0;
+ writeback_inodes_wb(wb, &wbc);
+ nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+ wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+
+ /*
+ * If we ran out of stuff to write, bail unless more_io got set
+ */
+ if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
+ if (wbc.more_io && !wbc.for_kupdate)
continue;
- __iget(inode);
- spin_unlock(&inode_lock);
+ break;
+ }
+ }
+
+ return wrote;
+}
+
+/*
+ * Return the next bdi_work struct that hasn't been processed by this
+ * wb thread yet
+ */
+static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
+ struct bdi_writeback *wb)
+{
+ struct bdi_work *work, *ret = NULL;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(work, &bdi->work_list, list) {
+ if (!test_and_clear_bit(wb->nr, &work->seen))
+ continue;
+
+ ret = work;
+ break;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+static long wb_check_old_data_flush(struct bdi_writeback *wb)
+{
+ unsigned long expired;
+ long nr_pages;
+
+ expired = wb->last_old_flush +
+ msecs_to_jiffies(dirty_writeback_interval * 10);
+ if (time_before(jiffies, expired))
+ return 0;
+
+ wb->last_old_flush = jiffies;
+ nr_pages = global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS) +
+ (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+
+ if (nr_pages)
+ return wb_writeback(wb, nr_pages, NULL, WB_SYNC_NONE, 1);
+
+ return 0;
+}
+
+/*
+ * Retrieve work items and do the writeback they describe
+ */
+long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
+{
+ struct backing_dev_info *bdi = wb->bdi;
+ struct bdi_work *work;
+ long nr_pages, wrote = 0;
+
+ while ((work = get_next_work_item(bdi, wb)) != NULL) {
+ enum writeback_sync_modes sync_mode;
+
+ nr_pages = work->nr_pages;
+
+ /*
+ * Override sync mode, in case we must wait for completion
+ */
+ if (force_wait)
+ work->sync_mode = sync_mode = WB_SYNC_ALL;
+ else
+ sync_mode = work->sync_mode;
+
+ /*
+ * If this isn't a data integrity operation, just notify
+ * that we have seen this work and we are now starting it.
+ */
+ if (sync_mode == WB_SYNC_NONE)
+ wb_clear_pending(wb, work);
+
+ wrote += wb_writeback(wb, nr_pages, work->sb, sync_mode, 0);
+
+ /*
+ * This is a data integrity writeback, so only do the
+ * notification when we have completed the work.
+ */
+ if (sync_mode == WB_SYNC_ALL)
+ wb_clear_pending(wb, work);
+ }
+
+ /*
+ * Check for periodic writeback, kupdated() style
+ */
+ wrote += wb_check_old_data_flush(wb);
+
+ return wrote;
+}
+
+/*
+ * Handle writeback of dirty data for the device backed by this bdi. Also
+ * wakes up periodically and does kupdated style flushing.
+ */
+int bdi_writeback_task(struct bdi_writeback *wb)
+{
+ unsigned long last_active = jiffies;
+ unsigned long wait_jiffies = -1UL;
+ long pages_written;
+
+ while (!kthread_should_stop()) {
+ pages_written = wb_do_writeback(wb, 0);
+
+ if (pages_written)
+ last_active = jiffies;
+ else if (wait_jiffies != -1UL) {
+ unsigned long max_idle;
+
/*
- * We hold a reference to 'inode' so it couldn't have
- * been removed from s_inodes list while we dropped the
- * inode_lock. We cannot iput the inode now as we can
- * be holding the last reference and we cannot iput it
- * under inode_lock. So we keep the reference and iput
- * it later.
+ * Longest period of inactivity that we tolerate. If we
+ * see dirty data again later, the task will get
+ * recreated automatically.
*/
- iput(old_inode);
- old_inode = inode;
+ max_idle = max(5UL * 60 * HZ, wait_jiffies);
+ if (time_after(jiffies, max_idle + last_active))
+ break;
+ }
- filemap_fdatawait(mapping);
+ wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(wait_jiffies);
+ try_to_freeze();
+ }
- cond_resched();
+ return 0;
+}
+
+/*
+ * Schedule writeback for all backing devices. Expensive! If this is a data
+ * integrity operation, writeback will be complete when this returns. If
+ * we are simply called for WB_SYNC_NONE, then writeback will merely be
+ * scheduled to run.
+ */
+static void bdi_writeback_all(struct writeback_control *wbc)
+{
+ const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
+ struct backing_dev_info *bdi;
+ struct bdi_work *work;
+ LIST_HEAD(list);
+
+restart:
+ spin_lock(&bdi_lock);
+
+ list_for_each_entry(bdi, &bdi_list, bdi_list) {
+ struct bdi_work *work;
- spin_lock(&inode_lock);
+ if (!bdi_has_dirty_io(bdi))
+ continue;
+
+ /*
+ * If work allocation fails, do the writes inline. We drop
+ * the lock and restart the list writeout. This should be OK,
+ * since this happens rarely and because the writeout should
+ * eventually make more free memory available.
+ */
+ work = bdi_alloc_work(wbc);
+ if (!work) {
+ struct writeback_control __wbc;
+
+ /*
+ * Not a data integrity writeout, just continue
+ */
+ if (!must_wait)
+ continue;
+
+ spin_unlock(&bdi_lock);
+ __wbc = *wbc;
+ __wbc.bdi = bdi;
+ writeback_inodes_wbc(&__wbc);
+ goto restart;
}
- spin_unlock(&inode_lock);
- iput(old_inode);
- } else
- spin_unlock(&inode_lock);
+ if (must_wait)
+ list_add_tail(&work->wait_list, &list);
+
+ bdi_queue_work(bdi, work);
+ }
+
+ spin_unlock(&bdi_lock);
- return; /* Leave any unwritten inodes on s_io */
+ /*
+ * If this is for WB_SYNC_ALL, wait for pending work to complete
+ * before returning.
+ */
+ while (!list_empty(&list)) {
+ work = list_entry(list.next, struct bdi_work, wait_list);
+ list_del(&work->wait_list);
+ bdi_wait_on_work_clear(work);
+ call_rcu(&work->rcu_head, bdi_work_free);
+ }
}
-EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
-static void sync_sb_inodes(struct super_block *sb,
- struct writeback_control *wbc)
+/*
+ * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
+ * the whole world.
+ */
+void wakeup_flusher_threads(long nr_pages)
{
- generic_sync_sb_inodes(sb, wbc);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .older_than_this = NULL,
+ .range_cyclic = 1,
+ };
+
+ if (nr_pages == 0)
+ nr_pages = global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS);
+ wbc.nr_to_write = nr_pages;
+ bdi_writeback_all(&wbc);
}
-/*
- * Start writeback of dirty pagecache data against all unlocked inodes.
+static noinline void block_dump___mark_inode_dirty(struct inode *inode)
+{
+ if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
+ struct dentry *dentry;
+ const char *name = "?";
+
+ dentry = d_find_alias(inode);
+ if (dentry) {
+ spin_lock(&dentry->d_lock);
+ name = (const char *) dentry->d_name.name;
+ }
+ printk(KERN_DEBUG
+ "%s(%d): dirtied inode %lu (%s) on %s\n",
+ current->comm, task_pid_nr(current), inode->i_ino,
+ name, inode->i_sb->s_id);
+ if (dentry) {
+ spin_unlock(&dentry->d_lock);
+ dput(dentry);
+ }
+ }
+}
+
+/**
+ * __mark_inode_dirty - internal function
+ * @inode: inode to mark
+ * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
+ * Mark an inode as dirty. Callers should use mark_inode_dirty or
+ * mark_inode_dirty_sync.
*
- * Note:
- * We don't need to grab a reference to superblock here. If it has non-empty
- * ->s_dirty it's hadn't been killed yet and kill_super() won't proceed
- * past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are all
- * empty. Since __sync_single_inode() regains inode_lock before it finally moves
- * inode from superblock lists we are OK.
+ * Put the inode on the super block's dirty list.
+ *
+ * CAREFUL! We mark it dirty unconditionally, but move it onto the
+ * dirty list only if it is hashed or if it refers to a blockdev.
+ * If it was not hashed, it will never be added to the dirty list
+ * even if it is later hashed, as it will have been marked dirty already.
*
- * If `older_than_this' is non-zero then only flush inodes which have a
- * flushtime older than *older_than_this.
+ * In short, make sure you hash any inodes _before_ you start marking
+ * them dirty.
*
- * If `bdi' is non-zero then we will scan the first inode against each
- * superblock until we find the matching ones. One group will be the dirty
- * inodes against a filesystem. Then when we hit the dummy blockdev superblock,
- * sync_sb_inodes will seekout the blockdev which matches `bdi'. Maybe not
- * super-efficient but we're about to do a ton of I/O...
+ * This function *must* be atomic for the I_DIRTY_PAGES case -
+ * set_page_dirty() is called under spinlock in several places.
+ *
+ * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
+ * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
+ * the kernel-internal blockdev inode represents the dirtying time of the
+ * blockdev's pages. This is why for I_DIRTY_PAGES we always use
+ * page->mapping->host, so the page-dirtying time is recorded in the internal
+ * blockdev inode.
*/
-void
-writeback_inodes(struct writeback_control *wbc)
+void __mark_inode_dirty(struct inode *inode, int flags)
{
- struct super_block *sb;
+ struct super_block *sb = inode->i_sb;
- might_sleep();
- spin_lock(&sb_lock);
-restart:
- list_for_each_entry_reverse(sb, &super_blocks, s_list) {
- if (sb_has_dirty_inodes(sb)) {
- /* we're making our own get_super here */
- sb->s_count++;
- spin_unlock(&sb_lock);
- /*
- * If we can't get the readlock, there's no sense in
- * waiting around, most of the time the FS is going to
- * be unmounted by the time it is released.
- */
- if (down_read_trylock(&sb->s_umount)) {
- if (sb->s_root)
- sync_sb_inodes(sb, wbc);
- up_read(&sb->s_umount);
+ /*
+ * Don't do this for I_DIRTY_PAGES - that doesn't actually
+ * dirty the inode itself
+ */
+ if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
+ if (sb->s_op->dirty_inode)
+ sb->s_op->dirty_inode(inode);
+ }
+
+ /*
+ * make sure that changes are seen by all cpus before we test i_state
+ * -- mikulas
+ */
+ smp_mb();
+
+ /* avoid the locking if we can */
+ if ((inode->i_state & flags) == flags)
+ return;
+
+ if (unlikely(block_dump))
+ block_dump___mark_inode_dirty(inode);
+
+ spin_lock(&inode_lock);
+ if ((inode->i_state & flags) != flags) {
+ const int was_dirty = inode->i_state & I_DIRTY;
+
+ inode->i_state |= flags;
+
+ /*
+ * If the inode is being synced, just update its dirty state.
+ * The unlocker will place the inode on the appropriate
+ * superblock list, based upon its state.
+ */
+ if (inode->i_state & I_SYNC)
+ goto out;
+
+ /*
+ * Only add valid (hashed) inodes to the superblock's
+ * dirty list. Add blockdev inodes as well.
+ */
+ if (!S_ISBLK(inode->i_mode)) {
+ if (hlist_unhashed(&inode->i_hash))
+ goto out;
+ }
+ if (inode->i_state & (I_FREEING|I_CLEAR))
+ goto out;
+
+ /*
+ * If the inode was already on b_dirty/b_io/b_more_io, don't
+ * reposition it (that would break b_dirty time-ordering).
+ */
+ if (!was_dirty) {
+ struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
+ struct backing_dev_info *bdi = wb->bdi;
+
+ if (bdi_cap_writeback_dirty(bdi) &&
+ !test_bit(BDI_registered, &bdi->state)) {
+ WARN_ON(1);
+ printk(KERN_ERR "bdi-%s not registered\n",
+ bdi->name);
}
- spin_lock(&sb_lock);
- if (__put_super_and_need_restart(sb))
- goto restart;
+
+ inode->dirtied_when = jiffies;
+ list_move(&inode->i_list, &wb->b_dirty);
}
- if (wbc->nr_to_write <= 0)
- break;
}
- spin_unlock(&sb_lock);
+out:
+ spin_unlock(&inode_lock);
}
+EXPORT_SYMBOL(__mark_inode_dirty);
/*
- * writeback and wait upon the filesystem's dirty inodes. The caller will
- * do this in two passes - one to write, and one to wait.
+ * Write out a superblock's list of dirty inodes. A wait will be performed
+ * upon no inodes, all inodes or the final one, depending upon sync_mode.
+ *
+ * If older_than_this is non-NULL, then only write out inodes which
+ * had their first dirtying at a time earlier than *older_than_this.
*
- * A finite limit is set on the number of pages which will be written.
- * To prevent infinite livelock of sys_sync().
+ * If we're a pdflush thread, then implement pdflush collision avoidance
+ * against the entire list.
*
- * We add in the number of potentially dirty inodes, because each inode write
- * can dirty pagecache in the underlying blockdev.
+ * If `bdi' is non-zero then we're being asked to writeback a specific queue.
+ * This function assumes that the blockdev superblock's inodes are backed by
+ * a variety of queues, so all inodes are searched. For other superblocks,
+ * assume that all inodes are backed by the same queue.
+ *
+ * The inodes to be written are parked on bdi->b_io. They are moved back onto
+ * bdi->b_dirty as they are selected for writing. This way, none can be missed
+ * on the writer throttling path, and we get decent balancing between many
+ * throttled threads: we don't want them all piling up on inode_sync_wait.
*/
-void sync_inodes_sb(struct super_block *sb, int wait)
+static void wait_sb_inodes(struct writeback_control *wbc)
+{
+ struct inode *inode, *old_inode = NULL;
+
+ /*
+ * We need to be protected against the filesystem going from
+ * r/o to r/w or vice versa.
+ */
+ WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount));
+
+ spin_lock(&inode_lock);
+
+ /*
+ * Data integrity sync. Must wait for all pages under writeback,
+ * because there may have been pages dirtied before our sync
+ * call, but which had writeout started before we write it out.
+ * In which case, the inode may not be on the dirty list, but
+ * we still have to wait for that writeout.
+ */
+ list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) {
+ struct address_space *mapping;
+
+ if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
+ continue;
+ mapping = inode->i_mapping;
+ if (mapping->nrpages == 0)
+ continue;
+ __iget(inode);
+ spin_unlock(&inode_lock);
+ /*
+ * We hold a reference to 'inode' so it couldn't have
+ * been removed from s_inodes list while we dropped the
+ * inode_lock. We cannot iput the inode now as we can
+ * be holding the last reference and we cannot iput it
+ * under inode_lock. So we keep the reference and iput
+ * it later.
+ */
+ iput(old_inode);
+ old_inode = inode;
+
+ filemap_fdatawait(mapping);
+
+ cond_resched();
+
+ spin_lock(&inode_lock);
+ }
+ spin_unlock(&inode_lock);
+ iput(old_inode);
+}
+
+/**
+ * writeback_inodes_sb - writeback dirty inodes from given super_block
+ * @sb: the superblock
+ *
+ * Start writeback on some inodes on this super_block. No guarantees are made
+ * on how many (if any) will be written, and this function does not wait
+ * for IO completion of submitted IO. The number of pages submitted is
+ * returned.
+ */
+long writeback_inodes_sb(struct super_block *sb)
{
struct writeback_control wbc = {
- .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+ .sb = sb,
+ .sync_mode = WB_SYNC_NONE,
.range_start = 0,
.range_end = LLONG_MAX,
};
+ unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+ unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+ long nr_to_write;
- if (!wait) {
- unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
- unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-
- wbc.nr_to_write = nr_dirty + nr_unstable +
+ nr_to_write = nr_dirty + nr_unstable +
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
- } else
- wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
- sync_sb_inodes(sb, &wbc);
+ wbc.nr_to_write = nr_to_write;
+ bdi_writeback_all(&wbc);
+ return nr_to_write - wbc.nr_to_write;
+}
+EXPORT_SYMBOL(writeback_inodes_sb);
+
+/**
+ * sync_inodes_sb - sync sb inode pages
+ * @sb: the superblock
+ *
+ * This function writes and waits on any dirty inode belonging to this
+ * super_block. The number of pages synced is returned.
+ */
+long sync_inodes_sb(struct super_block *sb)
+{
+ struct writeback_control wbc = {
+ .sb = sb,
+ .sync_mode = WB_SYNC_ALL,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ };
+ long nr_to_write = LONG_MAX; /* doesn't actually matter */
+
+ wbc.nr_to_write = nr_to_write;
+ bdi_writeback_all(&wbc);
+ wait_sb_inodes(&wbc);
+ return nr_to_write - wbc.nr_to_write;
}
+EXPORT_SYMBOL(sync_inodes_sb);
/**
* write_inode_now - write an inode to disk
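With generic_sync_sb_inodes() gone, the exported entry points for per-superblock writeback are writeback_inodes_sb() and sync_inodes_sb(), as the hunk at the top of this patch already uses for ubifs. A hedged sketch of how a hypothetical filesystem might call them, assuming the caller satisfies the locking expectations noted above (e.g. s_umount held for the data-integrity case):

#include <linux/fs.h>
#include <linux/writeback.h>

/* hypothetical in-memory filesystem */
static void foofs_kill_sb(struct super_block *sb)
{
	/* WB_SYNC_ALL: write out and wait on every dirty inode of this sb */
	sync_inodes_sb(sb);
	kill_anon_super(sb);
}

static int foofs_sync_fs(struct super_block *sb, int wait)
{
	if (!wait)
		/* WB_SYNC_NONE: just start background writeback, don't wait */
		writeback_inodes_sb(sb);
	return 0;
}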
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index f91ccc4a189d..4567db6f9430 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -801,6 +801,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
int err;
+ fc->bdi.name = "fuse";
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does it's own writeback accounting */
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index cb88dac8ccaa..a93b885311d8 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -44,6 +44,7 @@ static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
static struct backing_dev_info hugetlbfs_backing_dev_info = {
+ .name = "hugetlbfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 8fcb6239218e..7edb62e97419 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -258,7 +258,7 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return rc;
}
-static int jffs2_check_acl(struct inode *inode, int mask)
+int jffs2_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl;
int rc;
@@ -274,11 +274,6 @@ static int jffs2_check_acl(struct inode *inode, int mask)
return -EAGAIN;
}
-int jffs2_permission(struct inode *inode, int mask)
-{
- return generic_permission(inode, mask, jffs2_check_acl);
-}
-
int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
{
struct posix_acl *acl, *clone;
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index fc929f2a14f6..f0ba63e3c36b 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -26,7 +26,7 @@ struct jffs2_acl_header {
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
-extern int jffs2_permission(struct inode *, int);
+extern int jffs2_check_acl(struct inode *, int);
extern int jffs2_acl_chmod(struct inode *);
extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
extern int jffs2_init_acl_post(struct inode *);
@@ -36,7 +36,7 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
#else
-#define jffs2_permission (NULL)
+#define jffs2_check_acl (NULL)
#define jffs2_acl_chmod(inode) (0)
#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
#define jffs2_init_acl_post(inode) (0)
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 6f60cc910f4c..7aa4417e085f 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -55,7 +55,7 @@ const struct inode_operations jffs2_dir_inode_operations =
.rmdir = jffs2_rmdir,
.mknod = jffs2_mknod,
.rename = jffs2_rename,
- .permission = jffs2_permission,
+ .check_acl = jffs2_check_acl,
.setattr = jffs2_setattr,
.setxattr = jffs2_setxattr,
.getxattr = jffs2_getxattr,
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 23c947539864..b7b74e299142 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -56,7 +56,7 @@ const struct file_operations jffs2_file_operations =
const struct inode_operations jffs2_file_inode_operations =
{
- .permission = jffs2_permission,
+ .check_acl = jffs2_check_acl,
.setattr = jffs2_setattr,
.setxattr = jffs2_setxattr,
.getxattr = jffs2_getxattr,
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index b7339c3b6ad9..4ec11e8bda8c 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -21,7 +21,7 @@ const struct inode_operations jffs2_symlink_inode_operations =
{
.readlink = generic_readlink,
.follow_link = jffs2_follow_link,
- .permission = jffs2_permission,
+ .check_acl = jffs2_check_acl,
.setattr = jffs2_setattr,
.setxattr = jffs2_setxattr,
.getxattr = jffs2_getxattr,
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index a29c7c3e3fb8..d66477c34306 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -114,7 +114,7 @@ out:
return rc;
}
-static int jfs_check_acl(struct inode *inode, int mask)
+int jfs_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
@@ -129,11 +129,6 @@ static int jfs_check_acl(struct inode *inode, int mask)
return -EAGAIN;
}
-int jfs_permission(struct inode *inode, int mask)
-{
- return generic_permission(inode, mask, jfs_check_acl);
-}
-
int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir)
{
struct posix_acl *acl = NULL;
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 7f6063acaa3b..2b70fa78e4a7 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -96,7 +96,7 @@ const struct inode_operations jfs_file_inode_operations = {
.removexattr = jfs_removexattr,
#ifdef CONFIG_JFS_POSIX_ACL
.setattr = jfs_setattr,
- .permission = jfs_permission,
+ .check_acl = jfs_check_acl,
#endif
};
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h
index 88475f10a389..b07bd417ef85 100644
--- a/fs/jfs/jfs_acl.h
+++ b/fs/jfs/jfs_acl.h
@@ -20,7 +20,7 @@
#ifdef CONFIG_JFS_POSIX_ACL
-int jfs_permission(struct inode *, int);
+int jfs_check_acl(struct inode *, int);
int jfs_init_acl(tid_t, struct inode *, struct inode *);
int jfs_setattr(struct dentry *, struct iattr *);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 514ee2edb92a..c79a4270f083 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1543,7 +1543,7 @@ const struct inode_operations jfs_dir_inode_operations = {
.removexattr = jfs_removexattr,
#ifdef CONFIG_JFS_POSIX_ACL
.setattr = jfs_setattr,
- .permission = jfs_permission,
+ .check_acl = jfs_check_acl,
#endif
};
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 99d737bd4325..7cb076ac6b45 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -87,18 +87,6 @@ static unsigned int nlm_hash_address(const struct sockaddr *sap)
return hash & (NLM_HOST_NRHASH - 1);
}
-static void nlm_clear_port(struct sockaddr *sap)
-{
- switch (sap->sa_family) {
- case AF_INET:
- ((struct sockaddr_in *)sap)->sin_port = 0;
- break;
- case AF_INET6:
- ((struct sockaddr_in6 *)sap)->sin6_port = 0;
- break;
- }
-}
-
/*
* Common host lookup routine for server & client
*/
@@ -177,7 +165,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
host->h_addrbuf = nsm->sm_addrbuf;
memcpy(nlm_addr(host), ni->sap, ni->salen);
host->h_addrlen = ni->salen;
- nlm_clear_port(nlm_addr(host));
+ rpc_set_port(nlm_addr(host), 0);
memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
host->h_version = ni->version;
host->h_proto = ni->protocol;
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 7fce1b525849..30c933188dd7 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -61,43 +61,6 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
return (struct sockaddr *)&nsm->sm_addr;
}
-static void nsm_display_ipv4_address(const struct sockaddr *sap, char *buf,
- const size_t len)
-{
- const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
- snprintf(buf, len, "%pI4", &sin->sin_addr.s_addr);
-}
-
-static void nsm_display_ipv6_address(const struct sockaddr *sap, char *buf,
- const size_t len)
-{
- const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
-
- if (ipv6_addr_v4mapped(&sin6->sin6_addr))
- snprintf(buf, len, "%pI4", &sin6->sin6_addr.s6_addr32[3]);
- else if (sin6->sin6_scope_id != 0)
- snprintf(buf, len, "%pI6%%%u", &sin6->sin6_addr,
- sin6->sin6_scope_id);
- else
- snprintf(buf, len, "%pI6", &sin6->sin6_addr);
-}
-
-static void nsm_display_address(const struct sockaddr *sap,
- char *buf, const size_t len)
-{
- switch (sap->sa_family) {
- case AF_INET:
- nsm_display_ipv4_address(sap, buf, len);
- break;
- case AF_INET6:
- nsm_display_ipv6_address(sap, buf, len);
- break;
- default:
- snprintf(buf, len, "unsupported address family");
- break;
- }
-}
-
static struct rpc_clnt *nsm_create(void)
{
struct sockaddr_in sin = {
@@ -307,8 +270,11 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
memcpy(nsm_addr(new), sap, salen);
new->sm_addrlen = salen;
nsm_init_private(new);
- nsm_display_address((const struct sockaddr *)&new->sm_addr,
- new->sm_addrbuf, sizeof(new->sm_addrbuf));
+
+ if (rpc_ntop(nsm_addr(new), new->sm_addrbuf,
+ sizeof(new->sm_addrbuf)) == 0)
+ (void)snprintf(new->sm_addrbuf, sizeof(new->sm_addrbuf),
+ "unsupported address family");
memcpy(new->sm_name, hostname, hostname_len);
new->sm_name[hostname_len] = '\0';
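The lockd hunks above replace the private address helpers with the shared sunrpc ones. A small hedged sketch of the same two calls; the buffer size and the debug message are arbitrary choices here:

#include <linux/kernel.h>
#include <linux/sunrpc/clnt.h>

static void foo_show_peer(struct sockaddr *sap)
{
	char buf[48];	/* roomy enough for an IPv6 address with a scope id */

	/* rpc_ntop() returns 0 when it cannot format the address family */
	if (rpc_ntop(sap, buf, sizeof(buf)) == 0)
		snprintf(buf, sizeof(buf), "unsupported address family");
	printk(KERN_DEBUG "peer address: %s\n", buf);

	/* zero the port, as nlm_lookup_host() now does via rpc_set_port() */
	rpc_set_port(sap, 0);
}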
diff --git a/fs/locks.c b/fs/locks.c
index b6440f52178f..19ee18a6829b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -768,7 +768,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
* give it the opportunity to lock the file.
*/
if (found)
- cond_resched_bkl();
+ cond_resched();
find_conflict:
for_each_lock(inode, before) {
@@ -1591,7 +1591,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
if (can_sleep)
lock->fl_flags |= FL_SLEEP;
- error = security_file_lock(filp, cmd);
+ error = security_file_lock(filp, lock->fl_type);
if (error)
goto out_free;
diff --git a/fs/namei.c b/fs/namei.c
index f3c5b278895a..d11f404667e9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -169,19 +169,10 @@ void putname(const char *name)
EXPORT_SYMBOL(putname);
#endif
-
-/**
- * generic_permission - check for access rights on a Posix-like filesystem
- * @inode: inode to check access rights for
- * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
- * @check_acl: optional callback to check for Posix ACLs
- *
- * Used to check for read/write/execute permissions on a file.
- * We use "fsuid" for this, letting us set arbitrary permissions
- * for filesystem access without changing the "normal" uids which
- * are used for other things..
+/*
+ * This does basic POSIX ACL permission checking
*/
-int generic_permission(struct inode *inode, int mask,
+static int acl_permission_check(struct inode *inode, int mask,
int (*check_acl)(struct inode *inode, int mask))
{
umode_t mode = inode->i_mode;
@@ -193,9 +184,7 @@ int generic_permission(struct inode *inode, int mask,
else {
if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) {
int error = check_acl(inode, mask);
- if (error == -EACCES)
- goto check_capabilities;
- else if (error != -EAGAIN)
+ if (error != -EAGAIN)
return error;
}
@@ -208,8 +197,32 @@ int generic_permission(struct inode *inode, int mask,
*/
if ((mask & ~mode) == 0)
return 0;
+ return -EACCES;
+}
+
+/**
+ * generic_permission - check for access rights on a Posix-like filesystem
+ * @inode: inode to check access rights for
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
+ * @check_acl: optional callback to check for Posix ACLs
+ *
+ * Used to check for read/write/execute permissions on a file.
+ * We use "fsuid" for this, letting us set arbitrary permissions
+ * for filesystem access without changing the "normal" uids which
+ * are used for other things.
+ */
+int generic_permission(struct inode *inode, int mask,
+ int (*check_acl)(struct inode *inode, int mask))
+{
+ int ret;
+
+ /*
+ * Do the basic POSIX ACL permission checks.
+ */
+ ret = acl_permission_check(inode, mask, check_acl);
+ if (ret != -EACCES)
+ return ret;
- check_capabilities:
/*
* Read/write DACs are always overridable.
* Executable DACs are overridable if at least one exec bit is set.
@@ -262,7 +275,7 @@ int inode_permission(struct inode *inode, int mask)
if (inode->i_op->permission)
retval = inode->i_op->permission(inode, mask);
else
- retval = generic_permission(inode, mask, NULL);
+ retval = generic_permission(inode, mask, inode->i_op->check_acl);
if (retval)
return retval;
@@ -432,29 +445,22 @@ static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name,
*/
static int exec_permission_lite(struct inode *inode)
{
- umode_t mode = inode->i_mode;
+ int ret;
- if (inode->i_op->permission)
- return -EAGAIN;
-
- if (current_fsuid() == inode->i_uid)
- mode >>= 6;
- else if (in_group_p(inode->i_gid))
- mode >>= 3;
-
- if (mode & MAY_EXEC)
- goto ok;
-
- if ((inode->i_mode & S_IXUGO) && capable(CAP_DAC_OVERRIDE))
- goto ok;
-
- if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_OVERRIDE))
+ if (inode->i_op->permission) {
+ ret = inode->i_op->permission(inode, MAY_EXEC);
+ if (!ret)
+ goto ok;
+ return ret;
+ }
+ ret = acl_permission_check(inode, MAY_EXEC, inode->i_op->check_acl);
+ if (!ret)
goto ok;
- if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_READ_SEARCH))
+ if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
goto ok;
- return -EACCES;
+ return ret;
ok:
return security_inode_permission(inode, MAY_EXEC);
}
@@ -853,12 +859,6 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
nd->flags |= LOOKUP_CONTINUE;
err = exec_permission_lite(inode);
- if (err == -EAGAIN)
- err = inode_permission(nd->path.dentry->d_inode,
- MAY_EXEC);
- if (!err)
- err = ima_path_check(&nd->path, MAY_EXEC,
- IMA_COUNT_UPDATE);
if (err)
break;
@@ -1533,37 +1533,42 @@ int may_open(struct path *path, int acc_mode, int flag)
if (error)
return error;
- error = ima_path_check(path,
- acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC),
+ error = ima_path_check(path, acc_mode ?
+ acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC) :
+ ACC_MODE(flag) & (MAY_READ | MAY_WRITE),
IMA_COUNT_UPDATE);
+
if (error)
return error;
/*
* An append-only file must be opened in append mode for writing.
*/
if (IS_APPEND(inode)) {
+ error = -EPERM;
if ((flag & FMODE_WRITE) && !(flag & O_APPEND))
- return -EPERM;
+ goto err_out;
if (flag & O_TRUNC)
- return -EPERM;
+ goto err_out;
}
/* O_NOATIME can only be set by the owner or superuser */
if (flag & O_NOATIME)
- if (!is_owner_or_cap(inode))
- return -EPERM;
+ if (!is_owner_or_cap(inode)) {
+ error = -EPERM;
+ goto err_out;
+ }
/*
* Ensure there are no outstanding leases on the file.
*/
error = break_lease(inode, flag);
if (error)
- return error;
+ goto err_out;
if (flag & O_TRUNC) {
error = get_write_access(inode);
if (error)
- return error;
+ goto err_out;
/*
* Refuse to truncate files with mandatory locks held on them.
@@ -1581,12 +1586,17 @@ int may_open(struct path *path, int acc_mode, int flag)
}
put_write_access(inode);
if (error)
- return error;
+ goto err_out;
} else
if (flag & FMODE_WRITE)
vfs_dq_init(inode);
return 0;
+err_out:
+ ima_counts_put(path, acc_mode ?
+ acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC) :
+ ACC_MODE(flag) & (MAY_READ | MAY_WRITE));
+ return error;
}
/*
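
With ->check_acl now passed straight through inode_permission() to generic_permission(), a filesystem that only needs POSIX ACL support no longer has to supply its own ->permission() method. A minimal, hypothetical sketch (the examplefs names are made up; the callback contract of returning -EAGAIN to fall back to the mode bits is taken from acl_permission_check() above):

	#include <linux/fs.h>

	/* Hypothetical ACL hook: 0 grants access, -EAGAIN falls back to the
	 * mode bits, any other -errno denies. */
	static int examplefs_check_acl(struct inode *inode, int mask)
	{
		return -EAGAIN;		/* no ACL stored: use mode bits */
	}

	static const struct inode_operations examplefs_inode_ops = {
		.check_acl = examplefs_check_acl,
		/* no .permission: inode_permission() now calls
		 * generic_permission(inode, mask, i_op->check_acl) */
	};
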
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 845159814de2..da7fda639eac 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -6,7 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
direct.o pagelist.o proc.o read.o symlink.o unlink.o \
- write.o namespace.o mount_clnt.o
+ write.o namespace.o mount_clnt.o \
+ dns_resolve.o cache_lib.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
new file mode 100644
index 000000000000..b4ffd0146ea6
--- /dev/null
+++ b/fs/nfs/cache_lib.c
@@ -0,0 +1,140 @@
+/*
+ * linux/fs/nfs/cache_lib.c
+ *
+ * Helper routines for the NFS client caches
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ */
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+
+#include "cache_lib.h"
+
+#define NFS_CACHE_UPCALL_PATHLEN 256
+#define NFS_CACHE_UPCALL_TIMEOUT 15
+
+static char nfs_cache_getent_prog[NFS_CACHE_UPCALL_PATHLEN] =
+ "/sbin/nfs_cache_getent";
+static unsigned long nfs_cache_getent_timeout = NFS_CACHE_UPCALL_TIMEOUT;
+
+module_param_string(cache_getent, nfs_cache_getent_prog,
+ sizeof(nfs_cache_getent_prog), 0600);
+MODULE_PARM_DESC(cache_getent, "Path to the client cache upcall program");
+module_param_named(cache_getent_timeout, nfs_cache_getent_timeout, ulong, 0600);
+MODULE_PARM_DESC(cache_getent_timeout, "Timeout (in seconds) after which "
+ "the cache upcall is assumed to have failed");
+
+int nfs_cache_upcall(struct cache_detail *cd, char *entry_name)
+{
+ static char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ NULL
+ };
+ char *argv[] = {
+ nfs_cache_getent_prog,
+ cd->name,
+ entry_name,
+ NULL
+ };
+ int ret = -EACCES;
+
+ if (nfs_cache_getent_prog[0] == '\0')
+ goto out;
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+ /*
+ * Disable the upcall mechanism if we're getting an ENOENT or
+ * EACCES error. The admin can re-enable it on the fly by using
+ * sysfs to set the 'cache_getent' parameter once the problem
+ * has been fixed.
+ */
+ if (ret == -ENOENT || ret == -EACCES)
+ nfs_cache_getent_prog[0] = '\0';
+out:
+ return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Deferred request handling
+ */
+void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq)
+{
+ if (atomic_dec_and_test(&dreq->count))
+ kfree(dreq);
+}
+
+static void nfs_dns_cache_revisit(struct cache_deferred_req *d, int toomany)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = container_of(d, struct nfs_cache_defer_req, deferred_req);
+
+ complete_all(&dreq->completion);
+ nfs_cache_defer_req_put(dreq);
+}
+
+static struct cache_deferred_req *nfs_dns_cache_defer(struct cache_req *req)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = container_of(req, struct nfs_cache_defer_req, req);
+ dreq->deferred_req.revisit = nfs_dns_cache_revisit;
+ atomic_inc(&dreq->count);
+
+ return &dreq->deferred_req;
+}
+
+struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);
+ if (dreq) {
+ init_completion(&dreq->completion);
+ atomic_set(&dreq->count, 1);
+ dreq->req.defer = nfs_dns_cache_defer;
+ }
+ return dreq;
+}
+
+int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq)
+{
+ if (wait_for_completion_timeout(&dreq->completion,
+ nfs_cache_getent_timeout * HZ) == 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+int nfs_cache_register(struct cache_detail *cd)
+{
+ struct nameidata nd;
+ struct vfsmount *mnt;
+ int ret;
+
+ mnt = rpc_get_mount();
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+ ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &nd);
+ if (ret)
+ goto err;
+ ret = sunrpc_cache_register_pipefs(nd.path.dentry,
+ cd->name, 0600, cd);
+ path_put(&nd.path);
+ if (!ret)
+ return ret;
+err:
+ rpc_put_mount();
+ return ret;
+}
+
+void nfs_cache_unregister(struct cache_detail *cd)
+{
+ sunrpc_cache_unregister_pipefs(cd);
+ rpc_put_mount();
+}
+
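
For reference, nfs_cache_upcall() execs the configured helper synchronously with the cache name and the key as its two arguments, so for the DNS cache added later in this patch the invocation is roughly (hostname illustrative):

	/sbin/nfs_cache_getent dns_resolve server.example.com

On -ENOENT or -EACCES the path is cleared, disabling further upcalls until the administrator writes a new value to the cache_getent module parameter (writable thanks to the 0600 permission above).
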
diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h
new file mode 100644
index 000000000000..76f856e284e4
--- /dev/null
+++ b/fs/nfs/cache_lib.h
@@ -0,0 +1,27 @@
+/*
+ * Helper routines for the NFS client caches
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/sunrpc/cache.h>
+#include <asm/atomic.h>
+
+/*
+ * Deferred request handling
+ */
+struct nfs_cache_defer_req {
+ struct cache_req req;
+ struct cache_deferred_req deferred_req;
+ struct completion completion;
+ atomic_t count;
+};
+
+extern int nfs_cache_upcall(struct cache_detail *cd, char *entry_name);
+extern struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void);
+extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
+extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
+
+extern int nfs_cache_register(struct cache_detail *cd);
+extern void nfs_cache_unregister(struct cache_detail *cd);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 7f604c7941fb..293fa0528a6e 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -43,21 +43,29 @@ static struct svc_program nfs4_callback_program;
unsigned int nfs_callback_set_tcpport;
unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
-static const int nfs_set_port_min = 0;
-static const int nfs_set_port_max = 65535;
+#define NFS_CALLBACK_MAXPORTNR (65535U)
-static int param_set_port(const char *val, struct kernel_param *kp)
+static int param_set_portnr(const char *val, struct kernel_param *kp)
{
- char *endp;
- int num = simple_strtol(val, &endp, 0);
- if (endp == val || *endp || num < nfs_set_port_min || num > nfs_set_port_max)
+ unsigned long num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+ ret = strict_strtoul(val, 0, &num);
+ if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
return -EINVAL;
- *((int *)kp->arg) = num;
+ *((unsigned int *)kp->arg) = num;
return 0;
}
-module_param_call(callback_tcpport, param_set_port, param_get_int,
- &nfs_callback_set_tcpport, 0644);
+static int param_get_portnr(char *buffer, struct kernel_param *kp)
+{
+ return param_get_uint(buffer, kp);
+}
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+
+module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
/*
* This is the NFSv4 callback kernel thread.
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 8d25ccb2d51d..e350bd6a2334 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -809,6 +809,9 @@ static int nfs_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
server->options = data->options;
+ server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
+ NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
+ NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
@@ -879,6 +882,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *
server->rsize = NFS_MAX_FILE_IO_SIZE;
server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ server->backing_dev_info.name = "nfs";
server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
if (server->wsize > max_rpc_payload)
@@ -1074,10 +1078,6 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
spin_lock(&nfs_client_lock);
list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
list_add_tail(&server->master_link, &nfs_volume_list);
@@ -1274,7 +1274,7 @@ static int nfs4_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
- server->caps |= NFS_CAP_ATOMIC_OPEN;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
server->options = data->options;
/* Get a client record */
@@ -1359,10 +1359,6 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
server->namelen = NFS4_MAXNAMLEN;
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
spin_lock(&nfs_client_lock);
list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
list_add_tail(&server->master_link, &nfs_volume_list);
@@ -1400,7 +1396,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
/* Initialise the client representation from the parent server */
nfs_server_copy_userdata(server, parent_server);
- server->caps |= NFS_CAP_ATOMIC_OPEN;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
/* Get a client representation.
* Note: NFSv4 always uses TCP, */
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e4e089a8f294..6c3210099d51 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -934,9 +934,6 @@ out:
* back into its cache. We let the server do generic write
* parameter checking and report problems.
*
- * We also avoid an unnecessary invocation of generic_osync_inode(),
- * as it is fairly meaningless to sync the metadata of an NFS file.
- *
* We eliminate local atime updates, see direct read above.
*
* We avoid unnecessary page cache invalidations for normal cached
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
new file mode 100644
index 000000000000..f4d54ba97cc6
--- /dev/null
+++ b/fs/nfs/dns_resolve.c
@@ -0,0 +1,335 @@
+/*
+ * linux/fs/nfs/dns_resolve.c
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ *
+ * Resolves DNS hostnames into valid IP addresses
+ */
+
+#include <linux/hash.h>
+#include <linux/string.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/seq_file.h>
+#include <linux/inet.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/sunrpc/svcauth.h>
+
+#include "dns_resolve.h"
+#include "cache_lib.h"
+
+#define NFS_DNS_HASHBITS 4
+#define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS)
+
+static struct cache_head *nfs_dns_table[NFS_DNS_HASHTBL_SIZE];
+
+struct nfs_dns_ent {
+ struct cache_head h;
+
+ char *hostname;
+ size_t namelen;
+
+ struct sockaddr_storage addr;
+ size_t addrlen;
+};
+
+
+static void nfs_dns_ent_init(struct cache_head *cnew,
+ struct cache_head *ckey)
+{
+ struct nfs_dns_ent *new;
+ struct nfs_dns_ent *key;
+
+ new = container_of(cnew, struct nfs_dns_ent, h);
+ key = container_of(ckey, struct nfs_dns_ent, h);
+
+ kfree(new->hostname);
+ new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
+ if (new->hostname) {
+ new->namelen = key->namelen;
+ memcpy(&new->addr, &key->addr, key->addrlen);
+ new->addrlen = key->addrlen;
+ } else {
+ new->namelen = 0;
+ new->addrlen = 0;
+ }
+}
+
+static void nfs_dns_ent_put(struct kref *ref)
+{
+ struct nfs_dns_ent *item;
+
+ item = container_of(ref, struct nfs_dns_ent, h.ref);
+ kfree(item->hostname);
+ kfree(item);
+}
+
+static struct cache_head *nfs_dns_ent_alloc(void)
+{
+ struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL);
+
+ if (item != NULL) {
+ item->hostname = NULL;
+ item->namelen = 0;
+ item->addrlen = 0;
+ return &item->h;
+ }
+ return NULL;
+};
+
+static unsigned int nfs_dns_hash(const struct nfs_dns_ent *key)
+{
+ return hash_str(key->hostname, NFS_DNS_HASHBITS);
+}
+
+static void nfs_dns_request(struct cache_detail *cd,
+ struct cache_head *ch,
+ char **bpp, int *blen)
+{
+ struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
+
+ qword_add(bpp, blen, key->hostname);
+ (*bpp)[-1] = '\n';
+}
+
+static int nfs_dns_upcall(struct cache_detail *cd,
+ struct cache_head *ch)
+{
+ struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
+ int ret;
+
+ ret = nfs_cache_upcall(cd, key->hostname);
+ if (ret)
+ ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request);
+ return ret;
+}
+
+static int nfs_dns_match(struct cache_head *ca,
+ struct cache_head *cb)
+{
+ struct nfs_dns_ent *a;
+ struct nfs_dns_ent *b;
+
+ a = container_of(ca, struct nfs_dns_ent, h);
+ b = container_of(cb, struct nfs_dns_ent, h);
+
+ if (a->namelen == 0 || a->namelen != b->namelen)
+ return 0;
+ return memcmp(a->hostname, b->hostname, a->namelen) == 0;
+}
+
+static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
+ struct cache_head *h)
+{
+ struct nfs_dns_ent *item;
+ long ttl;
+
+ if (h == NULL) {
+ seq_puts(m, "# ip address hostname ttl\n");
+ return 0;
+ }
+ item = container_of(h, struct nfs_dns_ent, h);
+ ttl = (long)item->h.expiry_time - (long)get_seconds();
+ if (ttl < 0)
+ ttl = 0;
+
+ if (!test_bit(CACHE_NEGATIVE, &h->flags)) {
+ char buf[INET6_ADDRSTRLEN+IPV6_SCOPE_ID_LEN+1];
+
+ rpc_ntop((struct sockaddr *)&item->addr, buf, sizeof(buf));
+ seq_printf(m, "%15s ", buf);
+ } else
+ seq_puts(m, "<none> ");
+ seq_printf(m, "%15s %ld\n", item->hostname, ttl);
+ return 0;
+}
+
+struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
+ struct nfs_dns_ent *key)
+{
+ struct cache_head *ch;
+
+ ch = sunrpc_cache_lookup(cd,
+ &key->h,
+ nfs_dns_hash(key));
+ if (!ch)
+ return NULL;
+ return container_of(ch, struct nfs_dns_ent, h);
+}
+
+struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
+ struct nfs_dns_ent *new,
+ struct nfs_dns_ent *key)
+{
+ struct cache_head *ch;
+
+ ch = sunrpc_cache_update(cd,
+ &new->h, &key->h,
+ nfs_dns_hash(key));
+ if (!ch)
+ return NULL;
+ return container_of(ch, struct nfs_dns_ent, h);
+}
+
+static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
+{
+ char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
+ struct nfs_dns_ent key, *item;
+ unsigned long ttl;
+ ssize_t len;
+ int ret = -EINVAL;
+
+ if (buf[buflen-1] != '\n')
+ goto out;
+ buf[buflen-1] = '\0';
+
+ len = qword_get(&buf, buf1, sizeof(buf1));
+ if (len <= 0)
+ goto out;
+ key.addrlen = rpc_pton(buf1, len,
+ (struct sockaddr *)&key.addr,
+ sizeof(key.addr));
+
+ len = qword_get(&buf, buf1, sizeof(buf1));
+ if (len <= 0)
+ goto out;
+
+ key.hostname = buf1;
+ key.namelen = len;
+ memset(&key.h, 0, sizeof(key.h));
+
+ ttl = get_expiry(&buf);
+ if (ttl == 0)
+ goto out;
+ key.h.expiry_time = ttl + get_seconds();
+
+ ret = -ENOMEM;
+ item = nfs_dns_lookup(cd, &key);
+ if (item == NULL)
+ goto out;
+
+ if (key.addrlen == 0)
+ set_bit(CACHE_NEGATIVE, &key.h.flags);
+
+ item = nfs_dns_update(cd, &key, item);
+ if (item == NULL)
+ goto out;
+
+ ret = 0;
+ cache_put(&item->h, cd);
+out:
+ return ret;
+}
+
+static struct cache_detail nfs_dns_resolve = {
+ .owner = THIS_MODULE,
+ .hash_size = NFS_DNS_HASHTBL_SIZE,
+ .hash_table = nfs_dns_table,
+ .name = "dns_resolve",
+ .cache_put = nfs_dns_ent_put,
+ .cache_upcall = nfs_dns_upcall,
+ .cache_parse = nfs_dns_parse,
+ .cache_show = nfs_dns_show,
+ .match = nfs_dns_match,
+ .init = nfs_dns_ent_init,
+ .update = nfs_dns_ent_init,
+ .alloc = nfs_dns_ent_alloc,
+};
+
+static int do_cache_lookup(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item,
+ struct nfs_cache_defer_req *dreq)
+{
+ int ret = -ENOMEM;
+
+ *item = nfs_dns_lookup(cd, key);
+ if (*item) {
+ ret = cache_check(cd, &(*item)->h, &dreq->req);
+ if (ret)
+ *item = NULL;
+ }
+ return ret;
+}
+
+static int do_cache_lookup_nowait(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item)
+{
+ int ret = -ENOMEM;
+
+ *item = nfs_dns_lookup(cd, key);
+ if (!*item)
+ goto out_err;
+ ret = -ETIMEDOUT;
+ if (!test_bit(CACHE_VALID, &(*item)->h.flags)
+ || (*item)->h.expiry_time < get_seconds()
+ || cd->flush_time > (*item)->h.last_refresh)
+ goto out_put;
+ ret = -ENOENT;
+ if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
+ goto out_put;
+ return 0;
+out_put:
+ cache_put(&(*item)->h, cd);
+out_err:
+ *item = NULL;
+ return ret;
+}
+
+static int do_cache_lookup_wait(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item)
+{
+ struct nfs_cache_defer_req *dreq;
+ int ret = -ENOMEM;
+
+ dreq = nfs_cache_defer_req_alloc();
+ if (!dreq)
+ goto out;
+ ret = do_cache_lookup(cd, key, item, dreq);
+ if (ret == -EAGAIN) {
+ ret = nfs_cache_wait_for_upcall(dreq);
+ if (!ret)
+ ret = do_cache_lookup_nowait(cd, key, item);
+ }
+ nfs_cache_defer_req_put(dreq);
+out:
+ return ret;
+}
+
+ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
+ struct sockaddr *sa, size_t salen)
+{
+ struct nfs_dns_ent key = {
+ .hostname = name,
+ .namelen = namelen,
+ };
+ struct nfs_dns_ent *item = NULL;
+ ssize_t ret;
+
+ ret = do_cache_lookup_wait(&nfs_dns_resolve, &key, &item);
+ if (ret == 0) {
+ if (salen >= item->addrlen) {
+ memcpy(sa, &item->addr, item->addrlen);
+ ret = item->addrlen;
+ } else
+ ret = -EOVERFLOW;
+ cache_put(&item->h, &nfs_dns_resolve);
+ } else if (ret == -ENOENT)
+ ret = -ESRCH;
+ return ret;
+}
+
+int nfs_dns_resolver_init(void)
+{
+ return nfs_cache_register(&nfs_dns_resolve);
+}
+
+void nfs_dns_resolver_destroy(void)
+{
+ nfs_cache_unregister(&nfs_dns_resolve);
+}
+
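
The reply fed back through the cache channel is what nfs_dns_parse() above expects: an address, a hostname and a time-to-live in seconds, whitespace-separated and newline-terminated. An illustrative downcall line (values made up):

	192.168.1.10 filer.example.com 3600

If the address field cannot be parsed by rpc_pton(), addrlen stays 0 and the entry is stored as a negative one.
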
diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h
new file mode 100644
index 000000000000..a3f0938babf7
--- /dev/null
+++ b/fs/nfs/dns_resolve.h
@@ -0,0 +1,14 @@
+/*
+ * Resolve DNS hostnames into valid IP addresses
+ */
+#ifndef __LINUX_FS_NFS_DNS_RESOLVE_H
+#define __LINUX_FS_NFS_DNS_RESOLVE_H
+
+#define NFS_DNS_HOSTNAME_MAXLEN (128)
+
+extern int nfs_dns_resolver_init(void);
+extern void nfs_dns_resolver_destroy(void);
+extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
+ struct sockaddr *sa, size_t salen);
+
+#endif
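
A minimal sketch of a caller of the new resolver; the wrapper name is made up, and the error meanings (-ESRCH for an unknown host, -ETIMEDOUT when no reply arrives, -EOVERFLOW for a too-small buffer) come from the implementation above:

	#include <linux/types.h>
	#include <linux/string.h>
	#include <linux/socket.h>
	#include "dns_resolve.h"

	static int example_lookup(const char *name, struct sockaddr_storage *ss)
	{
		ssize_t salen;

		salen = nfs_dns_resolve_name((char *)name, strlen(name),
					     (struct sockaddr *)ss, sizeof(*ss));
		if (salen < 0)
			return salen;
		/* *ss now holds an address of length salen */
		return 0;
	}
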
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 05062329b678..5021b75d2d1e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -328,6 +328,42 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
}
/*
+ * Decide whether a read/modify/write cycle may be more efficient
+ * than a modify/write/read cycle when writing to a page in the
+ * page cache.
+ *
+ * The modify/write/read cycle may occur if a page is read before
+ * being completely filled by the writer. In this situation, the
+ * page must be completely written to stable storage on the server
+ * before it can be refilled by reading in the page from the server.
+ * This can lead to expensive, small, FILE_SYNC mode writes being
+ * done.
+ *
+ * It may be more efficient to read the page first if the file is
+ * open for reading in addition to writing, the page is not marked
+ * as Uptodate, it is not dirty or waiting to be committed,
+ * indicating that it was previously allocated and then modified,
+ * that there were valid bytes of data in that range of the file,
+ * and that the new data won't completely replace the old data in
+ * that range of the file.
+ */
+static int nfs_want_read_modify_write(struct file *file, struct page *page,
+ loff_t pos, unsigned len)
+{
+ unsigned int pglen = nfs_page_length(page);
+ unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned int end = offset + len;
+
+ if ((file->f_mode & FMODE_READ) && /* open for read? */
+ !PageUptodate(page) && /* Uptodate? */
+ !PagePrivate(page) && /* i/o request already? */
+ pglen && /* valid bytes of file? */
+ (end < pglen || offset)) /* replace all valid bytes? */
+ return 1;
+ return 0;
+}
+
+/*
* This does the "real" work of the write. We must allocate and lock the
* page to be sent back to the generic routine, which then copies the
* data from user space.
@@ -340,15 +376,16 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
int ret;
- pgoff_t index;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct page *page;
- index = pos >> PAGE_CACHE_SHIFT;
+ int once_thru = 0;
dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
file->f_path.dentry->d_parent->d_name.name,
file->f_path.dentry->d_name.name,
mapping->host->i_ino, len, (long long) pos);
+start:
/*
* Prevent starvation issues if someone is doing a consistency
* sync-to-disk
@@ -367,6 +404,13 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
if (ret) {
unlock_page(page);
page_cache_release(page);
+ } else if (!once_thru &&
+ nfs_want_read_modify_write(file, page, pos, len)) {
+ once_thru = 1;
+ ret = nfs_readpage(file, page);
+ page_cache_release(page);
+ if (!ret)
+ goto start;
}
return ret;
}
@@ -479,6 +523,7 @@ const struct address_space_operations nfs_file_aops = {
.invalidatepage = nfs_invalidate_page,
.releasepage = nfs_release_page,
.direct_IO = nfs_direct_IO,
+ .migratepage = nfs_migrate_page,
.launder_page = nfs_launder_page,
};
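
As a concrete illustration of nfs_want_read_modify_write() (assuming PAGE_CACHE_SIZE is 4096 and the whole page lies within i_size, so pglen == 4096): for a file opened O_RDWR, writing 512 bytes at offset 1024 of a cached page that is neither Uptodate nor carrying an outstanding request gives end == 1536 < pglen, so nfs_write_begin() reads the page in first and retries; a write covering the full 0..4095 range leaves end == pglen and offset == 0, so the read is skipped because none of the old data would survive.
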
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 86147b0ab2cf..21a84d45916f 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -101,7 +101,7 @@ static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
static unsigned int fnvhash32(const void *, size_t);
-static struct rpc_pipe_ops idmap_upcall_ops = {
+static const struct rpc_pipe_ops idmap_upcall_ops = {
.upcall = idmap_pipe_upcall,
.downcall = idmap_pipe_downcall,
.destroy_msg = idmap_pipe_destroy_msg,
@@ -119,8 +119,8 @@ nfs_idmap_new(struct nfs_client *clp)
if (idmap == NULL)
return -ENOMEM;
- idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_dentry, "idmap",
- idmap, &idmap_upcall_ops, 0);
+ idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_path.dentry,
+ "idmap", idmap, &idmap_upcall_ops, 0);
if (IS_ERR(idmap->idmap_dentry)) {
error = PTR_ERR(idmap->idmap_dentry);
kfree(idmap);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index bd7938eda6a8..060022b4651c 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -46,6 +46,7 @@
#include "iostat.h"
#include "internal.h"
#include "fscache.h"
+#include "dns_resolve.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -286,6 +287,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
/* We can't support update_atime(), since the server will reset it */
inode->i_flags |= S_NOATIME|S_NOCMTIME;
inode->i_mode = fattr->mode;
+ if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
+ && nfs_server_capable(inode, NFS_CAP_MODE))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
/* Why so? Because we want revalidate for devices/FIFOs, and
* that's precisely what we have in nfs_file_inode_operations.
*/
@@ -330,20 +336,46 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
nfsi->attr_gencount = fattr->gencount;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
inode->i_atime = fattr->atime;
+ else if (nfs_server_capable(inode, NFS_CAP_ATIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
inode->i_mtime = fattr->mtime;
+ else if (nfs_server_capable(inode, NFS_CAP_MTIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA;
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
inode->i_ctime = fattr->ctime;
+ else if (nfs_server_capable(inode, NFS_CAP_CTIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
nfsi->change_attr = fattr->change_attr;
+ else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA;
if (fattr->valid & NFS_ATTR_FATTR_SIZE)
inode->i_size = nfs_size_to_loff_t(fattr->size);
+ else
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_REVAL_PAGECACHE;
if (fattr->valid & NFS_ATTR_FATTR_NLINK)
inode->i_nlink = fattr->nlink;
+ else if (nfs_server_capable(inode, NFS_CAP_NLINK))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_OWNER)
inode->i_uid = fattr->uid;
+ else if (nfs_server_capable(inode, NFS_CAP_OWNER))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
if (fattr->valid & NFS_ATTR_FATTR_GROUP)
inode->i_gid = fattr->gid;
+ else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -1145,6 +1177,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
loff_t cur_isize, new_isize;
unsigned long invalid = 0;
unsigned long now = jiffies;
+ unsigned long save_cache_validity;
dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
__func__, inode->i_sb->s_id, inode->i_ino,
@@ -1171,10 +1204,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
nfsi->read_cache_jiffies = fattr->time_start;
- if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) || (fattr->valid & (NFS_ATTR_FATTR_MTIME|NFS_ATTR_FATTR_CTIME)))
- nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ATIME
- | NFS_INO_REVAL_PAGECACHE);
+ save_cache_validity = nfsi->cache_validity;
+ nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ATIME
+ | NFS_INO_REVAL_FORCED
+ | NFS_INO_REVAL_PAGECACHE);
/* Do atomic weak cache consistency updates */
nfs_wcc_update_inode(inode, fattr);
@@ -1189,7 +1223,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_force_lookup_revalidate(inode);
nfsi->change_attr = fattr->change_attr;
}
- }
+ } else if (server->caps & NFS_CAP_CHANGE_ATTR)
+ invalid |= save_cache_validity;
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
/* NFSv2/v3: Check if the mtime agrees */
@@ -1201,7 +1236,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_force_lookup_revalidate(inode);
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
}
- }
+ } else if (server->caps & NFS_CAP_MTIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
+
if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
/* If ctime has changed we should definitely clear access+acl caches */
if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
@@ -1215,7 +1255,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
}
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
}
- }
+ } else if (server->caps & NFS_CAP_CTIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
/* Check if our cached file size is stale */
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
@@ -1231,30 +1275,50 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
dprintk("NFS: isize change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
}
- }
+ } else
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+ else if (server->caps & NFS_CAP_ATIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME
+ | NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_mode = fattr->mode;
}
- }
+ } else if (server->caps & NFS_CAP_MODE)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
if (inode->i_uid != fattr->uid) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_uid = fattr->uid;
}
- }
+ } else if (server->caps & NFS_CAP_OWNER)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
if (inode->i_gid != fattr->gid) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_gid = fattr->gid;
}
- }
+ } else if (server->caps & NFS_CAP_OWNER_GROUP)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
if (inode->i_nlink != fattr->nlink) {
@@ -1263,7 +1327,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
invalid |= NFS_INO_INVALID_DATA;
inode->i_nlink = fattr->nlink;
}
- }
+ } else if (server->caps & NFS_CAP_NLINK)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
/*
@@ -1293,9 +1359,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
|| S_ISLNK(inode->i_mode)))
invalid &= ~NFS_INO_INVALID_DATA;
if (!nfs_have_delegation(inode, FMODE_READ) ||
- (nfsi->cache_validity & NFS_INO_REVAL_FORCED))
+ (save_cache_validity & NFS_INO_REVAL_FORCED))
nfsi->cache_validity |= invalid;
- nfsi->cache_validity &= ~NFS_INO_REVAL_FORCED;
return 0;
out_changed:
@@ -1442,6 +1507,10 @@ static int __init init_nfs_fs(void)
{
int err;
+ err = nfs_dns_resolver_init();
+ if (err < 0)
+ goto out8;
+
err = nfs_fscache_register();
if (err < 0)
goto out7;
@@ -1500,6 +1569,8 @@ out5:
out6:
nfs_fscache_unregister();
out7:
+ nfs_dns_resolver_destroy();
+out8:
return err;
}
@@ -1511,6 +1582,7 @@ static void __exit exit_nfs_fs(void)
nfs_destroy_inodecache();
nfs_destroy_nfspagecache();
nfs_fscache_unregister();
+ nfs_dns_resolver_destroy();
#ifdef CONFIG_PROC_FS
rpc_proc_unregister("nfs");
#endif
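
As an example of how the new capability bits and the saved cache_validity interact: once _nfs4_server_capabilities() (later in this patch) clears NFS_CAP_MODE for a server that never returns the mode attribute, its absence from a reply no longer flags the inode at all; for a server that does advertise mode but happens to omit it from one reply, the previously pending NFS_INO_INVALID_ATTR/ACCESS/ACL bits are carried forward instead of being dropped.
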
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7dd90a6769d0..e21b1bb9972f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -49,6 +49,11 @@ struct nfs_clone_mount {
#define NFS_MAX_SECFLAVORS (12)
/*
+ * Value used if the user did not specify a port value.
+ */
+#define NFS_UNSPEC_PORT (-1)
+
+/*
* In-kernel mount arguments
*/
struct nfs_parsed_mount_data {
@@ -63,6 +68,7 @@ struct nfs_parsed_mount_data {
unsigned int auth_flavor_len;
rpc_authflavor_t auth_flavors[1];
char *client_address;
+ unsigned int version;
unsigned int minorversion;
char *fscache_uniq;
@@ -71,7 +77,7 @@ struct nfs_parsed_mount_data {
size_t addrlen;
char *hostname;
u32 version;
- unsigned short port;
+ int port;
unsigned short protocol;
} mount_server;
@@ -80,7 +86,7 @@ struct nfs_parsed_mount_data {
size_t addrlen;
char *hostname;
char *export_path;
- unsigned short port;
+ int port;
unsigned short protocol;
} nfs_server;
@@ -102,6 +108,7 @@ struct nfs_mount_request {
};
extern int nfs_mount(struct nfs_mount_request *info);
+extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
extern struct rpc_program nfs_program;
@@ -213,7 +220,6 @@ void nfs_zap_acl_cache(struct inode *inode);
extern int nfs_wait_bit_killable(void *word);
/* super.c */
-void nfs_parse_ip_address(char *, size_t, struct sockaddr *, size_t *);
extern struct file_system_type nfs_xdev_fs_type;
#ifdef CONFIG_NFS_V4
extern struct file_system_type nfs4_xdev_fs_type;
@@ -248,6 +254,12 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
/* write.c */
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
+#ifdef CONFIG_MIGRATION
+extern int nfs_migrate_page(struct address_space *,
+ struct page *, struct page *);
+#else
+#define nfs_migrate_page NULL
+#endif
/* nfs4proc.c */
extern int _nfs4_call_sync(struct nfs_server *server,
@@ -368,24 +380,3 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
return ((unsigned long)len + (unsigned long)base +
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
-
-#define IPV6_SCOPE_DELIMITER '%'
-
-/*
- * Set the port number in an address. Be agnostic about the address
- * family.
- */
-static inline void nfs_set_port(struct sockaddr *sap, unsigned short port)
-{
- struct sockaddr_in *ap = (struct sockaddr_in *)sap;
- struct sockaddr_in6 *ap6 = (struct sockaddr_in6 *)sap;
-
- switch (sap->sa_family) {
- case AF_INET:
- ap->sin_port = htons(port);
- break;
- case AF_INET6:
- ap6->sin6_port = htons(port);
- break;
- }
-}
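
A minimal sketch of the replacement idiom, assuming rpc_set_port() lives in linux/sunrpc/clnt.h and NFS_PORT in linux/nfs.h; the shared helper handles the AF_INET/AF_INET6 switch that the removed nfs_set_port() open-coded:

	#include <linux/nfs.h>
	#include <linux/socket.h>
	#include <linux/sunrpc/clnt.h>
	#include "internal.h"		/* NFS_UNSPEC_PORT */

	static void example_default_port(struct sockaddr *sap, int port)
	{
		if (port == NFS_UNSPEC_PORT)	/* user gave no port= option */
			port = NFS_PORT;
		rpc_set_port(sap, port);
	}
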
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 38ef9eaec407..0adefc40cc89 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -209,6 +209,71 @@ out_mnt_err:
goto out;
}
+/**
+ * nfs_umount - Notify a server that we have unmounted this export
+ * @info: pointer to umount request arguments
+ *
+ * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always
+ * use UDP.
+ */
+void nfs_umount(const struct nfs_mount_request *info)
+{
+ static const struct rpc_timeout nfs_umnt_timeout = {
+ .to_initval = 1 * HZ,
+ .to_maxval = 3 * HZ,
+ .to_retries = 2,
+ };
+ struct rpc_create_args args = {
+ .protocol = IPPROTO_UDP,
+ .address = info->sap,
+ .addrsize = info->salen,
+ .timeout = &nfs_umnt_timeout,
+ .servername = info->hostname,
+ .program = &mnt_program,
+ .version = info->version,
+ .authflavor = RPC_AUTH_UNIX,
+ .flags = RPC_CLNT_CREATE_NOPING,
+ };
+ struct mountres result;
+ struct rpc_message msg = {
+ .rpc_argp = info->dirpath,
+ .rpc_resp = &result,
+ };
+ struct rpc_clnt *clnt;
+ int status;
+
+ if (info->noresvport)
+ args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
+
+ clnt = rpc_create(&args);
+ if (unlikely(IS_ERR(clnt)))
+ goto out_clnt_err;
+
+ dprintk("NFS: sending UMNT request for %s:%s\n",
+ (info->hostname ? info->hostname : "server"), info->dirpath);
+
+ if (info->version == NFS_MNT3_VERSION)
+ msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT];
+ else
+ msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT];
+
+ status = rpc_call_sync(clnt, &msg, 0);
+ rpc_shutdown_client(clnt);
+
+ if (unlikely(status < 0))
+ goto out_call_err;
+
+ return;
+
+out_clnt_err:
+ dprintk("NFS: failed to create UMNT RPC client, status=%ld\n",
+ PTR_ERR(clnt));
+ return;
+
+out_call_err:
+ dprintk("NFS: UMNT request failed, status=%d\n", status);
+}
+
/*
* XDR encode/decode functions for MOUNT
*/
@@ -258,7 +323,7 @@ static int decode_status(struct xdr_stream *xdr, struct mountres *res)
return -EIO;
status = ntohl(*p);
- for (i = 0; i <= ARRAY_SIZE(mnt_errtbl); i++) {
+ for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
if (mnt_errtbl[i].status == status) {
res->errno = mnt_errtbl[i].errno;
return 0;
@@ -309,7 +374,7 @@ static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
return -EIO;
status = ntohl(*p);
- for (i = 0; i <= ARRAY_SIZE(mnt3_errtbl); i++) {
+ for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
if (mnt3_errtbl[i].status == status) {
res->errno = mnt3_errtbl[i].errno;
return 0;
@@ -407,6 +472,13 @@ static struct rpc_procinfo mnt_procedures[] = {
.p_statidx = MOUNTPROC_MNT,
.p_name = "MOUNT",
},
+ [MOUNTPROC_UMNT] = {
+ .p_proc = MOUNTPROC_UMNT,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_statidx = MOUNTPROC_UMNT,
+ .p_name = "UMOUNT",
+ },
};
static struct rpc_procinfo mnt3_procedures[] = {
@@ -419,6 +491,13 @@ static struct rpc_procinfo mnt3_procedures[] = {
.p_statidx = MOUNTPROC3_MNT,
.p_name = "MOUNT",
},
+ [MOUNTPROC3_UMNT] = {
+ .p_proc = MOUNTPROC3_UMNT,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_statidx = MOUNTPROC3_UMNT,
+ .p_name = "UMOUNT",
+ },
};
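
A minimal sketch of how an unmount path might invoke the new advisory call; only the nfs_mount_request fields that nfs_umount() actually reads are filled in, and the wrapper itself is hypothetical:

	#include <linux/types.h>
	#include <linux/socket.h>
	#include <linux/nfs.h>		/* NFS_MNT3_VERSION */
	#include "internal.h"		/* struct nfs_mount_request, nfs_umount() */

	static void example_send_umnt(struct sockaddr *sap, size_t salen,
				      char *hostname, char *dirpath)
	{
		struct nfs_mount_request info = {
			.sap		= sap,
			.salen		= salen,
			.hostname	= hostname,
			.dirpath	= dirpath,
			.version	= NFS_MNT3_VERSION,
			.noresvport	= 0,
		};

		/* Advisory: failures are only logged via dprintk(). */
		nfs_umount(&info);
	}
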
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d0cc5ce0edfe..ee6a13f05443 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -299,7 +299,6 @@ static void nfs3_free_createdata(struct nfs3_createdata *data)
/*
* Create a regular file.
- * For now, we don't implement O_EXCL.
*/
static int
nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 2a2a0a7143ad..2636c26d56fa 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -17,6 +17,7 @@
#include <linux/inet.h>
#include "internal.h"
#include "nfs4_fs.h"
+#include "dns_resolve.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -95,6 +96,20 @@ static int nfs4_validate_fspath(const struct vfsmount *mnt_parent,
return 0;
}
+static size_t nfs_parse_server_name(char *string, size_t len,
+ struct sockaddr *sa, size_t salen)
+{
+ ssize_t ret;
+
+ ret = rpc_pton(string, len, sa, salen);
+ if (ret == 0) {
+ ret = nfs_dns_resolve_name(string, len, sa, salen);
+ if (ret < 0)
+ ret = 0;
+ }
+ return ret;
+}
+
static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
char *page, char *page2,
const struct nfs4_fs_location *location)
@@ -121,11 +136,12 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len))
continue;
- nfs_parse_ip_address(buf->data, buf->len,
- mountdata->addr, &mountdata->addrlen);
- if (mountdata->addr->sa_family == AF_UNSPEC)
+ mountdata->addrlen = nfs_parse_server_name(buf->data,
+ buf->len,
+ mountdata->addr, mountdata->addrlen);
+ if (mountdata->addrlen == 0)
continue;
- nfs_set_port(mountdata->addr, NFS_PORT);
+ rpc_set_port(mountdata->addr, NFS_PORT);
memcpy(page2, buf->data, buf->len);
page2[buf->len] = '\0';
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6917311f201c..be6544aef41f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -61,6 +61,8 @@
#define NFS4_POLL_RETRY_MIN (HZ/10)
#define NFS4_POLL_RETRY_MAX (15*HZ)
+#define NFS4_MAX_LOOP_ON_RECOVER (10)
+
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
@@ -426,17 +428,19 @@ out:
static int nfs4_recover_session(struct nfs4_session *session)
{
struct nfs_client *clp = session->clp;
+ unsigned int loop;
int ret;
- for (;;) {
+ for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
ret = nfs4_wait_clnt_recover(clp);
if (ret != 0)
- return ret;
+ break;
if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
break;
nfs4_schedule_state_manager(clp);
+ ret = -EIO;
}
- return 0;
+ return ret;
}
static int nfs41_setup_sequence(struct nfs4_session *session,
@@ -1444,18 +1448,20 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
static int nfs4_recover_expired_lease(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
+ unsigned int loop;
int ret;
- for (;;) {
+ for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
ret = nfs4_wait_clnt_recover(clp);
if (ret != 0)
- return ret;
+ break;
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
!test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
break;
nfs4_schedule_state_recovery(clp);
+ ret = -EIO;
}
- return 0;
+ return ret;
}
/*
@@ -1997,12 +2003,34 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
+ server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
+ NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
+ NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
+ NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
+ NFS_CAP_CTIME|NFS_CAP_MTIME);
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
server->caps |= NFS_CAP_ACLS;
if (res.has_links != 0)
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
+ if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
+ server->caps |= NFS_CAP_FILEID;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
+ server->caps |= NFS_CAP_MODE;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
+ server->caps |= NFS_CAP_NLINK;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
+ server->caps |= NFS_CAP_OWNER;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
+ server->caps |= NFS_CAP_OWNER_GROUP;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
+ server->caps |= NFS_CAP_ATIME;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
+ server->caps |= NFS_CAP_CTIME;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
+ server->caps |= NFS_CAP_MTIME;
+
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 617273e7d47f..cfc30d362f94 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -702,29 +702,12 @@ struct compound_hdr {
u32 minorversion;
};
-/*
- * START OF "GENERIC" ENCODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
-#define WRITE32(n) *p++ = htonl(n)
-#define WRITE64(n) do { \
- *p++ = htonl((uint32_t)((n) >> 32)); \
- *p++ = htonl((uint32_t)(n)); \
-} while (0)
-#define WRITEMEM(ptr,nbytes) do { \
- p = xdr_encode_opaque_fixed(p, ptr, nbytes); \
-} while (0)
-
-#define RESERVE_SPACE(nbytes) do { \
- p = xdr_reserve_space(xdr, nbytes); \
- BUG_ON(!p); \
-} while (0)
+static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
+{
+ __be32 *p = xdr_reserve_space(xdr, nbytes);
+ BUG_ON(!p);
+ return p;
+}
static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
@@ -749,12 +732,11 @@ static void encode_compound_hdr(struct xdr_stream *xdr,
dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
- RESERVE_SPACE(12+(XDR_QUADLEN(hdr->taglen)<<2));
- WRITE32(hdr->taglen);
- WRITEMEM(hdr->tag, hdr->taglen);
- WRITE32(hdr->minorversion);
+ p = reserve_space(xdr, 4 + hdr->taglen + 8);
+ p = xdr_encode_opaque(p, hdr->tag, hdr->taglen);
+ *p++ = cpu_to_be32(hdr->minorversion);
hdr->nops_p = p;
- WRITE32(hdr->nops);
+ *p = cpu_to_be32(hdr->nops);
}
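
The WRITE32/WRITE64/WRITEMEM macros give way to an explicit reserve-then-fill pattern. A minimal sketch of the same idiom using the public xdr_stream helpers (the op/offset/count triple is made up; unlike the static reserve_space() above, this version returns an error instead of BUG()ing when the buffer is exhausted):

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/sunrpc/xdr.h>

	/* Encode a 32-bit op, a 64-bit offset and a 32-bit count: 16 bytes. */
	static int example_encode(struct xdr_stream *xdr, u32 op,
				  u64 offset, u32 count)
	{
		__be32 *p;

		p = xdr_reserve_space(xdr, 16);
		if (p == NULL)
			return -EMSGSIZE;
		*p++ = cpu_to_be32(op);
		p = xdr_encode_hyper(p, offset);	/* two 32-bit words */
		*p = cpu_to_be32(count);
		return 0;
	}
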
static void encode_nops(struct compound_hdr *hdr)
@@ -829,55 +811,53 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
len += 16;
else if (iap->ia_valid & ATTR_MTIME)
len += 4;
- RESERVE_SPACE(len);
+ p = reserve_space(xdr, len);
/*
* We write the bitmap length now, but leave the bitmap and the attribute
* buffer length to be backfilled at the end of this routine.
*/
- WRITE32(2);
+ *p++ = cpu_to_be32(2);
q = p;
p += 3;
if (iap->ia_valid & ATTR_SIZE) {
bmval0 |= FATTR4_WORD0_SIZE;
- WRITE64(iap->ia_size);
+ p = xdr_encode_hyper(p, iap->ia_size);
}
if (iap->ia_valid & ATTR_MODE) {
bmval1 |= FATTR4_WORD1_MODE;
- WRITE32(iap->ia_mode & S_IALLUGO);
+ *p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
}
if (iap->ia_valid & ATTR_UID) {
bmval1 |= FATTR4_WORD1_OWNER;
- WRITE32(owner_namelen);
- WRITEMEM(owner_name, owner_namelen);
+ p = xdr_encode_opaque(p, owner_name, owner_namelen);
}
if (iap->ia_valid & ATTR_GID) {
bmval1 |= FATTR4_WORD1_OWNER_GROUP;
- WRITE32(owner_grouplen);
- WRITEMEM(owner_group, owner_grouplen);
+ p = xdr_encode_opaque(p, owner_group, owner_grouplen);
}
if (iap->ia_valid & ATTR_ATIME_SET) {
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
- WRITE32(NFS4_SET_TO_CLIENT_TIME);
- WRITE32(0);
- WRITE32(iap->ia_mtime.tv_sec);
- WRITE32(iap->ia_mtime.tv_nsec);
+ *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
+ *p++ = cpu_to_be32(0);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
}
else if (iap->ia_valid & ATTR_ATIME) {
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
- WRITE32(NFS4_SET_TO_SERVER_TIME);
+ *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
if (iap->ia_valid & ATTR_MTIME_SET) {
bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
- WRITE32(NFS4_SET_TO_CLIENT_TIME);
- WRITE32(0);
- WRITE32(iap->ia_mtime.tv_sec);
- WRITE32(iap->ia_mtime.tv_nsec);
+ *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
+ *p++ = cpu_to_be32(0);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
}
else if (iap->ia_valid & ATTR_MTIME) {
bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
- WRITE32(NFS4_SET_TO_SERVER_TIME);
+ *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
/*
@@ -891,7 +871,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
len = (char *)p - (char *)q - 12;
*q++ = htonl(bmval0);
*q++ = htonl(bmval1);
- *q++ = htonl(len);
+ *q = htonl(len);
/* out: */
}
@@ -900,9 +880,9 @@ static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hd
{
__be32 *p;
- RESERVE_SPACE(8);
- WRITE32(OP_ACCESS);
- WRITE32(access);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(OP_ACCESS);
+ *p = cpu_to_be32(access);
hdr->nops++;
hdr->replen += decode_access_maxsz;
}
@@ -911,10 +891,10 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
{
__be32 *p;
- RESERVE_SPACE(8+NFS4_STATEID_SIZE);
- WRITE32(OP_CLOSE);
- WRITE32(arg->seqid->sequence->counter);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 8+NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_CLOSE);
+ *p++ = cpu_to_be32(arg->seqid->sequence->counter);
+ xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_close_maxsz;
}
@@ -923,10 +903,10 @@ static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *ar
{
__be32 *p;
- RESERVE_SPACE(16);
- WRITE32(OP_COMMIT);
- WRITE64(args->offset);
- WRITE32(args->count);
+ p = reserve_space(xdr, 16);
+ *p++ = cpu_to_be32(OP_COMMIT);
+ p = xdr_encode_hyper(p, args->offset);
+ *p = cpu_to_be32(args->count);
hdr->nops++;
hdr->replen += decode_commit_maxsz;
}
@@ -935,30 +915,28 @@ static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *
{
__be32 *p;
- RESERVE_SPACE(8);
- WRITE32(OP_CREATE);
- WRITE32(create->ftype);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(OP_CREATE);
+ *p = cpu_to_be32(create->ftype);
switch (create->ftype) {
case NF4LNK:
- RESERVE_SPACE(4);
- WRITE32(create->u.symlink.len);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(create->u.symlink.len);
xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len);
break;
case NF4BLK: case NF4CHR:
- RESERVE_SPACE(8);
- WRITE32(create->u.device.specdata1);
- WRITE32(create->u.device.specdata2);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(create->u.device.specdata1);
+ *p = cpu_to_be32(create->u.device.specdata2);
break;
default:
break;
}
- RESERVE_SPACE(4 + create->name->len);
- WRITE32(create->name->len);
- WRITEMEM(create->name->name, create->name->len);
+ encode_string(xdr, create->name->len, create->name->name);
hdr->nops++;
hdr->replen += decode_create_maxsz;
@@ -969,10 +947,10 @@ static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct c
{
__be32 *p;
- RESERVE_SPACE(12);
- WRITE32(OP_GETATTR);
- WRITE32(1);
- WRITE32(bitmap);
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(OP_GETATTR);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(bitmap);
hdr->nops++;
hdr->replen += decode_getattr_maxsz;
}
@@ -981,11 +959,11 @@ static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm
{
__be32 *p;
- RESERVE_SPACE(16);
- WRITE32(OP_GETATTR);
- WRITE32(2);
- WRITE32(bm0);
- WRITE32(bm1);
+ p = reserve_space(xdr, 16);
+ *p++ = cpu_to_be32(OP_GETATTR);
+ *p++ = cpu_to_be32(2);
+ *p++ = cpu_to_be32(bm0);
+ *p = cpu_to_be32(bm1);
hdr->nops++;
hdr->replen += decode_getattr_maxsz;
}
@@ -1012,8 +990,8 @@ static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_GETFH);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_GETFH);
hdr->nops++;
hdr->replen += decode_getfh_maxsz;
}
@@ -1022,10 +1000,9 @@ static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct
{
__be32 *p;
- RESERVE_SPACE(8 + name->len);
- WRITE32(OP_LINK);
- WRITE32(name->len);
- WRITEMEM(name->name, name->len);
+ p = reserve_space(xdr, 8 + name->len);
+ *p++ = cpu_to_be32(OP_LINK);
+ xdr_encode_opaque(p, name->name, name->len);
hdr->nops++;
hdr->replen += decode_link_maxsz;
}
@@ -1052,27 +1029,27 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
{
__be32 *p;
- RESERVE_SPACE(32);
- WRITE32(OP_LOCK);
- WRITE32(nfs4_lock_type(args->fl, args->block));
- WRITE32(args->reclaim);
- WRITE64(args->fl->fl_start);
- WRITE64(nfs4_lock_length(args->fl));
- WRITE32(args->new_lock_owner);
+ p = reserve_space(xdr, 32);
+ *p++ = cpu_to_be32(OP_LOCK);
+ *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block));
+ *p++ = cpu_to_be32(args->reclaim);
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
+ *p = cpu_to_be32(args->new_lock_owner);
if (args->new_lock_owner){
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+32);
- WRITE32(args->open_seqid->sequence->counter);
- WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE);
- WRITE32(args->lock_seqid->sequence->counter);
- WRITE64(args->lock_owner.clientid);
- WRITE32(16);
- WRITEMEM("lock id:", 8);
- WRITE64(args->lock_owner.id);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+32);
+ *p++ = cpu_to_be32(args->open_seqid->sequence->counter);
+ p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(args->lock_seqid->sequence->counter);
+ p = xdr_encode_hyper(p, args->lock_owner.clientid);
+ *p++ = cpu_to_be32(16);
+ p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+ xdr_encode_hyper(p, args->lock_owner.id);
}
else {
- RESERVE_SPACE(NFS4_STATEID_SIZE+4);
- WRITEMEM(args->lock_stateid->data, NFS4_STATEID_SIZE);
- WRITE32(args->lock_seqid->sequence->counter);
+ p = reserve_space(xdr, NFS4_STATEID_SIZE+4);
+ p = xdr_encode_opaque_fixed(p, args->lock_stateid->data, NFS4_STATEID_SIZE);
+ *p = cpu_to_be32(args->lock_seqid->sequence->counter);
}
hdr->nops++;
hdr->replen += decode_lock_maxsz;
@@ -1082,15 +1059,15 @@ static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *ar
{
__be32 *p;
- RESERVE_SPACE(52);
- WRITE32(OP_LOCKT);
- WRITE32(nfs4_lock_type(args->fl, 0));
- WRITE64(args->fl->fl_start);
- WRITE64(nfs4_lock_length(args->fl));
- WRITE64(args->lock_owner.clientid);
- WRITE32(16);
- WRITEMEM("lock id:", 8);
- WRITE64(args->lock_owner.id);
+ p = reserve_space(xdr, 52);
+ *p++ = cpu_to_be32(OP_LOCKT);
+ *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
+ p = xdr_encode_hyper(p, args->lock_owner.clientid);
+ *p++ = cpu_to_be32(16);
+ p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+ xdr_encode_hyper(p, args->lock_owner.id);
hdr->nops++;
hdr->replen += decode_lockt_maxsz;
}
@@ -1099,13 +1076,13 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
{
__be32 *p;
- RESERVE_SPACE(12+NFS4_STATEID_SIZE+16);
- WRITE32(OP_LOCKU);
- WRITE32(nfs4_lock_type(args->fl, 0));
- WRITE32(args->seqid->sequence->counter);
- WRITEMEM(args->stateid->data, NFS4_STATEID_SIZE);
- WRITE64(args->fl->fl_start);
- WRITE64(nfs4_lock_length(args->fl));
+ p = reserve_space(xdr, 12+NFS4_STATEID_SIZE+16);
+ *p++ = cpu_to_be32(OP_LOCKU);
+ *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
+ *p++ = cpu_to_be32(args->seqid->sequence->counter);
+ p = xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE);
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ xdr_encode_hyper(p, nfs4_lock_length(args->fl));
hdr->nops++;
hdr->replen += decode_locku_maxsz;
}
@@ -1115,10 +1092,9 @@ static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struc
int len = name->len;
__be32 *p;
- RESERVE_SPACE(8 + len);
- WRITE32(OP_LOOKUP);
- WRITE32(len);
- WRITEMEM(name->name, len);
+ p = reserve_space(xdr, 8 + len);
+ *p++ = cpu_to_be32(OP_LOOKUP);
+ xdr_encode_opaque(p, name->name, len);
hdr->nops++;
hdr->replen += decode_lookup_maxsz;
}
@@ -1127,21 +1103,21 @@ static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
{
__be32 *p;
- RESERVE_SPACE(8);
+ p = reserve_space(xdr, 8);
switch (fmode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
- WRITE32(NFS4_SHARE_ACCESS_READ);
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ);
break;
case FMODE_WRITE:
- WRITE32(NFS4_SHARE_ACCESS_WRITE);
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE);
break;
case FMODE_READ|FMODE_WRITE:
- WRITE32(NFS4_SHARE_ACCESS_BOTH);
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH);
break;
default:
- WRITE32(0);
+ *p++ = cpu_to_be32(0);
}
- WRITE32(0); /* for linux, share_deny = 0 always */
+ *p = cpu_to_be32(0); /* for linux, share_deny = 0 always */
}
static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg)
@@ -1151,29 +1127,29 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
* opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
* owner 4 = 32
*/
- RESERVE_SPACE(8);
- WRITE32(OP_OPEN);
- WRITE32(arg->seqid->sequence->counter);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(OP_OPEN);
+ *p = cpu_to_be32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
- RESERVE_SPACE(28);
- WRITE64(arg->clientid);
- WRITE32(16);
- WRITEMEM("open id:", 8);
- WRITE64(arg->id);
+ p = reserve_space(xdr, 28);
+ p = xdr_encode_hyper(p, arg->clientid);
+ *p++ = cpu_to_be32(16);
+ p = xdr_encode_opaque_fixed(p, "open id:", 8);
+ xdr_encode_hyper(p, arg->id);
}
static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
- RESERVE_SPACE(4);
+ p = reserve_space(xdr, 4);
switch(arg->open_flags & O_EXCL) {
case 0:
- WRITE32(NFS4_CREATE_UNCHECKED);
+ *p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
encode_attrs(xdr, arg->u.attrs, arg->server);
break;
default:
- WRITE32(NFS4_CREATE_EXCLUSIVE);
+ *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
encode_nfs4_verifier(xdr, &arg->u.verifier);
}
}
@@ -1182,14 +1158,14 @@ static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *a
{
__be32 *p;
- RESERVE_SPACE(4);
+ p = reserve_space(xdr, 4);
switch (arg->open_flags & O_CREAT) {
case 0:
- WRITE32(NFS4_OPEN_NOCREATE);
+ *p = cpu_to_be32(NFS4_OPEN_NOCREATE);
break;
default:
BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
- WRITE32(NFS4_OPEN_CREATE);
+ *p = cpu_to_be32(NFS4_OPEN_CREATE);
encode_createmode(xdr, arg);
}
}
@@ -1198,16 +1174,16 @@ static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delega
{
__be32 *p;
- RESERVE_SPACE(4);
+ p = reserve_space(xdr, 4);
switch (delegation_type) {
case 0:
- WRITE32(NFS4_OPEN_DELEGATE_NONE);
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE);
break;
case FMODE_READ:
- WRITE32(NFS4_OPEN_DELEGATE_READ);
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ);
break;
case FMODE_WRITE|FMODE_READ:
- WRITE32(NFS4_OPEN_DELEGATE_WRITE);
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE);
break;
default:
BUG();
@@ -1218,8 +1194,8 @@ static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(NFS4_OPEN_CLAIM_NULL);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(NFS4_OPEN_CLAIM_NULL);
encode_string(xdr, name->len, name->name);
}
@@ -1227,8 +1203,8 @@ static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(NFS4_OPEN_CLAIM_PREVIOUS);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(NFS4_OPEN_CLAIM_PREVIOUS);
encode_delegation_type(xdr, type);
}
@@ -1236,9 +1212,9 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
- WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
- WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
+ xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
encode_string(xdr, name->len, name->name);
}
@@ -1267,10 +1243,10 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+4);
- WRITE32(OP_OPEN_CONFIRM);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
- WRITE32(arg->seqid->sequence->counter);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
+ *p++ = cpu_to_be32(OP_OPEN_CONFIRM);
+ p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
+ *p = cpu_to_be32(arg->seqid->sequence->counter);
hdr->nops++;
hdr->replen += decode_open_confirm_maxsz;
}
@@ -1279,10 +1255,10 @@ static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_close
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+4);
- WRITE32(OP_OPEN_DOWNGRADE);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
- WRITE32(arg->seqid->sequence->counter);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
+ *p++ = cpu_to_be32(OP_OPEN_DOWNGRADE);
+ p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
+ *p = cpu_to_be32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
hdr->nops++;
hdr->replen += decode_open_downgrade_maxsz;
@@ -1294,10 +1270,9 @@ encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hd
int len = fh->size;
__be32 *p;
- RESERVE_SPACE(8 + len);
- WRITE32(OP_PUTFH);
- WRITE32(len);
- WRITEMEM(fh->data, len);
+ p = reserve_space(xdr, 8 + len);
+ *p++ = cpu_to_be32(OP_PUTFH);
+ xdr_encode_opaque(p, fh->data, len);
hdr->nops++;
hdr->replen += decode_putfh_maxsz;
}
@@ -1306,8 +1281,8 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_PUTROOTFH);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_PUTROOTFH);
hdr->nops++;
hdr->replen += decode_putrootfh_maxsz;
}
@@ -1317,26 +1292,26 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context
nfs4_stateid stateid;
__be32 *p;
- RESERVE_SPACE(NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, NFS4_STATEID_SIZE);
if (ctx->state != NULL) {
nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner);
- WRITEMEM(stateid.data, NFS4_STATEID_SIZE);
+ xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
} else
- WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE);
+ xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
}
static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_READ);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_READ);
encode_stateid(xdr, args->context);
- RESERVE_SPACE(12);
- WRITE64(args->offset);
- WRITE32(args->count);
+ p = reserve_space(xdr, 12);
+ p = xdr_encode_hyper(p, args->offset);
+ *p = cpu_to_be32(args->count);
hdr->nops++;
hdr->replen += decode_read_maxsz;
}
@@ -1349,20 +1324,20 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
};
__be32 *p;
- RESERVE_SPACE(12+NFS4_VERIFIER_SIZE+20);
- WRITE32(OP_READDIR);
- WRITE64(readdir->cookie);
- WRITEMEM(readdir->verifier.data, NFS4_VERIFIER_SIZE);
- WRITE32(readdir->count >> 1); /* We're not doing readdirplus */
- WRITE32(readdir->count);
- WRITE32(2);
+ p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20);
+ *p++ = cpu_to_be32(OP_READDIR);
+ p = xdr_encode_hyper(p, readdir->cookie);
+ p = xdr_encode_opaque_fixed(p, readdir->verifier.data, NFS4_VERIFIER_SIZE);
+ *p++ = cpu_to_be32(readdir->count >> 1); /* We're not doing readdirplus */
+ *p++ = cpu_to_be32(readdir->count);
+ *p++ = cpu_to_be32(2);
/* Switch to mounted_on_fileid if the server supports it */
if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
attrs[0] &= ~FATTR4_WORD0_FILEID;
else
attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
- WRITE32(attrs[0] & readdir->bitmask[0]);
- WRITE32(attrs[1] & readdir->bitmask[1]);
+ *p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]);
+ *p = cpu_to_be32(attrs[1] & readdir->bitmask[1]);
hdr->nops++;
hdr->replen += decode_readdir_maxsz;
dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
@@ -1378,8 +1353,8 @@ static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_READLINK);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_READLINK);
hdr->nops++;
hdr->replen += decode_readlink_maxsz;
}
@@ -1388,10 +1363,9 @@ static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struc
{
__be32 *p;
- RESERVE_SPACE(8 + name->len);
- WRITE32(OP_REMOVE);
- WRITE32(name->len);
- WRITEMEM(name->name, name->len);
+ p = reserve_space(xdr, 8 + name->len);
+ *p++ = cpu_to_be32(OP_REMOVE);
+ xdr_encode_opaque(p, name->name, name->len);
hdr->nops++;
hdr->replen += decode_remove_maxsz;
}
@@ -1400,14 +1374,10 @@ static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, co
{
__be32 *p;
- RESERVE_SPACE(8 + oldname->len);
- WRITE32(OP_RENAME);
- WRITE32(oldname->len);
- WRITEMEM(oldname->name, oldname->len);
-
- RESERVE_SPACE(4 + newname->len);
- WRITE32(newname->len);
- WRITEMEM(newname->name, newname->len);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_RENAME);
+ encode_string(xdr, oldname->len, oldname->name);
+ encode_string(xdr, newname->len, newname->name);
hdr->nops++;
hdr->replen += decode_rename_maxsz;
}
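encode_rename() above now hands the two name fields to encode_string() instead of open-coding RESERVE_SPACE + WRITE32(len) + WRITEMEM. As a hedged sketch of what such a string/opaque encoder amounts to on the wire (a length word, the bytes, zero padding to a 4-byte boundary); the sk_* names are local stand-ins, not the kernel helpers.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

struct sk_stream {
	uint32_t buf[64];
	uint32_t *p;		/* callers set p = buf before encoding */
};

static uint32_t *sk_reserve(struct sk_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;

	xdr->p += (nbytes + 3) >> 2;
	return p;
}

/* Length word followed by the bytes, zero-padded to a 4-byte boundary. */
static uint32_t *sk_encode_opaque(uint32_t *p, const void *data, uint32_t len)
{
	*p++ = htonl(len);
	memcpy(p, data, len);
	if (len & 3)
		memset((char *)p + len, 0, 4 - (len & 3));
	return p + ((len + 3) >> 2);
}

/* A string encoder in the new style is essentially reserve + encode_opaque. */
void sk_encode_string(struct sk_stream *xdr, uint32_t len, const char *str)
{
	uint32_t *p = sk_reserve(xdr, 4 + ((len + 3) & ~3u));

	sk_encode_opaque(p, str, len);
}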
@@ -1416,9 +1386,9 @@ static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client
{
__be32 *p;
- RESERVE_SPACE(12);
- WRITE32(OP_RENEW);
- WRITE64(client_stateid->cl_clientid);
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(OP_RENEW);
+ xdr_encode_hyper(p, client_stateid->cl_clientid);
hdr->nops++;
hdr->replen += decode_renew_maxsz;
}
@@ -1428,8 +1398,8 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_RESTOREFH);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_RESTOREFH);
hdr->nops++;
hdr->replen += decode_restorefh_maxsz;
}
@@ -1439,16 +1409,16 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
- WRITE32(OP_SETATTR);
- WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE);
- RESERVE_SPACE(2*4);
- WRITE32(1);
- WRITE32(FATTR4_WORD0_ACL);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_SETATTR);
+ xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 2*4);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(FATTR4_WORD0_ACL);
if (arg->acl_len % 4)
return -EINVAL;
- RESERVE_SPACE(4);
- WRITE32(arg->acl_len);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
hdr->nops++;
hdr->replen += decode_setacl_maxsz;
@@ -1460,8 +1430,8 @@ encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_SAVEFH);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_SAVEFH);
hdr->nops++;
hdr->replen += decode_savefh_maxsz;
}
@@ -1470,9 +1440,9 @@ static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
- WRITE32(OP_SETATTR);
- WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_SETATTR);
+ xdr_encode_opaque_fixed(p, arg->stateid.data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_setattr_maxsz;
encode_attrs(xdr, arg->iap, server);
@@ -1482,17 +1452,17 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie
{
__be32 *p;
- RESERVE_SPACE(4 + NFS4_VERIFIER_SIZE);
- WRITE32(OP_SETCLIENTID);
- WRITEMEM(setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE);
+ p = reserve_space(xdr, 4 + NFS4_VERIFIER_SIZE);
+ *p++ = cpu_to_be32(OP_SETCLIENTID);
+ xdr_encode_opaque_fixed(p, setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE);
encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
- RESERVE_SPACE(4);
- WRITE32(setclientid->sc_prog);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(setclientid->sc_prog);
encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid);
encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr);
- RESERVE_SPACE(4);
- WRITE32(setclientid->sc_cb_ident);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(setclientid->sc_cb_ident);
hdr->nops++;
hdr->replen += decode_setclientid_maxsz;
}
@@ -1501,10 +1471,10 @@ static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_
{
__be32 *p;
- RESERVE_SPACE(12 + NFS4_VERIFIER_SIZE);
- WRITE32(OP_SETCLIENTID_CONFIRM);
- WRITE64(client_state->cl_clientid);
- WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
+ p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE);
+ *p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM);
+ p = xdr_encode_hyper(p, client_state->cl_clientid);
+ xdr_encode_opaque_fixed(p, client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
hdr->replen += decode_setclientid_confirm_maxsz;
}
@@ -1513,15 +1483,15 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg
{
__be32 *p;
- RESERVE_SPACE(4);
- WRITE32(OP_WRITE);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_WRITE);
encode_stateid(xdr, args->context);
- RESERVE_SPACE(16);
- WRITE64(args->offset);
- WRITE32(args->stable);
- WRITE32(args->count);
+ p = reserve_space(xdr, 16);
+ p = xdr_encode_hyper(p, args->offset);
+ *p++ = cpu_to_be32(args->stable);
+ *p = cpu_to_be32(args->count);
xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
hdr->nops++;
@@ -1532,10 +1502,10 @@ static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *state
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
- WRITE32(OP_DELEGRETURN);
- WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_DELEGRETURN);
+ xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
hdr->replen += decode_delegreturn_maxsz;
}
@@ -1548,16 +1518,16 @@ static void encode_exchange_id(struct xdr_stream *xdr,
{
__be32 *p;
- RESERVE_SPACE(4 + sizeof(args->verifier->data));
- WRITE32(OP_EXCHANGE_ID);
- WRITEMEM(args->verifier->data, sizeof(args->verifier->data));
+ p = reserve_space(xdr, 4 + sizeof(args->verifier->data));
+ *p++ = cpu_to_be32(OP_EXCHANGE_ID);
+ xdr_encode_opaque_fixed(p, args->verifier->data, sizeof(args->verifier->data));
encode_string(xdr, args->id_len, args->id);
- RESERVE_SPACE(12);
- WRITE32(args->flags);
- WRITE32(0); /* zero length state_protect4_a */
- WRITE32(0); /* zero length implementation id array */
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(args->flags);
+ *p++ = cpu_to_be32(0); /* zero length state_protect4_a */
+ *p = cpu_to_be32(0); /* zero length implementation id array */
hdr->nops++;
hdr->replen += decode_exchange_id_maxsz;
}
@@ -1571,55 +1541,43 @@ static void encode_create_session(struct xdr_stream *xdr,
uint32_t len;
struct nfs_client *clp = args->client;
- RESERVE_SPACE(4);
- WRITE32(OP_CREATE_SESSION);
-
- RESERVE_SPACE(8);
- WRITE64(clp->cl_ex_clid);
+ len = scnprintf(machine_name, sizeof(machine_name), "%s",
+ clp->cl_ipaddr);
- RESERVE_SPACE(8);
- WRITE32(clp->cl_seqid); /*Sequence id */
- WRITE32(args->flags); /*flags */
+ p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
+ *p++ = cpu_to_be32(OP_CREATE_SESSION);
+ p = xdr_encode_hyper(p, clp->cl_ex_clid);
+ *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
+ *p++ = cpu_to_be32(args->flags); /*flags */
- RESERVE_SPACE(2*28); /* 2 channel_attrs */
/* Fore Channel */
- WRITE32(args->fc_attrs.headerpadsz); /* header padding size */
- WRITE32(args->fc_attrs.max_rqst_sz); /* max req size */
- WRITE32(args->fc_attrs.max_resp_sz); /* max resp size */
- WRITE32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */
- WRITE32(args->fc_attrs.max_ops); /* max operations */
- WRITE32(args->fc_attrs.max_reqs); /* max requests */
- WRITE32(0); /* rdmachannel_attrs */
+ *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+ *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
+ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
+ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */
+ *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */
+ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
/* Back Channel */
- WRITE32(args->fc_attrs.headerpadsz); /* header padding size */
- WRITE32(args->bc_attrs.max_rqst_sz); /* max req size */
- WRITE32(args->bc_attrs.max_resp_sz); /* max resp size */
- WRITE32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
- WRITE32(args->bc_attrs.max_ops); /* max operations */
- WRITE32(args->bc_attrs.max_reqs); /* max requests */
- WRITE32(0); /* rdmachannel_attrs */
-
- RESERVE_SPACE(4);
- WRITE32(args->cb_program); /* cb_program */
-
- RESERVE_SPACE(4); /* # of security flavors */
- WRITE32(1);
-
- RESERVE_SPACE(4);
- WRITE32(RPC_AUTH_UNIX); /* auth_sys */
+ *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ *p++ = cpu_to_be32(args->bc_attrs.max_ops); /* max operations */
+ *p++ = cpu_to_be32(args->bc_attrs.max_reqs); /* max requests */
+ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
+
+ *p++ = cpu_to_be32(args->cb_program); /* cb_program */
+ *p++ = cpu_to_be32(1);
+ *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */
/* authsys_parms rfc1831 */
- RESERVE_SPACE(4);
- WRITE32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
- len = scnprintf(machine_name, sizeof(machine_name), "%s",
- clp->cl_ipaddr);
- RESERVE_SPACE(16 + len);
- WRITE32(len);
- WRITEMEM(machine_name, len);
- WRITE32(0); /* UID */
- WRITE32(0); /* GID */
- WRITE32(0); /* No more gids */
+ *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
+ p = xdr_encode_opaque(p, machine_name, len);
+ *p++ = cpu_to_be32(0); /* UID */
+ *p++ = cpu_to_be32(0); /* GID */
+ *p = cpu_to_be32(0); /* No more gids */
hdr->nops++;
hdr->replen += decode_create_session_maxsz;
}
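The single reserve_space(xdr, 20 + 2*28 + 20 + len + 12) above replaces what used to be roughly ten separate RESERVE_SPACE calls in encode_create_session(). One way to read that size, as a sketch only (the grouping and constant names below are this edit's own, not the kernel's):

/* 20  = opcode (4) + clientid (8) + seqid (4) + flags (4)
 * 28  = one channel_attrs block: 7 words per channel, fore and back
 * 20  = cb_program (4) + flavor count (4) + AUTH_UNIX (4) + stamp (4)
 *       + machine-name length word (4)
 * len = machine-name bytes
 * 12  = uid (4) + gid (4) + "no more gids" (4)
 */
enum {
	SK_OP_AND_CLIENT = 20,
	SK_CHANNEL_ATTRS = 28,
	SK_PROG_AND_SEC  = 20,
	SK_TRAILING_IDS  = 12,
};

unsigned int sk_create_session_size(unsigned int name_len)
{
	return SK_OP_AND_CLIENT + 2 * SK_CHANNEL_ATTRS +
	       SK_PROG_AND_SEC + name_len + SK_TRAILING_IDS;
}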
@@ -1629,9 +1587,9 @@ static void encode_destroy_session(struct xdr_stream *xdr,
struct compound_hdr *hdr)
{
__be32 *p;
- RESERVE_SPACE(4 + NFS4_MAX_SESSIONID_LEN);
- WRITE32(OP_DESTROY_SESSION);
- WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN);
+ *p++ = cpu_to_be32(OP_DESTROY_SESSION);
+ xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
hdr->nops++;
hdr->replen += decode_destroy_session_maxsz;
}
@@ -1655,8 +1613,8 @@ static void encode_sequence(struct xdr_stream *xdr,
WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
slot = tp->slots + args->sa_slotid;
- RESERVE_SPACE(4);
- WRITE32(OP_SEQUENCE);
+ p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN + 16);
+ *p++ = cpu_to_be32(OP_SEQUENCE);
/*
* Sessionid + seqid + slotid + max slotid + cache_this
@@ -1670,12 +1628,11 @@ static void encode_sequence(struct xdr_stream *xdr,
((u32 *)session->sess_id.data)[3],
slot->seq_nr, args->sa_slotid,
tp->highest_used_slotid, args->sa_cache_this);
- RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 16);
- WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
- WRITE32(slot->seq_nr);
- WRITE32(args->sa_slotid);
- WRITE32(tp->highest_used_slotid);
- WRITE32(args->sa_cache_this);
+ p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ *p++ = cpu_to_be32(slot->seq_nr);
+ *p++ = cpu_to_be32(args->sa_slotid);
+ *p++ = cpu_to_be32(tp->highest_used_slotid);
+ *p = cpu_to_be32(args->sa_cache_this);
hdr->nops++;
hdr->replen += decode_sequence_maxsz;
#endif /* CONFIG_NFS_V4_1 */
@@ -2466,68 +2423,53 @@ static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
}
#endif /* CONFIG_NFS_V4_1 */
-/*
- * START OF "GENERIC" DECODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
-#define READ32(x) (x) = ntohl(*p++)
-#define READ64(x) do { \
- (x) = (u64)ntohl(*p++) << 32; \
- (x) |= ntohl(*p++); \
-} while (0)
-#define READTIME(x) do { \
- p++; \
- (x.tv_sec) = ntohl(*p++); \
- (x.tv_nsec) = ntohl(*p++); \
-} while (0)
-#define COPYMEM(x,nbytes) do { \
- memcpy((x), p, nbytes); \
- p += XDR_QUADLEN(nbytes); \
-} while (0)
-
-#define READ_BUF(nbytes) do { \
- p = xdr_inline_decode(xdr, nbytes); \
- if (unlikely(!p)) { \
- dprintk("nfs: %s: prematurely hit end of receive" \
- " buffer\n", __func__); \
- dprintk("nfs: %s: xdr->p=%p, bytes=%u, xdr->end=%p\n", \
- __func__, xdr->p, nbytes, xdr->end); \
- return -EIO; \
- } \
-} while (0)
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("nfs: %s: prematurely hit end of receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string)
{
__be32 *p;
- READ_BUF(4);
- READ32(*len);
- READ_BUF(*len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, *len);
+ if (unlikely(!p))
+ goto out_overflow;
*string = (char *)p;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
__be32 *p;
- READ_BUF(8);
- READ32(hdr->status);
- READ32(hdr->taglen);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ hdr->status = be32_to_cpup(p++);
+ hdr->taglen = be32_to_cpup(p);
- READ_BUF(hdr->taglen + 4);
+ p = xdr_inline_decode(xdr, hdr->taglen + 4);
+ if (unlikely(!p))
+ goto out_overflow;
hdr->tag = (char *)p;
p += XDR_QUADLEN(hdr->taglen);
- READ32(hdr->nops);
+ hdr->nops = be32_to_cpup(p);
if (unlikely(hdr->nops < 1))
return nfs4_stat_to_errno(hdr->status);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
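On the decode side the READ_BUF/READ32 macros give way to explicit xdr_inline_decode() calls that can fail, a goto out_overflow tail, and print_overflow_msg(), as in decode_compound_hdr() above. A minimal user-space sketch of that control flow, assuming nothing beyond libc; the sk_* names and the -5 error value are stand-ins (the kernel path returns -EIO).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl() */

struct sk_stream {
	uint32_t *p;
	uint32_t *end;
};

/* Return a pointer to nbytes of data, or NULL if the buffer is exhausted. */
static uint32_t *sk_inline_decode(struct sk_stream *xdr, size_t nbytes)
{
	size_t words = (nbytes + 3) >> 2;
	uint32_t *p = xdr->p;

	if ((size_t)(xdr->end - xdr->p) < words)
		return NULL;
	xdr->p += words;
	return p;
}

/* Decode "status, taglen" the way the patched decode_compound_hdr() starts. */
int sk_decode_hdr(struct sk_stream *xdr, uint32_t *status, uint32_t *taglen)
{
	uint32_t *p = sk_inline_decode(xdr, 8);

	if (!p)
		goto out_overflow;
	*status = ntohl(*p++);
	*taglen = ntohl(*p);
	return 0;
out_overflow:
	fprintf(stderr, "prematurely hit end of receive buffer\n");
	return -5;	/* stand-in for -EIO */
}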
static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
@@ -2536,18 +2478,23 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
uint32_t opnum;
int32_t nfserr;
- READ_BUF(8);
- READ32(opnum);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ opnum = be32_to_cpup(p++);
if (opnum != expected) {
dprintk("nfs: Server returned operation"
" %d but we issued a request for %d\n",
opnum, expected);
return -EIO;
}
- READ32(nfserr);
+ nfserr = be32_to_cpup(p);
if (nfserr != NFS_OK)
return nfs4_stat_to_errno(nfserr);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
/* Dummy routine */
@@ -2557,8 +2504,11 @@ static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp)
unsigned int strlen;
char *str;
- READ_BUF(12);
- return decode_opaque_inline(xdr, &strlen, &str);
+ p = xdr_inline_decode(xdr, 12);
+ if (likely(p))
+ return decode_opaque_inline(xdr, &strlen, &str);
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
@@ -2566,27 +2516,39 @@ static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
uint32_t bmlen;
__be32 *p;
- READ_BUF(4);
- READ32(bmlen);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
bitmap[0] = bitmap[1] = 0;
- READ_BUF((bmlen << 2));
+ p = xdr_inline_decode(xdr, (bmlen << 2));
+ if (unlikely(!p))
+ goto out_overflow;
if (bmlen > 0) {
- READ32(bitmap[0]);
+ bitmap[0] = be32_to_cpup(p++);
if (bmlen > 1)
- READ32(bitmap[1]);
+ bitmap[1] = be32_to_cpup(p);
}
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep)
{
__be32 *p;
- READ_BUF(4);
- READ32(*attrlen);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *attrlen = be32_to_cpup(p);
*savep = xdr->p;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
@@ -2609,8 +2571,10 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) {
- READ_BUF(4);
- READ32(*type);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *type = be32_to_cpup(p);
if (*type < NF4REG || *type > NF4NAMEDATTR) {
dprintk("%s: bad type %d\n", __func__, *type);
return -EIO;
@@ -2620,6 +2584,9 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
}
dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type]);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change)
@@ -2631,14 +2598,19 @@ static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) {
- READ_BUF(8);
- READ64(*change);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, change);
bitmap[0] &= ~FATTR4_WORD0_CHANGE;
ret = NFS_ATTR_FATTR_CHANGE;
}
dprintk("%s: change attribute=%Lu\n", __func__,
(unsigned long long)*change);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size)
@@ -2650,13 +2622,18 @@ static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *
if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) {
- READ_BUF(8);
- READ64(*size);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, size);
bitmap[0] &= ~FATTR4_WORD0_SIZE;
ret = NFS_ATTR_FATTR_SIZE;
}
dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
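Many of the attribute decoders above pull a 64-bit value with xdr_decode_hyper() once the inline decode succeeds. As a sketch, an XDR hyper is simply two big-endian words, high word first; sk_decode_hyper() below is a stand-in, not the kernel helper.

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

/* Read two big-endian words into a 64-bit value and advance the cursor. */
uint32_t *sk_decode_hyper(uint32_t *p, uint64_t *val)
{
	*val = ((uint64_t)ntohl(p[0]) << 32) | ntohl(p[1]);
	return p + 2;
}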
static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2667,12 +2644,17 @@ static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, ui
if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) {
- READ_BUF(4);
- READ32(*res);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
}
dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2683,12 +2665,17 @@ static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap,
if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) {
- READ_BUF(4);
- READ32(*res);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
}
dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid)
@@ -2701,9 +2688,11 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FSID)) {
- READ_BUF(16);
- READ64(fsid->major);
- READ64(fsid->minor);
+ p = xdr_inline_decode(xdr, 16);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &fsid->major);
+ xdr_decode_hyper(p, &fsid->minor);
bitmap[0] &= ~FATTR4_WORD0_FSID;
ret = NFS_ATTR_FATTR_FSID;
}
@@ -2711,6 +2700,9 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
(unsigned long long)fsid->major,
(unsigned long long)fsid->minor);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2721,12 +2713,17 @@ static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) {
- READ_BUF(4);
- READ32(*res);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
}
dprintk("%s: file size=%u\n", __func__, (unsigned int)*res);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2737,12 +2734,17 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
- READ_BUF(4);
- READ32(*res);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
}
dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
@@ -2754,13 +2756,18 @@ static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
- READ_BUF(8);
- READ64(*fileid);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, fileid);
bitmap[0] &= ~FATTR4_WORD0_FILEID;
ret = NFS_ATTR_FATTR_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
@@ -2772,13 +2779,18 @@ static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitma
if (unlikely(bitmap[1] & (FATTR4_WORD1_MOUNTED_ON_FILEID - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) {
- READ_BUF(8);
- READ64(*fileid);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, fileid);
bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
ret = NFS_ATTR_FATTR_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2790,12 +2802,17 @@ static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL;
}
dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2807,12 +2824,17 @@ static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_FREE;
}
dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2824,12 +2846,17 @@ static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
}
dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
@@ -2838,8 +2865,10 @@ static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
__be32 *p;
int status = 0;
- READ_BUF(4);
- READ32(n);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ n = be32_to_cpup(p);
if (n == 0)
goto root_path;
dprintk("path ");
@@ -2873,6 +2902,9 @@ out_eio:
dprintk(" status %d", status);
status = -EIO;
goto out;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res)
@@ -2890,8 +2922,10 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
status = decode_pathname(xdr, &res->fs_path);
if (unlikely(status != 0))
goto out;
- READ_BUF(4);
- READ32(n);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ n = be32_to_cpup(p);
if (n <= 0)
goto out_eio;
res->nlocations = 0;
@@ -2899,8 +2933,10 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
u32 m;
struct nfs4_fs_location *loc = &res->locations[res->nlocations];
- READ_BUF(4);
- READ32(m);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ m = be32_to_cpup(p);
loc->nservers = 0;
dprintk("%s: servers ", __func__);
@@ -2939,6 +2975,8 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
out:
dprintk("%s: fs_locations done, error = %d\n", __func__, status);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
out_eio:
status = -EIO;
goto out;
@@ -2953,12 +2991,17 @@ static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE;
}
dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink)
@@ -2970,12 +3013,17 @@ static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) {
- READ_BUF(4);
- READ32(*maxlink);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *maxlink = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_MAXLINK;
}
dprintk("%s: maxlink=%u\n", __func__, *maxlink);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname)
@@ -2987,12 +3035,17 @@ static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) {
- READ_BUF(4);
- READ32(*maxname);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *maxname = be32_to_cpup(p);
bitmap[0] &= ~FATTR4_WORD0_MAXNAME;
}
dprintk("%s: maxname=%u\n", __func__, *maxname);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3005,8 +3058,10 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) {
uint64_t maxread;
- READ_BUF(8);
- READ64(maxread);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &maxread);
if (maxread > 0x7FFFFFFF)
maxread = 0x7FFFFFFF;
*res = (uint32_t)maxread;
@@ -3014,6 +3069,9 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
}
dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3026,8 +3084,10 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) {
uint64_t maxwrite;
- READ_BUF(8);
- READ64(maxwrite);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &maxwrite);
if (maxwrite > 0x7FFFFFFF)
maxwrite = 0x7FFFFFFF;
*res = (uint32_t)maxwrite;
@@ -3035,6 +3095,9 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
}
dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode)
@@ -3047,14 +3110,19 @@ static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *m
if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_MODE)) {
- READ_BUF(4);
- READ32(tmp);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ tmp = be32_to_cpup(p);
*mode = tmp & ~S_IFMT;
bitmap[1] &= ~FATTR4_WORD1_MODE;
ret = NFS_ATTR_FATTR_MODE;
}
dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink)
@@ -3066,16 +3134,22 @@ static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t
if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) {
- READ_BUF(4);
- READ32(*nlink);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *nlink = be32_to_cpup(p);
bitmap[1] &= ~FATTR4_WORD1_NUMLINKS;
ret = NFS_ATTR_FATTR_NLINK;
}
dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *uid)
+static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs_client *clp, uint32_t *uid, int may_sleep)
{
uint32_t len;
__be32 *p;
@@ -3085,10 +3159,16 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) {
- READ_BUF(4);
- READ32(len);
- READ_BUF(len);
- if (len < XDR_MAX_NETOBJ) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!may_sleep) {
+ /* do nothing */
+ } else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_name_to_uid(clp, (char *)p, len, uid) == 0)
ret = NFS_ATTR_FATTR_OWNER;
else
@@ -3101,9 +3181,13 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
}
dprintk("%s: uid=%d\n", __func__, (int)*uid);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *gid)
+static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs_client *clp, uint32_t *gid, int may_sleep)
{
uint32_t len;
__be32 *p;
@@ -3113,10 +3197,16 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) {
- READ_BUF(4);
- READ32(len);
- READ_BUF(len);
- if (len < XDR_MAX_NETOBJ) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!may_sleep) {
+ /* do nothing */
+ } else if (len < XDR_MAX_NETOBJ) {
if (nfs_map_group_to_gid(clp, (char *)p, len, gid) == 0)
ret = NFS_ATTR_FATTR_GROUP;
else
@@ -3129,6 +3219,9 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
}
dprintk("%s: gid=%d\n", __func__, (int)*gid);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
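decode_attr_owner() and decode_attr_group() also gain a may_sleep argument above: when the caller cannot block, the name-to-id mapping is skipped and only the XDR words are consumed. A hedged user-space sketch of that gate follows; sk_map_name_to_uid() is a trivial stand-in for the idmapper lookup, and the "nobody" default is an assumption of this sketch, not a quote from the kernel.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SK_NOBODY_UID 65534u

/* Trivial stand-in for the (potentially blocking) idmapper: only "root" resolves. */
static int sk_map_name_to_uid(const char *name, size_t len, uint32_t *uid)
{
	if (len == 4 && memcmp(name, "root", 4) == 0) {
		*uid = 0;
		return 0;
	}
	return -1;
}

/* Skip the blocking lookup when the caller cannot sleep; keep the default id. */
void sk_decode_owner(const char *name, size_t len, uint32_t *uid, int may_sleep)
{
	*uid = SK_NOBODY_UID;
	if (!may_sleep)
		return;
	if (sk_map_name_to_uid(name, len, uid) != 0)
		*uid = SK_NOBODY_UID;
}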
static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev)
@@ -3143,9 +3236,11 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) {
dev_t tmp;
- READ_BUF(8);
- READ32(major);
- READ32(minor);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ major = be32_to_cpup(p++);
+ minor = be32_to_cpup(p);
tmp = MKDEV(major, minor);
if (MAJOR(tmp) == major && MINOR(tmp) == minor)
*rdev = tmp;
@@ -3154,6 +3249,9 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
}
dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3165,12 +3263,17 @@ static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL;
}
dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3182,12 +3285,17 @@ static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE;
}
dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3199,12 +3307,17 @@ static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) {
- READ_BUF(8);
- READ64(*res);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL;
}
dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used)
@@ -3216,14 +3329,19 @@ static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint
if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) {
- READ_BUF(8);
- READ64(*used);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, used);
bitmap[1] &= ~FATTR4_WORD1_SPACE_USED;
ret = NFS_ATTR_FATTR_SPACE_USED;
}
dprintk("%s: space used=%Lu\n", __func__,
(unsigned long long)*used);
return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
@@ -3232,12 +3350,17 @@ static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
uint64_t sec;
uint32_t nsec;
- READ_BUF(12);
- READ64(sec);
- READ32(nsec);
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &sec);
+ nsec = be32_to_cpup(p);
time->tv_sec = (time_t)sec;
time->tv_nsec = (long)nsec;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
@@ -3315,11 +3438,16 @@ static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *c
{
__be32 *p;
- READ_BUF(20);
- READ32(cinfo->atomic);
- READ64(cinfo->before);
- READ64(cinfo->after);
+ p = xdr_inline_decode(xdr, 20);
+ if (unlikely(!p))
+ goto out_overflow;
+ cinfo->atomic = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &cinfo->before);
+ xdr_decode_hyper(p, &cinfo->after);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
@@ -3331,40 +3459,62 @@ static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
status = decode_op_hdr(xdr, OP_ACCESS);
if (status)
return status;
- READ_BUF(8);
- READ32(supp);
- READ32(acc);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ supp = be32_to_cpup(p++);
+ acc = be32_to_cpup(p);
access->supported = supp;
access->access = acc;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
-static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
+static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
{
__be32 *p;
+
+ p = xdr_inline_decode(xdr, len);
+ if (likely(p)) {
+ memcpy(buf, p, len);
+ return 0;
+ }
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
+}
+
+static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
+{
int status;
status = decode_op_hdr(xdr, OP_CLOSE);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
- if (status)
- return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
- return 0;
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
+}
+
+static int decode_verifier(struct xdr_stream *xdr, void *verifier)
+{
+ return decode_opaque_fixed(xdr, verifier, 8);
}
static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_COMMIT);
- if (status)
- return status;
- READ_BUF(8);
- COPYMEM(res->verf->verifier, 8);
- return 0;
+ if (!status)
+ status = decode_verifier(xdr, res->verf->verifier);
+ return status;
}
static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -3378,10 +3528,16 @@ static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
return status;
if ((status = decode_change_info(xdr, cinfo)))
return status;
- READ_BUF(4);
- READ32(bmlen);
- READ_BUF(bmlen << 2);
- return 0;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (likely(p))
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
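Several decoders in the hunks above (CLOSE, COMMIT, and later OPEN, OPEN_CONFIRM, OPEN_DOWNGRADE, LOCK, LOCKU, READDIR) drop their private READ_BUF/COPYMEM pairs in favour of the new decode_opaque_fixed()/decode_stateid()/decode_verifier() helpers. A self-contained sketch of that helper shape, assuming a simple byte-cursor stream; the sk_* names and SK_STATEID_SIZE are stand-ins.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SK_STATEID_SIZE 16

struct sk_stream {
	uint8_t *p;
	uint8_t *end;
};

struct sk_stateid {
	uint8_t data[SK_STATEID_SIZE];
};

/* Copy a fixed-length opaque out of the stream; -5 stands in for -EIO. */
static int sk_decode_opaque_fixed(struct sk_stream *xdr, void *buf, size_t len)
{
	if ((size_t)(xdr->end - xdr->p) < len)
		return -5;
	memcpy(buf, xdr->p, len);
	xdr->p += (len + 3) & ~3UL;	/* XDR pads opaques to 4-byte multiples */
	return 0;
}

int sk_decode_stateid(struct sk_stream *xdr, struct sk_stateid *stateid)
{
	return sk_decode_opaque_fixed(xdr, stateid->data, SK_STATEID_SIZE);
}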
static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res)
@@ -3466,7 +3622,8 @@ xdr_error:
return status;
}
-static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, const struct nfs_server *server)
+static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
+ const struct nfs_server *server, int may_sleep)
{
__be32 *savep;
uint32_t attrlen,
@@ -3538,12 +3695,14 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, cons
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_owner(xdr, bitmap, server->nfs_client, &fattr->uid);
+ status = decode_attr_owner(xdr, bitmap, server->nfs_client,
+ &fattr->uid, may_sleep);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_group(xdr, bitmap, server->nfs_client, &fattr->gid);
+ status = decode_attr_group(xdr, bitmap, server->nfs_client,
+ &fattr->gid, may_sleep);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
@@ -3633,14 +3792,21 @@ static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh)
if (status)
return status;
- READ_BUF(4);
- READ32(len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
if (len > NFS4_FHSIZE)
return -EIO;
fh->size = len;
- READ_BUF(len);
- COPYMEM(fh->data, len);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ memcpy(fh->data, p, len);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -3662,10 +3828,12 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
__be32 *p;
uint32_t namelen, type;
- READ_BUF(32);
- READ64(offset);
- READ64(length);
- READ32(type);
+ p = xdr_inline_decode(xdr, 32);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &offset);
+ p = xdr_decode_hyper(p, &length);
+ type = be32_to_cpup(p++);
if (fl != NULL) {
fl->fl_start = (loff_t)offset;
fl->fl_end = fl->fl_start + (loff_t)length - 1;
@@ -3676,23 +3844,27 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
fl->fl_type = F_RDLCK;
fl->fl_pid = 0;
}
- READ64(clientid);
- READ32(namelen);
- READ_BUF(namelen);
- return -NFS4ERR_DENIED;
+ p = xdr_decode_hyper(p, &clientid);
+ namelen = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, namelen);
+ if (likely(p))
+ return -NFS4ERR_DENIED;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_LOCK);
if (status == -EIO)
goto out;
if (status == 0) {
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
+ status = decode_stateid(xdr, &res->stateid);
+ if (unlikely(status))
+ goto out;
} else if (status == -NFS4ERR_DENIED)
status = decode_lock_denied(xdr, NULL);
if (res->open_seqid != NULL)
@@ -3713,16 +3885,13 @@ static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockt_res *res)
static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_LOCKU);
if (status != -EIO)
nfs_increment_lock_seqid(status, res->seqid);
- if (status == 0) {
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
- }
+ if (status == 0)
+ status = decode_stateid(xdr, &res->stateid);
return status;
}
@@ -3737,34 +3906,46 @@ static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize)
__be32 *p;
uint32_t limit_type, nblocks, blocksize;
- READ_BUF(12);
- READ32(limit_type);
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ limit_type = be32_to_cpup(p++);
switch (limit_type) {
case 1:
- READ64(*maxsize);
+ xdr_decode_hyper(p, maxsize);
break;
case 2:
- READ32(nblocks);
- READ32(blocksize);
+ nblocks = be32_to_cpup(p++);
+ blocksize = be32_to_cpup(p);
*maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
}
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
{
__be32 *p;
uint32_t delegation_type;
+ int status;
- READ_BUF(4);
- READ32(delegation_type);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ delegation_type = be32_to_cpup(p);
if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
res->delegation_type = 0;
return 0;
}
- READ_BUF(NFS4_STATEID_SIZE+4);
- COPYMEM(res->delegation.data, NFS4_STATEID_SIZE);
- READ32(res->do_recall);
+ status = decode_stateid(xdr, &res->delegation);
+ if (unlikely(status))
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->do_recall = be32_to_cpup(p);
switch (delegation_type) {
case NFS4_OPEN_DELEGATE_READ:
@@ -3776,6 +3957,9 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
return -EIO;
}
return decode_ace(xdr, NULL, res->server->nfs_client);
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
@@ -3787,23 +3971,27 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
status = decode_op_hdr(xdr, OP_OPEN);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
- if (status)
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ if (unlikely(status))
return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
decode_change_info(xdr, &res->cinfo);
- READ_BUF(8);
- READ32(res->rflags);
- READ32(bmlen);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->rflags = be32_to_cpup(p++);
+ bmlen = be32_to_cpup(p);
if (bmlen > 10)
goto xdr_error;
- READ_BUF(bmlen << 2);
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (unlikely(!p))
+ goto out_overflow;
savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE);
for (i = 0; i < savewords; ++i)
- READ32(res->attrset[i]);
+ res->attrset[i] = be32_to_cpup(p++);
for (; i < NFS4_BITMAP_SIZE; i++)
res->attrset[i] = 0;
@@ -3811,36 +3999,33 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
xdr_error:
dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen);
return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
- if (status)
- return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
- return 0;
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
}
static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res)
{
- __be32 *p;
int status;
status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
- if (status)
- return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
- return 0;
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
}
static int decode_putfh(struct xdr_stream *xdr)
@@ -3863,9 +4048,11 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
status = decode_op_hdr(xdr, OP_READ);
if (status)
return status;
- READ_BUF(8);
- READ32(eof);
- READ32(count);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ eof = be32_to_cpup(p++);
+ count = be32_to_cpup(p);
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
recvd = req->rq_rcv_buf.len - hdrlen;
if (count > recvd) {
@@ -3878,6 +4065,9 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
res->eof = eof;
res->count = count;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
@@ -3892,17 +4082,17 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
int status;
status = decode_op_hdr(xdr, OP_READDIR);
- if (status)
+ if (!status)
+ status = decode_verifier(xdr, readdir->verifier.data);
+ if (unlikely(status))
return status;
- READ_BUF(8);
- COPYMEM(readdir->verifier.data, 8);
dprintk("%s: verifier = %08x:%08x\n",
__func__,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1]);
- hdrlen = (char *) p - (char *) iov->iov_base;
+ hdrlen = (char *) xdr->p - (char *) iov->iov_base;
recvd = rcvbuf->len - hdrlen;
if (pglen > recvd)
pglen = recvd;
@@ -3990,8 +4180,10 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
return status;
/* Convert length of symlink */
- READ_BUF(4);
- READ32(len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
if (len >= rcvbuf->page_len || len <= 0) {
dprintk("nfs: server returned giant symlink!\n");
return -ENAMETOOLONG;
@@ -4015,6 +4207,9 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
kaddr[len+rcvbuf->page_base] = '\0';
kunmap_atomic(kaddr, KM_USER0);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -4112,10 +4307,16 @@ static int decode_setattr(struct xdr_stream *xdr)
status = decode_op_hdr(xdr, OP_SETATTR);
if (status)
return status;
- READ_BUF(4);
- READ32(bmlen);
- READ_BUF(bmlen << 2);
- return 0;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (likely(p))
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
@@ -4124,35 +4325,50 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
uint32_t opnum;
int32_t nfserr;
- READ_BUF(8);
- READ32(opnum);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ opnum = be32_to_cpup(p++);
if (opnum != OP_SETCLIENTID) {
dprintk("nfs: decode_setclientid: Server returned operation"
" %d\n", opnum);
return -EIO;
}
- READ32(nfserr);
+ nfserr = be32_to_cpup(p);
if (nfserr == NFS_OK) {
- READ_BUF(8 + NFS4_VERIFIER_SIZE);
- READ64(clp->cl_clientid);
- COPYMEM(clp->cl_confirm.data, NFS4_VERIFIER_SIZE);
+ p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &clp->cl_clientid);
+ memcpy(clp->cl_confirm.data, p, NFS4_VERIFIER_SIZE);
} else if (nfserr == NFSERR_CLID_INUSE) {
uint32_t len;
/* skip netid string */
- READ_BUF(4);
- READ32(len);
- READ_BUF(len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
/* skip uaddr string */
- READ_BUF(4);
- READ32(len);
- READ_BUF(len);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
return -NFSERR_CLID_INUSE;
} else
return nfs4_stat_to_errno(nfserr);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
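In the NFSERR_CLID_INUSE arm above, the netid and uaddr strings are XDR opaques (a 32-bit length followed by that many bytes) that are merely skipped, and each of the four reads is now individually bounds-checked. A rough userspace sketch of skipping one length-prefixed opaque in the same style, not part of the patch (XDR's 4-byte padding is ignored for brevity):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Skip one opaque (4-byte big-endian length, then len bytes).
 * Returns 0 on success, -1 if the buffer is too short. */
static int skip_opaque(const unsigned char **p, const unsigned char *end)
{
	uint32_t len;

	if (end - *p < 4)
		return -1;
	memcpy(&len, *p, 4);
	len = ntohl(len);
	*p += 4;
	if ((size_t)(end - *p) < len)
		return -1;
	*p += len;
	return 0;
}

int main(void)
{
	/* "tcp" (len 3) followed by "10.0.0.1.8.1" (len 12) */
	unsigned char buf[] = { 0, 0, 0, 3, 't', 'c', 'p',
				0, 0, 0, 12, '1', '0', '.', '0', '.', '0',
				'.', '1', '.', '8', '.', '1' };
	const unsigned char *p = buf, *end = buf + sizeof(buf);

	if (skip_opaque(&p, end) || skip_opaque(&p, end)) {
		fprintf(stderr, "short reply\n");
		return 1;
	}
	printf("skipped %ld bytes\n", (long)(p - buf));
	return 0;
}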
static int decode_setclientid_confirm(struct xdr_stream *xdr)
@@ -4169,11 +4385,16 @@ static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res)
if (status)
return status;
- READ_BUF(16);
- READ32(res->count);
- READ32(res->verf->committed);
- COPYMEM(res->verf->verifier, 8);
+ p = xdr_inline_decode(xdr, 16);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->count = be32_to_cpup(p++);
+ res->verf->committed = be32_to_cpup(p++);
+ memcpy(res->verf->verifier, p, 8);
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_delegreturn(struct xdr_stream *xdr)
@@ -4187,6 +4408,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
{
__be32 *p;
uint32_t dummy;
+ char *dummy_str;
int status;
struct nfs_client *clp = res->client;
@@ -4194,36 +4416,45 @@ static int decode_exchange_id(struct xdr_stream *xdr,
if (status)
return status;
- READ_BUF(8);
- READ64(clp->cl_ex_clid);
- READ_BUF(12);
- READ32(clp->cl_seqid);
- READ32(clp->cl_exchange_flags);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &clp->cl_ex_clid);
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ clp->cl_seqid = be32_to_cpup(p++);
+ clp->cl_exchange_flags = be32_to_cpup(p++);
/* We ask for SP4_NONE */
- READ32(dummy);
+ dummy = be32_to_cpup(p);
if (dummy != SP4_NONE)
return -EIO;
/* Throw away minor_id */
- READ_BUF(8);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
/* Throw away Major id */
- READ_BUF(4);
- READ32(dummy);
- READ_BUF(dummy);
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
/* Throw away server_scope */
- READ_BUF(4);
- READ32(dummy);
- READ_BUF(dummy);
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
/* Throw away Implementation id array */
- READ_BUF(4);
- READ32(dummy);
- READ_BUF(dummy);
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_chan_attrs(struct xdr_stream *xdr,
@@ -4232,22 +4463,35 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
__be32 *p;
u32 nr_attrs;
- READ_BUF(28);
- READ32(attrs->headerpadsz);
- READ32(attrs->max_rqst_sz);
- READ32(attrs->max_resp_sz);
- READ32(attrs->max_resp_sz_cached);
- READ32(attrs->max_ops);
- READ32(attrs->max_reqs);
- READ32(nr_attrs);
+ p = xdr_inline_decode(xdr, 28);
+ if (unlikely(!p))
+ goto out_overflow;
+ attrs->headerpadsz = be32_to_cpup(p++);
+ attrs->max_rqst_sz = be32_to_cpup(p++);
+ attrs->max_resp_sz = be32_to_cpup(p++);
+ attrs->max_resp_sz_cached = be32_to_cpup(p++);
+ attrs->max_ops = be32_to_cpup(p++);
+ attrs->max_reqs = be32_to_cpup(p++);
+ nr_attrs = be32_to_cpup(p);
if (unlikely(nr_attrs > 1)) {
printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n",
__func__, nr_attrs);
return -EINVAL;
}
- if (nr_attrs == 1)
- READ_BUF(4); /* skip rdma_attrs */
+ if (nr_attrs == 1) {
+ p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */
+ if (unlikely(!p))
+ goto out_overflow;
+ }
return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
+{
+ return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
}
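decode_sessionid() is a thin typed wrapper around decode_opaque_fixed(), so callers such as decode_create_session() and decode_sequence() no longer open-code READ_BUF/COPYMEM for the fixed-size session ID. An illustrative userspace version of the same helper shape, not part of the patch (SESSIONID_LEN and the struct are placeholders):

#include <stdio.h>
#include <string.h>

#define SESSIONID_LEN 16

struct sessionid { unsigned char data[SESSIONID_LEN]; };

/* Copy a fixed-size field out of the reply, or fail if it is short. */
static int decode_fixed(const unsigned char **p, const unsigned char *end,
			void *dst, size_t len)
{
	if ((size_t)(end - *p) < len)
		return -1;               /* would become -EIO after the overflow message */
	memcpy(dst, *p, len);
	*p += len;
	return 0;
}

static int decode_sessionid(const unsigned char **p, const unsigned char *end,
			    struct sessionid *sid)
{
	return decode_fixed(p, end, sid->data, SESSIONID_LEN);
}

int main(void)
{
	unsigned char reply[SESSIONID_LEN] = { 1, 2, 3 };
	const unsigned char *p = reply, *end = reply + sizeof(reply);
	struct sessionid sid;

	if (decode_sessionid(&p, end, &sid))
		return 1;
	printf("first byte %u\n", sid.data[0]);
	return 0;
}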
static int decode_create_session(struct xdr_stream *xdr,
@@ -4259,24 +4503,26 @@ static int decode_create_session(struct xdr_stream *xdr,
struct nfs4_session *session = clp->cl_session;
status = decode_op_hdr(xdr, OP_CREATE_SESSION);
-
- if (status)
+ if (!status)
+ status = decode_sessionid(xdr, &session->sess_id);
+ if (unlikely(status))
return status;
- /* sessionid */
- READ_BUF(NFS4_MAX_SESSIONID_LEN);
- COPYMEM(&session->sess_id, NFS4_MAX_SESSIONID_LEN);
-
/* seqid, flags */
- READ_BUF(8);
- READ32(clp->cl_seqid);
- READ32(session->flags);
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ clp->cl_seqid = be32_to_cpup(p++);
+ session->flags = be32_to_cpup(p);
/* Channel attributes */
status = decode_chan_attrs(xdr, &session->fc_attrs);
if (!status)
status = decode_chan_attrs(xdr, &session->bc_attrs);
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
}
static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
@@ -4300,7 +4546,9 @@ static int decode_sequence(struct xdr_stream *xdr,
return 0;
status = decode_op_hdr(xdr, OP_SEQUENCE);
- if (status)
+ if (!status)
+ status = decode_sessionid(xdr, &id);
+ if (unlikely(status))
goto out_err;
/*
@@ -4309,36 +4557,43 @@ static int decode_sequence(struct xdr_stream *xdr,
*/
status = -ESERVERFAULT;
- slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
- READ_BUF(NFS4_MAX_SESSIONID_LEN + 20);
- COPYMEM(id.data, NFS4_MAX_SESSIONID_LEN);
if (memcmp(id.data, res->sr_session->sess_id.data,
NFS4_MAX_SESSIONID_LEN)) {
dprintk("%s Invalid session id\n", __func__);
goto out_err;
}
+
+ p = xdr_inline_decode(xdr, 20);
+ if (unlikely(!p))
+ goto out_overflow;
+
/* seqid */
- READ32(dummy);
+ slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
+ dummy = be32_to_cpup(p++);
if (dummy != slot->seq_nr) {
dprintk("%s Invalid sequence number\n", __func__);
goto out_err;
}
/* slot id */
- READ32(dummy);
+ dummy = be32_to_cpup(p++);
if (dummy != res->sr_slotid) {
dprintk("%s Invalid slot id\n", __func__);
goto out_err;
}
/* highest slot id - currently not processed */
- READ32(dummy);
+ dummy = be32_to_cpup(p++);
/* target highest slot id - currently not processed */
- READ32(dummy);
+ dummy = be32_to_cpup(p++);
/* result flags - currently not processed */
- READ32(dummy);
+ dummy = be32_to_cpup(p);
status = 0;
out_err:
res->sr_status = status;
return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ status = -EIO;
+ goto out_err;
#else /* CONFIG_NFS_V4_1 */
return 0;
#endif /* CONFIG_NFS_V4_1 */
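After the session ID check, decode_sequence() pulls the remaining five 32-bit words of the SEQUENCE reply with a single 20-byte inline decode and validates the seqid and slot id against the locally cached slot. A small sketch of that validation, not part of the patch, with hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct slot { uint32_t seq_nr; };

static uint32_t get_be32(const unsigned char **p)
{
	uint32_t v;

	memcpy(&v, *p, 4);
	*p += 4;
	return ntohl(v);
}

/* Check the five 32-bit words of a SEQUENCE reply body against local state. */
static int check_sequence(const unsigned char *p, size_t len,
			  const struct slot *slot, uint32_t my_slotid)
{
	if (len < 20)
		return -1;               /* out_overflow */
	if (get_be32(&p) != slot->seq_nr)
		return -1;               /* invalid sequence number */
	if (get_be32(&p) != my_slotid)
		return -1;               /* invalid slot id */
	(void)get_be32(&p);              /* highest slot id: not processed */
	(void)get_be32(&p);              /* target highest slot id: not processed */
	(void)get_be32(&p);              /* result flags: not processed */
	return 0;
}

int main(void)
{
	unsigned char body[20] = { 0, 0, 0, 7, 0, 0, 0, 3 };  /* seqid 7, slot 3 */
	struct slot s = { .seq_nr = 7 };

	printf("%s\n", check_sequence(body, sizeof(body), &s, 3) ? "reject" : "ok");
	return 0;
}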
@@ -4370,7 +4625,8 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct
status = decode_open_downgrade(&xdr, res);
if (status != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4397,7 +4653,8 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_ac
status = decode_access(&xdr, res);
if (status != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4424,7 +4681,8 @@ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lo
goto out;
if ((status = decode_getfh(&xdr, res->fh)) != 0)
goto out;
- status = decode_getfattr(&xdr, res->fattr, res->server);
+ status = decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4448,7 +4706,8 @@ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nf
if ((status = decode_putrootfh(&xdr)) != 0)
goto out;
if ((status = decode_getfh(&xdr, res->fh)) == 0)
- status = decode_getfattr(&xdr, res->fattr, res->server);
+ status = decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4473,7 +4732,8 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_rem
goto out;
if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
goto out;
- decode_getfattr(&xdr, &res->dir_attr, res->server);
+ decode_getfattr(&xdr, &res->dir_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4503,11 +4763,13 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_re
if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
goto out;
/* Current FH is target directory */
- if (decode_getfattr(&xdr, res->new_fattr, res->server) != 0)
+ if (decode_getfattr(&xdr, res->new_fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if ((status = decode_restorefh(&xdr)) != 0)
goto out;
- decode_getfattr(&xdr, res->old_fattr, res->server);
+ decode_getfattr(&xdr, res->old_fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4540,11 +4802,13 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link
* Note order: OP_LINK leaves the directory as the current
* filehandle.
*/
- if (decode_getfattr(&xdr, res->dir_attr, res->server) != 0)
+ if (decode_getfattr(&xdr, res->dir_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if ((status = decode_restorefh(&xdr)) != 0)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4573,11 +4837,13 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_cr
goto out;
if ((status = decode_getfh(&xdr, res->fh)) != 0)
goto out;
- if (decode_getfattr(&xdr, res->fattr, res->server) != 0)
+ if (decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if ((status = decode_restorefh(&xdr)) != 0)
goto out;
- decode_getfattr(&xdr, res->dir_fattr, res->server);
+ decode_getfattr(&xdr, res->dir_fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4609,7 +4875,8 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_g
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_getfattr(&xdr, res->fattr, res->server);
+ status = decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4716,7 +4983,8 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_clos
* an ESTALE error. Shouldn't be a problem,
* though, since fattr->valid will remain unset.
*/
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4748,11 +5016,13 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openr
goto out;
if (decode_getfh(&xdr, &res->fh) != 0)
goto out;
- if (decode_getfattr(&xdr, res->f_attr, res->server) != 0)
+ if (decode_getfattr(&xdr, res->f_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
goto out;
if (decode_restorefh(&xdr) != 0)
goto out;
- decode_getfattr(&xdr, res->dir_attr, res->server);
+ decode_getfattr(&xdr, res->dir_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4800,7 +5070,8 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nf
status = decode_open(&xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->f_attr, res->server);
+ decode_getfattr(&xdr, res->f_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -4827,7 +5098,8 @@ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_se
status = decode_setattr(&xdr);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -5001,7 +5273,8 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writ
status = decode_write(&xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
if (!status)
status = res->count;
out:
@@ -5030,7 +5303,8 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_wri
status = decode_commit(&xdr, res);
if (status)
goto out;
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -5194,7 +5468,8 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nf
if (status != 0)
goto out;
status = decode_delegreturn(&xdr);
- decode_getfattr(&xdr, res->fattr, res->server);
+ decode_getfattr(&xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
}
@@ -5222,7 +5497,8 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
goto out;
xdr_enter_page(&xdr, PAGE_SIZE);
status = decode_getfattr(&xdr, &res->fs_locations->fattr,
- res->fs_locations->server);
+ res->fs_locations->server,
+ !RPC_IS_ASYNC(req->rq_task));
out:
return status;
}
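The decode_getfattr() calls in the hunks above gain a second change besides error handling: an extra argument, !RPC_IS_ASYNC(rqstp->rq_task), telling the attribute decoder whether the caller runs in a context that may sleep. The sketch below is only an assumed illustration of why such a flag gets threaded through a call chain; the real decoder's use of it is not shown in this diff:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical attribute decoder: the blocking path (e.g. an idmapper
 * upcall) is only taken when the caller says sleeping is allowed. */
static int decode_owner(const char *name, bool may_sleep)
{
	if (!may_sleep) {
		printf("async context: using default mapping for %s\n", name);
		return 0;
	}
	printf("sync context: resolving %s via a blocking upcall\n", name);
	return 0;
}

int main(void)
{
	decode_owner("alice", true);   /* synchronous RPC call */
	decode_owner("bob", false);    /* async completion, must not sleep */
	return 0;
}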
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0b4cbdc60abd..867f70504531 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -73,7 +73,7 @@ enum {
Opt_cto, Opt_nocto,
Opt_ac, Opt_noac,
Opt_lock, Opt_nolock,
- Opt_v2, Opt_v3,
+ Opt_v2, Opt_v3, Opt_v4,
Opt_udp, Opt_tcp, Opt_rdma,
Opt_acl, Opt_noacl,
Opt_rdirplus, Opt_nordirplus,
@@ -127,6 +127,7 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_nolock, "nolock" },
{ Opt_v2, "v2" },
{ Opt_v3, "v3" },
+ { Opt_v4, "v4" },
{ Opt_udp, "udp" },
{ Opt_tcp, "tcp" },
{ Opt_rdma, "rdma" },
@@ -158,7 +159,7 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_mountvers, "mountvers=%s" },
{ Opt_nfsvers, "nfsvers=%s" },
{ Opt_nfsvers, "vers=%s" },
- { Opt_minorversion, "minorversion=%u" },
+ { Opt_minorversion, "minorversion=%s" },
{ Opt_sec, "sec=%s" },
{ Opt_proto, "proto=%s" },
@@ -272,6 +273,10 @@ static const struct super_operations nfs_sops = {
};
#ifdef CONFIG_NFS_V4
+static int nfs4_validate_text_mount_data(void *options,
+ struct nfs_parsed_mount_data *args, const char *dev_name);
+static int nfs4_try_mount(int flags, const char *dev_name,
+ struct nfs_parsed_mount_data *data, struct vfsmount *mnt);
static int nfs4_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static int nfs4_remote_get_sb(struct file_system_type *fs_type,
@@ -742,127 +747,23 @@ static int nfs_verify_server_address(struct sockaddr *addr)
}
}
+ dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
return 0;
}
-static void nfs_parse_ipv4_address(char *string, size_t str_len,
- struct sockaddr *sap, size_t *addr_len)
-{
- struct sockaddr_in *sin = (struct sockaddr_in *)sap;
- u8 *addr = (u8 *)&sin->sin_addr.s_addr;
-
- if (str_len <= INET_ADDRSTRLEN) {
- dfprintk(MOUNT, "NFS: parsing IPv4 address %*s\n",
- (int)str_len, string);
-
- sin->sin_family = AF_INET;
- *addr_len = sizeof(*sin);
- if (in4_pton(string, str_len, addr, '\0', NULL))
- return;
- }
-
- sap->sa_family = AF_UNSPEC;
- *addr_len = 0;
-}
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static int nfs_parse_ipv6_scope_id(const char *string, const size_t str_len,
- const char *delim,
- struct sockaddr_in6 *sin6)
-{
- char *p;
- size_t len;
-
- if ((string + str_len) == delim)
- return 1;
-
- if (*delim != IPV6_SCOPE_DELIMITER)
- return 0;
-
- if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
- return 0;
-
- len = (string + str_len) - delim - 1;
- p = kstrndup(delim + 1, len, GFP_KERNEL);
- if (p) {
- unsigned long scope_id = 0;
- struct net_device *dev;
-
- dev = dev_get_by_name(&init_net, p);
- if (dev != NULL) {
- scope_id = dev->ifindex;
- dev_put(dev);
- } else {
- if (strict_strtoul(p, 10, &scope_id) == 0) {
- kfree(p);
- return 0;
- }
- }
-
- kfree(p);
-
- sin6->sin6_scope_id = scope_id;
- dfprintk(MOUNT, "NFS: IPv6 scope ID = %lu\n", scope_id);
- return 1;
- }
-
- return 0;
-}
-
-static void nfs_parse_ipv6_address(char *string, size_t str_len,
- struct sockaddr *sap, size_t *addr_len)
-{
- struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
- u8 *addr = (u8 *)&sin6->sin6_addr.in6_u;
- const char *delim;
-
- if (str_len <= INET6_ADDRSTRLEN) {
- dfprintk(MOUNT, "NFS: parsing IPv6 address %*s\n",
- (int)str_len, string);
-
- sin6->sin6_family = AF_INET6;
- *addr_len = sizeof(*sin6);
- if (in6_pton(string, str_len, addr,
- IPV6_SCOPE_DELIMITER, &delim) != 0) {
- if (nfs_parse_ipv6_scope_id(string, str_len,
- delim, sin6) != 0)
- return;
- }
- }
-
- sap->sa_family = AF_UNSPEC;
- *addr_len = 0;
-}
-#else
-static void nfs_parse_ipv6_address(char *string, size_t str_len,
- struct sockaddr *sap, size_t *addr_len)
-{
- sap->sa_family = AF_UNSPEC;
- *addr_len = 0;
-}
-#endif
-
/*
- * Construct a sockaddr based on the contents of a string that contains
- * an IP address in presentation format.
- *
- * If there is a problem constructing the new sockaddr, set the address
- * family to AF_UNSPEC.
+ * Select between a default port value and a user-specified port value.
+ * If a zero value is set, then autobind will be used.
*/
-void nfs_parse_ip_address(char *string, size_t str_len,
- struct sockaddr *sap, size_t *addr_len)
+static void nfs_set_default_port(struct sockaddr *sap, const int parsed_port,
+ const unsigned short default_port)
{
- unsigned int i, colons;
+ unsigned short port = default_port;
- colons = 0;
- for (i = 0; i < str_len; i++)
- if (string[i] == ':')
- colons++;
+ if (parsed_port != NFS_UNSPEC_PORT)
+ port = parsed_port;
- if (colons >= 2)
- nfs_parse_ipv6_address(string, str_len, sap, addr_len);
- else
- nfs_parse_ipv4_address(string, str_len, sap, addr_len);
+ rpc_set_port(sap, port);
}
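nfs_set_default_port() folds the old port-selection logic into one helper: a parsed port of NFS_UNSPEC_PORT means the caller's default is used, and a resulting port of zero still means RPC autobind. Sketch of the selection logic only, not part of the patch (it assumes NFS_UNSPEC_PORT is a sentinel outside the valid port range):

#include <stdio.h>

#define NFS_UNSPEC_PORT (-1)

/* Pick the user-supplied port if one was parsed, otherwise the default;
 * a result of 0 means "let the RPC layer autobind". */
static unsigned short choose_port(int parsed_port, unsigned short default_port)
{
	if (parsed_port != NFS_UNSPEC_PORT)
		return (unsigned short)parsed_port;
	return default_port;
}

int main(void)
{
	printf("%u\n", choose_port(NFS_UNSPEC_PORT, 2049));  /* NFSv4 default */
	printf("%u\n", choose_port(NFS_UNSPEC_PORT, 0));     /* mountd autobind */
	printf("%u\n", choose_port(30000, 2049));            /* user override */
	return 0;
}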
/*
@@ -904,8 +805,6 @@ static void nfs_set_mount_transport_protocol(struct nfs_parsed_mount_data *mnt)
/*
* Parse the value of the 'sec=' option.
- *
- * The flavor_len setting is for v4 mounts.
*/
static int nfs_parse_security_flavors(char *value,
struct nfs_parsed_mount_data *mnt)
@@ -916,53 +815,43 @@ static int nfs_parse_security_flavors(char *value,
switch (match_token(value, nfs_secflavor_tokens, args)) {
case Opt_sec_none:
- mnt->auth_flavor_len = 0;
mnt->auth_flavors[0] = RPC_AUTH_NULL;
break;
case Opt_sec_sys:
- mnt->auth_flavor_len = 0;
mnt->auth_flavors[0] = RPC_AUTH_UNIX;
break;
case Opt_sec_krb5:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5;
break;
case Opt_sec_krb5i:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5I;
break;
case Opt_sec_krb5p:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5P;
break;
case Opt_sec_lkey:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEY;
break;
case Opt_sec_lkeyi:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYI;
break;
case Opt_sec_lkeyp:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYP;
break;
case Opt_sec_spkm:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKM;
break;
case Opt_sec_spkmi:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMI;
break;
case Opt_sec_spkmp:
- mnt->auth_flavor_len = 1;
mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMP;
break;
default:
return 0;
}
+ mnt->auth_flavor_len = 1;
return 1;
}
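The sec= parser now sets auth_flavor_len exactly once after the switch instead of in every case, since every recognized flavor yields a single entry. An equivalent table-driven sketch, not part of the patch (the flavor numbers shown are illustrative; the real values come from the sunrpc headers):

#include <stdio.h>
#include <string.h>

enum { AUTH_NULL = 0, AUTH_UNIX = 1, AUTH_GSS_KRB5 = 390003 };

struct flavor_map { const char *name; int flavor; };

static const struct flavor_map sec_tokens[] = {
	{ "none", AUTH_NULL },
	{ "sys",  AUTH_UNIX },
	{ "krb5", AUTH_GSS_KRB5 },
};

/* Returns 1 and fills *flavor on success, 0 on an unknown sec= value. */
static int parse_sec(const char *value, int *flavor)
{
	for (size_t i = 0; i < sizeof(sec_tokens) / sizeof(sec_tokens[0]); i++)
		if (strcmp(value, sec_tokens[i].name) == 0) {
			*flavor = sec_tokens[i].flavor;
			return 1;
		}
	return 0;
}

int main(void)
{
	int flavor;

	if (parse_sec("krb5", &flavor))
		printf("auth_flavors[0] = %d, auth_flavor_len = 1\n", flavor);
	return 0;
}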
@@ -1001,7 +890,6 @@ static int nfs_parse_mount_options(char *raw,
while ((p = strsep(&raw, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
unsigned long option;
- int int_option;
int token;
if (!*p)
@@ -1047,10 +935,18 @@ static int nfs_parse_mount_options(char *raw,
break;
case Opt_v2:
mnt->flags &= ~NFS_MOUNT_VER3;
+ mnt->version = 2;
break;
case Opt_v3:
mnt->flags |= NFS_MOUNT_VER3;
+ mnt->version = 3;
break;
+#ifdef CONFIG_NFS_V4
+ case Opt_v4:
+ mnt->flags &= ~NFS_MOUNT_VER3;
+ mnt->version = 4;
+ break;
+#endif
case Opt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
@@ -1264,20 +1160,33 @@ static int nfs_parse_mount_options(char *raw,
switch (option) {
case NFS2_VERSION:
mnt->flags &= ~NFS_MOUNT_VER3;
+ mnt->version = 2;
break;
case NFS3_VERSION:
mnt->flags |= NFS_MOUNT_VER3;
+ mnt->version = 3;
break;
+#ifdef CONFIG_NFS_V4
+ case NFS4_VERSION:
+ mnt->flags &= ~NFS_MOUNT_VER3;
+ mnt->version = 4;
+ break;
+#endif
default:
goto out_invalid_value;
}
break;
case Opt_minorversion:
- if (match_int(args, &int_option))
- return 0;
- if (int_option < 0 || int_option > NFS4_MAX_MINOR_VERSION)
- return 0;
- mnt->minorversion = int_option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ if (option > NFS4_MAX_MINOR_VERSION)
+ goto out_invalid_value;
+ mnt->minorversion = option;
break;
/*
@@ -1352,11 +1261,14 @@ static int nfs_parse_mount_options(char *raw,
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
- nfs_parse_ip_address(string, strlen(string),
- (struct sockaddr *)
- &mnt->nfs_server.address,
- &mnt->nfs_server.addrlen);
+ mnt->nfs_server.addrlen =
+ rpc_pton(string, strlen(string),
+ (struct sockaddr *)
+ &mnt->nfs_server.address,
+ sizeof(mnt->nfs_server.address));
kfree(string);
+ if (mnt->nfs_server.addrlen == 0)
+ goto out_invalid_address;
break;
case Opt_clientaddr:
string = match_strdup(args);
@@ -1376,11 +1288,14 @@ static int nfs_parse_mount_options(char *raw,
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
- nfs_parse_ip_address(string, strlen(string),
- (struct sockaddr *)
- &mnt->mount_server.address,
- &mnt->mount_server.addrlen);
+ mnt->mount_server.addrlen =
+ rpc_pton(string, strlen(string),
+ (struct sockaddr *)
+ &mnt->mount_server.address,
+ sizeof(mnt->mount_server.address));
kfree(string);
+ if (mnt->mount_server.addrlen == 0)
+ goto out_invalid_address;
break;
case Opt_lookupcache:
string = match_strdup(args);
@@ -1432,8 +1347,11 @@ static int nfs_parse_mount_options(char *raw,
return 1;
+out_invalid_address:
+ printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
+ return 0;
out_invalid_value:
- printk(KERN_INFO "NFS: bad mount option value specified: %s \n", p);
+ printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p);
return 0;
out_nomem:
printk(KERN_INFO "NFS: not enough memory to parse option\n");
@@ -1445,13 +1363,60 @@ out_security_failure:
}
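In the option-parsing hunks above, minorversion= is now matched as a string and converted with strict_strtoul(), so the same out_invalid_value path covers both malformed numbers and values above NFS4_MAX_MINOR_VERSION. A userspace equivalent using strtoul, not part of the patch (the maximum is an assumption for the sketch):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NFS4_MAX_MINOR_VERSION 1   /* assumption for the sketch */

/* Parse a minorversion= value; returns -1 on bad input. */
static int parse_minorversion(const char *s)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 10);
	if (errno || end == s || *end != '\0')
		return -1;                       /* out_invalid_value */
	if (v > NFS4_MAX_MINOR_VERSION)
		return -1;
	return (int)v;
}

int main(void)
{
	printf("%d\n", parse_minorversion("1"));    /* 1 */
	printf("%d\n", parse_minorversion("99"));   /* -1 */
	printf("%d\n", parse_minorversion("1x"));   /* -1 */
	return 0;
}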
/*
+ * Match the requested auth flavors with the list returned by
+ * the server. Returns zero and sets the mount's authentication
+ * flavor on success; returns -EACCES if server does not support
+ * the requested flavor.
+ */
+static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
+ struct nfs_mount_request *request)
+{
+ unsigned int i, j, server_authlist_len = *(request->auth_flav_len);
+
+ /*
+ * Certain releases of Linux's mountd return an empty
+ * flavor list. To prevent behavioral regression with
+ * these servers (i.e. rejecting mounts that used to
+ * succeed), revert to pre-2.6.32 behavior (no checking)
+ * if the returned flavor list is empty.
+ */
+ if (server_authlist_len == 0)
+ return 0;
+
+ /*
+ * We avoid sophisticated negotiating here, as there are
+ * plenty of cases where we can get it wrong, providing
+ * either too little or too much security.
+ *
+ * RFC 2623, section 2.7 suggests we SHOULD prefer the
+ * flavor listed first. However, some servers list
+ * AUTH_NULL first. Our caller plants AUTH_SYS, the
+ * preferred default, in args->auth_flavors[0] if the user
+ * didn't specify a sec= mount option.
+ */
+ for (i = 0; i < args->auth_flavor_len; i++)
+ for (j = 0; j < server_authlist_len; j++)
+ if (args->auth_flavors[i] == request->auth_flavs[j]) {
+ dfprintk(MOUNT, "NFS: using auth flavor %d\n",
+ request->auth_flavs[j]);
+ args->auth_flavors[0] = request->auth_flavs[j];
+ return 0;
+ }
+
+ dfprintk(MOUNT, "NFS: server does not support requested auth flavor\n");
+ nfs_umount(request);
+ return -EACCES;
+}
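Sketch of the nfs_walk_authlist() matching loop in isolation, not part of the patch: the client's preference order wins, an empty server list short-circuits to the old unchecked behaviour, and no match becomes an error the caller maps to -EACCES:

#include <stdio.h>

/* Pick the first requested flavor that the server also offers.
 * Returns the flavor, or -1 (the caller would turn this into -EACCES). */
static int walk_authlist(const int *want, int nwant,
			 const int *offered, int noffered)
{
	if (noffered == 0)
		return want[0];      /* empty list from old mountd: skip checking */

	for (int i = 0; i < nwant; i++)
		for (int j = 0; j < noffered; j++)
			if (want[i] == offered[j])
				return offered[j];
	return -1;
}

int main(void)
{
	int want[] = { 1 };              /* AUTH_SYS */
	int offered[] = { 0, 1 };        /* AUTH_NULL first, then AUTH_SYS */

	printf("chosen flavor: %d\n", walk_authlist(want, 1, offered, 2));
	return 0;
}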
+
+/*
* Use the remote server's MOUNT service to request the NFS file handle
* corresponding to the provided path.
*/
static int nfs_try_mount(struct nfs_parsed_mount_data *args,
struct nfs_fh *root_fh)
{
- unsigned int auth_flavor_len = 0;
+ rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
+ unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
struct nfs_mount_request request = {
.sap = (struct sockaddr *)
&args->mount_server.address,
@@ -1459,7 +1424,8 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
.protocol = args->mount_server.protocol,
.fh = root_fh,
.noresvport = args->flags & NFS_MOUNT_NORESVPORT,
- .auth_flav_len = &auth_flavor_len,
+ .auth_flav_len = &server_authlist_len,
+ .auth_flavs = server_authlist,
};
int status;
@@ -1485,23 +1451,25 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
args->mount_server.addrlen = args->nfs_server.addrlen;
}
request.salen = args->mount_server.addrlen;
-
- /*
- * autobind will be used if mount_server.port == 0
- */
- nfs_set_port(request.sap, args->mount_server.port);
+ nfs_set_default_port(request.sap, args->mount_server.port, 0);
/*
* Now ask the mount server to map our export path
* to a file handle.
*/
status = nfs_mount(&request);
- if (status == 0)
- return 0;
+ if (status != 0) {
+ dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
+ request.hostname, status);
+ return status;
+ }
- dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
- request.hostname, status);
- return status;
+ /*
+ * MNTv1 (NFSv2) does not support auth flavor negotiation.
+ */
+ if (args->mount_server.version != NFS_MNT3_VERSION)
+ return 0;
+ return nfs_walk_authlist(args, &request);
}
static int nfs_parse_simple_hostname(const char *dev_name,
@@ -1661,6 +1629,7 @@ static int nfs_validate_mount_data(void *options,
const char *dev_name)
{
struct nfs_mount_data *data = (struct nfs_mount_data *)options;
+ struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
if (data == NULL)
goto out_no_data;
@@ -1672,10 +1641,12 @@ static int nfs_validate_mount_data(void *options,
args->acregmax = NFS_DEF_ACREGMAX;
args->acdirmin = NFS_DEF_ACDIRMIN;
args->acdirmax = NFS_DEF_ACDIRMAX;
- args->mount_server.port = 0; /* autobind unless user sets port */
- args->nfs_server.port = 0; /* autobind unless user sets port */
+ args->mount_server.port = NFS_UNSPEC_PORT;
+ args->nfs_server.port = NFS_UNSPEC_PORT;
args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
args->auth_flavors[0] = RPC_AUTH_UNIX;
+ args->auth_flavor_len = 1;
+ args->minorversion = 0;
switch (data->version) {
case 1:
@@ -1697,8 +1668,11 @@ static int nfs_validate_mount_data(void *options,
if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
goto out_invalid_fh;
mntfh->size = data->root.size;
- } else
+ args->version = 3;
+ } else {
mntfh->size = NFS2_FHSIZE;
+ args->version = 2;
+ }
memcpy(mntfh->data, data->root.data, mntfh->size);
@@ -1720,11 +1694,9 @@ static int nfs_validate_mount_data(void *options,
args->acdirmin = data->acdirmin;
args->acdirmax = data->acdirmax;
- memcpy(&args->nfs_server.address, &data->addr,
- sizeof(data->addr));
+ memcpy(sap, &data->addr, sizeof(data->addr));
args->nfs_server.addrlen = sizeof(data->addr);
- if (!nfs_verify_server_address((struct sockaddr *)
- &args->nfs_server.address))
+ if (!nfs_verify_server_address(sap))
goto out_no_address;
if (!(data->flags & NFS_MOUNT_TCP))
@@ -1772,12 +1744,18 @@ static int nfs_validate_mount_data(void *options,
if (nfs_parse_mount_options((char *)options, args) == 0)
return -EINVAL;
- if (!nfs_verify_server_address((struct sockaddr *)
- &args->nfs_server.address))
+ if (!nfs_verify_server_address(sap))
goto out_no_address;
- nfs_set_port((struct sockaddr *)&args->nfs_server.address,
- args->nfs_server.port);
+ if (args->version == 4)
+#ifdef CONFIG_NFS_V4
+ return nfs4_validate_text_mount_data(options,
+ args, dev_name);
+#else
+ goto out_v4_not_compiled;
+#endif
+
+ nfs_set_default_port(sap, args->nfs_server.port, 0);
nfs_set_mount_transport_protocol(args);
@@ -1825,6 +1803,12 @@ out_v3_not_compiled:
return -EPROTONOSUPPORT;
#endif /* !CONFIG_NFS_V3 */
+#ifndef CONFIG_NFS_V4
+out_v4_not_compiled:
+ dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
+ return -EPROTONOSUPPORT;
+#endif /* !CONFIG_NFS_V4 */
+
out_nomem:
dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
return -ENOMEM;
@@ -2120,6 +2104,14 @@ static int nfs_get_sb(struct file_system_type *fs_type,
if (error < 0)
goto out;
+#ifdef CONFIG_NFS_V4
+ if (data->version == 4) {
+ error = nfs4_try_mount(flags, dev_name, data, mnt);
+ kfree(data->client_address);
+ goto out;
+ }
+#endif /* CONFIG_NFS_V4 */
+
/* Get a volume representation */
server = nfs_create_server(data, mntfh);
if (IS_ERR(server)) {
@@ -2317,6 +2309,43 @@ static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3);
}
+static int nfs4_validate_text_mount_data(void *options,
+ struct nfs_parsed_mount_data *args,
+ const char *dev_name)
+{
+ struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+
+ nfs_set_default_port(sap, args->nfs_server.port, NFS_PORT);
+
+ nfs_validate_transport_protocol(args);
+
+ nfs4_validate_mount_flags(args);
+
+ if (args->version != 4) {
+ dfprintk(MOUNT,
+ "NFS4: Illegal mount version\n");
+ return -EINVAL;
+ }
+
+ if (args->auth_flavor_len > 1) {
+ dfprintk(MOUNT,
+ "NFS4: Too many RPC auth flavours specified\n");
+ return -EINVAL;
+ }
+
+ if (args->client_address == NULL) {
+ dfprintk(MOUNT,
+ "NFS4: mount program didn't pass callback address\n");
+ return -EINVAL;
+ }
+
+ return nfs_parse_devname(dev_name,
+ &args->nfs_server.hostname,
+ NFS4_MAXNAMLEN,
+ &args->nfs_server.export_path,
+ NFS4_MAXPATHLEN);
+}
+
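nfs4_validate_text_mount_data() ends by splitting the dev_name into hostname and export path via nfs_parse_devname(). A rough userspace sketch of that split, not part of the patch; the kernel helper additionally enforces NFS4_MAXNAMLEN/NFS4_MAXPATHLEN and handles IPv6 literals, which this does not:

#include <stdio.h>
#include <string.h>

/* Split "host:/export/path" into its two halves. Returns -1 on bad input. */
static int parse_devname(const char *dev, char *host, size_t hostlen,
			 char *path, size_t pathlen)
{
	const char *colon = strrchr(dev, ':');

	if (!colon || colon == dev || colon[1] != '/')
		return -1;
	if ((size_t)(colon - dev) >= hostlen || strlen(colon + 1) >= pathlen)
		return -1;
	memcpy(host, dev, colon - dev);
	host[colon - dev] = '\0';
	strcpy(path, colon + 1);
	return 0;
}

int main(void)
{
	char host[256], path[1024];

	if (parse_devname("server.example.com:/srv/export", host, sizeof(host),
			  path, sizeof(path)) == 0)
		printf("host=%s path=%s\n", host, path);
	return 0;
}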
/*
* Validate NFSv4 mount options
*/
@@ -2324,7 +2353,7 @@ static int nfs4_validate_mount_data(void *options,
struct nfs_parsed_mount_data *args,
const char *dev_name)
{
- struct sockaddr_in *ap;
+ struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
struct nfs4_mount_data *data = (struct nfs4_mount_data *)options;
char *c;
@@ -2337,23 +2366,22 @@ static int nfs4_validate_mount_data(void *options,
args->acregmax = NFS_DEF_ACREGMAX;
args->acdirmin = NFS_DEF_ACDIRMIN;
args->acdirmax = NFS_DEF_ACDIRMAX;
- args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */
+ args->nfs_server.port = NFS_UNSPEC_PORT;
args->auth_flavors[0] = RPC_AUTH_UNIX;
- args->auth_flavor_len = 0;
+ args->auth_flavor_len = 1;
+ args->version = 4;
args->minorversion = 0;
switch (data->version) {
case 1:
- ap = (struct sockaddr_in *)&args->nfs_server.address;
if (data->host_addrlen > sizeof(args->nfs_server.address))
goto out_no_address;
if (data->host_addrlen == 0)
goto out_no_address;
args->nfs_server.addrlen = data->host_addrlen;
- if (copy_from_user(ap, data->host_addr, data->host_addrlen))
+ if (copy_from_user(sap, data->host_addr, data->host_addrlen))
return -EFAULT;
- if (!nfs_verify_server_address((struct sockaddr *)
- &args->nfs_server.address))
+ if (!nfs_verify_server_address(sap))
goto out_no_address;
if (data->auth_flavourlen) {
@@ -2399,39 +2427,14 @@ static int nfs4_validate_mount_data(void *options,
nfs_validate_transport_protocol(args);
break;
- default: {
- int status;
-
+ default:
if (nfs_parse_mount_options((char *)options, args) == 0)
return -EINVAL;
- if (!nfs_verify_server_address((struct sockaddr *)
- &args->nfs_server.address))
+ if (!nfs_verify_server_address(sap))
return -EINVAL;
- nfs_set_port((struct sockaddr *)&args->nfs_server.address,
- args->nfs_server.port);
-
- nfs_validate_transport_protocol(args);
-
- nfs4_validate_mount_flags(args);
-
- if (args->auth_flavor_len > 1)
- goto out_inval_auth;
-
- if (args->client_address == NULL)
- goto out_no_client_address;
-
- status = nfs_parse_devname(dev_name,
- &args->nfs_server.hostname,
- NFS4_MAXNAMLEN,
- &args->nfs_server.export_path,
- NFS4_MAXPATHLEN);
- if (status < 0)
- return status;
-
- break;
- }
+ return nfs4_validate_text_mount_data(options, args, dev_name);
}
return 0;
@@ -2448,10 +2451,6 @@ out_inval_auth:
out_no_address:
dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
return -EINVAL;
-
-out_no_client_address:
- dfprintk(MOUNT, "NFS4: mount program didn't pass callback address\n");
- return -EINVAL;
}
/*
@@ -2618,6 +2617,34 @@ out_err:
return ret;
}
+static int nfs4_try_mount(int flags, const char *dev_name,
+ struct nfs_parsed_mount_data *data,
+ struct vfsmount *mnt)
+{
+ char *export_path;
+ struct vfsmount *root_mnt;
+ int error;
+
+ dfprintk(MOUNT, "--> nfs4_try_mount()\n");
+
+ export_path = data->nfs_server.export_path;
+ data->nfs_server.export_path = "/";
+ root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+ data->nfs_server.hostname);
+ data->nfs_server.export_path = export_path;
+
+ error = PTR_ERR(root_mnt);
+ if (IS_ERR(root_mnt))
+ goto out;
+
+ error = nfs_follow_remote_path(root_mnt, export_path, mnt);
+
+out:
+ dfprintk(MOUNT, "<-- nfs4_try_mount() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return error;
+}
+
/*
* Get the superblock for an NFS4 mountpoint
*/
@@ -2625,8 +2652,6 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
{
struct nfs_parsed_mount_data *data;
- char *export_path;
- struct vfsmount *root_mnt;
int error = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -2638,17 +2663,7 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
if (error < 0)
goto out;
- export_path = data->nfs_server.export_path;
- data->nfs_server.export_path = "/";
- root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
- data->nfs_server.hostname);
- data->nfs_server.export_path = export_path;
-
- error = PTR_ERR(root_mnt);
- if (IS_ERR(root_mnt))
- goto out;
-
- error = nfs_follow_remote_path(root_mnt, export_path, mnt);
+ error = nfs4_try_mount(flags, dev_name, data, mnt);
out:
kfree(data->client_address);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index a34fae21fe10..120acadc6a84 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -13,6 +13,7 @@
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
+#include <linux/migrate.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
@@ -26,6 +27,7 @@
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -218,24 +220,17 @@ static void nfs_end_page_writeback(struct page *page)
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
-/*
- * Find an associated nfs write request, and prepare to flush it out
- * May return an error if the user signalled nfs_wait_on_request().
- */
-static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
- struct page *page)
+static struct nfs_page *nfs_find_and_lock_request(struct page *page)
{
struct inode *inode = page->mapping->host;
struct nfs_page *req;
int ret;
spin_lock(&inode->i_lock);
- for(;;) {
+ for (;;) {
req = nfs_page_find_request_locked(page);
- if (req == NULL) {
- spin_unlock(&inode->i_lock);
- return 0;
- }
+ if (req == NULL)
+ break;
if (nfs_set_page_tag_locked(req))
break;
/* Note: If we hold the page lock, as is the case in nfs_writepage,
@@ -247,23 +242,40 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
ret = nfs_wait_on_request(req);
nfs_release_request(req);
if (ret != 0)
- return ret;
+ return ERR_PTR(ret);
spin_lock(&inode->i_lock);
}
- if (test_bit(PG_CLEAN, &req->wb_flags)) {
- spin_unlock(&inode->i_lock);
- BUG();
- }
- if (nfs_set_page_writeback(page) != 0) {
- spin_unlock(&inode->i_lock);
- BUG();
- }
spin_unlock(&inode->i_lock);
+ return req;
+}
+
+/*
+ * Find an associated nfs write request, and prepare to flush it out
+ * May return an error if the user signalled nfs_wait_on_request().
+ */
+static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
+ struct page *page)
+{
+ struct nfs_page *req;
+ int ret = 0;
+
+ req = nfs_find_and_lock_request(page);
+ if (!req)
+ goto out;
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+
+ ret = nfs_set_page_writeback(page);
+ BUG_ON(ret != 0);
+ BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
+
if (!nfs_pageio_add_request(pgio, req)) {
nfs_redirty_request(req);
- return pgio->pg_error;
+ ret = pgio->pg_error;
}
- return 0;
+out:
+ return ret;
}
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
@@ -1580,6 +1592,41 @@ int nfs_wb_page(struct inode *inode, struct page* page)
return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
+#ifdef CONFIG_MIGRATION
+int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page)
+{
+ struct nfs_page *req;
+ int ret;
+
+ if (PageFsCache(page))
+ nfs_fscache_release_page(page, GFP_KERNEL);
+
+ req = nfs_find_and_lock_request(page);
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+
+ ret = migrate_page(mapping, newpage, page);
+ if (!req)
+ goto out;
+ if (ret)
+ goto out_unlock;
+ page_cache_get(newpage);
+ req->wb_page = newpage;
+ SetPagePrivate(newpage);
+ set_page_private(newpage, page_private(page));
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ page_cache_release(page);
+out_unlock:
+ nfs_clear_page_tag_locked(req);
+ nfs_release_request(req);
+out:
+ return ret;
+}
+#endif
+
int __init nfs_init_writepagecache(void)
{
nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 5573508f707f..36fcabbf5186 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -34,6 +34,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
int flags = nfsexp_flags(rqstp, exp);
int ret;
+ validate_process_creds();
+
/* discard any old override before preparing the new set */
revert_creds(get_cred(current->real_cred));
new = prepare_creds();
@@ -86,8 +88,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
else
new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
+ validate_process_creds();
put_cred(override_creds(new));
put_cred(new);
+ validate_process_creds();
return 0;
oom:
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index b92a27629fb7..d9462643155c 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -85,6 +85,11 @@ static void expkey_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
+static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
+}
+
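Each sunrpc cache now supplies a cache_upcall callback that wraps its request formatter around sunrpc_cache_pipe_upcall(), rather than exporting the raw cache_request hook. A compact userspace sketch of that wrapper-plus-function-pointer arrangement, not part of the patch, with stand-in types:

#include <stdio.h>

struct cache_head { int key; };

struct cache_detail {
	const char *name;
	int (*cache_upcall)(struct cache_detail *cd, struct cache_head *h);
};

/* Stand-in for sunrpc_cache_pipe_upcall(): shared transport code that
 * takes the per-cache request formatter as an argument. */
static int pipe_upcall(struct cache_detail *cd, struct cache_head *h,
		       void (*request)(struct cache_detail *, struct cache_head *))
{
	request(cd, h);
	return 0;
}

/* Per-cache formatter plus its thin wrapper, as in expkey_upcall() above. */
static void demo_request(struct cache_detail *cd, struct cache_head *h)
{
	printf("%s: upcall for key %d\n", cd->name, h->key);
}

static int demo_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return pipe_upcall(cd, h, demo_request);
}

static struct cache_detail demo_cache = {
	.name = "nfsd.fh",
	.cache_upcall = demo_upcall,
};

int main(void)
{
	struct cache_head h = { .key = 42 };

	return demo_cache.cache_upcall(&demo_cache, &h);
}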
static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old);
static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *);
static struct cache_detail svc_expkey_cache;
@@ -259,7 +264,7 @@ static struct cache_detail svc_expkey_cache = {
.hash_table = expkey_table,
.name = "nfsd.fh",
.cache_put = expkey_put,
- .cache_request = expkey_request,
+ .cache_upcall = expkey_upcall,
.cache_parse = expkey_parse,
.cache_show = expkey_show,
.match = expkey_match,
@@ -355,6 +360,11 @@ static void svc_export_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
+static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, svc_export_request);
+}
+
static struct svc_export *svc_export_update(struct svc_export *new,
struct svc_export *old);
static struct svc_export *svc_export_lookup(struct svc_export *);
@@ -724,7 +734,7 @@ struct cache_detail svc_export_cache = {
.hash_table = export_table,
.name = "nfsd.export",
.cache_put = svc_export_put,
- .cache_request = svc_export_request,
+ .cache_upcall = svc_export_upcall,
.cache_parse = svc_export_parse,
.cache_show = svc_export_show,
.match = svc_export_match,
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 5b398421b051..cdfa86fa1471 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -146,6 +146,12 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
}
static int
+idtoname_upcall(struct cache_detail *cd, struct cache_head *ch)
+{
+ return sunrpc_cache_pipe_upcall(cd, ch, idtoname_request);
+}
+
+static int
idtoname_match(struct cache_head *ca, struct cache_head *cb)
{
struct ent *a = container_of(ca, struct ent, h);
@@ -175,10 +181,10 @@ idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
}
static void
-warn_no_idmapd(struct cache_detail *detail)
+warn_no_idmapd(struct cache_detail *detail, int has_died)
{
printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n",
- detail->last_close? "died" : "not been started");
+ has_died ? "died" : "not been started");
}
@@ -192,7 +198,7 @@ static struct cache_detail idtoname_cache = {
.hash_table = idtoname_table,
.name = "nfs4.idtoname",
.cache_put = ent_put,
- .cache_request = idtoname_request,
+ .cache_upcall = idtoname_upcall,
.cache_parse = idtoname_parse,
.cache_show = idtoname_show,
.warn_no_listener = warn_no_idmapd,
@@ -325,6 +331,12 @@ nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
}
static int
+nametoid_upcall(struct cache_detail *cd, struct cache_head *ch)
+{
+ return sunrpc_cache_pipe_upcall(cd, ch, nametoid_request);
+}
+
+static int
nametoid_match(struct cache_head *ca, struct cache_head *cb)
{
struct ent *a = container_of(ca, struct ent, h);
@@ -363,7 +375,7 @@ static struct cache_detail nametoid_cache = {
.hash_table = nametoid_table,
.name = "nfs4.nametoid",
.cache_put = ent_put,
- .cache_request = nametoid_request,
+ .cache_upcall = nametoid_upcall,
.cache_parse = nametoid_parse,
.cache_show = nametoid_show,
.warn_no_listener = warn_no_idmapd,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 6d0847562d87..7e906c5b7671 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -37,6 +37,7 @@
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/lockd.h>
+#include <linux/sunrpc/clnt.h>
#include <asm/uaccess.h>
#include <net/ipv6.h>
@@ -490,22 +491,18 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
*
* Input:
* buf: '\n'-terminated C string containing a
- * presentation format IPv4 address
+ * presentation format IP address
* size: length of C string in @buf
* Output:
* On success: returns zero if all specified locks were released;
* returns one if one or more locks were not released
* On error: return code is negative errno value
- *
- * Note: Only AF_INET client addresses are passed in
*/
static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
{
- struct sockaddr_in sin = {
- .sin_family = AF_INET,
- };
- int b1, b2, b3, b4;
- char c;
+ struct sockaddr_storage address;
+ struct sockaddr *sap = (struct sockaddr *)&address;
+ size_t salen = sizeof(address);
char *fo_path;
/* sanity check */
@@ -519,14 +516,10 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
if (qword_get(&buf, fo_path, size) < 0)
return -EINVAL;
- /* get ipv4 address */
- if (sscanf(fo_path, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
- return -EINVAL;
- if (b1 > 255 || b2 > 255 || b3 > 255 || b4 > 255)
+ if (rpc_pton(fo_path, size, sap, salen) == 0)
return -EINVAL;
- sin.sin_addr.s_addr = htonl((b1 << 24) | (b2 << 16) | (b3 << 8) | b4);
- return nlmsvc_unlock_all_by_ip((struct sockaddr *)&sin);
+ return nlmsvc_unlock_all_by_ip(sap);
}
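write_unlock_ip() now accepts any presentation-format address by letting rpc_pton() fill a sockaddr_storage, instead of sscanf-ing four IPv4 octets. A userspace analogue using getaddrinfo(AI_NUMERICHOST), not part of the patch, which likewise handles both IPv4 and IPv6 text forms:

#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Convert a presentation-format IPv4 or IPv6 address into a sockaddr. */
static int pton_any(const char *str, struct sockaddr_storage *ss, socklen_t *len)
{
	struct addrinfo hints = { .ai_flags = AI_NUMERICHOST }, *res;

	if (getaddrinfo(str, NULL, &hints, &res) != 0)
		return -1;
	memcpy(ss, res->ai_addr, res->ai_addrlen);
	*len = res->ai_addrlen;
	freeaddrinfo(res);
	return 0;
}

int main(void)
{
	struct sockaddr_storage ss;
	socklen_t len;

	printf("%d\n", pton_any("192.0.2.5", &ss, &len));       /* 0 */
	printf("%d\n", pton_any("2001:db8::1", &ss, &len));     /* 0 */
	printf("%d\n", pton_any("not-an-address", &ss, &len));  /* -1 */
	return 0;
}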
/**
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 492c79b7800b..24d58adfe5fd 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -496,7 +496,9 @@ nfsd(void *vrqstp)
/* Lock the export hash tables for reading. */
exp_readlock();
+ validate_process_creds();
svc_process(rqstp);
+ validate_process_creds();
/* Unlock export hash tables */
exp_readunlock();
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 23341c1063bc..8fa09bfbcba7 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -684,6 +684,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
__be32 err;
int host_err;
+ validate_process_creds();
+
/*
* If we get here, then the client has already done an "open",
* and (hopefully) checked permission - so allow OWNER_OVERRIDE
@@ -740,6 +742,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
out_nfserr:
err = nfserrno(host_err);
out:
+ validate_process_creds();
return err;
}
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 1c9efb406a96..02bf17808bdc 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -325,6 +325,7 @@ clear_fields:
}
static struct backing_dev_info dlmfs_backing_dev_info = {
+ .name = "ocfs2-dlmfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/fs/open.c b/fs/open.c
index dd98e8076024..31191bf513e4 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -199,7 +199,7 @@ out:
int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
struct file *filp)
{
- int err;
+ int ret;
struct iattr newattrs;
/* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
@@ -214,12 +214,14 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
}
/* Remove suid/sgid on truncate too */
- newattrs.ia_valid |= should_remove_suid(dentry);
+ ret = should_remove_suid(dentry);
+ if (ret)
+ newattrs.ia_valid |= ret | ATTR_FORCE;
mutex_lock(&dentry->d_inode->i_mutex);
- err = notify_change(dentry, &newattrs);
+ ret = notify_change(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
- return err;
+ return ret;
}
static long do_sys_truncate(const char __user *pathname, loff_t length)
@@ -957,6 +959,8 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
int error;
struct file *f;
+ validate_creds(cred);
+
/*
* We must always pass in a valid mount pointer. Historically
* callers got away with not passing it, but we must enforce this at
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 0ff7566c767c..a7f0110fca4c 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -46,6 +46,7 @@ static const struct super_operations ramfs_ops;
static const struct inode_operations ramfs_dir_inode_operations;
static struct backing_dev_info ramfs_backing_dev_info = {
+ .name = "ramfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK |
BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
diff --git a/fs/super.c b/fs/super.c
index 2761d3e22ed9..9cda337ddae2 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -62,9 +62,6 @@ static struct super_block *alloc_super(struct file_system_type *type)
s = NULL;
goto out;
}
- INIT_LIST_HEAD(&s->s_dirty);
- INIT_LIST_HEAD(&s->s_io);
- INIT_LIST_HEAD(&s->s_more_io);
INIT_LIST_HEAD(&s->s_files);
INIT_LIST_HEAD(&s->s_instances);
INIT_HLIST_HEAD(&s->s_anon);
@@ -171,7 +168,7 @@ int __put_super_and_need_restart(struct super_block *sb)
* Drops a temporary reference, frees superblock if there's no
* references left.
*/
-static void put_super(struct super_block *sb)
+void put_super(struct super_block *sb)
{
spin_lock(&sb_lock);
__put_super(sb);
diff --git a/fs/sync.c b/fs/sync.c
index 3422ba61d86d..103cc7fdd3df 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -19,20 +19,22 @@
SYNC_FILE_RANGE_WAIT_AFTER)
/*
- * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0)
- * just dirties buffers with inodes so we have to submit IO for these buffers
- * via __sync_blockdev(). This also speeds up the wait == 1 case since in that
- * case write_inode() functions do sync_dirty_buffer() and thus effectively
- * write one block at a time.
+ * Do the filesystem syncing work. For simple filesystems
+ * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
+ * submit IO for these buffers via __sync_blockdev(). This also speeds up the
+ * wait == 1 case since in that case write_inode() functions do
+ * sync_dirty_buffer() and thus effectively write one block at a time.
*/
static int __sync_filesystem(struct super_block *sb, int wait)
{
/* Avoid doing twice syncing and cache pruning for quota sync */
- if (!wait)
+ if (!wait) {
writeout_quota_sb(sb, -1);
- else
+ writeback_inodes_sb(sb);
+ } else {
sync_quota_sb(sb, -1);
- sync_inodes_sb(sb, wait);
+ sync_inodes_sb(sb);
+ }
if (sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, wait);
return __sync_blockdev(sb->s_bdev, wait);
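__sync_filesystem() now kicks per-inode writeback explicitly on the wait == 0 pass (writeback_inodes_sb) and only blocks on the wait == 1 pass (sync_inodes_sb). A loose userspace analogy of the same two-pass shape for a single file, not part of the patch, using sync_file_range() to start writeback and fdatasync() to wait:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* First start writeback without blocking, then wait for it to complete --
 * the same shape as the wait==0 / wait==1 passes in sys_sync(). */
static int flush_file(int fd, int wait)
{
	if (!wait)
		return sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
	return fdatasync(fd);
}

int main(void)
{
	int fd = open("/tmp/sync-demo", O_CREAT | O_WRONLY, 0600);

	if (fd < 0)
		return 1;
	if (write(fd, "hello\n", 6) != 6) {
		close(fd);
		return 1;
	}
	flush_file(fd, 0);     /* kick writeback */
	flush_file(fd, 1);     /* wait for it */
	close(fd);
	return 0;
}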
@@ -118,7 +120,7 @@ restart:
*/
SYSCALL_DEFINE0(sync)
{
- wakeup_pdflush(0);
+ wakeup_flusher_threads(0);
sync_filesystems(0);
sync_filesystems(1);
if (unlikely(laptop_mode))
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 14f2d71ea3ce..0050fc40e8c9 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -760,6 +760,7 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
const struct inode_operations sysfs_dir_inode_operations = {
.lookup = sysfs_lookup,
.setattr = sysfs_setattr,
+ .setxattr = sysfs_setxattr,
};
static void remove_dir(struct sysfs_dirent *sd)
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 555f0ff988df..e28cecf179f5 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -18,6 +18,8 @@
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/xattr.h>
+#include <linux/security.h>
#include "sysfs.h"
extern struct super_block * sysfs_sb;
@@ -29,12 +31,14 @@ static const struct address_space_operations sysfs_aops = {
};
static struct backing_dev_info sysfs_backing_dev_info = {
+ .name = "sysfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static const struct inode_operations sysfs_inode_operations ={
.setattr = sysfs_setattr,
+ .setxattr = sysfs_setxattr,
};
int __init sysfs_inode_init(void)
@@ -42,18 +46,37 @@ int __init sysfs_inode_init(void)
return bdi_init(&sysfs_backing_dev_info);
}
+struct sysfs_inode_attrs *sysfs_init_inode_attrs(struct sysfs_dirent *sd)
+{
+ struct sysfs_inode_attrs *attrs;
+ struct iattr *iattrs;
+
+ attrs = kzalloc(sizeof(struct sysfs_inode_attrs), GFP_KERNEL);
+ if (!attrs)
+ return NULL;
+ iattrs = &attrs->ia_iattr;
+
+ /* assign default attributes */
+ iattrs->ia_mode = sd->s_mode;
+ iattrs->ia_uid = 0;
+ iattrs->ia_gid = 0;
+ iattrs->ia_atime = iattrs->ia_mtime = iattrs->ia_ctime = CURRENT_TIME;
+
+ return attrs;
+}
int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
{
struct inode * inode = dentry->d_inode;
struct sysfs_dirent * sd = dentry->d_fsdata;
- struct iattr * sd_iattr;
+ struct sysfs_inode_attrs *sd_attrs;
+ struct iattr *iattrs;
unsigned int ia_valid = iattr->ia_valid;
int error;
if (!sd)
return -EINVAL;
- sd_iattr = sd->s_iattr;
+ sd_attrs = sd->s_iattr;
error = inode_change_ok(inode, iattr);
if (error)
@@ -65,42 +88,77 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
if (error)
return error;
- if (!sd_iattr) {
+ if (!sd_attrs) {
/* setting attributes for the first time, allocate now */
- sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
- if (!sd_iattr)
+ sd_attrs = sysfs_init_inode_attrs(sd);
+ if (!sd_attrs)
return -ENOMEM;
- /* assign default attributes */
- sd_iattr->ia_mode = sd->s_mode;
- sd_iattr->ia_uid = 0;
- sd_iattr->ia_gid = 0;
- sd_iattr->ia_atime = sd_iattr->ia_mtime = sd_iattr->ia_ctime = CURRENT_TIME;
- sd->s_iattr = sd_iattr;
+ sd->s_iattr = sd_attrs;
+ } else {
+ /* attributes were changed at least once in the past */
+ iattrs = &sd_attrs->ia_iattr;
+
+ if (ia_valid & ATTR_UID)
+ iattrs->ia_uid = iattr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ iattrs->ia_gid = iattr->ia_gid;
+ if (ia_valid & ATTR_ATIME)
+ iattrs->ia_atime = timespec_trunc(iattr->ia_atime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_MTIME)
+ iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_CTIME)
+ iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime,
+ inode->i_sb->s_time_gran);
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = iattr->ia_mode;
+
+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ mode &= ~S_ISGID;
+ iattrs->ia_mode = sd->s_mode = mode;
+ }
}
+ return error;
+}
- /* attributes were changed atleast once in past */
-
- if (ia_valid & ATTR_UID)
- sd_iattr->ia_uid = iattr->ia_uid;
- if (ia_valid & ATTR_GID)
- sd_iattr->ia_gid = iattr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- sd_iattr->ia_atime = timespec_trunc(iattr->ia_atime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_MTIME)
- sd_iattr->ia_mtime = timespec_trunc(iattr->ia_mtime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_CTIME)
- sd_iattr->ia_ctime = timespec_trunc(iattr->ia_ctime,
- inode->i_sb->s_time_gran);
- if (ia_valid & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
-
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
- mode &= ~S_ISGID;
- sd_iattr->ia_mode = sd->s_mode = mode;
- }
+int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct sysfs_dirent *sd = dentry->d_fsdata;
+ struct sysfs_inode_attrs *iattrs;
+ void *secdata;
+ int error;
+ u32 secdata_len = 0;
+
+ if (!sd)
+ return -EINVAL;
+ if (!sd->s_iattr)
+ sd->s_iattr = sysfs_init_inode_attrs(sd);
+ if (!sd->s_iattr)
+ return -ENOMEM;
+
+ iattrs = sd->s_iattr;
+
+ if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
+ const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
+ error = security_inode_setsecurity(dentry->d_inode, suffix,
+ value, size, flags);
+ if (error)
+ goto out;
+ error = security_inode_getsecctx(dentry->d_inode,
+ &secdata, &secdata_len);
+ if (error)
+ goto out;
+ if (iattrs->ia_secdata)
+ security_release_secctx(iattrs->ia_secdata,
+ iattrs->ia_secdata_len);
+ iattrs->ia_secdata = secdata;
+ iattrs->ia_secdata_len = secdata_len;
+ } else
+ return -EINVAL;
+out:
return error;
}
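sysfs_setxattr() accepts only the security.* namespace: the value is handed to the LSM, and the resulting security context blob is cached in the new sysfs_inode_attrs so later inode instantiations can replay it. Sketch of the prefix dispatch alone, not part of the patch (the LSM calls are omitted):

#include <stdio.h>
#include <string.h>

#define XATTR_SECURITY_PREFIX      "security."
#define XATTR_SECURITY_PREFIX_LEN  (sizeof(XATTR_SECURITY_PREFIX) - 1)

/* Only xattrs in the security.* namespace are accepted; everything else
 * is rejected, mirroring the new sysfs_setxattr() behaviour. */
static int set_xattr(const char *name, const void *value, size_t size)
{
	(void)value;
	if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		return -1;                       /* -EINVAL in the kernel */
	printf("security xattr '%s' (%zu bytes) accepted\n",
	       name + XATTR_SECURITY_PREFIX_LEN, size);
	return 0;
}

int main(void)
{
	set_xattr("security.selinux", "context", 7);
	printf("%d\n", set_xattr("user.comment", "x", 1));   /* rejected: -1 */
	return 0;
}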
@@ -146,6 +204,7 @@ static int sysfs_count_nlink(struct sysfs_dirent *sd)
static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
{
struct bin_attribute *bin_attr;
+ struct sysfs_inode_attrs *iattrs;
inode->i_private = sysfs_get(sd);
inode->i_mapping->a_ops = &sysfs_aops;
@@ -154,16 +213,20 @@ static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
inode->i_ino = sd->s_ino;
lockdep_set_class(&inode->i_mutex, &sysfs_inode_imutex_key);
- if (sd->s_iattr) {
+ iattrs = sd->s_iattr;
+ if (iattrs) {
/* sysfs_dirent has non-default attributes
* get them for the new inode from persistent copy
* in sysfs_dirent
*/
- set_inode_attr(inode, sd->s_iattr);
+ set_inode_attr(inode, &iattrs->ia_iattr);
+ if (iattrs->ia_secdata)
+ security_inode_notifysecctx(inode,
+ iattrs->ia_secdata,
+ iattrs->ia_secdata_len);
} else
set_default_inode_attr(inode, sd->s_mode);
-
/* initialize inode according to type */
switch (sysfs_type(sd)) {
case SYSFS_DIR:
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 1d897ad808e0..c5081ad77026 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -16,6 +16,7 @@
#include <linux/kobject.h>
#include <linux/namei.h>
#include <linux/mutex.h>
+#include <linux/security.h>
#include "sysfs.h"
@@ -209,6 +210,7 @@ static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *co
}
const struct inode_operations sysfs_symlink_inode_operations = {
+ .setxattr = sysfs_setxattr,
.readlink = generic_readlink,
.follow_link = sysfs_follow_link,
.put_link = sysfs_put_link,
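
With .setxattr wired up for sysfs symlinks, userspace can attach security.* labels to them; any other namespace is rejected with -EINVAL. A minimal userspace sketch, not part of the patch (path and context value are hypothetical):

    #include <string.h>
    #include <sys/xattr.h>

    /* Relabel a sysfs symlink; lsetxattr() operates on the link itself. */
    static int relabel_sysfs_link(const char *path, const char *ctx)
    {
            return lsetxattr(path, "security.selinux", ctx,
                             strlen(ctx) + 1, 0);
    }
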
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 3fa0d98481e2..af4c4e7482ac 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -8,6 +8,8 @@
* This file is released under the GPLv2.
*/
+#include <linux/fs.h>
+
struct sysfs_open_dirent;
/* type-specific structures for sysfs_dirent->s_* union members */
@@ -31,6 +33,12 @@ struct sysfs_elem_bin_attr {
struct hlist_head buffers;
};
+struct sysfs_inode_attrs {
+ struct iattr ia_iattr;
+ void *ia_secdata;
+ u32 ia_secdata_len;
+};
+
/*
* sysfs_dirent - the building block of sysfs hierarchy. Each and
* every sysfs node is represented by single sysfs_dirent.
@@ -56,7 +64,7 @@ struct sysfs_dirent {
unsigned int s_flags;
ino_t s_ino;
umode_t s_mode;
- struct iattr *s_iattr;
+ struct sysfs_inode_attrs *s_iattr;
};
#define SD_DEACTIVATED_BIAS INT_MIN
@@ -148,6 +156,8 @@ static inline void __sysfs_put(struct sysfs_dirent *sd)
struct inode *sysfs_get_inode(struct sysfs_dirent *sd);
void sysfs_delete_inode(struct inode *inode);
int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
+int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags);
int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
int sysfs_inode_init(void);
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index eaf6d891d46f..1c8991b0db13 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -65,26 +65,14 @@
static int shrink_liability(struct ubifs_info *c, int nr_to_write)
{
int nr_written;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .range_end = LLONG_MAX,
- .nr_to_write = nr_to_write,
- };
-
- generic_sync_sb_inodes(c->vfs_sb, &wbc);
- nr_written = nr_to_write - wbc.nr_to_write;
+ nr_written = writeback_inodes_sb(c->vfs_sb);
if (!nr_written) {
/*
* Re-try again but wait on pages/inodes which are being
* written-back concurrently (e.g., by pdflush).
*/
- memset(&wbc, 0, sizeof(struct writeback_control));
- wbc.sync_mode = WB_SYNC_ALL;
- wbc.range_end = LLONG_MAX;
- wbc.nr_to_write = nr_to_write;
- generic_sync_sb_inodes(c->vfs_sb, &wbc);
- nr_written = nr_to_write - wbc.nr_to_write;
+ nr_written = sync_inodes_sb(c->vfs_sb);
}
dbg_budg("%d pages were written back", nr_written);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 26d2e0d80465..51763aa8f4de 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -438,12 +438,6 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
{
int i, err;
struct ubifs_info *c = sb->s_fs_info;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .nr_to_write = LONG_MAX,
- };
/*
* Zero @wait is just an advisory thing to help the file system shove
@@ -462,7 +456,7 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
* the user be able to get more accurate results of 'statfs()' after
* they synchronize the file system.
*/
- generic_sync_sb_inodes(sb, &wbc);
+ sync_inodes_sb(sb);
/*
* Synchronize write buffers, because 'ubifs_run_commit()' does not
@@ -1971,6 +1965,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
*
* Read-ahead will be disabled because @c->bdi.ra_pages is 0.
*/
+	c->bdi.name = "ubifs";
c->bdi.capabilities = BDI_CAP_MAP_COPY;
c->bdi.unplug_io_fn = default_unplug_io_fn;
err = bdi_init(&c->bdi);
diff --git a/fs/xattr.c b/fs/xattr.c
index 1c3d0af59ddf..6d4f6d3449fb 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -66,22 +66,28 @@ xattr_permission(struct inode *inode, const char *name, int mask)
return inode_permission(inode, mask);
}
-int
-vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- size_t size, int flags)
+/**
+ * __vfs_setxattr_noperm - perform setxattr operation without performing
+ * permission checks.
+ *
+ * @dentry - object to perform setxattr on
+ * @name - xattr name to set
+ * @value - value to set @name to
+ * @size - size of @value
+ * @flags - flags to pass into filesystem operations
+ *
+ * returns the result of the internal setxattr or setsecurity operations.
+ *
+ * This function requires the caller to lock the inode's i_mutex before it
+ * is executed. It also assumes that the caller will make the appropriate
+ * permission checks.
+ */
+int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
{
struct inode *inode = dentry->d_inode;
- int error;
-
- error = xattr_permission(inode, name, MAY_WRITE);
- if (error)
- return error;
+ int error = -EOPNOTSUPP;
- mutex_lock(&inode->i_mutex);
- error = security_inode_setxattr(dentry, name, value, size, flags);
- if (error)
- goto out;
- error = -EOPNOTSUPP;
if (inode->i_op->setxattr) {
error = inode->i_op->setxattr(dentry, name, value, size, flags);
if (!error) {
@@ -97,6 +103,29 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
if (!error)
fsnotify_xattr(dentry);
}
+
+ return error;
+}
+
+
+int
+vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ error = xattr_permission(inode, name, MAY_WRITE);
+ if (error)
+ return error;
+
+ mutex_lock(&inode->i_mutex);
+ error = security_inode_setxattr(dentry, name, value, size, flags);
+ if (error)
+ goto out;
+
+ error = __vfs_setxattr_noperm(dentry, name, value, size, flags);
+
out:
mutex_unlock(&inode->i_mutex);
return error;
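
A minimal sketch of the calling convention for the new __vfs_setxattr_noperm() (function name and attribute are illustrative, not part of the patch): the caller holds i_mutex and has already done its own permission checking, exactly as the kerneldoc above requires.

    static int example_set_security_ctx(struct dentry *dentry,
                                        const void *ctx, u32 ctxlen)
    {
            struct inode *inode = dentry->d_inode;
            int error;

            /* caller-side permission checks would go here */
            mutex_lock(&inode->i_mutex);
            error = __vfs_setxattr_noperm(dentry, "security.selinux",
                                          ctx, ctxlen, 0);
            mutex_unlock(&inode->i_mutex);
            return error;
    }
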
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 8070b34cc287..6c32f1d63d8c 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -485,14 +485,6 @@ xfs_vn_put_link(
}
STATIC int
-xfs_vn_permission(
- struct inode *inode,
- int mask)
-{
- return generic_permission(inode, mask, xfs_check_acl);
-}
-
-STATIC int
xfs_vn_getattr(
struct vfsmount *mnt,
struct dentry *dentry,
@@ -696,7 +688,7 @@ xfs_vn_fiemap(
}
static const struct inode_operations xfs_inode_operations = {
- .permission = xfs_vn_permission,
+ .check_acl = xfs_check_acl,
.truncate = xfs_vn_truncate,
.getattr = xfs_vn_getattr,
.setattr = xfs_vn_setattr,
@@ -724,7 +716,7 @@ static const struct inode_operations xfs_dir_inode_operations = {
.rmdir = xfs_vn_unlink,
.mknod = xfs_vn_mknod,
.rename = xfs_vn_rename,
- .permission = xfs_vn_permission,
+ .check_acl = xfs_check_acl,
.getattr = xfs_vn_getattr,
.setattr = xfs_vn_setattr,
.setxattr = generic_setxattr,
@@ -749,7 +741,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = {
.rmdir = xfs_vn_unlink,
.mknod = xfs_vn_mknod,
.rename = xfs_vn_rename,
- .permission = xfs_vn_permission,
+ .check_acl = xfs_check_acl,
.getattr = xfs_vn_getattr,
.setattr = xfs_vn_setattr,
.setxattr = generic_setxattr,
@@ -762,7 +754,7 @@ static const struct inode_operations xfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = xfs_vn_follow_link,
.put_link = xfs_vn_put_link,
- .permission = xfs_vn_permission,
+ .check_acl = xfs_check_acl,
.getattr = xfs_vn_getattr,
.setattr = xfs_vn_setattr,
.setxattr = generic_setxattr,
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 5406a601185c..e694263445f7 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -103,7 +103,6 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr, size, dir);
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
- flush_write_buffers();
}
static inline void dma_sync_single_for_device(struct device *dev,
@@ -116,7 +115,6 @@ static inline void dma_sync_single_for_device(struct device *dev,
if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr, size, dir);
debug_dma_sync_single_for_device(dev, addr, size, dir);
- flush_write_buffers();
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -132,7 +130,6 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
- flush_write_buffers();
} else
dma_sync_single_for_cpu(dev, addr, size, dir);
}
@@ -150,7 +147,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
ops->sync_single_range_for_device(dev, addr, offset, size, dir);
debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
- flush_write_buffers();
} else
dma_sync_single_for_device(dev, addr, size, dir);
}
@@ -165,7 +161,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
- flush_write_buffers();
}
static inline void
@@ -179,7 +174,6 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
ops->sync_sg_for_device(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
- flush_write_buffers();
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index aa00800adacc..90079c373f1c 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -81,14 +81,17 @@ extern void setup_per_cpu_areas(void);
#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
#define PER_CPU_FIRST_SECTION ""
#endif
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 5a2bd1cc9656..1ffb53f74d37 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -22,11 +22,9 @@ struct seq_file;
struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
- unsigned int (*extsize)(struct crypto_alg *alg,
- const struct crypto_type *frontend);
+ unsigned int (*extsize)(struct crypto_alg *alg);
int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
- int (*init_tfm)(struct crypto_tfm *tfm,
- const struct crypto_type *frontend);
+ int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
@@ -52,6 +50,7 @@ struct crypto_template {
struct crypto_instance *(*alloc)(struct rtattr **tb);
void (*free)(struct crypto_instance *inst);
+ int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
};
@@ -60,6 +59,7 @@ struct crypto_spawn {
struct list_head list;
struct crypto_alg *alg;
struct crypto_instance *inst;
+ const struct crypto_type *frontend;
u32 mask;
};
@@ -114,11 +114,19 @@ int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);
+int crypto_register_instance(struct crypto_template *tmpl,
+ struct crypto_instance *inst);
+
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst, u32 mask);
+int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
+ struct crypto_instance *inst,
+ const struct crypto_type *frontend);
+
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask);
+void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
static inline void crypto_set_spawn(struct crypto_spawn *spawn,
struct crypto_instance *inst)
@@ -129,8 +137,19 @@ static inline void crypto_set_spawn(struct crypto_spawn *spawn,
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
-struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask);
+
+static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
+ u32 type, u32 mask)
+{
+ return crypto_attr_alg2(rta, NULL, type, mask);
+}
+
int crypto_attr_u32(struct rtattr *rta, u32 *num);
+void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
+ unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
struct crypto_alg *alg);
@@ -157,12 +176,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
- unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
- unsigned long align = crypto_tfm_alg_alignmask(tfm);
-
- if (align <= crypto_tfm_ctx_alignment())
- align = 1;
- return (void *)ALIGN(addr, align);
+ return PTR_ALIGN(crypto_tfm_ctx(tfm),
+ crypto_tfm_alg_alignmask(tfm) + 1);
}
static inline struct crypto_instance *crypto_tfm_alg_instance(
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 55fa7bbdbc71..2f65a6e8ea4d 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -7,6 +7,7 @@
#include <linux/crypto.h>
#include <linux/kernel.h>
+#include <crypto/hash.h>
struct cryptd_ablkcipher {
struct crypto_ablkcipher base;
@@ -24,4 +25,20 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
+struct cryptd_ahash {
+ struct crypto_ahash base;
+};
+
+static inline struct cryptd_ahash *__cryptd_ahash_cast(
+ struct crypto_ahash *tfm)
+{
+ return (struct cryptd_ahash *)tfm;
+}
+
+/* alg_name should be algorithm to be cryptd-ed */
+struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask);
+struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
+void cryptd_free_ahash(struct cryptd_ahash *tfm);
+
#endif
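
A minimal sketch of the new cryptd ahash helpers, not part of the patch (the "sha1" algorithm name and function name are only examples): cryptd wraps a synchronous shash and exposes it as an asynchronous ahash.

    static int example_use_cryptd(void)
    {
            struct cryptd_ahash *ctfm;
            struct crypto_shash *child;

            ctfm = cryptd_alloc_ahash("sha1", 0, 0);
            if (IS_ERR(ctfm))
                    return PTR_ERR(ctfm);

            child = cryptd_ahash_child(ctfm);  /* underlying synchronous tfm */
            pr_info("cryptd wraps %s\n", crypto_shash_alg(child)->base.cra_name);

            /* ... issue ahash requests against &ctfm->base ... */

            cryptd_free_ahash(ctfm);
            return 0;
    }
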
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d56bb71617c3..26cb1eb16f4c 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -15,6 +15,42 @@
#include <linux/crypto.h>
+struct crypto_ahash;
+
+struct hash_alg_common {
+ unsigned int digestsize;
+ unsigned int statesize;
+
+ struct crypto_alg base;
+};
+
+struct ahash_request {
+ struct crypto_async_request base;
+
+ unsigned int nbytes;
+ struct scatterlist *src;
+ u8 *result;
+
+ /* This field may only be used by the ahash API code. */
+ void *priv;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+struct ahash_alg {
+ int (*init)(struct ahash_request *req);
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int (*digest)(struct ahash_request *req);
+ int (*export)(struct ahash_request *req, void *out);
+ int (*import)(struct ahash_request *req, const void *in);
+ int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+
+ struct hash_alg_common halg;
+};
+
struct shash_desc {
struct crypto_shash *tfm;
u32 flags;
@@ -24,7 +60,6 @@ struct shash_desc {
struct shash_alg {
int (*init)(struct shash_desc *desc);
- int (*reinit)(struct shash_desc *desc);
int (*update)(struct shash_desc *desc, const u8 *data,
unsigned int len);
int (*final)(struct shash_desc *desc, u8 *out);
@@ -32,38 +67,48 @@ struct shash_alg {
unsigned int len, u8 *out);
int (*digest)(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
+ int (*export)(struct shash_desc *desc, void *out);
+ int (*import)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
unsigned int descsize;
- unsigned int digestsize;
+
+ /* These fields must match hash_alg_common. */
+ unsigned int digestsize
+ __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
+ unsigned int statesize;
struct crypto_alg base;
};
struct crypto_ahash {
+ int (*init)(struct ahash_request *req);
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int (*digest)(struct ahash_request *req);
+ int (*export)(struct ahash_request *req, void *out);
+ int (*import)(struct ahash_request *req, const void *in);
+ int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+
+ unsigned int reqsize;
struct crypto_tfm base;
};
struct crypto_shash {
+ unsigned int descsize;
struct crypto_tfm base;
};
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
- return (struct crypto_ahash *)tfm;
+ return container_of(tfm, struct crypto_ahash, base);
}
-static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- mask &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_AHASH;
- mask |= CRYPTO_ALG_TYPE_AHASH_MASK;
-
- return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask));
-}
+struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
+ u32 mask);
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
@@ -72,7 +117,7 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
static inline void crypto_free_ahash(struct crypto_ahash *tfm)
{
- crypto_free_tfm(crypto_ahash_tfm(tfm));
+ crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
}
static inline unsigned int crypto_ahash_alignmask(
@@ -81,14 +126,26 @@ static inline unsigned int crypto_ahash_alignmask(
return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
}
-static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm)
+static inline struct hash_alg_common *__crypto_hash_alg_common(
+ struct crypto_alg *alg)
+{
+ return container_of(alg, struct hash_alg_common, base);
+}
+
+static inline struct hash_alg_common *crypto_hash_alg_common(
+ struct crypto_ahash *tfm)
{
- return &crypto_ahash_tfm(tfm)->crt_ahash;
+ return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
}
static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
- return crypto_ahash_crt(tfm)->digestsize;
+ return crypto_hash_alg_common(tfm)->digestsize;
+}
+
+static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
+{
+ return crypto_hash_alg_common(tfm)->statesize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
@@ -114,7 +171,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm(
static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
- return crypto_ahash_crt(tfm)->reqsize;
+ return tfm->reqsize;
}
static inline void *ahash_request_ctx(struct ahash_request *req)
@@ -122,44 +179,30 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
return req->__ctx;
}
-static inline int crypto_ahash_setkey(struct crypto_ahash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct ahash_tfm *crt = crypto_ahash_crt(tfm);
-
- return crt->setkey(tfm, key, keylen);
-}
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+int crypto_ahash_finup(struct ahash_request *req);
+int crypto_ahash_final(struct ahash_request *req);
+int crypto_ahash_digest(struct ahash_request *req);
-static inline int crypto_ahash_digest(struct ahash_request *req)
+static inline int crypto_ahash_export(struct ahash_request *req, void *out)
{
- struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
- return crt->digest(req);
+ return crypto_ahash_reqtfm(req)->export(req, out);
}
-static inline void crypto_ahash_export(struct ahash_request *req, u8 *out)
+static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{
- memcpy(out, ahash_request_ctx(req),
- crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
+ return crypto_ahash_reqtfm(req)->import(req, in);
}
-int crypto_ahash_import(struct ahash_request *req, const u8 *in);
-
static inline int crypto_ahash_init(struct ahash_request *req)
{
- struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
- return crt->init(req);
+ return crypto_ahash_reqtfm(req)->init(req);
}
static inline int crypto_ahash_update(struct ahash_request *req)
{
- struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
- return crt->update(req);
-}
-
-static inline int crypto_ahash_final(struct ahash_request *req)
-{
- struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
- return crt->final(req);
+ return crypto_ahash_reqtfm(req)->update(req);
}
static inline void ahash_request_set_tfm(struct ahash_request *req,
@@ -184,7 +227,7 @@ static inline struct ahash_request *ahash_request_alloc(
static inline void ahash_request_free(struct ahash_request *req)
{
- kfree(req);
+ kzfree(req);
}
static inline struct ahash_request *ahash_request_cast(
@@ -251,6 +294,11 @@ static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
return crypto_shash_alg(tfm)->digestsize;
}
+static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->statesize;
+}
+
static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
{
return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
@@ -268,7 +316,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
- return crypto_shash_alg(tfm)->descsize;
+ return tfm->descsize;
}
static inline void *shash_desc_ctx(struct shash_desc *desc)
@@ -281,12 +329,15 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out);
-static inline void crypto_shash_export(struct shash_desc *desc, u8 *out)
+static inline int crypto_shash_export(struct shash_desc *desc, void *out)
{
- memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
+ return crypto_shash_alg(desc->tfm)->export(desc, out);
}
-int crypto_shash_import(struct shash_desc *desc, const u8 *in);
+static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
+{
+ return crypto_shash_alg(desc->tfm)->import(desc, in);
+}
static inline int crypto_shash_init(struct shash_desc *desc)
{
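
A minimal sketch of the reworked shash export/import interface, not part of the patch (descriptor setup is omitted; assumes both descriptors are bound to a "sha256" shash, whose exported state is the sha256_state added to <crypto/sha.h> further down):

    static int example_checkpoint(struct shash_desc *desc,
                                  struct shash_desc *desc2,
                                  const u8 *data, unsigned int len)
    {
            struct sha256_state state;
            int err;

            err = crypto_shash_init(desc);
            if (err)
                    return err;
            err = crypto_shash_update(desc, data, len);
            if (err)
                    return err;
            err = crypto_shash_export(desc, &state);   /* save partial state */
            if (err)
                    return err;
            return crypto_shash_import(desc2, &state); /* resume elsewhere */
    }
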
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 82b70564bcab..5bfad8c80595 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -34,6 +34,22 @@ struct crypto_hash_walk {
unsigned int flags;
};
+struct ahash_instance {
+ struct ahash_alg alg;
+};
+
+struct shash_instance {
+ struct shash_alg alg;
+};
+
+struct crypto_ahash_spawn {
+ struct crypto_spawn base;
+};
+
+struct crypto_shash_spawn {
+ struct crypto_spawn base;
+};
+
extern const struct crypto_type crypto_ahash_type;
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
@@ -43,18 +59,100 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
struct crypto_hash_walk *walk,
struct scatterlist *sg, unsigned int len);
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+ return !(walk->entrylen | walk->total);
+}
+
+int crypto_register_ahash(struct ahash_alg *alg);
+int crypto_unregister_ahash(struct ahash_alg *alg);
+int ahash_register_instance(struct crypto_template *tmpl,
+ struct ahash_instance *inst);
+void ahash_free_instance(struct crypto_instance *inst);
+
+int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+ struct hash_alg_common *alg,
+ struct crypto_instance *inst);
+
+static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+
int crypto_register_shash(struct shash_alg *alg);
int crypto_unregister_shash(struct shash_alg *alg);
+int shash_register_instance(struct crypto_template *tmpl,
+ struct shash_instance *inst);
+void shash_free_instance(struct crypto_instance *inst);
+
+int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
+ struct shash_alg *alg,
+ struct crypto_instance *inst);
+
+static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
+
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{
- return crypto_tfm_ctx(&tfm->base);
+ return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
+
+static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
+{
+ return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
+ halg);
+}
+
+static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
+ unsigned int reqsize)
+{
+ tfm->reqsize = reqsize;
+}
+
+static inline struct crypto_instance *ahash_crypto_instance(
+ struct ahash_instance *inst)
+{
+ return container_of(&inst->alg.halg.base, struct crypto_instance, alg);
}
-static inline struct ahash_alg *crypto_ahash_alg(
- struct crypto_ahash *tfm)
+static inline struct ahash_instance *ahash_instance(
+ struct crypto_instance *inst)
{
- return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash;
+ return container_of(&inst->alg, struct ahash_instance, alg.halg.base);
+}
+
+static inline void *ahash_instance_ctx(struct ahash_instance *inst)
+{
+ return crypto_instance_ctx(ahash_crypto_instance(inst));
+}
+
+static inline unsigned int ahash_instance_headroom(void)
+{
+ return sizeof(struct ahash_alg) - sizeof(struct crypto_alg);
+}
+
+static inline struct ahash_instance *ahash_alloc_instance(
+ const char *name, struct crypto_alg *alg)
+{
+ return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
+}
+
+static inline struct crypto_ahash *crypto_spawn_ahash(
+ struct crypto_ahash_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
}
static inline int ahash_enqueue_request(struct crypto_queue *queue,
@@ -80,5 +178,46 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
return crypto_tfm_ctx(&tfm->base);
}
+static inline struct crypto_instance *shash_crypto_instance(
+ struct shash_instance *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct shash_instance *shash_instance(
+ struct crypto_instance *inst)
+{
+ return container_of(__crypto_shash_alg(&inst->alg),
+ struct shash_instance, alg);
+}
+
+static inline void *shash_instance_ctx(struct shash_instance *inst)
+{
+ return crypto_instance_ctx(shash_crypto_instance(inst));
+}
+
+static inline struct shash_instance *shash_alloc_instance(
+ const char *name, struct crypto_alg *alg)
+{
+ return crypto_alloc_instance2(name, alg,
+ sizeof(struct shash_alg) - sizeof(*alg));
+}
+
+static inline struct crypto_shash *crypto_spawn_shash(
+ struct crypto_shash_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
+{
+ return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_shash, base);
+}
+
#endif /* _CRYPTO_INTERNAL_HASH_H */
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index c0ccc2b1a2d8..069e85ba97e1 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -5,6 +5,8 @@
#ifndef _CRYPTO_SHA_H
#define _CRYPTO_SHA_H
+#include <linux/types.h>
+
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
@@ -62,4 +64,22 @@
#define SHA512_H6 0x1f83d9abfb41bd6bULL
#define SHA512_H7 0x5be0cd19137e2179ULL
+struct sha1_state {
+ u64 count;
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u8 buffer[SHA1_BLOCK_SIZE];
+};
+
+struct sha256_state {
+ u64 count;
+ u32 state[SHA256_DIGEST_SIZE / 4];
+ u8 buf[SHA256_BLOCK_SIZE];
+};
+
+struct sha512_state {
+ u64 count[2];
+ u64 state[SHA512_DIGEST_SIZE / 8];
+ u8 buf[SHA512_BLOCK_SIZE];
+};
+
#endif
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
new file mode 100644
index 000000000000..c4467c55df1e
--- /dev/null
+++ b/include/crypto/vmac.h
@@ -0,0 +1,61 @@
+/*
+ * Modified to interface to the Linux kernel
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __CRYPTO_VMAC_H
+#define __CRYPTO_VMAC_H
+
+/* --------------------------------------------------------------------------
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is hereby placed in the public domain.
+ * The authors offer no warranty. Use at your own risk.

+ * Please send bug reports to the authors.
+ * Last modified: 17 APR 08, 1700 PDT
+ * ----------------------------------------------------------------------- */
+
+/*
+ * User definable settings.
+ */
+#define VMAC_TAG_LEN 64
+#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
+#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES 128/* Must be 2^i for any 3 < i < 13. Standard = 128 */
+
+/*
+ * This implementation uses u32 and u64 as names for unsigned 32-
+ * and 64-bit integer types. These are defined in C99 stdint.h. The
+ * following may need adaptation if you are not running a C99 or
+ * Microsoft C environment.
+ */
+struct vmac_ctx {
+ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+ u64 polykey[2*VMAC_TAG_LEN/64];
+ u64 l3key[2*VMAC_TAG_LEN/64];
+ u64 polytmp[2*VMAC_TAG_LEN/64];
+ u64 cached_nonce[2];
+ u64 cached_aes[2];
+ int first_block_processed;
+};
+
+typedef u64 vmac_t;
+
+struct vmac_ctx_t {
+ struct crypto_cipher *child;
+ struct vmac_ctx __vmac_ctx;
+};
+
+#endif /* __CRYPTO_VMAC_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 9c75921f0c16..6299a259ed19 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -210,15 +210,25 @@ enum {
ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */
ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
ATA_CMD_EDD = 0x90, /* execute device diagnostic */
+ ATA_CMD_DOWNLOAD_MICRO = 0x92,
+ ATA_CMD_NOP = 0x00,
ATA_CMD_FLUSH = 0xE7,
ATA_CMD_FLUSH_EXT = 0xEA,
ATA_CMD_ID_ATA = 0xEC,
ATA_CMD_ID_ATAPI = 0xA1,
+ ATA_CMD_SERVICE = 0xA2,
ATA_CMD_READ = 0xC8,
ATA_CMD_READ_EXT = 0x25,
+ ATA_CMD_READ_QUEUED = 0x26,
+ ATA_CMD_READ_STREAM_EXT = 0x2B,
+ ATA_CMD_READ_STREAM_DMA_EXT = 0x2A,
ATA_CMD_WRITE = 0xCA,
ATA_CMD_WRITE_EXT = 0x35,
+ ATA_CMD_WRITE_QUEUED = 0x36,
+ ATA_CMD_WRITE_STREAM_EXT = 0x3B,
+ ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A,
ATA_CMD_WRITE_FUA_EXT = 0x3D,
+ ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
ATA_CMD_FPDMA_READ = 0x60,
ATA_CMD_FPDMA_WRITE = 0x61,
ATA_CMD_PIO_READ = 0x20,
@@ -235,6 +245,7 @@ enum {
ATA_CMD_PACKET = 0xA0,
ATA_CMD_VERIFY = 0x40,
ATA_CMD_VERIFY_EXT = 0x42,
+ ATA_CMD_WRITE_UNCORR_EXT = 0x45,
ATA_CMD_STANDBYNOW1 = 0xE0,
ATA_CMD_IDLEIMMEDIATE = 0xE1,
ATA_CMD_SLEEP = 0xE6,
@@ -243,15 +254,34 @@ enum {
ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
ATA_CMD_SET_MAX = 0xF9,
ATA_CMD_SET_MAX_EXT = 0x37,
- ATA_CMD_READ_LOG_EXT = 0x2f,
+ ATA_CMD_READ_LOG_EXT = 0x2F,
+ ATA_CMD_WRITE_LOG_EXT = 0x3F,
+ ATA_CMD_READ_LOG_DMA_EXT = 0x47,
+ ATA_CMD_WRITE_LOG_DMA_EXT = 0x57,
+ ATA_CMD_TRUSTED_RCV = 0x5C,
+ ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
+ ATA_CMD_TRUSTED_SND = 0x5E,
+ ATA_CMD_TRUSTED_SND_DMA = 0x5F,
ATA_CMD_PMP_READ = 0xE4,
ATA_CMD_PMP_WRITE = 0xE8,
ATA_CMD_CONF_OVERLAY = 0xB1,
+ ATA_CMD_SEC_SET_PASS = 0xF1,
+ ATA_CMD_SEC_UNLOCK = 0xF2,
+ ATA_CMD_SEC_ERASE_PREP = 0xF3,
+ ATA_CMD_SEC_ERASE_UNIT = 0xF4,
ATA_CMD_SEC_FREEZE_LOCK = 0xF5,
+ ATA_CMD_SEC_DISABLE_PASS = 0xF6,
+ ATA_CMD_CONFIG_STREAM = 0x51,
ATA_CMD_SMART = 0xB0,
ATA_CMD_MEDIA_LOCK = 0xDE,
ATA_CMD_MEDIA_UNLOCK = 0xDF,
ATA_CMD_DSM = 0x06,
+ ATA_CMD_CHK_MED_CRD_TYP = 0xD1,
+ ATA_CMD_CFA_REQ_EXT_ERR = 0x03,
+ ATA_CMD_CFA_WRITE_NE = 0x38,
+ ATA_CMD_CFA_TRANS_SECT = 0x87,
+ ATA_CMD_CFA_ERASE = 0xC0,
+ ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
@@ -306,6 +336,7 @@ enum {
/* SETFEATURE Sector counts for SATA features */
SATA_AN = 0x05, /* Asynchronous Notification */
SATA_DIPM = 0x03, /* Device Initiated Power Management */
+ SATA_FPDMA_AA = 0x02, /* DMA Setup FIS Auto-Activate */
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
@@ -525,6 +556,9 @@ static inline int ata_is_data(u8 prot)
#define ata_id_has_atapi_AN(id) \
( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
((id)[78] & (1 << 5)) )
+#define ata_id_has_fpdma_aa(id) \
+ ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
+ ((id)[78] & (1 << 2)) )
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 1d52425a6118..f169bcb90b58 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,8 @@
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/writeback.h>
#include <asm/atomic.h>
struct page;
@@ -23,9 +25,11 @@ struct dentry;
* Bits in backing_dev_info.state
*/
enum bdi_state {
- BDI_pdflush, /* A pdflush thread is working this device */
+ BDI_pending, /* On its way to being activated */
+ BDI_wb_alloc, /* Default embedded wb allocated */
BDI_async_congested, /* The async (write) queue is getting full */
BDI_sync_congested, /* The sync queue is getting full */
+ BDI_registered, /* bdi_register() was done */
BDI_unused, /* Available bits start here */
};
@@ -39,7 +43,22 @@ enum bdi_stat_item {
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+struct bdi_writeback {
+ struct list_head list; /* hangs off the bdi */
+
+ struct backing_dev_info *bdi; /* our parent bdi */
+ unsigned int nr;
+
+ unsigned long last_old_flush; /* last old data flush */
+
+ struct task_struct *task; /* writeback task */
+ struct list_head b_dirty; /* dirty inodes */
+ struct list_head b_io; /* parked for writeback */
+ struct list_head b_more_io; /* parked for more writeback */
+};
+
struct backing_dev_info {
+ struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
unsigned long state; /* Always use atomic bitops on this */
unsigned int capabilities; /* Device capabilities */
@@ -48,6 +67,8 @@ struct backing_dev_info {
void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
void *unplug_io_data;
+ char *name;
+
struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
struct prop_local_percpu completions;
@@ -56,6 +77,14 @@ struct backing_dev_info {
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
+ struct bdi_writeback wb; /* default writeback info for this bdi */
+ spinlock_t wb_lock; /* protects update side of wb_list */
+ struct list_head wb_list; /* the flusher threads hanging off this bdi */
+ unsigned long wb_mask; /* bitmask of registered tasks */
+ unsigned int wb_cnt; /* number of registered tasks */
+
+ struct list_head work_list;
+
struct device *dev;
#ifdef CONFIG_DEBUG_FS
@@ -71,6 +100,19 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
+void bdi_start_writeback(struct writeback_control *wbc);
+int bdi_writeback_task(struct bdi_writeback *wb);
+int bdi_has_dirty_io(struct backing_dev_info *bdi);
+
+extern spinlock_t bdi_lock;
+extern struct list_head bdi_list;
+
+static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+{
+ return !list_empty(&wb->b_dirty) ||
+ !list_empty(&wb->b_io) ||
+ !list_empty(&wb->b_more_io);
+}
static inline void __add_bdi_stat(struct backing_dev_info *bdi,
enum bdi_stat_item item, s64 amount)
@@ -261,6 +303,11 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}
+static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
+{
+ return bdi == &default_backing_dev_info;
+}
+
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -276,4 +323,10 @@ static inline bool mapping_cap_swap_backed(struct address_space *mapping)
return bdi_cap_swap_backed(mapping->backing_dev_info);
}
+static inline int bdi_sched_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4d668e05d458..47536197ffdd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,15 @@ struct notifier_block;
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
+#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+#define cpu_notifier(fn, pri) { \
+ static struct notifier_block fn##_nb __cpuinitdata = \
+ { .notifier_call = fn, .priority = pri }; \
+ register_cpu_notifier(&fn##_nb); \
+}
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#ifdef CONFIG_HOTPLUG_CPU
extern int register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
@@ -74,6 +83,8 @@ extern void cpu_maps_update_done(void);
#else /* CONFIG_SMP */
+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+
static inline int register_cpu_notifier(struct notifier_block *nb)
{
return 0;
@@ -99,11 +110,7 @@ extern struct sysdev_class cpu_sysdev_class;
extern void get_online_cpus(void);
extern void put_online_cpus(void);
-#define hotcpu_notifier(fn, pri) { \
- static struct notifier_block fn##_nb __cpuinitdata = \
- { .notifier_call = fn, .priority = pri }; \
- register_cpu_notifier(&fn##_nb); \
-}
+#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
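
A minimal usage sketch of the new cpu_notifier() helper, not part of the patch (callback name and body are hypothetical); unlike the old hotcpu_notifier(), it also registers when the caller is built in on a !HOTPLUG_CPU kernel.

    static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
                                              unsigned long action, void *hcpu)
    {
            /* hcpu carries the CPU number as an (unsigned long) */
            switch (action) {
            case CPU_ONLINE:
            case CPU_ONLINE_FROZEN:
                    /* set up per-cpu state */
                    break;
            case CPU_DEAD:
            case CPU_DEAD_FROZEN:
                    /* tear down per-cpu state */
                    break;
            }
            return NOTIFY_OK;
    }

    static int __init example_init(void)
    {
            cpu_notifier(example_cpu_callback, 0);
            return 0;
    }
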
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4fa999696310..24520a539c6f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -114,6 +114,13 @@ struct thread_group_cred {
*/
struct cred {
atomic_t usage;
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ atomic_t subscribers; /* number of processes subscribed */
+ void *put_addr;
+ unsigned magic;
+#define CRED_MAGIC 0x43736564
+#define CRED_MAGIC_DEAD 0x44656144
+#endif
uid_t uid; /* real UID of the task */
gid_t gid; /* real GID of the task */
uid_t suid; /* saved UID of the task */
@@ -143,7 +150,9 @@ struct cred {
};
extern void __put_cred(struct cred *);
+extern void exit_creds(struct task_struct *);
extern int copy_creds(struct task_struct *, unsigned long);
+extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern struct cred *prepare_usermodehelper_creds(void);
@@ -158,6 +167,60 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern void __init cred_init(void);
+/*
+ * check for validity of credentials
+ */
+#ifdef CONFIG_DEBUG_CREDENTIALS
+extern void __invalid_creds(const struct cred *, const char *, unsigned);
+extern void __validate_process_creds(struct task_struct *,
+ const char *, unsigned);
+
+static inline bool creds_are_invalid(const struct cred *cred)
+{
+ if (cred->magic != CRED_MAGIC)
+ return true;
+ if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
+ return true;
+#ifdef CONFIG_SECURITY_SELINUX
+ if ((unsigned long) cred->security < PAGE_SIZE)
+ return true;
+ if ((*(u32*)cred->security & 0xffffff00) ==
+ (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
+ return true;
+#endif
+ return false;
+}
+
+static inline void __validate_creds(const struct cred *cred,
+ const char *file, unsigned line)
+{
+ if (unlikely(creds_are_invalid(cred)))
+ __invalid_creds(cred, file, line);
+}
+
+#define validate_creds(cred) \
+do { \
+ __validate_creds((cred), __FILE__, __LINE__); \
+} while(0)
+
+#define validate_process_creds() \
+do { \
+ __validate_process_creds(current, __FILE__, __LINE__); \
+} while(0)
+
+extern void validate_creds_for_do_exit(struct task_struct *);
+#else
+static inline void validate_creds(const struct cred *cred)
+{
+}
+static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+{
+}
+static inline void validate_process_creds(void)
+{
+}
+#endif
+
/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
@@ -186,7 +249,9 @@ static inline struct cred *get_new_cred(struct cred *cred)
*/
static inline const struct cred *get_cred(const struct cred *cred)
{
- return get_new_cred((struct cred *) cred);
+ struct cred *nonconst_cred = (struct cred *) cred;
+ validate_creds(cred);
+ return get_new_cred(nonconst_cred);
}
/**
@@ -204,7 +269,7 @@ static inline void put_cred(const struct cred *_cred)
{
struct cred *cred = (struct cred *) _cred;
- BUG_ON(atomic_read(&(cred)->usage) <= 0);
+ validate_creds(cred);
if (atomic_dec_and_test(&(cred)->usage))
__put_cred(cred);
}
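
A small sketch of how the new credential-debugging hooks might be used around credential handling, not part of the patch (only meaningful with CONFIG_DEBUG_CREDENTIALS; otherwise they compile away):

    static void example_with_creds(void)
    {
            const struct cred *cred;

            validate_process_creds();       /* current's creds are sane */

            cred = get_cred(current_cred());  /* get_cred() now validates too */
            /* ... use cred ... */
            put_cred(cred);
    }
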
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ec29fa268b94..fd929889e8dc 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -115,7 +115,6 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
-struct crypto_ahash;
struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
@@ -146,16 +145,6 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct ahash_request {
- struct crypto_async_request base;
-
- unsigned int nbytes;
- struct scatterlist *src;
- u8 *result;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
@@ -220,18 +209,6 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
-struct ahash_alg {
- int (*init)(struct ahash_request *req);
- int (*reinit)(struct ahash_request *req);
- int (*update)(struct ahash_request *req);
- int (*final)(struct ahash_request *req);
- int (*digest)(struct ahash_request *req);
- int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen);
-
- unsigned int digestsize;
-};
-
struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
@@ -318,7 +295,6 @@ struct rng_alg {
#define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest
#define cra_hash cra_u.hash
-#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress
#define cra_rng cra_u.rng
@@ -346,7 +322,6 @@ struct crypto_alg {
struct cipher_alg cipher;
struct digest_alg digest;
struct hash_alg hash;
- struct ahash_alg ahash;
struct compress_alg compress;
struct rng_alg rng;
} cra_u;
@@ -433,18 +408,6 @@ struct hash_tfm {
unsigned int digestsize;
};
-struct ahash_tfm {
- int (*init)(struct ahash_request *req);
- int (*update)(struct ahash_request *req);
- int (*final)(struct ahash_request *req);
- int (*digest)(struct ahash_request *req);
- int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen);
-
- unsigned int digestsize;
- unsigned int reqsize;
-};
-
struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
@@ -465,7 +428,6 @@ struct rng_tfm {
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
-#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress
#define crt_rng crt_u.rng
@@ -479,7 +441,6 @@ struct crypto_tfm {
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
- struct ahash_tfm ahash;
struct compress_tfm compress;
struct rng_tfm rng;
} crt_u;
@@ -770,7 +731,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
- kfree(req);
+ kzfree(req);
}
static inline void ablkcipher_request_set_callback(
@@ -901,7 +862,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
static inline void aead_request_free(struct aead_request *req)
{
- kfree(req);
+ kzfree(req);
}
static inline void aead_request_set_callback(struct aead_request *req,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 07dfd460d286..c0f6c3cd788c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -98,11 +98,6 @@ static inline int is_device_dma_capable(struct device *dev)
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
-static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
-{
- return addr + size <= mask;
-}
-
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index bb5489c82c99..a8a3e1ac281d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -43,7 +43,7 @@ extern const char * dmi_get_system_info(int field);
extern const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from);
extern void dmi_scan_machine(void);
-extern int dmi_get_year(int field);
+extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
@@ -58,7 +58,16 @@ static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
static inline void dmi_scan_machine(void) { return; }
-static inline int dmi_get_year(int year) { return 0; }
+static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
+{
+ if (yearp)
+ *yearp = 0;
+ if (monthp)
+ *monthp = 0;
+ if (dayp)
+ *dayp = 0;
+ return false;
+}
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
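
A sketch of converting a dmi_get_year() caller to the new dmi_get_date(), not part of the patch (the 2001 cutoff is just an example); the month/day pointers may be NULL when only the year is needed.

    static bool example_bios_is_recent(void)
    {
            int year;

            /* previously: return dmi_get_year(DMI_BIOS_DATE) >= 2001; */
            return dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
                   year >= 2001;
    }
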
diff --git a/include/linux/fips.h b/include/linux/fips.h
new file mode 100644
index 000000000000..f8fb07b0b6b8
--- /dev/null
+++ b/include/linux/fips.h
@@ -0,0 +1,10 @@
+#ifndef _FIPS_H
+#define _FIPS_H
+
+#ifdef CONFIG_CRYPTO_FIPS
+extern int fips_enabled;
+#else
+#define fips_enabled 0
+#endif
+
+#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 73e9b643e455..a79f48373e7e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -715,7 +715,7 @@ struct posix_acl;
struct inode {
struct hlist_node i_hash;
- struct list_head i_list;
+ struct list_head i_list; /* backing dev IO list */
struct list_head i_sb_list;
struct list_head i_dentry;
unsigned long i_ino;
@@ -1336,9 +1336,6 @@ struct super_block {
struct xattr_handler **s_xattr;
struct list_head s_inodes; /* all inodes */
- struct list_head s_dirty; /* dirty inodes */
- struct list_head s_io; /* parked for writeback */
- struct list_head s_more_io; /* parked for more writeback */
struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_files;
/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
@@ -1528,6 +1525,7 @@ struct inode_operations {
void (*put_link) (struct dentry *, struct nameidata *, void *);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int);
+ int (*check_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1788,6 +1786,7 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
struct vfsmount *mnt);
extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
int __put_super_and_need_restart(struct super_block *sb);
+void put_super(struct super_block *sb);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -1998,12 +1997,25 @@ extern void bd_release_from_disk(struct block_device *, struct gendisk *);
#define CHRDEV_MAJOR_HASH_SIZE 255
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
-extern int register_chrdev(unsigned int, const char *,
- const struct file_operations *);
-extern void unregister_chrdev(unsigned int, const char *);
+extern int __register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops);
+extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name);
extern void unregister_chrdev_region(dev_t, unsigned);
extern void chrdev_show(struct seq_file *,off_t);
+static inline int register_chrdev(unsigned int major, const char *name,
+ const struct file_operations *fops)
+{
+ return __register_chrdev(major, 0, 256, name, fops);
+}
+
+static inline void unregister_chrdev(unsigned int major, const char *name)
+{
+ __unregister_chrdev(major, 0, 256, name);
+}
+
/* fs/block_dev.c */
#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
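
A minimal sketch of the new __register_chrdev()/__unregister_chrdev() pair, not part of the patch (device name, minor count and example_fops are hypothetical); passing major == 0 still requests dynamic allocation, and the old register_chrdev()/unregister_chrdev() calls keep working through the 0..255 wrappers above.

    static int example_major;

    static int __init example_init(void)
    {
            /* claim only minors 0..15 instead of the full 0..255 range */
            example_major = __register_chrdev(0, 0, 16, "exampledev",
                                              &example_fops);
            if (example_major < 0)
                    return example_major;
            return 0;
    }

    static void __exit example_exit(void)
    {
            __unregister_chrdev(example_major, 0, 16, "exampledev");
    }
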
@@ -2070,8 +2082,6 @@ static inline void invalidate_remote_inode(struct inode *inode)
extern int invalidate_inode_pages2(struct address_space *mapping);
extern int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-extern void generic_sync_sb_inodes(struct super_block *sb,
- struct writeback_control *wbc);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
@@ -2186,7 +2196,6 @@ extern int bdev_read_only(struct block_device *);
extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
-extern int sb_has_dirty_inodes(struct super_block *);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index a81170de7f6b..23f7179bf74e 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -93,16 +93,22 @@ void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
+ int type, unsigned long len,
unsigned long flags, int pc);
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
unsigned long flags, int pc);
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
void tracing_record_cmdline(struct task_struct *tsk);
+struct event_filter;
+
struct ftrace_event_call {
struct list_head list;
char *name;
@@ -110,16 +116,18 @@ struct ftrace_event_call {
struct dentry *dir;
struct trace_event *event;
int enabled;
- int (*regfunc)(void);
- void (*unregfunc)(void);
+ int (*regfunc)(void *);
+ void (*unregfunc)(void *);
int id;
int (*raw_init)(void);
- int (*show_format)(struct trace_seq *s);
- int (*define_fields)(void);
+ int (*show_format)(struct ftrace_event_call *call,
+ struct trace_seq *s);
+ int (*define_fields)(struct ftrace_event_call *);
struct list_head fields;
int filter_active;
- void *filter;
+ struct event_filter *filter;
void *mod;
+ void *data;
atomic_t profile_count;
int (*profile_enable)(struct ftrace_event_call *);
@@ -129,15 +137,25 @@ struct ftrace_event_call {
#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 128
-extern int init_preds(struct ftrace_event_call *call);
extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
-extern int filter_current_check_discard(struct ftrace_event_call *call,
+extern int filter_current_check_discard(struct ring_buffer *buffer,
+ struct ftrace_event_call *call,
void *rec,
struct ring_buffer_event *event);
-extern int trace_define_field(struct ftrace_event_call *call, char *type,
- char *name, int offset, int size, int is_signed);
+enum {
+ FILTER_OTHER = 0,
+ FILTER_STATIC_STRING,
+ FILTER_DYN_STRING,
+ FILTER_PTR_STRING,
+};
+
+extern int trace_define_field(struct ftrace_event_call *call,
+ const char *type, const char *name,
+ int offset, int size, int is_signed,
+ int filter_type);
+extern int trace_define_common_fields(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < 0)
@@ -162,11 +180,4 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
-#define __common_field(type, item, is_signed) \
- ret = trace_define_field(event_call, #type, "common_" #item, \
- offsetof(typeof(field.ent), item), \
- sizeof(field.ent.item), is_signed); \
- if (ret) \
- return ret;
-
#endif /* _LINUX_FTRACE_EVENT_H */
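
A sketch of the updated trace_define_field() signature as it would appear in an event's ->define_fields() callback, not part of the patch (struct and field names are hypothetical); the extra argument selects the filter type, and the removed __common_field boilerplate is replaced by trace_define_common_fields().

    struct example_entry {
            struct trace_entry      ent;
            unsigned long           addr;
    };

    static int example_define_fields(struct ftrace_event_call *event_call)
    {
            struct example_entry field;
            int ret;

            ret = trace_define_common_fields(event_call);
            if (ret)
                    return ret;

            return trace_define_field(event_call, "unsigned long", "addr",
                                      offsetof(typeof(field), addr),
                                      sizeof(field.addr), 0, FILTER_OTHER);
    }
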
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8246c697863d..6d527ee82b2b 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -64,6 +64,12 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
+#ifndef PREEMPT_ACTIVE
+#define PREEMPT_ACTIVE_BITS 1
+#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+#endif
+
#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
@@ -132,7 +138,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
}
#endif
-#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
+#if defined(CONFIG_NO_HZ)
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
@@ -142,7 +148,7 @@ extern void rcu_nmi_exit(void);
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
+#endif /* #if defined(CONFIG_NO_HZ) */
/*
* It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/init.h b/include/linux/init.h
index 13b633ed695e..400adbb45414 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -103,8 +103,8 @@
#define __INIT .section ".init.text","ax"
#define __FINIT .previous
-#define __INITDATA .section ".init.data","aw"
-#define __INITRODATA .section ".init.rodata","a"
+#define __INITDATA .section ".init.data","aw",%progbits
+#define __INITRODATA .section ".init.rodata","a",%progbits
#define __FINITDATA .previous
#define __DEVINIT .section ".devinit.text", "ax"
@@ -305,9 +305,17 @@ void __init parse_early_options(char *cmdline);
#ifdef CONFIG_MODULES
#define __init_or_module
#define __initdata_or_module
+#define __initconst_or_module
+#define __INIT_OR_MODULE .text
+#define __INITDATA_OR_MODULE .data
+#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
#else
#define __init_or_module __init
#define __initdata_or_module __initdata
+#define __initconst_or_module __initconst
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
/* Functions marked as __devexit may be discarded at kernel link time, depending
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fc01b13be43..9e7f2e8fc66e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -94,6 +94,16 @@ extern struct group_info init_groups;
# define CAP_INIT_BSET CAP_INIT_EFF_SET
#endif
+#ifdef CONFIG_TREE_PREEMPT_RCU
+#define INIT_TASK_RCU_PREEMPT(tsk) \
+ .rcu_read_lock_nesting = 0, \
+ .rcu_read_unlock_special = 0, \
+ .rcu_blocked_node = NULL, \
+ .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
+#else
+#define INIT_TASK_RCU_PREEMPT(tsk)
+#endif
+
extern struct cred init_cred;
#ifdef CONFIG_PERF_COUNTERS
@@ -173,6 +183,7 @@ extern struct cred init_cred;
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 35e7df1e9f30..1ac57e522a1f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -50,6 +50,9 @@
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                       registered first in a shared interrupt is considered for
* performance reasons)
+ * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has finished.
+ * Used by threaded interrupts which need to keep the
+ * irq line disabled until the threaded handler has been run.
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -59,6 +62,7 @@
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
+#define IRQF_ONESHOT 0x00002000
/*
* Bits used by threaded handlers:
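A minimal sketch of how a driver would use the new IRQF_ONESHOT flag together with request_threaded_irq(); the foo_dev structure and the work done in the threaded handler are illustrative assumptions, not part of this patch:

#include <linux/interrupt.h>

struct foo_dev {
	void __iomem *regs;		/* illustrative device state */
};

/* Hard-irq handler: do the cheap part and defer the real work. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/* Threaded handler: may sleep (e.g. i2c traffic).  With IRQF_ONESHOT the
 * interrupt line stays masked until this function returns. */
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* ... talk to the (slow) device via foo->regs ... */
	(void)foo;
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo, int irq)
{
	return request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
				    IRQF_ONESHOT, "foo", foo);
}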
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cb2e77a3f7f7..ae9653dbcd78 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -69,6 +69,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
+#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
+#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -100,6 +102,9 @@ struct msi_desc;
* @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @set_wake: enable/disable power-management wake-on of an IRQ
*
+ * @bus_lock: function to lock access to slow bus (i2c) chips
+ * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
+ *
* @release: release function solely used by UML
* @typename: obsoleted by name, kept as migration helper
*/
@@ -123,6 +128,9 @@ struct irq_chip {
int (*set_type)(unsigned int irq, unsigned int flow_type);
int (*set_wake)(unsigned int irq, unsigned int on);
+ void (*bus_lock)(unsigned int irq);
+ void (*bus_sync_unlock)(unsigned int irq);
+
/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
void (*release)(unsigned int irq, void *dev_id);
@@ -220,13 +228,6 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
/*
- * Migration helpers for obsolete names, they will go away:
- */
-#define hw_interrupt_type irq_chip
-#define no_irq_type no_irq_chip
-typedef struct irq_desc irq_desc_t;
-
-/*
* Pick up the arch-dependent methods:
*/
#include <asm/hw_irq.h>
@@ -289,6 +290,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_nested_irq(unsigned int irq);
/*
* Monolithic do_IRQ implementation.
@@ -379,6 +381,8 @@ set_irq_chained_handler(unsigned int irq,
__set_irq_handler(irq, handle, 1, NULL);
}
+extern void set_irq_nested_thread(unsigned int irq, int nest);
+
extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);
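A rough sketch of the demultiplexing pattern the new handle_nested_irq() and set_irq_nested_thread() are aimed at, i.e. interrupt controllers behind a slow bus; the expander structure, its status-read helper and the child IRQ numbering are assumptions for illustration:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitops.h>

struct expander {
	unsigned int irq_base;		/* first child IRQ number */
	unsigned int nr_irqs;		/* number of child lines */
};

/* Assumed helper: reads the expander's interrupt status over the slow bus. */
unsigned long expander_read_status(struct expander *chip);

/* Threaded handler of the parent interrupt: read the (sleeping) status
 * register and run the nested child handlers in this same thread. */
static irqreturn_t expander_irq_thread(int irq, void *data)
{
	struct expander *chip = data;
	unsigned long pending = expander_read_status(chip);
	unsigned int bit;

	for (bit = 0; bit < chip->nr_irqs; bit++)
		if (pending & BIT(bit))
			handle_nested_irq(chip->irq_base + bit);

	return IRQ_HANDLED;
}

/* At setup time each child interrupt is marked as nested, so that a later
 * request_threaded_irq() on it does not create a handler thread of its own:
 *
 *	set_irq_nested_thread(chip->irq_base + i, 1);
 */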
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index ec87b212ff7d..7bf89bc8cbca 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -41,6 +41,12 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
; \
else
+#ifdef CONFIG_SMP
+#define irq_node(irq) (irq_to_desc(irq)->node)
+#else
+#define irq_node(irq) 0
+#endif
+
#endif /* CONFIG_GENERIC_HARDIRQS */
#define for_each_irq_nr(irq) \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d6320a3e8def..2b5b1e0899a8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -125,7 +125,7 @@ extern int _cond_resched(void);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
- void __might_sleep(char *file, int line);
+ void __might_sleep(char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
*
@@ -137,8 +137,9 @@ extern int _cond_resched(void);
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
+ static inline void __might_sleep(char *file, int line, int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
#endif
diff --git a/include/linux/key.h b/include/linux/key.h
index e544f466d69a..cd50dfa1d4c2 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -129,7 +129,10 @@ struct key {
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
- time_t expiry; /* time at which key expires (or 0) */
+ union {
+ time_t expiry; /* time at which key expires (or 0) */
+ time_t revoked_at; /* time at which key was revoked */
+ };
uid_t uid;
gid_t gid;
key_perm_t perm; /* access permissions */
@@ -275,6 +278,8 @@ static inline key_serial_t key_serial(struct key *key)
extern ctl_table key_sysctls[];
#endif
+extern void key_replace_session_keyring(void);
+
/*
* the userspace interface
*/
@@ -297,6 +302,7 @@ extern void key_init(void);
#define key_fsuid_changed(t) do { } while(0)
#define key_fsgid_changed(t) do { } while(0)
#define key_init() do { } while(0)
+#define key_replace_session_keyring() do { } while(0)
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index c0688eb72093..bd383f1944fb 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -52,5 +52,6 @@
#define KEYCTL_SET_TIMEOUT 15 /* set key timeout */
#define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */
#define KEYCTL_GET_SECURITY 17 /* get key security label */
+#define KEYCTL_SESSION_TO_PARENT 18 /* apply session keyring to parent process */
#endif /* _LINUX_KEYCTL_H */
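For reference, a hedged userspace sketch of how the new command is meant to be used: a helper process joins a fresh session keyring and then asks the kernel to install that keyring into its parent (typically the invoking shell). Raw syscall numbers are used on the assumption that a libkeyutils wrapper for the new command is not yet available:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define KEYCTL_JOIN_SESSION_KEYRING	1
#define KEYCTL_SESSION_TO_PARENT	18

int main(void)
{
	if (syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, "demo") < 0) {
		perror("join session keyring");
		return 1;
	}
	if (syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT) < 0) {
		perror("KEYCTL_SESSION_TO_PARENT");
		return 1;
	}
	return 0;
}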
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 47b39b7c7e84..dc2fd545db00 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -34,6 +34,8 @@ void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+
#else
#define kmemcheck_enabled 0
@@ -99,6 +101,11 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p,
{
}
+static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+ return true;
+}
+
#endif /* CONFIG_KMEMCHECK */
/*
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 6a63807f714e..3c7497d46ee9 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -23,18 +23,18 @@
#ifdef CONFIG_DEBUG_KMEMLEAK
-extern void kmemleak_init(void);
+extern void kmemleak_init(void) __ref;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
- gfp_t gfp);
-extern void kmemleak_free(const void *ptr);
-extern void kmemleak_free_part(const void *ptr, size_t size);
+ gfp_t gfp) __ref;
+extern void kmemleak_free(const void *ptr) __ref;
+extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_padding(const void *ptr, unsigned long offset,
- size_t size);
-extern void kmemleak_not_leak(const void *ptr);
-extern void kmemleak_ignore(const void *ptr);
+ size_t size) __ref;
+extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp);
-extern void kmemleak_no_scan(const void *ptr);
+ size_t length, gfp_t gfp) __ref;
+extern void kmemleak_no_scan(const void *ptr) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
int min_count, unsigned long flags,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e5b6e33c6571..76319bf03e37 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -143,7 +143,6 @@ enum {
ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
- ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
@@ -190,6 +189,7 @@ enum {
ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */
ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */
ATA_FLAG_DEBUGMSG = (1 << 13),
+ ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */
ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */
ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
@@ -386,6 +386,7 @@ enum {
ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
+ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b53df0d..9ccf0e286b2a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
struct lock_class *class;
struct stack_trace trace;
int distance;
+
+ /*
+ * The parent field is used to implement breadth-first search, and the
+ * bit 0 is reused to indicate if the lock has been accessed in BFS.
+ */
+ struct lock_list *parent;
};
/*
@@ -208,10 +214,12 @@ struct held_lock {
* interrupt context:
*/
unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
- unsigned int trylock:1;
+ unsigned int trylock:1; /* 16 bits */
+
unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:2; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
+ unsigned int references:11; /* 32 bits */
};
/*
@@ -291,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
+#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
+
+extern int lock_is_held(struct lockdep_map *lock);
+
extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
@@ -309,6 +321,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
+#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
+
#else /* !LOCKDEP */
static inline void lockdep_off(void)
@@ -353,6 +367,8 @@ struct lock_class_key { };
#define lockdep_depth(tsk) (0)
+#define lockdep_assert_held(l) do { } while (0)
+
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
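A small sketch of the intended use of the new lockdep_assert_held() annotation; the structure and function names are made up for illustration:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct my_dev {
	struct mutex lock;
	int state;
};

/* Callers must hold dev->lock; with lockdep enabled this now warns if they don't. */
static void my_dev_update_state(struct my_dev *dev)
{
	lockdep_assert_held(&dev->lock);
	dev->state++;
}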
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index e461b2c3d711..190c37854870 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -33,6 +33,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_IPC 4
#define LSM_AUDIT_DATA_TASK 5
#define LSM_AUDIT_DATA_KEY 6
+#define LSM_AUDIT_NO_AUDIT 7
struct task_struct *tsk;
union {
struct {
@@ -66,16 +67,19 @@ struct common_audit_data {
} key_struct;
#endif
} u;
- const char *function;
/* this union contains LSM specific data */
union {
+#ifdef CONFIG_SECURITY_SMACK
/* SMACK data */
struct smack_audit_data {
+ const char *function;
char *subject;
char *object;
char *request;
int result;
} smack_audit_data;
+#endif
+#ifdef CONFIG_SECURITY_SELINUX
/* SELinux data */
struct {
u32 ssid;
@@ -83,10 +87,12 @@ struct common_audit_data {
u16 tclass;
u32 requested;
u32 audited;
+ u32 denied;
struct av_decision *avd;
int result;
} selinux_audit_data;
- } lsm_priv;
+#endif
+ };
/* these callback will be implemented by a specific LSM */
void (*lsm_pre_audit)(struct audit_buffer *, void *);
void (*lsm_post_audit)(struct audit_buffer *, void *);
@@ -104,7 +110,7 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
/* Initialize an LSM audit data structure. */
#define COMMON_AUDIT_DATA_INIT(_d, _t) \
{ memset((_d), 0, sizeof(struct common_audit_data)); \
- (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; }
+ (_d)->type = LSM_AUDIT_DATA_##_t; }
void common_lsm_audit(struct common_audit_data *a);
diff --git a/include/linux/module.h b/include/linux/module.h
index 098bdb7bfacf..f8f92d015efe 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,10 +17,12 @@
#include <linux/moduleparam.h>
#include <linux/marker.h>
#include <linux/tracepoint.h>
-#include <asm/local.h>
+#include <asm/local.h>
#include <asm/module.h>
+#include <trace/events/module.h>
+
/* Not Yet Implemented */
#define MODULE_SUPPORTED_DEVICE(name)
@@ -462,7 +464,10 @@ static inline local_t *__module_ref_addr(struct module *mod, int cpu)
static inline void __module_get(struct module *module)
{
if (module) {
- local_inc(__module_ref_addr(module, get_cpu()));
+ unsigned int cpu = get_cpu();
+ local_inc(__module_ref_addr(module, cpu));
+ trace_module_get(module, _THIS_IP_,
+ local_read(__module_ref_addr(module, cpu)));
put_cpu();
}
}
@@ -473,8 +478,11 @@ static inline int try_module_get(struct module *module)
if (module) {
unsigned int cpu = get_cpu();
- if (likely(module_is_live(module)))
+ if (likely(module_is_live(module))) {
local_inc(__module_ref_addr(module, cpu));
+ trace_module_get(module, _THIS_IP_,
+ local_read(__module_ref_addr(module, cpu)));
+ }
else
ret = 0;
put_cpu();
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bd2eba530667..33b283601f62 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -472,6 +472,7 @@ enum lock_type4 {
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
+#define NFS4_VERSION 4
#define NFS4_MINOR_VERSION 0
#if defined(CONFIG_NFS_V4_1)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 19fe15d12042..320569eabe3b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -167,6 +167,15 @@ struct nfs_server {
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
+#define NFS_CAP_CHANGE_ATTR (1U << 5)
+#define NFS_CAP_FILEID (1U << 6)
+#define NFS_CAP_MODE (1U << 7)
+#define NFS_CAP_NLINK (1U << 8)
+#define NFS_CAP_OWNER (1U << 9)
+#define NFS_CAP_OWNER_GROUP (1U << 10)
+#define NFS_CAP_ATIME (1U << 11)
+#define NFS_CAP_CTIME (1U << 12)
+#define NFS_CAP_MTIME (1U << 13)
/* maximum number of slots to use */
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5df097..b752e807adde 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
static inline void acpi_nmi_enable(void) { }
#endif
-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+ arch_trigger_all_cpu_backtrace();
+
+ return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+ return false;
+}
#endif
#endif
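The new bool return value lets callers fall back when the architecture provides no arch_trigger_all_cpu_backtrace(); a minimal sketch of that calling pattern (report_hang() is an illustrative name):

#include <linux/kernel.h>
#include <linux/nmi.h>

static void report_hang(void)
{
	/* Prefer an all-CPU NMI backtrace; fall back to the local stack. */
	if (!trigger_all_cpu_backtrace())
		dump_stack();
}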
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1d9518bc4c58..5171639ecf0f 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -67,6 +67,9 @@ struct oprofile_operations {
/* Initiate a stack backtrace. Optional. */
void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
+
+ /* Multiplex between different events. Optional. */
+ int (*switch_events)(void);
/* CPU identification string. */
char * cpu_type;
};
@@ -171,7 +174,6 @@ struct op_sample;
struct op_entry {
struct ring_buffer_event *event;
struct op_sample *sample;
- unsigned long irq_flags;
unsigned long size;
unsigned long *data;
};
@@ -180,6 +182,7 @@ void oprofile_write_reserve(struct op_entry *entry,
struct pt_regs * const regs,
unsigned long pc, int code, int size);
int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_add_data64(struct op_entry *entry, u64 val);
int oprofile_write_commit(struct op_entry *entry);
#endif /* OPROFILE_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index aec3252afcf5..ed5d7501e181 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -132,7 +132,7 @@ static inline int page_cache_get_speculative(struct page *page)
{
VM_BUG_ON(in_interrupt());
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
VM_BUG_ON(!in_atomic());
# endif
@@ -170,7 +170,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
{
VM_BUG_ON(in_interrupt());
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
VM_BUG_ON(!in_atomic());
# endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 73b46b6b904f..c8fdcadce437 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -376,6 +376,9 @@
#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390
#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c
+/* AMD SB Chipset */
+#define PCI_DEVICE_ID_AMD_SB900_IDE 0x780c
+#define PCI_DEVICE_ID_AMD_SB900_SATA_IDE 0x7800
#define PCI_VENDOR_ID_VLSI 0x1004
#define PCI_DEVICE_ID_VLSI_82C592 0x0005
@@ -537,6 +540,7 @@
#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
+#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 68438e18fff4..3058cf9dd3d4 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -66,6 +66,14 @@
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
____cacheline_aligned_in_smp
+#define DECLARE_PER_CPU_ALIGNED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \
+ ____cacheline_aligned
+
+#define DEFINE_PER_CPU_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \
+ ____cacheline_aligned
+
/*
* Declaration/definition used for per-CPU variables that must be page aligned.
*/
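A hedged sketch of where the new cacheline-aligned per-CPU variant fits; the statistics structure and accounting helper are illustrative:

#include <linux/percpu.h>
#include <linux/types.h>

struct my_stats {
	u64 events;
	u64 bytes;
};

/* Cacheline-aligned, but not placed in the SMP "shared aligned" section. */
static DEFINE_PER_CPU_ALIGNED(struct my_stats, my_stats);

static void account_event(unsigned int len)
{
	struct my_stats *s = &get_cpu_var(my_stats);

	s->events++;
	s->bytes += len;
	put_cpu_var(my_stats);
}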
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index b53f7006cc4e..972f90d7a32f 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -216,6 +216,7 @@ struct perf_counter_attr {
#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
+#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
enum perf_counter_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -415,6 +416,9 @@ enum perf_callchain_context {
PERF_CONTEXT_MAX = (__u64)-4095,
};
+#define PERF_FLAG_FD_NO_GROUP (1U << 0)
+#define PERF_FLAG_FD_OUTPUT (1U << 1)
+
#ifdef __KERNEL__
/*
* Kernel-internal data types and definitions:
@@ -536,6 +540,7 @@ struct perf_counter {
struct list_head sibling_list;
int nr_siblings;
struct perf_counter *group_leader;
+ struct perf_counter *output;
const struct pmu *pmu;
enum perf_counter_active_state state;
@@ -761,6 +766,8 @@ extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_sample_rate;
extern void perf_counter_init(void);
+extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
+ void *record, int entry_size);
#ifndef perf_misc_flags
#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
deleted file mode 100644
index bfd92e1e5d2c..000000000000
--- a/include/linux/rcuclassic.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (classic version)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2001
- *
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
- *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUCLASSIC_H
-#define __LINUX_RCUCLASSIC_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
- long cur; /* Current batch number. */
- long completed; /* Number of the last completed batch */
- long pending; /* Number of the last pending batch */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- unsigned long gp_start; /* Time at which GP started in jiffies. */
- unsigned long jiffies_stall;
- /* Time at which to check for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
- int signaled;
-
- spinlock_t lock ____cacheline_internodealigned_in_smp;
- DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
- /* current batch to proceed. */
-} ____cacheline_internodealigned_in_smp;
-
-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
- return (a - b) < 0;
-}
-
-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
-{
- return (a - b) > 0;
-}
-
-/* Per-CPU data for Read-Copy UPdate. */
-struct rcu_data {
- /* 1) quiescent state handling : */
- long quiescbatch; /* Batch # for grace period */
- int passed_quiesc; /* User-mode/idle loop etc. */
- int qs_pending; /* core waits for quiesc state */
-
- /* 2) batch handling */
- /*
- * if nxtlist is not NULL, then:
- * batch:
- * The batch # for the last entry of nxtlist
- * [*nxttail[1], NULL = *nxttail[2]):
- * Entries that batch # <= batch
- * [*nxttail[0], *nxttail[1]):
- * Entries that batch # <= batch - 1
- * [nxtlist, *nxttail[0]):
- * Entries that batch # <= batch - 2
- * The grace period for these entries has completed, and
- * the other grace-period-completed entries may be moved
- * here temporarily in rcu_process_callbacks().
- */
- long batch;
- struct rcu_head *nxtlist;
- struct rcu_head **nxttail[3];
- long qlen; /* # of queued callbacks */
- struct rcu_head *donelist;
- struct rcu_head **donetail;
- long blimit; /* Upper limit on a processed batch */
- int cpu;
- struct rcu_head barrier;
-};
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
-
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
- lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-#endif
-
-#define __rcu_read_lock() \
- do { \
- preempt_disable(); \
- __acquire(RCU); \
- rcu_read_acquire(); \
- } while (0)
-#define __rcu_read_unlock() \
- do { \
- rcu_read_release(); \
- __release(RCU); \
- preempt_enable(); \
- } while (0)
-#define __rcu_read_lock_bh() \
- do { \
- local_bh_disable(); \
- __acquire(RCU_BH); \
- rcu_read_acquire(); \
- } while (0)
-#define __rcu_read_unlock_bh() \
- do { \
- rcu_read_release(); \
- __release(RCU_BH); \
- local_bh_enable(); \
- } while (0)
-
-#define __synchronize_sched() synchronize_rcu()
-
-#define call_rcu_sched(head, func) call_rcu(head, func)
-
-extern void __rcu_init(void);
-#define rcu_init_sched() do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
-
-#define rcu_enter_nohz() do { } while (0)
-#define rcu_exit_nohz() do { } while (0)
-
-/* A context switch is a grace period for rcuclassic. */
-static inline int rcu_blocking_is_gp(void)
-{
- return num_online_cpus() == 1;
-}
-
-#endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 15fbb3ca634d..95e0615f4d75 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,18 +51,26 @@ struct rcu_head {
void (*func)(struct rcu_head *head);
};
-/* Internal to kernel, but needed by rcupreempt.h. */
+/* Exported common interfaces */
+extern void synchronize_rcu(void);
+extern void synchronize_rcu_bh(void);
+extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
+extern void synchronize_sched_expedited(void);
+extern int sched_expedited_torture_stats(char *page);
+
+/* Internal to kernel */
+extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
+extern int rcu_needs_cpu(int cpu);
extern int rcu_scheduler_active;
-#if defined(CONFIG_CLASSIC_RCU)
-#include <linux/rcuclassic.h>
-#elif defined(CONFIG_TREE_RCU)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
-#elif defined(CONFIG_PREEMPT_RCU)
-#include <linux/rcupreempt.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
-#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
+#endif
#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -70,6 +78,16 @@ extern int rcu_scheduler_active;
(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire() \
+ lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire() do { } while (0)
+# define rcu_read_release() do { } while (0)
+#endif
+
/**
* rcu_read_lock - mark the beginning of an RCU read-side critical section.
*
@@ -99,7 +117,12 @@ extern int rcu_scheduler_active;
*
* It is illegal to block while in an RCU read-side critical section.
*/
-#define rcu_read_lock() __rcu_read_lock()
+static inline void rcu_read_lock(void)
+{
+ __rcu_read_lock();
+ __acquire(RCU);
+ rcu_read_acquire();
+}
/**
* rcu_read_unlock - marks the end of an RCU read-side critical section.
@@ -116,7 +139,12 @@ extern int rcu_scheduler_active;
* used as well. RCU does not care how the writers keep out of each
* others' way, as long as they do so.
*/
-#define rcu_read_unlock() __rcu_read_unlock()
+static inline void rcu_read_unlock(void)
+{
+ rcu_read_release();
+ __release(RCU);
+ __rcu_read_unlock();
+}
/**
* rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -129,14 +157,24 @@ extern int rcu_scheduler_active;
* can use just rcu_read_lock().
*
*/
-#define rcu_read_lock_bh() __rcu_read_lock_bh()
+static inline void rcu_read_lock_bh(void)
+{
+ __rcu_read_lock_bh();
+ __acquire(RCU_BH);
+ rcu_read_acquire();
+}
/*
* rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
*
* See rcu_read_lock_bh() for more information.
*/
-#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
+static inline void rcu_read_unlock_bh(void)
+{
+ rcu_read_release();
+ __release(RCU_BH);
+ __rcu_read_unlock_bh();
+}
/**
* rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
@@ -147,17 +185,34 @@ extern int rcu_scheduler_active;
* - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
*/
-#define rcu_read_lock_sched() preempt_disable()
-#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
+static inline void rcu_read_lock_sched(void)
+{
+ preempt_disable();
+ __acquire(RCU_SCHED);
+ rcu_read_acquire();
+}
+static inline notrace void rcu_read_lock_sched_notrace(void)
+{
+ preempt_disable_notrace();
+ __acquire(RCU_SCHED);
+}
/*
* rcu_read_unlock_sched - marks the end of a RCU-classic critical section
*
* See rcu_read_lock_sched for more information.
*/
-#define rcu_read_unlock_sched() preempt_enable()
-#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
-
+static inline void rcu_read_unlock_sched(void)
+{
+ rcu_read_release();
+ __release(RCU_SCHED);
+ preempt_enable();
+}
+static inline notrace void rcu_read_unlock_sched_notrace(void)
+{
+ __release(RCU_SCHED);
+ preempt_enable_notrace();
+}
/**
@@ -259,15 +314,4 @@ extern void call_rcu(struct rcu_head *head,
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
-/* Exported common interfaces */
-extern void synchronize_rcu(void);
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-/* Internal to kernel */
-extern void rcu_init(void);
-extern void rcu_scheduler_starting(void);
-extern int rcu_needs_cpu(int cpu);
-
#endif /* __LINUX_RCUPDATE_H */
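With rcu_read_lock()/rcu_read_unlock() now inline functions that carry the sparse and lockdep annotations themselves, a typical reader is unchanged; a minimal sketch, with the list and element types being illustrative:

#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct item {
	struct list_head list;
	int key;
	int value;
	struct rcu_head rcu;
};

static int lookup_value(struct list_head *head, int key)
{
	struct item *it;
	int value = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(it, head, list) {
		if (it->key == key) {
			value = it->value;
			break;
		}
	}
	rcu_read_unlock();

	return value;
}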
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
deleted file mode 100644
index fce522782ffa..000000000000
--- a/include/linux/rcupreempt.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUPREEMPT_H
-#define __LINUX_RCUPREEMPT_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-extern void rcu_qsctr_inc(int cpu);
-static inline void rcu_bh_qsctr_inc(int cpu) { }
-
-/*
- * Someone might want to pass call_rcu_bh as a function pointer.
- * So this needs to just be a rename and not a macro function.
- * (no parentheses)
- */
-#define call_rcu_bh call_rcu
-
-/**
- * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full
- * synchronize_sched()-style grace period elapses, in other words after
- * all currently executing preempt-disabled sections of code (including
- * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
- * completed.
- */
-extern void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-
-extern void __rcu_read_lock(void) __acquires(RCU);
-extern void __rcu_read_unlock(void) __releases(RCU);
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); }
-#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); }
-
-extern void __synchronize_sched(void);
-
-extern void __rcu_init(void);
-extern void rcu_init_sched(void);
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-extern long rcu_batches_completed(void);
-
-/*
- * Return the number of RCU batches processed thus far. Useful for debug
- * and statistic. The _bh variant is identifcal to straight RCU
- */
-static inline long rcu_batches_completed_bh(void)
-{
- return rcu_batches_completed();
-}
-
-#ifdef CONFIG_RCU_TRACE
-struct rcupreempt_trace;
-extern long *rcupreempt_flipctr(int cpu);
-extern long rcupreempt_data_completed(void);
-extern int rcupreempt_flip_flag(int cpu);
-extern int rcupreempt_mb_flag(int cpu);
-extern char *rcupreempt_try_flip_state_name(void);
-extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
-#endif
-
-struct softirq_action;
-
-#ifdef CONFIG_NO_HZ
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-#else
-# define rcu_enter_nohz() do { } while (0)
-# define rcu_exit_nohz() do { } while (0)
-#endif
-
-/*
- * A context switch is a grace period for rcupreempt synchronize_rcu()
- * only during early boot, before the scheduler has been initialized.
- * So, how the heck do we get a context switch? Well, if the caller
- * invokes synchronize_rcu(), they are willing to accept a context
- * switch, so we simply pretend that one happened.
- *
- * After boot, there might be a blocked or preempted task in an RCU
- * read-side critical section, so we cannot then take the fastpath.
- */
-static inline int rcu_blocking_is_gp(void)
-{
- return num_online_cpus() == 1 && !rcu_scheduler_active;
-}
-
-#endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
deleted file mode 100644
index b99ae073192a..000000000000
--- a/include/linux/rcupreempt_trace.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
- * http://lwn.net/Articles/253651/
- */
-
-#ifndef __LINUX_RCUPREEMPT_TRACE_H
-#define __LINUX_RCUPREEMPT_TRACE_H
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <asm/atomic.h>
-
-/*
- * PREEMPT_RCU data structures.
- */
-
-struct rcupreempt_trace {
- long next_length;
- long next_add;
- long wait_length;
- long wait_add;
- long done_length;
- long done_add;
- long done_remove;
- atomic_t done_invoked;
- long rcu_check_callbacks;
- atomic_t rcu_try_flip_1;
- atomic_t rcu_try_flip_e1;
- long rcu_try_flip_i1;
- long rcu_try_flip_ie1;
- long rcu_try_flip_g1;
- long rcu_try_flip_a1;
- long rcu_try_flip_ae1;
- long rcu_try_flip_a2;
- long rcu_try_flip_z1;
- long rcu_try_flip_ze1;
- long rcu_try_flip_z2;
- long rcu_try_flip_m1;
- long rcu_try_flip_me1;
- long rcu_try_flip_m2;
-};
-
-#ifdef CONFIG_RCU_TRACE
-#define RCU_TRACE(fn, arg) fn(arg);
-#else
-#define RCU_TRACE(fn, arg)
-#endif
-
-extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
-
-#endif /* __LINUX_RCUPREEMPT_TRACE_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5a5153806c42..a89307717825 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,264 +30,57 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
-/*
- * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
- * In theory, it should be possible to add more levels straightforwardly.
- * In practice, this has not been tested, so there is probably some
- * bug somewhere.
- */
-#define MAX_RCU_LVLS 3
-#define RCU_FANOUT (CONFIG_RCU_FANOUT)
-#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
-#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
-
-#if NR_CPUS <= RCU_FANOUT
-# define NUM_RCU_LVLS 1
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 (NR_CPUS)
-# define NUM_RCU_LVL_2 0
-# define NUM_RCU_LVL_3 0
-#elif NR_CPUS <= RCU_FANOUT_SQ
-# define NUM_RCU_LVLS 2
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
-# define NUM_RCU_LVL_2 (NR_CPUS)
-# define NUM_RCU_LVL_3 0
-#elif NR_CPUS <= RCU_FANOUT_CUBE
-# define NUM_RCU_LVLS 3
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
-# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
-# define NUM_RCU_LVL_3 NR_CPUS
-#else
-# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
-#endif /* #if (NR_CPUS) <= RCU_FANOUT */
-
-#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
-#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
-
-/*
- * Dynticks per-CPU state.
- */
-struct rcu_dynticks {
- int dynticks_nesting; /* Track nesting level, sort of. */
- int dynticks; /* Even value for dynticks-idle, else odd. */
- int dynticks_nmi; /* Even value for either dynticks-idle or */
- /* not in nmi handler, else odd. So this */
- /* remains even for nmi from irq handler. */
-};
-
-/*
- * Definition for node within the RCU grace-period-detection hierarchy.
- */
-struct rcu_node {
- spinlock_t lock;
- unsigned long qsmask; /* CPUs or groups that need to switch in */
- /* order for current grace period to proceed.*/
- unsigned long qsmaskinit;
- /* Per-GP initialization for qsmask. */
- unsigned long grpmask; /* Mask to apply to parent qsmask. */
- int grplo; /* lowest-numbered CPU or group here. */
- int grphi; /* highest-numbered CPU or group here. */
- u8 grpnum; /* CPU/group number for next level up. */
- u8 level; /* root is at level 0. */
- struct rcu_node *parent;
-} ____cacheline_internodealigned_in_smp;
-
-/* Index values for nxttail array in struct rcu_data. */
-#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
-#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
-#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
-#define RCU_NEXT_TAIL 3
-#define RCU_NEXT_SIZE 4
-
-/* Per-CPU data for read-copy update. */
-struct rcu_data {
- /* 1) quiescent-state and grace-period handling : */
- long completed; /* Track rsp->completed gp number */
- /* in order to detect GP end. */
- long gpnum; /* Highest gp number that this CPU */
- /* is aware of having started. */
- long passed_quiesc_completed;
- /* Value of completed at time of qs. */
- bool passed_quiesc; /* User-mode/idle loop etc. */
- bool qs_pending; /* Core waits for quiesc state. */
- bool beenonline; /* CPU online at least once. */
- struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
- unsigned long grpmask; /* Mask to apply to leaf qsmask. */
-
- /* 2) batch handling */
- /*
- * If nxtlist is not NULL, it is partitioned as follows.
- * Any of the partitions might be empty, in which case the
- * pointer to that partition will be equal to the pointer for
- * the following partition. When the list is empty, all of
- * the nxttail elements point to nxtlist, which is NULL.
- *
- * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
- * Entries that might have arrived after current GP ended
- * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
- * Entries known to have arrived before current GP ended
- * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
- * Entries that batch # <= ->completed - 1: waiting for current GP
- * [nxtlist, *nxttail[RCU_DONE_TAIL]):
- * Entries that batch # <= ->completed
- * The grace period for these entries has completed, and
- * the other grace-period-completed entries may be moved
- * here temporarily in rcu_process_callbacks().
- */
- struct rcu_head *nxtlist;
- struct rcu_head **nxttail[RCU_NEXT_SIZE];
- long qlen; /* # of queued callbacks */
- long blimit; /* Upper limit on a processed batch */
-
-#ifdef CONFIG_NO_HZ
- /* 3) dynticks interface. */
- struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
- int dynticks_snap; /* Per-GP tracking for dynticks. */
- int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
-#endif /* #ifdef CONFIG_NO_HZ */
-
- /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
-#ifdef CONFIG_NO_HZ
- unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
-#endif /* #ifdef CONFIG_NO_HZ */
- unsigned long offline_fqs; /* Kicked due to being offline. */
- unsigned long resched_ipi; /* Sent a resched IPI. */
-
- /* 5) __rcu_pending() statistics. */
- long n_rcu_pending; /* rcu_pending() calls since boot. */
- long n_rp_qs_pending;
- long n_rp_cb_ready;
- long n_rp_cpu_needs_gp;
- long n_rp_gp_completed;
- long n_rp_gp_started;
- long n_rp_need_fqs;
- long n_rp_need_nothing;
-
- int cpu;
-};
-
-/* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT 0 /* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
-#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
-#ifdef CONFIG_NO_HZ
-#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
-#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT RCU_FORCE_QS
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
- /* to take at least one */
- /* scheduling clock irq */
- /* before ratting on them. */
-
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/*
- * RCU global state, including node hierarchy. This hierarchy is
- * represented in "heap" form in a dense array. The root (first level)
- * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
- * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
- * and the third level in ->node[m+1] and following (->node[m+1] referenced
- * by ->level[2]). The number of levels is determined by the number of
- * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
- * consisting of a single rcu_node.
- */
-struct rcu_state {
- struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
- struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
- u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
- u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
- struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
-
- /* The following fields are guarded by the root rcu_node's lock. */
-
- u8 signaled ____cacheline_internodealigned_in_smp;
- /* Force QS state. */
- long gpnum; /* Current gp number. */
- long completed; /* # of last completed gp. */
- spinlock_t onofflock; /* exclude on/offline and */
- /* starting new GP. */
- spinlock_t fqslock; /* Only one task forcing */
- /* quiescent states. */
- unsigned long jiffies_force_qs; /* Time at which to invoke */
- /* force_quiescent_state(). */
- unsigned long n_force_qs; /* Number of calls to */
- /* force_quiescent_state(). */
- unsigned long n_force_qs_lh; /* ~Number of calls leaving */
- /* due to lock unavailable. */
- unsigned long n_force_qs_ngp; /* Number of calls leaving */
- /* due to no GP active. */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- unsigned long gp_start; /* Time at which GP started, */
- /* but in jiffies. */
- unsigned long jiffies_stall; /* Time at which to check */
- /* for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-#ifdef CONFIG_NO_HZ
- long dynticks_completed; /* Value of completed @ snap. */
-#endif /* #ifdef CONFIG_NO_HZ */
-};
+extern int rcu_needs_cpu(int cpu);
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
+#ifdef CONFIG_TREE_PREEMPT_RCU
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+extern void exit_rcu(void);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
- lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-#endif
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
static inline void __rcu_read_lock(void)
{
preempt_disable();
- __acquire(RCU);
- rcu_read_acquire();
}
+
static inline void __rcu_read_unlock(void)
{
- rcu_read_release();
- __release(RCU);
preempt_enable();
}
+
+static inline void exit_rcu(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
static inline void __rcu_read_lock_bh(void)
{
local_bh_disable();
- __acquire(RCU_BH);
- rcu_read_acquire();
}
static inline void __rcu_read_unlock_bh(void)
{
- rcu_read_release();
- __release(RCU_BH);
local_bh_enable();
}
#define __synchronize_sched() synchronize_rcu()
-#define call_rcu_sched(head, func) call_rcu(head, func)
+extern void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
-static inline void rcu_init_sched(void)
+static inline void synchronize_rcu_expedited(void)
{
+ synchronize_sched_expedited();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_sched_expedited();
}
extern void __rcu_init(void);
@@ -296,6 +89,11 @@ extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
+extern long rcu_batches_completed_sched(void);
+
+static inline void rcu_init_sched(void)
+{
+}
#ifdef CONFIG_NO_HZ
void rcu_enter_nohz(void);
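A hedged sketch of an updater using the new expedited primitive; the config structure and the assumption that updaters serialize among themselves are illustrative, not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct config {
	int value;
};

/* Publish a new config and free the old one after a fast grace period. */
static void replace_config(struct config **slot, struct config *newc)
{
	struct config *old = *slot;

	rcu_assign_pointer(*slot, newc);
	synchronize_rcu_expedited();
	kfree(old);
}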
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 29f8599e6bea..5fcc31ed5771 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -75,20 +75,6 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
}
/*
- * ring_buffer_event_discard can discard any event in the ring buffer.
- * it is up to the caller to protect against a reader from
- * consuming it or a writer from wrapping and replacing it.
- *
- * No external protection is needed if this is called before
- * the event is commited. But in that case it would be better to
- * use ring_buffer_discard_commit.
- *
- * Note, if an event that has not been committed is discarded
- * with ring_buffer_event_discard, it must still be committed.
- */
-void ring_buffer_event_discard(struct ring_buffer_event *event);
-
-/*
* ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
* must not be called on the discarded event. This function
@@ -154,8 +140,17 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer);
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_reset(struct ring_buffer *buffer);
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
struct ring_buffer *buffer_b, int cpu);
+#else
+static inline int
+ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+ struct ring_buffer *buffer_b, int cpu)
+{
+ return -ENODEV;
+}
+#endif
int ring_buffer_empty(struct ring_buffer *buffer);
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
@@ -170,7 +165,6 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
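With the swap operation stubbed out when CONFIG_RING_BUFFER_ALLOW_SWAP is not set, callers can treat -ENODEV as "snapshotting unavailable"; a minimal sketch of that pattern (the function and buffer names are illustrative):

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

static int snapshot_cpu(struct ring_buffer *live, struct ring_buffer *snap, int cpu)
{
	int ret = ring_buffer_swap_cpu(snap, live, cpu);

	/* Without CONFIG_RING_BUFFER_ALLOW_SWAP the stub returns -ENODEV. */
	if (ret == -ENODEV)
		pr_info("per-cpu buffer swap not configured, skipping snapshot\n");

	return ret;
}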
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f1ea4a66957..f3d74bd04d18 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,8 @@
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
+/* Can be ORed in to make sure the process is reverted to SCHED_NORMAL on fork */
+#define SCHED_RESET_ON_FORK 0x40000000
#ifdef __KERNEL__
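A userspace sketch of the new SCHED_RESET_ON_FORK bit: an RT task can OR it into the policy so that its children fall back to the default policy on fork. The priority value is arbitrary, and the fallback #define only covers headers that do not yet export the flag:

#include <sched.h>
#include <stdio.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK	0x40000000
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	/* fork()ed children now start with the default policy and priority. */
	return 0;
}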
@@ -796,18 +798,19 @@ enum cpu_idle_type {
#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE
#ifdef CONFIG_SMP
-#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
-#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
-#define SD_BALANCE_EXEC 4 /* Balance on exec */
-#define SD_BALANCE_FORK 8 /* Balance on fork, clone */
-#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
-#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
-#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
-#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
-#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
+#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
+#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
+#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
+#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
+#define SD_WAKE_IDLE 0x0010 /* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
+#define SD_WAKE_BALANCE 0x0040 /* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */
+#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
+#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR 0x0800 /* Gain latency sacrificing cache hit */
+#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
enum powersavings_balance_level {
POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
@@ -827,7 +830,7 @@ static inline int sd_balance_for_mc_power(void)
if (sched_smt_power_savings)
return SD_POWERSAVINGS_BALANCE;
- return 0;
+ return SD_PREFER_SIBLING;
}
static inline int sd_balance_for_package_power(void)
@@ -835,7 +838,7 @@ static inline int sd_balance_for_package_power(void)
if (sched_mc_power_savings | sched_smt_power_savings)
return SD_POWERSAVINGS_BALANCE;
- return 0;
+ return SD_PREFER_SIBLING;
}
/*
@@ -857,15 +860,9 @@ struct sched_group {
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
- * single CPU. This is read only (except for setup, hotplug CPU).
- * Note : Never change cpu_power without recompute its reciprocal
+ * single CPU.
*/
- unsigned int __cpu_power;
- /*
- * reciprocal value of cpu_power to avoid expensive divides
- * (see include/linux/reciprocal_div.h)
- */
- u32 reciprocal_cpu_power;
+ unsigned int cpu_power;
/*
* The CPUs this group covers.
@@ -918,6 +915,7 @@ struct sched_domain {
unsigned int newidle_idx;
unsigned int wake_idx;
unsigned int forkexec_idx;
+ unsigned int smt_gain;
int flags; /* See SD_* */
enum sched_domain_level level;
@@ -1045,7 +1043,6 @@ struct sched_class {
struct rq *busiest, struct sched_domain *sd,
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
- int (*needs_post_schedule) (struct rq *this_rq);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
@@ -1110,6 +1107,8 @@ struct sched_entity {
u64 wait_max;
u64 wait_count;
u64 wait_sum;
+ u64 iowait_count;
+ u64 iowait_sum;
u64 sleep_start;
u64 sleep_max;
@@ -1163,6 +1162,8 @@ struct sched_rt_entity {
#endif
};
+struct rcu_node;
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -1206,10 +1207,12 @@ struct task_struct {
unsigned int policy;
cpumask_t cpus_allowed;
-#ifdef CONFIG_PREEMPT_RCU
+#ifdef CONFIG_TREE_PREEMPT_RCU
int rcu_read_lock_nesting;
- int rcu_flipctr_idx;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+ char rcu_read_unlock_special;
+ struct rcu_node *rcu_blocked_node;
+ struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
@@ -1230,11 +1233,19 @@ struct task_struct {
unsigned did_exec:1;
unsigned in_execve:1; /* Tell the LSMs that the process is doing an
* execve */
+ unsigned in_iowait:1;
+
+
+ /* Revert to default priority/policy when forking */
+ unsigned sched_reset_on_fork:1;
+
pid_t pid;
pid_t tgid;
+#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary value for the -fstack-protector gcc feature */
unsigned long stack_canary;
+#endif
/*
* pointers to (original) parent process, youngest child, younger sibling,
@@ -1292,6 +1303,7 @@ struct task_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
 * (notably, ptrace) */
+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
@@ -1724,6 +1736,28 @@ extern cputime_t task_gtime(struct task_struct *p);
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. */
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+ p->rcu_read_lock_nesting = 0;
+ p->rcu_read_unlock_special = 0;
+ p->rcu_blocked_node = NULL;
+ INIT_LIST_HEAD(&p->rcu_node_entry);
+}
+
+#else
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+}
+
+#endif
+
#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
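
The rcu_copy_process() helper above gives the fork path one place to reset the child's preemptible-RCU bookkeeping; a sketch of how it is presumably used (the real call site would live in kernel/fork.c:copy_process(), which is not part of this hunk):

        /* Sketch only: after the new task_struct has been duplicated. */
        rcu_copy_process(p);    /* child starts with a clean RCU read-side state */
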
@@ -1813,11 +1847,12 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
-#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_child_runs_first;
+#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
int sched_nr_latency_handler(struct ctl_table *table, int write,
@@ -2077,7 +2112,7 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
-extern bool is_single_threaded(struct task_struct *);
+extern bool current_is_single_threaded(void);
/*
* Careful: do_each_thread/while_each_thread is a double loop so
@@ -2281,23 +2316,31 @@ static inline int need_resched(void)
* cond_resched_softirq() will enable bhs before scheduling.
*/
extern int _cond_resched(void);
-#ifdef CONFIG_PREEMPT_BKL
-static inline int cond_resched(void)
-{
- return 0;
-}
+
+#define cond_resched() ({ \
+ __might_sleep(__FILE__, __LINE__, 0); \
+ _cond_resched(); \
+})
+
+extern int __cond_resched_lock(spinlock_t *lock);
+
+#ifdef CONFIG_PREEMPT
+#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
-static inline int cond_resched(void)
-{
- return _cond_resched();
-}
+#define PREEMPT_LOCK_OFFSET 0
#endif
-extern int cond_resched_lock(spinlock_t * lock);
-extern int cond_resched_softirq(void);
-static inline int cond_resched_bkl(void)
-{
- return _cond_resched();
-}
+
+#define cond_resched_lock(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_lock(lock); \
+})
+
+extern int __cond_resched_softirq(void);
+
+#define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
+ __cond_resched_softirq(); \
+})
/*
* Does a critical section need to be broken due to another
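
With the macro forms, the __might_sleep() annotation happens at the call site while the actual reschedule work stays out of line. A typical (illustrative) pattern for cond_resched_lock() in a long scan under a spinlock:

        /* Illustrative fragment: 'lock', 'table' and update_entry() are
         * hypothetical.  cond_resched_lock() may drop and reacquire the
         * lock, so nothing that relies on the lock being held continuously
         * is carried across the call. */
        spin_lock(&lock);
        for (i = 0; i < nr_entries; i++) {
                update_entry(&table[i]);
                cond_resched_lock(&lock);
        }
        spin_unlock(&lock);
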
diff --git a/include/linux/security.h b/include/linux/security.h
index 1f16eea2017b..d050b66ab9ef 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -53,7 +53,7 @@ struct audit_krule;
extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
int cap, int audit);
extern int cap_settime(struct timespec *ts, struct timezone *tz);
-extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode);
+extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
@@ -653,6 +653,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* manual page for definitions of the @clone_flags.
* @clone_flags contains the flags indicating what should be shared.
* Return 0 if permission is granted.
+ * @cred_alloc_blank:
+ * @cred points to the credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Only allocate sufficient memory and attach to @cred such that
+ * cred_transfer() will not get ENOMEM.
* @cred_free:
* @cred points to the credentials.
* Deallocate and clear the cred->security field in a set of credentials.
@@ -665,6 +670,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @new points to the new credentials.
* @old points to the original credentials.
* Install a new set of credentials.
+ * @cred_transfer:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * Transfer data from original creds to new creds
* @kernel_act_as:
* Set the credentials for a kernel service to act as (subjective context).
* @new points to the credentials to be modified.
@@ -678,6 +687,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inode points to the inode to use as a reference.
* The current task must be the one that nominated @inode.
* Return 0 if successful.
+ * @kernel_module_request:
+ * Ability to trigger the kernel to automatically upcall to userspace for
+ * userspace to load a kernel module with the given name.
+ * Return 0 if successful.
* @task_setuid:
* Check permission before setting one or more of the user identity
* attributes of the current process. The @flags parameter indicates
@@ -994,6 +1007,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Sets the connection's peersid to the secmark on skb.
* @req_classify_flow:
* Sets the flow's sid to the openreq sid.
+ * @tun_dev_create:
+ * Check permissions prior to creating a new TUN device.
+ * @tun_dev_post_create:
+ * This hook allows a module to update or allocate a per-socket security
+ * structure.
+ * @sk contains the newly created sock structure.
+ * @tun_dev_attach:
+ * Check permissions prior to attaching to a persistent TUN device. This
+ * hook can also be used by the module to update any security state
+ * associated with the TUN device's sock structure.
+ * @sk contains the existing sock structure.
*
* Security hooks for XFRM operations.
*
@@ -1088,6 +1112,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Return the length of the string (including terminating NUL) or -ve if
* an error.
* May also return 0 (and a NULL buffer pointer) if there is no label.
+ * @key_session_to_parent:
+ * Forcibly assign the session keyring from a process to its parent
+ * process.
+ * @cred: Pointer to process's credentials
+ * @parent_cred: Pointer to parent process's credentials
+ * @keyring: Proposed new session keyring
+ * Return 0 if permission is granted, -ve error otherwise.
*
* Security hooks affecting all System V IPC operations.
*
@@ -1229,7 +1260,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @alter contains the flag indicating whether changes are to be made.
* Return 0 if permission is granted.
*
- * @ptrace_may_access:
+ * @ptrace_access_check:
* Check permission before allowing the current process to trace the
* @child process.
* Security modules may also want to perform a process tracing check
@@ -1244,7 +1275,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Check that the @parent process has sufficient permission to trace the
* current process before allowing the current process to present itself
* to the @parent process for tracing.
- * The parent process will still have to undergo the ptrace_may_access
+ * The parent process will still have to undergo the ptrace_access_check
* checks before it is allowed to trace this one.
* @parent contains the task_struct structure for debugger process.
* Return 0 if permission is granted.
@@ -1351,12 +1382,47 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* audit_rule_init.
* @rule contains the allocated rule
*
+ * @inode_notifysecctx:
+ * Notify the security module of what the security context of an inode
+ * should be. Initializes the incore security context managed by the
+ * security module for this inode. Example usage: NFS client invokes
+ * this hook to initialize the security context in its incore inode to the
+ * value provided by the server for the file when the server returned the
+ * file's attributes to the client.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_setsecctx:
+ * Change the security context of an inode. Updates the
+ * incore security context managed by the security module and invokes the
+ * fs code as needed (via __vfs_setxattr_noperm) to update any backing
+ * xattrs that represent the context. Example usage: NFS server invokes
+ * this hook to change the security context in its incore inode and on the
+ * backing filesystem to a value provided by the client on a SETATTR
+ * operation.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @dentry contains the inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_getsecctx:
+ * Returns a string containing all relevant security context information
+ *
+ * @inode we wish to get the security context of.
+ * @ctx is a pointer in which to place the allocated security context.
+ * @ctxlen points to the place to put the length of @ctx.
* This is the main security structure.
*/
struct security_operations {
char name[SECURITY_NAME_MAX + 1];
- int (*ptrace_may_access) (struct task_struct *child, unsigned int mode);
+ int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
int (*ptrace_traceme) (struct task_struct *parent);
int (*capget) (struct task_struct *target,
kernel_cap_t *effective,
@@ -1483,12 +1549,15 @@ struct security_operations {
int (*dentry_open) (struct file *file, const struct cred *cred);
int (*task_create) (unsigned long clone_flags);
+ int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
void (*cred_free) (struct cred *cred);
int (*cred_prepare)(struct cred *new, const struct cred *old,
gfp_t gfp);
void (*cred_commit)(struct cred *new, const struct cred *old);
+ void (*cred_transfer)(struct cred *new, const struct cred *old);
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
+ int (*kernel_module_request)(void);
int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
int (*task_fix_setuid) (struct cred *new, const struct cred *old,
int flags);
@@ -1556,6 +1625,10 @@ struct security_operations {
int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
void (*release_secctx) (char *secdata, u32 seclen);
+ int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
+ int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
+ int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
+
#ifdef CONFIG_SECURITY_NETWORK
int (*unix_stream_connect) (struct socket *sock,
struct socket *other, struct sock *newsk);
@@ -1592,6 +1665,9 @@ struct security_operations {
void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
+ int (*tun_dev_create)(void);
+ void (*tun_dev_post_create)(struct sock *sk);
+ int (*tun_dev_attach)(struct sock *sk);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1620,6 +1696,9 @@ struct security_operations {
const struct cred *cred,
key_perm_t perm);
int (*key_getsecurity)(struct key *key, char **_buffer);
+ int (*key_session_to_parent)(const struct cred *cred,
+ const struct cred *parent_cred,
+ struct key *key);
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
@@ -1637,7 +1716,7 @@ extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
/* Security operations */
-int security_ptrace_may_access(struct task_struct *child, unsigned int mode);
+int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
kernel_cap_t *effective,
@@ -1736,11 +1815,14 @@ int security_file_send_sigiotask(struct task_struct *tsk,
int security_file_receive(struct file *file);
int security_dentry_open(struct file *file, const struct cred *cred);
int security_task_create(unsigned long clone_flags);
+int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_commit_creds(struct cred *new, const struct cred *old);
+void security_transfer_creds(struct cred *new, const struct cred *old);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
+int security_kernel_module_request(void);
int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
@@ -1796,6 +1878,9 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
void security_release_secctx(char *secdata, u32 seclen);
+int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
+int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
+int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
#else /* CONFIG_SECURITY */
struct security_mnt_opts {
};
@@ -1818,10 +1903,10 @@ static inline int security_init(void)
return 0;
}
-static inline int security_ptrace_may_access(struct task_struct *child,
+static inline int security_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
- return cap_ptrace_may_access(child, mode);
+ return cap_ptrace_access_check(child, mode);
}
static inline int security_ptrace_traceme(struct task_struct *parent)
@@ -2266,6 +2351,11 @@ static inline int security_task_create(unsigned long clone_flags)
return 0;
}
+static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ return 0;
+}
+
static inline void security_cred_free(struct cred *cred)
{ }
@@ -2281,6 +2371,11 @@ static inline void security_commit_creds(struct cred *new,
{
}
+static inline void security_transfer_creds(struct cred *new,
+ const struct cred *old)
+{
+}
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -2292,6 +2387,11 @@ static inline int security_kernel_create_files_as(struct cred *cred,
return 0;
}
+static inline int security_kernel_module_request(void)
+{
+ return 0;
+}
+
static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
int flags)
{
@@ -2537,6 +2637,19 @@ static inline int security_secctx_to_secid(const char *secdata,
static inline void security_release_secctx(char *secdata, u32 seclen)
{
}
+
+static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_SECURITY */
#ifdef CONFIG_SECURITY_NETWORK
@@ -2575,6 +2688,9 @@ void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req);
void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb);
+int security_tun_dev_create(void);
+void security_tun_dev_post_create(struct sock *sk);
+int security_tun_dev_attach(struct sock *sk);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct socket *sock,
@@ -2725,6 +2841,20 @@ static inline void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb)
{
}
+
+static inline int security_tun_dev_create(void)
+{
+ return 0;
+}
+
+static inline void security_tun_dev_post_create(struct sock *sk)
+{
+}
+
+static inline int security_tun_dev_attach(struct sock *sk)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2881,6 +3011,9 @@ void security_key_free(struct key *key);
int security_key_permission(key_ref_t key_ref,
const struct cred *cred, key_perm_t perm);
int security_key_getsecurity(struct key *key, char **_buffer);
+int security_key_session_to_parent(const struct cred *cred,
+ const struct cred *parent_cred,
+ struct key *key);
#else
@@ -2908,6 +3041,13 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
return 0;
}
+static inline int security_key_session_to_parent(const struct cred *cred,
+ const struct cred *parent_cred,
+ struct key *key)
+{
+ return 0;
+}
+
#endif
#endif /* CONFIG_KEYS */
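
Reading the three TUN hook descriptions together, the driver is expected to call them roughly as follows (sketch only; variable names are illustrative, and the real call sites would live in drivers/net/tun.c):

        /* Illustrative drivers/net/tun.c usage of the new hooks. */
        err = security_tun_dev_create();        /* new device requested */
        if (err)
                return err;
        /* ... allocate the device and its sock ... */
        security_tun_dev_post_create(sk);       /* let the LSM label the sock */
        /* ... later, when re-attaching to a persistent device ... */
        err = security_tun_dev_attach(sk);
        if (err)
                return err;
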
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index abff6c9b413c..6d3f2f449ead 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -39,7 +39,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
}
#ifdef CONFIG_TMPFS_POSIX_ACL
-int shmem_permission(struct inode *, int);
+int shmem_check_acl(struct inode *, int);
int shmem_acl_init(struct inode *, struct inode *);
extern struct xattr_handler shmem_xattr_acl_access_handler;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4be57ab03478..f0ca7a7a1757 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -143,15 +143,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
*/
#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
extern void _raw_spin_lock(spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
@@ -268,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#define spin_lock_irq(lock) _spin_lock_irq(lock)
#define spin_lock_bh(lock) _spin_lock_bh(lock)
-
#define read_lock_irq(lock) _read_lock_irq(lock)
#define read_lock_bh(lock) _read_lock_bh(lock)
-
#define write_lock_irq(lock) _write_lock_irq(lock)
#define write_lock_bh(lock) _write_lock_bh(lock)
-
-/*
- * We inline the unlock functions in the nondebug case:
- */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
- !defined(CONFIG_SMP)
-# define spin_unlock(lock) _spin_unlock(lock)
-# define read_unlock(lock) _read_unlock(lock)
-# define write_unlock(lock) _write_unlock(lock)
-# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-# define read_unlock_irq(lock) _read_unlock_irq(lock)
-# define write_unlock_irq(lock) _write_unlock_irq(lock)
-#else
-# define spin_unlock(lock) \
- do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define read_unlock(lock) \
- do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define write_unlock(lock) \
- do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define spin_unlock_irq(lock) \
-do { \
- __raw_spin_unlock(&(lock)->raw_lock); \
- __release(lock); \
- local_irq_enable(); \
-} while (0)
-# define read_unlock_irq(lock) \
-do { \
- __raw_read_unlock(&(lock)->raw_lock); \
- __release(lock); \
- local_irq_enable(); \
-} while (0)
-# define write_unlock_irq(lock) \
-do { \
- __raw_write_unlock(&(lock)->raw_lock); \
- __release(lock); \
- local_irq_enable(); \
-} while (0)
-#endif
+#define spin_unlock(lock) _spin_unlock(lock)
+#define read_unlock(lock) _read_unlock(lock)
+#define write_unlock(lock) _write_unlock(lock)
+#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
+#define read_unlock_irq(lock) _read_unlock_irq(lock)
+#define write_unlock_irq(lock) _write_unlock_irq(lock)
#define spin_unlock_irqrestore(lock, flags) \
do { \
@@ -380,4 +337,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
*/
#define spin_can_lock(lock) (!spin_is_locked(lock))
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d79845d034b5..7a7e18fc2415 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,4 +60,398 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
+#define __always_inline__spin_unlock
+#define __always_inline__read_unlock
+#define __always_inline__write_unlock
+#define __always_inline__spin_unlock_irq
+#define __always_inline__read_unlock_irq
+#define __always_inline__write_unlock_irq
+#endif
+
+#ifndef CONFIG_DEBUG_SPINLOCK
+#ifndef CONFIG_GENERIC_LOCKBREAK
+
+#ifdef __always_inline__spin_lock
+#define _spin_lock(lock) __spin_lock(lock)
+#endif
+
+#ifdef __always_inline__read_lock
+#define _read_lock(lock) __read_lock(lock)
+#endif
+
+#ifdef __always_inline__write_lock
+#define _write_lock(lock) __write_lock(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_bh
+#define _spin_lock_bh(lock) __spin_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_lock_bh
+#define _read_lock_bh(lock) __read_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_lock_bh
+#define _write_lock_bh(lock) __write_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irq
+#define _spin_lock_irq(lock) __spin_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irq
+#define _read_lock_irq(lock) __read_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irq
+#define _write_lock_irq(lock) __write_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irqsave
+#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irqsave
+#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irqsave
+#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#endif
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK */
+
+#ifdef __always_inline__spin_trylock
+#define _spin_trylock(lock) __spin_trylock(lock)
+#endif
+
+#ifdef __always_inline__read_trylock
+#define _read_trylock(lock) __read_trylock(lock)
+#endif
+
+#ifdef __always_inline__write_trylock
+#define _write_trylock(lock) __write_trylock(lock)
+#endif
+
+#ifdef __always_inline__spin_trylock_bh
+#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock
+#define _spin_unlock(lock) __spin_unlock(lock)
+#endif
+
+#ifdef __always_inline__read_unlock
+#define _read_unlock(lock) __read_unlock(lock)
+#endif
+
+#ifdef __always_inline__write_unlock
+#define _write_unlock(lock) __write_unlock(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_bh
+#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_bh
+#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_bh
+#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irq
+#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_irq
+#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_irq
+#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irqrestore
+#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__read_unlock_irqrestore
+#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__write_unlock_irqrestore
+#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#endif
+
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+static inline int __spin_trylock(spinlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_spin_trylock(lock)) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ /*
+ * On lockdep we don't want the hand-coded irq-enable of
+ * _raw_spin_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#ifdef CONFIG_LOCKDEP
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+#else
+ _raw_spin_lock_flags(lock, &flags);
+#endif
+ return flags;
+}
+
+static inline void __spin_lock_irq(spinlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline void __spin_lock_bh(spinlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+ _raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline void __read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+ _raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __spin_lock(spinlock_t *lock)
+{
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline void __write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __spin_unlock(spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_spin_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+ unsigned long flags)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_spin_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __spin_unlock_irq(spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_spin_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __spin_unlock_bh(spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_spin_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline int __spin_trylock_bh(spinlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ if (_raw_spin_trylock(lock)) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ return 0;
+}
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
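
Judging from the #ifdef structure above, the mapping from _spin_lock() and friends onto the inline __spin_lock() bodies is opt-in per operation: whoever wants a given operation inlined defines the matching __always_inline__* macro (here the non-debug, non-preempt build does so for the unlock family). Schematically, and only as an assumption about how an architecture would opt in:

        /* Schematic opt-in, e.g. from an architecture's spinlock header;
         * the exact opt-in location may differ. */
        #define __always_inline__spin_lock
        #define __always_inline__spin_unlock
        /* spinlock_api_smp.h then maps _spin_lock()/_spin_unlock() onto the
         * inline __spin_lock()/__spin_unlock() bodies above, and the
         * out-of-line copies in kernel/spinlock.c are expected to be
         * skipped for those symbols. */
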
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 2d8b211b9324..6f52b4d7c447 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -59,6 +59,15 @@ struct cache_head {
#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
+struct cache_detail_procfs {
+ struct proc_dir_entry *proc_ent;
+ struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
+};
+
+struct cache_detail_pipefs {
+ struct dentry *dir;
+};
+
struct cache_detail {
struct module * owner;
int hash_size;
@@ -70,15 +79,17 @@ struct cache_detail {
char *name;
void (*cache_put)(struct kref *);
- void (*cache_request)(struct cache_detail *cd,
- struct cache_head *h,
- char **bpp, int *blen);
+ int (*cache_upcall)(struct cache_detail *,
+ struct cache_head *);
+
int (*cache_parse)(struct cache_detail *,
char *buf, int len);
int (*cache_show)(struct seq_file *m,
struct cache_detail *cd,
struct cache_head *h);
+ void (*warn_no_listener)(struct cache_detail *cd,
+ int has_died);
struct cache_head * (*alloc)(void);
int (*match)(struct cache_head *orig, struct cache_head *new);
@@ -96,13 +107,15 @@ struct cache_detail {
/* fields for communication over channel */
struct list_head queue;
- struct proc_dir_entry *proc_ent;
- struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
 atomic_t readers; /* how many times is /channel open */
time_t last_close; /* if no readers, when did last close */
time_t last_warn; /* when we last warned about no readers */
- void (*warn_no_listener)(struct cache_detail *cd);
+
+ union {
+ struct cache_detail_procfs procfs;
+ struct cache_detail_pipefs pipefs;
+ } u;
};
@@ -127,6 +140,10 @@ struct cache_deferred_req {
};
+extern const struct file_operations cache_file_operations_pipefs;
+extern const struct file_operations content_file_operations_pipefs;
+extern const struct file_operations cache_flush_operations_pipefs;
+
extern struct cache_head *
sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash);
@@ -134,6 +151,13 @@ extern struct cache_head *
sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *new, struct cache_head *old, int hash);
+extern int
+sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
+ void (*cache_request)(struct cache_detail *,
+ struct cache_head *,
+ char **,
+ int *));
+
extern void cache_clean_deferred(void *owner);
@@ -171,6 +195,10 @@ extern void cache_purge(struct cache_detail *detail);
extern int cache_register(struct cache_detail *cd);
extern void cache_unregister(struct cache_detail *cd);
+extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
+ mode_t, struct cache_detail *);
+extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
+
extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
extern int qword_get(char **bpp, char *dest, int bufsize);
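
With cache_request gone from struct cache_detail, a cache that wants the traditional /proc channel behaviour presumably keeps its request formatter and forwards to sunrpc_cache_pipe_upcall() from the new cache_upcall method. An illustrative adapter (names made up):

        /* Illustrative glue for the new upcall interface. */
        static void my_cache_request(struct cache_detail *cd, struct cache_head *h,
                                     char **bpp, int *blen)
        {
                /* format the upcall request into *bpp, as before */
        }

        static int my_cache_upcall(struct cache_detail *cd, struct cache_head *h)
        {
                return sunrpc_cache_pipe_upcall(cd, h, my_cache_request);
        }

        static struct cache_detail my_cache = {
                /* ... */
                .cache_upcall = my_cache_upcall,
        };
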
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 37881f1a0bd7..ab3f6e90caa5 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -9,6 +9,10 @@
#ifndef _LINUX_SUNRPC_CLNT_H
#define _LINUX_SUNRPC_CLNT_H
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprt.h>
@@ -17,6 +21,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/timer.h>
#include <asm/signal.h>
+#include <linux/path.h>
struct rpc_inode;
@@ -50,9 +55,7 @@ struct rpc_clnt {
int cl_nodelen; /* nodename length */
char cl_nodename[UNX_MAXNODENAME];
- char cl_pathname[30];/* Path in rpc_pipe_fs */
- struct vfsmount * cl_vfsmnt;
- struct dentry * cl_dentry; /* inode */
+ struct path cl_path;
struct rpc_clnt * cl_parent; /* Points to parent of clones */
struct rpc_rtt cl_rtt_default;
struct rpc_timeout cl_timeout_default;
@@ -151,5 +154,39 @@ void rpc_force_rebind(struct rpc_clnt *);
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
+size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
+size_t rpc_pton(const char *, const size_t,
+ struct sockaddr *, const size_t);
+char * rpc_sockaddr2uaddr(const struct sockaddr *);
+size_t rpc_uaddr2sockaddr(const char *, const size_t,
+ struct sockaddr *, const size_t);
+
+static inline unsigned short rpc_get_port(const struct sockaddr *sap)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ return ntohs(((struct sockaddr_in *)sap)->sin_port);
+ case AF_INET6:
+ return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+ }
+ return 0;
+}
+
+static inline void rpc_set_port(struct sockaddr *sap,
+ const unsigned short port)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)sap)->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
+ break;
+ }
+}
+
+#define IPV6_SCOPE_DELIMITER '%'
+#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
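
The new port helpers hide the AF_INET/AF_INET6 difference from callers; an illustrative fragment (error handling abbreviated):

        /* Parse a presentation-format address, force the port, read it back. */
        struct sockaddr_storage addr;
        struct sockaddr *sap = (struct sockaddr *)&addr;

        if (rpc_pton("192.0.2.1", sizeof("192.0.2.1") - 1, sap, sizeof(addr)) == 0)
                return -EINVAL;
        rpc_set_port(sap, 2049);                        /* NFS */
        printk(KERN_INFO "port is %u\n", rpc_get_port(sap));
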
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 70df4f1d8847..77e624883393 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -189,7 +189,22 @@ typedef __be32 rpc_fraghdr;
* Additionally, the two alternative forms specified in Section 2.2 of
* [RFC2373] are also acceptable.
*/
-#define RPCBIND_MAXUADDRLEN (56u)
+
+#include <linux/inet.h>
+
+/* Maximum size of the port number part of a universal address */
+#define RPCBIND_MAXUADDRPLEN sizeof(".255.255")
+
+/* Maximum size of an IPv4 universal address */
+#define RPCBIND_MAXUADDR4LEN \
+ (INET_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN)
+
+/* Maximum size of an IPv6 universal address */
+#define RPCBIND_MAXUADDR6LEN \
+ (INET6_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN)
+
+/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */
+#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_MSGPROT_H_ */
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index cea764c2359f..cf14db975da0 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -3,6 +3,8 @@
#ifdef __KERNEL__
+#include <linux/workqueue.h>
+
struct rpc_pipe_msg {
struct list_head list;
void *data;
@@ -32,8 +34,8 @@ struct rpc_inode {
wait_queue_head_t waitq;
#define RPC_PIPE_WAIT_FOR_OPEN 1
int flags;
- struct rpc_pipe_ops *ops;
struct delayed_work queue_timeout;
+ const struct rpc_pipe_ops *ops;
};
static inline struct rpc_inode *
@@ -44,9 +46,19 @@ RPC_I(struct inode *inode)
extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
-extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *);
-extern int rpc_rmdir(struct dentry *);
-extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, struct rpc_pipe_ops *, int flags);
+struct rpc_clnt;
+extern struct dentry *rpc_create_client_dir(struct dentry *, struct qstr *, struct rpc_clnt *);
+extern int rpc_remove_client_dir(struct dentry *);
+
+struct cache_detail;
+extern struct dentry *rpc_create_cache_dir(struct dentry *,
+ struct qstr *,
+ mode_t umode,
+ struct cache_detail *);
+extern void rpc_remove_cache_dir(struct dentry *);
+
+extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *,
+ const struct rpc_pipe_ops *, int flags);
extern int rpc_unlink(struct dentry *);
extern struct vfsmount *rpc_get_mount(void);
extern void rpc_put_mount(void);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index b99c625fddfe..7da466ba4b0d 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -117,17 +117,15 @@ static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int le
static inline __be32 *
xdr_encode_hyper(__be32 *p, __u64 val)
{
- *p++ = htonl(val >> 32);
- *p++ = htonl(val & 0xFFFFFFFF);
- return p;
+ *(__be64 *)p = cpu_to_be64(val);
+ return p + 2;
}
static inline __be32 *
xdr_decode_hyper(__be32 *p, __u64 *valp)
{
- *valp = ((__u64) ntohl(*p++)) << 32;
- *valp |= ntohl(*p++);
- return p;
+ *valp = be64_to_cpup((__be64 *)p);
+ return p + 2;
}
/*
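
The rewritten helpers still consume exactly two XDR words (eight octets) per hyper; only the implementation changed to a single 64-bit store/load. Illustrative use:

        /* Encode then decode a 64-bit XDR hyper. */
        __be32 buf[2];
        __u64 in = 0x0123456789abcdefULL, out;

        xdr_encode_hyper(buf, in);      /* fills buf[0..1], returns buf + 2 */
        xdr_decode_hyper(buf, &out);    /* out == in */
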
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 1175d58efc2e..c090df442572 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -38,10 +38,8 @@ enum rpc_display_format_t {
RPC_DISPLAY_ADDR = 0,
RPC_DISPLAY_PORT,
RPC_DISPLAY_PROTO,
- RPC_DISPLAY_ALL,
RPC_DISPLAY_HEX_ADDR,
RPC_DISPLAY_HEX_PORT,
- RPC_DISPLAY_UNIVERSAL_ADDR,
RPC_DISPLAY_NETID,
RPC_DISPLAY_MAX,
};
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index cb1a6631b8f4..73b1f1cec423 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -14,7 +14,6 @@ struct scatterlist;
*/
#define IO_TLB_SEGSIZE 128
-
/*
* log of the size of each IO TLB slab. The number of slabs is command line
* controllable.
@@ -24,16 +23,6 @@ struct scatterlist;
extern void
swiotlb_init(void);
-extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
-extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
-
-extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
- phys_addr_t address);
-extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev,
- dma_addr_t address);
-
-extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
-
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 80de7003d8c2..a8e37821cc60 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -64,6 +64,7 @@ struct perf_counter_attr;
#include <linux/sem.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
+#include <linux/unistd.h>
#include <linux/quota.h>
#include <linux/key.h>
#include <trace/syscall.h>
@@ -97,6 +98,53 @@ struct perf_counter_attr;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
+#ifdef CONFIG_EVENT_PROFILE
+#define TRACE_SYS_ENTER_PROFILE(sname) \
+static int prof_sysenter_enable_##sname(struct ftrace_event_call *event_call) \
+{ \
+ int ret = 0; \
+ if (!atomic_inc_return(&event_enter_##sname.profile_count)) \
+ ret = reg_prof_syscall_enter("sys"#sname); \
+ return ret; \
+} \
+ \
+static void prof_sysenter_disable_##sname(struct ftrace_event_call *event_call)\
+{ \
+ if (atomic_add_negative(-1, &event_enter_##sname.profile_count)) \
+ unreg_prof_syscall_enter("sys"#sname); \
+}
+
+#define TRACE_SYS_EXIT_PROFILE(sname) \
+static int prof_sysexit_enable_##sname(struct ftrace_event_call *event_call) \
+{ \
+ int ret = 0; \
+ if (!atomic_inc_return(&event_exit_##sname.profile_count)) \
+ ret = reg_prof_syscall_exit("sys"#sname); \
+ return ret; \
+} \
+ \
+static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \
+{ \
+ if (atomic_add_negative(-1, &event_exit_##sname.profile_count)) \
+ unreg_prof_syscall_exit("sys"#sname); \
+}
+
+#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
+ .profile_count = ATOMIC_INIT(-1), \
+ .profile_enable = prof_sysenter_enable_##sname, \
+ .profile_disable = prof_sysenter_disable_##sname,
+
+#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
+ .profile_count = ATOMIC_INIT(-1), \
+ .profile_enable = prof_sysexit_enable_##sname, \
+ .profile_disable = prof_sysexit_disable_##sname,
+#else
+#define TRACE_SYS_ENTER_PROFILE(sname)
+#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
+#define TRACE_SYS_EXIT_PROFILE(sname)
+#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#endif
+
#ifdef CONFIG_FTRACE_SYSCALLS
#define __SC_STR_ADECL1(t, a) #a
#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -112,7 +160,81 @@ struct perf_counter_attr;
#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
+#define SYSCALL_TRACE_ENTER_EVENT(sname) \
+ static struct ftrace_event_call event_enter_##sname; \
+ struct trace_event enter_syscall_print_##sname = { \
+ .trace = print_syscall_enter, \
+ }; \
+ static int init_enter_##sname(void) \
+ { \
+ int num, id; \
+ num = syscall_name_to_nr("sys"#sname); \
+ if (num < 0) \
+ return -ENOSYS; \
+ id = register_ftrace_event(&enter_syscall_print_##sname);\
+ if (!id) \
+ return -ENODEV; \
+ event_enter_##sname.id = id; \
+ set_syscall_enter_id(num, id); \
+ INIT_LIST_HEAD(&event_enter_##sname.fields); \
+ return 0; \
+ } \
+ TRACE_SYS_ENTER_PROFILE(sname); \
+ static struct ftrace_event_call __used \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_events"))) \
+ event_enter_##sname = { \
+ .name = "sys_enter"#sname, \
+ .system = "syscalls", \
+ .event = &event_syscall_enter, \
+ .raw_init = init_enter_##sname, \
+ .show_format = syscall_enter_format, \
+ .define_fields = syscall_enter_define_fields, \
+ .regfunc = reg_event_syscall_enter, \
+ .unregfunc = unreg_event_syscall_enter, \
+ .data = "sys"#sname, \
+ TRACE_SYS_ENTER_PROFILE_INIT(sname) \
+ }
+
+#define SYSCALL_TRACE_EXIT_EVENT(sname) \
+ static struct ftrace_event_call event_exit_##sname; \
+ struct trace_event exit_syscall_print_##sname = { \
+ .trace = print_syscall_exit, \
+ }; \
+ static int init_exit_##sname(void) \
+ { \
+ int num, id; \
+ num = syscall_name_to_nr("sys"#sname); \
+ if (num < 0) \
+ return -ENOSYS; \
+ id = register_ftrace_event(&exit_syscall_print_##sname);\
+ if (!id) \
+ return -ENODEV; \
+ event_exit_##sname.id = id; \
+ set_syscall_exit_id(num, id); \
+ INIT_LIST_HEAD(&event_exit_##sname.fields); \
+ return 0; \
+ } \
+ TRACE_SYS_EXIT_PROFILE(sname); \
+ static struct ftrace_event_call __used \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_events"))) \
+ event_exit_##sname = { \
+ .name = "sys_exit"#sname, \
+ .system = "syscalls", \
+ .event = &event_syscall_exit, \
+ .raw_init = init_exit_##sname, \
+ .show_format = syscall_exit_format, \
+ .define_fields = syscall_exit_define_fields, \
+ .regfunc = reg_event_syscall_exit, \
+ .unregfunc = unreg_event_syscall_exit, \
+ .data = "sys"#sname, \
+ TRACE_SYS_EXIT_PROFILE_INIT(sname) \
+ }
+
#define SYSCALL_METADATA(sname, nb) \
+ SYSCALL_TRACE_ENTER_EVENT(sname); \
+ SYSCALL_TRACE_EXIT_EVENT(sname); \
static const struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
@@ -121,18 +243,23 @@ struct perf_counter_attr;
.nb_args = nb, \
.types = types_##sname, \
.args = args_##sname, \
- }
+ .enter_event = &event_enter_##sname, \
+ .exit_event = &event_exit_##sname, \
+ };
#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_TRACE_ENTER_EVENT(_##sname); \
+ SYSCALL_TRACE_EXIT_EVENT(_##sname); \
static const struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta_##sname = { \
.name = "sys_"#sname, \
.nb_args = 0, \
+ .enter_event = &event_enter__##sname, \
+ .exit_event = &event_exit__##sname, \
}; \
asmlinkage long sys_##sname(void)
-
#else
#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
#endif
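
The net effect is that a syscall author keeps writing the same SYSCALL_DEFINEx() body, and the macros additionally emit enter/exit ftrace events (named per the .name/.system fields above, under the "syscalls" system). A hedged sketch with a made-up syscall name:

        /* Illustrative only: the author writes this and nothing else; the
         * enter/exit trace events and syscall metadata come from the macro. */
        SYSCALL_DEFINE0(mygetid)
        {
                return task_tgid_vnr(current);
        }
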
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7402c1a27c4f..85e8cf7d393c 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -85,20 +85,29 @@ int arch_update_cpu_topology(void);
#define ARCH_HAS_SCHED_WAKE_IDLE
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
-#define SD_SIBLING_INIT (struct sched_domain) { \
- .min_interval = 1, \
- .max_interval = 2, \
- .busy_factor = 64, \
- .imbalance_pct = 110, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_BALANCE_FORK \
- | SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE \
- | SD_WAKE_BALANCE \
- | SD_SHARE_CPUPOWER, \
- .last_balance = jiffies, \
- .balance_interval = 1, \
+#define SD_SIBLING_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 2, \
+ .busy_factor = 64, \
+ .imbalance_pct = 110, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_WAKE_IDLE \
+ | 1*SD_WAKE_AFFINE \
+ | 1*SD_WAKE_BALANCE \
+ | 1*SD_SHARE_CPUPOWER \
+ | 0*SD_POWERSAVINGS_BALANCE \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ | 0*SD_WAKE_IDLE_FAR \
+ | 0*SD_PREFER_SIBLING \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .smt_gain = 1178, /* 15% */ \
}
#endif
#endif /* CONFIG_SCHED_SMT */
@@ -106,69 +115,94 @@ int arch_update_cpu_topology(void);
#ifdef CONFIG_SCHED_MC
/* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
#ifndef SD_MC_INIT
-#define SD_MC_INIT (struct sched_domain) { \
- .min_interval = 1, \
- .max_interval = 4, \
- .busy_factor = 64, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 1, \
- .busy_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_FORK \
- | SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE \
- | SD_WAKE_BALANCE \
- | SD_SHARE_PKG_RESOURCES\
- | sd_balance_for_mc_power()\
- | sd_power_saving_flags(),\
- .last_balance = jiffies, \
- .balance_interval = 1, \
+#define SD_MC_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .wake_idx = 1, \
+ .forkexec_idx = 1, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 1*SD_WAKE_IDLE \
+ | 1*SD_WAKE_AFFINE \
+ | 1*SD_WAKE_BALANCE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 1*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ | 0*SD_WAKE_IDLE_FAR \
+ | sd_balance_for_mc_power() \
+ | sd_power_saving_flags() \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
}
#endif
#endif /* CONFIG_SCHED_MC */
/* Common values for CPUs */
#ifndef SD_CPU_INIT
-#define SD_CPU_INIT (struct sched_domain) { \
- .min_interval = 1, \
- .max_interval = 4, \
- .busy_factor = 64, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 1, \
- .busy_idx = 2, \
- .idle_idx = 1, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_EXEC \
- | SD_BALANCE_FORK \
- | SD_WAKE_AFFINE \
- | SD_WAKE_BALANCE \
- | sd_balance_for_package_power()\
- | sd_power_saving_flags(),\
- .last_balance = jiffies, \
- .balance_interval = 1, \
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .forkexec_idx = 1, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 1*SD_WAKE_IDLE \
+ | 0*SD_WAKE_AFFINE \
+ | 1*SD_WAKE_BALANCE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ | 0*SD_WAKE_IDLE_FAR \
+ | sd_balance_for_package_power() \
+ | sd_power_saving_flags() \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
}
#endif
/* sched_domains SD_ALLNODES_INIT for NUMA machines */
-#define SD_ALLNODES_INIT (struct sched_domain) { \
- .min_interval = 64, \
- .max_interval = 64*num_online_cpus(), \
- .busy_factor = 128, \
- .imbalance_pct = 133, \
- .cache_nice_tries = 1, \
- .busy_idx = 3, \
- .idle_idx = 3, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_WAKE_AFFINE \
- | SD_SERIALIZE, \
- .last_balance = jiffies, \
- .balance_interval = 64, \
+#define SD_ALLNODES_INIT (struct sched_domain) { \
+ .min_interval = 64, \
+ .max_interval = 64*num_online_cpus(), \
+ .busy_factor = 128, \
+ .imbalance_pct = 133, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 3, \
+ .idle_idx = 3, \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 0*SD_BALANCE_EXEC \
+ | 0*SD_BALANCE_FORK \
+ | 0*SD_WAKE_IDLE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_WAKE_BALANCE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_POWERSAVINGS_BALANCE \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 1*SD_SERIALIZE \
+ | 1*SD_WAKE_IDLE_FAR \
+ | 0*SD_PREFER_SIBLING \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 64, \
}
#ifdef CONFIG_NUMA
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b9dc4ca0246f..63a3f7a80580 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -23,6 +23,8 @@ struct tracepoint;
struct tracepoint {
const char *name; /* Tracepoint name */
int state; /* State. */
+ void (*regfunc)(void);
+ void (*unregfunc)(void);
void **funcs;
} __attribute__((aligned(32))); /*
* Aligned on 32 bytes because it is
@@ -78,12 +80,16 @@ struct tracepoint {
return tracepoint_probe_unregister(#name, (void *)probe);\
}
-#define DEFINE_TRACE(name) \
+
+#define DEFINE_TRACE_FN(name, reg, unreg) \
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) = #name; \
struct tracepoint __tracepoint_##name \
__attribute__((section("__tracepoints"), aligned(32))) = \
- { __tpstrtab_##name, 0, NULL }
+ { __tpstrtab_##name, 0, reg, unreg, NULL }
+
+#define DEFINE_TRACE(name) \
+ DEFINE_TRACE_FN(name, NULL, NULL);
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
EXPORT_SYMBOL_GPL(__tracepoint_##name)
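
The new regfunc/unregfunc members presumably let a tracepoint run setup and teardown work when it is switched on and off; an illustrative use of the new variant (callback bodies made up):

        /* Per-tracepoint (un)registration work. */
        static void my_event_reg(void)
        {
                /* e.g. enable whatever facility the probes need */
        }

        static void my_event_unreg(void)
        {
                /* undo whatever my_event_reg() set up */
        }

        DEFINE_TRACE_FN(my_event, my_event_reg, my_event_unreg);
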
@@ -108,6 +114,7 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
return -ENOSYS; \
}
+#define DEFINE_TRACE_FN(name, reg, unreg)
#define DEFINE_TRACE(name)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
@@ -158,6 +165,15 @@ static inline void tracepoint_synchronize_unregister(void)
#define PARAMS(args...) args
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
+ * This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS" the first include
+ * will override the TRACE_EVENT and break the second include.
+ */
+
#ifndef TRACE_EVENT
/*
* For use with the TRACE_EVENT macro:
@@ -259,10 +275,15 @@ static inline void tracepoint_synchronize_unregister(void)
* can also by used by generic instrumentation like SystemTap), and
* it is also used to expose a structured trace record in
* /sys/kernel/debug/tracing/events/.
+ *
+ * A set of (un)registration functions can be passed to the variant
+ * TRACE_EVENT_FN to perform any (un)registration work.
*/
#define TRACE_EVENT(name, proto, args, struct, assign, print) \
DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-#endif
+#define TRACE_EVENT_FN(name, proto, args, struct, \
+ assign, print, reg, unreg) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-#endif
+#endif /* ifdef TRACE_EVENT (see note above) */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e8c6c9136c97..0d3974f59c53 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -23,7 +23,7 @@
*/
#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */
#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */
-#define NR_LDISCS 19
+#define NR_LDISCS 20
/* line disciplines */
#define N_TTY 0
@@ -47,6 +47,8 @@
#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */
#define N_PPS 18 /* Pulse per Second */
+#define N_V253 19 /* Codec control over voice modem */
+
/*
* This character is the same as _POSIX_VDISABLE: it cannot be used as
* a c_cc[] character, but indicates that a particular special character
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3224820c8514..78b1e4684cc9 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -14,17 +14,6 @@ extern struct list_head inode_in_use;
extern struct list_head inode_unused;
/*
- * Yes, writeback.h requires sched.h
- * No, sched.h is not included from here.
- */
-static inline int task_is_pdflush(struct task_struct *task)
-{
- return task->flags & PF_FLUSHER;
-}
-
-#define current_is_pdflush() task_is_pdflush(current)
-
-/*
* fs/fs-writeback.c
*/
enum writeback_sync_modes {
@@ -40,6 +29,8 @@ enum writeback_sync_modes {
struct writeback_control {
struct backing_dev_info *bdi; /* If !NULL, only write back this
queue */
+ struct super_block *sb; /* if !NULL, only write inodes from
+ this super_block */
enum writeback_sync_modes sync_mode;
unsigned long *older_than_this; /* If !NULL, only write back inodes
older than this */
@@ -76,9 +67,13 @@ struct writeback_control {
/*
* fs/fs-writeback.c
*/
-void writeback_inodes(struct writeback_control *wbc);
+struct bdi_writeback;
int inode_wait(void *);
-void sync_inodes_sb(struct super_block *, int wait);
+long writeback_inodes_sb(struct super_block *);
+long sync_inodes_sb(struct super_block *);
+void writeback_inodes_wbc(struct writeback_control *wbc);
+long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
+void wakeup_flusher_threads(long nr_pages);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
@@ -98,7 +93,6 @@ static inline void inode_sync_wait(struct inode *inode)
/*
* mm/page-writeback.c
*/
-int wakeup_pdflush(long nr_pages);
void laptop_io_completion(void);
void laptop_sync_completion(void);
void throttle_vm_writeout(gfp_t gfp_mask);
@@ -150,7 +144,6 @@ balance_dirty_pages_ratelimited(struct address_space *mapping)
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data);
-int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
int generic_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int write_cache_pages(struct address_space *mapping,
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d131e352cfe1..5c84af8c5f6f 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -49,6 +49,7 @@ struct xattr_handler {
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
int vfs_removexattr(struct dentry *, const char *);
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 251fc1cd5002..3dae3f799b9b 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -32,6 +32,9 @@
#include "control.h"
#include "info.h"
+/* maximum number of devices on the AC97 bus */
+#define AC97_BUS_MAX_DEVICES 4
+
/*
* AC'97 codec registers
*/
@@ -642,4 +645,10 @@ int snd_ac97_pcm_double_rate_rules(struct snd_pcm_runtime *runtime);
/* ad hoc AC97 device driver access */
extern struct bus_type ac97_bus_type;
+/* AC97 platform_data adding function */
+static inline void snd_ac97_dev_add_pdata(struct snd_ac97 *ac97, void *data)
+{
+ ac97->dev.platform_data = data;
+}
+
#endif /* __SOUND_AC97_CODEC_H */
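
A minimal sketch of how a driver might hand board-specific data to the AC97 codec device with the new helper; struct my_codec_pdata and the ac97 pointer are assumptions of the caller, only snd_ac97_dev_add_pdata() comes from this header:

	static struct my_codec_pdata board_pdata = {
		/* board-specific codec settings */
	};

	/* attach platform data before the codec device probes */
	snd_ac97_dev_add_pdata(ac97, &board_pdata);
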
diff --git a/include/sound/asound.h b/include/sound/asound.h
index 82aed3f47534..1f57bb92eb5a 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -138,7 +138,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 9)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 10)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
diff --git a/include/sound/core.h b/include/sound/core.h
index 309cb9659a05..a61499c22b0b 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -93,15 +93,6 @@ struct snd_device {
#define snd_device(n) list_entry(n, struct snd_device, list)
-/* monitor files for graceful shutdown (hotplug) */
-
-struct snd_monitor_file {
- struct file *file;
- const struct file_operations *disconnected_f_op;
- struct list_head shutdown_list; /* still need to shutdown */
- struct list_head list; /* link of monitor files */
-};
-
/* main structure for soundcard */
struct snd_card {
@@ -311,9 +302,7 @@ int snd_component_add(struct snd_card *card, const char *component);
int snd_card_file_add(struct snd_card *card, struct file *file);
int snd_card_file_remove(struct snd_card *card, struct file *file);
-#ifndef snd_card_set_dev
#define snd_card_set_dev(card, devptr) ((card)->dev = (devptr))
-#endif
/* device.c */
@@ -340,18 +329,17 @@ unsigned int snd_dma_pointer(unsigned long dma, unsigned int size);
struct resource;
void release_and_free_resource(struct resource *res);
-#ifdef CONFIG_SND_VERBOSE_PRINTK
-void snd_verbose_printk(const char *file, int line, const char *format, ...)
- __attribute__ ((format (printf, 3, 4)));
-#endif
-#if defined(CONFIG_SND_DEBUG) && defined(CONFIG_SND_VERBOSE_PRINTK)
-void snd_verbose_printd(const char *file, int line, const char *format, ...)
- __attribute__ ((format (printf, 3, 4)));
-#endif
-
/* --- */
-#ifdef CONFIG_SND_VERBOSE_PRINTK
+#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
+void __snd_printk(unsigned int level, const char *file, int line,
+ const char *format, ...)
+ __attribute__ ((format (printf, 4, 5)));
+#else
+#define __snd_printk(level, file, line, format, args...) \
+ printk(format, ##args)
+#endif
+
/**
* snd_printk - printk wrapper
* @fmt: format string
@@ -360,15 +348,9 @@ void snd_verbose_printd(const char *file, int line, const char *format, ...)
* when configured with CONFIG_SND_VERBOSE_PRINTK.
*/
#define snd_printk(fmt, args...) \
- snd_verbose_printk(__FILE__, __LINE__, fmt ,##args)
-#else
-#define snd_printk(fmt, args...) \
- printk(fmt ,##args)
-#endif
+ __snd_printk(0, __FILE__, __LINE__, fmt, ##args)
#ifdef CONFIG_SND_DEBUG
-
-#ifdef CONFIG_SND_VERBOSE_PRINTK
/**
* snd_printd - debug printk
* @fmt: format string
@@ -377,11 +359,7 @@ void snd_verbose_printd(const char *file, int line, const char *format, ...)
* Ignored when CONFIG_SND_DEBUG is not set.
*/
#define snd_printd(fmt, args...) \
- snd_verbose_printd(__FILE__, __LINE__, fmt ,##args)
-#else
-#define snd_printd(fmt, args...) \
- printk(fmt ,##args)
-#endif
+ __snd_printk(1, __FILE__, __LINE__, fmt, ##args)
/**
* snd_BUG - give a BUG warning message and stack trace
@@ -428,9 +406,10 @@ static inline int __snd_bug_on(int cond)
* Works like snd_printk() for debugging purposes.
* Ignored when CONFIG_SND_DEBUG_VERBOSE is not set.
*/
-#define snd_printdd(format, args...) snd_printk(format, ##args)
+#define snd_printdd(format, args...) \
+ __snd_printk(2, __FILE__, __LINE__, format, ##args)
#else
-#define snd_printdd(format, args...) /* nothing */
+#define snd_printdd(format, args...) do { } while (0)
#endif
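
Caller usage is unchanged by the consolidation; a small sketch (the "mychip" prefix and the local variables are assumptions) showing how the three wrappers map onto the new __snd_printk() levels 0/1/2:

	snd_printk(KERN_ERR "mychip: DMA setup failed (%d)\n", err);  /* always built */
	snd_printd("mychip: rate=%d channels=%d\n", rate, channels);  /* CONFIG_SND_DEBUG */
	snd_printdd("mychip: reg 0x%02x <= 0x%04x\n", reg, val);      /* CONFIG_SND_DEBUG_VERBOSE */
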
@@ -438,12 +417,10 @@ static inline int __snd_bug_on(int cond)
/* for easier backward-porting */
#if defined(CONFIG_GAMEPORT) || defined(CONFIG_GAMEPORT_MODULE)
-#ifndef gameport_set_dev_parent
#define gameport_set_dev_parent(gp,xdev) ((gp)->dev.parent = (xdev))
#define gameport_set_port_data(gp,r) ((gp)->port_data = (r))
#define gameport_get_port_data(gp) (gp)->port_data
#endif
-#endif
/* PCI quirk list helper */
struct snd_pci_quirk {
diff --git a/include/sound/info.h b/include/sound/info.h
index 7c2ee1a21b00..112e8949e1a7 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -110,13 +110,13 @@ void snd_card_info_read_oss(struct snd_info_buffer *buffer);
static inline void snd_card_info_read_oss(struct snd_info_buffer *buffer) {}
#endif
-int snd_iprintf(struct snd_info_buffer *buffer, char *fmt, ...) \
+int snd_iprintf(struct snd_info_buffer *buffer, const char *fmt, ...) \
__attribute__ ((format (printf, 2, 3)));
int snd_info_init(void);
int snd_info_done(void);
int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len);
-char *snd_info_get_str(char *dest, char *src, int len);
+const char *snd_info_get_str(char *dest, const char *src, int len);
struct snd_info_entry *snd_info_create_module_entry(struct module *module,
const char *name,
struct snd_info_entry *parent);
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 7ccce94a5255..c42506212649 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -47,7 +47,11 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
+#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
+#else
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#endif
/*
* info for buffer allocation
@@ -60,6 +64,7 @@ struct snd_dma_buffer {
void *private_data; /* private for allocator; don't touch */
};
+#ifdef CONFIG_SND_DMA_SGBUF
/*
* Scatter-Gather generic device pages
*/
@@ -107,6 +112,7 @@ static inline void *snd_sgbuf_get_ptr(struct snd_sg_buf *sgbuf, size_t offset)
{
return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
}
+#endif /* CONFIG_SND_DMA_SGBUF */
/* allocate/release a buffer */
int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 23893523dc8c..de6d981de5d6 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -902,6 +902,7 @@ int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
+#ifdef CONFIG_SND_DMA_SGBUF
/*
* SG-buffer handling
*/
@@ -927,6 +928,28 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
unsigned int ofs, unsigned int size);
+#else /* !SND_DMA_SGBUF */
+/*
+ * fake using a continuous buffer
+ */
+static inline dma_addr_t
+snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
+{
+ return substream->runtime->dma_addr + ofs;
+}
+
+static inline void *
+snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
+{
+ return substream->runtime->dma_area + ofs;
+}
+
+#define snd_pcm_sgbuf_ops_page NULL
+
+#define snd_pcm_sgbuf_get_chunk_size(subs, ofs, size) (size)
+
+#endif /* SND_DMA_SGBUF */
+
/* handle mmap counter - PCM mmap callback should handle this counter properly */
static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area)
{
@@ -965,4 +988,6 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
+const char *snd_pcm_format_name(snd_pcm_format_t format);
+
#endif /* __SOUND_PCM_H */
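
With the fallbacks above, callers can use the SG-buffer accessors unconditionally; a sketch of a driver transfer routine (substream, ofs and bytes are assumed locals) that works with or without CONFIG_SND_DMA_SGBUF:

	dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);
	void *ptr = snd_pcm_sgbuf_get_ptr(substream, ofs);
	unsigned int chunk = snd_pcm_sgbuf_get_chunk_size(substream, ofs, bytes);

	/* without SG support these resolve to runtime->dma_addr/dma_area + ofs
	 * and the full 'bytes' size, i.e. a plain continuous buffer */
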
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
new file mode 100644
index 000000000000..c0227361a876
--- /dev/null
+++ b/include/sound/sh_fsi.h
@@ -0,0 +1,83 @@
+#ifndef __SOUND_FSI_H
+#define __SOUND_FSI_H
+
+/*
+ * Fifo-attached Serial Interface (FSI) support for SH7724
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* flags format
+
+ * 0xABCDEEFF
+ *
+ * A: channel size for TDM (input)
+ * B: channel size for TDM (output)
+ * C: inversion
+ * D: mode
+ * E: input format
+ * F: output format
+ */
+
+#include <linux/clk.h>
+#include <sound/soc.h>
+
+/* TDM channel */
+#define SH_FSI_SET_CH_I(x) ((x & 0xF) << 28)
+#define SH_FSI_SET_CH_O(x) ((x & 0xF) << 24)
+
+#define SH_FSI_CH_IMASK 0xF0000000
+#define SH_FSI_CH_OMASK 0x0F000000
+#define SH_FSI_GET_CH_I(x) ((x & SH_FSI_CH_IMASK) >> 28)
+#define SH_FSI_GET_CH_O(x) ((x & SH_FSI_CH_OMASK) >> 24)
+
+/* clock inversion */
+#define SH_FSI_INVERSION_MASK 0x00F00000
+#define SH_FSI_LRM_INV (1 << 20)
+#define SH_FSI_BRM_INV (1 << 21)
+#define SH_FSI_LRS_INV (1 << 22)
+#define SH_FSI_BRS_INV (1 << 23)
+
+/* mode */
+#define SH_FSI_MODE_MASK 0x000F0000
+#define SH_FSI_IN_SLAVE_MODE (1 << 16) /* default master mode */
+#define SH_FSI_OUT_SLAVE_MODE (1 << 17) /* default master mode */
+
+/* DI format */
+#define SH_FSI_FMT_MASK 0x000000FF
+#define SH_FSI_IFMT(x) (((SH_FSI_FMT_ ## x) & SH_FSI_FMT_MASK) << 8)
+#define SH_FSI_OFMT(x) (((SH_FSI_FMT_ ## x) & SH_FSI_FMT_MASK) << 0)
+#define SH_FSI_GET_IFMT(x) ((x >> 8) & SH_FSI_FMT_MASK)
+#define SH_FSI_GET_OFMT(x) ((x >> 0) & SH_FSI_FMT_MASK)
+
+#define SH_FSI_FMT_MONO (1 << 0)
+#define SH_FSI_FMT_MONO_DELAY (1 << 1)
+#define SH_FSI_FMT_PCM (1 << 2)
+#define SH_FSI_FMT_I2S (1 << 3)
+#define SH_FSI_FMT_TDM (1 << 4)
+#define SH_FSI_FMT_TDM_DELAY (1 << 5)
+
+#define SH_FSI_IFMT_TDM_CH(x) \
+ (SH_FSI_IFMT(TDM) | SH_FSI_SET_CH_I(x))
+#define SH_FSI_IFMT_TDM_DELAY_CH(x) \
+ (SH_FSI_IFMT(TDM_DELAY) | SH_FSI_SET_CH_I(x))
+
+#define SH_FSI_OFMT_TDM_CH(x) \
+ (SH_FSI_OFMT(TDM) | SH_FSI_SET_CH_O(x))
+#define SH_FSI_OFMT_TDM_DELAY_CH(x) \
+ (SH_FSI_OFMT(TDM_DELAY) | SH_FSI_SET_CH_O(x))
+
+struct sh_fsi_platform_info {
+ unsigned long porta_flags;
+ unsigned long portb_flags;
+};
+
+extern struct snd_soc_dai fsi_soc_dai[2];
+extern struct snd_soc_platform fsi_soc_platform;
+
+#endif /* __SOUND_FSI_H */
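
A minimal sketch of how a board file might pack the flag word described in the header comment above; the port assignments and channel count chosen here are only an example:

	static struct sh_fsi_platform_info fsi_info = {
		/* port A: I2S in and out, FSI as clock master (default) */
		.porta_flags = SH_FSI_IFMT(I2S) | SH_FSI_OFMT(I2S),
		/* port B: 6-channel TDM output, LR of master clock inverted */
		.portb_flags = SH_FSI_OFMT_TDM_CH(6) | SH_FSI_LRM_INV,
	};
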
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 352d7eee9b6d..97ca9af414dc 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -27,8 +27,8 @@ struct snd_pcm_substream;
#define SND_SOC_DAIFMT_I2S 0 /* I2S mode */
#define SND_SOC_DAIFMT_RIGHT_J 1 /* Right Justified mode */
#define SND_SOC_DAIFMT_LEFT_J 2 /* Left Justified mode */
-#define SND_SOC_DAIFMT_DSP_A 3 /* L data msb after FRM LRC */
-#define SND_SOC_DAIFMT_DSP_B 4 /* L data msb during FRM LRC */
+#define SND_SOC_DAIFMT_DSP_A 3 /* L data MSB after FRM LRC */
+#define SND_SOC_DAIFMT_DSP_B 4 /* L data MSB during FRM LRC */
#define SND_SOC_DAIFMT_AC97 5 /* AC97 */
/* left and right justified also known as MSB and LSB respectively */
@@ -38,7 +38,7 @@ struct snd_pcm_substream;
/*
* DAI Clock gating.
*
- * DAI bit clocks can be be gated (disabled) when not the DAI is not
+ * DAI bit clocks can be gated (disabled) when the DAI is not
* sending or receiving PCM data in a frame. This can be used to save power.
*/
#define SND_SOC_DAIFMT_CONT (0 << 4) /* continuous clock */
@@ -51,21 +51,21 @@ struct snd_pcm_substream;
* format.
*/
#define SND_SOC_DAIFMT_NB_NF (0 << 8) /* normal bit clock + frame */
-#define SND_SOC_DAIFMT_NB_IF (1 << 8) /* normal bclk + inv frm */
-#define SND_SOC_DAIFMT_IB_NF (2 << 8) /* invert bclk + nor frm */
-#define SND_SOC_DAIFMT_IB_IF (3 << 8) /* invert bclk + frm */
+#define SND_SOC_DAIFMT_NB_IF (1 << 8) /* normal BCLK + inv FRM */
+#define SND_SOC_DAIFMT_IB_NF (2 << 8) /* invert BCLK + nor FRM */
+#define SND_SOC_DAIFMT_IB_IF (3 << 8) /* invert BCLK + FRM */
/*
* DAI hardware clock masters.
*
* This is wrt the codec, the inverse is true for the interface
- * i.e. if the codec is clk and frm master then the interface is
+ * i.e. if the codec is clk and FRM master then the interface is
* clk and frame slave.
*/
-#define SND_SOC_DAIFMT_CBM_CFM (0 << 12) /* codec clk & frm master */
-#define SND_SOC_DAIFMT_CBS_CFM (1 << 12) /* codec clk slave & frm master */
+#define SND_SOC_DAIFMT_CBM_CFM (0 << 12) /* codec clk & FRM master */
+#define SND_SOC_DAIFMT_CBS_CFM (1 << 12) /* codec clk slave & FRM master */
#define SND_SOC_DAIFMT_CBM_CFS (2 << 12) /* codec clk master & frame slave */
-#define SND_SOC_DAIFMT_CBS_CFS (3 << 12) /* codec clk & frm slave */
+#define SND_SOC_DAIFMT_CBS_CFS (3 << 12) /* codec clk & FRM slave */
#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f
#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0
@@ -78,7 +78,13 @@ struct snd_pcm_substream;
#define SND_SOC_CLOCK_IN 0
#define SND_SOC_CLOCK_OUT 1
-#define SND_SOC_STD_AC97_FMTS (SNDRV_PCM_FMTBIT_S16_LE |\
+#define SND_SOC_STD_AC97_FMTS (SNDRV_PCM_FMTBIT_S8 |\
+ SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S16_BE |\
+ SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S20_3BE |\
+ SNDRV_PCM_FMTBIT_S24_3LE |\
+ SNDRV_PCM_FMTBIT_S24_3BE |\
SNDRV_PCM_FMTBIT_S32_LE |\
SNDRV_PCM_FMTBIT_S32_BE)
@@ -106,7 +112,7 @@ int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
- unsigned int mask, int slots);
+ unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width);
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
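
For illustration, how a machine driver's hw_params might combine the format, clock-inversion and master flags, and the new four-argument TDM slot call; codec_dai, cpu_dai, ret and the slot layout are assumptions:

	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
					     SND_SOC_DAIFMT_NB_NF |
					     SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	/* two 16-bit slots, both used for TX and RX */
	ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 16);
	if (ret < 0)
		return ret;
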
@@ -116,12 +122,12 @@ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute);
/*
* Digital Audio Interface.
*
- * Describes the Digital Audio Interface in terms of it's ALSA, DAI and AC97
- * operations an capabilities. Codec and platfom drivers will register a this
+ * Describes the Digital Audio Interface in terms of its ALSA, DAI and AC97
+ * operations and capabilities. Codec and platform drivers will register this
* structure for every DAI they have.
*
 * This structure covers the clocking, formatting and ALSA operations for each
- * interface a
+ * interface.
*/
struct snd_soc_dai_ops {
/*
@@ -140,7 +146,8 @@ struct snd_soc_dai_ops {
*/
int (*set_fmt)(struct snd_soc_dai *dai, unsigned int fmt);
int (*set_tdm_slot)(struct snd_soc_dai *dai,
- unsigned int mask, int slots);
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
/*
@@ -179,6 +186,7 @@ struct snd_soc_dai {
int ac97_control;
struct device *dev;
+ void *ac97_pdata; /* platform_data for the ac97 codec */
/* DAI callbacks */
int (*probe)(struct platform_device *pdev,
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index ec8a45f9a069..c1410e3191e3 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -137,6 +137,12 @@
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */
+#define SND_SOC_DAPM_AIF_IN(wname, stname, wslot, wreg, wshift, winvert) \
+{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
+ .reg = wreg, .shift = wshift, .invert = winvert }
+#define SND_SOC_DAPM_AIF_OUT(wname, stname, wslot, wreg, wshift, winvert) \
+{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
+ .reg = wreg, .shift = wshift, .invert = winvert }
#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
.shift = wshift, .invert = winvert}
@@ -279,9 +285,11 @@ int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
/* dapm events */
int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream,
int event);
+void snd_soc_dapm_shutdown(struct snd_soc_device *socdev);
/* dapm sys fs - used by the core */
int snd_soc_dapm_sys_add(struct device *dev);
+void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec);
/* dapm audio pin control and status */
int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin);
@@ -311,6 +319,8 @@ enum snd_soc_dapm_type {
snd_soc_dapm_pre, /* machine specific pre widget - exec first */
snd_soc_dapm_post, /* machine specific post widget - exec last */
snd_soc_dapm_supply, /* power/clock supply */
+ snd_soc_dapm_aif_in, /* audio interface input */
+ snd_soc_dapm_aif_out, /* audio interface output */
};
/*
diff --git a/include/sound/soc.h b/include/sound/soc.h
index cf6111d72b17..475cb7ed6bec 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -135,6 +135,28 @@
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) }
+#define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\
+ xhandler_get, xhandler_put, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .shift = shift_left, .rshift = shift_right, \
+ .max = xmax, .invert = xinvert} }
+#define SOC_DOUBLE_R_EXT_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert,\
+ xhandler_get, xhandler_put, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw_2r, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = reg_left, .rreg = reg_right, .shift = xshift, \
+ .max = xmax, .invert = xinvert} }
#define SOC_SINGLE_BOOL_EXT(xname, xdata, xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_bool_ext, \
@@ -183,14 +205,28 @@ struct snd_soc_jack_gpio;
#endif
typedef int (*hw_write_t)(void *,const char* ,int);
-typedef int (*hw_read_t)(void *,char* ,int);
extern struct snd_ac97_bus_ops soc_ac97_ops;
+enum snd_soc_control_type {
+ SND_SOC_CUSTOM,
+ SND_SOC_I2C,
+ SND_SOC_SPI,
+};
+
int snd_soc_register_platform(struct snd_soc_platform *platform);
void snd_soc_unregister_platform(struct snd_soc_platform *platform);
int snd_soc_register_codec(struct snd_soc_codec *codec);
void snd_soc_unregister_codec(struct snd_soc_codec *codec);
+int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg);
+int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
+ int addr_bits, int data_bits,
+ enum snd_soc_control_type control);
+
+#ifdef CONFIG_PM
+int snd_soc_suspend_device(struct device *dev);
+int snd_soc_resume_device(struct device *dev);
+#endif
/* pcm <-> DAI connect */
void snd_soc_free_pcms(struct snd_soc_device *socdev);
@@ -216,9 +252,9 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
/* codec register bit access */
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
- unsigned short mask, unsigned short value);
+ unsigned int mask, unsigned int value);
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
- unsigned short mask, unsigned short value);
+ unsigned int mask, unsigned int value);
int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
struct snd_ac97_bus_ops *ops, int num);
@@ -356,8 +392,10 @@ struct snd_soc_codec {
int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
int (*display_register)(struct snd_soc_codec *, char *,
size_t, unsigned int);
+ int (*volatile_register)(unsigned int);
+ int (*readable_register)(unsigned int);
hw_write_t hw_write;
- hw_read_t hw_read;
+ unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int);
void *reg_cache;
short reg_cache_size;
short reg_cache_step;
@@ -369,8 +407,6 @@ struct snd_soc_codec {
enum snd_soc_bias_level bias_level;
enum snd_soc_bias_level suspend_bias_level;
struct delayed_work delayed_work;
- struct list_head up_list;
- struct list_head down_list;
/* codec DAI's */
struct snd_soc_dai *dai;
@@ -379,6 +415,7 @@ struct snd_soc_codec {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_reg;
struct dentry *debugfs_pop_time;
+ struct dentry *debugfs_dapm;
#endif
};
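
A sketch of the new register-cache I/O setup as a codec probe might call it; the 7-bit address / 9-bit data split is typical of small I2C codecs but is only an assumption here, as are codec and ret:

	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
	if (ret < 0) {
		printk(KERN_ERR "codec: failed to set cache I/O: %d\n", ret);
		return ret;
	}
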
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index d136ea2181ed..9fd5b19ccf5c 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -35,6 +35,8 @@
#define SNDRV_CTL_TLVT_DB_SCALE 1 /* dB scale */
#define SNDRV_CTL_TLVT_DB_LINEAR 2 /* linear volume */
#define SNDRV_CTL_TLVT_DB_RANGE 3 /* dB range container */
+#define SNDRV_CTL_TLVT_DB_MINMAX 4 /* dB scale with min/max */
+#define SNDRV_CTL_TLVT_DB_MINMAX_MUTE 5 /* dB scale with min/max with mute */
#define TLV_DB_SCALE_ITEM(min, step, mute) \
SNDRV_CTL_TLVT_DB_SCALE, 2 * sizeof(unsigned int), \
@@ -42,6 +44,18 @@
#define DECLARE_TLV_DB_SCALE(name, min, step, mute) \
unsigned int name[] = { TLV_DB_SCALE_ITEM(min, step, mute) }
+/* dB scale specified with min/max values instead of step */
+#define TLV_DB_MINMAX_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVT_DB_MINMAX, 2 * sizeof(unsigned int), \
+ (min_dB), (max_dB)
+#define TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVT_DB_MINMAX_MUTE, 2 * sizeof(unsigned int), \
+ (min_dB), (max_dB)
+#define DECLARE_TLV_DB_MINMAX(name, min_dB, max_dB) \
+ unsigned int name[] = { TLV_DB_MINMAX_ITEM(min_dB, max_dB) }
+#define DECLARE_TLV_DB_MINMAX_MUTE(name, min_dB, max_dB) \
+ unsigned int name[] = { TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) }
+
/* linear volume between min_dB and max_dB (.01dB unit) */
#define TLV_DB_LINEAR_ITEM(min_dB, max_dB) \
SNDRV_CTL_TLVT_DB_LINEAR, 2 * sizeof(unsigned int), \
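
A minimal sketch of declaring one of the new min/max TLV entries for a volume control; the name and range are placeholders, with values in 0.01 dB units as elsewhere in this header:

	/* control range -71.25 dB .. 0 dB */
	static const DECLARE_TLV_DB_MINMAX(out_tlv, -7125, 0);
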
diff --git a/include/sound/uda1380.h b/include/sound/uda1380.h
new file mode 100644
index 000000000000..381319c7000c
--- /dev/null
+++ b/include/sound/uda1380.h
@@ -0,0 +1,22 @@
+/*
+ * UDA1380 ALSA SoC Codec driver
+ *
+ * Copyright 2009 Philipp Zabel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UDA1380_H
+#define __UDA1380_H
+
+struct uda1380_platform_data {
+ int gpio_power;
+ int gpio_reset;
+ int dac_clk;
+#define UDA1380_DAC_CLK_SYSCLK 0
+#define UDA1380_DAC_CLK_WSPLL 1
+};
+
+#endif /* __UDA1380_H */
diff --git a/include/sound/version.h b/include/sound/version.h
index 456f1359e1c0..22939142dd23 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
/* include/version.h */
-#define CONFIG_SND_VERSION "1.0.20"
+#define CONFIG_SND_VERSION "1.0.21"
#define CONFIG_SND_DATE ""
diff --git a/include/sound/wm8993.h b/include/sound/wm8993.h
new file mode 100644
index 000000000000..9c661f2f8cda
--- /dev/null
+++ b/include/sound/wm8993.h
@@ -0,0 +1,44 @@
+/*
+ * linux/sound/wm8993.h -- Platform data for WM8993
+ *
+ * Copyright 2009 Wolfson Microelectronics. PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_WM8993_H
+#define __LINUX_SND_WM8993_H
+
+/* Note that EQ1 only contains the enable/disable bit so will be
+ ignored but is included for simplicity.
+ */
+struct wm8993_retune_mobile_setting {
+ const char *name;
+ unsigned int rate;
+ u16 config[24];
+};
+
+struct wm8993_platform_data {
+ struct wm8993_retune_mobile_setting *retune_configs;
+ int num_retune_configs;
+
+ /* LINEOUT can be differential or single ended */
+ unsigned int lineout1_diff:1;
+ unsigned int lineout2_diff:1;
+
+ /* Common mode feedback */
+ unsigned int lineout1fb:1;
+ unsigned int lineout2fb:1;
+
+ /* Microphone biases: 0=0.9*AVDD1 1=0.65*AVDD1 */
+ unsigned int micbias1_lvl:1;
+ unsigned int micbias2_lvl:1;
+
+ /* Jack detect threshold levels, see datasheet for values */
+ unsigned int jd_scthr:2;
+ unsigned int jd_thr:2;
+};
+
+#endif
diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
index 05ead6698434..444cd6ba0ba7 100644
--- a/include/sound/ymfpci.h
+++ b/include/sound/ymfpci.h
@@ -331,6 +331,7 @@ struct snd_ymfpci {
struct snd_ac97 *ac97;
struct snd_rawmidi *rawmidi;
struct snd_timer *timer;
+ unsigned int timer_ticks;
struct pci_dev *pci;
struct snd_card *card;
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index f7a7ae1e8f90..2a4b3bf74033 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -26,6 +26,11 @@
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
DEFINE_TRACE(name)
+#undef TRACE_EVENT_FN
+#define TRACE_EVENT_FN(name, proto, args, tstruct, \
+ assign, print, reg, unreg) \
+ DEFINE_TRACE_FN(name, reg, unreg)
+
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
DEFINE_TRACE(name)
@@ -56,6 +61,8 @@
#include <trace/ftrace.h>
#endif
+#undef TRACE_EVENT
+#undef TRACE_EVENT_FN
#undef TRACE_HEADER_MULTI_READ
/* Only undef what we defined in this file */
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
new file mode 100644
index 000000000000..84160fb18478
--- /dev/null
+++ b/include/trace/events/module.h
@@ -0,0 +1,126 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM module
+
+#if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MODULE_H
+
+#include <linux/tracepoint.h>
+
+#ifdef CONFIG_MODULES
+
+struct module;
+
+#define show_module_flags(flags) __print_flags(flags, "", \
+ { (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \
+ { (1UL << TAINT_FORCED_MODULE), "F" }, \
+ { (1UL << TAINT_CRAP), "C" })
+
+TRACE_EVENT(module_load,
+
+ TP_PROTO(struct module *mod),
+
+ TP_ARGS(mod),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, taints )
+ __string( name, mod->name )
+ ),
+
+ TP_fast_assign(
+ __entry->taints = mod->taints;
+ __assign_str(name, mod->name);
+ ),
+
+ TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints))
+);
+
+TRACE_EVENT(module_free,
+
+ TP_PROTO(struct module *mod),
+
+ TP_ARGS(mod),
+
+ TP_STRUCT__entry(
+ __string( name, mod->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mod->name);
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+TRACE_EVENT(module_get,
+
+ TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+
+ TP_ARGS(mod, ip, refcnt),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ip )
+ __field( int, refcnt )
+ __string( name, mod->name )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->refcnt = refcnt;
+ __assign_str(name, mod->name);
+ ),
+
+ TP_printk("%s call_site=%pf refcnt=%d",
+ __get_str(name), (void *)__entry->ip, __entry->refcnt)
+);
+
+TRACE_EVENT(module_put,
+
+ TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+
+ TP_ARGS(mod, ip, refcnt),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ip )
+ __field( int, refcnt )
+ __string( name, mod->name )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->refcnt = refcnt;
+ __assign_str(name, mod->name);
+ ),
+
+ TP_printk("%s call_site=%pf refcnt=%d",
+ __get_str(name), (void *)__entry->ip, __entry->refcnt)
+);
+
+TRACE_EVENT(module_request,
+
+ TP_PROTO(char *name, bool wait, unsigned long ip),
+
+ TP_ARGS(name, wait, ip),
+
+ TP_STRUCT__entry(
+ __field( bool, wait )
+ __field( unsigned long, ip )
+ __string( name, name )
+ ),
+
+ TP_fast_assign(
+ __entry->wait = wait;
+ __entry->ip = ip;
+ __assign_str(name, name);
+ ),
+
+ TP_printk("%s wait=%d call_site=%pf",
+ __get_str(name), (int)__entry->wait, (void *)__entry->ip)
+);
+
+#endif /* CONFIG_MODULES */
+
+#endif /* _TRACE_MODULE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8949bb7eb082..b48f1ad7c946 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -94,6 +94,7 @@ TRACE_EVENT(sched_wakeup,
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
+ __field( int, cpu )
),
TP_fast_assign(
@@ -101,11 +102,12 @@ TRACE_EVENT(sched_wakeup,
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
+ __entry->cpu = task_cpu(p);
),
- TP_printk("task %s:%d [%d] success=%d",
+ TP_printk("task %s:%d [%d] success=%d [%03d]",
__entry->comm, __entry->pid, __entry->prio,
- __entry->success)
+ __entry->success, __entry->cpu)
);
/*
@@ -125,6 +127,7 @@ TRACE_EVENT(sched_wakeup_new,
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
+ __field( int, cpu )
),
TP_fast_assign(
@@ -132,11 +135,12 @@ TRACE_EVENT(sched_wakeup_new,
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
+ __entry->cpu = task_cpu(p);
),
- TP_printk("task %s:%d [%d] success=%d",
+ TP_printk("task %s:%d [%d] success=%d [%03d]",
__entry->comm, __entry->pid, __entry->prio,
- __entry->success)
+ __entry->success, __entry->cpu)
);
/*
@@ -340,6 +344,101 @@ TRACE_EVENT(sched_signal_send,
__entry->sig, __entry->comm, __entry->pid)
);
+/*
+ * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
+ * adding sched_stat support to SCHED_FIFO/RR would be welcome.
+ */
+
+/*
+ * Tracepoint for accounting wait time (time the task is runnable
+ * but not actually running due to scheduler contention).
+ */
+TRACE_EVENT(sched_stat_wait,
+
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+
+ TP_ARGS(tsk, delay),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, delay )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->delay = delay;
+ )
+ TP_perf_assign(
+ __perf_count(delay);
+ ),
+
+ TP_printk("task: %s:%d wait: %Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->delay)
+);
+
+/*
+ * Tracepoint for accounting sleep time (time the task is not runnable,
+ * including iowait, see below).
+ */
+TRACE_EVENT(sched_stat_sleep,
+
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+
+ TP_ARGS(tsk, delay),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, delay )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->delay = delay;
+ )
+ TP_perf_assign(
+ __perf_count(delay);
+ ),
+
+ TP_printk("task: %s:%d sleep: %Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->delay)
+);
+
+/*
+ * Tracepoint for accounting iowait time (time the task is not runnable
+ * due to waiting on IO to complete).
+ */
+TRACE_EVENT(sched_stat_iowait,
+
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+
+ TP_ARGS(tsk, delay),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, delay )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->delay = delay;
+ )
+ TP_perf_assign(
+ __perf_count(delay);
+ ),
+
+ TP_printk("task: %s:%d iowait: %Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->delay)
+);
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
new file mode 100644
index 000000000000..397dff2dbd5a
--- /dev/null
+++ b/include/trace/events/syscalls.h
@@ -0,0 +1,70 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM syscalls
+
+#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENTS_SYSCALLS_H
+
+#include <linux/tracepoint.h>
+
+#include <asm/ptrace.h>
+#include <asm/syscall.h>
+
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+
+extern void syscall_regfunc(void);
+extern void syscall_unregfunc(void);
+
+TRACE_EVENT_FN(sys_enter,
+
+ TP_PROTO(struct pt_regs *regs, long id),
+
+ TP_ARGS(regs, id),
+
+ TP_STRUCT__entry(
+ __field( long, id )
+ __array( unsigned long, args, 6 )
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ syscall_get_arguments(current, regs, 0, 6, __entry->args);
+ ),
+
+ TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
+ __entry->id,
+ __entry->args[0], __entry->args[1], __entry->args[2],
+ __entry->args[3], __entry->args[4], __entry->args[5]),
+
+ syscall_regfunc, syscall_unregfunc
+);
+
+TRACE_EVENT_FN(sys_exit,
+
+ TP_PROTO(struct pt_regs *regs, long ret),
+
+ TP_ARGS(regs, ret),
+
+ TP_STRUCT__entry(
+ __field( long, id )
+ __field( long, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->id = syscall_get_nr(current, regs);
+ __entry->ret = ret;
+ ),
+
+ TP_printk("NR %ld = %ld",
+ __entry->id, __entry->ret),
+
+ syscall_regfunc, syscall_unregfunc
+);
+
+#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
+
+#endif /* _TRACE_EVENTS_SYSCALLS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f64fbaae781a..308bafd93325 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -21,11 +21,14 @@
#undef __field
#define __field(type, item) type item;
+#undef __field_ext
+#define __field_ext(type, item, filter_type) type item;
+
#undef __array
#define __array(type, item, len) type item[len];
#undef __dynamic_array
-#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
@@ -42,6 +45,16 @@
}; \
static struct ftrace_event_call event_##name
+#undef __cpparg
+#define __cpparg(arg...) arg
+
+/* Callbacks are meaningless to ftrace. */
+#undef TRACE_EVENT_FN
+#define TRACE_EVENT_FN(name, proto, args, tstruct, \
+ assign, print, reg, unreg) \
+ TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
+ __cpparg(tstruct), __cpparg(assign), __cpparg(print)) \
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -51,23 +64,27 @@
* Include the following:
*
* struct ftrace_data_offsets_<call> {
- * int <item1>;
- * int <item2>;
+ * u32 <item1>;
+ * u32 <item2>;
* [...]
* };
*
- * The __dynamic_array() macro will create each int <item>, this is
+ * The __dynamic_array() macro will create each u32 <item>, this is
* to keep the offset of each array from the beginning of the event.
+ * The size of an array is also encoded, in the higher 16 bits of <item>.
*/
#undef __field
-#define __field(type, item);
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
-#define __dynamic_array(type, item, len) int item;
+#define __dynamic_array(type, item, len) u32 item;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
@@ -109,6 +126,9 @@
if (!ret) \
return 0;
+#undef __field_ext
+#define __field_ext(type, item, filter_type) __field(type, item)
+
#undef __array
#define __array(type, item, len) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
@@ -120,7 +140,7 @@
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
- ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \
+ ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
"offset:%u;\tsize:%u;\n", \
(unsigned int)offsetof(typeof(field), \
__data_loc_##item), \
@@ -150,7 +170,8 @@
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
-ftrace_format_##call(struct trace_seq *s) \
+ftrace_format_##call(struct ftrace_event_call *unused, \
+ struct trace_seq *s) \
{ \
struct ftrace_raw_##call field __attribute__((unused)); \
int ret = 0; \
@@ -210,7 +231,7 @@ ftrace_format_##call(struct trace_seq *s) \
#undef __get_dynamic_array
#define __get_dynamic_array(field) \
- ((void *)__entry + __entry->__data_loc_##field)
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
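
Stage 4 (further down) packs the array length into the upper 16 bits of each __data_loc word; a worked sketch of what a reader recovers from it, for a hypothetical dynamic array field named buf:

	u32 loc = __entry->__data_loc_buf;
	void *buf = (void *)__entry + (loc & 0xffff);	/* low 16 bits: offset from entry */
	u32 buf_len = loc >> 16;			/* high 16 bits: length in bytes */
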
@@ -263,28 +284,33 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#undef __field
-#define __field(type, item) \
+#undef __field_ext
+#define __field_ext(type, item, filter_type) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), item), \
- sizeof(field.item), is_signed_type(type)); \
+ sizeof(field.item), \
+ is_signed_type(type), filter_type); \
if (ret) \
return ret;
+#undef __field
+#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
+
#undef __array
#define __array(type, item, len) \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
ret = trace_define_field(event_call, #type "[" #len "]", #item, \
offsetof(typeof(field), item), \
- sizeof(field.item), 0); \
+ sizeof(field.item), 0, FILTER_OTHER); \
if (ret) \
return ret;
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
- ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
- offsetof(typeof(field), __data_loc_##item), \
- sizeof(field.__data_loc_##item), 0);
+ ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
+ offsetof(typeof(field), __data_loc_##item), \
+ sizeof(field.__data_loc_##item), 0, \
+ FILTER_OTHER);
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
@@ -292,17 +318,14 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
int \
-ftrace_define_fields_##call(void) \
+ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
struct ftrace_raw_##call field; \
- struct ftrace_event_call *event_call = &event_##call; \
int ret; \
\
- __common_field(int, type, 1); \
- __common_field(unsigned char, flags, 0); \
- __common_field(unsigned char, preempt_count, 0); \
- __common_field(int, pid, 1); \
- __common_field(int, tgid, 1); \
+ ret = trace_define_common_fields(event_call); \
+ if (ret) \
+ return ret; \
\
tstruct; \
\
@@ -321,6 +344,9 @@ ftrace_define_fields_##call(void) \
#undef __field
#define __field(type, item)
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
#undef __array
#define __array(type, item, len)
@@ -328,6 +354,7 @@ ftrace_define_fields_##call(void) \
#define __dynamic_array(type, item, len) \
__data_offsets->item = __data_size + \
offsetof(typeof(*entry), __data); \
+ __data_offsets->item |= (len * sizeof(type)) << 16; \
__data_size += (len) * sizeof(type);
#undef __string
@@ -433,13 +460,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
* {
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ * struct ring_buffer *buffer;
* unsigned long irq_flags;
* int pc;
*
* local_save_flags(irq_flags);
* pc = preempt_count();
*
- * event = trace_current_buffer_lock_reserve(event_<call>.id,
+ * event = trace_current_buffer_lock_reserve(&buffer,
+ * event_<call>.id,
* sizeof(struct ftrace_raw_<call>),
* irq_flags, pc);
* if (!event)
@@ -449,7 +478,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
* <assign>; <-- Here we assign the entries by the __field and
* __array macros.
*
- * trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
* }
*
* static int ftrace_raw_reg_event_<call>(void)
@@ -541,6 +570,7 @@ static void ftrace_raw_event_##call(proto) \
struct ftrace_event_call *event_call = &event_##call; \
struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \
+ struct ring_buffer *buffer; \
unsigned long irq_flags; \
int __data_size; \
int pc; \
@@ -550,7 +580,8 @@ static void ftrace_raw_event_##call(proto) \
\
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
- event = trace_current_buffer_lock_reserve(event_##call.id, \
+ event = trace_current_buffer_lock_reserve(&buffer, \
+ event_##call.id, \
sizeof(*entry) + __data_size, \
irq_flags, pc); \
if (!event) \
@@ -562,11 +593,12 @@ static void ftrace_raw_event_##call(proto) \
\
{ assign; } \
\
- if (!filter_current_check_discard(event_call, entry, event)) \
- trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+ if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+ trace_nowake_buffer_unlock_commit(buffer, \
+ event, irq_flags, pc); \
} \
\
-static int ftrace_raw_reg_event_##call(void) \
+static int ftrace_raw_reg_event_##call(void *ptr) \
{ \
int ret; \
\
@@ -577,7 +609,7 @@ static int ftrace_raw_reg_event_##call(void) \
return ret; \
} \
\
-static void ftrace_raw_unreg_event_##call(void) \
+static void ftrace_raw_unreg_event_##call(void *ptr) \
{ \
unregister_trace_##call(ftrace_raw_event_##call); \
} \
@@ -595,7 +627,6 @@ static int ftrace_raw_init_event_##call(void) \
return -ENODEV; \
event_##call.id = id; \
INIT_LIST_HEAD(&event_##call.fields); \
- init_preds(&event_##call); \
return 0; \
} \
\
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 8cfe515cbc47..5dc283ba5ae0 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -1,8 +1,13 @@
#ifndef _TRACE_SYSCALL_H
#define _TRACE_SYSCALL_H
+#include <linux/tracepoint.h>
+#include <linux/unistd.h>
+#include <linux/ftrace_event.h>
+
#include <asm/ptrace.h>
+
/*
* A syscall entry in the ftrace syscalls array.
*
@@ -10,26 +15,49 @@
* @nb_args: number of parameters it takes
* @types: list of types as strings
* @args: list of args as strings (args[i] matches types[i])
+ * @enter_id: associated ftrace enter event id
+ * @exit_id: associated ftrace exit event id
+ * @enter_event: associated syscall_enter trace event
+ * @exit_event: associated syscall_exit trace event
*/
struct syscall_metadata {
const char *name;
int nb_args;
const char **types;
const char **args;
+ int enter_id;
+ int exit_id;
+
+ struct ftrace_event_call *enter_event;
+ struct ftrace_event_call *exit_event;
};
#ifdef CONFIG_FTRACE_SYSCALLS
-extern void arch_init_ftrace_syscalls(void);
extern struct syscall_metadata *syscall_nr_to_meta(int nr);
-extern void start_ftrace_syscalls(void);
-extern void stop_ftrace_syscalls(void);
-extern void ftrace_syscall_enter(struct pt_regs *regs);
-extern void ftrace_syscall_exit(struct pt_regs *regs);
-#else
-static inline void start_ftrace_syscalls(void) { }
-static inline void stop_ftrace_syscalls(void) { }
-static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
-static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
+extern int syscall_name_to_nr(char *name);
+void set_syscall_enter_id(int num, int id);
+void set_syscall_exit_id(int num, int id);
+extern struct trace_event event_syscall_enter;
+extern struct trace_event event_syscall_exit;
+extern int reg_event_syscall_enter(void *ptr);
+extern void unreg_event_syscall_enter(void *ptr);
+extern int reg_event_syscall_exit(void *ptr);
+extern void unreg_event_syscall_exit(void *ptr);
+extern int syscall_enter_format(struct ftrace_event_call *call,
+ struct trace_seq *s);
+extern int syscall_exit_format(struct ftrace_event_call *call,
+ struct trace_seq *s);
+extern int syscall_enter_define_fields(struct ftrace_event_call *call);
+extern int syscall_exit_define_fields(struct ftrace_event_call *call);
+enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
+enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
+#endif
+#ifdef CONFIG_EVENT_PROFILE
+int reg_prof_syscall_enter(char *name);
+void unreg_prof_syscall_enter(char *name);
+int reg_prof_syscall_exit(char *name);
+void unreg_prof_syscall_exit(char *name);
+
#endif
#endif /* _TRACE_SYSCALL_H */
diff --git a/init/Kconfig b/init/Kconfig
index 3f7e60995c80..8e8b76d8a272 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -316,38 +316,28 @@ choice
prompt "RCU Implementation"
default TREE_RCU
-config CLASSIC_RCU
- bool "Classic RCU"
- help
- This option selects the classic RCU implementation that is
- designed for best read-side performance on non-realtime
- systems.
-
- Select this option if you are unsure.
-
config TREE_RCU
bool "Tree-based hierarchical RCU"
help
This option selects the RCU implementation that is
designed for very large SMP system with hundreds or
- thousands of CPUs.
+ thousands of CPUs. It also scales down nicely to
+ smaller systems.
-config PREEMPT_RCU
- bool "Preemptible RCU"
+config TREE_PREEMPT_RCU
+ bool "Preemptable tree-based hierarchical RCU"
depends on PREEMPT
help
- This option reduces the latency of the kernel by making certain
- RCU sections preemptible. Normally RCU code is non-preemptible, if
- this option is selected then read-only RCU sections become
- preemptible. This helps latency, but may expose bugs due to
- now-naive assumptions about each RCU read-side critical section
- remaining on a given CPU through its execution.
+ This option selects the RCU implementation that is
+ designed for very large SMP systems with hundreds or
+ thousands of CPUs, but for which real-time response
+ is also required.
endchoice
config RCU_TRACE
bool "Enable tracing for RCU"
- depends on TREE_RCU || PREEMPT_RCU
+ depends on TREE_RCU || TREE_PREEMPT_RCU
help
This option provides tracing in RCU which presents stats
in debugfs for debugging RCU implementation.
@@ -359,7 +349,7 @@ config RCU_FANOUT
int "Tree-based hierarchical RCU fanout value"
range 2 64 if 64BIT
range 2 32 if !64BIT
- depends on TREE_RCU
+ depends on TREE_RCU || TREE_PREEMPT_RCU
default 64 if 64BIT
default 32 if !64BIT
help
@@ -374,7 +364,7 @@ config RCU_FANOUT
config RCU_FANOUT_EXACT
bool "Disable tree-based hierarchical RCU auto-balancing"
- depends on TREE_RCU
+ depends on TREE_RCU || TREE_PREEMPT_RCU
default n
help
This option forces use of the exact RCU_FANOUT value specified,
@@ -387,18 +377,12 @@ config RCU_FANOUT_EXACT
Say N if unsure.
config TREE_RCU_TRACE
- def_bool RCU_TRACE && TREE_RCU
- select DEBUG_FS
- help
- This option provides tracing for the TREE_RCU implementation,
- permitting Makefile to trivially select kernel/rcutree_trace.c.
-
-config PREEMPT_RCU_TRACE
- def_bool RCU_TRACE && PREEMPT_RCU
+ def_bool RCU_TRACE && ( TREE_RCU || TREE_PREEMPT_RCU )
select DEBUG_FS
help
- This option provides tracing for the PREEMPT_RCU implementation,
- permitting Makefile to trivially select kernel/rcupreempt_trace.c.
+ This option provides tracing for the TREE_RCU and
+ TREE_PREEMPT_RCU implementations, permitting Makefile to
+ trivially select kernel/rcutree_trace.c.
endmenu # "RCU Subsystem"
diff --git a/init/main.c b/init/main.c
index 11f4f145be3f..b34fd8e5edef 100644
--- a/init/main.c
+++ b/init/main.c
@@ -451,6 +451,7 @@ static noinline void __init_refok rest_init(void)
{
int pid;
+ rcu_scheduler_starting();
kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
numa_default_policy();
pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
@@ -462,7 +463,6 @@ static noinline void __init_refok rest_init(void)
* at least once to get things moving:
*/
init_idle_bootup_task(current);
- rcu_scheduler_starting();
preempt_enable_no_resched();
schedule();
preempt_disable();
@@ -631,7 +631,6 @@ asmlinkage void __init start_kernel(void)
softirq_init();
timekeeping_init();
time_init();
- sched_clock_init();
profile_init();
if (!irqs_disabled())
printk(KERN_CRIT "start_kernel(): bug: interrupts were "
@@ -682,6 +681,7 @@ asmlinkage void __init start_kernel(void)
numa_policy_init();
if (late_time_init)
late_time_init();
+ sched_clock_init();
calibrate_delay();
pidmap_init();
anon_vma_init();
diff --git a/kernel/Makefile b/kernel/Makefile
index 2093a691f1c2..b833bd5cc127 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -80,11 +80,9 @@ obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
-obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
obj-$(CONFIG_TREE_RCU) += rcutree.o
-obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
+obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
-obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 9f3391090b3e..9a4715a2f6bf 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -491,13 +491,17 @@ static void do_acct_process(struct bsd_acct_struct *acct,
u64 run_time;
struct timespec uptime;
struct tty_struct *tty;
+ const struct cred *orig_cred;
+
+ /* Perform file operations on behalf of whoever enabled accounting */
+ orig_cred = override_creds(file->f_cred);
/*
* First check to see if there is enough free_space to continue
* the process accounting system.
*/
if (!check_free_space(acct, file))
- return;
+ goto out;
/*
* Fill the accounting struct with the needed info as recorded
@@ -578,6 +582,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
sizeof(acct_t), &file->f_pos);
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
set_fs(fs);
+out:
+ revert_creds(orig_cred);
}
/**
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b6eadfe30e7b..c7ece8f027f2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -600,6 +600,7 @@ static struct inode_operations cgroup_dir_inode_operations;
static struct file_operations proc_cgroupstats_operations;
static struct backing_dev_info cgroup_backing_dev_info = {
+ .name = "cgroup",
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
diff --git a/kernel/cred.c b/kernel/cred.c
index 1bb4d7e5d616..006fcab009d5 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -18,6 +18,18 @@
#include <linux/cn_proc.h>
#include "cred-internals.h"
+#if 0
+#define kdebug(FMT, ...) \
+ printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
+#else
+static inline __attribute__((format(printf, 1, 2)))
+void no_printk(const char *fmt, ...)
+{
+}
+#define kdebug(FMT, ...) \
+ no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
+#endif
+
static struct kmem_cache *cred_jar;
/*
@@ -36,6 +48,10 @@ static struct thread_group_cred init_tgcred = {
*/
struct cred init_cred = {
.usage = ATOMIC_INIT(4),
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ .subscribers = ATOMIC_INIT(2),
+ .magic = CRED_MAGIC,
+#endif
.securebits = SECUREBITS_DEFAULT,
.cap_inheritable = CAP_INIT_INH_SET,
.cap_permitted = CAP_FULL_SET,
@@ -48,6 +64,31 @@ struct cred init_cred = {
#endif
};
+static inline void set_cred_subscribers(struct cred *cred, int n)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ atomic_set(&cred->subscribers, n);
+#endif
+}
+
+static inline int read_cred_subscribers(const struct cred *cred)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ return atomic_read(&cred->subscribers);
+#else
+ return 0;
+#endif
+}
+
+static inline void alter_cred_subscribers(const struct cred *_cred, int n)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ struct cred *cred = (struct cred *) _cred;
+
+ atomic_add(n, &cred->subscribers);
+#endif
+}
+
/*
* Dispose of the shared task group credentials
*/
@@ -85,9 +126,22 @@ static void put_cred_rcu(struct rcu_head *rcu)
{
struct cred *cred = container_of(rcu, struct cred, rcu);
+ kdebug("put_cred_rcu(%p)", cred);
+
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ if (cred->magic != CRED_MAGIC_DEAD ||
+ atomic_read(&cred->usage) != 0 ||
+ read_cred_subscribers(cred) != 0)
+ panic("CRED: put_cred_rcu() sees %p with"
+ " mag %x, put %p, usage %d, subscr %d\n",
+ cred, cred->magic, cred->put_addr,
+ atomic_read(&cred->usage),
+ read_cred_subscribers(cred));
+#else
if (atomic_read(&cred->usage) != 0)
panic("CRED: put_cred_rcu() sees %p with usage %d\n",
cred, atomic_read(&cred->usage));
+#endif
security_cred_free(cred);
key_put(cred->thread_keyring);
@@ -106,12 +160,90 @@ static void put_cred_rcu(struct rcu_head *rcu)
*/
void __put_cred(struct cred *cred)
{
+ kdebug("__put_cred(%p{%d,%d})", cred,
+ atomic_read(&cred->usage),
+ read_cred_subscribers(cred));
+
BUG_ON(atomic_read(&cred->usage) != 0);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ BUG_ON(read_cred_subscribers(cred) != 0);
+ cred->magic = CRED_MAGIC_DEAD;
+ cred->put_addr = __builtin_return_address(0);
+#endif
+ BUG_ON(cred == current->cred);
+ BUG_ON(cred == current->real_cred);
call_rcu(&cred->rcu, put_cred_rcu);
}
EXPORT_SYMBOL(__put_cred);
+/*
+ * Clean up a task's credentials when it exits
+ */
+void exit_creds(struct task_struct *tsk)
+{
+ struct cred *cred;
+
+ kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
+ atomic_read(&tsk->cred->usage),
+ read_cred_subscribers(tsk->cred));
+
+ cred = (struct cred *) tsk->real_cred;
+ tsk->real_cred = NULL;
+ validate_creds(cred);
+ alter_cred_subscribers(cred, -1);
+ put_cred(cred);
+
+ cred = (struct cred *) tsk->cred;
+ tsk->cred = NULL;
+ validate_creds(cred);
+ alter_cred_subscribers(cred, -1);
+ put_cred(cred);
+
+ cred = (struct cred *) tsk->replacement_session_keyring;
+ if (cred) {
+ tsk->replacement_session_keyring = NULL;
+ validate_creds(cred);
+ put_cred(cred);
+ }
+}
+
+/*
+ * Allocate blank credentials, such that the credentials can be filled in at a
+ * later date without risk of ENOMEM.
+ */
+struct cred *cred_alloc_blank(void)
+{
+ struct cred *new;
+
+ new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+#ifdef CONFIG_KEYS
+ new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
+ if (!new->tgcred) {
+ kfree(new);
+ return NULL;
+ }
+ atomic_set(&new->tgcred->usage, 1);
+#endif
+
+ atomic_set(&new->usage, 1);
+
+ if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+ goto error;
+
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ new->magic = CRED_MAGIC;
+#endif
+ return new;
+
+error:
+ abort_creds(new);
+ return NULL;
+}
+
/**
* prepare_creds - Prepare a new set of credentials for modification
*
@@ -132,16 +264,19 @@ struct cred *prepare_creds(void)
const struct cred *old;
struct cred *new;
- BUG_ON(atomic_read(&task->real_cred->usage) < 1);
+ validate_process_creds();
new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
if (!new)
return NULL;
+ kdebug("prepare_creds() alloc %p", new);
+
old = task->cred;
memcpy(new, old, sizeof(struct cred));
atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
get_group_info(new->group_info);
get_uid(new->user);
@@ -157,6 +292,7 @@ struct cred *prepare_creds(void)
if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
goto error;
+ validate_creds(new);
return new;
error:
@@ -229,9 +365,12 @@ struct cred *prepare_usermodehelper_creds(void)
if (!new)
return NULL;
+ kdebug("prepare_usermodehelper_creds() alloc %p", new);
+
memcpy(new, &init_cred, sizeof(struct cred));
atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
get_group_info(new->group_info);
get_uid(new->user);
@@ -250,6 +389,7 @@ struct cred *prepare_usermodehelper_creds(void)
#endif
if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0)
goto error;
+ validate_creds(new);
BUG_ON(atomic_read(&new->usage) != 1);
return new;
@@ -286,6 +426,10 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
) {
p->real_cred = get_cred(p->cred);
get_cred(p->cred);
+ alter_cred_subscribers(p->cred, 2);
+ kdebug("share_creds(%p{%d,%d})",
+ p->cred, atomic_read(&p->cred->usage),
+ read_cred_subscribers(p->cred));
atomic_inc(&p->cred->user->processes);
return 0;
}
@@ -331,6 +475,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
atomic_inc(&new->user->processes);
p->cred = p->real_cred = get_cred(new);
+ alter_cred_subscribers(new, 2);
+ validate_creds(new);
return 0;
error_put:
@@ -355,13 +501,20 @@ error_put:
int commit_creds(struct cred *new)
{
struct task_struct *task = current;
- const struct cred *old;
+ const struct cred *old = task->real_cred;
- BUG_ON(task->cred != task->real_cred);
- BUG_ON(atomic_read(&task->real_cred->usage) < 2);
+ kdebug("commit_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+
+ BUG_ON(task->cred != old);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ BUG_ON(read_cred_subscribers(old) < 2);
+ validate_creds(old);
+ validate_creds(new);
+#endif
BUG_ON(atomic_read(&new->usage) < 1);
- old = task->real_cred;
security_commit_creds(new, old);
get_cred(new); /* we will require a ref for the subj creds too */
@@ -390,12 +543,14 @@ int commit_creds(struct cred *new)
* cheaply with the new uid cache, so if it matters
* we should be checking for it. -DaveM
*/
+ alter_cred_subscribers(new, 2);
if (new->user != old->user)
atomic_inc(&new->user->processes);
rcu_assign_pointer(task->real_cred, new);
rcu_assign_pointer(task->cred, new);
if (new->user != old->user)
atomic_dec(&old->user->processes);
+ alter_cred_subscribers(old, -2);
sched_switch_user(task);
@@ -428,6 +583,13 @@ EXPORT_SYMBOL(commit_creds);
*/
void abort_creds(struct cred *new)
{
+ kdebug("abort_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ BUG_ON(read_cred_subscribers(new) != 0);
+#endif
BUG_ON(atomic_read(&new->usage) < 1);
put_cred(new);
}
@@ -444,7 +606,20 @@ const struct cred *override_creds(const struct cred *new)
{
const struct cred *old = current->cred;
- rcu_assign_pointer(current->cred, get_cred(new));
+ kdebug("override_creds(%p{%d,%d})", new,
+ atomic_read(&new->usage),
+ read_cred_subscribers(new));
+
+ validate_creds(old);
+ validate_creds(new);
+ get_cred(new);
+ alter_cred_subscribers(new, 1);
+ rcu_assign_pointer(current->cred, new);
+ alter_cred_subscribers(old, -1);
+
+ kdebug("override_creds() = %p{%d,%d}", old,
+ atomic_read(&old->usage),
+ read_cred_subscribers(old));
return old;
}
EXPORT_SYMBOL(override_creds);
@@ -460,7 +635,15 @@ void revert_creds(const struct cred *old)
{
const struct cred *override = current->cred;
+ kdebug("revert_creds(%p{%d,%d})", old,
+ atomic_read(&old->usage),
+ read_cred_subscribers(old));
+
+ validate_creds(old);
+ validate_creds(override);
+ alter_cred_subscribers(old, 1);
rcu_assign_pointer(current->cred, old);
+ alter_cred_subscribers(override, -1);
put_cred(override);
}
EXPORT_SYMBOL(revert_creds);
@@ -502,11 +685,15 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
if (!new)
return NULL;
+ kdebug("prepare_kernel_cred() alloc %p", new);
+
if (daemon)
old = get_task_cred(daemon);
else
old = get_cred(&init_cred);
+ validate_creds(old);
+
*new = *old;
get_uid(new->user);
get_group_info(new->group_info);
@@ -526,7 +713,9 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
goto error;
atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
put_cred(old);
+ validate_creds(new);
return new;
error:
@@ -589,3 +778,95 @@ int set_create_files_as(struct cred *new, struct inode *inode)
return security_kernel_create_files_as(new, inode);
}
EXPORT_SYMBOL(set_create_files_as);
+
+#ifdef CONFIG_DEBUG_CREDENTIALS
+
+/*
+ * dump invalid credentials
+ */
+static void dump_invalid_creds(const struct cred *cred, const char *label,
+ const struct task_struct *tsk)
+{
+ printk(KERN_ERR "CRED: %s credentials: %p %s%s%s\n",
+ label, cred,
+ cred == &init_cred ? "[init]" : "",
+ cred == tsk->real_cred ? "[real]" : "",
+ cred == tsk->cred ? "[eff]" : "");
+ printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n",
+ cred->magic, cred->put_addr);
+ printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n",
+ atomic_read(&cred->usage),
+ read_cred_subscribers(cred));
+ printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n",
+ cred->uid, cred->euid, cred->suid, cred->fsuid);
+ printk(KERN_ERR "CRED: ->*gid = { %d,%d,%d,%d }\n",
+ cred->gid, cred->egid, cred->sgid, cred->fsgid);
+#ifdef CONFIG_SECURITY
+ printk(KERN_ERR "CRED: ->security is %p\n", cred->security);
+ if ((unsigned long) cred->security >= PAGE_SIZE &&
+ (((unsigned long) cred->security & 0xffffff00) !=
+ (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)))
+ printk(KERN_ERR "CRED: ->security {%x, %x}\n",
+ ((u32*)cred->security)[0],
+ ((u32*)cred->security)[1]);
+#endif
+}
+
+/*
+ * report use of invalid credentials
+ */
+void __invalid_creds(const struct cred *cred, const char *file, unsigned line)
+{
+ printk(KERN_ERR "CRED: Invalid credentials\n");
+ printk(KERN_ERR "CRED: At %s:%u\n", file, line);
+ dump_invalid_creds(cred, "Specified", current);
+ BUG();
+}
+EXPORT_SYMBOL(__invalid_creds);
+
+/*
+ * check the credentials on a process
+ */
+void __validate_process_creds(struct task_struct *tsk,
+ const char *file, unsigned line)
+{
+ if (tsk->cred == tsk->real_cred) {
+ if (unlikely(read_cred_subscribers(tsk->cred) < 2 ||
+ creds_are_invalid(tsk->cred)))
+ goto invalid_creds;
+ } else {
+ if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 ||
+ read_cred_subscribers(tsk->cred) < 1 ||
+ creds_are_invalid(tsk->real_cred) ||
+ creds_are_invalid(tsk->cred)))
+ goto invalid_creds;
+ }
+ return;
+
+invalid_creds:
+ printk(KERN_ERR "CRED: Invalid process credentials\n");
+ printk(KERN_ERR "CRED: At %s:%u\n", file, line);
+
+ dump_invalid_creds(tsk->real_cred, "Real", tsk);
+ if (tsk->cred != tsk->real_cred)
+ dump_invalid_creds(tsk->cred, "Effective", tsk);
+ else
+ printk(KERN_ERR "CRED: Effective creds == Real creds\n");
+ BUG();
+}
+EXPORT_SYMBOL(__validate_process_creds);
+
+/*
+ * check creds for do_exit()
+ */
+void validate_creds_for_do_exit(struct task_struct *tsk)
+{
+ kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
+ tsk->real_cred, tsk->cred,
+ atomic_read(&tsk->cred->usage),
+ read_cred_subscribers(tsk->cred));
+
+ __validate_process_creds(tsk, __FILE__, __LINE__);
+}
+
+#endif /* CONFIG_DEBUG_CREDENTIALS */
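
The checks in this debugging block rest on one invariant: "usage" counts every reference to a cred, while "subscribers" only counts the task pointers (real_cred and cred) aimed at it, so subscribers can never legitimately exceed usage and must reach zero before the cred dies. A loosely modelled userspace sketch of that bookkeeping (toy names, not the kernel structures):

#include <assert.h>

struct toy_cred { int usage; int subscribers; };

static void task_attach(struct toy_cred *c) { c->usage++; c->subscribers++; }
static void task_detach(struct toy_cred *c) { c->subscribers--; c->usage--; }
static void temp_get(struct toy_cred *c)    { c->usage++; }
static void temp_put(struct toy_cred *c)    { c->usage--; }

static void validate(const struct toy_cred *c)
{
        assert(c->subscribers >= 0);
        assert(c->subscribers <= c->usage);
}

int main(void)
{
        struct toy_cred c = { .usage = 1, .subscribers = 0 }; /* as after prepare_creds() */

        task_attach(&c);        /* becomes a task's real_cred ... */
        task_attach(&c);        /* ... and its effective cred */
        validate(&c);           /* usage == 3, subscribers == 2 */

        temp_get(&c);           /* a temporary user takes a plain reference */
        validate(&c);
        temp_put(&c);

        task_detach(&c);
        task_detach(&c);
        validate(&c);           /* back to usage == 1, subscribers == 0 */
        return 0;
}
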
diff --git a/kernel/exit.c b/kernel/exit.c
index 869dc221733e..ae5d8660ddff 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -901,6 +901,8 @@ NORET_TYPE void do_exit(long code)
tracehook_report_exit(&code);
+ validate_creds_for_do_exit(tsk);
+
/*
* We're taking recursive faults here in do_exit. Safest is to just
* leave this task alone and wait for reboot.
@@ -1009,7 +1011,10 @@ NORET_TYPE void do_exit(long code)
if (tsk->splice_pipe)
__free_pipe_info(tsk->splice_pipe);
+ validate_creds_for_do_exit(tsk);
+
preempt_disable();
+ exit_rcu();
/* causes final put_task_struct in finish_task_switch(). */
tsk->state = TASK_DEAD;
schedule();
diff --git a/kernel/fork.c b/kernel/fork.c
index e6c04d462ab2..bfee931ee3fb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -152,8 +152,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
- put_cred(tsk->real_cred);
- put_cred(tsk->cred);
+ exit_creds(tsk);
delayacct_tsk_free(tsk);
if (!profile_handoff_task(tsk))
@@ -1008,10 +1007,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
-#ifdef CONFIG_PREEMPT_RCU
- p->rcu_read_lock_nesting = 0;
- p->rcu_flipctr_idx = 0;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+ rcu_copy_process(p);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
@@ -1297,8 +1293,7 @@ bad_fork_cleanup_put_domain:
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
- put_cred(p->real_cred);
- put_cred(p->cred);
+ exit_creds(p);
bad_fork_free:
free_task(p);
fork_out:
diff --git a/kernel/futex.c b/kernel/futex.c
index e18cfbdc7190..248dd119a86e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -115,6 +115,9 @@ struct futex_q {
/* rt_waiter storage for requeue_pi: */
struct rt_mutex_waiter *rt_waiter;
+ /* The expected requeue pi target futex key: */
+ union futex_key *requeue_pi_key;
+
/* Bitset for the optional bitmasked wakeup */
u32 bitset;
};
@@ -1089,6 +1092,10 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
if (!top_waiter)
return 0;
+ /* Ensure we requeue to the expected futex. */
+ if (!match_futex(top_waiter->requeue_pi_key, key2))
+ return -EINVAL;
+
/*
* Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
* the contended case or if set_waiters is 1. The pi_state is returned
@@ -1276,6 +1283,12 @@ retry_private:
continue;
}
+ /* Ensure we requeue to the expected futex for requeue_pi. */
+ if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
+ ret = -EINVAL;
+ break;
+ }
+
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
@@ -1751,6 +1764,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
q.pi_state = NULL;
q.bitset = bitset;
q.rt_waiter = NULL;
+ q.requeue_pi_key = NULL;
if (abs_time) {
to = &timeout;
@@ -1858,6 +1872,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
q.pi_state = NULL;
q.rt_waiter = NULL;
+ q.requeue_pi_key = NULL;
retry:
q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
@@ -2118,11 +2133,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* We call schedule in futex_wait_queue_me() when we enqueue and return there
* via the following:
* 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
- * 2) wakeup on uaddr2 after a requeue and subsequent unlock
- * 3) signal (before or after requeue)
- * 4) timeout (before or after requeue)
+ * 2) wakeup on uaddr2 after a requeue
+ * 3) signal
+ * 4) timeout
*
- * If 3, we setup a restart_block with futex_wait_requeue_pi() as the function.
+ * If 3, cleanup and return -ERESTARTNOINTR.
*
* If 2, we may then block on trying to take the rt_mutex and return via:
* 5) successful lock
@@ -2130,7 +2145,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* 7) timeout
* 8) other lock acquisition failure
*
- * If 6, we setup a restart_block with futex_lock_pi() as the function.
+ * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
*
* If 4 or 7, we cleanup and return with -ETIMEDOUT.
*
@@ -2169,15 +2184,16 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
debug_rt_mutex_init_waiter(&rt_waiter);
rt_waiter.task = NULL;
- q.pi_state = NULL;
- q.bitset = bitset;
- q.rt_waiter = &rt_waiter;
-
key2 = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
+ q.pi_state = NULL;
+ q.bitset = bitset;
+ q.rt_waiter = &rt_waiter;
+ q.requeue_pi_key = &key2;
+
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
if (ret)
@@ -2248,14 +2264,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
- * We've already been requeued, but we have no way to
- * restart by calling futex_lock_pi() directly. We
- * could restart the syscall, but that will look at
- * the user space value and return right away. So we
- * drop back with EWOULDBLOCK to tell user space that
- * "val" has been changed. That's the same what the
- * restart of the syscall would do in
- * futex_wait_setup().
+ * We've already been requeued, but cannot restart by calling
+ * futex_lock_pi() directly. We could restart this syscall, but
+ * it would detect that the user space "val" changed and return
+ * -EWOULDBLOCK. Save the overhead of the restart and return
+ * -EWOULDBLOCK directly.
*/
ret = -EWOULDBLOCK;
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 13c68e71b726..c1660194d115 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -222,6 +222,34 @@ int set_irq_chip_data(unsigned int irq, void *data)
}
EXPORT_SYMBOL(set_irq_chip_data);
+/**
+ * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
+ *
+ * @irq: Interrupt number
+ * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag
+ *
+ * The IRQ_NESTED_THREAD flag indicates that on
+ * request_threaded_irq() no separate interrupt thread should be
+ * created for the irq as the handlers are called nested in the
+ * context of a demultiplexing interrupt handler thread.
+ */
+void set_irq_nested_thread(unsigned int irq, int nest)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+
+ if (!desc)
+ return;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (nest)
+ desc->status |= IRQ_NESTED_THREAD;
+ else
+ desc->status &= ~IRQ_NESTED_THREAD;
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(set_irq_nested_thread);
+
/*
* default enable function
*/
@@ -299,6 +327,45 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
}
}
+/*
+ * handle_nested_irq - Handle a nested irq from an irq thread
+ * @irq: the interrupt number
+ *
+ * Handle interrupts which are nested into a threaded interrupt
+ * handler. The handler function is called inside the calling
+ * thread's context.
+ */
+void handle_nested_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ irqreturn_t action_ret;
+
+ might_sleep();
+
+ spin_lock_irq(&desc->lock);
+
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ action = desc->action;
+ if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+ goto out_unlock;
+
+ desc->status |= IRQ_INPROGRESS;
+ spin_unlock_irq(&desc->lock);
+
+ action_ret = action->thread_fn(action->irq, action->dev_id);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
+
+ spin_lock_irq(&desc->lock);
+ desc->status &= ~IRQ_INPROGRESS;
+
+out_unlock:
+ spin_unlock_irq(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_nested_irq);
+
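
To make the intended usage concrete, here is a rough sketch of how a driver for an interrupt demultiplexer sitting on a slow bus might combine set_irq_nested_thread() and handle_nested_irq(). struct expander, expander_read_status() and the child_irq_base numbering are invented for illustration and are not part of any kernel API:

#include <linux/irq.h>
#include <linux/interrupt.h>

/* Hypothetical demultiplexer device. */
struct expander {
        int child_irq_base;
        int nr_child_irqs;
};

/* Hypothetical status read; sleeps while talking to the bus. */
unsigned long expander_read_status(struct expander *chip);

/*
 * Threaded handler of the parent interrupt: every pending child interrupt
 * is dispatched with handle_nested_irq(), so the child thread functions
 * run nested in this thread instead of getting threads of their own.
 */
static irqreturn_t expander_parent_thread(int irq, void *dev_id)
{
        struct expander *chip = dev_id;
        unsigned long pending = expander_read_status(chip);
        int bit;

        for (bit = 0; bit < chip->nr_child_irqs; bit++)
                if (pending & (1UL << bit))
                        handle_nested_irq(chip->child_irq_base + bit);

        return IRQ_HANDLED;
}

/* Mark the children nested so __setup_irq() will not spawn threads for them. */
static void expander_setup_child_irqs(struct expander *chip)
{
        int bit;

        for (bit = 0; bit < chip->nr_child_irqs; bit++)
                set_irq_nested_thread(chip->child_irq_base + bit, 1);
}
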
/**
* handle_simple_irq - Simple and software-decoded IRQs.
* @irq: the interrupt number
@@ -382,7 +449,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
- if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+
+ if (unlikely(desc->status & IRQ_ONESHOT))
+ desc->status |= IRQ_MASKED;
+ else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
spin_unlock(&desc->lock);
@@ -572,6 +642,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->chip = &dummy_irq_chip;
}
+ chip_bus_lock(irq, desc);
spin_lock_irqsave(&desc->lock, flags);
/* Uninstall? */
@@ -591,6 +662,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->chip->startup(irq);
}
spin_unlock_irqrestore(&desc->lock, flags);
+ chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 065205bdd920..a81cf80554db 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -161,7 +161,7 @@ int __init early_irq_init(void)
desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);
- node = first_online_node;
+ node = first_online_node;
/* allocate irq_desc_ptrs array based on nr_irqs */
irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
@@ -172,6 +172,9 @@ int __init early_irq_init(void)
for (i = 0; i < legacy_count; i++) {
desc[i].irq = i;
+#ifdef CONFIG_SMP
+ desc[i].node = node;
+#endif
desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
alloc_desc_masks(&desc[i], node, true);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e70ed5592eb9..1b5d742c6a77 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -44,6 +44,19 @@ extern int irq_select_affinity_usr(unsigned int irq);
extern void irq_set_thread_affinity(struct irq_desc *desc);
+/* Inline functions for support of irq chips on slow busses */
+static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
+{
+ if (unlikely(desc->chip->bus_lock))
+ desc->chip->bus_lock(irq);
+}
+
+static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
+{
+ if (unlikely(desc->chip->bus_sync_unlock))
+ desc->chip->bus_sync_unlock(irq);
+}
+
/*
* Debugging printout:
*/
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0ec9ed831737..bde4c667d24d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -230,9 +230,11 @@ void disable_irq_nosync(unsigned int irq)
if (!desc)
return;
+ chip_bus_lock(irq, desc);
spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, false);
spin_unlock_irqrestore(&desc->lock, flags);
+ chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -294,7 +296,8 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
* matches the last disable, processing of interrupts on this
* IRQ line is re-enabled.
*
- * This function may be called from IRQ context.
+ * This function may be called from IRQ context only when
+ * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
*/
void enable_irq(unsigned int irq)
{
@@ -304,9 +307,11 @@ void enable_irq(unsigned int irq)
if (!desc)
return;
+ chip_bus_lock(irq, desc);
spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, false);
spin_unlock_irqrestore(&desc->lock, flags);
+ chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(enable_irq);
@@ -436,6 +441,26 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return ret;
}
+/*
+ * Default primary interrupt handler for threaded interrupts. Is
+ * assigned as primary handler when request_threaded_irq is called
+ * with handler == NULL. Useful for oneshot interrupts.
+ */
+static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Primary handler for nested threaded interrupts. Should never be
+ * called.
+ */
+static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
+{
+ WARN(1, "Primary handler called for nested irq %d\n", irq);
+ return IRQ_NONE;
+}
+
static int irq_wait_for_interrupt(struct irqaction *action)
{
while (!kthread_should_stop()) {
@@ -451,6 +476,23 @@ static int irq_wait_for_interrupt(struct irqaction *action)
return -1;
}
+/*
+ * Oneshot interrupts keep the irq line masked until the threaded
+ * handler has finished. Unmask if the interrupt has not been disabled and
+ * is marked MASKED.
+ */
+static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+{
+ chip_bus_lock(irq, desc);
+ spin_lock_irq(&desc->lock);
+ if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
+ desc->status &= ~IRQ_MASKED;
+ desc->chip->unmask(irq);
+ }
+ spin_unlock_irq(&desc->lock);
+ chip_bus_sync_unlock(irq, desc);
+}
+
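
Together with the request_threaded_irq() change further down (a NULL primary handler now falls back to irq_default_primary_handler()), IRQF_ONESHOT lets a level-triggered device on a slow bus be serviced entirely from thread context. A hedged usage sketch, with struct sensor and sensor_clear_interrupt() invented for illustration:

#include <linux/interrupt.h>

/*
 * Hypothetical device whose interrupt can only be acknowledged over a
 * sleeping bus (e.g. I2C), so it cannot be cleared from hard irq context.
 */
struct sensor {
        int irq;
};

int sensor_clear_interrupt(struct sensor *s);   /* hypothetical, may sleep */

static irqreturn_t sensor_thread_fn(int irq, void *dev_id)
{
        struct sensor *s = dev_id;

        sensor_clear_interrupt(s);
        return IRQ_HANDLED;
}

static int sensor_setup_irq(struct sensor *s)
{
        /*
         * handler == NULL installs the default primary handler, which just
         * returns IRQ_WAKE_THREAD; IRQF_ONESHOT keeps the line masked until
         * sensor_thread_fn() has finished, so the level interrupt cannot storm.
         */
        return request_threaded_irq(s->irq, NULL, sensor_thread_fn,
                                    IRQF_ONESHOT, "sensor", s);
}
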
#ifdef CONFIG_SMP
/*
* Check whether we need to change the affinity of the interrupt thread.
@@ -492,7 +534,7 @@ static int irq_thread(void *data)
struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
struct irqaction *action = data;
struct irq_desc *desc = irq_to_desc(action->irq);
- int wake;
+ int wake, oneshot = desc->status & IRQ_ONESHOT;
sched_setscheduler(current, SCHED_FIFO, &param);
current->irqaction = action;
@@ -518,6 +560,9 @@ static int irq_thread(void *data)
spin_unlock_irq(&desc->lock);
action->thread_fn(action->irq, action->dev_id);
+
+ if (oneshot)
+ irq_finalize_oneshot(action->irq, desc);
}
wake = atomic_dec_and_test(&desc->threads_active);
@@ -565,7 +610,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
struct irqaction *old, **old_ptr;
const char *old_name = NULL;
unsigned long flags;
- int shared = 0;
+ int nested, shared = 0;
int ret;
if (!desc)
@@ -590,10 +635,32 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
rand_initialize_irq(irq);
}
+ /* Oneshot interrupts are not allowed with shared */
+ if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
+ return -EINVAL;
+
+ /*
+ * Check whether the interrupt nests into another interrupt
+ * thread.
+ */
+ nested = desc->status & IRQ_NESTED_THREAD;
+ if (nested) {
+ if (!new->thread_fn)
+ return -EINVAL;
+ /*
+			 * Replace the primary handler, which was provided by the
+			 * driver for non-nested interrupt handling, with a dummy
+			 * function that warns when called.
+ */
+ new->handler = irq_nested_primary_handler;
+ }
+
/*
- * Threaded handler ?
+ * Create a handler thread when a thread function is supplied
+ * and the interrupt does not nest into another interrupt
+ * thread.
*/
- if (new->thread_fn) {
+ if (new->thread_fn && !nested) {
struct task_struct *t;
t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
@@ -662,9 +729,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
desc->status |= IRQ_PER_CPU;
#endif
- desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+ desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+ if (new->flags & IRQF_ONESHOT)
+ desc->status |= IRQ_ONESHOT;
+
if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
@@ -875,7 +945,14 @@ EXPORT_SYMBOL_GPL(remove_irq);
*/
void free_irq(unsigned int irq, void *dev_id)
{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc)
+ return;
+
+ chip_bus_lock(irq, desc);
kfree(__free_irq(irq, dev_id));
+ chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(free_irq);
@@ -884,6 +961,8 @@ EXPORT_SYMBOL(free_irq);
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Primary handler for threaded interrupts
+ * If NULL and thread_fn != NULL the default
+ * primary handler is installed
* @thread_fn: Function called from the irq handler thread
* If NULL, no irq thread is created
* @irqflags: Interrupt type flags
@@ -963,8 +1042,12 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
if (desc->status & IRQ_NOREQUEST)
return -EINVAL;
- if (!handler)
- return -EINVAL;
+
+ if (!handler) {
+ if (!thread_fn)
+ return -EINVAL;
+ handler = irq_default_primary_handler;
+ }
action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
@@ -976,7 +1059,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
action->name = devname;
action->dev_id = dev_id;
+ chip_bus_lock(irq, desc);
retval = __setup_irq(irq, desc, action);
+ chip_bus_sync_unlock(irq, desc);
+
if (retval)
kfree(action);
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 638d8bedec14..a0bb09e79867 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -15,10 +15,10 @@
/**
* suspend_device_irqs - disable all currently enabled interrupt lines
*
- * During system-wide suspend or hibernation device interrupts need to be
- * disabled at the chip level and this function is provided for this purpose.
- * It disables all interrupt lines that are enabled at the moment and sets the
- * IRQ_SUSPENDED flag for them.
+ * During system-wide suspend or hibernation device drivers need to be prevented
+ * from receiving interrupts and this function is provided for this purpose.
+ * It marks all interrupt lines in use, except for the timer ones, as disabled
+ * and sets the IRQ_SUSPENDED flag for each of them.
*/
void suspend_device_irqs(void)
{
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 89c7117acf2b..090c3763f3a2 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -70,8 +70,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
- if (!desc->chip || !desc->chip->retrigger ||
- !desc->chip->retrigger(irq)) {
+ if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 4d568294de3e..114e704760fe 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -297,7 +297,6 @@ static int __init irqfixup_setup(char *str)
__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);
-MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode, 2: irqpoll mode");
static int __init irqpoll_setup(char *str)
{
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 385c31a1bdbf..9fcb53a11f87 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -37,6 +37,8 @@
#include <linux/suspend.h>
#include <asm/uaccess.h>
+#include <trace/events/module.h>
+
extern int max_threads;
static struct workqueue_struct *khelper_wq;
@@ -78,6 +80,10 @@ int __request_module(bool wait, const char *fmt, ...)
#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
static int kmod_loop_msg;
+ ret = security_kernel_module_request();
+ if (ret)
+ return ret;
+
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args);
@@ -108,6 +114,8 @@ int __request_module(bool wait, const char *fmt, ...)
return -ENOMEM;
}
+ trace_module_request(module_name, wait, _RET_IP_);
+
ret = call_usermodehelper(modprobe_path, argv, envp,
wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
atomic_dec(&kmod_concurrent);
@@ -462,6 +470,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
int retval = 0;
BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
+ validate_creds(sub_info->cred);
helper_lock();
if (sub_info->path[0] == '\0')
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 0540948e29ab..ef177d653b2c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -103,7 +103,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
struct kprobe_insn_page {
- struct hlist_node hlist;
+ struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
char slot_used[INSNS_PER_PAGE];
int nused;
@@ -117,7 +117,7 @@ enum kprobe_slot_state {
};
static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
-static struct hlist_head kprobe_insn_pages;
+static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
@@ -152,10 +152,9 @@ loop_end:
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
retry:
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->nused < INSNS_PER_PAGE) {
int i;
for (i = 0; i < INSNS_PER_PAGE; i++) {
@@ -189,8 +188,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
kfree(kip);
return NULL;
}
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+ INIT_LIST_HEAD(&kip->list);
+ list_add(&kip->list, &kprobe_insn_pages);
memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
@@ -219,12 +218,8 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
* so as not to have to set it up again the
* next time somebody inserts a probe.
*/
- hlist_del(&kip->hlist);
- if (hlist_empty(&kprobe_insn_pages)) {
- INIT_HLIST_NODE(&kip->hlist);
- hlist_add_head(&kip->hlist,
- &kprobe_insn_pages);
- } else {
+ if (!list_is_singular(&kprobe_insn_pages)) {
+ list_del(&kip->list);
module_free(NULL, kip->insns);
kfree(kip);
}
@@ -235,14 +230,13 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
static int __kprobes collect_garbage_slots(void)
{
- struct kprobe_insn_page *kip;
- struct hlist_node *pos, *next;
+ struct kprobe_insn_page *kip, *next;
 	/* Ensure no-one is preempted on the garbage slots */
if (check_safety())
return -EAGAIN;
- hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
+ list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
int i;
if (kip->ngarbage == 0)
continue;
@@ -260,19 +254,17 @@ static int __kprobes collect_garbage_slots(void)
void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
struct kprobe_insn_page *kip;
- struct hlist_node *pos;
mutex_lock(&kprobe_insn_mutex);
- hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+ list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->insns <= slot &&
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE;
if (dirty) {
kip->slot_used[i] = SLOT_DIRTY;
kip->ngarbage++;
- } else {
+ } else
collect_one_slot(kip, i);
- }
break;
}
}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index eb8751aa0418..5fe709982caa 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -16,8 +16,6 @@
#include <linux/mutex.h>
#include <trace/events/sched.h>
-#define KTHREAD_NICE_LEVEL (-5)
-
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;
@@ -145,7 +143,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
* The kernel thread should not inherit these properties.
*/
sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
- set_user_nice(create.result, KTHREAD_NICE_LEVEL);
set_cpus_allowed_ptr(create.result, cpu_all_mask);
}
return create.result;
@@ -221,7 +218,6 @@ int kthreadd(void *unused)
/* Setup a clean context for our children to inherit. */
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
- set_user_nice(tsk, KTHREAD_NICE_LEVEL);
set_cpus_allowed_ptr(tsk, cpu_all_mask);
set_mems_allowed(node_possible_map);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef996c76..f74d2d7aa605 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
+#include <linux/bitops.h>
#include <asm/sections.h>
@@ -366,11 +367,21 @@ static int save_trace(struct stack_trace *trace)
save_stack_trace(trace);
+ /*
+	 * Some daft arches put -1 at the end to indicate it's a full trace.
+ *
+ * <rant> this is buggy anyway, since it takes a whole extra entry so a
+ * complete trace that maxes out the entries provided will be reported
+ * as incomplete, friggin useless </rant>
+ */
+ if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+ trace->nr_entries--;
+
trace->max_entries = trace->nr_entries;
nr_stack_trace_entries += trace->nr_entries;
- if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
+ if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
if (!debug_locks_off_graph_unlock())
return 0;
@@ -388,20 +399,6 @@ unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
-unsigned int max_recursion_depth;
-
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
- unsigned int depth)
-{
- if (!depth)
- lockdep_dependency_gen_id++;
- if (source->dep_gen_id == lockdep_dependency_gen_id)
- return true;
- source->dep_gen_id = lockdep_dependency_gen_id;
- return false;
-}
#ifdef CONFIG_DEBUG_LOCKDEP
/*
@@ -431,11 +428,8 @@ atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
-atomic_t nr_cyclic_check_recursions;
atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
-atomic_t nr_find_usage_backwards_recursions;
#endif
/*
@@ -551,58 +545,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
}
}
-static void print_lock_class_header(struct lock_class *class, int depth)
-{
- int bit;
-
- printk("%*s->", depth, "");
- print_lock_name(class);
- printk(" ops: %lu", class->ops);
- printk(" {\n");
-
- for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
- if (class->usage_mask & (1 << bit)) {
- int len = depth;
-
- len += printk("%*s %s", depth, "", usage_str[bit]);
- len += printk(" at:\n");
- print_stack_trace(class->usage_traces + bit, len);
- }
- }
- printk("%*s }\n", depth, "");
-
- printk("%*s ... key at: ",depth,"");
- print_ip_sym((unsigned long)class->key);
-}
-
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void __used
-print_lock_dependencies(struct lock_class *class, int depth)
-{
- struct lock_list *entry;
-
- if (lockdep_dependency_visit(class, depth))
- return;
-
- if (DEBUG_LOCKS_WARN_ON(depth >= 20))
- return;
-
- print_lock_class_header(class, depth);
-
- list_for_each_entry(entry, &class->locks_after, entry) {
- if (DEBUG_LOCKS_WARN_ON(!entry->class))
- return;
-
- print_lock_dependencies(entry->class, depth + 1);
-
- printk("%*s ... acquired at:\n",depth,"");
- print_stack_trace(&entry->trace, 2);
- printk("\n");
- }
-}
-
static void print_kernel_version(void)
{
printk("%s %.*s\n", init_utsname()->release,
@@ -898,22 +840,203 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
}
/*
+ * For good efficiency of the modulo operation, we use a power of 2
+ */
+#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
+#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/*
+ * The circular_queue and helpers are used to implement the
+ * breadth-first search (BFS) algorithm, by which we can build
+ * the shortest path from the next lock to be acquired to a
+ * previously held lock if there is a circular dependency between them.
+ */
+struct circular_queue {
+ unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+ unsigned int front, rear;
+};
+
+static struct circular_queue lock_cq;
+
+unsigned int max_bfs_queue_depth;
+
+static unsigned int lockdep_dependency_gen_id;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+ cq->front = cq->rear = 0;
+ lockdep_dependency_gen_id++;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+ return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+ return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+ if (__cq_full(cq))
+ return -1;
+
+ cq->element[cq->rear] = elem;
+ cq->rear = (cq->rear + 1) & CQ_MASK;
+ return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+ if (__cq_empty(cq))
+ return -1;
+
+ *elem = cq->element[cq->front];
+ cq->front = (cq->front + 1) & CQ_MASK;
+ return 0;
+}
+
+static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
+{
+ return (cq->rear - cq->front) & CQ_MASK;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+ struct lock_list *parent)
+{
+ unsigned long nr;
+
+ nr = lock - list_entries;
+ WARN_ON(nr >= nr_list_entries);
+ lock->parent = parent;
+ lock->class->dep_gen_id = lockdep_dependency_gen_id;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+ unsigned long nr;
+
+ nr = lock - list_entries;
+ WARN_ON(nr >= nr_list_entries);
+ return lock->class->dep_gen_id == lockdep_dependency_gen_id;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+ return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+ int depth = 0;
+ struct lock_list *parent;
+
+ while ((parent = get_lock_parent(child))) {
+ child = parent;
+ depth++;
+ }
+ return depth;
+}
+
+static int __bfs(struct lock_list *source_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry,
+ int forward)
+{
+ struct lock_list *entry;
+ struct list_head *head;
+ struct circular_queue *cq = &lock_cq;
+ int ret = 1;
+
+ if (match(source_entry, data)) {
+ *target_entry = source_entry;
+ ret = 0;
+ goto exit;
+ }
+
+ if (forward)
+ head = &source_entry->class->locks_after;
+ else
+ head = &source_entry->class->locks_before;
+
+ if (list_empty(head))
+ goto exit;
+
+ __cq_init(cq);
+ __cq_enqueue(cq, (unsigned long)source_entry);
+
+ while (!__cq_empty(cq)) {
+ struct lock_list *lock;
+
+ __cq_dequeue(cq, (unsigned long *)&lock);
+
+ if (!lock->class) {
+ ret = -2;
+ goto exit;
+ }
+
+ if (forward)
+ head = &lock->class->locks_after;
+ else
+ head = &lock->class->locks_before;
+
+ list_for_each_entry(entry, head, entry) {
+ if (!lock_accessed(entry)) {
+ unsigned int cq_depth;
+ mark_lock_accessed(entry, lock);
+ if (match(entry, data)) {
+ *target_entry = entry;
+ ret = 0;
+ goto exit;
+ }
+
+ if (__cq_enqueue(cq, (unsigned long)entry)) {
+ ret = -1;
+ goto exit;
+ }
+ cq_depth = __cq_get_elem_count(cq);
+ if (max_bfs_queue_depth < cq_depth)
+ max_bfs_queue_depth = cq_depth;
+ }
+ }
+ }
+exit:
+ return ret;
+}
+
+static inline int __bfs_forwards(struct lock_list *src_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry)
+{
+ return __bfs(src_entry, data, match, target_entry, 1);
+
+}
+
+static inline int __bfs_backwards(struct lock_list *src_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry)
+{
+ return __bfs(src_entry, data, match, target_entry, 0);
+
+}
+
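
The queue and search helpers above are easier to digest outside the lockdep context. The following self-contained userspace sketch uses the same power-of-two ring buffer to drive a breadth-first search over a made-up five-node graph, with a plain visited[] array standing in for the dep_gen_id generation marking and parent[] playing the role of mark_lock_accessed()/get_lock_parent():

#include <stdio.h>

#define CQ_SIZE 16u                     /* power of two, like lock_cq */
#define CQ_MASK (CQ_SIZE - 1)

struct cq { unsigned int elem[CQ_SIZE]; unsigned int front, rear; };

static int cq_full(struct cq *q)  { return ((q->rear + 1) & CQ_MASK) == q->front; }
static int cq_empty(struct cq *q) { return q->front == q->rear; }

static int cq_enqueue(struct cq *q, unsigned int v)
{
        if (cq_full(q))
                return -1;
        q->elem[q->rear] = v;
        q->rear = (q->rear + 1) & CQ_MASK;
        return 0;
}

static int cq_dequeue(struct cq *q, unsigned int *v)
{
        if (cq_empty(q))
                return -1;
        *v = q->elem[q->front];
        q->front = (q->front + 1) & CQ_MASK;
        return 0;
}

/* Tiny directed graph: each row lists successors, -1 terminated. */
static const int adj[5][3] = { {1, 2, -1}, {3, -1}, {3, -1}, {4, -1}, {-1} };

int main(void)
{
        struct cq q = { .front = 0, .rear = 0 };
        int parent[5] = { -1, -1, -1, -1, -1 };
        int visited[5] = { 1, 0, 0, 0, 0 };
        unsigned int n;
        int i;

        cq_enqueue(&q, 0);
        while (!cq_dequeue(&q, &n)) {
                for (i = 0; adj[n][i] >= 0; i++) {
                        int next = adj[n][i];

                        if (!visited[next]) {
                                visited[next] = 1;
                                parent[next] = n;       /* like mark_lock_accessed() */
                                cq_enqueue(&q, next);
                        }
                }
        }

        /* Walk parent[] back from node 4, the way
         * print_shortest_lock_dependencies() walks get_lock_parent(). */
        for (i = 4; i >= 0; i = parent[i])
                printf("%d%s", i, parent[i] >= 0 ? " <- " : "\n");
        return 0;
}
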
+/*
* Recursive, forwards-direction lock-dependency checking, used for
* both noncyclic checking and for hardirq-unsafe/softirq-unsafe
* checking.
- *
- * (to keep the stackframe of the recursive functions small we
- * use these global variables, and we also mark various helper
- * functions as noinline.)
*/
-static struct held_lock *check_source, *check_target;
/*
* Print a dependency chain entry (this is only done when a deadlock
* has been detected):
*/
static noinline int
-print_circular_bug_entry(struct lock_list *target, unsigned int depth)
+print_circular_bug_entry(struct lock_list *target, int depth)
{
if (debug_locks_silent)
return 0;
@@ -930,11 +1053,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
* header first:
*/
static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
- if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+ if (debug_locks_silent)
return 0;
printk("\n=======================================================\n");
@@ -943,9 +1068,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
printk( "-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
- print_lock(check_source);
+ print_lock(check_src);
printk("\nbut task is already holding lock:\n");
- print_lock(check_target);
+ print_lock(check_tgt);
printk("\nwhich lock already depends on the new lock.\n\n");
printk("\nthe existing dependency chain (in reverse order) is:\n");
@@ -954,19 +1079,36 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
return 0;
}
-static noinline int print_circular_bug_tail(void)
+static inline int class_equal(struct lock_list *entry, void *data)
+{
+ return entry->class == data;
+}
+
+static noinline int print_circular_bug(struct lock_list *this,
+ struct lock_list *target,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
- struct lock_list this;
+ struct lock_list *parent;
+ int depth;
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
- this.class = hlock_class(check_source);
- if (!save_trace(&this.trace))
+ if (!save_trace(&this->trace))
return 0;
- print_circular_bug_entry(&this, 0);
+ depth = get_lock_depth(target);
+
+ print_circular_bug_header(target, depth, check_src, check_tgt);
+
+ parent = get_lock_parent(target);
+
+ while (parent) {
+ print_circular_bug_entry(parent, --depth);
+ parent = get_lock_parent(parent);
+ }
printk("\nother info that might help us debug this:\n\n");
lockdep_print_held_locks(curr);
@@ -977,73 +1119,69 @@ static noinline int print_circular_bug_tail(void)
return 0;
}
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
+static noinline int print_bfs_bug(int ret)
{
if (!debug_locks_off_graph_unlock())
return 0;
- WARN_ON(1);
+ WARN(1, "lockdep bfs error:%d\n", ret);
return 0;
}
-unsigned long __lockdep_count_forward_deps(struct lock_class *class,
- unsigned int depth)
+static int noop_count(struct lock_list *entry, void *data)
{
- struct lock_list *entry;
- unsigned long ret = 1;
+ (*(unsigned long *)data)++;
+ return 0;
+}
- if (lockdep_dependency_visit(class, depth))
- return 0;
+unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+{
+ unsigned long count = 0;
+ struct lock_list *uninitialized_var(target_entry);
- /*
- * Recurse this class's dependency list:
- */
- list_for_each_entry(entry, &class->locks_after, entry)
- ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+ __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
- return ret;
+ return count;
}
-
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
unsigned long ret, flags;
+ struct lock_list this;
+
+ this.parent = NULL;
+ this.class = class;
local_irq_save(flags);
__raw_spin_lock(&lockdep_lock);
- ret = __lockdep_count_forward_deps(class, 0);
+ ret = __lockdep_count_forward_deps(&this);
__raw_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
}
-unsigned long __lockdep_count_backward_deps(struct lock_class *class,
- unsigned int depth)
+unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
- struct lock_list *entry;
- unsigned long ret = 1;
+ unsigned long count = 0;
+ struct lock_list *uninitialized_var(target_entry);
- if (lockdep_dependency_visit(class, depth))
- return 0;
- /*
- * Recurse this class's dependency list:
- */
- list_for_each_entry(entry, &class->locks_before, entry)
- ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+ __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
- return ret;
+ return count;
}
unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
unsigned long ret, flags;
+ struct lock_list this;
+
+ this.parent = NULL;
+ this.class = class;
local_irq_save(flags);
__raw_spin_lock(&lockdep_lock);
- ret = __lockdep_count_backward_deps(class, 0);
+ ret = __lockdep_count_backward_deps(&this);
__raw_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
@@ -1055,29 +1193,16 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
* lead to <target>. Print an error and return 0 if it does.
*/
static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
+ int result;
- if (lockdep_dependency_visit(source, depth))
- return 1;
+ debug_atomic_inc(&nr_cyclic_checks);
- debug_atomic_inc(&nr_cyclic_check_recursions);
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_after, entry) {
- if (entry->class == hlock_class(check_target))
- return print_circular_bug_header(entry, depth+1);
- debug_atomic_inc(&nr_cyclic_checks);
- if (!check_noncircular(entry->class, depth+1))
- return print_circular_bug_entry(entry, depth+1);
- }
- return 1;
+ result = __bfs_forwards(root, target, class_equal, target_entry);
+
+ return result;
}
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
@@ -1086,103 +1211,121 @@ check_noncircular(struct lock_class *source, unsigned int depth)
* proving that two subgraphs can be connected by a new dependency
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
-static enum lock_usage_bit find_usage_bit;
-static struct lock_class *forwards_match, *backwards_match;
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+ return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
+
/*
* Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
*
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
*
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
*/
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
- int ret;
-
- if (lockdep_dependency_visit(source, depth))
- return 1;
-
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
+ int result;
debug_atomic_inc(&nr_find_usage_forwards_checks);
- if (source->usage_mask & (1 << find_usage_bit)) {
- forwards_match = source;
- return 2;
- }
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_after, entry) {
- debug_atomic_inc(&nr_find_usage_forwards_recursions);
- ret = find_usage_forwards(entry->class, depth+1);
- if (ret == 2 || ret == 0)
- return ret;
- }
- return 1;
+ result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+ return result;
}
/*
* Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
*
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
*
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
*/
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
- int ret;
+ int result;
- if (lockdep_dependency_visit(source, depth))
- return 1;
+ debug_atomic_inc(&nr_find_usage_backwards_checks);
- if (!__raw_spin_is_locked(&lockdep_lock))
- return DEBUG_LOCKS_WARN_ON(1);
+ result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
+ return result;
+}
- debug_atomic_inc(&nr_find_usage_backwards_checks);
- if (source->usage_mask & (1 << find_usage_bit)) {
- backwards_match = source;
- return 2;
- }
+static void print_lock_class_header(struct lock_class *class, int depth)
+{
+ int bit;
- if (!source && debug_locks_off_graph_unlock()) {
- WARN_ON(1);
- return 0;
- }
+ printk("%*s->", depth, "");
+ print_lock_name(class);
+ printk(" ops: %lu", class->ops);
+ printk(" {\n");
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_before, entry) {
- debug_atomic_inc(&nr_find_usage_backwards_recursions);
- ret = find_usage_backwards(entry->class, depth+1);
- if (ret == 2 || ret == 0)
- return ret;
+ for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+ if (class->usage_mask & (1 << bit)) {
+ int len = depth;
+
+ len += printk("%*s %s", depth, "", usage_str[bit]);
+ len += printk(" at:\n");
+ print_stack_trace(class->usage_traces + bit, len);
+ }
}
- return 1;
+ printk("%*s }\n", depth, "");
+
+ printk("%*s ... key at: ",depth,"");
+ print_ip_sym((unsigned long)class->key);
+}
+
+/*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+ struct lock_list *root)
+{
+ struct lock_list *entry = leaf;
+ int depth;
+
+	/* compute depth from generated tree by BFS */
+ depth = get_lock_depth(leaf);
+
+ do {
+ print_lock_class_header(entry->class, depth);
+ printk("%*s ... acquired at:\n", depth, "");
+ print_stack_trace(&entry->trace, 2);
+ printk("\n");
+
+ if (depth == 0 && (entry != root)) {
+ printk("lockdep:%s bad BFS generated tree\n", __func__);
+ break;
+ }
+
+ entry = get_lock_parent(entry);
+ depth--;
+ } while (entry && (depth >= 0));
+
+ return;
}
static int
print_bad_irq_dependency(struct task_struct *curr,
+ struct lock_list *prev_root,
+ struct lock_list *next_root,
+ struct lock_list *backwards_entry,
+ struct lock_list *forwards_entry,
struct held_lock *prev,
struct held_lock *next,
enum lock_usage_bit bit1,
@@ -1215,26 +1358,32 @@ print_bad_irq_dependency(struct task_struct *curr,
printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
irqclass);
- print_lock_name(backwards_match);
+ print_lock_name(backwards_entry->class);
printk("\n... which became %s-irq-safe at:\n", irqclass);
- print_stack_trace(backwards_match->usage_traces + bit1, 1);
+ print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
printk("\nto a %s-irq-unsafe lock:\n", irqclass);
- print_lock_name(forwards_match);
+ print_lock_name(forwards_entry->class);
printk("\n... which became %s-irq-unsafe at:\n", irqclass);
printk("...");
- print_stack_trace(forwards_match->usage_traces + bit2, 1);
+ print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
printk("\nother info that might help us debug this:\n\n");
lockdep_print_held_locks(curr);
- printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
- print_lock_dependencies(backwards_match, 0);
+ printk("\nthe dependencies between %s-irq-safe lock", irqclass);
+ printk(" and the holding lock:\n");
+ if (!save_trace(&prev_root->trace))
+ return 0;
+ print_shortest_lock_dependencies(backwards_entry, prev_root);
- printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
- print_lock_dependencies(forwards_match, 0);
+ printk("\nthe dependencies between the lock to be acquired");
+ printk(" and %s-irq-unsafe lock:\n", irqclass);
+ if (!save_trace(&next_root->trace))
+ return 0;
+ print_shortest_lock_dependencies(forwards_entry, next_root);
printk("\nstack backtrace:\n");
dump_stack();
@@ -1248,19 +1397,30 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
enum lock_usage_bit bit_forwards, const char *irqclass)
{
int ret;
+ struct lock_list this, that;
+ struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *uninitialized_var(target_entry1);
- find_usage_bit = bit_backwards;
- /* fills in <backwards_match> */
- ret = find_usage_backwards(hlock_class(prev), 0);
- if (!ret || ret == 1)
+ this.parent = NULL;
+
+ this.class = hlock_class(prev);
+ ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- find_usage_bit = bit_forwards;
- ret = find_usage_forwards(hlock_class(next), 0);
- if (!ret || ret == 1)
+ that.parent = NULL;
+ that.class = hlock_class(next);
+ ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- /* ret == 2 */
- return print_bad_irq_dependency(curr, prev, next,
+
+ return print_bad_irq_dependency(curr, &this, &that,
+ target_entry, target_entry1,
+ prev, next,
bit_backwards, bit_forwards, irqclass);
}
@@ -1472,6 +1632,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
{
struct lock_list *entry;
int ret;
+ struct lock_list this;
+ struct lock_list *uninitialized_var(target_entry);
/*
* Prove that the new <prev> -> <next> dependency would not
@@ -1482,10 +1644,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* We are using global variables to control the recursion, to
* keep the stackframe size of the recursive functions low:
*/
- check_source = next;
- check_target = prev;
- if (!(check_noncircular(hlock_class(next), 0)))
- return print_circular_bug_tail();
+ this.class = hlock_class(next);
+ this.parent = NULL;
+ ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+ if (unlikely(!ret))
+ return print_circular_bug(&this, target_entry, next, prev);
+ else if (unlikely(ret < 0))
+ return print_bfs_bug(ret);
if (!check_prev_add_irq(curr, prev, next))
return 0;
@@ -1884,7 +2049,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
* print irq inversion bug:
*/
static int
-print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
+print_irq_inversion_bug(struct task_struct *curr,
+ struct lock_list *root, struct lock_list *other,
struct held_lock *this, int forwards,
const char *irqclass)
{
@@ -1902,17 +2068,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
else
printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
- print_lock_name(other);
+ print_lock_name(other->class);
printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
- printk("\nthe first lock's dependencies:\n");
- print_lock_dependencies(hlock_class(this), 0);
-
- printk("\nthe second lock's dependencies:\n");
- print_lock_dependencies(other, 0);
+ printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+ if (!save_trace(&root->trace))
+ return 0;
+ print_shortest_lock_dependencies(other, root);
printk("\nstack backtrace:\n");
dump_stack();
@@ -1929,14 +2094,19 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit, const char *irqclass)
{
int ret;
-
- find_usage_bit = bit;
- /* fills in <forwards_match> */
- ret = find_usage_forwards(hlock_class(this), 0);
- if (!ret || ret == 1)
+ struct lock_list root;
+ struct lock_list *uninitialized_var(target_entry);
+
+ root.parent = NULL;
+ root.class = hlock_class(this);
+ ret = find_usage_forwards(&root, bit, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+ return print_irq_inversion_bug(curr, &root, target_entry,
+ this, 1, irqclass);
}
/*
@@ -1948,14 +2118,19 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit, const char *irqclass)
{
int ret;
-
- find_usage_bit = bit;
- /* fills in <backwards_match> */
- ret = find_usage_backwards(hlock_class(this), 0);
- if (!ret || ret == 1)
+ struct lock_list root;
+ struct lock_list *uninitialized_var(target_entry);
+
+ root.parent = NULL;
+ root.class = hlock_class(this);
+ ret = find_usage_backwards(&root, bit, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+ return print_irq_inversion_bug(curr, &root, target_entry,
+					this, 0, irqclass);
}
void print_irqtrace_events(struct task_struct *curr)
@@ -2530,13 +2705,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
- struct lockdep_map *nest_lock, unsigned long ip)
+ struct lockdep_map *nest_lock, unsigned long ip,
+ int references)
{
struct task_struct *curr = current;
struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth, id;
int chain_head = 0;
+ int class_idx;
u64 chain_key;
if (!prove_locking)
@@ -2584,10 +2761,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
+ class_idx = class - lock_classes + 1;
+
+ if (depth) {
+ hlock = curr->held_locks + depth - 1;
+ if (hlock->class_idx == class_idx && nest_lock) {
+ if (hlock->references)
+ hlock->references++;
+ else
+ hlock->references = 2;
+
+ return 1;
+ }
+ }
+
hlock = curr->held_locks + depth;
if (DEBUG_LOCKS_WARN_ON(!class))
return 0;
- hlock->class_idx = class - lock_classes + 1;
+ hlock->class_idx = class_idx;
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->nest_lock = nest_lock;
@@ -2595,6 +2786,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->read = read;
hlock->check = check;
hlock->hardirqs_off = !!hardirqs_off;
+ hlock->references = references;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
hlock->holdtime_stamp = sched_clock();
@@ -2703,6 +2895,30 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
return 1;
}
+static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+{
+ if (hlock->instance == lock)
+ return 1;
+
+ if (hlock->references) {
+ struct lock_class *class = lock->class_cache;
+
+ if (!class)
+ class = look_up_lock_class(lock, 0);
+
+ if (DEBUG_LOCKS_WARN_ON(!class))
+ return 0;
+
+ if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
+ return 0;
+
+ if (hlock->class_idx == class - lock_classes + 1)
+ return 1;
+ }
+
+ return 0;
+}
+
static int
__lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
@@ -2726,7 +2942,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -2745,7 +2961,8 @@ found_it:
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->nest_lock, hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip,
+ hlock->references))
return 0;
}
@@ -2784,20 +3001,34 @@ lock_release_non_nested(struct task_struct *curr,
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
- lock_release_holdtime(hlock);
+ if (hlock->instance == lock)
+ lock_release_holdtime(hlock);
+
+ if (hlock->references) {
+ hlock->references--;
+ if (hlock->references) {
+ /*
+ * We had, and after removing one, still have
+ * references; the current lock stack is still
+ * valid. We're done!
+ */
+ return 1;
+ }
+ }
/*
* We have the right lock to unlock, 'hlock' points to it.
* Now we remove it from the stack, and add back the other
* entries (if any), recalculating the hash along the way:
*/
+
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
@@ -2806,7 +3037,8 @@ found_it:
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->nest_lock, hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip,
+ hlock->references))
return 0;
}
@@ -2836,7 +3068,7 @@ static int lock_release_nested(struct task_struct *curr,
/*
* Is the unlock non-nested:
*/
- if (hlock->instance != lock)
+ if (hlock->instance != lock || hlock->references)
return lock_release_non_nested(curr, lock, ip);
curr->lockdep_depth--;
@@ -2881,6 +3113,21 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
check_chain_key(curr);
}
+static int __lock_is_held(struct lockdep_map *lock)
+{
+ struct task_struct *curr = current;
+ int i;
+
+ for (i = 0; i < curr->lockdep_depth; i++) {
+ struct held_lock *hlock = curr->held_locks + i;
+
+ if (match_held_lock(hlock, lock))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* Check whether we follow the irq-flags state precisely:
*/
@@ -2957,7 +3204,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 1;
__lock_acquire(lock, subclass, trylock, read, check,
- irqs_disabled_flags(flags), nest_lock, ip);
+ irqs_disabled_flags(flags), nest_lock, ip, 0);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
@@ -2982,6 +3229,26 @@ void lock_release(struct lockdep_map *lock, int nested,
}
EXPORT_SYMBOL_GPL(lock_release);
+int lock_is_held(struct lockdep_map *lock)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (unlikely(current->lockdep_recursion))
+ return ret;
+
+ raw_local_irq_save(flags);
+ check_flags(flags);
+
+ current->lockdep_recursion = 1;
+ ret = __lock_is_held(lock);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lock_is_held);
+
void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
{
current->lockdep_reclaim_gfp = gfp_mask;
@@ -3041,7 +3308,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -3049,6 +3316,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
return;
found_it:
+ if (hlock->instance != lock)
+ return;
+
hlock->waittime_stamp = sched_clock();
contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
@@ -3088,7 +3358,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -3096,6 +3366,9 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
return;
found_it:
+ if (hlock->instance != lock)
+ return;
+
cpu = smp_processor_id();
if (hlock->waittime_stamp) {
now = sched_clock();
@@ -3326,7 +3599,12 @@ void __init lockdep_info(void)
sizeof(struct list_head) * CLASSHASH_SIZE +
sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
- sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
+ sizeof(struct list_head) * CHAINHASH_SIZE
+#ifdef CONFIG_PROVE_LOCKING
+ + sizeof(struct circular_queue)
+#endif
+ ) / 1024
+ );
printk(" per task-struct memory footprint: %lu bytes\n",
sizeof(struct held_lock) * MAX_LOCK_DEPTH);
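
The hlock->references / nest_lock changes above target code that takes many locks of a single class under one outer lock (in the style of mm_take_all_locks()). A minimal sketch with made-up names, also showing the new lock_is_held() query:

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>
#include <linux/debug_locks.h>

struct demo_obj {
	spinlock_t lock;
};

static DEFINE_MUTEX(demo_nest);		/* outer lock serializing the group */

static void demo_lock_all(struct demo_obj **obj, int n)
{
	int i;

	mutex_lock(&demo_nest);
	for (i = 0; i < n; i++)
		/*
		 * Same lock class every time; with &demo_nest passed as the
		 * nest_lock, __lock_acquire() above collapses these into one
		 * held_lock entry and only bumps hlock->references, instead
		 * of overflowing MAX_LOCK_DEPTH.
		 */
		spin_lock_nest_lock(&obj[i]->lock, &demo_nest);
}

static void demo_unlock_all(struct demo_obj **obj, int n)
{
	int i;

	for (i = 0; i < n; i++)
		spin_unlock(&obj[i]->lock);
	mutex_unlock(&demo_nest);
}

static void demo_assert_held(struct demo_obj *obj)
{
	/* the new query; only meaningful on CONFIG_LOCKDEP builds */
	WARN_ON(debug_locks && !lock_is_held(&obj->lock.dep_map));
}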
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac3a0d7..a2ee95ad1313 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;
+extern unsigned int max_bfs_queue_depth;
+
#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index e94caa666dba..d4b3dbc79fdb 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -25,38 +25,12 @@
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct lock_class *class;
-
- (*pos)++;
-
- if (v == SEQ_START_TOKEN)
- class = m->private;
- else {
- class = v;
-
- if (class->lock_entry.next != &all_lock_classes)
- class = list_entry(class->lock_entry.next,
- struct lock_class, lock_entry);
- else
- class = NULL;
- }
-
- return class;
+ return seq_list_next(v, &all_lock_classes, pos);
}
static void *l_start(struct seq_file *m, loff_t *pos)
{
- struct lock_class *class;
- loff_t i = 0;
-
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- list_for_each_entry(class, &all_lock_classes, lock_entry) {
- if (++i == *pos)
- return class;
- }
- return NULL;
+ return seq_list_start_head(&all_lock_classes, *pos);
}
static void l_stop(struct seq_file *m, void *v)
@@ -82,11 +56,11 @@ static void print_name(struct seq_file *m, struct lock_class *class)
static int l_show(struct seq_file *m, void *v)
{
- struct lock_class *class = v;
+ struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
struct lock_list *entry;
char usage[LOCK_USAGE_CHARS];
- if (v == SEQ_START_TOKEN) {
+ if (v == &all_lock_classes) {
seq_printf(m, "all lock classes:\n");
return 0;
}
@@ -128,17 +102,7 @@ static const struct seq_operations lockdep_ops = {
static int lockdep_open(struct inode *inode, struct file *file)
{
- int res = seq_open(file, &lockdep_ops);
- if (!res) {
- struct seq_file *m = file->private_data;
-
- if (!list_empty(&all_lock_classes))
- m->private = list_entry(all_lock_classes.next,
- struct lock_class, lock_entry);
- else
- m->private = NULL;
- }
- return res;
+ return seq_open(file, &lockdep_ops);
}
static const struct file_operations proc_lockdep_operations = {
@@ -149,37 +113,23 @@ static const struct file_operations proc_lockdep_operations = {
};
#ifdef CONFIG_PROVE_LOCKING
-static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct lock_chain *chain;
-
- (*pos)++;
-
- if (v == SEQ_START_TOKEN)
- chain = m->private;
- else {
- chain = v;
-
- if (*pos < nr_lock_chains)
- chain = lock_chains + *pos;
- else
- chain = NULL;
- }
-
- return chain;
-}
-
static void *lc_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0)
return SEQ_START_TOKEN;
- if (*pos < nr_lock_chains)
- return lock_chains + *pos;
+ if (*pos - 1 < nr_lock_chains)
+ return lock_chains + (*pos - 1);
return NULL;
}
+static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return lc_start(m, pos);
+}
+
static void lc_stop(struct seq_file *m, void *v)
{
}
@@ -220,16 +170,7 @@ static const struct seq_operations lockdep_chains_ops = {
static int lockdep_chains_open(struct inode *inode, struct file *file)
{
- int res = seq_open(file, &lockdep_chains_ops);
- if (!res) {
- struct seq_file *m = file->private_data;
-
- if (nr_lock_chains)
- m->private = lock_chains;
- else
- m->private = NULL;
- }
- return res;
+ return seq_open(file, &lockdep_chains_ops);
}
static const struct file_operations proc_lockdep_chains_operations = {
@@ -258,16 +199,10 @@ static void lockdep_stats_debug_show(struct seq_file *m)
debug_atomic_read(&chain_lookup_hits));
seq_printf(m, " cyclic checks: %11u\n",
debug_atomic_read(&nr_cyclic_checks));
- seq_printf(m, " cyclic-check recursions: %11u\n",
- debug_atomic_read(&nr_cyclic_check_recursions));
seq_printf(m, " find-mask forwards checks: %11u\n",
debug_atomic_read(&nr_find_usage_forwards_checks));
- seq_printf(m, " find-mask forwards recursions: %11u\n",
- debug_atomic_read(&nr_find_usage_forwards_recursions));
seq_printf(m, " find-mask backwards checks: %11u\n",
debug_atomic_read(&nr_find_usage_backwards_checks));
- seq_printf(m, " find-mask backwards recursions:%11u\n",
- debug_atomic_read(&nr_find_usage_backwards_recursions));
seq_printf(m, " hardirq on events: %11u\n", hi1);
seq_printf(m, " hardirq off events: %11u\n", hi2);
@@ -409,8 +344,10 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
nr_unused);
seq_printf(m, " max locking depth: %11u\n",
max_lockdep_depth);
- seq_printf(m, " max recursion depth: %11u\n",
- max_recursion_depth);
+#ifdef CONFIG_PROVE_LOCKING
+ seq_printf(m, " max bfs queue depth: %11u\n",
+ max_bfs_queue_depth);
+#endif
lockdep_stats_debug_show(m);
seq_printf(m, " debug_locks: %11u\n",
debug_locks);
@@ -438,7 +375,6 @@ struct lock_stat_data {
};
struct lock_stat_seq {
- struct lock_stat_data *iter;
struct lock_stat_data *iter_end;
struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};
@@ -626,34 +562,22 @@ static void seq_header(struct seq_file *m)
static void *ls_start(struct seq_file *m, loff_t *pos)
{
struct lock_stat_seq *data = m->private;
+ struct lock_stat_data *iter;
if (*pos == 0)
return SEQ_START_TOKEN;
- data->iter = data->stats + *pos;
- if (data->iter >= data->iter_end)
- data->iter = NULL;
+ iter = data->stats + (*pos - 1);
+ if (iter >= data->iter_end)
+ iter = NULL;
- return data->iter;
+ return iter;
}
static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct lock_stat_seq *data = m->private;
-
(*pos)++;
-
- if (v == SEQ_START_TOKEN)
- data->iter = data->stats;
- else {
- data->iter = v;
- data->iter++;
- }
-
- if (data->iter == data->iter_end)
- data->iter = NULL;
-
- return data->iter;
+ return ls_start(m, pos);
}
static void ls_stop(struct seq_file *m, void *v)
@@ -691,7 +615,6 @@ static int lock_stat_open(struct inode *inode, struct file *file)
struct lock_stat_data *iter = data->stats;
struct seq_file *m = file->private_data;
- data->iter = iter;
list_for_each_entry(class, &all_lock_classes, lock_entry) {
iter->class = class;
iter->stats = lock_stats(class);
@@ -699,7 +622,7 @@ static int lock_stat_open(struct inode *inode, struct file *file)
}
data->iter_end = iter;
- sort(data->stats, data->iter_end - data->iter,
+ sort(data->stats, data->iter_end - data->stats,
sizeof(struct lock_stat_data),
lock_stat_cmp, NULL);
@@ -734,7 +657,6 @@ static int lock_stat_release(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
vfree(seq->private);
- seq->private = NULL;
return seq_release(inode, file);
}
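
The rewritten iterators rely on the seq_list_*() helpers, where seq_list_start_head() returns the list head itself as a header token that ->show() must recognise (as l_show() now does with &all_lock_classes). A self-contained sketch of the same pattern, with hypothetical names:

#include <linux/seq_file.h>
#include <linux/list.h>

static LIST_HEAD(demo_list);

struct demo_item {
	struct list_head node;
	const char *name;
};

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start_head(&demo_list, *pos);
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &demo_list, pos);
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
	struct demo_item *item;

	if (v == &demo_list) {		/* the header token */
		seq_puts(m, "all demo items:\n");
		return 0;
	}
	item = list_entry(v, struct demo_item, node);
	seq_printf(m, "%s\n", item->name);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};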
diff --git a/kernel/module.c b/kernel/module.c
index 2d537186191f..46580edff0cb 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -55,6 +55,11 @@
#include <linux/percpu.h>
#include <linux/kmemleak.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/module.h>
+
+EXPORT_TRACEPOINT_SYMBOL(module_get);
+
#if 0
#define DEBUGP printk
#else
@@ -942,6 +947,8 @@ void module_put(struct module *module)
if (module) {
unsigned int cpu = get_cpu();
local_dec(__module_ref_addr(module, cpu));
+ trace_module_put(module, _RET_IP_,
+ local_read(__module_ref_addr(module, cpu)));
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter);
@@ -1497,6 +1504,8 @@ static int __unlink_module(void *_mod)
/* Free a module, remove from lists, etc (must hold module_mutex). */
static void free_module(struct module *mod)
{
+ trace_module_free(mod);
+
/* Delete from various lists */
stop_machine(__unlink_module, mod, NULL);
remove_notes_attrs(mod);
@@ -2364,6 +2373,8 @@ static noinline struct module *load_module(void __user *umod,
/* Get rid of temporary copy */
vfree(hdr);
+ trace_module_load(mod);
+
/* Done! */
return mod;
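
The module_load/module_free/module_get/module_put tracepoints come from <trace/events/module.h>, which is not shown in this hunk. Purely as an illustration of the usual TRACE_EVENT boilerplate such a header contains, not its actual contents, a definition for module_load might look roughly like this (the surrounding TRACE_SYSTEM/define_trace scaffolding is omitted):

TRACE_EVENT(module_load,

	TP_PROTO(struct module *mod),

	TP_ARGS(mod),

	TP_STRUCT__entry(
		__string(name, mod->name)
	),

	TP_fast_assign(
		__assign_str(name, mod->name);
	),

	TP_printk("%s", __get_str(name))
);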
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index d7cbc579fc80..e0d91fdf0c3c 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -46,12 +46,18 @@ static atomic_t nr_task_counters __read_mostly;
/*
* perf counter paranoia level:
- * 0 - not paranoid
- * 1 - disallow cpu counters to unpriv
- * 2 - disallow kernel profiling to unpriv
+ * -1 - not paranoid at all
+ * 0 - disallow raw tracepoint access for unpriv
+ * 1 - disallow cpu counters for unpriv
+ * 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_counter_paranoid __read_mostly = 1;
+static inline bool perf_paranoid_tracepoint_raw(void)
+{
+ return sysctl_perf_counter_paranoid > -1;
+}
+
static inline bool perf_paranoid_cpu(void)
{
return sysctl_perf_counter_paranoid > 0;
@@ -469,7 +475,8 @@ static void update_counter_times(struct perf_counter *counter)
struct perf_counter_context *ctx = counter->ctx;
u64 run_end;
- if (counter->state < PERF_COUNTER_STATE_INACTIVE)
+ if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
+ counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
return;
counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
@@ -518,7 +525,7 @@ static void __perf_counter_disable(void *info)
*/
if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
update_context_time(ctx);
- update_counter_times(counter);
+ update_group_times(counter);
if (counter == counter->group_leader)
group_sched_out(counter, cpuctx, ctx);
else
@@ -573,7 +580,7 @@ static void perf_counter_disable(struct perf_counter *counter)
* in, so we can change the state safely.
*/
if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
- update_counter_times(counter);
+ update_group_times(counter);
counter->state = PERF_COUNTER_STATE_OFF;
}
@@ -851,6 +858,27 @@ retry:
}
/*
+ * Put a counter into inactive state and update time fields.
+ * Enabling the leader of a group effectively enables all
+ * the group members that aren't explicitly disabled, so we
+ * have to update their ->tstamp_enabled also.
+ * Note: this works for group members as well as group leaders
+ * since the non-leader members' sibling_lists will be empty.
+ */
+static void __perf_counter_mark_enabled(struct perf_counter *counter,
+ struct perf_counter_context *ctx)
+{
+ struct perf_counter *sub;
+
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+ list_for_each_entry(sub, &counter->sibling_list, list_entry)
+ if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
+ sub->tstamp_enabled =
+ ctx->time - sub->total_time_enabled;
+}
+
+/*
* Cross CPU call to enable a performance counter
*/
static void __perf_counter_enable(void *info)
@@ -877,8 +905,7 @@ static void __perf_counter_enable(void *info)
if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
goto unlock;
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+ __perf_counter_mark_enabled(counter, ctx);
/*
* If the counter is in a group and isn't the group leader,
@@ -971,11 +998,9 @@ static void perf_counter_enable(struct perf_counter *counter)
* Since we have the lock this context can't be scheduled
* in, so we can change the state safely.
*/
- if (counter->state == PERF_COUNTER_STATE_OFF) {
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled =
- ctx->time - counter->total_time_enabled;
- }
+ if (counter->state == PERF_COUNTER_STATE_OFF)
+ __perf_counter_mark_enabled(counter, ctx);
+
out:
spin_unlock_irq(&ctx->lock);
}
@@ -1479,9 +1504,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
counter->attr.enable_on_exec = 0;
if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
continue;
- counter->state = PERF_COUNTER_STATE_INACTIVE;
- counter->tstamp_enabled =
- ctx->time - counter->total_time_enabled;
+ __perf_counter_mark_enabled(counter, ctx);
enabled = 1;
}
@@ -1675,6 +1698,11 @@ static void free_counter(struct perf_counter *counter)
atomic_dec(&nr_task_counters);
}
+ if (counter->output) {
+ fput(counter->output->filp);
+ counter->output = NULL;
+ }
+
if (counter->destroy)
counter->destroy(counter);
@@ -1960,6 +1988,8 @@ unlock:
return ret;
}
+int perf_counter_set_output(struct perf_counter *counter, int output_fd);
+
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_counter *counter = file->private_data;
@@ -1983,6 +2013,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PERF_COUNTER_IOC_PERIOD:
return perf_counter_period(counter, (u64 __user *)arg);
+ case PERF_COUNTER_IOC_SET_OUTPUT:
+ return perf_counter_set_output(counter, arg);
+
default:
return -ENOTTY;
}
@@ -2253,6 +2286,11 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
WARN_ON_ONCE(counter->ctx->parent_ctx);
mutex_lock(&counter->mmap_mutex);
+ if (counter->output) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
if (atomic_inc_not_zero(&counter->mmap_count)) {
if (nr_pages != counter->data->nr_pages)
ret = -EINVAL;
@@ -2638,6 +2676,7 @@ static int perf_output_begin(struct perf_output_handle *handle,
struct perf_counter *counter, unsigned int size,
int nmi, int sample)
{
+ struct perf_counter *output_counter;
struct perf_mmap_data *data;
unsigned int offset, head;
int have_lost;
@@ -2647,13 +2686,17 @@ static int perf_output_begin(struct perf_output_handle *handle,
u64 lost;
} lost_event;
+ rcu_read_lock();
/*
* For inherited counters we send all the output towards the parent.
*/
if (counter->parent)
counter = counter->parent;
- rcu_read_lock();
+ output_counter = rcu_dereference(counter->output);
+ if (output_counter)
+ counter = output_counter;
+
data = rcu_dereference(counter->data);
if (!data)
goto out;
@@ -3934,6 +3977,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
* have these.
*/
if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
+ perf_paranoid_tracepoint_raw() &&
!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
@@ -4202,6 +4246,57 @@ err_size:
goto out;
}
+int perf_counter_set_output(struct perf_counter *counter, int output_fd)
+{
+ struct perf_counter *output_counter = NULL;
+ struct file *output_file = NULL;
+ struct perf_counter *old_output;
+ int fput_needed = 0;
+ int ret = -EINVAL;
+
+ if (!output_fd)
+ goto set;
+
+ output_file = fget_light(output_fd, &fput_needed);
+ if (!output_file)
+ return -EBADF;
+
+ if (output_file->f_op != &perf_fops)
+ goto out;
+
+ output_counter = output_file->private_data;
+
+ /* Don't chain output fds */
+ if (output_counter->output)
+ goto out;
+
+ /* Don't set an output fd when we already have an output channel */
+ if (counter->data)
+ goto out;
+
+ atomic_long_inc(&output_file->f_count);
+
+set:
+ mutex_lock(&counter->mmap_mutex);
+ old_output = counter->output;
+ rcu_assign_pointer(counter->output, output_counter);
+ mutex_unlock(&counter->mmap_mutex);
+
+ if (old_output) {
+ /*
+ * we need to make sure no existing perf_output_*()
+ * is still referencing this counter.
+ */
+ synchronize_rcu();
+ fput(old_output->filp);
+ }
+
+ ret = 0;
+out:
+ fput_light(output_file, fput_needed);
+ return ret;
+}
+
/**
* sys_perf_counter_open - open a performance counter, associate it to a task/cpu
*
@@ -4221,15 +4316,15 @@ SYSCALL_DEFINE5(perf_counter_open,
struct file *group_file = NULL;
int fput_needed = 0;
int fput_needed2 = 0;
- int ret;
+ int err;
/* for future expandability... */
- if (flags)
+ if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
return -EINVAL;
- ret = perf_copy_attr(attr_uptr, &attr);
- if (ret)
- return ret;
+ err = perf_copy_attr(attr_uptr, &attr);
+ if (err)
+ return err;
if (!attr.exclude_kernel) {
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
@@ -4252,8 +4347,8 @@ SYSCALL_DEFINE5(perf_counter_open,
* Look up the group leader (we will attach this counter to it):
*/
group_leader = NULL;
- if (group_fd != -1) {
- ret = -EINVAL;
+ if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
+ err = -EINVAL;
group_file = fget_light(group_fd, &fput_needed);
if (!group_file)
goto err_put_context;
@@ -4282,18 +4377,24 @@ SYSCALL_DEFINE5(perf_counter_open,
counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
NULL, GFP_KERNEL);
- ret = PTR_ERR(counter);
+ err = PTR_ERR(counter);
if (IS_ERR(counter))
goto err_put_context;
- ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
- if (ret < 0)
+ err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
+ if (err < 0)
goto err_free_put_context;
- counter_file = fget_light(ret, &fput_needed2);
+ counter_file = fget_light(err, &fput_needed2);
if (!counter_file)
goto err_free_put_context;
+ if (flags & PERF_FLAG_FD_OUTPUT) {
+ err = perf_counter_set_output(counter, group_fd);
+ if (err)
+ goto err_fput_free_put_context;
+ }
+
counter->filp = counter_file;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
@@ -4307,20 +4408,20 @@ SYSCALL_DEFINE5(perf_counter_open,
list_add_tail(&counter->owner_entry, &current->perf_counter_list);
mutex_unlock(&current->perf_counter_mutex);
+err_fput_free_put_context:
fput_light(counter_file, fput_needed2);
-out_fput:
- fput_light(group_file, fput_needed);
-
- return ret;
-
err_free_put_context:
- kfree(counter);
+ if (err < 0)
+ kfree(counter);
err_put_context:
- put_ctx(ctx);
+ if (err < 0)
+ put_ctx(ctx);
+
+ fput_light(group_file, fput_needed);
- goto out_fput;
+ return err;
}
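
From userspace, the new PERF_FLAG_FD_OUTPUT flag and PERF_COUNTER_IOC_SET_OUTPUT ioctl let one counter's events be delivered into another counter's mmap buffer. A hedged sketch (raw syscall wrapper; constants assumed to come from this kernel's exported perf_counter.h header):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>	/* struct perf_counter_attr, PERF_* */

static int perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
			     int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_counter_attr attr;
	int fd_a, fd_b;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;

	fd_a = perf_counter_open(&attr, 0, -1, -1, 0);

	/* route fd_b's samples into fd_a's buffer at open time ... */
	fd_b = perf_counter_open(&attr, 0, -1, fd_a,
				 PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT);

	/* ... or redirect an already-open counter later: */
	/* ioctl(fd_b, PERF_COUNTER_IOC_SET_OUTPUT, fd_a); */

	/*
	 * Only fd_a is then mmap()ed and read; per the perf_mmap() change
	 * above, mapping the redirected fd_b itself now fails with -EINVAL.
	 */
	return 0;
}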
/*
diff --git a/kernel/printk.c b/kernel/printk.c
index b4d97b54c1ec..e10d193a833a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -37,6 +37,12 @@
#include <asm/uaccess.h>
/*
+ * for_each_console() allows you to iterate on each console
+ */
+#define for_each_console(con) \
+ for (con = console_drivers; con != NULL; con = con->next)
+
+/*
* Architectures can override it:
*/
void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
@@ -61,6 +67,8 @@ int console_printk[4] = {
DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
};
+static int saved_console_loglevel = -1;
+
/*
* Low level drivers may need that to know if they can schedule in
* their unblank() callback or not. So let's export it.
@@ -372,10 +380,15 @@ int do_syslog(int type, char __user *buf, int len)
logged_chars = 0;
break;
case 6: /* Disable logging to console */
+ if (saved_console_loglevel == -1)
+ saved_console_loglevel = console_loglevel;
console_loglevel = minimum_console_loglevel;
break;
case 7: /* Enable logging to console */
- console_loglevel = default_console_loglevel;
+ if (saved_console_loglevel != -1) {
+ console_loglevel = saved_console_loglevel;
+ saved_console_loglevel = -1;
+ }
break;
case 8: /* Set level of messages printed to console */
error = -EINVAL;
@@ -384,6 +397,8 @@ int do_syslog(int type, char __user *buf, int len)
if (len < minimum_console_loglevel)
len = minimum_console_loglevel;
console_loglevel = len;
+ /* Implicitly re-enable logging to console */
+ saved_console_loglevel = -1;
error = 0;
break;
case 9: /* Number of chars in the log buffer */
@@ -412,7 +427,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
{
struct console *con;
- for (con = console_drivers; con; con = con->next) {
+ for_each_console(con) {
if ((con->flags & CON_ENABLED) && con->write &&
(cpu_online(smp_processor_id()) ||
(con->flags & CON_ANYTIME)))
@@ -544,7 +559,7 @@ static int have_callable_console(void)
{
struct console *con;
- for (con = console_drivers; con; con = con->next)
+ for_each_console(con)
if (con->flags & CON_ANYTIME)
return 1;
@@ -1082,7 +1097,7 @@ void console_unblank(void)
console_locked = 1;
console_may_schedule = 0;
- for (c = console_drivers; c != NULL; c = c->next)
+ for_each_console(c)
if ((c->flags & CON_ENABLED) && c->unblank)
c->unblank();
release_console_sem();
@@ -1097,7 +1112,7 @@ struct tty_driver *console_device(int *index)
struct tty_driver *driver = NULL;
acquire_console_sem();
- for (c = console_drivers; c != NULL; c = c->next) {
+ for_each_console(c) {
if (!c->device)
continue;
driver = c->device(c, index);
@@ -1134,25 +1149,49 @@ EXPORT_SYMBOL(console_start);
* to register the console printing procedure with printk() and to
* print any messages that were printed by the kernel before the
* console driver was initialized.
+ *
+ * This can happen pretty early during the boot process (because of
+ * early_printk) - sometimes before setup_arch() completes - be careful
+ * of what kernel features are used - they may not be initialised yet.
+ *
+ * There are two types of consoles - bootconsoles (early_printk) and
+ * "real" consoles (everything which is not a bootconsole) which are
+ * handled differently.
+ * - Any number of bootconsoles can be registered at any time.
+ * - As soon as a "real" console is registered, all bootconsoles
+ * will be unregistered automatically.
+ * - Once a "real" console is registered, any attempt to register a
+ * bootconsole will be rejected.
*/
-void register_console(struct console *console)
+void register_console(struct console *newcon)
{
int i;
unsigned long flags;
- struct console *bootconsole = NULL;
+ struct console *bcon = NULL;
- if (console_drivers) {
- if (console->flags & CON_BOOT)
- return;
- if (console_drivers->flags & CON_BOOT)
- bootconsole = console_drivers;
+ /*
+ * before we register a new CON_BOOT console, make sure we don't
+ * already have a valid console
+ */
+ if (console_drivers && newcon->flags & CON_BOOT) {
+ /* find the last or real console */
+ for_each_console(bcon) {
+ if (!(bcon->flags & CON_BOOT)) {
+ printk(KERN_INFO "Too late to register bootconsole %s%d\n",
+ newcon->name, newcon->index);
+ return;
+ }
+ }
}
- if (preferred_console < 0 || bootconsole || !console_drivers)
+ if (console_drivers && console_drivers->flags & CON_BOOT)
+ bcon = console_drivers;
+
+ if (preferred_console < 0 || bcon || !console_drivers)
preferred_console = selected_console;
- if (console->early_setup)
- console->early_setup();
+ if (newcon->early_setup)
+ newcon->early_setup();
/*
* See if we want to use this console driver. If we
@@ -1160,13 +1199,13 @@ void register_console(struct console *console)
* that registers here.
*/
if (preferred_console < 0) {
- if (console->index < 0)
- console->index = 0;
- if (console->setup == NULL ||
- console->setup(console, NULL) == 0) {
- console->flags |= CON_ENABLED;
- if (console->device) {
- console->flags |= CON_CONSDEV;
+ if (newcon->index < 0)
+ newcon->index = 0;
+ if (newcon->setup == NULL ||
+ newcon->setup(newcon, NULL) == 0) {
+ newcon->flags |= CON_ENABLED;
+ if (newcon->device) {
+ newcon->flags |= CON_CONSDEV;
preferred_console = 0;
}
}
@@ -1178,64 +1217,62 @@ void register_console(struct console *console)
*/
for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
i++) {
- if (strcmp(console_cmdline[i].name, console->name) != 0)
+ if (strcmp(console_cmdline[i].name, newcon->name) != 0)
continue;
- if (console->index >= 0 &&
- console->index != console_cmdline[i].index)
+ if (newcon->index >= 0 &&
+ newcon->index != console_cmdline[i].index)
continue;
- if (console->index < 0)
- console->index = console_cmdline[i].index;
+ if (newcon->index < 0)
+ newcon->index = console_cmdline[i].index;
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
if (console_cmdline[i].brl_options) {
- console->flags |= CON_BRL;
- braille_register_console(console,
+ newcon->flags |= CON_BRL;
+ braille_register_console(newcon,
console_cmdline[i].index,
console_cmdline[i].options,
console_cmdline[i].brl_options);
return;
}
#endif
- if (console->setup &&
- console->setup(console, console_cmdline[i].options) != 0)
+ if (newcon->setup &&
+ newcon->setup(newcon, console_cmdline[i].options) != 0)
break;
- console->flags |= CON_ENABLED;
- console->index = console_cmdline[i].index;
+ newcon->flags |= CON_ENABLED;
+ newcon->index = console_cmdline[i].index;
if (i == selected_console) {
- console->flags |= CON_CONSDEV;
+ newcon->flags |= CON_CONSDEV;
preferred_console = selected_console;
}
break;
}
- if (!(console->flags & CON_ENABLED))
+ if (!(newcon->flags & CON_ENABLED))
return;
- if (bootconsole && (console->flags & CON_CONSDEV)) {
- printk(KERN_INFO "console handover: boot [%s%d] -> real [%s%d]\n",
- bootconsole->name, bootconsole->index,
- console->name, console->index);
- unregister_console(bootconsole);
- console->flags &= ~CON_PRINTBUFFER;
- } else {
- printk(KERN_INFO "console [%s%d] enabled\n",
- console->name, console->index);
- }
+ /*
+ * If we have a bootconsole, and are switching to a real console,
+ * don't print everything out again, since when the boot console and
+ * the real console are the same physical device, it's annoying to
+ * see the beginning boot messages twice
+ */
+ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
+ newcon->flags &= ~CON_PRINTBUFFER;
/*
* Put this console in the list - keep the
* preferred driver at the head of the list.
*/
acquire_console_sem();
- if ((console->flags & CON_CONSDEV) || console_drivers == NULL) {
- console->next = console_drivers;
- console_drivers = console;
- if (console->next)
- console->next->flags &= ~CON_CONSDEV;
+ if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
+ newcon->next = console_drivers;
+ console_drivers = newcon;
+ if (newcon->next)
+ newcon->next->flags &= ~CON_CONSDEV;
} else {
- console->next = console_drivers->next;
- console_drivers->next = console;
+ newcon->next = console_drivers->next;
+ console_drivers->next = newcon;
}
- if (console->flags & CON_PRINTBUFFER) {
+ if (newcon->flags & CON_PRINTBUFFER) {
/*
* release_console_sem() will print out the buffered messages
* for us.
@@ -1245,6 +1282,28 @@ void register_console(struct console *console)
spin_unlock_irqrestore(&logbuf_lock, flags);
}
release_console_sem();
+
+ /*
+ * By unregistering the bootconsoles after we enable the real console
+ * we get the "console xxx enabled" message on all the consoles -
+ * boot consoles, real consoles, etc - this is to ensure that end
+ * users know there might be something in the kernel's log buffer that
+ * went to the bootconsole (that they do not see on the real console)
+ */
+ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
+ /* we need to iterate through twice, to make sure we print
+ * everything out, before we unregister the console(s)
+ */
+ printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n",
+ newcon->name, newcon->index);
+ for_each_console(bcon)
+ if (bcon->flags & CON_BOOT)
+ unregister_console(bcon);
+ } else {
+ printk(KERN_INFO "%sconsole [%s%d] enabled\n",
+ (newcon->flags & CON_BOOT) ? "boot" : "" ,
+ newcon->name, newcon->index);
+ }
}
EXPORT_SYMBOL(register_console);
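
A hedged sketch (hypothetical hardware) of the bootconsole side of the rules documented above: a CON_BOOT console registered from early setup code is accepted until the first real console appears, and is unregistered automatically once a CON_CONSDEV console takes over:

#include <linux/console.h>
#include <linux/init.h>

static void demo_early_write(struct console *con, const char *s, unsigned n)
{
	/* write the n characters at s to some always-available device */
}

static struct console demo_early_console = {
	.name	= "demoearly",
	.write	= demo_early_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,
	.index	= -1,
};

/* called very early from arch/platform setup code */
static void __init demo_register_early_console(void)
{
	register_console(&demo_early_console);
}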
@@ -1287,11 +1346,13 @@ EXPORT_SYMBOL(unregister_console);
static int __init disable_boot_consoles(void)
{
- if (console_drivers != NULL) {
- if (console_drivers->flags & CON_BOOT) {
+ struct console *con;
+
+ for_each_console(con) {
+ if (con->flags & CON_BOOT) {
printk(KERN_INFO "turn off boot console %s%d\n",
- console_drivers->name, console_drivers->index);
- return unregister_console(console_drivers);
+ con->name, con->index);
+ unregister_console(con);
}
}
return 0;
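
The saved_console_loglevel change is visible from userspace through syslog(2)/klogctl(3): commands 6 and 7 now save and restore the pre-existing console loglevel instead of forcing the default on re-enable. A small sketch:

#include <sys/klog.h>

static void demo_quiet_console_temporarily(void)
{
	klogctl(6, NULL, 0);	/* disable printk to console; level saved   */
	/* ... do something noisy ... */
	klogctl(7, NULL, 0);	/* re-enable; the saved level is restored,
				 * not default_console_loglevel             */
}

Setting an explicit level with command 8 discards the saved value, as the hunk above shows.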
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 082c320e4dbf..307c285af59e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -152,7 +152,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
if (!dumpable && !capable(CAP_SYS_PTRACE))
return -EPERM;
- return security_ptrace_may_access(task, mode);
+ return security_ptrace_access_check(task, mode);
}
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
deleted file mode 100644
index 0f2b0b311304..000000000000
--- a/kernel/rcuclassic.c
+++ /dev/null
@@ -1,807 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2001
- *
- * Authors: Dipankar Sarma <dipankar@in.ibm.com>
- * Manfred Spraul <manfred@colorfullife.com>
- *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/rcupdate.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/completion.h>
-#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/time.h>
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map =
- STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-EXPORT_SYMBOL_GPL(rcu_lock_map);
-#endif
-
-
-/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_ctrlblk = {
- .cur = -300,
- .completed = -300,
- .pending = -300,
- .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
- .cpumask = CPU_BITS_NONE,
-};
-
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
- .cur = -300,
- .completed = -300,
- .pending = -300,
- .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
- .cpumask = CPU_BITS_NONE,
-};
-
-static DEFINE_PER_CPU(struct rcu_data, rcu_data);
-static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-void rcu_qsctr_inc(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- rdp->passed_quiesc = 1;
-}
-
-void rcu_bh_qsctr_inc(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
- rdp->passed_quiesc = 1;
-}
-
-static int blimit = 10;
-static int qhimark = 10000;
-static int qlowmark = 100;
-
-#ifdef CONFIG_SMP
-static void force_quiescent_state(struct rcu_data *rdp,
- struct rcu_ctrlblk *rcp)
-{
- int cpu;
- unsigned long flags;
-
- set_need_resched();
- spin_lock_irqsave(&rcp->lock, flags);
- if (unlikely(!rcp->signaled)) {
- rcp->signaled = 1;
- /*
- * Don't send IPI to itself. With irqs disabled,
- * rdp->cpu is the current cpu.
- *
- * cpu_online_mask is updated by the _cpu_down()
- * using __stop_machine(). Since we're in irqs disabled
- * section, __stop_machine() is not exectuting, hence
- * the cpu_online_mask is stable.
- *
- * However, a cpu might have been offlined _just_ before
- * we disabled irqs while entering here.
- * And rcu subsystem might not yet have handled the CPU_DEAD
- * notification, leading to the offlined cpu's bit
- * being set in the rcp->cpumask.
- *
- * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
- * sending smp_reschedule() to an offlined CPU.
- */
- for_each_cpu_and(cpu,
- to_cpumask(rcp->cpumask), cpu_online_mask) {
- if (cpu != rdp->cpu)
- smp_send_reschedule(cpu);
- }
- }
- spin_unlock_irqrestore(&rcp->lock, flags);
-}
-#else
-static inline void force_quiescent_state(struct rcu_data *rdp,
- struct rcu_ctrlblk *rcp)
-{
- set_need_resched();
-}
-#endif
-
-static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- long batch;
-
- head->next = NULL;
- smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
-
- /*
- * Determine the batch number of this callback.
- *
- * Using ACCESS_ONCE to avoid the following error when gcc eliminates
- * local variable "batch" and emits codes like this:
- * 1) rdp->batch = rcp->cur + 1 # gets old value
- * ......
- * 2)rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
- * then [*nxttail[0], *nxttail[1]) may contain callbacks
- * that batch# = rdp->batch, see the comment of struct rcu_data.
- */
- batch = ACCESS_ONCE(rcp->cur) + 1;
-
- if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
- /* process callbacks */
- rdp->nxttail[0] = rdp->nxttail[1];
- rdp->nxttail[1] = rdp->nxttail[2];
- if (rcu_batch_after(batch - 1, rdp->batch))
- rdp->nxttail[0] = rdp->nxttail[2];
- }
-
- rdp->batch = batch;
- *rdp->nxttail[2] = head;
- rdp->nxttail[2] = &head->next;
-
- if (unlikely(++rdp->qlen > qhimark)) {
- rdp->blimit = INT_MAX;
- force_quiescent_state(rdp, &rcu_ctrlblk);
- }
-}
-
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-
-static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
-{
- rcp->gp_start = jiffies;
- rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
-}
-
-static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
-{
- int cpu;
- long delta;
- unsigned long flags;
-
- /* Only let one CPU complain about others per time interval. */
-
- spin_lock_irqsave(&rcp->lock, flags);
- delta = jiffies - rcp->jiffies_stall;
- if (delta < 2 || rcp->cur != rcp->completed) {
- spin_unlock_irqrestore(&rcp->lock, flags);
- return;
- }
- rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
- spin_unlock_irqrestore(&rcp->lock, flags);
-
- /* OK, time to rat on our buddy... */
-
- printk(KERN_ERR "INFO: RCU detected CPU stalls:");
- for_each_possible_cpu(cpu) {
- if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
- printk(" %d", cpu);
- }
- printk(" (detected by %d, t=%ld jiffies)\n",
- smp_processor_id(), (long)(jiffies - rcp->gp_start));
-}
-
-static void print_cpu_stall(struct rcu_ctrlblk *rcp)
-{
- unsigned long flags;
-
- printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
- smp_processor_id(), jiffies,
- jiffies - rcp->gp_start);
- dump_stack();
- spin_lock_irqsave(&rcp->lock, flags);
- if ((long)(jiffies - rcp->jiffies_stall) >= 0)
- rcp->jiffies_stall =
- jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
- spin_unlock_irqrestore(&rcp->lock, flags);
- set_need_resched(); /* kick ourselves to get things going. */
-}
-
-static void check_cpu_stall(struct rcu_ctrlblk *rcp)
-{
- long delta;
-
- delta = jiffies - rcp->jiffies_stall;
- if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
- delta >= 0) {
-
- /* We haven't checked in, so go dump stack. */
- print_cpu_stall(rcp);
-
- } else if (rcp->cur != rcp->completed && delta >= 2) {
-
- /* They had two seconds to dump stack, so complain. */
- print_other_cpu_stall(rcp);
- }
-}
-
-#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
-{
-}
-
-static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
-{
- unsigned long flags;
-
- head->func = func;
- local_irq_save(flags);
- __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
-/**
- * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by rcu_read_lock() and
- * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh()
- * and rcu_read_unlock_bh(), if in process context. These may be nested.
- */
-void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
-{
- unsigned long flags;
-
- head->func = func;
- local_irq_save(flags);
- __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(call_rcu_bh);
-
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed(void)
-{
- return rcu_ctrlblk.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed_bh(void)
-{
- return rcu_bh_ctrlblk.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
-/* Raises the softirq for processing rcu_callbacks. */
-static inline void raise_rcu_softirq(void)
-{
- raise_softirq(RCU_SOFTIRQ);
-}
-
-/*
- * Invoke the completed RCU callbacks. They are expected to be in
- * a per-cpu list.
- */
-static void rcu_do_batch(struct rcu_data *rdp)
-{
- unsigned long flags;
- struct rcu_head *next, *list;
- int count = 0;
-
- list = rdp->donelist;
- while (list) {
- next = list->next;
- prefetch(next);
- list->func(list);
- list = next;
- if (++count >= rdp->blimit)
- break;
- }
- rdp->donelist = list;
-
- local_irq_save(flags);
- rdp->qlen -= count;
- local_irq_restore(flags);
- if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
- rdp->blimit = blimit;
-
- if (!rdp->donelist)
- rdp->donetail = &rdp->donelist;
- else
- raise_rcu_softirq();
-}
-
-/*
- * Grace period handling:
- * The grace period handling consists out of two steps:
- * - A new grace period is started.
- * This is done by rcu_start_batch. The start is not broadcasted to
- * all cpus, they must pick this up by comparing rcp->cur with
- * rdp->quiescbatch. All cpus are recorded in the
- * rcu_ctrlblk.cpumask bitmap.
- * - All cpus must go through a quiescent state.
- * Since the start of the grace period is not broadcasted, at least two
- * calls to rcu_check_quiescent_state are required:
- * The first call just notices that a new grace period is running. The
- * following calls check if there was a quiescent state since the beginning
- * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
- * the bitmap is empty, then the grace period is completed.
- * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
- * period (if necessary).
- */
-
-/*
- * Register a new batch of callbacks, and start it up if there is currently no
- * active batch and the batch to be registered has not already occurred.
- * Caller must hold rcu_ctrlblk.lock.
- */
-static void rcu_start_batch(struct rcu_ctrlblk *rcp)
-{
- if (rcp->cur != rcp->pending &&
- rcp->completed == rcp->cur) {
- rcp->cur++;
- record_gp_stall_check_time(rcp);
-
- /*
- * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
- * Barrier Otherwise it can cause tickless idle CPUs to be
- * included in rcp->cpumask, which will extend graceperiods
- * unnecessarily.
- */
- smp_mb();
- cpumask_andnot(to_cpumask(rcp->cpumask),
- cpu_online_mask, nohz_cpu_mask);
-
- rcp->signaled = 0;
- }
-}
-
-/*
- * cpu went through a quiescent state since the beginning of the grace period.
- * Clear it from the cpu mask and complete the grace period if it was the last
- * cpu. Start another grace period if someone has further entries pending
- */
-static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
-{
- cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
- if (cpumask_empty(to_cpumask(rcp->cpumask))) {
- /* batch completed ! */
- rcp->completed = rcp->cur;
- rcu_start_batch(rcp);
- }
-}
-
-/*
- * Check if the cpu has gone through a quiescent state (say context
- * switch). If so and if it already hasn't done so in this RCU
- * quiescent cycle, then indicate that it has done so.
- */
-static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- unsigned long flags;
-
- if (rdp->quiescbatch != rcp->cur) {
- /* start new grace period: */
- rdp->qs_pending = 1;
- rdp->passed_quiesc = 0;
- rdp->quiescbatch = rcp->cur;
- return;
- }
-
- /* Grace period already completed for this cpu?
- * qs_pending is checked instead of the actual bitmap to avoid
- * cacheline trashing.
- */
- if (!rdp->qs_pending)
- return;
-
- /*
- * Was there a quiescent state since the beginning of the grace
- * period? If no, then exit and wait for the next call.
- */
- if (!rdp->passed_quiesc)
- return;
- rdp->qs_pending = 0;
-
- spin_lock_irqsave(&rcp->lock, flags);
- /*
- * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
- * during cpu startup. Ignore the quiescent state.
- */
- if (likely(rdp->quiescbatch == rcp->cur))
- cpu_quiet(rdp->cpu, rcp);
-
- spin_unlock_irqrestore(&rcp->lock, flags);
-}
-
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
- * locking requirements, the list it's pulling from has to belong to a cpu
- * which is dead and hence not processing interrupts.
- */
-static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
- struct rcu_head **tail, long batch)
-{
- unsigned long flags;
-
- if (list) {
- local_irq_save(flags);
- this_rdp->batch = batch;
- *this_rdp->nxttail[2] = list;
- this_rdp->nxttail[2] = tail;
- local_irq_restore(flags);
- }
-}
-
-static void __rcu_offline_cpu(struct rcu_data *this_rdp,
- struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
-{
- unsigned long flags;
-
- /*
- * if the cpu going offline owns the grace period
- * we can block indefinitely waiting for it, so flush
- * it here
- */
- spin_lock_irqsave(&rcp->lock, flags);
- if (rcp->cur != rcp->completed)
- cpu_quiet(rdp->cpu, rcp);
- rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
- rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
- spin_unlock(&rcp->lock);
-
- this_rdp->qlen += rdp->qlen;
- local_irq_restore(flags);
-}
-
-static void rcu_offline_cpu(int cpu)
-{
- struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
- struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
-
- __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
- &per_cpu(rcu_data, cpu));
- __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
- &per_cpu(rcu_bh_data, cpu));
- put_cpu_var(rcu_data);
- put_cpu_var(rcu_bh_data);
-}
-
-#else
-
-static void rcu_offline_cpu(int cpu)
-{
-}
-
-#endif
-
-/*
- * This does the RCU processing work from softirq context.
- */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- unsigned long flags;
- long completed_snap;
-
- if (rdp->nxtlist) {
- local_irq_save(flags);
- completed_snap = ACCESS_ONCE(rcp->completed);
-
- /*
- * move the other grace-period-completed entries to
- * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
- */
- if (!rcu_batch_before(completed_snap, rdp->batch))
- rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
- else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
- rdp->nxttail[0] = rdp->nxttail[1];
-
- /*
- * the grace period for entries in
- * [rdp->nxtlist, *rdp->nxttail[0]) has completed and
- * move these entries to donelist
- */
- if (rdp->nxttail[0] != &rdp->nxtlist) {
- *rdp->donetail = rdp->nxtlist;
- rdp->donetail = rdp->nxttail[0];
- rdp->nxtlist = *rdp->nxttail[0];
- *rdp->donetail = NULL;
-
- if (rdp->nxttail[1] == rdp->nxttail[0])
- rdp->nxttail[1] = &rdp->nxtlist;
- if (rdp->nxttail[2] == rdp->nxttail[0])
- rdp->nxttail[2] = &rdp->nxtlist;
- rdp->nxttail[0] = &rdp->nxtlist;
- }
-
- local_irq_restore(flags);
-
- if (rcu_batch_after(rdp->batch, rcp->pending)) {
- unsigned long flags2;
-
- /* and start it/schedule start if it's a new batch */
- spin_lock_irqsave(&rcp->lock, flags2);
- if (rcu_batch_after(rdp->batch, rcp->pending)) {
- rcp->pending = rdp->batch;
- rcu_start_batch(rcp);
- }
- spin_unlock_irqrestore(&rcp->lock, flags2);
- }
- }
-
- rcu_check_quiescent_state(rcp, rdp);
- if (rdp->donelist)
- rcu_do_batch(rdp);
-}
-
-static void rcu_process_callbacks(struct softirq_action *unused)
-{
- /*
- * Memory references from any prior RCU read-side critical sections
- * executed by the interrupted code must be see before any RCU
- * grace-period manupulations below.
- */
-
- smp_mb(); /* See above block comment. */
-
- __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
- __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
-
- /*
- * Memory references from any later RCU read-side critical sections
- * executed by the interrupted code must be see after any RCU
- * grace-period manupulations above.
- */
-
- smp_mb(); /* See above block comment. */
-}
-
-static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
-{
- /* Check for CPU stalls, if enabled. */
- check_cpu_stall(rcp);
-
- if (rdp->nxtlist) {
- long completed_snap = ACCESS_ONCE(rcp->completed);
-
- /*
- * This cpu has pending rcu entries and the grace period
- * for them has completed.
- */
- if (!rcu_batch_before(completed_snap, rdp->batch))
- return 1;
- if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
- rdp->nxttail[0] != rdp->nxttail[1])
- return 1;
- if (rdp->nxttail[0] != &rdp->nxtlist)
- return 1;
-
- /*
- * This cpu has pending rcu entries and the new batch
- * for then hasn't been started nor scheduled start
- */
- if (rcu_batch_after(rdp->batch, rcp->pending))
- return 1;
- }
-
- /* This cpu has finished callbacks to invoke */
- if (rdp->donelist)
- return 1;
-
- /* The rcu core waits for a quiescent state from the cpu */
- if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
- return 1;
-
- /* nothing to do */
- return 0;
-}
-
-/*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, returning 1 if so. This function is part of the
- * RCU implementation; it is -not- an exported member of the RCU API.
- */
-int rcu_pending(int cpu)
-{
- return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
- __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
-}
-
-/*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so. This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
- */
-int rcu_needs_cpu(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
-
- return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
-}
-
-/*
- * Top-level function driving RCU grace-period detection, normally
- * invoked from the scheduler-clock interrupt. This function simply
- * increments counters that are read only from softirq by this same
- * CPU, so there are no memory barriers required.
- */
-void rcu_check_callbacks(int cpu, int user)
-{
- if (user ||
- (idle_cpu(cpu) && rcu_scheduler_active &&
- !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
-
- /*
- * Get here if this CPU took its interrupt from user
- * mode or from the idle loop, and if this is not a
- * nested interrupt. In this case, the CPU is in
- * a quiescent state, so count it.
- *
- * Also do a memory barrier. This is needed to handle
- * the case where writes from a preempt-disable section
- * of code get reordered into schedule() by this CPU's
- * write buffer. The memory barrier makes sure that
- * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are see
- * by other CPUs to happen after any such write.
- */
-
- smp_mb(); /* See above block comment. */
- rcu_qsctr_inc(cpu);
- rcu_bh_qsctr_inc(cpu);
-
- } else if (!in_softirq()) {
-
- /*
- * Get here if this CPU did not take its interrupt from
- * softirq, in other words, if it is not interrupting
- * a rcu_bh read-side critical section. This is an _bh
- * critical section, so count it. The memory barrier
- * is needed for the same reason as is the above one.
- */
-
- smp_mb(); /* See above block comment. */
- rcu_bh_qsctr_inc(cpu);
- }
- raise_rcu_softirq();
-}
-
-static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rcp->lock, flags);
- memset(rdp, 0, sizeof(*rdp));
- rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
- rdp->donetail = &rdp->donelist;
- rdp->quiescbatch = rcp->completed;
- rdp->qs_pending = 0;
- rdp->cpu = cpu;
- rdp->blimit = blimit;
- spin_unlock_irqrestore(&rcp->lock, flags);
-}
-
-static void __cpuinit rcu_online_cpu(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
-
- rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
- rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
-}
-
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- long cpu = (long)hcpu;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rcu_online_cpu(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- rcu_offline_cpu(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata rcu_nb = {
- .notifier_call = rcu_cpu_notify,
-};
-
-/*
- * Initializes rcu mechanism. Assumed to be called early.
- * That is before local timer(SMP) or jiffie timer (uniproc) is setup.
- * Note that rcu_qsctr and friends are implicitly
- * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
- */
-void __init __rcu_init(void)
-{
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
- rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
- (void *)(long)smp_processor_id());
- /* Register notifier for non-boot CPUs */
- register_cpu_notifier(&rcu_nb);
-}
-
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
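
Removing this file does not change the external API it documented; call_rcu(), call_rcu_bh() and friends keep their semantics and are now backed by the tree RCU implementations. Typical caller-side usage, for reference (hypothetical structure):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
	int value;
	struct rcu_head rcu;
};

static void demo_node_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_node, rcu));
}

static void demo_node_release(struct demo_node *node)
{
	/* freed only after all pre-existing RCU readers have finished */
	call_rcu(&node->rcu, demo_node_free_rcu);
}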
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a967c9feb90a..bd5d5c8e5140 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -98,6 +98,30 @@ void synchronize_rcu(void)
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
+/**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
+ */
+void synchronize_rcu_bh(void)
+{
+ struct rcu_synchronize rcu;
+
+ if (rcu_blocking_is_gp())
+ return;
+
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu_bh(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
static void rcu_barrier_callback(struct rcu_head *notused)
{
if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -129,6 +153,7 @@ static void rcu_barrier_func(void *type)
static inline void wait_migrated_callbacks(void)
{
wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+ smp_mb(); /* In case we didn't sleep. */
}
/*
@@ -192,9 +217,13 @@ static void rcu_migrate_callback(struct rcu_head *notused)
wake_up(&rcu_migrate_wq);
}
+extern int rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
+
static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
unsigned long action, void *hcpu)
{
+ rcu_cpu_notify(self, action, hcpu);
if (action == CPU_DYING) {
/*
* preempt_disable() in on_each_cpu() prevents stop_machine(),
@@ -209,7 +238,8 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
- } else if (action == CPU_POST_DEAD) {
+ } else if (action == CPU_DOWN_PREPARE) {
+ /* Don't need to wait until next removal operation. */
/* rcu_migrate_head is protected by cpu_add_remove_lock */
wait_migrated_callbacks();
}
@@ -219,8 +249,18 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
void __init rcu_init(void)
{
+ int i;
+
__rcu_init();
- hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
+ cpu_notifier(rcu_barrier_cpu_hotplug, 0);
+
+ /*
+ * We don't need protection against CPU-hotplug here because
+ * this is called early in boot, before either interrupts
+ * or the scheduler are operational.
+ */
+ for_each_online_cpu(i)
+ rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
}
void rcu_scheduler_starting(void)
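The synchronize_rcu_bh() added above gives updaters a blocking counterpart to call_rcu_bh(). A minimal usage sketch, assuming a single serialized updater; struct my_cfg and the my_* functions are invented for illustration and are not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical pointer published to softirq-context readers. */
struct my_cfg {
	int threshold;
};
static struct my_cfg *my_cur_cfg;

/* Reader side: an rcu_bh read-side critical section, e.g. from softirq. */
static int my_read_threshold(void)
{
	struct my_cfg *cfg;
	int t = 0;

	rcu_read_lock_bh();
	cfg = rcu_dereference(my_cur_cfg);
	if (cfg)
		t = cfg->threshold;
	rcu_read_unlock_bh();
	return t;
}

/* Updater side: publish, wait out the _bh grace period, then free. */
static int my_set_threshold(int t)
{
	struct my_cfg *newc = kmalloc(sizeof(*newc), GFP_KERNEL);
	struct my_cfg *oldc = my_cur_cfg;

	if (!newc)
		return -ENOMEM;
	newc->threshold = t;
	rcu_assign_pointer(my_cur_cfg, newc);
	synchronize_rcu_bh();	/* all pre-existing _bh readers are done */
	kfree(oldc);
	return 0;
}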
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
deleted file mode 100644
index beb0e659adcc..000000000000
--- a/kernel/rcupreempt.c
+++ /dev/null
@@ -1,1539 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion, realtime implementation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2006
- *
- * Authors: Paul E. McKenney <paulmck@us.ibm.com>
- * With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar
- * for pushing me away from locks and towards counters, and
- * to Suparna Bhattacharya for pushing me completely away
- * from atomic instructions on the read side.
- *
- * - Added handling of Dynamic Ticks
- * Copyright 2007 - Paul E. McKenney <paulmck@us.ibm.com>
- * - Steven Rostedt <srostedt@redhat.com>
- *
- * Papers: http://www.rdrop.com/users/paulmck/RCU
- *
- * Design Document: http://lwn.net/Articles/253651/
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU/ *.txt
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/rcupdate.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/completion.h>
-#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/cpumask.h>
-#include <linux/rcupreempt_trace.h>
-#include <asm/byteorder.h>
-
-/*
- * PREEMPT_RCU data structures.
- */
-
-/*
- * GP_STAGES specifies the number of times the state machine has
- * to go through all the rcu_try_flip_states (see below)
- * in a single Grace Period.
- *
- * GP in GP_STAGES stands for Grace Period ;)
- */
-#define GP_STAGES 2
-struct rcu_data {
- spinlock_t lock; /* Protect rcu_data fields. */
- long completed; /* Number of last completed batch. */
- int waitlistcount;
- struct rcu_head *nextlist;
- struct rcu_head **nexttail;
- struct rcu_head *waitlist[GP_STAGES];
- struct rcu_head **waittail[GP_STAGES];
- struct rcu_head *donelist; /* from waitlist & waitschedlist */
- struct rcu_head **donetail;
- long rcu_flipctr[2];
- struct rcu_head *nextschedlist;
- struct rcu_head **nextschedtail;
- struct rcu_head *waitschedlist;
- struct rcu_head **waitschedtail;
- int rcu_sched_sleeping;
-#ifdef CONFIG_RCU_TRACE
- struct rcupreempt_trace trace;
-#endif /* #ifdef CONFIG_RCU_TRACE */
-};
-
-/*
- * States for rcu_try_flip() and friends.
- */
-
-enum rcu_try_flip_states {
-
- /*
- * Stay here if nothing is happening. Flip the counter if something
- * starts happening. Denoted by "I"
- */
- rcu_try_flip_idle_state,
-
- /*
- * Wait here for all CPUs to notice that the counter has flipped. This
- * prevents the old set of counters from ever being incremented once
- * we leave this state, which in turn is necessary because we cannot
- * test any individual counter for zero -- we can only check the sum.
- * Denoted by "A".
- */
- rcu_try_flip_waitack_state,
-
- /*
- * Wait here for the sum of the old per-CPU counters to reach zero.
- * Denoted by "Z".
- */
- rcu_try_flip_waitzero_state,
-
- /*
- * Wait here for each of the other CPUs to execute a memory barrier.
- * This is necessary to ensure that these other CPUs really have
- * completed executing their RCU read-side critical sections, despite
- * their CPUs wildly reordering memory. Denoted by "M".
- */
- rcu_try_flip_waitmb_state,
-};
-
-/*
- * States for rcu_ctrlblk.rcu_sched_sleep.
- */
-
-enum rcu_sched_sleep_states {
- rcu_sched_not_sleeping, /* Not sleeping, callbacks need GP. */
- rcu_sched_sleep_prep, /* Thinking of sleeping, rechecking. */
- rcu_sched_sleeping, /* Sleeping, awaken if GP needed. */
-};
-
-struct rcu_ctrlblk {
- spinlock_t fliplock; /* Protect state-machine transitions. */
- long completed; /* Number of last completed batch. */
- enum rcu_try_flip_states rcu_try_flip_state; /* The current state of
- the rcu state machine */
- spinlock_t schedlock; /* Protect rcu_sched sleep state. */
- enum rcu_sched_sleep_states sched_sleep; /* rcu_sched state. */
- wait_queue_head_t sched_wq; /* Place for rcu_sched to sleep. */
-};
-
-struct rcu_dyntick_sched {
- int dynticks;
- int dynticks_snap;
- int sched_qs;
- int sched_qs_snap;
- int sched_dynticks_snap;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
- .dynticks = 1,
-};
-
-void rcu_qsctr_inc(int cpu)
-{
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- rdssp->sched_qs++;
-}
-
-#ifdef CONFIG_NO_HZ
-
-void rcu_enter_nohz(void)
-{
- static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
- smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
- __get_cpu_var(rcu_dyntick_sched).dynticks++;
- WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
-}
-
-void rcu_exit_nohz(void)
-{
- static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
- __get_cpu_var(rcu_dyntick_sched).dynticks++;
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
- WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
- &rs);
-}
-
-#endif /* CONFIG_NO_HZ */
-
-
-static DEFINE_PER_CPU(struct rcu_data, rcu_data);
-
-static struct rcu_ctrlblk rcu_ctrlblk = {
- .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
- .completed = 0,
- .rcu_try_flip_state = rcu_try_flip_idle_state,
- .schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock),
- .sched_sleep = rcu_sched_not_sleeping,
- .sched_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_ctrlblk.sched_wq),
-};
-
-static struct task_struct *rcu_sched_grace_period_task;
-
-#ifdef CONFIG_RCU_TRACE
-static char *rcu_try_flip_state_names[] =
- { "idle", "waitack", "waitzero", "waitmb" };
-#endif /* #ifdef CONFIG_RCU_TRACE */
-
-static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
- = CPU_BITS_NONE;
-
-/*
- * Enum and per-CPU flag to determine when each CPU has seen
- * the most recent counter flip.
- */
-
-enum rcu_flip_flag_values {
- rcu_flip_seen, /* Steady/initial state, last flip seen. */
- /* Only GP detector can update. */
- rcu_flipped /* Flip just completed, need confirmation. */
- /* Only corresponding CPU can update. */
-};
-static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag)
- = rcu_flip_seen;
-
-/*
- * Enum and per-CPU flag to determine when each CPU has executed the
- * needed memory barrier to fence in memory references from its last RCU
- * read-side critical section in the just-completed grace period.
- */
-
-enum rcu_mb_flag_values {
- rcu_mb_done, /* Steady/initial state, no mb()s required. */
- /* Only GP detector can update. */
- rcu_mb_needed /* Flip just completed, need an mb(). */
- /* Only corresponding CPU can update. */
-};
-static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag)
- = rcu_mb_done;
-
-/*
- * RCU_DATA_ME: find the current CPU's rcu_data structure.
- * RCU_DATA_CPU: find the specified CPU's rcu_data structure.
- */
-#define RCU_DATA_ME() (&__get_cpu_var(rcu_data))
-#define RCU_DATA_CPU(cpu) (&per_cpu(rcu_data, cpu))
-
-/*
- * Helper macro for tracing when the appropriate rcu_data is not
- * cached in a local variable, but where the CPU number is so cached.
- */
-#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));
-
-/*
- * Helper macro for tracing when the appropriate rcu_data is not
- * cached in a local variable.
- */
-#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace));
-
-/*
- * Helper macro for tracing when the appropriate rcu_data is pointed
- * to by a local variable.
- */
-#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace));
-
-#define RCU_SCHED_BATCH_TIME (HZ / 50)
-
-/*
- * Return the number of RCU batches processed thus far. Useful
- * for debug and statistics.
- */
-long rcu_batches_completed(void)
-{
- return rcu_ctrlblk.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-void __rcu_read_lock(void)
-{
- int idx;
- struct task_struct *t = current;
- int nesting;
-
- nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
- if (nesting != 0) {
-
- /* An earlier rcu_read_lock() covers us, just count it. */
-
- t->rcu_read_lock_nesting = nesting + 1;
-
- } else {
- unsigned long flags;
-
- /*
- * We disable interrupts for the following reasons:
- * - If we get scheduling clock interrupt here, and we
- * end up acking the counter flip, it's like a promise
- * that we will never increment the old counter again.
- * Thus we will break that promise if that
- * scheduling clock interrupt happens between the time
- * we pick the .completed field and the time that we
- * increment our counter.
- *
- * - We don't want to be preempted out here.
- *
- * NMIs can still occur, of course, and might themselves
- * contain rcu_read_lock().
- */
-
- local_irq_save(flags);
-
- /*
- * Outermost nesting of rcu_read_lock(), so increment
- * the current counter for the current CPU. Use volatile
- * casts to prevent the compiler from reordering.
- */
-
- idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
- ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++;
-
- /*
- * Now that the per-CPU counter has been incremented, we
- * are protected from races with rcu_read_lock() invoked
- * from NMI handlers on this CPU. We can therefore safely
- * increment the nesting counter, relieving further NMIs
- * of the need to increment the per-CPU counter.
- */
-
- ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1;
-
- /*
- * Now that we have prevented any NMIs from storing
- * to the ->rcu_flipctr_idx, we can safely use it to
- * remember which counter to decrement in the matching
- * rcu_read_unlock().
- */
-
- ACCESS_ONCE(t->rcu_flipctr_idx) = idx;
- local_irq_restore(flags);
- }
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-void __rcu_read_unlock(void)
-{
- int idx;
- struct task_struct *t = current;
- int nesting;
-
- nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
- if (nesting > 1) {
-
- /*
- * We are still protected by the enclosing rcu_read_lock(),
- * so simply decrement the counter.
- */
-
- t->rcu_read_lock_nesting = nesting - 1;
-
- } else {
- unsigned long flags;
-
- /*
- * Disable local interrupts to prevent the grace-period
- * detection state machine from seeing us half-done.
- * NMIs can still occur, of course, and might themselves
- * contain rcu_read_lock() and rcu_read_unlock().
- */
-
- local_irq_save(flags);
-
- /*
- * Outermost nesting of rcu_read_unlock(), so we must
- * decrement the current counter for the current CPU.
- * This must be done carefully, because NMIs can
- * occur at any point in this code, and any rcu_read_lock()
- * and rcu_read_unlock() pairs in the NMI handlers
- * must interact non-destructively with this code.
- * Lots of volatile casts, and -very- careful ordering.
- *
- * Changes to this code, including this one, must be
- * inspected, validated, and tested extremely carefully!!!
- */
-
- /*
- * First, pick up the index.
- */
-
- idx = ACCESS_ONCE(t->rcu_flipctr_idx);
-
- /*
- * Now that we have fetched the counter index, it is
- * safe to decrement the per-task RCU nesting counter.
- * After this, any interrupts or NMIs will increment and
- * decrement the per-CPU counters.
- */
- ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1;
-
- /*
- * It is now safe to decrement this task's nesting count.
- * NMIs that occur after this statement will route their
- * rcu_read_lock() calls through this "else" clause, and
- * will thus start incrementing the per-CPU counter on
- * their own. They will also clobber ->rcu_flipctr_idx,
- * but that is OK, since we have already fetched it.
- */
-
- ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--;
- local_irq_restore(flags);
- }
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
-/*
- * If a global counter flip has occurred since the last time that we
- * advanced callbacks, advance them. Hardware interrupts must be
- * disabled when calling this function.
- */
-static void __rcu_advance_callbacks(struct rcu_data *rdp)
-{
- int cpu;
- int i;
- int wlc = 0;
-
- if (rdp->completed != rcu_ctrlblk.completed) {
- if (rdp->waitlist[GP_STAGES - 1] != NULL) {
- *rdp->donetail = rdp->waitlist[GP_STAGES - 1];
- rdp->donetail = rdp->waittail[GP_STAGES - 1];
- RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp);
- }
- for (i = GP_STAGES - 2; i >= 0; i--) {
- if (rdp->waitlist[i] != NULL) {
- rdp->waitlist[i + 1] = rdp->waitlist[i];
- rdp->waittail[i + 1] = rdp->waittail[i];
- wlc++;
- } else {
- rdp->waitlist[i + 1] = NULL;
- rdp->waittail[i + 1] =
- &rdp->waitlist[i + 1];
- }
- }
- if (rdp->nextlist != NULL) {
- rdp->waitlist[0] = rdp->nextlist;
- rdp->waittail[0] = rdp->nexttail;
- wlc++;
- rdp->nextlist = NULL;
- rdp->nexttail = &rdp->nextlist;
- RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp);
- } else {
- rdp->waitlist[0] = NULL;
- rdp->waittail[0] = &rdp->waitlist[0];
- }
- rdp->waitlistcount = wlc;
- rdp->completed = rcu_ctrlblk.completed;
- }
-
- /*
- * Check to see if this CPU needs to report that it has seen
- * the most recent counter flip, thereby declaring that all
- * subsequent rcu_read_lock() invocations will respect this flip.
- */
-
- cpu = raw_smp_processor_id();
- if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
- smp_mb(); /* Subsequent counter accesses must see new value */
- per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
- smp_mb(); /* Subsequent RCU read-side critical sections */
- /* seen -after- acknowledgement. */
- }
-}
-
-#ifdef CONFIG_NO_HZ
-static DEFINE_PER_CPU(int, rcu_update_flag);
-
-/**
- * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI.
- *
- * If the CPU was idle with dynamic ticks active, this updates the
- * rcu_dyntick_sched.dynticks to let the RCU handling know that the
- * CPU is active.
- */
-void rcu_irq_enter(void)
-{
- int cpu = smp_processor_id();
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- if (per_cpu(rcu_update_flag, cpu))
- per_cpu(rcu_update_flag, cpu)++;
-
- /*
- * Only update if we are coming from a stopped ticks mode
- * (rcu_dyntick_sched.dynticks is even).
- */
- if (!in_interrupt() &&
- (rdssp->dynticks & 0x1) == 0) {
- /*
- * The following might seem like we could have a race
- * with NMI/SMIs. But this really isn't a problem.
- * Here we do a read/modify/write, and the race happens
- * when an NMI/SMI comes in after the read and before
- * the write. But NMI/SMIs will increment this counter
- * twice before returning, so the zero bit will not
- * be corrupted by the NMI/SMI which is the most important
- * part.
- *
- * The only thing is that we would bring back the counter
- * to the position it was in during the NMI/SMI.
- * But the zero bit would be set, so the rest of the
- * counter would again be ignored.
- *
- * On return from the IRQ, the counter may have the zero
- * bit be 0 and the counter the same as the return from
- * the NMI/SMI. If the state machine was so unlucky to
- * see that, it still doesn't matter, since all
- * RCU read-side critical sections on this CPU would
- * have already completed.
- */
- rdssp->dynticks++;
- /*
- * The following memory barrier ensures that any
- * rcu_read_lock() primitives in the irq handler
- * are seen by other CPUs to follow the above
- * increment to rcu_dyntick_sched.dynticks. This is
- * required in order for other CPUs to correctly
- * determine when it is safe to advance the RCU
- * grace-period state machine.
- */
- smp_mb(); /* see above block comment. */
- /*
- * Since we can't determine the dynamic tick mode from
- * the rcu_dyntick_sched.dynticks after this routine,
- * we use a second flag to acknowledge that we came
- * from an idle state with ticks stopped.
- */
- per_cpu(rcu_update_flag, cpu)++;
- /*
- * If we take an NMI/SMI now, they will also increment
- * the rcu_update_flag, and will not update the
- * rcu_dyntick_sched.dynticks on exit. That is for
- * this IRQ to do.
- */
- }
-}
-
-/**
- * rcu_irq_exit - Called from exiting Hard irq context.
- *
- * If the CPU was idle with dynamic ticks active, update the
- * rcu_dyntick_sched.dynticks to let the RCU handling be
- * aware that the CPU is going back to idle with no ticks.
- */
-void rcu_irq_exit(void)
-{
- int cpu = smp_processor_id();
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- /*
- * rcu_update_flag is set if we interrupted the CPU
- * when it was idle with ticks stopped.
- * Once this occurs, we keep track of interrupt nesting
- * because a NMI/SMI could also come in, and we still
- * only want the IRQ that started the increment of the
- * rcu_dyntick_sched.dynticks to be the one that modifies
- * it on exit.
- */
- if (per_cpu(rcu_update_flag, cpu)) {
- if (--per_cpu(rcu_update_flag, cpu))
- return;
-
- /* This must match the interrupt nesting */
- WARN_ON(in_interrupt());
-
- /*
- * If an NMI/SMI happens now we are still
- * protected by the rcu_dyntick_sched.dynticks being odd.
- */
-
- /*
- * The following memory barrier ensures that any
- * rcu_read_unlock() primitives in the irq handler
- * are seen by other CPUs to precede the following
- * increment to rcu_dyntick_sched.dynticks. This
- * is required in order for other CPUs to determine
- * when it is safe to advance the RCU grace-period
- * state machine.
- */
- smp_mb(); /* see above block comment. */
- rdssp->dynticks++;
- WARN_ON(rdssp->dynticks & 0x1);
- }
-}
-
-void rcu_nmi_enter(void)
-{
- rcu_irq_enter();
-}
-
-void rcu_nmi_exit(void)
-{
- rcu_irq_exit();
-}
-
-static void dyntick_save_progress_counter(int cpu)
-{
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- rdssp->dynticks_snap = rdssp->dynticks;
-}
-
-static inline int
-rcu_try_flip_waitack_needed(int cpu)
-{
- long curr;
- long snap;
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- curr = rdssp->dynticks;
- snap = rdssp->dynticks_snap;
- smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
-
- /*
- * If the CPU remained in dynticks mode for the entire time
- * and didn't take any interrupts, NMIs, SMIs, or whatever,
- * then it cannot be in the middle of an rcu_read_lock(), so
- * the next rcu_read_lock() it executes must use the new value
- * of the counter. So we can safely pretend that this CPU
- * already acknowledged the counter.
- */
-
- if ((curr == snap) && ((curr & 0x1) == 0))
- return 0;
-
- /*
- * If the CPU passed through or entered a dynticks idle phase with
- * no active irq handlers, then, as above, we can safely pretend
- * that this CPU already acknowledged the counter.
- */
-
- if ((curr - snap) > 2 || (curr & 0x1) == 0)
- return 0;
-
- /* We need this CPU to explicitly acknowledge the counter flip. */
-
- return 1;
-}
-
-static inline int
-rcu_try_flip_waitmb_needed(int cpu)
-{
- long curr;
- long snap;
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- curr = rdssp->dynticks;
- snap = rdssp->dynticks_snap;
- smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
-
- /*
- * If the CPU remained in dynticks mode for the entire time
- * and didn't take any interrupts, NMIs, SMIs, or whatever,
- * then it cannot have executed an RCU read-side critical section
- * during that time, so there is no need for it to execute a
- * memory barrier.
- */
-
- if ((curr == snap) && ((curr & 0x1) == 0))
- return 0;
-
- /*
- * If the CPU either entered or exited an outermost interrupt,
- * SMI, NMI, or whatever handler, then we know that it executed
- * a memory barrier when doing so. So we don't need another one.
- */
- if (curr != snap)
- return 0;
-
- /* We need the CPU to execute a memory barrier. */
-
- return 1;
-}
-
-static void dyntick_save_progress_counter_sched(int cpu)
-{
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- rdssp->sched_dynticks_snap = rdssp->dynticks;
-}
-
-static int rcu_qsctr_inc_needed_dyntick(int cpu)
-{
- long curr;
- long snap;
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- curr = rdssp->dynticks;
- snap = rdssp->sched_dynticks_snap;
- smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
-
- /*
- * If the CPU remained in dynticks mode for the entire time
- * and didn't take any interrupts, NMIs, SMIs, or whatever,
- * then it cannot be in the middle of an rcu_read_lock(), so
- * the next rcu_read_lock() it executes must use the new value
- * of the counter. Therefore, this CPU has been in a quiescent
- * state the entire time, and we don't need to wait for it.
- */
-
- if ((curr == snap) && ((curr & 0x1) == 0))
- return 0;
-
- /*
- * If the CPU passed through or entered a dynticks idle phase with
- * no active irq handlers, then, as above, this CPU has already
- * passed through a quiescent state.
- */
-
- if ((curr - snap) > 2 || (snap & 0x1) == 0)
- return 0;
-
- /* We need this CPU to go through a quiescent state. */
-
- return 1;
-}
-
-#else /* !CONFIG_NO_HZ */
-
-# define dyntick_save_progress_counter(cpu) do { } while (0)
-# define rcu_try_flip_waitack_needed(cpu) (1)
-# define rcu_try_flip_waitmb_needed(cpu) (1)
-
-# define dyntick_save_progress_counter_sched(cpu) do { } while (0)
-# define rcu_qsctr_inc_needed_dyntick(cpu) (1)
-
-#endif /* CONFIG_NO_HZ */
-
-static void save_qsctr_sched(int cpu)
-{
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- rdssp->sched_qs_snap = rdssp->sched_qs;
-}
-
-static inline int rcu_qsctr_inc_needed(int cpu)
-{
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- /*
- * If there has been a quiescent state, no more need to wait
- * on this CPU.
- */
-
- if (rdssp->sched_qs != rdssp->sched_qs_snap) {
- smp_mb(); /* force ordering with cpu entering schedule(). */
- return 0;
- }
-
- /* We need this CPU to go through a quiescent state. */
-
- return 1;
-}
-
-/*
- * Get here when RCU is idle. Decide whether we need to
- * move out of idle state, and return non-zero if so.
- * "Straightforward" approach for the moment, might later
- * use callback-list lengths, grace-period duration, or
- * some such to determine when to exit idle state.
- * Might also need a pre-idle test that does not acquire
- * the lock, but let's get the simple case working first...
- */
-
-static int
-rcu_try_flip_idle(void)
-{
- int cpu;
-
- RCU_TRACE_ME(rcupreempt_trace_try_flip_i1);
- if (!rcu_pending(smp_processor_id())) {
- RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1);
- return 0;
- }
-
- /*
- * Do the flip.
- */
-
- RCU_TRACE_ME(rcupreempt_trace_try_flip_g1);
- rcu_ctrlblk.completed++; /* stands in for rcu_try_flip_g2 */
-
- /*
- * Need a memory barrier so that other CPUs see the new
- * counter value before they see the subsequent change of all
- * the rcu_flip_flag instances to rcu_flipped.
- */
-
- smp_mb(); /* see above block comment. */
-
- /* Now ask each CPU for acknowledgement of the flip. */
-
- for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
- per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
- dyntick_save_progress_counter(cpu);
- }
-
- return 1;
-}
-
-/*
- * Wait for CPUs to acknowledge the flip.
- */
-
-static int
-rcu_try_flip_waitack(void)
-{
- int cpu;
-
- RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
- for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
- if (rcu_try_flip_waitack_needed(cpu) &&
- per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
- RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
- return 0;
- }
-
- /*
- * Make sure our checks above don't bleed into subsequent
- * waiting for the sum of the counters to reach zero.
- */
-
- smp_mb(); /* see above block comment. */
- RCU_TRACE_ME(rcupreempt_trace_try_flip_a2);
- return 1;
-}
-
-/*
- * Wait for collective ``last'' counter to reach zero,
- * then tell all CPUs to do an end-of-grace-period memory barrier.
- */
-
-static int
-rcu_try_flip_waitzero(void)
-{
- int cpu;
- int lastidx = !(rcu_ctrlblk.completed & 0x1);
- int sum = 0;
-
- /* Check to see if the sum of the "last" counters is zero. */
-
- RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
- for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
- sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
- if (sum != 0) {
- RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
- return 0;
- }
-
- /*
- * This ensures that the other CPUs see the call for
- * memory barriers -after- the sum has been observed to
- * reach zero here.
- */
- smp_mb(); /* ^^^^^^^^^^^^ */
-
- /* Call for a memory barrier from each CPU. */
- for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
- per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
- dyntick_save_progress_counter(cpu);
- }
-
- RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
- return 1;
-}
-
-/*
- * Wait for all CPUs to do their end-of-grace-period memory barrier.
- * Return 1 once all CPUs have done so.
- */
-
-static int
-rcu_try_flip_waitmb(void)
-{
- int cpu;
-
- RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
- for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
- if (rcu_try_flip_waitmb_needed(cpu) &&
- per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
- RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
- return 0;
- }
-
- smp_mb(); /* Ensure that the above checks precede any following flip. */
- RCU_TRACE_ME(rcupreempt_trace_try_flip_m2);
- return 1;
-}
-
-/*
- * Attempt a single flip of the counters. Remember, a single flip does
- * -not- constitute a grace period. Instead, the interval between
- * at least GP_STAGES consecutive flips is a grace period.
- *
- * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation
- * on a large SMP, they might want to use a hierarchical organization of
- * the per-CPU-counter pairs.
- */
-static void rcu_try_flip(void)
-{
- unsigned long flags;
-
- RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
- if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
- RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
- return;
- }
-
- /*
- * Take the next transition(s) through the RCU grace-period
- * flip-counter state machine.
- */
-
- switch (rcu_ctrlblk.rcu_try_flip_state) {
- case rcu_try_flip_idle_state:
- if (rcu_try_flip_idle())
- rcu_ctrlblk.rcu_try_flip_state =
- rcu_try_flip_waitack_state;
- break;
- case rcu_try_flip_waitack_state:
- if (rcu_try_flip_waitack())
- rcu_ctrlblk.rcu_try_flip_state =
- rcu_try_flip_waitzero_state;
- break;
- case rcu_try_flip_waitzero_state:
- if (rcu_try_flip_waitzero())
- rcu_ctrlblk.rcu_try_flip_state =
- rcu_try_flip_waitmb_state;
- break;
- case rcu_try_flip_waitmb_state:
- if (rcu_try_flip_waitmb())
- rcu_ctrlblk.rcu_try_flip_state =
- rcu_try_flip_idle_state;
- }
- spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
-}
-
-/*
- * Check to see if this CPU needs to do a memory barrier in order to
- * ensure that any prior RCU read-side critical sections have committed
- * their counter manipulations and critical-section memory references
- * before declaring the grace period to be completed.
- */
-static void rcu_check_mb(int cpu)
-{
- if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
- smp_mb(); /* Ensure RCU read-side accesses are visible. */
- per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
- }
-}
-
-void rcu_check_callbacks(int cpu, int user)
-{
- unsigned long flags;
- struct rcu_data *rdp = RCU_DATA_CPU(cpu);
-
- /*
- * If this CPU took its interrupt from user mode or from the
- * idle loop, and this is not a nested interrupt, then
- * this CPU has to have exited all prior preempt-disable
- * sections of code. So increment the counter to note this.
- *
- * The memory barrier is needed to handle the case where
- * writes from a preempt-disable section of code get reordered
- * into schedule() by this CPU's write buffer. So the memory
- * barrier makes sure that the rcu_qsctr_inc() is seen by other
- * CPUs to happen after any such write.
- */
-
- if (user ||
- (idle_cpu(cpu) && !in_softirq() &&
- hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
- smp_mb(); /* Guard against aggressive schedule(). */
- rcu_qsctr_inc(cpu);
- }
-
- rcu_check_mb(cpu);
- if (rcu_ctrlblk.completed == rdp->completed)
- rcu_try_flip();
- spin_lock_irqsave(&rdp->lock, flags);
- RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
- __rcu_advance_callbacks(rdp);
- if (rdp->donelist == NULL) {
- spin_unlock_irqrestore(&rdp->lock, flags);
- } else {
- spin_unlock_irqrestore(&rdp->lock, flags);
- raise_softirq(RCU_SOFTIRQ);
- }
-}
-
-/*
- * Needed by dynticks, to make sure all RCU processing has finished
- * when we go idle:
- */
-void rcu_advance_callbacks(int cpu, int user)
-{
- unsigned long flags;
- struct rcu_data *rdp = RCU_DATA_CPU(cpu);
-
- if (rcu_ctrlblk.completed == rdp->completed) {
- rcu_try_flip();
- if (rcu_ctrlblk.completed == rdp->completed)
- return;
- }
- spin_lock_irqsave(&rdp->lock, flags);
- RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
- __rcu_advance_callbacks(rdp);
- spin_unlock_irqrestore(&rdp->lock, flags);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \
- *dsttail = srclist; \
- if (srclist != NULL) { \
- dsttail = srctail; \
- srclist = NULL; \
- srctail = &srclist;\
- } \
- } while (0)
-
-void rcu_offline_cpu(int cpu)
-{
- int i;
- struct rcu_head *list = NULL;
- unsigned long flags;
- struct rcu_data *rdp = RCU_DATA_CPU(cpu);
- struct rcu_head *schedlist = NULL;
- struct rcu_head **schedtail = &schedlist;
- struct rcu_head **tail = &list;
-
- /*
- * Remove all callbacks from the newly dead CPU, retaining order.
- * Otherwise rcu_barrier() will fail
- */
-
- spin_lock_irqsave(&rdp->lock, flags);
- rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
- for (i = GP_STAGES - 1; i >= 0; i--)
- rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
- list, tail);
- rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail);
- rcu_offline_cpu_enqueue(rdp->waitschedlist, rdp->waitschedtail,
- schedlist, schedtail);
- rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail,
- schedlist, schedtail);
- rdp->rcu_sched_sleeping = 0;
- spin_unlock_irqrestore(&rdp->lock, flags);
- rdp->waitlistcount = 0;
-
- /* Disengage the newly dead CPU from the grace-period computation. */
-
- spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
- rcu_check_mb(cpu);
- if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
- smp_mb(); /* Subsequent counter accesses must see new value */
- per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
- smp_mb(); /* Subsequent RCU read-side critical sections */
- /* seen -after- acknowledgement. */
- }
-
- RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
- RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];
-
- RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
- RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
-
- cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
-
- spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
-
- /*
- * Place the removed callbacks on the current CPU's queue.
- * Make them all start a new grace period: simple approach,
- * in theory could starve a given set of callbacks, but
- * you would need to be doing some serious CPU hotplugging
- * to make this happen. If this becomes a problem, adding
- * a synchronize_rcu() to the hotplug path would be a simple
- * fix.
- */
-
- local_irq_save(flags); /* disable preempt till we know what lock. */
- rdp = RCU_DATA_ME();
- spin_lock(&rdp->lock);
- *rdp->nexttail = list;
- if (list)
- rdp->nexttail = tail;
- *rdp->nextschedtail = schedlist;
- if (schedlist)
- rdp->nextschedtail = schedtail;
- spin_unlock_irqrestore(&rdp->lock, flags);
-}
-
-#else /* #ifdef CONFIG_HOTPLUG_CPU */
-
-void rcu_offline_cpu(int cpu)
-{
-}
-
-#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
-
-void __cpuinit rcu_online_cpu(int cpu)
-{
- unsigned long flags;
- struct rcu_data *rdp;
-
- spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
- cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
- spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
-
- /*
- * The rcu_sched grace-period processing might have bypassed
- * this CPU, given that it was not in the rcu_cpu_online_map
- * when the grace-period scan started. This means that the
- * grace-period task might sleep. So make sure that if this
- * should happen, the first callback posted to this CPU will
- * wake up the grace-period task if need be.
- */
-
- rdp = RCU_DATA_CPU(cpu);
- spin_lock_irqsave(&rdp->lock, flags);
- rdp->rcu_sched_sleeping = 1;
- spin_unlock_irqrestore(&rdp->lock, flags);
-}
-
-static void rcu_process_callbacks(struct softirq_action *unused)
-{
- unsigned long flags;
- struct rcu_head *next, *list;
- struct rcu_data *rdp;
-
- local_irq_save(flags);
- rdp = RCU_DATA_ME();
- spin_lock(&rdp->lock);
- list = rdp->donelist;
- if (list == NULL) {
- spin_unlock_irqrestore(&rdp->lock, flags);
- return;
- }
- rdp->donelist = NULL;
- rdp->donetail = &rdp->donelist;
- RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
- spin_unlock_irqrestore(&rdp->lock, flags);
- while (list) {
- next = list->next;
- list->func(list);
- list = next;
- RCU_TRACE_ME(rcupreempt_trace_invoke);
- }
-}
-
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
-{
- unsigned long flags;
- struct rcu_data *rdp;
-
- head->func = func;
- head->next = NULL;
- local_irq_save(flags);
- rdp = RCU_DATA_ME();
- spin_lock(&rdp->lock);
- __rcu_advance_callbacks(rdp);
- *rdp->nexttail = head;
- rdp->nexttail = &head->next;
- RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
- spin_unlock_irqrestore(&rdp->lock, flags);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
-void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
-{
- unsigned long flags;
- struct rcu_data *rdp;
- int wake_gp = 0;
-
- head->func = func;
- head->next = NULL;
- local_irq_save(flags);
- rdp = RCU_DATA_ME();
- spin_lock(&rdp->lock);
- *rdp->nextschedtail = head;
- rdp->nextschedtail = &head->next;
- if (rdp->rcu_sched_sleeping) {
-
- /* Grace-period processing might be sleeping... */
-
- rdp->rcu_sched_sleeping = 0;
- wake_gp = 1;
- }
- spin_unlock_irqrestore(&rdp->lock, flags);
- if (wake_gp) {
-
- /* Wake up grace-period processing, unless someone beat us. */
-
- spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
- if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping)
- wake_gp = 0;
- rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
- spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
- if (wake_gp)
- wake_up_interruptible(&rcu_ctrlblk.sched_wq);
- }
-}
-EXPORT_SYMBOL_GPL(call_rcu_sched);
-
-/*
- * Wait until all currently running preempt_disable() code segments
- * (including hardware-irq-disable segments) complete. Note that
- * in -rt this does -not- necessarily result in all currently executing
- * interrupt -handlers- having completed.
- */
-void __synchronize_sched(void)
-{
- struct rcu_synchronize rcu;
-
- if (num_online_cpus() == 1)
- return; /* blocking is gp if only one CPU! */
-
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu_sched(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(__synchronize_sched);
-
-/*
- * kthread function that manages call_rcu_sched grace periods.
- */
-static int rcu_sched_grace_period(void *arg)
-{
- int couldsleep; /* might sleep after current pass. */
- int couldsleepnext = 0; /* might sleep after next pass. */
- int cpu;
- unsigned long flags;
- struct rcu_data *rdp;
- int ret;
-
- /*
- * Each pass through the following loop handles one
- * rcu_sched grace period cycle.
- */
- do {
- /* Save each CPU's current state. */
-
- for_each_online_cpu(cpu) {
- dyntick_save_progress_counter_sched(cpu);
- save_qsctr_sched(cpu);
- }
-
- /*
- * Sleep for about an RCU grace-period's worth to
- * allow better batching and to consume less CPU.
- */
- schedule_timeout_interruptible(RCU_SCHED_BATCH_TIME);
-
- /*
- * If there was nothing to do last time, prepare to
- * sleep at the end of the current grace period cycle.
- */
- couldsleep = couldsleepnext;
- couldsleepnext = 1;
- if (couldsleep) {
- spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
- rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep;
- spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
- }
-
- /*
- * Wait on each CPU in turn to have either visited
- * a quiescent state or been in dynticks-idle mode.
- */
- for_each_online_cpu(cpu) {
- while (rcu_qsctr_inc_needed(cpu) &&
- rcu_qsctr_inc_needed_dyntick(cpu)) {
- /* resched_cpu(cpu); @@@ */
- schedule_timeout_interruptible(1);
- }
- }
-
- /* Advance callbacks for each CPU. */
-
- for_each_online_cpu(cpu) {
-
- rdp = RCU_DATA_CPU(cpu);
- spin_lock_irqsave(&rdp->lock, flags);
-
- /*
- * We are running on this CPU irq-disabled, so no
- * CPU can go offline until we re-enable irqs.
- * The current CPU might have already gone
- * offline (between the for_each_online_cpu and
- * the spin_lock_irqsave), but in that case all its
- * callback lists will be empty, so no harm done.
- *
- * Advance the callbacks! We share normal RCU's
- * donelist, since callbacks are invoked the
- * same way in either case.
- */
- if (rdp->waitschedlist != NULL) {
- *rdp->donetail = rdp->waitschedlist;
- rdp->donetail = rdp->waitschedtail;
-
- /*
- * Next rcu_check_callbacks() will
- * do the required raise_softirq().
- */
- }
- if (rdp->nextschedlist != NULL) {
- rdp->waitschedlist = rdp->nextschedlist;
- rdp->waitschedtail = rdp->nextschedtail;
- couldsleep = 0;
- couldsleepnext = 0;
- } else {
- rdp->waitschedlist = NULL;
- rdp->waitschedtail = &rdp->waitschedlist;
- }
- rdp->nextschedlist = NULL;
- rdp->nextschedtail = &rdp->nextschedlist;
-
- /* Mark sleep intention. */
-
- rdp->rcu_sched_sleeping = couldsleep;
-
- spin_unlock_irqrestore(&rdp->lock, flags);
- }
-
- /* If we saw callbacks on the last scan, go deal with them. */
-
- if (!couldsleep)
- continue;
-
- /* Attempt to block... */
-
- spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
- if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) {
-
- /*
- * Someone posted a callback after we scanned.
- * Go take care of it.
- */
- spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
- couldsleepnext = 0;
- continue;
- }
-
- /* Block until the next person posts a callback. */
-
- rcu_ctrlblk.sched_sleep = rcu_sched_sleeping;
- spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
- ret = 0; /* unused */
- __wait_event_interruptible(rcu_ctrlblk.sched_wq,
- rcu_ctrlblk.sched_sleep != rcu_sched_sleeping,
- ret);
-
- couldsleepnext = 0;
-
- } while (!kthread_should_stop());
-
- return (0);
-}
-
-/*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so. Assumes that notifiers would take care of handling any
- * outstanding requests from the RCU core.
- *
- * This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
- */
-int rcu_needs_cpu(int cpu)
-{
- struct rcu_data *rdp = RCU_DATA_CPU(cpu);
-
- return (rdp->donelist != NULL ||
- !!rdp->waitlistcount ||
- rdp->nextlist != NULL ||
- rdp->nextschedlist != NULL ||
- rdp->waitschedlist != NULL);
-}
-
-int rcu_pending(int cpu)
-{
- struct rcu_data *rdp = RCU_DATA_CPU(cpu);
-
- /* The CPU has at least one callback queued somewhere. */
-
- if (rdp->donelist != NULL ||
- !!rdp->waitlistcount ||
- rdp->nextlist != NULL ||
- rdp->nextschedlist != NULL ||
- rdp->waitschedlist != NULL)
- return 1;
-
- /* The RCU core needs an acknowledgement from this CPU. */
-
- if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
- (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
- return 1;
-
- /* This CPU has fallen behind the global grace-period number. */
-
- if (rdp->completed != rcu_ctrlblk.completed)
- return 1;
-
- /* Nothing needed from this CPU. */
-
- return 0;
-}
-
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- long cpu = (long)hcpu;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rcu_online_cpu(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- rcu_offline_cpu(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata rcu_nb = {
- .notifier_call = rcu_cpu_notify,
-};
-
-void __init __rcu_init(void)
-{
- int cpu;
- int i;
- struct rcu_data *rdp;
-
- printk(KERN_NOTICE "Preemptible RCU implementation.\n");
- for_each_possible_cpu(cpu) {
- rdp = RCU_DATA_CPU(cpu);
- spin_lock_init(&rdp->lock);
- rdp->completed = 0;
- rdp->waitlistcount = 0;
- rdp->nextlist = NULL;
- rdp->nexttail = &rdp->nextlist;
- for (i = 0; i < GP_STAGES; i++) {
- rdp->waitlist[i] = NULL;
- rdp->waittail[i] = &rdp->waitlist[i];
- }
- rdp->donelist = NULL;
- rdp->donetail = &rdp->donelist;
- rdp->rcu_flipctr[0] = 0;
- rdp->rcu_flipctr[1] = 0;
- rdp->nextschedlist = NULL;
- rdp->nextschedtail = &rdp->nextschedlist;
- rdp->waitschedlist = NULL;
- rdp->waitschedtail = &rdp->waitschedlist;
- rdp->rcu_sched_sleeping = 0;
- }
- register_cpu_notifier(&rcu_nb);
-
- /*
- * We don't need protection against CPU-Hotplug here
- * since
- * a) If a CPU comes online while we are iterating over the
- * cpu_online_mask below, we would only end up making a
- * duplicate call to rcu_online_cpu() which sets the corresponding
- * CPU's mask in the rcu_cpu_online_map.
- *
- * b) A CPU cannot go offline at this point in time since the user
- * does not have access to the sysfs interface, nor do we
- * suspend the system.
- */
- for_each_online_cpu(cpu)
- rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu);
-
- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
-}
-
-/*
- * Late-boot-time RCU initialization that must wait until after scheduler
- * has been initialized.
- */
-void __init rcu_init_sched(void)
-{
- rcu_sched_grace_period_task = kthread_run(rcu_sched_grace_period,
- NULL,
- "rcu_sched_grace_period");
- WARN_ON(IS_ERR(rcu_sched_grace_period_task));
-}
-
-#ifdef CONFIG_RCU_TRACE
-long *rcupreempt_flipctr(int cpu)
-{
- return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
-}
-EXPORT_SYMBOL_GPL(rcupreempt_flipctr);
-
-int rcupreempt_flip_flag(int cpu)
-{
- return per_cpu(rcu_flip_flag, cpu);
-}
-EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);
-
-int rcupreempt_mb_flag(int cpu)
-{
- return per_cpu(rcu_mb_flag, cpu);
-}
-EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);
-
-char *rcupreempt_try_flip_state_name(void)
-{
- return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
-}
-EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);
-
-struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
-{
- struct rcu_data *rdp = RCU_DATA_CPU(cpu);
-
- return &rdp->trace;
-}
-EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);
-
-#endif /* #ifdef RCU_TRACE */
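The deleted rcupreempt.c above implemented, among other things, this build's call_rcu() and call_rcu_sched(). Whichever implementation backs them, the caller-side pattern is unchanged: embed a struct rcu_head in the protected object and free it from the callback. A sketch with an invented struct my_node that is not part of this patch:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected object; names invented for illustration. */
struct my_node {
	int key;
	struct rcu_head rcu;
};

static void my_node_free_rcu(struct rcu_head *head)
{
	struct my_node *node = container_of(head, struct my_node, rcu);

	kfree(node);
}

static void my_node_retire(struct my_node *node)
{
	/* Defer the free until every pre-existing reader has finished. */
	call_rcu(&node->rcu, my_node_free_rcu);
}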
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
deleted file mode 100644
index 7c2665cac172..000000000000
--- a/kernel/rcupreempt_trace.c
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Read-Copy Update tracing for realtime implementation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2006
- *
- * Papers: http://www.rdrop.com/users/paulmck/RCU
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU/ *.txt
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/rcupdate.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/completion.h>
-#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/rcupreempt_trace.h>
-#include <linux/debugfs.h>
-
-static struct mutex rcupreempt_trace_mutex;
-static char *rcupreempt_trace_buf;
-#define RCUPREEMPT_TRACE_BUF_SIZE 4096
-
-void rcupreempt_trace_move2done(struct rcupreempt_trace *trace)
-{
- trace->done_length += trace->wait_length;
- trace->done_add += trace->wait_length;
- trace->wait_length = 0;
-}
-void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace)
-{
- trace->wait_length += trace->next_length;
- trace->wait_add += trace->next_length;
- trace->next_length = 0;
-}
-void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace)
-{
- atomic_inc(&trace->rcu_try_flip_1);
-}
-void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace)
-{
- atomic_inc(&trace->rcu_try_flip_e1);
-}
-void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_i1++;
-}
-void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_ie1++;
-}
-void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_g1++;
-}
-void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_a1++;
-}
-void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_ae1++;
-}
-void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_a2++;
-}
-void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_z1++;
-}
-void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_ze1++;
-}
-void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_z2++;
-}
-void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_m1++;
-}
-void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_me1++;
-}
-void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace)
-{
- trace->rcu_try_flip_m2++;
-}
-void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace)
-{
- trace->rcu_check_callbacks++;
-}
-void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace)
-{
- trace->done_remove += trace->done_length;
- trace->done_length = 0;
-}
-void rcupreempt_trace_invoke(struct rcupreempt_trace *trace)
-{
- atomic_inc(&trace->done_invoked);
-}
-void rcupreempt_trace_next_add(struct rcupreempt_trace *trace)
-{
- trace->next_add++;
- trace->next_length++;
-}
-
-static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
-{
- struct rcupreempt_trace *cp;
- int cpu;
-
- memset(sp, 0, sizeof(*sp));
- for_each_possible_cpu(cpu) {
- cp = rcupreempt_trace_cpu(cpu);
- sp->next_length += cp->next_length;
- sp->next_add += cp->next_add;
- sp->wait_length += cp->wait_length;
- sp->wait_add += cp->wait_add;
- sp->done_length += cp->done_length;
- sp->done_add += cp->done_add;
- sp->done_remove += cp->done_remove;
- atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
- sp->rcu_check_callbacks += cp->rcu_check_callbacks;
- atomic_add(atomic_read(&cp->rcu_try_flip_1),
- &sp->rcu_try_flip_1);
- atomic_add(atomic_read(&cp->rcu_try_flip_e1),
- &sp->rcu_try_flip_e1);
- sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
- sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
- sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
- sp->rcu_try_flip_a1 += cp->rcu_try_flip_a1;
- sp->rcu_try_flip_ae1 += cp->rcu_try_flip_ae1;
- sp->rcu_try_flip_a2 += cp->rcu_try_flip_a2;
- sp->rcu_try_flip_z1 += cp->rcu_try_flip_z1;
- sp->rcu_try_flip_ze1 += cp->rcu_try_flip_ze1;
- sp->rcu_try_flip_z2 += cp->rcu_try_flip_z2;
- sp->rcu_try_flip_m1 += cp->rcu_try_flip_m1;
- sp->rcu_try_flip_me1 += cp->rcu_try_flip_me1;
- sp->rcu_try_flip_m2 += cp->rcu_try_flip_m2;
- }
-}
-
-static ssize_t rcustats_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct rcupreempt_trace trace;
- ssize_t bcount;
- int cnt = 0;
-
- rcupreempt_trace_sum(&trace);
- mutex_lock(&rcupreempt_trace_mutex);
- snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
- "ggp=%ld rcc=%ld\n",
- rcu_batches_completed(),
- trace.rcu_check_callbacks);
- snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
- "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n"
- "1=%d e1=%d i1=%ld ie1=%ld g1=%ld a1=%ld ae1=%ld a2=%ld\n"
- "z1=%ld ze1=%ld z2=%ld m1=%ld me1=%ld m2=%ld\n",
-
- trace.next_add, trace.next_length,
- trace.wait_add, trace.wait_length,
- trace.done_add, trace.done_length,
- trace.done_remove, atomic_read(&trace.done_invoked),
- atomic_read(&trace.rcu_try_flip_1),
- atomic_read(&trace.rcu_try_flip_e1),
- trace.rcu_try_flip_i1, trace.rcu_try_flip_ie1,
- trace.rcu_try_flip_g1,
- trace.rcu_try_flip_a1, trace.rcu_try_flip_ae1,
- trace.rcu_try_flip_a2,
- trace.rcu_try_flip_z1, trace.rcu_try_flip_ze1,
- trace.rcu_try_flip_z2,
- trace.rcu_try_flip_m1, trace.rcu_try_flip_me1,
- trace.rcu_try_flip_m2);
- bcount = simple_read_from_buffer(buffer, count, ppos,
- rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
- mutex_unlock(&rcupreempt_trace_mutex);
- return bcount;
-}
-
-static ssize_t rcugp_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- long oldgp = rcu_batches_completed();
- ssize_t bcount;
-
- mutex_lock(&rcupreempt_trace_mutex);
- synchronize_rcu();
- snprintf(rcupreempt_trace_buf, RCUPREEMPT_TRACE_BUF_SIZE,
- "oldggp=%ld newggp=%ld\n", oldgp, rcu_batches_completed());
- bcount = simple_read_from_buffer(buffer, count, ppos,
- rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
- mutex_unlock(&rcupreempt_trace_mutex);
- return bcount;
-}
-
-static ssize_t rcuctrs_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- int cnt = 0;
- int cpu;
- int f = rcu_batches_completed() & 0x1;
- ssize_t bcount;
-
- mutex_lock(&rcupreempt_trace_mutex);
-
- cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE,
- "CPU last cur F M\n");
- for_each_online_cpu(cpu) {
- long *flipctr = rcupreempt_flipctr(cpu);
- cnt += snprintf(&rcupreempt_trace_buf[cnt],
- RCUPREEMPT_TRACE_BUF_SIZE - cnt,
- "%3d %4ld %3ld %d %d\n",
- cpu,
- flipctr[!f],
- flipctr[f],
- rcupreempt_flip_flag(cpu),
- rcupreempt_mb_flag(cpu));
- }
- cnt += snprintf(&rcupreempt_trace_buf[cnt],
- RCUPREEMPT_TRACE_BUF_SIZE - cnt,
- "ggp = %ld, state = %s\n",
- rcu_batches_completed(),
- rcupreempt_try_flip_state_name());
- cnt += snprintf(&rcupreempt_trace_buf[cnt],
- RCUPREEMPT_TRACE_BUF_SIZE - cnt,
- "\n");
- bcount = simple_read_from_buffer(buffer, count, ppos,
- rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
- mutex_unlock(&rcupreempt_trace_mutex);
- return bcount;
-}
-
-static struct file_operations rcustats_fops = {
- .owner = THIS_MODULE,
- .read = rcustats_read,
-};
-
-static struct file_operations rcugp_fops = {
- .owner = THIS_MODULE,
- .read = rcugp_read,
-};
-
-static struct file_operations rcuctrs_fops = {
- .owner = THIS_MODULE,
- .read = rcuctrs_read,
-};
-
-static struct dentry *rcudir, *statdir, *ctrsdir, *gpdir;
-static int rcupreempt_debugfs_init(void)
-{
- rcudir = debugfs_create_dir("rcu", NULL);
- if (!rcudir)
- goto out;
- statdir = debugfs_create_file("rcustats", 0444, rcudir,
- NULL, &rcustats_fops);
- if (!statdir)
- goto free_out;
-
- gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
- if (!gpdir)
- goto free_out;
-
- ctrsdir = debugfs_create_file("rcuctrs", 0444, rcudir,
- NULL, &rcuctrs_fops);
- if (!ctrsdir)
- goto free_out;
- return 0;
-free_out:
- if (statdir)
- debugfs_remove(statdir);
- if (gpdir)
- debugfs_remove(gpdir);
- debugfs_remove(rcudir);
-out:
- return 1;
-}
-
-static int __init rcupreempt_trace_init(void)
-{
- int ret;
-
- mutex_init(&rcupreempt_trace_mutex);
- rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
- if (!rcupreempt_trace_buf)
- return 1;
- ret = rcupreempt_debugfs_init();
- if (ret)
- kfree(rcupreempt_trace_buf);
- return ret;
-}
-
-static void __exit rcupreempt_trace_cleanup(void)
-{
- debugfs_remove(statdir);
- debugfs_remove(gpdir);
- debugfs_remove(ctrsdir);
- debugfs_remove(rcudir);
- kfree(rcupreempt_trace_buf);
-}
-
-
-module_init(rcupreempt_trace_init);
-module_exit(rcupreempt_trace_cleanup);
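The deleted rcupreempt_trace.c exported its statistics through debugfs files built from debugfs_create_dir()/debugfs_create_file() plus simple_read_from_buffer(). A stripped-down sketch of that same read-only debugfs pattern follows; the my_* names are invented for illustration and are not part of this patch:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical counter exposed read-only through debugfs. */
static unsigned long my_events;
static struct dentry *my_dir, *my_file;

static ssize_t my_stats_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	int len = snprintf(buf, sizeof(buf), "events=%lu\n", my_events);

	return simple_read_from_buffer(buffer, count, ppos, buf, len);
}

static const struct file_operations my_stats_fops = {
	.owner = THIS_MODULE,
	.read  = my_stats_read,
};

static int __init my_trace_init(void)
{
	my_dir = debugfs_create_dir("my_trace", NULL);
	if (!my_dir)
		return -ENOMEM;
	my_file = debugfs_create_file("stats", 0444, my_dir, NULL,
				      &my_stats_fops);
	if (!my_file) {
		debugfs_remove(my_dir);
		return -ENOMEM;
	}
	return 0;
}
module_init(my_trace_init);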
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9b4a975a4b4a..b33db539a8ad 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -257,14 +257,14 @@ struct rcu_torture_ops {
void (*init)(void);
void (*cleanup)(void);
int (*readlock)(void);
- void (*readdelay)(struct rcu_random_state *rrsp);
+ void (*read_delay)(struct rcu_random_state *rrsp);
void (*readunlock)(int idx);
int (*completed)(void);
- void (*deferredfree)(struct rcu_torture *p);
+ void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
void (*cb_barrier)(void);
int (*stats)(char *page);
- int irqcapable;
+ int irq_capable;
char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;
@@ -320,7 +320,7 @@ rcu_torture_cb(struct rcu_head *p)
rp->rtort_mbtest = 0;
rcu_torture_free(rp);
} else
- cur_ops->deferredfree(rp);
+ cur_ops->deferred_free(rp);
}
static void rcu_torture_deferred_free(struct rcu_torture *p)
@@ -329,18 +329,18 @@ static void rcu_torture_deferred_free(struct rcu_torture *p)
}
static struct rcu_torture_ops rcu_ops = {
- .init = NULL,
- .cleanup = NULL,
- .readlock = rcu_torture_read_lock,
- .readdelay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferredfree = rcu_torture_deferred_free,
- .sync = synchronize_rcu,
- .cb_barrier = rcu_barrier,
- .stats = NULL,
- .irqcapable = 1,
- .name = "rcu"
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .read_delay = rcu_read_delay,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferred_free = rcu_torture_deferred_free,
+ .sync = synchronize_rcu,
+ .cb_barrier = rcu_barrier,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu"
};
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
@@ -370,18 +370,18 @@ static void rcu_sync_torture_init(void)
}
static struct rcu_torture_ops rcu_sync_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = rcu_torture_read_lock,
- .readdelay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu,
- .cb_barrier = NULL,
- .stats = NULL,
- .irqcapable = 1,
- .name = "rcu_sync"
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .read_delay = rcu_read_delay,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = synchronize_rcu,
+ .cb_barrier = NULL,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_sync"
};
/*
@@ -432,33 +432,33 @@ static void rcu_bh_torture_synchronize(void)
}
static struct rcu_torture_ops rcu_bh_ops = {
- .init = NULL,
- .cleanup = NULL,
- .readlock = rcu_bh_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferredfree = rcu_bh_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
- .cb_barrier = rcu_barrier_bh,
- .stats = NULL,
- .irqcapable = 1,
- .name = "rcu_bh"
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferred_free = rcu_bh_torture_deferred_free,
+ .sync = rcu_bh_torture_synchronize,
+ .cb_barrier = rcu_barrier_bh,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_bh"
};
static struct rcu_torture_ops rcu_bh_sync_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = rcu_bh_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = rcu_bh_torture_synchronize,
- .cb_barrier = NULL,
- .stats = NULL,
- .irqcapable = 1,
- .name = "rcu_bh_sync"
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = rcu_bh_torture_synchronize,
+ .cb_barrier = NULL,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "rcu_bh_sync"
};
/*
@@ -530,17 +530,17 @@ static int srcu_torture_stats(char *page)
}
static struct rcu_torture_ops srcu_ops = {
- .init = srcu_torture_init,
- .cleanup = srcu_torture_cleanup,
- .readlock = srcu_torture_read_lock,
- .readdelay = srcu_read_delay,
- .readunlock = srcu_torture_read_unlock,
- .completed = srcu_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = srcu_torture_synchronize,
- .cb_barrier = NULL,
- .stats = srcu_torture_stats,
- .name = "srcu"
+ .init = srcu_torture_init,
+ .cleanup = srcu_torture_cleanup,
+ .readlock = srcu_torture_read_lock,
+ .read_delay = srcu_read_delay,
+ .readunlock = srcu_torture_read_unlock,
+ .completed = srcu_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = srcu_torture_synchronize,
+ .cb_barrier = NULL,
+ .stats = srcu_torture_stats,
+ .name = "srcu"
};
/*
@@ -574,32 +574,49 @@ static void sched_torture_synchronize(void)
}
static struct rcu_torture_ops sched_ops = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = sched_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = sched_torture_completed,
- .deferredfree = rcu_sched_torture_deferred_free,
- .sync = sched_torture_synchronize,
- .cb_barrier = rcu_barrier_sched,
- .stats = NULL,
- .irqcapable = 1,
- .name = "sched"
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = sched_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = sched_torture_read_unlock,
+ .completed = sched_torture_completed,
+ .deferred_free = rcu_sched_torture_deferred_free,
+ .sync = sched_torture_synchronize,
+ .cb_barrier = rcu_barrier_sched,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "sched"
};
static struct rcu_torture_ops sched_ops_sync = {
- .init = rcu_sync_torture_init,
- .cleanup = NULL,
- .readlock = sched_torture_read_lock,
- .readdelay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = sched_torture_completed,
- .deferredfree = rcu_sync_torture_deferred_free,
- .sync = sched_torture_synchronize,
- .cb_barrier = NULL,
- .stats = NULL,
- .name = "sched_sync"
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = sched_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = sched_torture_read_unlock,
+ .completed = sched_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = sched_torture_synchronize,
+ .cb_barrier = NULL,
+ .stats = NULL,
+ .name = "sched_sync"
+};
+
+extern int rcu_expedited_torture_stats(char *page);
+
+static struct rcu_torture_ops sched_expedited_ops = {
+ .init = rcu_sync_torture_init,
+ .cleanup = NULL,
+ .readlock = sched_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = sched_torture_read_unlock,
+ .completed = sched_torture_completed,
+ .deferred_free = rcu_sync_torture_deferred_free,
+ .sync = synchronize_sched_expedited,
+ .cb_barrier = NULL,
+ .stats = rcu_expedited_torture_stats,
+ .irq_capable = 1,
+ .name = "sched_expedited"
};
/*
@@ -635,7 +652,7 @@ rcu_torture_writer(void *arg)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
old_rp->rtort_pipe_count++;
- cur_ops->deferredfree(old_rp);
+ cur_ops->deferred_free(old_rp);
}
rcu_torture_current_version++;
oldbatch = cur_ops->completed();
@@ -700,7 +717,7 @@ static void rcu_torture_timer(unsigned long unused)
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
spin_lock(&rand_lock);
- cur_ops->readdelay(&rand);
+ cur_ops->read_delay(&rand);
n_rcu_torture_timers++;
spin_unlock(&rand_lock);
preempt_disable();
@@ -738,11 +755,11 @@ rcu_torture_reader(void *arg)
VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
set_user_nice(current, 19);
- if (irqreader && cur_ops->irqcapable)
+ if (irqreader && cur_ops->irq_capable)
setup_timer_on_stack(&t, rcu_torture_timer, 0);
do {
- if (irqreader && cur_ops->irqcapable) {
+ if (irqreader && cur_ops->irq_capable) {
if (!timer_pending(&t))
mod_timer(&t, 1);
}
@@ -757,7 +774,7 @@ rcu_torture_reader(void *arg)
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
- cur_ops->readdelay(&rand);
+ cur_ops->read_delay(&rand);
preempt_disable();
pipe_count = p->rtort_pipe_count;
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -778,7 +795,7 @@ rcu_torture_reader(void *arg)
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
rcutorture_shutdown_absorb("rcu_torture_reader");
- if (irqreader && cur_ops->irqcapable)
+ if (irqreader && cur_ops->irq_capable)
del_timer_sync(&t);
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
@@ -1078,6 +1095,7 @@ rcu_torture_init(void)
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
+ &sched_expedited_ops,
&srcu_ops, &sched_ops, &sched_ops_sync, };
mutex_lock(&fullstop_mutex);
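
The rcutorture hunks above do two things: they rename the rcu_torture_ops fields to the underscored spellings (read_delay, deferred_free, irq_capable) and they register the new sched_expedited_ops flavor in the torture_ops[] table. For readers unfamiliar with the ops-table idiom, here is a minimal userspace sketch of the same pattern, a table of per-flavor function-pointer structures selected by name at init time, much as rcu_torture_init() selects cur_ops. All names in the sketch are illustrative, not the kernel's.

/*
 * Minimal userspace sketch of the rcu_torture_ops selection pattern:
 * a table of per-flavor operation structures, chosen by name at init.
 * Names and fields here are illustrative only, not the kernel's API.
 */
#include <stdio.h>
#include <string.h>

struct torture_ops {
	int (*readlock)(void);		/* enter read-side critical section */
	void (*readunlock)(int idx);	/* leave read-side critical section */
	int irq_capable;		/* reader may run from irq context? */
	const char *name;
};

static int dummy_readlock(void) { return 0; }
static void dummy_readunlock(int idx) { (void)idx; }

static struct torture_ops flavor_a = {
	.readlock	= dummy_readlock,
	.readunlock	= dummy_readunlock,
	.irq_capable	= 1,
	.name		= "flavor_a",
};

static struct torture_ops flavor_b = {
	.readlock	= dummy_readlock,
	.readunlock	= dummy_readunlock,
	.irq_capable	= 0,
	.name		= "flavor_b",
};

int main(void)
{
	struct torture_ops *table[] = { &flavor_a, &flavor_b };
	const char *wanted = "flavor_b";
	struct torture_ops *cur = NULL;
	size_t i;

	/* Select the requested flavor by name. */
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strcmp(table[i]->name, wanted) == 0)
			cur = table[i];
	if (!cur) {
		fprintf(stderr, "unknown flavor %s\n", wanted);
		return 1;
	}
	printf("using %s (irq_capable=%d)\n", cur->name, cur->irq_capable);
	cur->readunlock(cur->readlock());
	return 0;
}
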
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7717b95c2027..6b11b07cfe7f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -35,6 +35,7 @@
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/nmi.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
@@ -46,6 +47,8 @@
#include <linux/mutex.h>
#include <linux/time.h>
+#include "rcutree.h"
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
@@ -72,30 +75,59 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
.n_force_qs_ngp = 0, \
}
-struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
-DEFINE_PER_CPU(struct rcu_data, rcu_data);
+struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+extern long rcu_batches_completed_sched(void);
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
+static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
+ struct rcu_node *rnp, unsigned long flags);
+static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
+#ifdef CONFIG_HOTPLUG_CPU
+static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+static void __rcu_process_callbacks(struct rcu_state *rsp,
+ struct rcu_data *rdp);
+static void __call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu),
+ struct rcu_state *rsp);
+static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
+ int preemptable);
+
+#include "rcutree_plugin.h"
+
/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
+ * Note a quiescent state. Because we do not need to know
* how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
+ * one since the start of the grace period, this just sets a flag.
*/
-void rcu_qsctr_inc(int cpu)
+void rcu_sched_qs(int cpu)
{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ local_irq_save(flags);
+ rdp = &per_cpu(rcu_sched_data, cpu);
rdp->passed_quiesc = 1;
rdp->passed_quiesc_completed = rdp->completed;
+ rcu_preempt_qs(cpu);
+ local_irq_restore(flags);
}
-void rcu_bh_qsctr_inc(int cpu)
+void rcu_bh_qs(int cpu)
{
- struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+ unsigned long flags;
+ struct rcu_data *rdp;
+
+ local_irq_save(flags);
+ rdp = &per_cpu(rcu_bh_data, cpu);
rdp->passed_quiesc = 1;
rdp->passed_quiesc_completed = rdp->completed;
+ local_irq_restore(flags);
}
#ifdef CONFIG_NO_HZ
@@ -110,15 +142,16 @@ static int qhimark = 10000; /* If this many pending, ignore blimit. */
static int qlowmark = 100; /* Once only this many pending, use blimit. */
static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+static int rcu_pending(int cpu);
/*
- * Return the number of RCU batches processed thus far for debug & stats.
+ * Return the number of RCU-sched batches processed thus far for debug & stats.
*/
-long rcu_batches_completed(void)
+long rcu_batches_completed_sched(void)
{
- return rcu_state.completed;
+ return rcu_sched_state.completed;
}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
+EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
/*
* Return the number of RCU BH batches processed thus far for debug & stats.
@@ -181,6 +214,10 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
return 1;
}
+ /* If preemptable RCU, no point in sending reschedule IPI. */
+ if (rdp->preemptable)
+ return 0;
+
/* The CPU is online, so send it a reschedule IPI. */
if (rdp->cpu != smp_processor_id())
smp_send_reschedule(rdp->cpu);
@@ -193,7 +230,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
#endif /* #ifdef CONFIG_SMP */
#ifdef CONFIG_NO_HZ
-static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
/**
* rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -213,7 +249,7 @@ void rcu_enter_nohz(void)
rdtp = &__get_cpu_var(rcu_dynticks);
rdtp->dynticks++;
rdtp->dynticks_nesting--;
- WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+ WARN_ON_ONCE(rdtp->dynticks & 0x1);
local_irq_restore(flags);
}
@@ -232,7 +268,7 @@ void rcu_exit_nohz(void)
rdtp = &__get_cpu_var(rcu_dynticks);
rdtp->dynticks++;
rdtp->dynticks_nesting++;
- WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+ WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
local_irq_restore(flags);
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
@@ -251,7 +287,7 @@ void rcu_nmi_enter(void)
if (rdtp->dynticks & 0x1)
return;
rdtp->dynticks_nmi++;
- WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+ WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
@@ -270,7 +306,7 @@ void rcu_nmi_exit(void)
return;
smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
rdtp->dynticks_nmi++;
- WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+ WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
}
/**
@@ -286,7 +322,7 @@ void rcu_irq_enter(void)
if (rdtp->dynticks_nesting++)
return;
rdtp->dynticks++;
- WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+ WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
@@ -305,10 +341,10 @@ void rcu_irq_exit(void)
return;
smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
rdtp->dynticks++;
- WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+ WARN_ON_ONCE(rdtp->dynticks & 0x1);
/* If the interrupt queued a callback, get out of dyntick mode. */
- if (__get_cpu_var(rcu_data).nxtlist ||
+ if (__get_cpu_var(rcu_sched_data).nxtlist ||
__get_cpu_var(rcu_bh_data).nxtlist)
set_need_resched();
}
@@ -461,6 +497,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
printk(KERN_ERR "INFO: RCU detected CPU stalls:");
for (; rnp_cur < rnp_end; rnp_cur++) {
+ rcu_print_task_stall(rnp);
if (rnp_cur->qsmask == 0)
continue;
for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
@@ -469,6 +506,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
}
printk(" (detected by %d, t=%ld jiffies)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start));
+ trigger_all_cpu_backtrace();
+
force_quiescent_state(rsp, 0); /* Kick them all. */
}
@@ -479,12 +518,14 @@ static void print_cpu_stall(struct rcu_state *rsp)
printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
smp_processor_id(), jiffies - rsp->gp_start);
- dump_stack();
+ trigger_all_cpu_backtrace();
+
spin_lock_irqsave(&rnp->lock, flags);
if ((long)(jiffies - rsp->jiffies_stall) >= 0)
rsp->jiffies_stall =
jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
spin_unlock_irqrestore(&rnp->lock, flags);
+
set_need_resched(); /* kick ourselves to get things going. */
}
@@ -674,6 +715,19 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
+ * Clean up after the prior grace period and let rcu_start_gp() start up
+ * the next grace period if one is needed. Note that the caller must
+ * hold rnp->lock, as required by rcu_start_gp(), which will release it.
+ */
+static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
+ __releases(rnp->lock)
+{
+ rsp->completed = rsp->gpnum;
+ rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
+ rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
+}
+
+/*
* Similar to cpu_quiet(), for which it is a helper function. Allows
* a group of CPUs to be quieted at one go, though all the CPUs in the
* group must be represented by the same leaf rcu_node structure.
@@ -694,7 +748,7 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
return;
}
rnp->qsmask &= ~mask;
- if (rnp->qsmask != 0) {
+ if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
/* Other bits still set at this level, so done. */
spin_unlock_irqrestore(&rnp->lock, flags);
@@ -714,14 +768,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
/*
* Get here if we are the last CPU to pass through a quiescent
- * state for this grace period. Clean up and let rcu_start_gp()
- * start up the next grace period if one is needed. Note that
- * we still hold rnp->lock, as required by rcu_start_gp(), which
- * will release it.
+ * state for this grace period. Invoke cpu_quiet_msk_finish()
+ * to clean up and start the next grace period if one is needed.
*/
- rsp->completed = rsp->gpnum;
- rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
- rcu_start_gp(rsp, flags); /* releases rnp->lock. */
+ cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
}
/*
@@ -828,11 +878,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
spin_lock(&rnp->lock); /* irqs already disabled. */
rnp->qsmaskinit &= ~mask;
if (rnp->qsmaskinit != 0) {
- spin_unlock(&rnp->lock); /* irqs already disabled. */
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
break;
}
+ rcu_preempt_offline_tasks(rsp, rnp);
mask = rnp->grpmask;
- spin_unlock(&rnp->lock); /* irqs already disabled. */
+ spin_unlock(&rnp->lock); /* irqs remain disabled. */
rnp = rnp->parent;
} while (rnp != NULL);
lastcomp = rsp->completed;
@@ -845,7 +896,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
/*
* Move callbacks from the outgoing CPU to the running CPU.
 * Note that the outgoing CPU is now quiescent, so it is now
- * (uncharacteristically) safe to access it rcu_data structure.
+ * (uncharacteristically) safe to access its rcu_data structure.
* Note also that we must carefully retain the order of the
* outgoing CPU's callbacks in order for rcu_barrier() to work
* correctly. Finally, note that we start all the callbacks
@@ -876,8 +927,9 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
*/
static void rcu_offline_cpu(int cpu)
{
- __rcu_offline_cpu(cpu, &rcu_state);
+ __rcu_offline_cpu(cpu, &rcu_sched_state);
__rcu_offline_cpu(cpu, &rcu_bh_state);
+ rcu_preempt_offline_cpu(cpu);
}
#else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -963,6 +1015,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
*/
void rcu_check_callbacks(int cpu, int user)
{
+ if (!rcu_pending(cpu))
+ return; /* if nothing for RCU to do. */
if (user ||
(idle_cpu(cpu) && rcu_scheduler_active &&
!in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -971,17 +1025,16 @@ void rcu_check_callbacks(int cpu, int user)
* Get here if this CPU took its interrupt from user
* mode or from the idle loop, and if this is not a
* nested interrupt. In this case, the CPU is in
- * a quiescent state, so count it.
+ * a quiescent state, so note it.
*
* No memory barrier is required here because both
- * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
- * only CPU-local variables that other CPUs neither
- * access nor modify, at least not while the corresponding
- * CPU is online.
+ * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
+ * variables that other CPUs neither access nor modify,
+ * at least not while the corresponding CPU is online.
*/
- rcu_qsctr_inc(cpu);
- rcu_bh_qsctr_inc(cpu);
+ rcu_sched_qs(cpu);
+ rcu_bh_qs(cpu);
} else if (!in_softirq()) {
@@ -989,11 +1042,12 @@ void rcu_check_callbacks(int cpu, int user)
* Get here if this CPU did not take its interrupt from
* softirq, in other words, if it is not interrupting
* a rcu_bh read-side critical section. This is an _bh
- * critical section, so count it.
+ * critical section, so note it.
*/
- rcu_bh_qsctr_inc(cpu);
+ rcu_bh_qs(cpu);
}
+ rcu_preempt_check_callbacks(cpu);
raise_softirq(RCU_SOFTIRQ);
}
@@ -1132,6 +1186,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
+ WARN_ON_ONCE(rdp->beenonline == 0);
+
/*
* If an RCU GP has gone long enough, go check for dyntick
* idle CPUs and, if needed, send resched IPIs.
@@ -1170,8 +1226,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
*/
smp_mb(); /* See above block comment. */
- __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+ __rcu_process_callbacks(&rcu_sched_state,
+ &__get_cpu_var(rcu_sched_data));
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+ rcu_preempt_process_callbacks();
/*
* Memory references from any later RCU read-side critical sections
@@ -1227,13 +1285,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
}
/*
- * Queue an RCU callback for invocation after a grace period.
+ * Queue an RCU-sched callback for invocation after a grace period.
*/
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
- __call_rcu(head, func, &rcu_state);
+ __call_rcu(head, func, &rcu_sched_state);
}
-EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_sched);
/*
 * Queue an RCU callback for invocation after a quicker grace period.
@@ -1305,10 +1363,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
* by the current CPU, returning 1 if so. This function is part of the
* RCU implementation; it is -not- an exported member of the RCU API.
*/
-int rcu_pending(int cpu)
+static int rcu_pending(int cpu)
{
- return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
- __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
+ return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
+ __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
+ rcu_preempt_pending(cpu);
}
/*
@@ -1320,27 +1379,46 @@ int rcu_pending(int cpu)
int rcu_needs_cpu(int cpu)
{
/* RCU callbacks either ready or pending? */
- return per_cpu(rcu_data, cpu).nxtlist ||
- per_cpu(rcu_bh_data, cpu).nxtlist;
+ return per_cpu(rcu_sched_data, cpu).nxtlist ||
+ per_cpu(rcu_bh_data, cpu).nxtlist ||
+ rcu_preempt_needs_cpu(cpu);
}
/*
- * Initialize a CPU's per-CPU RCU data. We take this "scorched earth"
- * approach so that we don't have to worry about how long the CPU has
- * been gone, or whether it ever was online previously. We do trust the
- * ->mynode field, as it is constant for a given struct rcu_data and
- * initialized during early boot.
- *
- * Note that only one online or offline event can be happening at a given
- * time. Note also that we can accept some slop in the rsp->completed
- * access due to the fact that this CPU cannot possibly have any RCU
- * callbacks in flight yet.
+ * Do boot-time initialization of a CPU's per-CPU RCU data.
*/
-static void __cpuinit
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+static void __init
+rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
int i;
+ struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_node *rnp = rcu_get_root(rsp);
+
+ /* Set up local state, ensuring consistent view of global state. */
+ spin_lock_irqsave(&rnp->lock, flags);
+ rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+ rdp->nxtlist = NULL;
+ for (i = 0; i < RCU_NEXT_SIZE; i++)
+ rdp->nxttail[i] = &rdp->nxtlist;
+ rdp->qlen = 0;
+#ifdef CONFIG_NO_HZ
+ rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+#endif /* #ifdef CONFIG_NO_HZ */
+ rdp->cpu = cpu;
+ spin_unlock_irqrestore(&rnp->lock, flags);
+}
+
+/*
+ * Initialize a CPU's per-CPU RCU data. Note that only one online or
+ * offline event can be happening at a given time. Note also that we
+ * can accept some slop in the rsp->completed access due to the fact
+ * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ */
+static void __cpuinit
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
+{
+ unsigned long flags;
long lastcomp;
unsigned long mask;
struct rcu_data *rdp = rsp->rda[cpu];
@@ -1354,17 +1432,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->passed_quiesc = 0; /* We could be racing with new GP, */
rdp->qs_pending = 1; /* so set up to respond to current GP. */
rdp->beenonline = 1; /* We have now been online. */
+ rdp->preemptable = preemptable;
rdp->passed_quiesc_completed = lastcomp - 1;
- rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
- rdp->nxtlist = NULL;
- for (i = 0; i < RCU_NEXT_SIZE; i++)
- rdp->nxttail[i] = &rdp->nxtlist;
- rdp->qlen = 0;
rdp->blimit = blimit;
-#ifdef CONFIG_NO_HZ
- rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-#endif /* #ifdef CONFIG_NO_HZ */
- rdp->cpu = cpu;
spin_unlock(&rnp->lock); /* irqs remain disabled. */
/*
@@ -1405,16 +1475,16 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
static void __cpuinit rcu_online_cpu(int cpu)
{
- rcu_init_percpu_data(cpu, &rcu_state);
- rcu_init_percpu_data(cpu, &rcu_bh_state);
- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+ rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
+ rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
+ rcu_preempt_init_percpu_data(cpu);
}
/*
- * Handle CPU online/offline notifcation events.
+ * Handle CPU online/offline notification events.
*/
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1486,6 +1556,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
rnp = rsp->level[i];
for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
spin_lock_init(&rnp->lock);
+ rnp->gpnum = 0;
rnp->qsmask = 0;
rnp->qsmaskinit = 0;
rnp->grplo = j * cpustride;
@@ -1503,16 +1574,20 @@ static void __init rcu_init_one(struct rcu_state *rsp)
j / rsp->levelspread[i - 1];
}
rnp->level = i;
+ INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
+ INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
}
}
}
/*
- * Helper macro for __rcu_init(). To be used nowhere else!
- * Assigns leaf node pointers into each CPU's rcu_data structure.
+ * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used
+ * nowhere else! Assigns leaf node pointers into each CPU's rcu_data
+ * structure.
*/
-#define RCU_DATA_PTR_INIT(rsp, rcu_data) \
+#define RCU_INIT_FLAVOR(rsp, rcu_data) \
do { \
+ rcu_init_one(rsp); \
rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
j = 0; \
for_each_possible_cpu(i) { \
@@ -1520,32 +1595,43 @@ do { \
j++; \
per_cpu(rcu_data, i).mynode = &rnp[j]; \
(rsp)->rda[i] = &per_cpu(rcu_data, i); \
+ rcu_boot_init_percpu_data(i, rsp); \
} \
} while (0)
-static struct notifier_block __cpuinitdata rcu_nb = {
- .notifier_call = rcu_cpu_notify,
-};
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+void __init __rcu_init_preempt(void)
+{
+ int i; /* All used by RCU_INIT_FLAVOR(). */
+ int j;
+ struct rcu_node *rnp;
+
+ RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+}
+
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+void __init __rcu_init_preempt(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
void __init __rcu_init(void)
{
- int i; /* All used by RCU_DATA_PTR_INIT(). */
+ int i; /* All used by RCU_INIT_FLAVOR(). */
int j;
struct rcu_node *rnp;
- printk(KERN_INFO "Hierarchical RCU implementation.\n");
+ rcu_bootup_announce();
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
- rcu_init_one(&rcu_state);
- RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
- rcu_init_one(&rcu_bh_state);
- RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
-
- for_each_online_cpu(i)
- rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
- /* Register notifier for non-boot CPUs */
- register_cpu_notifier(&rcu_nb);
+ RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
+ RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
+ __rcu_init_preempt();
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
module_param(blimit, int, 0);
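
The rcutree.c hunks above rename rcu_qsctr_inc()/rcu_bh_qsctr_inc() to rcu_sched_qs()/rcu_bh_qs(), reflecting that only the fact of at least one quiescent state since the grace period began matters, not a count. A minimal single-threaded sketch of that "flag plus completed-snapshot" bookkeeping follows; it models the idea only and is not the kernel code.

/*
 * Sketch of the quiescent-state flag noted by rcu_sched_qs()/rcu_bh_qs():
 * only "at least one quiescent state since this grace period began"
 * matters, so a flag plus a snapshot of the completed counter suffices.
 * Simplified single-threaded model, not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_data {
	long completed;			/* last grace period this CPU saw end */
	long passed_quiesc_completed;	/* value of completed when qs noted   */
	bool passed_quiesc;		/* saw a quiescent state this period? */
};

static void note_qs(struct cpu_data *cd)
{
	cd->passed_quiesc = true;
	cd->passed_quiesc_completed = cd->completed;
}

int main(void)
{
	struct cpu_data cd = { .completed = 41 };

	note_qs(&cd);		/* e.g. a context switch on this CPU */
	note_qs(&cd);		/* further quiescent states are idempotent */
	printf("qs=%d for the period after completed=%ld\n",
	       cd.passed_quiesc, cd.passed_quiesc_completed);
	return 0;
}
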
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 5e872bbf07f5..bf8a6f9f134d 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -1,10 +1,259 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Internal non-public definitions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Ingo Molnar <mingo@elte.hu>
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+/*
+ * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this has not been tested, so there is probably some
+ * bug somewhere.
+ */
+#define MAX_RCU_LVLS 3
+#define RCU_FANOUT (CONFIG_RCU_FANOUT)
+#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
+#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT
+# define NUM_RCU_LVLS 1
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (NR_CPUS)
+# define NUM_RCU_LVL_2 0
+# define NUM_RCU_LVL_3 0
+#elif NR_CPUS <= RCU_FANOUT_SQ
+# define NUM_RCU_LVLS 2
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+# define NUM_RCU_LVL_2 (NR_CPUS)
+# define NUM_RCU_LVL_3 0
+#elif NR_CPUS <= RCU_FANOUT_CUBE
+# define NUM_RCU_LVLS 3
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
+# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+# define NUM_RCU_LVL_3 NR_CPUS
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT */
+
+#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
+
+/*
+ * Dynticks per-CPU state.
+ */
+struct rcu_dynticks {
+ int dynticks_nesting; /* Track nesting level, sort of. */
+ int dynticks; /* Even value for dynticks-idle, else odd. */
+ int dynticks_nmi; /* Even value for either dynticks-idle or */
+ /* not in nmi handler, else odd. So this */
+ /* remains even for nmi from irq handler. */
+};
+
+/*
+ * Definition for node within the RCU grace-period-detection hierarchy.
+ */
+struct rcu_node {
+ spinlock_t lock;
+ long gpnum; /* Current grace period for this node. */
+ /* This will either be equal to or one */
+ /* behind the root rcu_node's gpnum. */
+ unsigned long qsmask; /* CPUs or groups that need to switch in */
+ /* order for current grace period to proceed.*/
+ unsigned long qsmaskinit;
+ /* Per-GP initialization for qsmask. */
+ unsigned long grpmask; /* Mask to apply to parent qsmask. */
+ int grplo; /* lowest-numbered CPU or group here. */
+ int grphi; /* highest-numbered CPU or group here. */
+ u8 grpnum; /* CPU/group number for next level up. */
+ u8 level; /* root is at level 0. */
+ struct rcu_node *parent;
+ struct list_head blocked_tasks[2];
+ /* Tasks blocked in RCU read-side critsect. */
+} ____cacheline_internodealigned_in_smp;
+
+/* Index values for nxttail array in struct rcu_data. */
+#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL 3
+#define RCU_NEXT_SIZE 4
+
+/* Per-CPU data for read-copy update. */
+struct rcu_data {
+ /* 1) quiescent-state and grace-period handling : */
+ long completed; /* Track rsp->completed gp number */
+ /* in order to detect GP end. */
+ long gpnum; /* Highest gp number that this CPU */
+ /* is aware of having started. */
+ long passed_quiesc_completed;
+ /* Value of completed at time of qs. */
+ bool passed_quiesc; /* User-mode/idle loop etc. */
+ bool qs_pending; /* Core waits for quiesc state. */
+ bool beenonline; /* CPU online at least once. */
+ bool preemptable; /* Preemptable RCU? */
+ struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
+ unsigned long grpmask; /* Mask to apply to leaf qsmask. */
+
+ /* 2) batch handling */
+ /*
+ * If nxtlist is not NULL, it is partitioned as follows.
+ * Any of the partitions might be empty, in which case the
+ * pointer to that partition will be equal to the pointer for
+ * the following partition. When the list is empty, all of
+ * the nxttail elements point to nxtlist, which is NULL.
+ *
+ * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
+ * Entries that might have arrived after current GP ended
+ * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
+ * Entries known to have arrived before current GP ended
+ * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
+ * Entries that batch # <= ->completed - 1: waiting for current GP
+ * [nxtlist, *nxttail[RCU_DONE_TAIL]):
+ * Entries that batch # <= ->completed
+ * The grace period for these entries has completed, and
+ * the other grace-period-completed entries may be moved
+ * here temporarily in rcu_process_callbacks().
+ */
+ struct rcu_head *nxtlist;
+ struct rcu_head **nxttail[RCU_NEXT_SIZE];
+ long qlen; /* # of queued callbacks */
+ long blimit; /* Upper limit on a processed batch */
+
+#ifdef CONFIG_NO_HZ
+ /* 3) dynticks interface. */
+ struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
+ int dynticks_snap; /* Per-GP tracking for dynticks. */
+ int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
+#endif /* #ifdef CONFIG_NO_HZ */
+
+ /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
+#ifdef CONFIG_NO_HZ
+ unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
+#endif /* #ifdef CONFIG_NO_HZ */
+ unsigned long offline_fqs; /* Kicked due to being offline. */
+ unsigned long resched_ipi; /* Sent a resched IPI. */
+
+ /* 5) __rcu_pending() statistics. */
+ long n_rcu_pending; /* rcu_pending() calls since boot. */
+ long n_rp_qs_pending;
+ long n_rp_cb_ready;
+ long n_rp_cpu_needs_gp;
+ long n_rp_gp_completed;
+ long n_rp_gp_started;
+ long n_rp_need_fqs;
+ long n_rp_need_nothing;
+
+ int cpu;
+};
+
+/* Values for signaled field in struct rcu_state. */
+#define RCU_GP_INIT 0 /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
+#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
+#ifdef CONFIG_NO_HZ
+#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
+#else /* #ifdef CONFIG_NO_HZ */
+#define RCU_SIGNAL_INIT RCU_FORCE_QS
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
+ /* to take at least one */
+ /* scheduling clock irq */
+ /* before ratting on them. */
+
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * RCU global state, including node hierarchy. This hierarchy is
+ * represented in "heap" form in a dense array. The root (first level)
+ * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
+ * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
+ * and the third level in ->node[m+1] and following (->node[m+1] referenced
+ * by ->level[2]). The number of levels is determined by the number of
+ * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
+ * consisting of a single rcu_node.
+ */
+struct rcu_state {
+ struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
+ struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
+ u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
+ u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
+ struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
+
+ /* The following fields are guarded by the root rcu_node's lock. */
+
+ u8 signaled ____cacheline_internodealigned_in_smp;
+ /* Force QS state. */
+ long gpnum; /* Current gp number. */
+ long completed; /* # of last completed gp. */
+ spinlock_t onofflock; /* exclude on/offline and */
+ /* starting new GP. */
+ spinlock_t fqslock; /* Only one task forcing */
+ /* quiescent states. */
+ unsigned long jiffies_force_qs; /* Time at which to invoke */
+ /* force_quiescent_state(). */
+ unsigned long n_force_qs; /* Number of calls to */
+ /* force_quiescent_state(). */
+ unsigned long n_force_qs_lh; /* ~Number of calls leaving */
+ /* due to lock unavailable. */
+ unsigned long n_force_qs_ngp; /* Number of calls leaving */
+ /* due to no GP active. */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+ unsigned long gp_start; /* Time at which GP started, */
+ /* but in jiffies. */
+ unsigned long jiffies_stall; /* Time at which to check */
+ /* for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_NO_HZ
+ long dynticks_completed; /* Value of completed @ snap. */
+#endif /* #ifdef CONFIG_NO_HZ */
+};
+
+#ifdef RCU_TREE_NONCORE
/*
* RCU implementation internal declarations:
*/
-extern struct rcu_state rcu_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
+extern struct rcu_state rcu_sched_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);
extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+#ifdef CONFIG_TREE_PREEMPT_RCU
+extern struct rcu_state rcu_preempt_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+#endif /* #ifdef RCU_TREE_NONCORE */
+
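
The fanout macros above fix the shape of the rcu_node hierarchy at compile time. As a worked example, assuming RCU_FANOUT = 64 and NR_CPUS = 4096 (illustrative values only, not a statement about any particular .config), two levels are used: one root plus 64 interior nodes, 65 rcu_node structures in total. The arithmetic, spelled out:

/*
 * Worked example of the hierarchy-geometry macros above, using
 * illustrative values (RCU_FANOUT = 64, NR_CPUS = 4096).
 */
#include <stdio.h>

int main(void)
{
	const int fanout = 64, nr_cpus = 4096;
	const int fanout_sq = fanout * fanout;		  /* 4096 */
	/* nr_cpus <= fanout_sq, so two levels are used. */
	const int lvl0 = 1;				  /* root          */
	const int lvl1 = (nr_cpus + fanout - 1) / fanout; /* 64 groups     */
	const int lvl2 = nr_cpus;			  /* leaf "slots"  */
	const int rcu_sum = lvl0 + lvl1 + lvl2;
	const int num_rcu_nodes = rcu_sum - nr_cpus;	  /* 65 rcu_node's */

	printf("levels=2 lvl1=%d nodes=%d (fanout_sq=%d)\n",
	       lvl1, num_rcu_nodes, fanout_sq);
	return 0;
}
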
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
new file mode 100644
index 000000000000..47789369ea59
--- /dev/null
+++ b/kernel/rcutree_plugin.h
@@ -0,0 +1,532 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Internal non-public definitions that provide either classic
+ * or preemptable semantics.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright Red Hat, 2009
+ * Copyright IBM Corporation, 2009
+ *
+ * Author: Ingo Molnar <mingo@elte.hu>
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
+
+/*
+ * Tell them what RCU they are running.
+ */
+static inline void rcu_bootup_announce(void)
+{
+ printk(KERN_INFO
+ "Experimental preemptable hierarchical RCU implementation.\n");
+}
+
+/*
+ * Return the number of RCU-preempt batches processed thus far
+ * for debug and statistics.
+ */
+long rcu_batches_completed_preempt(void)
+{
+ return rcu_preempt_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
+
+/*
+ * Return the number of RCU batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_batches_completed_preempt();
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Record a preemptable-RCU quiescent state for the specified CPU. Note
+ * that this just means that the task currently running on the CPU is
+ * not in a quiescent state. There might be any number of tasks blocked
+ * while in an RCU read-side critical section.
+ */
+static void rcu_preempt_qs_record(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+
+/*
+ * We have entered the scheduler or are between softirqs in ksoftirqd.
+ * If we are in an RCU read-side critical section, we need to reflect
+ * that in the state of the rcu_node structure corresponding to this CPU.
+ * Caller must disable hardirqs.
+ */
+static void rcu_preempt_qs(int cpu)
+{
+ struct task_struct *t = current;
+ int phase;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+
+ if (t->rcu_read_lock_nesting &&
+ (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+
+ /* Possibly blocking in an RCU read-side critical section. */
+ rdp = rcu_preempt_state.rda[cpu];
+ rnp = rdp->mynode;
+ spin_lock(&rnp->lock);
+ t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+ t->rcu_blocked_node = rnp;
+
+ /*
+ * If this CPU has already checked in, then this task
+ * will hold up the next grace period rather than the
+ * current grace period. Queue the task accordingly.
+ * If the task is queued for the current grace period
+ * (i.e., this CPU has not yet passed through a quiescent
+ * state for the current grace period), then as long
+ * as that task remains queued, the current grace period
+ * cannot end.
+ */
+ phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
+ list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
+ smp_mb(); /* Ensure later ctxt swtch seen after above. */
+ spin_unlock(&rnp->lock);
+ }
+
+ /*
+ * Either we were not in an RCU read-side critical section to
+ * begin with, or we have now recorded that critical section
+ * globally. Either way, we can now note a quiescent state
+ * for this CPU. Again, if we were in an RCU read-side critical
+ * section, and if that critical section was blocking the current
+ * grace period, then the fact that the task has been enqueued
+ * means that we continue to block the current grace period.
+ */
+ rcu_preempt_qs_record(cpu);
+ t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS |
+ RCU_READ_UNLOCK_GOT_QS);
+}
+
+/*
+ * Tree-preemptable RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+ ACCESS_ONCE(current->rcu_read_lock_nesting)++;
+ barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+static void rcu_read_unlock_special(struct task_struct *t)
+{
+ int empty;
+ unsigned long flags;
+ unsigned long mask;
+ struct rcu_node *rnp;
+ int special;
+
+ /* NMI handlers cannot block and cannot safely manipulate state. */
+ if (in_nmi())
+ return;
+
+ local_irq_save(flags);
+
+ /*
+ * If RCU core is waiting for this CPU to exit critical section,
+ * let it know that we have done so.
+ */
+ special = t->rcu_read_unlock_special;
+ if (special & RCU_READ_UNLOCK_NEED_QS) {
+ t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+ t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS;
+ }
+
+ /* Hardware IRQ handlers cannot block. */
+ if (in_irq()) {
+ local_irq_restore(flags);
+ return;
+ }
+
+ /* Clean up if blocked during RCU read-side critical section. */
+ if (special & RCU_READ_UNLOCK_BLOCKED) {
+ t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+
+ /*
+ * Remove this task from the list it blocked on. The
+ * task can migrate while we acquire the lock, but at
+ * most one time. So at most two passes through loop.
+ */
+ for (;;) {
+ rnp = t->rcu_blocked_node;
+ spin_lock(&rnp->lock);
+ if (rnp == t->rcu_blocked_node)
+ break;
+ spin_unlock(&rnp->lock);
+ }
+ empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+ list_del_init(&t->rcu_node_entry);
+ t->rcu_blocked_node = NULL;
+
+ /*
+ * If this was the last task on the current list, and if
+ * we aren't waiting on any CPUs, report the quiescent state.
+ * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk()
+ * drop rnp->lock and restore irq.
+ */
+ if (!empty && rnp->qsmask == 0 &&
+ list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
+ t->rcu_read_unlock_special &=
+ ~(RCU_READ_UNLOCK_NEED_QS |
+ RCU_READ_UNLOCK_GOT_QS);
+ if (rnp->parent == NULL) {
+ /* Only one rcu_node in the tree. */
+ cpu_quiet_msk_finish(&rcu_preempt_state, flags);
+ return;
+ }
+ /* Report up the rest of the hierarchy. */
+ mask = rnp->grpmask;
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ rnp = rnp->parent;
+ spin_lock_irqsave(&rnp->lock, flags);
+ cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
+ return;
+ }
+ spin_unlock(&rnp->lock);
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Tree-preemptable RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+ struct task_struct *t = current;
+
+ barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
+ if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
+ unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+
+/*
+ * Scan the current list of tasks blocked within RCU read-side critical
+ * sections, printing out the tid of each.
+ */
+static void rcu_print_task_stall(struct rcu_node *rnp)
+{
+ unsigned long flags;
+ struct list_head *lp;
+ int phase = rnp->gpnum & 0x1;
+ struct task_struct *t;
+
+ if (!list_empty(&rnp->blocked_tasks[phase])) {
+ spin_lock_irqsave(&rnp->lock, flags);
+ phase = rnp->gpnum & 0x1; /* re-read under lock. */
+ lp = &rnp->blocked_tasks[phase];
+ list_for_each_entry(t, lp, rcu_node_entry)
+ printk(" P%d", t->pid);
+ spin_unlock_irqrestore(&rnp->lock, flags);
+ }
+}
+
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * Check for preempted RCU readers for the specified rcu_node structure.
+ * If the caller needs a reliable answer, it must hold the rcu_node's
+ * ->lock.
+ */
+static int rcu_preempted_readers(struct rcu_node *rnp)
+{
+ return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Handle tasklist migration for case in which all CPUs covered by the
+ * specified rcu_node have gone offline. Move them up to the root
+ * rcu_node. The reason for not just moving them to the immediate
+ * parent is to remove the need for rcu_read_unlock_special() to
+ * make more than two attempts to acquire the target rcu_node's lock.
+ *
+ * The caller must hold rnp->lock with irqs disabled.
+ */
+static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
+ struct rcu_node *rnp)
+{
+ int i;
+ struct list_head *lp;
+ struct list_head *lp_root;
+ struct rcu_node *rnp_root = rcu_get_root(rsp);
+ struct task_struct *tp;
+
+ if (rnp == rnp_root) {
+ WARN_ONCE(1, "Last CPU thought to be offlined?");
+ return; /* Shouldn't happen: at least one CPU online. */
+ }
+
+ /*
+ * Move tasks up to root rcu_node. Rely on the fact that the
+ * root rcu_node can be at most one ahead of the rest of the
+ * rcu_nodes in terms of gp_num value. This fact allows us to
+ * move the blocked_tasks[] array directly, element by element.
+ */
+ for (i = 0; i < 2; i++) {
+ lp = &rnp->blocked_tasks[i];
+ lp_root = &rnp_root->blocked_tasks[i];
+ while (!list_empty(lp)) {
+ tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
+ spin_lock(&rnp_root->lock); /* irqs already disabled */
+ list_del(&tp->rcu_node_entry);
+ tp->rcu_blocked_node = rnp_root;
+ list_add(&tp->rcu_node_entry, lp_root);
+ spin_unlock(&rnp_root->lock); /* irqs remain disabled */
+ }
+ }
+}
+
+/*
+ * Do CPU-offline processing for preemptable RCU.
+ */
+static void rcu_preempt_offline_cpu(int cpu)
+{
+ __rcu_offline_cpu(cpu, &rcu_preempt_state);
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+/*
+ * Check for a quiescent state from the current CPU. When a task blocks,
+ * the task is recorded in the corresponding CPU's rcu_node structure,
+ * which is checked elsewhere.
+ *
+ * Caller must disable hard irqs.
+ */
+static void rcu_preempt_check_callbacks(int cpu)
+{
+ struct task_struct *t = current;
+
+ if (t->rcu_read_lock_nesting == 0) {
+ t->rcu_read_unlock_special &=
+ ~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS);
+ rcu_preempt_qs_record(cpu);
+ return;
+ }
+ if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
+ if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) {
+ rcu_preempt_qs_record(cpu);
+ t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS;
+ } else if (!(t->rcu_read_unlock_special &
+ RCU_READ_UNLOCK_NEED_QS)) {
+ t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+ }
+ }
+}
+
+/*
+ * Process callbacks for preemptable RCU.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+ __rcu_process_callbacks(&rcu_preempt_state,
+ &__get_cpu_var(rcu_preempt_data));
+}
+
+/*
+ * Queue a preemptable-RCU callback for invocation after a grace period.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ __call_rcu(head, func, &rcu_preempt_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/*
+ * Check to see if there is any immediate preemptable-RCU-related work
+ * to be done.
+ */
+static int rcu_preempt_pending(int cpu)
+{
+ return __rcu_pending(&rcu_preempt_state,
+ &per_cpu(rcu_preempt_data, cpu));
+}
+
+/*
+ * Does preemptable RCU need the CPU to stay out of dynticks mode?
+ */
+static int rcu_preempt_needs_cpu(int cpu)
+{
+ return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
+}
+
+/*
+ * Initialize preemptable RCU's per-CPU data.
+ */
+static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
+{
+ rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
+}
+
+/*
+ * Check for a task exiting while in a preemptable-RCU read-side
+ * critical section, clean up if so. No need to issue warnings,
+ * as debug_check_no_locks_held() already does this if lockdep
+ * is enabled.
+ */
+void exit_rcu(void)
+{
+ struct task_struct *t = current;
+
+ if (t->rcu_read_lock_nesting == 0)
+ return;
+ t->rcu_read_lock_nesting = 1;
+ rcu_read_unlock();
+}
+
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+/*
+ * Tell them what RCU they are running.
+ */
+static inline void rcu_bootup_announce(void)
+{
+ printk(KERN_INFO "Hierarchical RCU implementation.\n");
+}
+
+/*
+ * Return the number of RCU batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_batches_completed_sched();
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Because preemptable RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_qs(int cpu)
+{
+}
+
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+
+/*
+ * Because preemptable RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections.
+ */
+static void rcu_print_task_stall(struct rcu_node *rnp)
+{
+}
+
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * Because preemptable RCU does not exist, there are never any preempted
+ * RCU readers.
+ */
+static int rcu_preempted_readers(struct rcu_node *rnp)
+{
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Because preemptable RCU does not exist, it never needs to migrate
+ * tasks that were blocked within RCU read-side critical sections.
+ */
+static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
+ struct rcu_node *rnp)
+{
+}
+
+/*
+ * Because preemptable RCU does not exist, it never needs CPU-offline
+ * processing.
+ */
+static void rcu_preempt_offline_cpu(int cpu)
+{
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+/*
+ * Because preemptable RCU does not exist, it never has any callbacks
+ * to check.
+ */
+void rcu_preempt_check_callbacks(int cpu)
+{
+}
+
+/*
+ * Because preemptable RCU does not exist, it never has any callbacks
+ * to process.
+ */
+void rcu_preempt_process_callbacks(void)
+{
+}
+
+/*
+ * In classic RCU, call_rcu() is just call_rcu_sched().
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ call_rcu_sched(head, func);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/*
+ * Because preemptable RCU does not exist, it never has any work to do.
+ */
+static int rcu_preempt_pending(int cpu)
+{
+ return 0;
+}
+
+/*
+ * Because preemptable RCU does not exist, it never needs any CPU.
+ */
+static int rcu_preempt_needs_cpu(int cpu)
+{
+ return 0;
+}
+
+/*
+ * Because preemptable RCU does not exist, there is no per-CPU
+ * data to initialize.
+ */
+static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
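
The preemptible read-side primitives in the new rcutree_plugin.h boil down to a per-task nesting counter plus a "special" bitmask that is consulted only at the outermost rcu_read_unlock(). The sketch below models just that shape in plain C; the names are hypothetical and no per-CPU, locking, or blocking state is represented.

/*
 * Minimal sketch of the __rcu_read_lock()/__rcu_read_unlock() shape:
 * a per-task nesting counter, with extra work deferred to the outermost
 * unlock and done only when a "special" flag was set while nested
 * (e.g. the task was preempted inside the critical section).
 * Hypothetical names; single-threaded model only.
 */
#include <stdio.h>

struct task {
	int rcu_read_lock_nesting;
	unsigned int rcu_read_unlock_special;
};

#define UNLOCK_BLOCKED 0x1u

static void sketch_read_lock(struct task *t)
{
	t->rcu_read_lock_nesting++;
}

static void sketch_read_unlock(struct task *t)
{
	if (--t->rcu_read_lock_nesting == 0 && t->rcu_read_unlock_special) {
		/* Outermost unlock with deferred work pending. */
		printf("cleaning up after a preempted read-side section\n");
		t->rcu_read_unlock_special = 0;
	}
}

int main(void)
{
	struct task t = { 0, 0 };

	sketch_read_lock(&t);
	sketch_read_lock(&t);			/* nesting is allowed        */
	t.rcu_read_unlock_special |= UNLOCK_BLOCKED; /* pretend we blocked   */
	sketch_read_unlock(&t);			/* inner unlock: nothing yet */
	sketch_read_unlock(&t);			/* outermost: runs cleanup   */
	return 0;
}
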
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index fe1dcdbf1ca3..0ea1bff69727 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -43,6 +43,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#define RCU_TREE_NONCORE
#include "rcutree.h"
static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
@@ -76,8 +77,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
static int show_rcudata(struct seq_file *m, void *unused)
{
- seq_puts(m, "rcu:\n");
- PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m);
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ seq_puts(m, "rcu_preempt:\n");
+ PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m);
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+ seq_puts(m, "rcu_sched:\n");
+ PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m);
seq_puts(m, "rcu_bh:\n");
PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
return 0;
@@ -102,7 +107,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
return;
seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d",
rdp->cpu,
- cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
+ cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"",
rdp->completed, rdp->gpnum,
rdp->passed_quiesc, rdp->passed_quiesc_completed,
rdp->qs_pending);
@@ -124,8 +129,12 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
- seq_puts(m, "\"rcu:\"\n");
- PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m);
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ seq_puts(m, "\"rcu_preempt:\"\n");
+ PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+ seq_puts(m, "\"rcu_sched:\"\n");
+ PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m);
seq_puts(m, "\"rcu_bh:\"\n");
PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
return 0;
@@ -171,8 +180,12 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
static int show_rcuhier(struct seq_file *m, void *unused)
{
- seq_puts(m, "rcu:\n");
- print_one_rcu_state(m, &rcu_state);
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ seq_puts(m, "rcu_preempt:\n");
+ print_one_rcu_state(m, &rcu_preempt_state);
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+ seq_puts(m, "rcu_sched:\n");
+ print_one_rcu_state(m, &rcu_sched_state);
seq_puts(m, "rcu_bh:\n");
print_one_rcu_state(m, &rcu_bh_state);
return 0;
@@ -193,8 +206,12 @@ static struct file_operations rcuhier_fops = {
static int show_rcugp(struct seq_file *m, void *unused)
{
- seq_printf(m, "rcu: completed=%ld gpnum=%ld\n",
- rcu_state.completed, rcu_state.gpnum);
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ seq_printf(m, "rcu_preempt: completed=%ld gpnum=%ld\n",
+ rcu_preempt_state.completed, rcu_preempt_state.gpnum);
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+ seq_printf(m, "rcu_sched: completed=%ld gpnum=%ld\n",
+ rcu_sched_state.completed, rcu_sched_state.gpnum);
seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n",
rcu_bh_state.completed, rcu_bh_state.gpnum);
return 0;
@@ -243,8 +260,12 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
static int show_rcu_pending(struct seq_file *m, void *unused)
{
- seq_puts(m, "rcu:\n");
- print_rcu_pendings(m, &rcu_state);
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ seq_puts(m, "rcu_preempt:\n");
+ print_rcu_pendings(m, &rcu_preempt_state);
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+ seq_puts(m, "rcu_sched:\n");
+ print_rcu_pendings(m, &rcu_sched_state);
seq_puts(m, "rcu_bh:\n");
print_rcu_pendings(m, &rcu_bh_state);
return 0;
@@ -264,62 +285,47 @@ static struct file_operations rcu_pending_fops = {
};
static struct dentry *rcudir;
-static struct dentry *datadir;
-static struct dentry *datadir_csv;
-static struct dentry *gpdir;
-static struct dentry *hierdir;
-static struct dentry *rcu_pendingdir;
static int __init rcuclassic_trace_init(void)
{
+ struct dentry *retval;
+
rcudir = debugfs_create_dir("rcu", NULL);
if (!rcudir)
- goto out;
+ goto free_out;
- datadir = debugfs_create_file("rcudata", 0444, rcudir,
+ retval = debugfs_create_file("rcudata", 0444, rcudir,
NULL, &rcudata_fops);
- if (!datadir)
+ if (!retval)
goto free_out;
- datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir,
+ retval = debugfs_create_file("rcudata.csv", 0444, rcudir,
NULL, &rcudata_csv_fops);
- if (!datadir_csv)
+ if (!retval)
goto free_out;
- gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
- if (!gpdir)
+ retval = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
+ if (!retval)
goto free_out;
- hierdir = debugfs_create_file("rcuhier", 0444, rcudir,
+ retval = debugfs_create_file("rcuhier", 0444, rcudir,
NULL, &rcuhier_fops);
- if (!hierdir)
+ if (!retval)
goto free_out;
- rcu_pendingdir = debugfs_create_file("rcu_pending", 0444, rcudir,
+ retval = debugfs_create_file("rcu_pending", 0444, rcudir,
NULL, &rcu_pending_fops);
- if (!rcu_pendingdir)
+ if (!retval)
goto free_out;
return 0;
free_out:
- if (datadir)
- debugfs_remove(datadir);
- if (datadir_csv)
- debugfs_remove(datadir_csv);
- if (gpdir)
- debugfs_remove(gpdir);
- debugfs_remove(rcudir);
-out:
+ debugfs_remove_recursive(rcudir);
return 1;
}
static void __exit rcuclassic_trace_cleanup(void)
{
- debugfs_remove(datadir);
- debugfs_remove(datadir_csv);
- debugfs_remove(gpdir);
- debugfs_remove(hierdir);
- debugfs_remove(rcu_pendingdir);
- debugfs_remove(rcudir);
+ debugfs_remove_recursive(rcudir);
}
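
The init/cleanup rework above leans on debugfs_remove_recursive(): only the directory dentry has to be remembered, and one call tears down every file created beneath it. A minimal module sketch of the same pattern follows; the module and file names are made up for illustration, and it assumes a kernel built with CONFIG_DEBUG_FS.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;

static int example_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "hello from debugfs\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	if (!example_dir)
		return -ENOMEM;
	/* no need to remember the file dentry; removal is recursive */
	if (!debugfs_create_file("state", 0444, example_dir, NULL,
				 &example_fops)) {
		debugfs_remove_recursive(example_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
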
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e265273b..e27a53685ed9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -64,7 +64,6 @@
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
-#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
@@ -120,30 +119,8 @@
*/
#define RUNTIME_INF ((u64)~0ULL)
-#ifdef CONFIG_SMP
-
static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-/*
- * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
- * Since cpu_power is a 'constant', we can use a reciprocal divide.
- */
-static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
-{
- return reciprocal_divide(load, sg->reciprocal_cpu_power);
-}
-
-/*
- * Each time a sched group cpu_power is changed,
- * we must compute its reciprocal value
- */
-static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
-{
- sg->__cpu_power += val;
- sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
-}
-#endif
-
static inline int rt_policy(int policy)
{
if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -309,8 +286,8 @@ void set_tg_uid(struct user_struct *user)
/*
* Root task group.
- * Every UID task group (including init_task_group aka UID-0) will
- * be a child to this group.
+ * Every UID task group (including init_task_group aka UID-0) will
+ * be a child to this group.
*/
struct task_group root_task_group;
@@ -318,7 +295,7 @@ struct task_group root_task_group;
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
@@ -616,6 +593,7 @@ struct rq {
unsigned char idle_at_tick;
/* For active balancing */
+ int post_schedule;
int active_balance;
int push_cpu;
/* cpu of this runqueue: */
@@ -626,6 +604,9 @@ struct rq {
struct task_struct *migration_thread;
struct list_head migration_queue;
+
+ u64 rt_avg;
+ u64 age_stamp;
#endif
/* calc_load related fields */
@@ -693,6 +674,7 @@ static inline int cpu_of(struct rq *rq)
#define this_rq() (&__get_cpu_var(runqueues))
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+#define raw_rq() (&__raw_get_cpu_var(runqueues))
inline void update_rq_clock(struct rq *rq)
{
@@ -861,6 +843,14 @@ unsigned int sysctl_sched_shares_ratelimit = 250000;
unsigned int sysctl_sched_shares_thresh = 4;
/*
+ * period over which we average the RT time consumption, measured
+ * in ms.
+ *
+ * default: 1s
+ */
+const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
+
+/*
* period over which we measure -rt task cpu usage in us.
* default: 1s
*/
@@ -1278,12 +1268,37 @@ void wake_up_idle_cpu(int cpu)
}
#endif /* CONFIG_NO_HZ */
+static u64 sched_avg_period(void)
+{
+ return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
+}
+
+static void sched_avg_update(struct rq *rq)
+{
+ s64 period = sched_avg_period();
+
+ while ((s64)(rq->clock - rq->age_stamp) > period) {
+ rq->age_stamp += period;
+ rq->rt_avg /= 2;
+ }
+}
+
+static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
+{
+ rq->rt_avg += rt_delta;
+ sched_avg_update(rq);
+}
+
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
assert_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
+
+static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
+{
+}
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
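
The sched_avg_period()/sched_avg_update() helpers added above age rq->rt_avg by halving it once per half-period, so rt_avg behaves roughly like a geometric moving average of recent RT time. A standalone userspace sketch of that decay (not kernel code; the tick pattern and the 25% RT load are made-up inputs):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC	1000000ULL

/* mirrors sched_avg_period(): half the averaging window, in ns */
static uint64_t period_ns(unsigned int avg_ms)
{
	return (uint64_t)avg_ms * NSEC_PER_MSEC / 2;
}

int main(void)
{
	uint64_t clock = 0, age_stamp = 0, rt_avg = 0;
	uint64_t period = period_ns(1000);	/* sysctl_sched_time_avg = 1s */
	int tick;

	for (tick = 0; tick < 10; tick++) {
		clock += period;		/* half a second passes */
		rt_avg += period / 4;		/* pretend 25% of it was RT time */

		/* the aging loop from sched_avg_update() */
		while (clock - age_stamp > period) {
			age_stamp += period;
			rt_avg /= 2;
		}
		printf("tick %2d: rt_avg = %llu ns\n",
		       tick, (unsigned long long)rt_avg);
	}
	return 0;
}
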
@@ -1513,28 +1528,35 @@ static unsigned long cpu_avg_load_per_task(int cpu)
#ifdef CONFIG_FAIR_GROUP_SCHED
+struct update_shares_data {
+ unsigned long rq_weight[NR_CPUS];
+};
+
+static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
/*
* Calculate and set the cpu's group shares.
*/
-static void
-update_group_shares_cpu(struct task_group *tg, int cpu,
- unsigned long sd_shares, unsigned long sd_rq_weight)
+static void update_group_shares_cpu(struct task_group *tg, int cpu,
+ unsigned long sd_shares,
+ unsigned long sd_rq_weight,
+ struct update_shares_data *usd)
{
- unsigned long shares;
- unsigned long rq_weight;
-
- if (!tg->se[cpu])
- return;
+ unsigned long shares, rq_weight;
+ int boost = 0;
- rq_weight = tg->cfs_rq[cpu]->rq_weight;
+ rq_weight = usd->rq_weight[cpu];
+ if (!rq_weight) {
+ boost = 1;
+ rq_weight = NICE_0_LOAD;
+ }
/*
- * \Sum shares * rq_weight
- * shares = -----------------------
- * \Sum rq_weight
- *
+ * \Sum_j shares_j * rq_weight_i
+ * shares_i = -----------------------------
+ * \Sum_j rq_weight_j
*/
shares = (sd_shares * rq_weight) / sd_rq_weight;
shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
@@ -1545,8 +1567,8 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
- tg->cfs_rq[cpu]->shares = shares;
-
+ tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
+ tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
__set_se_shares(tg->se[cpu], shares);
spin_unlock_irqrestore(&rq->lock, flags);
}
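
A worked instance of the shares formula in update_group_shares_cpu() above, as a standalone program (not kernel code): with the group's shares summing to 1024 over a domain whose runqueue weights are 2048 and 1024, the per-cpu shares come out to 682 and 341. The MIN_SHARES/MAX_SHARES values here are assumptions for illustration.

#include <stdio.h>

#define MIN_SHARES	2UL		/* assumed, for illustration */
#define MAX_SHARES	(1UL << 18)	/* assumed, for illustration */

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
			      unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long sd_shares = 1024;			/* \Sum_j shares_j */
	unsigned long rq_weight[2] = { 2048, 1024 };	/* rq_weight_i per cpu */
	unsigned long sd_rq_weight = 3072;		/* \Sum_j rq_weight_j */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long shares = sd_shares * rq_weight[i] / sd_rq_weight;

		shares = clamp_ul(shares, MIN_SHARES, MAX_SHARES);
		printf("cpu%d: shares = %lu\n", i, shares);	/* 682, 341 */
	}
	return 0;
}
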
@@ -1559,22 +1581,30 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
*/
static int tg_shares_up(struct task_group *tg, void *data)
{
- unsigned long weight, rq_weight = 0;
- unsigned long shares = 0;
+ unsigned long weight, rq_weight = 0, shares = 0;
+ struct update_shares_data *usd;
struct sched_domain *sd = data;
+ unsigned long flags;
int i;
+ if (!tg->se[0])
+ return 0;
+
+ local_irq_save(flags);
+ usd = &__get_cpu_var(update_shares_data);
+
for_each_cpu(i, sched_domain_span(sd)) {
+ weight = tg->cfs_rq[i]->load.weight;
+ usd->rq_weight[i] = weight;
+
/*
* If there are currently no tasks on the cpu pretend there
* is one of average load so that when a new task gets to
* run here it will not get delayed by group starvation.
*/
- weight = tg->cfs_rq[i]->load.weight;
if (!weight)
weight = NICE_0_LOAD;
- tg->cfs_rq[i]->rq_weight = weight;
rq_weight += weight;
shares += tg->cfs_rq[i]->shares;
}
@@ -1586,7 +1616,9 @@ static int tg_shares_up(struct task_group *tg, void *data)
shares = tg->shares;
for_each_cpu(i, sched_domain_span(sd))
- update_group_shares_cpu(tg, i, shares, rq_weight);
+ update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+
+ local_irq_restore(flags);
return 0;
}
@@ -1616,8 +1648,14 @@ static int tg_load_down(struct task_group *tg, void *data)
static void update_shares(struct sched_domain *sd)
{
- u64 now = cpu_clock(raw_smp_processor_id());
- s64 elapsed = now - sd->last_update;
+ s64 elapsed;
+ u64 now;
+
+ if (root_task_group_empty())
+ return;
+
+ now = cpu_clock(raw_smp_processor_id());
+ elapsed = now - sd->last_update;
if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
sd->last_update = now;
@@ -1627,6 +1665,9 @@ static void update_shares(struct sched_domain *sd)
static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
{
+ if (root_task_group_empty())
+ return;
+
spin_unlock(&rq->lock);
update_shares(sd);
spin_lock(&rq->lock);
@@ -1634,6 +1675,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
static void update_h_load(long cpu)
{
+ if (root_task_group_empty())
+ return;
+
walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}
@@ -2268,8 +2312,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
}
/* Adjust by relative CPU power of the group */
- avg_load = sg_div_cpu_power(group,
- avg_load * SCHED_LOAD_SCALE);
+ avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
if (local_group) {
this_load = avg_load;
@@ -2637,9 +2680,32 @@ void sched_fork(struct task_struct *p, int clone_flags)
set_task_cpu(p, cpu);
/*
- * Make sure we do not leak PI boosting priority to the child:
+ * Make sure we do not leak PI boosting priority to the child.
*/
p->prio = current->normal_prio;
+
+ /*
+ * Revert to default priority/policy on fork if requested.
+ */
+ if (unlikely(p->sched_reset_on_fork)) {
+ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+ p->policy = SCHED_NORMAL;
+
+ if (p->normal_prio < DEFAULT_PRIO)
+ p->prio = DEFAULT_PRIO;
+
+ if (PRIO_TO_NICE(p->static_prio) < 0) {
+ p->static_prio = NICE_TO_PRIO(0);
+ set_load_weight(p);
+ }
+
+ /*
+ * We don't need the reset flag anymore after the fork. It has
+ * fulfilled its duty:
+ */
+ p->sched_reset_on_fork = 0;
+ }
+
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
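
The sched_reset_on_fork handling above pairs with the SCHED_RESET_ON_FORK flag that __sched_setscheduler() starts accepting later in this patch. A userspace usage sketch (the fallback #define mirrors the kernel's flag value; the RT policy only takes effect with sufficient privileges, and failure is just reported):

#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK	0x40000000	/* matches the kernel flag */
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* RT in this task, but children revert to SCHED_NORMAL on fork */
	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp))
		perror("sched_setscheduler");

	if (fork() == 0) {
		/* the kernel cleared the policy and the flag for the child */
		printf("child policy = %d\n", sched_getscheduler(0));
		_exit(0);
	}
	return 0;
}
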
@@ -2796,12 +2862,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
{
struct mm_struct *mm = rq->prev_mm;
long prev_state;
-#ifdef CONFIG_SMP
- int post_schedule = 0;
-
- if (current->sched_class->needs_post_schedule)
- post_schedule = current->sched_class->needs_post_schedule(rq);
-#endif
rq->prev_mm = NULL;
@@ -2820,10 +2880,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
finish_arch_switch(prev);
perf_counter_task_sched_in(current, cpu_of(rq));
finish_lock_switch(rq, prev);
-#ifdef CONFIG_SMP
- if (post_schedule)
- current->sched_class->post_schedule(rq);
-#endif
fire_sched_in_preempt_notifiers(current);
if (mm)
@@ -2838,6 +2894,42 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
}
}
+#ifdef CONFIG_SMP
+
+/* assumes rq->lock is held */
+static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
+{
+ if (prev->sched_class->pre_schedule)
+ prev->sched_class->pre_schedule(rq, prev);
+}
+
+/* rq->lock is NOT held, but preemption is disabled */
+static inline void post_schedule(struct rq *rq)
+{
+ if (rq->post_schedule) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ if (rq->curr->sched_class->post_schedule)
+ rq->curr->sched_class->post_schedule(rq);
+ spin_unlock_irqrestore(&rq->lock, flags);
+
+ rq->post_schedule = 0;
+ }
+}
+
+#else
+
+static inline void pre_schedule(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void post_schedule(struct rq *rq)
+{
+}
+
+#endif
+
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
@@ -2848,6 +2940,13 @@ asmlinkage void schedule_tail(struct task_struct *prev)
struct rq *rq = this_rq();
finish_task_switch(rq, prev);
+
+ /*
+ * FIXME: do we need to worry about rq being invalidated by the
+ * task_switch?
+ */
+ post_schedule(rq);
+
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
/* In this case, finish_task_switch does not reenable preemption */
preempt_enable();
@@ -3379,9 +3478,10 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
{
const struct sched_class *class;
- for (class = sched_class_highest; class; class = class->next)
+ for_each_class(class) {
if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
return 1;
+ }
return 0;
}
@@ -3544,7 +3644,7 @@ static inline void update_sd_power_savings_stats(struct sched_group *group,
* capacity but still has some space to pick up some load
* from other group and save more power
*/
- if (sgs->sum_nr_running > sgs->group_capacity - 1)
+ if (sgs->sum_nr_running + 1 > sgs->group_capacity)
return;
if (sgs->sum_nr_running > sds->leader_nr_running ||
@@ -3611,6 +3711,77 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+{
+ unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long smt_gain = sd->smt_gain;
+
+ smt_gain /= weight;
+
+ return smt_gain;
+}
+
+unsigned long scale_rt_power(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ u64 total, available;
+
+ sched_avg_update(rq);
+
+ total = sched_avg_period() + (rq->clock - rq->age_stamp);
+ available = total - rq->rt_avg;
+
+ if (unlikely((s64)total < SCHED_LOAD_SCALE))
+ total = SCHED_LOAD_SCALE;
+
+ total >>= SCHED_LOAD_SHIFT;
+
+ return div_u64(available, total);
+}
+
+static void update_cpu_power(struct sched_domain *sd, int cpu)
+{
+ unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long power = SCHED_LOAD_SCALE;
+ struct sched_group *sdg = sd->groups;
+
+ /* here we could scale based on cpufreq */
+
+ if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+ power *= arch_scale_smt_power(sd, cpu);
+ power >>= SCHED_LOAD_SHIFT;
+ }
+
+ power *= scale_rt_power(cpu);
+ power >>= SCHED_LOAD_SHIFT;
+
+ if (!power)
+ power = 1;
+
+ sdg->cpu_power = power;
+}
+
+static void update_group_power(struct sched_domain *sd, int cpu)
+{
+ struct sched_domain *child = sd->child;
+ struct sched_group *group, *sdg = sd->groups;
+ unsigned long power;
+
+ if (!child) {
+ update_cpu_power(sd, cpu);
+ return;
+ }
+
+ power = 0;
+
+ group = child->groups;
+ do {
+ power += group->cpu_power;
+ group = group->next;
+ } while (group != child->groups);
+
+ sdg->cpu_power = power;
+}
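
Rough arithmetic behind update_cpu_power() above, as a standalone sketch (not kernel code): an SMT sibling first gets smt_gain/weight of SCHED_LOAD_SCALE, and that is then scaled by the fraction of time left over after RT tasks. The smt_gain value of 1178 and the 75% free fraction are assumptions for illustration.

#include <stdio.h>

#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)

int main(void)
{
	unsigned long power = SCHED_LOAD_SCALE;
	unsigned long smt_gain = 1178, weight = 2;	/* assumed 2-thread core */
	unsigned long rt_free = 768;	/* assumed 75% non-RT, in LOAD_SCALE units */

	/* arch_scale_smt_power(): siblings split the core's gain */
	power *= smt_gain / weight;
	power >>= SCHED_LOAD_SHIFT;

	/* scale_rt_power(): keep only the non-RT fraction */
	power *= rt_free;
	power >>= SCHED_LOAD_SHIFT;

	printf("cpu_power = %lu of %lu\n", power, SCHED_LOAD_SCALE);	/* 441 */
	return 0;
}
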
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
@@ -3624,7 +3795,8 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
* @balance: Should we balance.
* @sgs: variable to hold the statistics for this group.
*/
-static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+static inline void update_sg_lb_stats(struct sched_domain *sd,
+ struct sched_group *group, int this_cpu,
enum cpu_idle_type idle, int load_idx, int *sd_idle,
int local_group, const struct cpumask *cpus,
int *balance, struct sg_lb_stats *sgs)
@@ -3635,8 +3807,11 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
unsigned long sum_avg_load_per_task;
unsigned long avg_load_per_task;
- if (local_group)
+ if (local_group) {
balance_cpu = group_first_cpu(group);
+ if (balance_cpu == this_cpu)
+ update_group_power(sd, this_cpu);
+ }
/* Tally up the load of all CPUs in the group */
sum_avg_load_per_task = avg_load_per_task = 0;
@@ -3685,8 +3860,7 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
}
/* Adjust by relative CPU power of the group */
- sgs->avg_load = sg_div_cpu_power(group,
- sgs->group_load * SCHED_LOAD_SCALE);
+ sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
/*
@@ -3698,14 +3872,14 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
* normalized nr_running number somewhere that negates
* the hierarchy?
*/
- avg_load_per_task = sg_div_cpu_power(group,
- sum_avg_load_per_task * SCHED_LOAD_SCALE);
+ avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
+ group->cpu_power;
if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
sgs->group_imb = 1;
- sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
-
+ sgs->group_capacity =
+ DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
}
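
Why group_capacity switches to DIV_ROUND_CLOSEST() above: with cpu_power now scaled down for SMT siblings and RT load, plain truncation could report a capacity of 0 for a group that can still run a task. A small standalone sketch (SCHED_LOAD_SCALE = 1024 is assumed, matching this kernel):

#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	/* roughly: an SMT sibling, a full cpu, and a 2-thread core */
	unsigned long power[] = { 589, 1024, 1178 };
	int i;

	for (i = 0; i < 3; i++)
		printf("cpu_power=%4lu truncated=%lu rounded=%lu\n",
		       power[i], power[i] / SCHED_LOAD_SCALE,
		       DIV_ROUND_CLOSEST(power[i], SCHED_LOAD_SCALE));
	return 0;
}
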
/**
@@ -3723,9 +3897,13 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
const struct cpumask *cpus, int *balance,
struct sd_lb_stats *sds)
{
+ struct sched_domain *child = sd->child;
struct sched_group *group = sd->groups;
struct sg_lb_stats sgs;
- int load_idx;
+ int load_idx, prefer_sibling = 0;
+
+ if (child && child->flags & SD_PREFER_SIBLING)
+ prefer_sibling = 1;
init_sd_power_savings_stats(sd, sds, idle);
load_idx = get_sd_load_idx(sd, idle);
@@ -3736,14 +3914,22 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));
memset(&sgs, 0, sizeof(sgs));
- update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+ update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
local_group, cpus, balance, &sgs);
if (local_group && balance && !(*balance))
return;
sds->total_load += sgs.group_load;
- sds->total_pwr += group->__cpu_power;
+ sds->total_pwr += group->cpu_power;
+
+ /*
+ * In case the child domain prefers that tasks go to siblings
+ * first, lower the group capacity to one so that we'll try
+ * to move all the excess tasks away.
+ */
+ if (prefer_sibling)
+ sgs.group_capacity = min(sgs.group_capacity, 1UL);
if (local_group) {
sds->this_load = sgs.avg_load;
@@ -3763,7 +3949,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
update_sd_power_savings_stats(group, sds, local_group, &sgs);
group = group->next;
} while (group != sd->groups);
-
}
/**
@@ -3801,28 +3986,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
* moving them.
*/
- pwr_now += sds->busiest->__cpu_power *
+ pwr_now += sds->busiest->cpu_power *
min(sds->busiest_load_per_task, sds->max_load);
- pwr_now += sds->this->__cpu_power *
+ pwr_now += sds->this->cpu_power *
min(sds->this_load_per_task, sds->this_load);
pwr_now /= SCHED_LOAD_SCALE;
/* Amount of load we'd subtract */
- tmp = sg_div_cpu_power(sds->busiest,
- sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
+ sds->busiest->cpu_power;
if (sds->max_load > tmp)
- pwr_move += sds->busiest->__cpu_power *
+ pwr_move += sds->busiest->cpu_power *
min(sds->busiest_load_per_task, sds->max_load - tmp);
/* Amount of load we'd add */
- if (sds->max_load * sds->busiest->__cpu_power <
+ if (sds->max_load * sds->busiest->cpu_power <
sds->busiest_load_per_task * SCHED_LOAD_SCALE)
- tmp = sg_div_cpu_power(sds->this,
- sds->max_load * sds->busiest->__cpu_power);
+ tmp = (sds->max_load * sds->busiest->cpu_power) /
+ sds->this->cpu_power;
else
- tmp = sg_div_cpu_power(sds->this,
- sds->busiest_load_per_task * SCHED_LOAD_SCALE);
- pwr_move += sds->this->__cpu_power *
+ tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
+ sds->this->cpu_power;
+ pwr_move += sds->this->cpu_power *
min(sds->this_load_per_task, sds->this_load + tmp);
pwr_move /= SCHED_LOAD_SCALE;
@@ -3857,8 +4042,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
sds->max_load - sds->busiest_load_per_task);
/* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * sds->busiest->__cpu_power,
- (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+ *imbalance = min(max_pull * sds->busiest->cpu_power,
+ (sds->avg_load - sds->this_load) * sds->this->cpu_power)
/ SCHED_LOAD_SCALE;
/*
@@ -3976,6 +4161,26 @@ ret:
return NULL;
}
+static struct sched_group *group_of(int cpu)
+{
+ struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+ if (!sd)
+ return NULL;
+
+ return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+ struct sched_group *group = group_of(cpu);
+
+ if (!group)
+ return SCHED_LOAD_SCALE;
+
+ return group->cpu_power;
+}
+
/*
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
@@ -3988,15 +4193,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
int i;
for_each_cpu(i, sched_group_cpus(group)) {
+ unsigned long power = power_of(i);
+ unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
unsigned long wl;
if (!cpumask_test_cpu(i, cpus))
continue;
rq = cpu_rq(i);
- wl = weighted_cpuload(i);
+ wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
+ wl /= power;
- if (rq->nr_running == 1 && wl > imbalance)
+ if (capacity && rq->nr_running == 1 && wl > imbalance)
continue;
if (wl > max_load) {
@@ -5325,7 +5533,7 @@ need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
- rcu_qsctr_inc(cpu);
+ rcu_sched_qs(cpu);
prev = rq->curr;
switch_count = &prev->nivcsw;
@@ -5349,10 +5557,7 @@ need_resched_nonpreemptible:
switch_count = &prev->nvcsw;
}
-#ifdef CONFIG_SMP
- if (prev->sched_class->pre_schedule)
- prev->sched_class->pre_schedule(rq, prev);
-#endif
+ pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
@@ -5378,6 +5583,8 @@ need_resched_nonpreemptible:
} else
spin_unlock_irq(&rq->lock);
+ post_schedule(rq);
+
if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
@@ -6123,17 +6330,25 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
unsigned long flags;
const struct sched_class *prev_class = p->sched_class;
struct rq *rq;
+ int reset_on_fork;
/* may grab non-irq protected spin_locks */
BUG_ON(in_interrupt());
recheck:
/* double check policy once rq lock held */
- if (policy < 0)
+ if (policy < 0) {
+ reset_on_fork = p->sched_reset_on_fork;
policy = oldpolicy = p->policy;
- else if (policy != SCHED_FIFO && policy != SCHED_RR &&
- policy != SCHED_NORMAL && policy != SCHED_BATCH &&
- policy != SCHED_IDLE)
- return -EINVAL;
+ } else {
+ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
+ policy &= ~SCHED_RESET_ON_FORK;
+
+ if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ policy != SCHED_NORMAL && policy != SCHED_BATCH &&
+ policy != SCHED_IDLE)
+ return -EINVAL;
+ }
+
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
@@ -6177,6 +6392,10 @@ recheck:
/* can't change other user's priorities */
if (!check_same_owner(p))
return -EPERM;
+
+ /* Normal users shall not reset the sched_reset_on_fork flag */
+ if (p->sched_reset_on_fork && !reset_on_fork)
+ return -EPERM;
}
if (user) {
@@ -6220,6 +6439,8 @@ recheck:
if (running)
p->sched_class->put_prev_task(rq, p);
+ p->sched_reset_on_fork = reset_on_fork;
+
oldprio = p->prio;
__setscheduler(rq, p, policy, param->sched_priority);
@@ -6336,14 +6557,15 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
if (p) {
retval = security_task_getscheduler(p);
if (!retval)
- retval = p->policy;
+ retval = p->policy
+ | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
read_unlock(&tasklist_lock);
return retval;
}
/**
- * sys_sched_getscheduler - get the RT priority of a thread
+ * sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
*/
@@ -6571,19 +6793,9 @@ static inline int should_resched(void)
static void __cond_resched(void)
{
-#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
- __might_sleep(__FILE__, __LINE__);
-#endif
- /*
- * The BKS might be reacquired before we have dropped
- * PREEMPT_ACTIVE, which could trigger a second
- * cond_resched() call.
- */
- do {
- add_preempt_count(PREEMPT_ACTIVE);
- schedule();
- sub_preempt_count(PREEMPT_ACTIVE);
- } while (need_resched());
+ add_preempt_count(PREEMPT_ACTIVE);
+ schedule();
+ sub_preempt_count(PREEMPT_ACTIVE);
}
int __sched _cond_resched(void)
@@ -6597,18 +6809,20 @@ int __sched _cond_resched(void)
EXPORT_SYMBOL(_cond_resched);
/*
- * cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
* This works OK both with and without CONFIG_PREEMPT. We do strange low-level
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
-int cond_resched_lock(spinlock_t *lock)
+int __cond_resched_lock(spinlock_t *lock)
{
int resched = should_resched();
int ret = 0;
+ lockdep_assert_held(lock);
+
if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
if (resched)
@@ -6620,9 +6834,9 @@ int cond_resched_lock(spinlock_t *lock)
}
return ret;
}
-EXPORT_SYMBOL(cond_resched_lock);
+EXPORT_SYMBOL(__cond_resched_lock);
-int __sched cond_resched_softirq(void)
+int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
@@ -6634,7 +6848,7 @@ int __sched cond_resched_softirq(void)
}
return 0;
}
-EXPORT_SYMBOL(cond_resched_softirq);
+EXPORT_SYMBOL(__cond_resched_softirq);
/**
* yield - yield the current processor to other threads.
@@ -6658,11 +6872,13 @@ EXPORT_SYMBOL(yield);
*/
void __sched io_schedule(void)
{
- struct rq *rq = &__raw_get_cpu_var(runqueues);
+ struct rq *rq = raw_rq();
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
+ current->in_iowait = 1;
schedule();
+ current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
}
@@ -6670,12 +6886,14 @@ EXPORT_SYMBOL(io_schedule);
long __sched io_schedule_timeout(long timeout)
{
- struct rq *rq = &__raw_get_cpu_var(runqueues);
+ struct rq *rq = raw_rq();
long ret;
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
+ current->in_iowait = 1;
ret = schedule_timeout(timeout);
+ current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
return ret;
@@ -6992,8 +7210,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
/* Need help from migration thread: drop lock and wait. */
+ struct task_struct *mt = rq->migration_thread;
+
+ get_task_struct(mt);
task_rq_unlock(rq, &flags);
wake_up_process(rq->migration_thread);
+ put_task_struct(mt);
wait_for_completion(&req.done);
tlb_migrate_finish(p->mm);
return 0;
@@ -7051,6 +7273,11 @@ fail:
return ret;
}
+#define RCU_MIGRATION_IDLE 0
+#define RCU_MIGRATION_NEED_QS 1
+#define RCU_MIGRATION_GOT_QS 2
+#define RCU_MIGRATION_MUST_SYNC 3
+
/*
* migration_thread - this is a highprio system thread that performs
* thread migration by bumping thread off CPU then 'pushing' onto
@@ -7058,6 +7285,7 @@ fail:
*/
static int migration_thread(void *data)
{
+ int badcpu;
int cpu = (long)data;
struct rq *rq;
@@ -7092,8 +7320,17 @@ static int migration_thread(void *data)
req = list_entry(head->next, struct migration_req, list);
list_del_init(head->next);
- spin_unlock(&rq->lock);
- __migrate_task(req->task, cpu, req->dest_cpu);
+ if (req->task != NULL) {
+ spin_unlock(&rq->lock);
+ __migrate_task(req->task, cpu, req->dest_cpu);
+ } else if (likely(cpu == (badcpu = smp_processor_id()))) {
+ req->dest_cpu = RCU_MIGRATION_GOT_QS;
+ spin_unlock(&rq->lock);
+ } else {
+ req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+ spin_unlock(&rq->lock);
+ WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+ }
local_irq_enable();
complete(&req->done);
@@ -7625,7 +7862,7 @@ static int __init migration_init(void)
migration_call(&migration_notifier, CPU_ONLINE, cpu);
register_cpu_notifier(&migration_notifier);
- return err;
+ return 0;
}
early_initcall(migration_init);
#endif
@@ -7672,7 +7909,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
break;
}
- if (!group->__cpu_power) {
+ if (!group->cpu_power) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
@@ -7696,9 +7933,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
printk(KERN_CONT " %s", str);
- if (group->__cpu_power != SCHED_LOAD_SCALE) {
- printk(KERN_CONT " (__cpu_power = %d)",
- group->__cpu_power);
+ if (group->cpu_power != SCHED_LOAD_SCALE) {
+ printk(KERN_CONT " (cpu_power = %d)",
+ group->cpu_power);
}
group = group->next;
@@ -7841,7 +8078,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
rq->rd = rd;
cpumask_set_cpu(rq->cpu, rd->span);
- if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
+ if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
spin_unlock_irqrestore(&rq->lock, flags);
@@ -7983,7 +8220,7 @@ init_sched_build_groups(const struct cpumask *span,
continue;
cpumask_clear(sched_group_cpus(sg));
- sg->__cpu_power = 0;
+ sg->cpu_power = 0;
for_each_cpu(j, span) {
if (group_fn(j, cpu_map, NULL, tmpmask) != group)
@@ -8091,6 +8328,39 @@ struct static_sched_domain {
DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};
+struct s_data {
+#ifdef CONFIG_NUMA
+ int sd_allnodes;
+ cpumask_var_t domainspan;
+ cpumask_var_t covered;
+ cpumask_var_t notcovered;
+#endif
+ cpumask_var_t nodemask;
+ cpumask_var_t this_sibling_map;
+ cpumask_var_t this_core_map;
+ cpumask_var_t send_covered;
+ cpumask_var_t tmpmask;
+ struct sched_group **sched_group_nodes;
+ struct root_domain *rd;
+};
+
+enum s_alloc {
+ sa_sched_groups = 0,
+ sa_rootdomain,
+ sa_tmpmask,
+ sa_send_covered,
+ sa_this_core_map,
+ sa_this_sibling_map,
+ sa_nodemask,
+ sa_sched_group_nodes,
+#ifdef CONFIG_NUMA
+ sa_notcovered,
+ sa_covered,
+ sa_domainspan,
+#endif
+ sa_none,
+};
+
/*
* SMT sched-domains:
*/
@@ -8208,11 +8478,76 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
continue;
}
- sg_inc_cpu_power(sg, sd->groups->__cpu_power);
+ sg->cpu_power += sd->groups->cpu_power;
}
sg = sg->next;
} while (sg != group_head);
}
+
+static int build_numa_sched_groups(struct s_data *d,
+ const struct cpumask *cpu_map, int num)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg, *prev;
+ int n, j;
+
+ cpumask_clear(d->covered);
+ cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+ if (cpumask_empty(d->nodemask)) {
+ d->sched_group_nodes[num] = NULL;
+ goto out;
+ }
+
+ sched_domain_node_span(num, d->domainspan);
+ cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, num);
+ if (!sg) {
+ printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+ num);
+ return -ENOMEM;
+ }
+ d->sched_group_nodes[num] = sg;
+
+ for_each_cpu(j, d->nodemask) {
+ sd = &per_cpu(node_domains, j).sd;
+ sd->groups = sg;
+ }
+
+ sg->cpu_power = 0;
+ cpumask_copy(sched_group_cpus(sg), d->nodemask);
+ sg->next = sg;
+ cpumask_or(d->covered, d->covered, d->nodemask);
+
+ prev = sg;
+ for (j = 0; j < nr_node_ids; j++) {
+ n = (num + j) % nr_node_ids;
+ cpumask_complement(d->notcovered, d->covered);
+ cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+ cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+ if (cpumask_empty(d->tmpmask))
+ break;
+ cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+ if (cpumask_empty(d->tmpmask))
+ continue;
+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, num);
+ if (!sg) {
+ printk(KERN_WARNING
+ "Can not alloc domain group for node %d\n", j);
+ return -ENOMEM;
+ }
+ sg->cpu_power = 0;
+ cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+ sg->next = prev->next;
+ cpumask_or(d->covered, d->covered, d->tmpmask);
+ prev->next = sg;
+ prev = sg;
+ }
+out:
+ return 0;
+}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_NUMA
@@ -8266,15 +8601,13 @@ static void free_sched_groups(const struct cpumask *cpu_map,
* there are asymmetries in the topology. If there are asymmetries, group
* having more cpu_power will pickup more load compared to the group having
* less cpu_power.
- *
- * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
- * the maximum number of tasks a group can handle in the presence of other idle
- * or lightly loaded groups in the same sched domain.
*/
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
struct sched_domain *child;
struct sched_group *group;
+ long power;
+ int weight;
WARN_ON(!sd || !sd->groups);
@@ -8283,28 +8616,32 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
child = sd->child;
- sd->groups->__cpu_power = 0;
+ sd->groups->cpu_power = 0;
- /*
- * For perf policy, if the groups in child domain share resources
- * (for example cores sharing some portions of the cache hierarchy
- * or SMT), then set this domain groups cpu_power such that each group
- * can handle only one task, when there are other idle groups in the
- * same sched domain.
- */
- if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
- (child->flags &
- (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
- sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
+ if (!child) {
+ power = SCHED_LOAD_SCALE;
+ weight = cpumask_weight(sched_domain_span(sd));
+ /*
+ * SMT siblings share the power of a single core.
+ * Usually multiple threads get a better yield out of
+ * that one core than a single thread would have,
+ * reflect that in sd->smt_gain.
+ */
+ if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+ power *= sd->smt_gain;
+ power /= weight;
+ power >>= SCHED_LOAD_SHIFT;
+ }
+ sd->groups->cpu_power += power;
return;
}
/*
- * add cpu_power of each child group to this groups cpu_power
+ * Add the cpu_power of each child group to this group's cpu_power.
*/
group = child->groups;
do {
- sg_inc_cpu_power(sd->groups, group->__cpu_power);
+ sd->groups->cpu_power += group->cpu_power;
group = group->next;
} while (group != child->groups);
}
@@ -8378,280 +8715,285 @@ static void set_domain_attribute(struct sched_domain *sd,
}
}
-/*
- * Build sched domains for a given set of cpus and attach the sched domains
- * to the individual cpus
- */
-static int __build_sched_domains(const struct cpumask *cpu_map,
- struct sched_domain_attr *attr)
-{
- int i, err = -ENOMEM;
- struct root_domain *rd;
- cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
- tmpmask;
+static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
+ const struct cpumask *cpu_map)
+{
+ switch (what) {
+ case sa_sched_groups:
+ free_sched_groups(cpu_map, d->tmpmask); /* fall through */
+ d->sched_group_nodes = NULL;
+ case sa_rootdomain:
+ free_rootdomain(d->rd); /* fall through */
+ case sa_tmpmask:
+ free_cpumask_var(d->tmpmask); /* fall through */
+ case sa_send_covered:
+ free_cpumask_var(d->send_covered); /* fall through */
+ case sa_this_core_map:
+ free_cpumask_var(d->this_core_map); /* fall through */
+ case sa_this_sibling_map:
+ free_cpumask_var(d->this_sibling_map); /* fall through */
+ case sa_nodemask:
+ free_cpumask_var(d->nodemask); /* fall through */
+ case sa_sched_group_nodes:
#ifdef CONFIG_NUMA
- cpumask_var_t domainspan, covered, notcovered;
- struct sched_group **sched_group_nodes = NULL;
- int sd_allnodes = 0;
-
- if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
- goto out;
- if (!alloc_cpumask_var(&covered, GFP_KERNEL))
- goto free_domainspan;
- if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
- goto free_covered;
-#endif
-
- if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
- goto free_notcovered;
- if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
- goto free_nodemask;
- if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
- goto free_this_sibling_map;
- if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
- goto free_this_core_map;
- if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
- goto free_send_covered;
+ kfree(d->sched_group_nodes); /* fall through */
+ case sa_notcovered:
+ free_cpumask_var(d->notcovered); /* fall through */
+ case sa_covered:
+ free_cpumask_var(d->covered); /* fall through */
+ case sa_domainspan:
+ free_cpumask_var(d->domainspan); /* fall through */
+#endif
+ case sa_none:
+ break;
+ }
+}
+static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
+ const struct cpumask *cpu_map)
+{
#ifdef CONFIG_NUMA
- /*
- * Allocate the per-node list of sched groups
- */
- sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
- GFP_KERNEL);
- if (!sched_group_nodes) {
+ if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
+ return sa_none;
+ if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
+ return sa_domainspan;
+ if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
+ return sa_covered;
+ /* Allocate the per-node list of sched groups */
+ d->sched_group_nodes = kcalloc(nr_node_ids,
+ sizeof(struct sched_group *), GFP_KERNEL);
+ if (!d->sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
- goto free_tmpmask;
- }
-#endif
-
- rd = alloc_rootdomain();
- if (!rd) {
+ return sa_notcovered;
+ }
+ sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
+#endif
+ if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
+ return sa_sched_group_nodes;
+ if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
+ return sa_nodemask;
+ if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
+ return sa_this_sibling_map;
+ if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+ return sa_this_core_map;
+ if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
+ return sa_send_covered;
+ d->rd = alloc_rootdomain();
+ if (!d->rd) {
printk(KERN_WARNING "Cannot alloc root domain\n");
- goto free_sched_groups;
+ return sa_tmpmask;
}
+ return sa_rootdomain;
+}
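
The s_alloc enum and __free_domain_allocs() above implement a staged-allocation pattern: the enum records how far allocation got, and the cleanup switch falls through so a single call frees exactly what was allocated. A standalone sketch of the same idea (not kernel code):

#include <stdio.h>
#include <stdlib.h>

enum stage { got_none, got_a, got_b };

struct ctx { void *a, *b; };

/* returns the last stage that allocated successfully */
static enum stage setup(struct ctx *c)
{
	c->a = malloc(16);
	if (!c->a)
		return got_none;
	c->b = malloc(16);
	if (!c->b)
		return got_a;
	return got_b;
}

/* frees the named stage and everything allocated before it */
static void teardown(struct ctx *c, enum stage s)
{
	switch (s) {
	case got_b:
		free(c->b);	/* fall through */
	case got_a:
		free(c->a);	/* fall through */
	case got_none:
		break;
	}
}

int main(void)
{
	struct ctx c = { NULL, NULL };
	enum stage s = setup(&c);

	if (s != got_b) {	/* partial failure: free only what we got */
		teardown(&c, s);
		return 1;
	}
	/* ... use c.a and c.b ... */
	teardown(&c, got_b);
	return 0;
}
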
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+ struct sched_domain *sd = NULL;
#ifdef CONFIG_NUMA
- sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
-#endif
-
- /*
- * Set up domains for cpus specified by the cpu_map.
- */
- for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = NULL, *p;
-
- cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
-
-#ifdef CONFIG_NUMA
- if (cpumask_weight(cpu_map) >
- SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
- sd = &per_cpu(allnodes_domains, i).sd;
- SD_INIT(sd, ALLNODES);
- set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), cpu_map);
- cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
- p = sd;
- sd_allnodes = 1;
- } else
- p = NULL;
+ struct sched_domain *parent;
- sd = &per_cpu(node_domains, i).sd;
- SD_INIT(sd, NODE);
+ d->sd_allnodes = 0;
+ if (cpumask_weight(cpu_map) >
+ SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+ sd = &per_cpu(allnodes_domains, i).sd;
+ SD_INIT(sd, ALLNODES);
set_domain_attribute(sd, attr);
- sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
- sd->parent = p;
- if (p)
- p->child = sd;
- cpumask_and(sched_domain_span(sd),
- sched_domain_span(sd), cpu_map);
+ cpumask_copy(sched_domain_span(sd), cpu_map);
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+ d->sd_allnodes = 1;
+ }
+ parent = sd;
+
+ sd = &per_cpu(node_domains, i).sd;
+ SD_INIT(sd, NODE);
+ set_domain_attribute(sd, attr);
+ sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+ sd->parent = parent;
+ if (parent)
+ parent->child = sd;
+ cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
#endif
+ return sd;
+}
- p = sd;
- sd = &per_cpu(phys_domains, i).sd;
- SD_INIT(sd, CPU);
- set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), nodemask);
- sd->parent = p;
- if (p)
- p->child = sd;
- cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
+static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd;
+ sd = &per_cpu(phys_domains, i).sd;
+ SD_INIT(sd, CPU);
+ set_domain_attribute(sd, attr);
+ cpumask_copy(sched_domain_span(sd), d->nodemask);
+ sd->parent = parent;
+ if (parent)
+ parent->child = sd;
+ cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
+ return sd;
+}
+static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd = parent;
#ifdef CONFIG_SCHED_MC
- p = sd;
- sd = &per_cpu(core_domains, i).sd;
- SD_INIT(sd, MC);
- set_domain_attribute(sd, attr);
- cpumask_and(sched_domain_span(sd), cpu_map,
- cpu_coregroup_mask(i));
- sd->parent = p;
- p->child = sd;
- cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
+ sd = &per_cpu(core_domains, i).sd;
+ SD_INIT(sd, MC);
+ set_domain_attribute(sd, attr);
+ cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
+ sd->parent = parent;
+ parent->child = sd;
+ cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
#endif
+ return sd;
+}
+static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd = parent;
#ifdef CONFIG_SCHED_SMT
- p = sd;
- sd = &per_cpu(cpu_domains, i).sd;
- SD_INIT(sd, SIBLING);
- set_domain_attribute(sd, attr);
- cpumask_and(sched_domain_span(sd),
- topology_thread_cpumask(i), cpu_map);
- sd->parent = p;
- p->child = sd;
- cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
+ sd = &per_cpu(cpu_domains, i).sd;
+ SD_INIT(sd, SIBLING);
+ set_domain_attribute(sd, attr);
+ cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
+ sd->parent = parent;
+ parent->child = sd;
+ cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
#endif
- }
+ return sd;
+}
+static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
+ const struct cpumask *cpu_map, int cpu)
+{
+ switch (l) {
#ifdef CONFIG_SCHED_SMT
- /* Set up CPU (sibling) groups */
- for_each_cpu(i, cpu_map) {
- cpumask_and(this_sibling_map,
- topology_thread_cpumask(i), cpu_map);
- if (i != cpumask_first(this_sibling_map))
- continue;
-
- init_sched_build_groups(this_sibling_map, cpu_map,
- &cpu_to_cpu_group,
- send_covered, tmpmask);
- }
+ case SD_LV_SIBLING: /* set up CPU (sibling) groups */
+ cpumask_and(d->this_sibling_map, cpu_map,
+ topology_thread_cpumask(cpu));
+ if (cpu == cpumask_first(d->this_sibling_map))
+ init_sched_build_groups(d->this_sibling_map, cpu_map,
+ &cpu_to_cpu_group,
+ d->send_covered, d->tmpmask);
+ break;
#endif
-
#ifdef CONFIG_SCHED_MC
- /* Set up multi-core groups */
- for_each_cpu(i, cpu_map) {
- cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
- if (i != cpumask_first(this_core_map))
- continue;
-
- init_sched_build_groups(this_core_map, cpu_map,
- &cpu_to_core_group,
- send_covered, tmpmask);
- }
+ case SD_LV_MC: /* set up multi-core groups */
+ cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
+ if (cpu == cpumask_first(d->this_core_map))
+ init_sched_build_groups(d->this_core_map, cpu_map,
+ &cpu_to_core_group,
+ d->send_covered, d->tmpmask);
+ break;
#endif
-
- /* Set up physical groups */
- for (i = 0; i < nr_node_ids; i++) {
- cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(nodemask))
- continue;
-
- init_sched_build_groups(nodemask, cpu_map,
- &cpu_to_phys_group,
- send_covered, tmpmask);
- }
-
+ case SD_LV_CPU: /* set up physical groups */
+ cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
+ if (!cpumask_empty(d->nodemask))
+ init_sched_build_groups(d->nodemask, cpu_map,
+ &cpu_to_phys_group,
+ d->send_covered, d->tmpmask);
+ break;
#ifdef CONFIG_NUMA
- /* Set up node groups */
- if (sd_allnodes) {
- init_sched_build_groups(cpu_map, cpu_map,
- &cpu_to_allnodes_group,
- send_covered, tmpmask);
+ case SD_LV_ALLNODES:
+ init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
+ d->send_covered, d->tmpmask);
+ break;
+#endif
+ default:
+ break;
}
+}
- for (i = 0; i < nr_node_ids; i++) {
- /* Set up node groups */
- struct sched_group *sg, *prev;
- int j;
-
- cpumask_clear(covered);
- cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(nodemask)) {
- sched_group_nodes[i] = NULL;
- continue;
- }
+/*
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
+ */
+static int __build_sched_domains(const struct cpumask *cpu_map,
+ struct sched_domain_attr *attr)
+{
+ enum s_alloc alloc_state = sa_none;
+ struct s_data d;
+ struct sched_domain *sd;
+ int i;
+#ifdef CONFIG_NUMA
+ d.sd_allnodes = 0;
+#endif
- sched_domain_node_span(i, domainspan);
- cpumask_and(domainspan, domainspan, cpu_map);
+ alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
+ if (alloc_state != sa_rootdomain)
+ goto error;
+ alloc_state = sa_sched_groups;
- sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
- GFP_KERNEL, i);
- if (!sg) {
- printk(KERN_WARNING "Can not alloc domain group for "
- "node %d\n", i);
- goto error;
- }
- sched_group_nodes[i] = sg;
- for_each_cpu(j, nodemask) {
- struct sched_domain *sd;
+ /*
+ * Set up domains for cpus specified by the cpu_map.
+ */
+ for_each_cpu(i, cpu_map) {
+ cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
+ cpu_map);
- sd = &per_cpu(node_domains, j).sd;
- sd->groups = sg;
- }
- sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), nodemask);
- sg->next = sg;
- cpumask_or(covered, covered, nodemask);
- prev = sg;
+ sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
+ sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
+ sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
+ sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
+ }
- for (j = 0; j < nr_node_ids; j++) {
- int n = (i + j) % nr_node_ids;
+ for_each_cpu(i, cpu_map) {
+ build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+ build_sched_groups(&d, SD_LV_MC, cpu_map, i);
+ }
- cpumask_complement(notcovered, covered);
- cpumask_and(tmpmask, notcovered, cpu_map);
- cpumask_and(tmpmask, tmpmask, domainspan);
- if (cpumask_empty(tmpmask))
- break;
+ /* Set up physical groups */
+ for (i = 0; i < nr_node_ids; i++)
+ build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
- cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
- if (cpumask_empty(tmpmask))
- continue;
+#ifdef CONFIG_NUMA
+ /* Set up node groups */
+ if (d.sd_allnodes)
+ build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
- sg = kmalloc_node(sizeof(struct sched_group) +
- cpumask_size(),
- GFP_KERNEL, i);
- if (!sg) {
- printk(KERN_WARNING
- "Can not alloc domain group for node %d\n", j);
- goto error;
- }
- sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), tmpmask);
- sg->next = prev->next;
- cpumask_or(covered, covered, tmpmask);
- prev->next = sg;
- prev = sg;
- }
- }
+ for (i = 0; i < nr_node_ids; i++)
+ if (build_numa_sched_groups(&d, cpu_map, i))
+ goto error;
#endif
/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
-
+ sd = &per_cpu(cpu_domains, i).sd;
init_sched_groups_power(i, sd);
}
#endif
#ifdef CONFIG_SCHED_MC
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(core_domains, i).sd;
-
+ sd = &per_cpu(core_domains, i).sd;
init_sched_groups_power(i, sd);
}
#endif
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
-
+ sd = &per_cpu(phys_domains, i).sd;
init_sched_groups_power(i, sd);
}
#ifdef CONFIG_NUMA
for (i = 0; i < nr_node_ids; i++)
- init_numa_sched_groups_power(sched_group_nodes[i]);
+ init_numa_sched_groups_power(d.sched_group_nodes[i]);
- if (sd_allnodes) {
+ if (d.sd_allnodes) {
struct sched_group *sg;
cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
- tmpmask);
+ d.tmpmask);
init_numa_sched_groups_power(sg);
}
#endif
/* Attach the domains */
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i).sd;
#elif defined(CONFIG_SCHED_MC)
@@ -8659,44 +9001,16 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#else
sd = &per_cpu(phys_domains, i).sd;
#endif
- cpu_attach_domain(sd, rd, i);
+ cpu_attach_domain(sd, d.rd, i);
}
- err = 0;
-
-free_tmpmask:
- free_cpumask_var(tmpmask);
-free_send_covered:
- free_cpumask_var(send_covered);
-free_this_core_map:
- free_cpumask_var(this_core_map);
-free_this_sibling_map:
- free_cpumask_var(this_sibling_map);
-free_nodemask:
- free_cpumask_var(nodemask);
-free_notcovered:
-#ifdef CONFIG_NUMA
- free_cpumask_var(notcovered);
-free_covered:
- free_cpumask_var(covered);
-free_domainspan:
- free_cpumask_var(domainspan);
-out:
-#endif
- return err;
-
-free_sched_groups:
-#ifdef CONFIG_NUMA
- kfree(sched_group_nodes);
-#endif
- goto free_tmpmask;
+ d.sched_group_nodes = NULL; /* don't free this we still need it */
+ __free_domain_allocs(&d, sa_tmpmask, cpu_map);
+ return 0;
-#ifdef CONFIG_NUMA
error:
- free_sched_groups(cpu_map, tmpmask);
- free_rootdomain(rd);
- goto free_tmpmask;
-#endif
+ __free_domain_allocs(&d, alloc_state, cpu_map);
+ return -ENOMEM;
}
static int build_sched_domains(const struct cpumask *cpu_map)
@@ -9304,11 +9618,11 @@ void __init sched_init(void)
* system cpu resource, based on the weight assigned to root
* user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
* by letting tasks of init_task_group sit in a separate cfs_rq
- * (init_cfs_rq) and having one entity represent this group of
+ * (init_tg_cfs_rq) and having one entity represent this group of
* tasks in rq->cfs (i.e init_task_group->se[] != NULL).
*/
init_tg_cfs_entry(&init_task_group,
- &per_cpu(init_cfs_rq, i),
+ &per_cpu(init_tg_cfs_rq, i),
&per_cpu(init_sched_entity, i), i, 1,
root_task_group.se[i]);
@@ -9334,6 +9648,7 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
+ rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
@@ -9398,13 +9713,20 @@ void __init sched_init(void)
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
-void __might_sleep(char *file, int line)
+static inline int preempt_count_equals(int preempt_offset)
+{
+ int nested = preempt_count() & ~PREEMPT_ACTIVE;
+
+ return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
+}
+
+void __might_sleep(char *file, int line, int preempt_offset)
{
#ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
- if ((!in_atomic() && !irqs_disabled()) ||
- system_state != SYSTEM_RUNNING || oops_in_progress)
+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+ system_state != SYSTEM_RUNNING || oops_in_progress)
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
@@ -10581,3 +10903,113 @@ struct cgroup_subsys cpuacct_subsys = {
.subsys_id = cpuacct_subsys_id,
};
#endif /* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST -2
+#define RCU_EXPEDITED_STATE_IDLE -1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
+int rcu_expedited_torture_stats(char *page)
+{
+ int cnt = 0;
+ int cpu;
+
+ cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+ for_each_online_cpu(cpu) {
+ cnt += sprintf(&page[cnt], " %d:%d",
+ cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+ }
+ cnt += sprintf(&page[cnt], "\n");
+ return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly. This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier. Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+ int cpu;
+ unsigned long flags;
+ bool need_full_sync = 0;
+ struct rq *rq;
+ struct migration_req *req;
+ long snap;
+ int trycount = 0;
+
+ smp_mb(); /* ensure prior mod happens before capturing snap. */
+ snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+ get_online_cpus();
+ while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+ put_online_cpus();
+ if (trycount++ < 10)
+ udelay(trycount * num_online_cpus());
+ else {
+ synchronize_sched();
+ return;
+ }
+ if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+ smp_mb(); /* ensure test happens before caller kfree */
+ return;
+ }
+ get_online_cpus();
+ }
+ rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
+ for_each_online_cpu(cpu) {
+ rq = cpu_rq(cpu);
+ req = &per_cpu(rcu_migration_req, cpu);
+ init_completion(&req->done);
+ req->task = NULL;
+ req->dest_cpu = RCU_MIGRATION_NEED_QS;
+ spin_lock_irqsave(&rq->lock, flags);
+ list_add(&req->list, &rq->migration_queue);
+ spin_unlock_irqrestore(&rq->lock, flags);
+ wake_up_process(rq->migration_thread);
+ }
+ for_each_online_cpu(cpu) {
+ rcu_expedited_state = cpu;
+ req = &per_cpu(rcu_migration_req, cpu);
+ rq = cpu_rq(cpu);
+ wait_for_completion(&req->done);
+ spin_lock_irqsave(&rq->lock, flags);
+ if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+ need_full_sync = 1;
+ req->dest_cpu = RCU_MIGRATION_IDLE;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ }
+ rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+ mutex_unlock(&rcu_sched_expedited_mutex);
+ put_online_cpus();
+ if (need_full_sync)
+ synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
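
The mutex_trylock() retry loop in synchronize_sched_expedited() above lets a caller piggyback on a concurrent expedited pass: it snapshots the completion counter up front and returns early once the counter has moved past the snapshot. A standalone pthread sketch of that pattern (not kernel code; all names are made up):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t expedited_mutex = PTHREAD_MUTEX_INITIALIZER;
static long expedited_count;

static void expedited(void)
{
	long snap = __sync_fetch_and_add(&expedited_count, 0) + 1;

	while (pthread_mutex_trylock(&expedited_mutex)) {
		/* a later pass finished after we started: it covers us too */
		if (__sync_fetch_and_add(&expedited_count, 0) - snap > 0)
			return;
		sched_yield();
	}
	/* ... the expensive per-CPU work would go here ... */
	__sync_fetch_and_add(&expedited_count, 1);
	pthread_mutex_unlock(&expedited_mutex);
}

int main(void)
{
	expedited();
	printf("expedited_count = %ld\n", expedited_count);
	return 0;
}
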
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index d014efbf947a..0f052fc674d5 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -127,21 +127,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
/*
* If the cpu was currently mapped to a different value, we
- * first need to unmap the old value
+ * need to map it to the new value then remove the old value.
+ * Note, we must add the new value first, otherwise we risk the
+ * cpu being cleared from pri_active, and this cpu could be
+ * missed for a push or pull.
*/
- if (likely(oldpri != CPUPRI_INVALID)) {
- struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
-
- spin_lock_irqsave(&vec->lock, flags);
-
- vec->count--;
- if (!vec->count)
- clear_bit(oldpri, cp->pri_active);
- cpumask_clear_cpu(cpu, vec->mask);
-
- spin_unlock_irqrestore(&vec->lock, flags);
- }
-
if (likely(newpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
@@ -154,6 +144,18 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
spin_unlock_irqrestore(&vec->lock, flags);
}
+ if (likely(oldpri != CPUPRI_INVALID)) {
+ struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
+
+ spin_lock_irqsave(&vec->lock, flags);
+
+ vec->count--;
+ if (!vec->count)
+ clear_bit(oldpri, cp->pri_active);
+ cpumask_clear_cpu(cpu, vec->mask);
+
+ spin_unlock_irqrestore(&vec->lock, flags);
+ }
*currpri = newpri;
}
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 70c7e0b79946..5ddbd0891267 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -409,6 +409,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.wait_max);
PN(se.wait_sum);
P(se.wait_count);
+ PN(se.iowait_sum);
+ P(se.iowait_count);
P(sched_info.bkl_count);
P(se.nr_migrations);
P(se.nr_migrations_cold);
@@ -479,6 +481,8 @@ void proc_sched_set_task(struct task_struct *p)
p->se.wait_max = 0;
p->se.wait_sum = 0;
p->se.wait_count = 0;
+ p->se.iowait_sum = 0;
+ p->se.iowait_count = 0;
p->se.sleep_max = 0;
p->se.sum_sleep_runtime = 0;
p->se.block_max = 0;
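
With CONFIG_SCHED_DEBUG and CONFIG_SCHEDSTATS, the new iowait statistics printed by the PN()/P() lines above are visible in /proc/<pid>/sched. A small sketch that dumps them for the current task (it assumes the field labels match the se.iowait_* names used here):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/sched", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "iowait"))
			fputs(line, stdout);	/* se.iowait_sum, se.iowait_count */
	fclose(f);
	return 0;
}
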
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 652e8bdef9aa..aa7f84121016 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -24,7 +24,7 @@
/*
* Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
*
* NOTE: this latency value is not the same as the concept of
* 'timeslice length' - timeslices in CFS are of variable length
@@ -34,13 +34,13 @@
* (to see the precise effective timeslice length of your workload,
* run vmstat and monitor the context-switches (cs) field)
*/
-unsigned int sysctl_sched_latency = 20000000ULL;
+unsigned int sysctl_sched_latency = 5000000ULL;
/*
* Minimal preemption granularity for CPU-bound tasks:
- * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 4000000ULL;
+unsigned int sysctl_sched_min_granularity = 1000000ULL;
/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
@@ -48,10 +48,10 @@ unsigned int sysctl_sched_min_granularity = 4000000ULL;
static unsigned int sched_nr_latency = 5;
/*
- * After fork, child runs first. (default) If set to 0 then
+ * After fork, child runs first. If set to 0 (default) then
* parent will (try to) run first.
*/
-const_debug unsigned int sysctl_sched_child_runs_first = 1;
+unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
* sys_sched_yield() compat mode
@@ -63,13 +63,13 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
/*
* SCHED_OTHER wake-up granularity.
- * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
@@ -79,11 +79,6 @@ static const struct sched_class fair_sched_class;
* CFS operations on generic schedulable entities:
*/
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
- return container_of(se, struct task_struct, se);
-}
-
#ifdef CONFIG_FAIR_GROUP_SCHED
/* cpu runqueue to which this cfs_rq is attached */
@@ -95,6 +90,14 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(!entity_is_task(se));
+#endif
+ return container_of(se, struct task_struct, se);
+}
+
/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
for (; se; se = se->parent)
@@ -186,7 +189,12 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
}
}
-#else /* CONFIG_FAIR_GROUP_SCHED */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
+
+static inline struct task_struct *task_of(struct sched_entity *se)
+{
+ return container_of(se, struct task_struct, se);
+}
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
@@ -537,6 +545,12 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
schedstat_set(se->wait_count, se->wait_count + 1);
schedstat_set(se->wait_sum, se->wait_sum +
rq_of(cfs_rq)->clock - se->wait_start);
+#ifdef CONFIG_SCHEDSTATS
+ if (entity_is_task(se)) {
+ trace_sched_stat_wait(task_of(se),
+ rq_of(cfs_rq)->clock - se->wait_start);
+ }
+#endif
schedstat_set(se->wait_start, 0);
}
@@ -628,8 +642,10 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->sleep_start = 0;
se->sum_sleep_runtime += delta;
- if (tsk)
+ if (tsk) {
account_scheduler_latency(tsk, delta >> 10, 1);
+ trace_sched_stat_sleep(tsk, delta);
+ }
}
if (se->block_start) {
u64 delta = rq_of(cfs_rq)->clock - se->block_start;
@@ -644,6 +660,12 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->sum_sleep_runtime += delta;
if (tsk) {
+ if (tsk->in_iowait) {
+ se->iowait_sum += delta;
+ se->iowait_count++;
+ trace_sched_stat_iowait(tsk, delta);
+ }
+
/*
* Blocking time is in units of nanosecs, so shift by
* 20 to get a milliseconds-range estimation of the
@@ -705,11 +727,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
vruntime -= thresh;
}
-
- /* ensure we never gain time by being placed backwards. */
- vruntime = max_vruntime(se->vruntime, vruntime);
}
+ /* ensure we never gain time by being placed backwards. */
+ vruntime = max_vruntime(se->vruntime, vruntime);
+
se->vruntime = vruntime;
}
@@ -1046,17 +1068,21 @@ static void yield_task_fair(struct rq *rq)
* search starts with cpus closest then further out as needed,
* so we always favor a closer, idle cpu.
* Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_mask)
+ * hence we need to mask them out (rq->rd->online)
*
* Returns the CPU we should wake onto.
*/
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
+
+#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)
+
static int wake_idle(int cpu, struct task_struct *p)
{
struct sched_domain *sd;
int i;
unsigned int chosen_wakeup_cpu;
int this_cpu;
+ struct rq *task_rq = task_rq(p);
/*
* At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
@@ -1089,10 +1115,10 @@ static int wake_idle(int cpu, struct task_struct *p)
for_each_domain(cpu, sd) {
if ((sd->flags & SD_WAKE_IDLE)
|| ((sd->flags & SD_WAKE_IDLE_FAR)
- && !task_hot(p, task_rq(p)->clock, sd))) {
+ && !task_hot(p, task_rq->clock, sd))) {
for_each_cpu_and(i, sched_domain_span(sd),
&p->cpus_allowed) {
- if (cpu_active(i) && idle_cpu(i)) {
+ if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
if (i != task_cpu(p)) {
schedstat_inc(p,
se.nr_wakeups_idle);
@@ -1235,7 +1261,17 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
tg = task_group(p);
weight = p->se.load.weight;
- balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
+ /*
+ * In low-load situations, where prev_cpu is idle and this_cpu is idle
+ * due to the sync cause above having dropped tl to 0, we'll always have
+ * an imbalance, but there's really nothing you can do about that, so
+ * that's good too.
+ *
+ * Otherwise check if either cpus are near enough in load to allow this
+ * task to be woken on this_cpu.
+ */
+ balanced = !tl ||
+ 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
/*
@@ -1278,8 +1314,6 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
this_rq = cpu_rq(this_cpu);
new_cpu = prev_cpu;
- if (prev_cpu == this_cpu)
- goto out;
/*
* 'this_sd' is the first domain that both
* this_cpu and prev_cpu are present in:
@@ -1721,6 +1755,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
sched_info_queued(p);
update_curr(cfs_rq);
+ if (curr)
+ se->vruntime = curr->vruntime;
place_entity(cfs_rq, se, 1);
/* 'curr' will be NULL if the child belongs to a different group */
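The place_entity() hunk above moves the backwards-placement clamp out of the sleeper-credit branch so it applies to every placement. A hedged restatement of the invariant (the real helper already exists in sched_fair.c; the name below is illustrative only):

/* An entity's vruntime may only move forward when it is (re)placed;
 * otherwise a task could gain CPU time simply by sleeping or by being
 * re-enqueued. Signed arithmetic keeps the comparison wrap-safe. */
static inline u64 clamp_placement(u64 se_vruntime, u64 target)
{
	s64 delta = (s64)(target - se_vruntime);

	return delta > 0 ? target : se_vruntime;
}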
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 4569bfa7df9b..e2dc63a5815d 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,4 +1,4 @@
-SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
+SCHED_FEAT(NEW_FAIR_SLEEPERS, 0)
SCHED_FEAT(NORMALIZED_SLEEPER, 0)
SCHED_FEAT(ADAPTIVE_GRAN, 1)
SCHED_FEAT(WAKEUP_PREEMPT, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3918e01994e0..2eb4bd6a526c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,15 +3,18 @@
* policies)
*/
+#ifdef CONFIG_RT_GROUP_SCHED
+
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(!rt_entity_is_task(rt_se));
+#endif
return container_of(rt_se, struct task_struct, rt);
}
-#ifdef CONFIG_RT_GROUP_SCHED
-
-#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
-
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
@@ -26,6 +29,11 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
#define rt_entity_is_task(rt_se) (1)
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+ return container_of(rt_se, struct task_struct, rt);
+}
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
@@ -128,6 +136,11 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}
+static inline int has_pushable_tasks(struct rq *rq)
+{
+ return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
#else
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -602,6 +615,8 @@ static void update_curr_rt(struct rq *rq)
curr->se.exec_start = rq->clock;
cpuacct_charge(curr, delta_exec);
+ sched_rt_avg_update(rq, delta_exec);
+
if (!rt_bandwidth_enabled())
return;
@@ -874,8 +889,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
-
- inc_cpu_load(rq, p->se.load.weight);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -886,8 +899,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
dequeue_rt_entity(rt_se);
dequeue_pushable_task(rq, p);
-
- dec_cpu_load(rq, p->se.load.weight);
}
/*
@@ -1064,6 +1075,14 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
if (p)
dequeue_pushable_task(rq, p);
+#ifdef CONFIG_SMP
+ /*
+ * We detect this state here so that we can avoid taking the RQ
+ * lock again later if there is no need to push
+ */
+ rq->post_schedule = has_pushable_tasks(rq);
+#endif
+
return p;
}
@@ -1162,13 +1181,6 @@ static int find_lowest_rq(struct task_struct *task)
return -1; /* No targets found */
/*
- * Only consider CPUs that are usable for migration.
- * I guess we might want to change cpupri_find() to ignore those
- * in the first place.
- */
- cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
-
- /*
* At this point we have built a mask of cpus representing the
* lowest priority tasks in the system. Now we want to elect
* the best one based on our affinity and topology.
@@ -1262,11 +1274,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
return lowest_rq;
}
-static inline int has_pushable_tasks(struct rq *rq)
-{
- return !plist_head_empty(&rq->rt.pushable_tasks);
-}
-
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
@@ -1466,23 +1473,9 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
pull_rt_task(rq);
}
-/*
- * assumes rq->lock is held
- */
-static int needs_post_schedule_rt(struct rq *rq)
-{
- return has_pushable_tasks(rq);
-}
-
static void post_schedule_rt(struct rq *rq)
{
- /*
- * This is only called if needs_post_schedule_rt() indicates that
- * we need to push tasks away
- */
- spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
- spin_unlock_irq(&rq->lock);
}
/*
@@ -1758,7 +1751,6 @@ static const struct sched_class rt_sched_class = {
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
- .needs_post_schedule = needs_post_schedule_rt,
.post_schedule = post_schedule_rt,
.task_wake_up = task_wake_up_rt,
.switched_from = switched_from_rt,
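The pick_next_task_rt() hunk latches the push decision while rq->lock is still held, so the post-schedule path only re-takes the lock when there is real work to do. A simplified, self-contained sketch of the resulting flow (hypothetical type; not the literal core-scheduler code):

struct runqueue_like {
	spinlock_t lock;
	int post_schedule;		/* latched decision */
	struct plist_head pushable;	/* overloaded RT tasks */
};

/* Called with the lock held while picking the next task. */
static void latch_push_work(struct runqueue_like *rq)
{
	rq->post_schedule = !plist_head_empty(&rq->pushable);
}

/* Called after the context switch, with the lock dropped. */
static void maybe_push(struct runqueue_like *rq)
{
	if (!rq->post_schedule)
		return;			/* common case: no locking at all */

	spin_lock_irq(&rq->lock);
	/* push_rt_tasks(rq) runs here in the real scheduler */
	spin_unlock_irq(&rq->lock);
	rq->post_schedule = 0;
}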
diff --git a/kernel/softirq.c b/kernel/softirq.c
index eb5e131a0485..7db25067cd2d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -227,7 +227,7 @@ restart:
preempt_count() = prev_count;
}
- rcu_bh_qsctr_inc(cpu);
+ rcu_bh_qs(cpu);
}
h++;
pending >>= 1;
@@ -721,7 +721,7 @@ static int ksoftirqd(void * __bind_cpu)
preempt_enable_no_resched();
cond_resched();
preempt_disable();
- rcu_qsctr_inc((long)__bind_cpu);
+ rcu_sched_qs((long)__bind_cpu);
}
preempt_enable();
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 7932653c4ebd..5ddab730cb2f 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,44 +21,29 @@
#include <linux/debug_locks.h>
#include <linux/module.h>
+#ifndef _spin_trylock
int __lockfunc _spin_trylock(spinlock_t *lock)
{
- preempt_disable();
- if (_raw_spin_trylock(lock)) {
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable();
- return 0;
+ return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);
+#endif
+#ifndef _read_trylock
int __lockfunc _read_trylock(rwlock_t *lock)
{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable();
- return 0;
+ return __read_trylock(lock);
}
EXPORT_SYMBOL(_read_trylock);
+#endif
+#ifndef _write_trylock
int __lockfunc _write_trylock(rwlock_t *lock)
{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable();
- return 0;
+ return __write_trylock(lock);
}
EXPORT_SYMBOL(_write_trylock);
+#endif
/*
* If lockdep is enabled then we use the non-preemption spin-ops
@@ -67,132 +52,101 @@ EXPORT_SYMBOL(_write_trylock);
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#ifndef _read_lock
void __lockfunc _read_lock(rwlock_t *lock)
{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ __read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
+#endif
+#ifndef _spin_lock_irqsave
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-#else
- _raw_spin_lock_flags(lock, &flags);
-#endif
- return flags;
+ return __spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
+#ifndef _spin_lock_irq
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
- local_irq_disable();
- preempt_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ __spin_lock_irq(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
+#endif
+#ifndef _spin_lock_bh
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ __spin_lock_bh(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
+#endif
+#ifndef _read_lock_irqsave
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
+ return __read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
+#ifndef _read_lock_irq
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ __read_lock_irq(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
+#endif
+#ifndef _read_lock_bh
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ __read_lock_bh(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
+#endif
+#ifndef _write_lock_irqsave
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
+ return __write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
+#ifndef _write_lock_irq
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ __write_lock_irq(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
+#endif
+#ifndef _write_lock_bh
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ __write_lock_bh(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
+#endif
+#ifndef _spin_lock
void __lockfunc _spin_lock(spinlock_t *lock)
{
- preempt_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ __spin_lock(lock);
}
-
EXPORT_SYMBOL(_spin_lock);
+#endif
+#ifndef _write_lock
void __lockfunc _write_lock(rwlock_t *lock)
{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ __write_lock(lock);
}
-
EXPORT_SYMBOL(_write_lock);
+#endif
#else /* CONFIG_PREEMPT: */
@@ -318,125 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
#endif
+#ifndef _spin_unlock
void __lockfunc _spin_unlock(spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable();
+ __spin_unlock(lock);
}
EXPORT_SYMBOL(_spin_unlock);
+#endif
+#ifndef _write_unlock
void __lockfunc _write_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
+ __write_unlock(lock);
}
EXPORT_SYMBOL(_write_unlock);
+#endif
+#ifndef _read_unlock
void __lockfunc _read_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable();
+ __read_unlock(lock);
}
EXPORT_SYMBOL(_read_unlock);
+#endif
+#ifndef _spin_unlock_irqrestore
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
+ __spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
+#endif
+#ifndef _spin_unlock_irq
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- local_irq_enable();
- preempt_enable();
+ __spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_spin_unlock_irq);
+#endif
+#ifndef _spin_unlock_bh
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_spin_unlock_bh);
+#endif
+#ifndef _read_unlock_irqrestore
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
+ __read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
+#endif
+#ifndef _read_unlock_irq
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
+ __read_unlock_irq(lock);
}
EXPORT_SYMBOL(_read_unlock_irq);
+#endif
+#ifndef _read_unlock_bh
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __read_unlock_bh(lock);
}
EXPORT_SYMBOL(_read_unlock_bh);
+#endif
+#ifndef _write_unlock_irqrestore
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
+ __write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
+#endif
+#ifndef _write_unlock_irq
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
+ __write_unlock_irq(lock);
}
EXPORT_SYMBOL(_write_unlock_irq);
+#endif
+#ifndef _write_unlock_bh
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ __write_unlock_bh(lock);
}
EXPORT_SYMBOL(_write_unlock_bh);
+#endif
+#ifndef _spin_trylock_bh
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
- local_bh_disable();
- preempt_disable();
- if (_raw_spin_trylock(lock)) {
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
-
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
- return 0;
+ return __spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_spin_trylock_bh);
+#endif
notrace int in_lock_functions(unsigned long addr)
{
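The new #ifndef guards let an architecture (or a later core change) supply its own inlined version of any of these entry points; when the symbol is also defined as a macro, the generic out-of-line copy above drops out at compile time. A hedged sketch of what such an override could look like (the header location and definition are hypothetical):

/* e.g. in an arch header seen before kernel/spinlock.c is built */
static inline int _spin_trylock(spinlock_t *lock)
{
	return __spin_trylock(lock);	/* or a hand-tuned arch sequence */
}
#define _spin_trylock	_spin_trylock	/* suppress the generic copy */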
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 58be76017fd0..3125cff1c570 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -49,7 +49,6 @@
#include <linux/acpi.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
-#include <linux/security.h>
#include <linux/slow-work.h>
#include <linux/perf_counter.h>
@@ -246,6 +245,14 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
#endif
static struct ctl_table kern_table[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#ifdef CONFIG_SCHED_DEBUG
{
.ctl_name = CTL_UNNUMBERED,
@@ -300,14 +307,6 @@ static struct ctl_table kern_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_child_runs_first",
- .data = &sysctl_sched_child_runs_first,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .ctl_name = CTL_UNNUMBERED,
.procname = "sched_features",
.data = &sysctl_sched_features,
.maxlen = sizeof(unsigned int),
@@ -332,6 +331,14 @@ static struct ctl_table kern_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
+ .procname = "sched_time_avg",
+ .data = &sysctl_sched_time_avg,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
.procname = "timer_migration",
.data = &sysctl_timer_migration,
.maxlen = sizeof(unsigned int),
diff --git a/kernel/timer.c b/kernel/timer.c
index a7f07d5a6241..a3d25f415019 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1156,8 +1156,7 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
run_local_timers();
- if (rcu_pending(cpu))
- rcu_check_callbacks(cpu, user_tick);
+ rcu_check_callbacks(cpu, user_tick);
printk_tick();
scheduler_tick();
run_posix_cpu_timers(p);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 019f380fd764..1ea0d1234f4a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -41,7 +41,7 @@ config HAVE_FTRACE_MCOUNT_RECORD
config HAVE_HW_BRANCH_TRACER
bool
-config HAVE_FTRACE_SYSCALLS
+config HAVE_SYSCALL_TRACEPOINTS
bool
config TRACER_MAX_TRACE
@@ -60,9 +60,14 @@ config EVENT_TRACING
bool
config CONTEXT_SWITCH_TRACER
- select MARKERS
bool
+config RING_BUFFER_ALLOW_SWAP
+ bool
+ help
+ Allow the use of ring_buffer_swap_cpu.
+ Adds a very slight overhead to tracing when enabled.
+
# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
@@ -147,6 +152,7 @@ config IRQSOFF_TRACER
select TRACE_IRQFLAGS
select GENERIC_TRACER
select TRACER_MAX_TRACE
+ select RING_BUFFER_ALLOW_SWAP
help
This option measures the time spent in irqs-off critical
sections, with microsecond accuracy.
@@ -168,6 +174,7 @@ config PREEMPT_TRACER
depends on PREEMPT
select GENERIC_TRACER
select TRACER_MAX_TRACE
+ select RING_BUFFER_ALLOW_SWAP
help
This option measures the time spent in preemption off critical
sections, with microsecond accuracy.
@@ -211,7 +218,7 @@ config ENABLE_DEFAULT_TRACERS
config FTRACE_SYSCALLS
bool "Trace syscalls"
- depends on HAVE_FTRACE_SYSCALLS
+ depends on HAVE_SYSCALL_TRACEPOINTS
select GENERIC_TRACER
select KALLSYMS
help
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7a34cb563fec..3eb159c277c8 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -65,13 +65,15 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
{
struct blk_io_trace *t;
struct ring_buffer_event *event = NULL;
+ struct ring_buffer *buffer = NULL;
int pc = 0;
int cpu = smp_processor_id();
bool blk_tracer = blk_tracer_enabled;
if (blk_tracer) {
+ buffer = blk_tr->buffer;
pc = preempt_count();
- event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+ event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len,
0, pc);
if (!event)
@@ -96,7 +98,7 @@ record_it:
memcpy((void *) t + sizeof(*t), data, len);
if (blk_tracer)
- trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+ trace_buffer_unlock_commit(buffer, event, 0, pc);
}
}
@@ -179,6 +181,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
{
struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
+ struct ring_buffer *buffer = NULL;
struct blk_io_trace *t;
unsigned long flags = 0;
unsigned long *sequence;
@@ -204,8 +207,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
if (blk_tracer) {
tracing_record_cmdline(current);
+ buffer = blk_tr->buffer;
pc = preempt_count();
- event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+ event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len,
0, pc);
if (!event)
@@ -252,7 +256,7 @@ record_it:
memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
if (blk_tracer) {
- trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+ trace_buffer_unlock_commit(buffer, event, 0, pc);
return;
}
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 25edd5cc5935..8c804e24f96f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1016,71 +1016,35 @@ static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
unsigned long ftrace_addr;
- unsigned long ip, fl;
+ unsigned long flag = 0UL;
ftrace_addr = (unsigned long)FTRACE_ADDR;
- ip = rec->ip;
-
/*
- * If this record is not to be traced and
- * it is not enabled then do nothing.
+ * If this record is not to be traced or we want to disable it,
+ * then disable it.
*
- * If this record is not to be traced and
- * it is enabled then disable it.
+ * If we want to enable it and filtering is off, then enable it.
*
+ * If we want to enable it and filtering is on, enable it only if
+ * it's filtered
*/
- if (rec->flags & FTRACE_FL_NOTRACE) {
- if (rec->flags & FTRACE_FL_ENABLED)
- rec->flags &= ~FTRACE_FL_ENABLED;
- else
- return 0;
-
- } else if (ftrace_filtered && enable) {
- /*
- * Filtering is on:
- */
-
- fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
-
- /* Record is filtered and enabled, do nothing */
- if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
- return 0;
-
- /* Record is not filtered or enabled, do nothing */
- if (!fl)
- return 0;
-
- /* Record is not filtered but enabled, disable it */
- if (fl == FTRACE_FL_ENABLED)
- rec->flags &= ~FTRACE_FL_ENABLED;
- else
- /* Otherwise record is filtered but not enabled, enable it */
- rec->flags |= FTRACE_FL_ENABLED;
- } else {
- /* Disable or not filtered */
-
- if (enable) {
- /* if record is enabled, do nothing */
- if (rec->flags & FTRACE_FL_ENABLED)
- return 0;
-
- rec->flags |= FTRACE_FL_ENABLED;
-
- } else {
+ if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
+ if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
+ flag = FTRACE_FL_ENABLED;
+ }
- /* if record is not enabled, do nothing */
- if (!(rec->flags & FTRACE_FL_ENABLED))
- return 0;
+ /* If the state of this record hasn't changed, then do nothing */
+ if ((rec->flags & FTRACE_FL_ENABLED) == flag)
+ return 0;
- rec->flags &= ~FTRACE_FL_ENABLED;
- }
+ if (flag) {
+ rec->flags |= FTRACE_FL_ENABLED;
+ return ftrace_make_call(rec, ftrace_addr);
}
- if (rec->flags & FTRACE_FL_ENABLED)
- return ftrace_make_call(rec, ftrace_addr);
- else
- return ftrace_make_nop(NULL, rec, ftrace_addr);
+ rec->flags &= ~FTRACE_FL_ENABLED;
+ return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
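The rewritten logic collapses the old state machine into a single desired-state flag that is compared against the stored FTRACE_FL_ENABLED bit. As a hedged restatement (illustrative helper, not part of the patch), the decision amounts to:

/* Nonzero when the mcount site described by @flags should call the
 * tracer, given the global @enable request and whether filtering is on. */
static int should_enable(unsigned long flags, int enable, int filtered)
{
	if (!enable || (flags & FTRACE_FL_NOTRACE))
		return 0;		/* leave it (or turn it into) a nop */
	if (!filtered || (flags & FTRACE_FL_FILTER))
		return 1;		/* patch in the call */
	return 0;			/* filtering on, site not matched */
}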
@@ -1375,7 +1339,6 @@ struct ftrace_iterator {
unsigned flags;
unsigned char buffer[FTRACE_BUFF_MAX+1];
unsigned buffer_idx;
- unsigned filtered;
};
static void *
@@ -1438,18 +1401,13 @@ static int t_hash_show(struct seq_file *m, void *v)
{
struct ftrace_func_probe *rec;
struct hlist_node *hnd = v;
- char str[KSYM_SYMBOL_LEN];
rec = hlist_entry(hnd, struct ftrace_func_probe, node);
if (rec->ops->print)
return rec->ops->print(m, rec->ip, rec->ops, rec->data);
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
- seq_printf(m, "%s:", str);
-
- kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
- seq_printf(m, "%s", str);
+ seq_printf(m, "%pf:%pf", (void *)rec->ip, (void *)rec->ops->func);
if (rec->data)
seq_printf(m, ":%p", rec->data);
@@ -1547,7 +1505,6 @@ static int t_show(struct seq_file *m, void *v)
{
struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = v;
- char str[KSYM_SYMBOL_LEN];
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_show(m, v);
@@ -1560,9 +1517,7 @@ static int t_show(struct seq_file *m, void *v)
if (!rec)
return 0;
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
-
- seq_printf(m, "%s\n", str);
+ seq_printf(m, "%pf\n", (void *)rec->ip);
return 0;
}
@@ -1601,17 +1556,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
return ret;
}
-int ftrace_avail_release(struct inode *inode, struct file *file)
-{
- struct seq_file *m = (struct seq_file *)file->private_data;
- struct ftrace_iterator *iter = m->private;
-
- seq_release(inode, file);
- kfree(iter);
-
- return 0;
-}
-
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
@@ -2317,7 +2261,6 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
}
if (isspace(ch)) {
- iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
ret = ftrace_process_regex(iter->buffer,
iter->buffer_idx, enable);
@@ -2448,7 +2391,6 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
iter = file->private_data;
if (iter->buffer_idx) {
- iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
}
@@ -2479,14 +2421,14 @@ static const struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = ftrace_avail_release,
+ .release = seq_release_private,
};
static const struct file_operations ftrace_failures_fops = {
.open = ftrace_failures_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = ftrace_avail_release,
+ .release = seq_release_private,
};
static const struct file_operations ftrace_filter_fops = {
@@ -2548,7 +2490,6 @@ static void g_stop(struct seq_file *m, void *p)
static int g_show(struct seq_file *m, void *v)
{
unsigned long *ptr = v;
- char str[KSYM_SYMBOL_LEN];
if (!ptr)
return 0;
@@ -2558,9 +2499,7 @@ static int g_show(struct seq_file *m, void *v)
return 0;
}
- kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
-
- seq_printf(m, "%s\n", str);
+ seq_printf(m, "%pf\n", v);
return 0;
}
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 1edaa9516e81..81b1645c8549 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -183,11 +183,9 @@ static void kmemtrace_stop_probes(void)
static int kmem_trace_init(struct trace_array *tr)
{
- int cpu;
kmemtrace_array = tr;
- for_each_cpu(cpu, cpu_possible_mask)
- tracing_reset(tr, cpu);
+ tracing_reset_online_cpus(tr);
kmemtrace_start_probes();
@@ -239,12 +237,52 @@ struct kmemtrace_user_event_alloc {
};
static enum print_line_t
-kmemtrace_print_alloc_user(struct trace_iterator *iter,
- struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
{
- struct kmemtrace_user_event_alloc *ev_alloc;
struct trace_seq *s = &iter->seq;
+ struct kmemtrace_alloc_entry *entry;
+ int ret;
+
+ trace_assign_type(entry, iter->ent);
+
+ ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
+ "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
+ entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr,
+ (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc,
+ (unsigned long)entry->gfp_flags, entry->node);
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free(struct trace_iterator *iter, int flags)
+{
+ struct trace_seq *s = &iter->seq;
+ struct kmemtrace_free_entry *entry;
+ int ret;
+
+ trace_assign_type(entry, iter->ent);
+
+ ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
+ entry->type_id, (void *)entry->call_site,
+ (unsigned long)entry->ptr);
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
+{
+ struct trace_seq *s = &iter->seq;
+ struct kmemtrace_alloc_entry *entry;
struct kmemtrace_user_event *ev;
+ struct kmemtrace_user_event_alloc *ev_alloc;
+
+ trace_assign_type(entry, iter->ent);
ev = trace_seq_reserve(s, sizeof(*ev));
if (!ev)
@@ -271,12 +309,14 @@ kmemtrace_print_alloc_user(struct trace_iterator *iter,
}
static enum print_line_t
-kmemtrace_print_free_user(struct trace_iterator *iter,
- struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_user(struct trace_iterator *iter, int flags)
{
struct trace_seq *s = &iter->seq;
+ struct kmemtrace_free_entry *entry;
struct kmemtrace_user_event *ev;
+ trace_assign_type(entry, iter->ent);
+
ev = trace_seq_reserve(s, sizeof(*ev));
if (!ev)
return TRACE_TYPE_PARTIAL_LINE;
@@ -294,12 +334,14 @@ kmemtrace_print_free_user(struct trace_iterator *iter,
/* The two other following provide a more minimalistic output */
static enum print_line_t
-kmemtrace_print_alloc_compress(struct trace_iterator *iter,
- struct kmemtrace_alloc_entry *entry)
+kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
+ struct kmemtrace_alloc_entry *entry;
struct trace_seq *s = &iter->seq;
int ret;
+ trace_assign_type(entry, iter->ent);
+
/* Alloc entry */
ret = trace_seq_printf(s, " + ");
if (!ret)
@@ -345,29 +387,24 @@ kmemtrace_print_alloc_compress(struct trace_iterator *iter,
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- /* Node */
- ret = trace_seq_printf(s, "%4d ", entry->node);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- /* Call site */
- ret = seq_print_ip_sym(s, entry->call_site, 0);
+ /* Node and call site */
+ ret = trace_seq_printf(s, "%4d %pf\n", entry->node,
+ (void *)entry->call_site);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- if (!trace_seq_printf(s, "\n"))
- return TRACE_TYPE_PARTIAL_LINE;
-
return TRACE_TYPE_HANDLED;
}
static enum print_line_t
-kmemtrace_print_free_compress(struct trace_iterator *iter,
- struct kmemtrace_free_entry *entry)
+kmemtrace_print_free_compress(struct trace_iterator *iter)
{
+ struct kmemtrace_free_entry *entry;
struct trace_seq *s = &iter->seq;
int ret;
+ trace_assign_type(entry, iter->ent);
+
/* Free entry */
ret = trace_seq_printf(s, " - ");
if (!ret)
@@ -401,19 +438,11 @@ kmemtrace_print_free_compress(struct trace_iterator *iter,
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- /* Skip node */
- ret = trace_seq_printf(s, " ");
+ /* Skip node and print call site */
+ ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- /* Call site */
- ret = seq_print_ip_sym(s, entry->call_site, 0);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- if (!trace_seq_printf(s, "\n"))
- return TRACE_TYPE_PARTIAL_LINE;
-
return TRACE_TYPE_HANDLED;
}
@@ -421,32 +450,31 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
struct trace_entry *entry = iter->ent;
- switch (entry->type) {
- case TRACE_KMEM_ALLOC: {
- struct kmemtrace_alloc_entry *field;
-
- trace_assign_type(field, entry);
- if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
- return kmemtrace_print_alloc_compress(iter, field);
- else
- return kmemtrace_print_alloc_user(iter, field);
- }
-
- case TRACE_KMEM_FREE: {
- struct kmemtrace_free_entry *field;
-
- trace_assign_type(field, entry);
- if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
- return kmemtrace_print_free_compress(iter, field);
- else
- return kmemtrace_print_free_user(iter, field);
- }
+ if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
+ return TRACE_TYPE_UNHANDLED;
+ switch (entry->type) {
+ case TRACE_KMEM_ALLOC:
+ return kmemtrace_print_alloc_compress(iter);
+ case TRACE_KMEM_FREE:
+ return kmemtrace_print_free_compress(iter);
default:
return TRACE_TYPE_UNHANDLED;
}
}
+static struct trace_event kmem_trace_alloc = {
+ .type = TRACE_KMEM_ALLOC,
+ .trace = kmemtrace_print_alloc,
+ .binary = kmemtrace_print_alloc_user,
+};
+
+static struct trace_event kmem_trace_free = {
+ .type = TRACE_KMEM_FREE,
+ .trace = kmemtrace_print_free,
+ .binary = kmemtrace_print_free_user,
+};
+
static struct tracer kmem_tracer __read_mostly = {
.name = "kmemtrace",
.init = kmem_trace_init,
@@ -463,6 +491,21 @@ void kmemtrace_init(void)
static int __init init_kmem_tracer(void)
{
- return register_tracer(&kmem_tracer);
+ if (!register_ftrace_event(&kmem_trace_alloc)) {
+ pr_warning("Warning: could not register kmem events\n");
+ return 1;
+ }
+
+ if (!register_ftrace_event(&kmem_trace_free)) {
+ pr_warning("Warning: could not register kmem events\n");
+ return 1;
+ }
+
+ if (!register_tracer(&kmem_tracer)) {
+ pr_warning("Warning: could not register the kmem tracer\n");
+ return 1;
+ }
+
+ return 0;
}
device_initcall(init_kmem_tracer);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a330513d96ce..454e74e718cf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -218,17 +218,12 @@ enum {
static inline int rb_null_event(struct ring_buffer_event *event)
{
- return event->type_len == RINGBUF_TYPE_PADDING
- && event->time_delta == 0;
-}
-
-static inline int rb_discarded_event(struct ring_buffer_event *event)
-{
- return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
+ return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}
static void rb_event_set_padding(struct ring_buffer_event *event)
{
+ /* padding has a NULL time_delta */
event->type_len = RINGBUF_TYPE_PADDING;
event->time_delta = 0;
}
@@ -322,6 +317,14 @@ struct buffer_data_page {
unsigned char data[]; /* data of buffer page */
};
+/*
+ * Note, the buffer_page list must be first. The buffer pages
+ * are allocated in cache lines, which means that each buffer
+ * page will be at the beginning of a cache line, and thus
+ * the least significant bits will be zero. We use this to
+ * add flags in the list struct pointers, to make the ring buffer
+ * lockless.
+ */
struct buffer_page {
struct list_head list; /* list of buffer pages */
local_t write; /* index for next write */
@@ -330,6 +333,21 @@ struct buffer_page {
struct buffer_data_page *page; /* Actual data page */
};
+/*
+ * The buffer page counters, write and entries, must be reset
+ * atomically when crossing page boundaries. To synchronize this
+ * update, two counters are inserted into the number. One is
+ * the actual counter for the write position or count on the page.
+ *
+ * The other is a counter of updaters. Before an update happens
+ * the update partition of the counter is incremented. This will
+ * allow the updater to update the counter atomically.
+ *
+ * The counter is 20 bits, and the state data is 12.
+ */
+#define RB_WRITE_MASK 0xfffff
+#define RB_WRITE_INTCNT (1 << 20)
+
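To make the 20/12 split above concrete, a hedged pair of helpers (illustrative only; the patch open-codes this masking where it is needed):

static inline unsigned long rb_write_index(unsigned long w)
{
	return w & RB_WRITE_MASK;	/* byte index of the next write */
}

static inline unsigned long rb_write_updaters(unsigned long w)
{
	return w >> 20;			/* nested updates in flight */
}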
static void rb_init_page(struct buffer_data_page *bpage)
{
local_set(&bpage->commit, 0);
@@ -403,21 +421,20 @@ int ring_buffer_print_page_header(struct trace_seq *s)
struct ring_buffer_per_cpu {
int cpu;
struct ring_buffer *buffer;
- spinlock_t reader_lock; /* serialize readers */
+ spinlock_t reader_lock; /* serialize readers */
raw_spinlock_t lock;
struct lock_class_key lock_key;
- struct list_head pages;
+ struct list_head *pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
- unsigned long nmi_dropped;
- unsigned long commit_overrun;
- unsigned long overrun;
- unsigned long read;
+ local_t commit_overrun;
+ local_t overrun;
local_t entries;
local_t committing;
local_t commits;
+ unsigned long read;
u64 write_stamp;
u64 read_stamp;
atomic_t record_disabled;
@@ -450,14 +467,19 @@ struct ring_buffer_iter {
};
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
-#define RB_WARN_ON(buffer, cond) \
- ({ \
- int _____ret = unlikely(cond); \
- if (_____ret) { \
- atomic_inc(&buffer->record_disabled); \
- WARN_ON(1); \
- } \
- _____ret; \
+#define RB_WARN_ON(b, cond) \
+ ({ \
+ int _____ret = unlikely(cond); \
+ if (_____ret) { \
+ if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
+ struct ring_buffer_per_cpu *__b = \
+ (void *)b; \
+ atomic_inc(&__b->buffer->record_disabled); \
+ } else \
+ atomic_inc(&b->record_disabled); \
+ WARN_ON(1); \
+ } \
+ _____ret; \
})
/* Up this if you want to test the TIME_EXTENTS and normalization */
@@ -489,6 +511,390 @@ void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
+/*
+ * Making the ring buffer lockless makes things tricky.
+ * Although writes only happen on the CPU that they are on,
+ * and they only need to worry about interrupts. Reads can
+ * happen on any CPU.
+ *
+ * The reader page is always off the ring buffer, but when the
+ * reader finishes with a page, it needs to swap its page with
+ * a new one from the buffer. The reader needs to take from
+ * the head (writes go to the tail). But if a writer is in overwrite
+ * mode and wraps, it must push the head page forward.
+ *
+ * Here lies the problem.
+ *
+ * The reader must be careful to replace only the head page, and
+ * not another one. As described at the top of the file in the
+ * ASCII art, the reader sets its old page to point to the next
+ * page after head. It then sets the page after head to point to
+ * the old reader page. But if the writer moves the head page
+ * during this operation, the reader could end up with the tail.
+ *
+ * We use cmpxchg to help prevent this race. We also do something
+ * special with the page before head. We set the LSB to 1.
+ *
+ * When the writer must push the page forward, it will clear the
+ * bit that points to the head page, move the head, and then set
+ * the bit that points to the new head page.
+ *
+ * We also don't want an interrupt coming in and moving the head
+ * page on another writer. Thus we use the second LSB to catch
+ * that too. Thus:
+ *
+ * head->list->prev->next        bit 1    bit 0
+ *                              -------  -------
+ * Normal page                     0        0
+ * Points to head page             0        1
+ * New head page                   1        0
+ *
+ * Note we can not trust the prev pointer of the head page, because:
+ *
+ *   +----+       +-----+        +-----+
+ *   |    |------>|  T  |---X--->|  N  |
+ *   |    |<------|     |        |     |
+ *   +----+       +-----+        +-----+
+ *     ^                           ^ |
+ *     |          +-----+          | |
+ *     +----------|  R  |----------+ |
+ *                |     |<-----------+
+ *                +-----+
+ *
+ * Key:  ---X-->  HEAD flag set in pointer
+ *         T      Tail page
+ *         R      Reader page
+ *         N      Next page
+ *
+ * (see __rb_reserve_next() to see where this happens)
+ *
+ * What the above shows is that the reader just swapped out
+ * the reader page with a page in the buffer, but before it
+ * could make the new header point back to the new page added
+ * it was preempted by a writer. The writer moved forward onto
+ * the new page added by the reader and is about to move forward
+ * again.
+ *
+ * You can see, it is legitimate for the previous pointer of
+ * the head (or any page) not to point back to itself. But only
+ * temporarily.
+ */
+
+#define RB_PAGE_NORMAL 0UL
+#define RB_PAGE_HEAD 1UL
+#define RB_PAGE_UPDATE 2UL
+
+
+#define RB_FLAG_MASK 3UL
+
+/* PAGE_MOVED is not part of the mask */
+#define RB_PAGE_MOVED 4UL
+
+/*
+ * rb_list_head - remove any bit
+ */
+static struct list_head *rb_list_head(struct list_head *list)
+{
+ unsigned long val = (unsigned long)list;
+
+ return (struct list_head *)(val & ~RB_FLAG_MASK);
+}
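rb_list_head() strips the flag bits; the complementary operations, shown here only to illustrate the encoding (the patch performs them inline, usually via cmpxchg), would be:

/* Cache-line alignment of buffer pages guarantees the two low bits of
 * a ->next pointer are zero, so they can carry RB_PAGE_HEAD/UPDATE. */
static inline struct list_head *rb_list_tag(struct list_head *next,
					    unsigned long flag)
{
	return (struct list_head *)((unsigned long)next | flag);
}

static inline unsigned long rb_list_flag(struct list_head *next)
{
	return (unsigned long)next & RB_FLAG_MASK;
}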
+
+/*
+ * rb_is_head_page - test if the given page is the head page
+ *
+ * Because the reader may move the head_page pointer, we can
+ * not trust what the head page is (it may be pointing to
+ * the reader page). But if the next page is a header page,
+ * its flags will be non zero.
+ */
+static inline int
+rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *page, struct list_head *list)
+{
+ unsigned long val;
+
+ val = (unsigned long)list->next;
+
+ if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
+ return RB_PAGE_MOVED;
+
+ return val & RB_FLAG_MASK;
+}
+
+/*
+ * rb_is_reader_page
+ *
+ * The unique thing about the reader page is that, if the
+ * writer is ever on it, the previous pointer never points
+ * back to the reader page.
+ */
+static int rb_is_reader_page(struct buffer_page *page)
+{
+ struct list_head *list = page->list.prev;
+
+ return rb_list_head(list->next) != &page->list;
+}
+
+/*
+ * rb_set_list_to_head - set a list_head to be pointing to head.
+ */
+static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
+ struct list_head *list)
+{
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)&list->next;
+ *ptr |= RB_PAGE_HEAD;
+ *ptr &= ~RB_PAGE_UPDATE;
+}
+
+/*
+ * rb_head_page_activate - sets up head page
+ */
+static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct buffer_page *head;
+
+ head = cpu_buffer->head_page;
+ if (!head)
+ return;
+
+ /*
+ * Set the previous list pointer to have the HEAD flag.
+ */
+ rb_set_list_to_head(cpu_buffer, head->list.prev);
+}
+
+static void rb_list_head_clear(struct list_head *list)
+{
+ unsigned long *ptr = (unsigned long *)&list->next;
+
+ *ptr &= ~RB_FLAG_MASK;
+}
+
+/*
+ * rb_head_page_deactivate - clears head page ptr (for free list)
+ */
+static void
+rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct list_head *hd;
+
+ /* Go through the whole list and clear any pointers found. */
+ rb_list_head_clear(cpu_buffer->pages);
+
+ list_for_each(hd, cpu_buffer->pages)
+ rb_list_head_clear(hd);
+}
+
+static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *head,
+ struct buffer_page *prev,
+ int old_flag, int new_flag)
+{
+ struct list_head *list;
+ unsigned long val = (unsigned long)&head->list;
+ unsigned long ret;
+
+ list = &prev->list;
+
+ val &= ~RB_FLAG_MASK;
+
+ ret = (unsigned long)cmpxchg(&list->next,
+ val | old_flag, val | new_flag);
+
+ /* check if the reader took the page */
+ if ((ret & ~RB_FLAG_MASK) != val)
+ return RB_PAGE_MOVED;
+
+ return ret & RB_FLAG_MASK;
+}
+
+static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *head,
+ struct buffer_page *prev,
+ int old_flag)
+{
+ return rb_head_page_set(cpu_buffer, head, prev,
+ old_flag, RB_PAGE_UPDATE);
+}
+
+static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *head,
+ struct buffer_page *prev,
+ int old_flag)
+{
+ return rb_head_page_set(cpu_buffer, head, prev,
+ old_flag, RB_PAGE_HEAD);
+}
+
+static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *head,
+ struct buffer_page *prev,
+ int old_flag)
+{
+ return rb_head_page_set(cpu_buffer, head, prev,
+ old_flag, RB_PAGE_NORMAL);
+}
+
+static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page **bpage)
+{
+ struct list_head *p = rb_list_head((*bpage)->list.next);
+
+ *bpage = list_entry(p, struct buffer_page, list);
+}
+
+static struct buffer_page *
+rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct buffer_page *head;
+ struct buffer_page *page;
+ struct list_head *list;
+ int i;
+
+ if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
+ return NULL;
+
+ /* sanity check */
+ list = cpu_buffer->pages;
+ if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
+ return NULL;
+
+ page = head = cpu_buffer->head_page;
+ /*
+ * It is possible that the writer moves the header behind
+ * where we started, and we miss in one loop.
+ * A second loop should grab the header, but we'll do
+ * three loops just because I'm paranoid.
+ */
+ for (i = 0; i < 3; i++) {
+ do {
+ if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
+ cpu_buffer->head_page = page;
+ return page;
+ }
+ rb_inc_page(cpu_buffer, &page);
+ } while (page != head);
+ }
+
+ RB_WARN_ON(cpu_buffer, 1);
+
+ return NULL;
+}
+
+static int rb_head_page_replace(struct buffer_page *old,
+ struct buffer_page *new)
+{
+ unsigned long *ptr = (unsigned long *)&old->list.prev->next;
+ unsigned long val;
+ unsigned long ret;
+
+ val = *ptr & ~RB_FLAG_MASK;
+ val |= RB_PAGE_HEAD;
+
+ ret = cmpxchg(ptr, val, &new->list);
+
+ return ret == val;
+}
+
+/*
+ * rb_tail_page_update - move the tail page forward
+ *
+ * Returns 1 if moved tail page, 0 if someone else did.
+ */
+static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *tail_page,
+ struct buffer_page *next_page)
+{
+ struct buffer_page *old_tail;
+ unsigned long old_entries;
+ unsigned long old_write;
+ int ret = 0;
+
+ /*
+ * The tail page now needs to be moved forward.
+ *
+ * We need to reset the tail page, but without messing
+ * with possible erasing of data brought in by interrupts
+ * that have moved the tail page and are currently on it.
+ *
+ * We add a counter to the write field to denote this.
+ */
+ old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
+ old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
+
+ /*
+ * Just make sure we have seen our old_write and synchronize
+ * with any interrupts that come in.
+ */
+ barrier();
+
+ /*
+ * If the tail page is still the same as what we think
+ * it is, then it is up to us to update the tail
+ * pointer.
+ */
+ if (tail_page == cpu_buffer->tail_page) {
+ /* Zero the write counter */
+ unsigned long val = old_write & ~RB_WRITE_MASK;
+ unsigned long eval = old_entries & ~RB_WRITE_MASK;
+
+ /*
+ * This will only succeed if an interrupt did
+ * not come in and change it. In which case, we
+ * do not want to modify it.
+ *
+ * We add (void) to let the compiler know that we do not care
+ * about the return value of these functions. We use the
+ * cmpxchg to only update if an interrupt did not already
+ * do it for us. If the cmpxchg fails, we don't care.
+ */
+ (void)local_cmpxchg(&next_page->write, old_write, val);
+ (void)local_cmpxchg(&next_page->entries, old_entries, eval);
+
+ /*
+ * No need to worry about races with clearing out the commit.
+ * it only can increment when a commit takes place. But that
+ * only happens in the outer most nested commit.
+ */
+ local_set(&next_page->page->commit, 0);
+
+ old_tail = cmpxchg(&cpu_buffer->tail_page,
+ tail_page, next_page);
+
+ if (old_tail == tail_page)
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *bpage)
+{
+ unsigned long val = (unsigned long)bpage;
+
+ if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * rb_check_list - make sure a pointer to a list has the last bits zero
+ */
+static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
+ struct list_head *list)
+{
+ if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
+ return 1;
+ if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
+ return 1;
+ return 0;
+}
+
/**
* check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
@@ -498,14 +904,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
*/
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
- struct list_head *head = &cpu_buffer->pages;
+ struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
+ rb_head_page_deactivate(cpu_buffer);
+
if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
return -1;
if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
return -1;
+ if (rb_check_list(cpu_buffer, head))
+ return -1;
+
list_for_each_entry_safe(bpage, tmp, head, list) {
if (RB_WARN_ON(cpu_buffer,
bpage->list.next->prev != &bpage->list))
@@ -513,25 +924,33 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
if (RB_WARN_ON(cpu_buffer,
bpage->list.prev->next != &bpage->list))
return -1;
+ if (rb_check_list(cpu_buffer, &bpage->list))
+ return -1;
}
+ rb_head_page_activate(cpu_buffer);
+
return 0;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
unsigned nr_pages)
{
- struct list_head *head = &cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
unsigned long addr;
LIST_HEAD(pages);
unsigned i;
+ WARN_ON(!nr_pages);
+
for (i = 0; i < nr_pages; i++) {
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
if (!bpage)
goto free_pages;
+
+ rb_check_bpage(cpu_buffer, bpage);
+
list_add(&bpage->list, &pages);
addr = __get_free_page(GFP_KERNEL);
@@ -541,7 +960,13 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_init_page(bpage->page);
}
- list_splice(&pages, head);
+ /*
+ * The ring buffer page list is a circular list that does not
+ * start and end with a list head. All page list items point to
+ * other pages.
+ */
+ cpu_buffer->pages = pages.next;
+ list_del(&pages);
rb_check_pages(cpu_buffer);
@@ -573,13 +998,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
- INIT_LIST_HEAD(&cpu_buffer->pages);
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
if (!bpage)
goto fail_free_buffer;
+ rb_check_bpage(cpu_buffer, bpage);
+
cpu_buffer->reader_page = bpage;
addr = __get_free_page(GFP_KERNEL);
if (!addr)
@@ -594,9 +1020,11 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
goto fail_free_reader;
cpu_buffer->head_page
- = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+ = list_entry(cpu_buffer->pages, struct buffer_page, list);
cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
+ rb_head_page_activate(cpu_buffer);
+
return cpu_buffer;
fail_free_reader:
@@ -609,15 +1037,22 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
- struct list_head *head = &cpu_buffer->pages;
+ struct list_head *head = cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
free_buffer_page(cpu_buffer->reader_page);
- list_for_each_entry_safe(bpage, tmp, head, list) {
- list_del_init(&bpage->list);
+ rb_head_page_deactivate(cpu_buffer);
+
+ if (head) {
+ list_for_each_entry_safe(bpage, tmp, head, list) {
+ list_del_init(&bpage->list);
+ free_buffer_page(bpage);
+ }
+ bpage = list_entry(head, struct buffer_page, list);
free_buffer_page(bpage);
}
+
kfree(cpu_buffer);
}
@@ -760,15 +1195,17 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched();
+ rb_head_page_deactivate(cpu_buffer);
+
for (i = 0; i < nr_pages; i++) {
- if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+ if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
return;
- p = cpu_buffer->pages.next;
+ p = cpu_buffer->pages->next;
bpage = list_entry(p, struct buffer_page, list);
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
- if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
+ if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
return;
rb_reset_cpu(cpu_buffer);
@@ -790,15 +1227,19 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched();
+ spin_lock_irq(&cpu_buffer->reader_lock);
+ rb_head_page_deactivate(cpu_buffer);
+
for (i = 0; i < nr_pages; i++) {
if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
return;
p = pages->next;
bpage = list_entry(p, struct buffer_page, list);
list_del_init(&bpage->list);
- list_add_tail(&bpage->list, &cpu_buffer->pages);
+ list_add_tail(&bpage->list, cpu_buffer->pages);
}
rb_reset_cpu(cpu_buffer);
+ spin_unlock_irq(&cpu_buffer->reader_lock);
rb_check_pages(cpu_buffer);
@@ -949,21 +1390,14 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
}
static inline struct ring_buffer_event *
-rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
-{
- return __rb_page_index(cpu_buffer->head_page,
- cpu_buffer->head_page->read);
-}
-
-static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
return __rb_page_index(iter->head_page, iter->head);
}
-static inline unsigned rb_page_write(struct buffer_page *bpage)
+static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
- return local_read(&bpage->write);
+ return local_read(&bpage->write) & RB_WRITE_MASK;
}
static inline unsigned rb_page_commit(struct buffer_page *bpage)
@@ -971,6 +1405,11 @@ static inline unsigned rb_page_commit(struct buffer_page *bpage)
return local_read(&bpage->page->commit);
}
+static inline unsigned long rb_page_entries(struct buffer_page *bpage)
+{
+ return local_read(&bpage->entries) & RB_WRITE_MASK;
+}
+
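
The masking above suggests that a buffer page's write and entries fields now pack extra bookkeeping state into their upper bits, with RB_WRITE_MASK stripping it off whenever only the plain index or count is wanted. A minimal userspace sketch of that split; the 20-bit mask width and the name of the upper "tag" field are assumptions for illustration, not values taken from the patch:

#include <stdio.h>
#include <stdint.h>

/* Assumed layout: low 20 bits hold the write index, upper bits a tag. */
#define SK_WRITE_MASK ((1UL << 20) - 1)

static unsigned long sk_page_write(unsigned long raw)
{
	/* Strip the bookkeeping bits, keep only the byte index. */
	return raw & SK_WRITE_MASK;
}

static unsigned long sk_page_tag(unsigned long raw)
{
	/* The bits above the mask survive index updates untouched. */
	return raw & ~SK_WRITE_MASK;
}

int main(void)
{
	unsigned long raw = (3UL << 20) | 4096;	/* tag 3, index 4096 */

	printf("index=%lu tag=0x%lx\n", sk_page_write(raw), sk_page_tag(raw) >> 20);
	return 0;
}
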
/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
@@ -983,22 +1422,6 @@ rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
return rb_page_commit(cpu_buffer->commit_page);
}
-static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
-{
- return rb_page_commit(cpu_buffer->head_page);
-}
-
-static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
- struct buffer_page **bpage)
-{
- struct list_head *p = (*bpage)->list.next;
-
- if (p == &cpu_buffer->pages)
- p = p->next;
-
- *bpage = list_entry(p, struct buffer_page, list);
-}
-
static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
@@ -1024,6 +1447,8 @@ rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
+ unsigned long max_count;
+
/*
* We only race with interrupts and NMIs on this CPU.
* If we own the commit event, then we can commit
@@ -1033,9 +1458,16 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
* assign the commit to the tail.
*/
again:
+ max_count = cpu_buffer->buffer->pages * 100;
+
while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
- cpu_buffer->commit_page->page->commit =
- cpu_buffer->commit_page->write;
+ if (RB_WARN_ON(cpu_buffer, !(--max_count)))
+ return;
+ if (RB_WARN_ON(cpu_buffer,
+ rb_is_reader_page(cpu_buffer->tail_page)))
+ return;
+ local_set(&cpu_buffer->commit_page->page->commit,
+ rb_page_write(cpu_buffer->commit_page));
rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
cpu_buffer->write_stamp =
cpu_buffer->commit_page->page->time_stamp;
@@ -1044,8 +1476,12 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
}
while (rb_commit_index(cpu_buffer) !=
rb_page_write(cpu_buffer->commit_page)) {
- cpu_buffer->commit_page->page->commit =
- cpu_buffer->commit_page->write;
+
+ local_set(&cpu_buffer->commit_page->page->commit,
+ rb_page_write(cpu_buffer->commit_page));
+ RB_WARN_ON(cpu_buffer,
+ local_read(&cpu_buffer->commit_page->page->commit) &
+ ~RB_WRITE_MASK);
barrier();
}
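
The loop above now recomputes max_count as pages * 100 on every retry and bails out with a warning rather than spinning forever if the commit page can never catch the tail. The same defensive pattern, reduced to a userspace sketch (only the factor of 100 per page is carried over from the patch; everything else is illustrative):

#include <stdio.h>

/* Advance `commit` toward `tail`, but give up after a bounded number of
 * steps so a corrupted ring cannot livelock the writer. */
static int catch_up(unsigned commit, unsigned tail, unsigned pages)
{
	unsigned long max_count = (unsigned long)pages * 100;

	while (commit != tail) {
		if (!--max_count) {
			fprintf(stderr, "warn: commit never caught the tail\n");
			return -1;
		}
		commit = (commit + 1) % pages;	/* move to the next page */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", catch_up(2, 7, 16));	/* prints 0: caught up */
	return 0;
}
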
@@ -1078,7 +1514,7 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
* to the head page instead of next.
*/
if (iter->head_page == cpu_buffer->reader_page)
- iter->head_page = cpu_buffer->head_page;
+ iter->head_page = rb_set_head_page(cpu_buffer);
else
rb_inc_page(cpu_buffer, &iter->head_page);
@@ -1122,6 +1558,163 @@ rb_update_event(struct ring_buffer_event *event,
}
}
+/*
+ * rb_handle_head_page - writer hit the head page
+ *
+ * Returns: +1 to retry page
+ * 0 to continue
+ * -1 on error
+ */
+static int
+rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *tail_page,
+ struct buffer_page *next_page)
+{
+ struct buffer_page *new_head;
+ int entries;
+ int type;
+ int ret;
+
+ entries = rb_page_entries(next_page);
+
+ /*
+ * The hard part is here. We need to move the head
+ * forward, and protect against both readers on
+ * other CPUs and writers coming in via interrupts.
+ */
+ type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
+ RB_PAGE_HEAD);
+
+ /*
+ * type can be one of four:
+ * NORMAL - an interrupt already moved it for us
+ * HEAD - we are the first to get here.
+ * UPDATE - we are the interrupt interrupting
+ * a current move.
+ * MOVED - a reader on another CPU moved the next
+ * pointer to its reader page. Give up
+ * and try again.
+ */
+
+ switch (type) {
+ case RB_PAGE_HEAD:
+ /*
+ * We changed the head to UPDATE, thus
+ * it is our responsibility to update
+ * the counters.
+ */
+ local_add(entries, &cpu_buffer->overrun);
+
+ /*
+ * The entries will be zeroed out when we move the
+ * tail page.
+ */
+
+ /* still more to do */
+ break;
+
+ case RB_PAGE_UPDATE:
+ /*
+ * This is an interrupt that interrupted the
+ * previous update. Still more to do.
+ */
+ break;
+ case RB_PAGE_NORMAL:
+ /*
+ * An interrupt came in before the update
+ * and processed this for us.
+ * Nothing left to do.
+ */
+ return 1;
+ case RB_PAGE_MOVED:
+ /*
+ * The reader is on another CPU and just did
+ * a swap with our next_page.
+ * Try again.
+ */
+ return 1;
+ default:
+ RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
+ return -1;
+ }
+
+ /*
+ * Now that we are here, the old head pointer is
+ * set to UPDATE. This will keep the reader from
+ * swapping the head page with the reader page.
+ * The reader (on another CPU) will spin till
+ * we are finished.
+ *
+ * We just need to protect against interrupts
+ * doing the job. We will set the next pointer
+ * to HEAD. After that, we set the old pointer
+ * to NORMAL, but only if it was HEAD before;
+ * otherwise we are an interrupt, and only
+ * want the outermost commit to reset it.
+ */
+ new_head = next_page;
+ rb_inc_page(cpu_buffer, &new_head);
+
+ ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
+ RB_PAGE_NORMAL);
+
+ /*
+ * Valid returns are:
+ * HEAD - an interrupt came in and already set it.
+ * NORMAL - One of two things:
+ * 1) We really set it.
+ * 2) A bunch of interrupts came in and moved
+ * the page forward again.
+ */
+ switch (ret) {
+ case RB_PAGE_HEAD:
+ case RB_PAGE_NORMAL:
+ /* OK */
+ break;
+ default:
+ RB_WARN_ON(cpu_buffer, 1);
+ return -1;
+ }
+
+ /*
+ * It is possible that an interrupt came in,
+ * set the head up, then more interrupts came in
+ * and moved it again. When we get back here,
+ * the page would have been set to NORMAL but we
+ * just set it back to HEAD.
+ *
+ * How do you detect this? Well, if that happened
+ * the tail page would have moved.
+ */
+ if (ret == RB_PAGE_NORMAL) {
+ /*
+ * If the tail had moved past next, then we need
+ * to reset the pointer.
+ */
+ if (cpu_buffer->tail_page != tail_page &&
+ cpu_buffer->tail_page != next_page)
+ rb_head_page_set_normal(cpu_buffer, new_head,
+ next_page,
+ RB_PAGE_HEAD);
+ }
+
+ /*
+ * If this was the outermost commit (the one that
+ * changed the original pointer from HEAD to UPDATE),
+ * then it is up to us to reset it to NORMAL.
+ */
+ if (type == RB_PAGE_HEAD) {
+ ret = rb_head_page_set_normal(cpu_buffer, next_page,
+ tail_page,
+ RB_PAGE_UPDATE);
+ if (RB_WARN_ON(cpu_buffer,
+ ret != RB_PAGE_UPDATE))
+ return -1;
+ }
+
+ return 0;
+}
+
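
rb_handle_head_page() leans on the head pointer carrying a small state tag (NORMAL, HEAD, UPDATE, MOVED) that is switched atomically, so a writer can tell whether it owns the move or merely interrupted one already in progress. A compact userspace model of the claim step using a compare-and-swap on the tag; the encoding and the single HEAD-to-UPDATE transition shown are assumptions distilled from the comments above, not the real bit layout:

#include <stdio.h>
#include <stdatomic.h>

enum { PAGE_NORMAL = 0, PAGE_HEAD = 1, PAGE_UPDATE = 2 };

/* Try to claim the head page: HEAD -> UPDATE.  Returns the tag we found,
 * mirroring how the writer learns whether an interrupt beat it to it. */
static int claim_head(atomic_int *tag)
{
	int expected = PAGE_HEAD;

	if (atomic_compare_exchange_strong(tag, &expected, PAGE_UPDATE))
		return PAGE_HEAD;	/* we own the update */
	return expected;		/* NORMAL or UPDATE: someone else is moving it */
}

int main(void)
{
	atomic_int tag = PAGE_HEAD;

	switch (claim_head(&tag)) {
	case PAGE_HEAD:
		printf("we claimed the head, finish the move ourselves\n");
		break;
	case PAGE_UPDATE:
		printf("we interrupted an ongoing move, help finish it\n");
		break;
	default:
		printf("already moved, nothing to do\n");
	}
	return 0;
}
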
static unsigned rb_calculate_event_length(unsigned length)
{
struct ring_buffer_event event; /* Used only for sizeof array */
@@ -1185,9 +1778,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
event->type_len = RINGBUF_TYPE_PADDING;
/* time delta must be non zero */
event->time_delta = 1;
- /* Account for this as an entry */
- local_inc(&tail_page->entries);
- local_inc(&cpu_buffer->entries);
/* Set write to end of buffer */
length = (tail + length) - BUF_PAGE_SIZE;
@@ -1200,96 +1790,93 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *commit_page,
struct buffer_page *tail_page, u64 *ts)
{
- struct buffer_page *next_page, *head_page, *reader_page;
struct ring_buffer *buffer = cpu_buffer->buffer;
- bool lock_taken = false;
- unsigned long flags;
+ struct buffer_page *next_page;
+ int ret;
next_page = tail_page;
- local_irq_save(flags);
- /*
- * Since the write to the buffer is still not
- * fully lockless, we must be careful with NMIs.
- * The locks in the writers are taken when a write
- * crosses to a new page. The locks protect against
- * races with the readers (this will soon be fixed
- * with a lockless solution).
- *
- * Because we can not protect against NMIs, and we
- * want to keep traces reentrant, we need to manage
- * what happens when we are in an NMI.
- *
- * NMIs can happen after we take the lock.
- * If we are in an NMI, only take the lock
- * if it is not already taken. Otherwise
- * simply fail.
- */
- if (unlikely(in_nmi())) {
- if (!__raw_spin_trylock(&cpu_buffer->lock)) {
- cpu_buffer->nmi_dropped++;
- goto out_reset;
- }
- } else
- __raw_spin_lock(&cpu_buffer->lock);
-
- lock_taken = true;
-
rb_inc_page(cpu_buffer, &next_page);
- head_page = cpu_buffer->head_page;
- reader_page = cpu_buffer->reader_page;
-
- /* we grabbed the lock before incrementing */
- if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
- goto out_reset;
-
/*
* If for some reason, we had an interrupt storm that made
* it all the way around the buffer, bail, and warn
* about it.
*/
if (unlikely(next_page == commit_page)) {
- cpu_buffer->commit_overrun++;
+ local_inc(&cpu_buffer->commit_overrun);
goto out_reset;
}
- if (next_page == head_page) {
- if (!(buffer->flags & RB_FL_OVERWRITE))
- goto out_reset;
-
- /* tail_page has not moved yet? */
- if (tail_page == cpu_buffer->tail_page) {
- /* count overflows */
- cpu_buffer->overrun +=
- local_read(&head_page->entries);
+ /*
+ * This is where the fun begins!
+ *
+ * We are fighting against races between a reader that
+ * could be on another CPU trying to swap its reader
+ * page with the buffer head.
+ *
+ * We are also fighting against interrupts coming in and
+ * moving the head or tail on us as well.
+ *
+ * If the next page is the head page then we have filled
+ * the buffer, unless the commit page is still on the
+ * reader page.
+ */
+ if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
- rb_inc_page(cpu_buffer, &head_page);
- cpu_buffer->head_page = head_page;
- cpu_buffer->head_page->read = 0;
+ /*
+ * If the commit is not on the reader page, then
+ * move the header page.
+ */
+ if (!rb_is_reader_page(cpu_buffer->commit_page)) {
+ /*
+ * If we are not in overwrite mode,
+ * this is easy, just stop here.
+ */
+ if (!(buffer->flags & RB_FL_OVERWRITE))
+ goto out_reset;
+
+ ret = rb_handle_head_page(cpu_buffer,
+ tail_page,
+ next_page);
+ if (ret < 0)
+ goto out_reset;
+ if (ret)
+ goto out_again;
+ } else {
+ /*
+ * We need to be careful here too. The
+ * commit page could still be on the reader
+ * page. We could have a small buffer, and
+ * have filled up the buffer with events
+ * from interrupts and such, and wrapped.
+ *
+ * Note, if the tail page is also on the
+ * reader_page, we let it move out.
+ */
+ if (unlikely((cpu_buffer->commit_page !=
+ cpu_buffer->tail_page) &&
+ (cpu_buffer->commit_page ==
+ cpu_buffer->reader_page))) {
+ local_inc(&cpu_buffer->commit_overrun);
+ goto out_reset;
+ }
}
}
- /*
- * If the tail page is still the same as what we think
- * it is, then it is up to us to update the tail
- * pointer.
- */
- if (tail_page == cpu_buffer->tail_page) {
- local_set(&next_page->write, 0);
- local_set(&next_page->entries, 0);
- local_set(&next_page->page->commit, 0);
- cpu_buffer->tail_page = next_page;
-
- /* reread the time stamp */
+ ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
+ if (ret) {
+ /*
+ * Nested commits always have zero deltas, so
+ * just reread the time stamp
+ */
*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
- cpu_buffer->tail_page->page->time_stamp = *ts;
+ next_page->page->time_stamp = *ts;
}
- rb_reset_tail(cpu_buffer, tail_page, tail, length);
+ out_again:
- __raw_spin_unlock(&cpu_buffer->lock);
- local_irq_restore(flags);
+ rb_reset_tail(cpu_buffer, tail_page, tail, length);
/* fail and let the caller try again */
return ERR_PTR(-EAGAIN);
@@ -1298,9 +1885,6 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* reset write */
rb_reset_tail(cpu_buffer, tail_page, tail, length);
- if (likely(lock_taken))
- __raw_spin_unlock(&cpu_buffer->lock);
- local_irq_restore(flags);
return NULL;
}
@@ -1317,6 +1901,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
barrier();
tail_page = cpu_buffer->tail_page;
write = local_add_return(length, &tail_page->write);
+
+ /* set write to only the index of the write */
+ write &= RB_WRITE_MASK;
tail = write - length;
/* See if we shot pass the end of this buffer page */
@@ -1361,12 +1948,16 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
bpage = cpu_buffer->tail_page;
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+ unsigned long write_mask =
+ local_read(&bpage->write) & ~RB_WRITE_MASK;
/*
* This is on the tail page. It is possible that
* a write could come in and move the tail page
* and write to the next page. That is fine
* because we just shorten what is on this page.
*/
+ old_index += write_mask;
+ new_index += write_mask;
index = local_cmpxchg(&bpage->write, old_index, new_index);
if (index == old_index)
return 1;
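
rb_try_to_discard() now folds the preserved upper bits of write back into both indices before the cmpxchg, so the rollback only succeeds if nothing else has touched the page since the reservation was made. A userspace sketch of that compare-and-swap rollback, reusing the same assumed 20-bit mask as earlier:

#include <stdio.h>
#include <stdatomic.h>

#define SK_WRITE_MASK ((1u << 20) - 1)

/* Roll the write index back from old_index (current end of the event) to
 * new_index (its start), but only if the whole word -- index plus the
 * bookkeeping bits above the mask -- is still exactly what we expect. */
static int try_discard(atomic_uint *write, unsigned old_index, unsigned new_index)
{
	unsigned raw = atomic_load(write);
	unsigned tag = raw & ~SK_WRITE_MASK;
	unsigned expected = tag | old_index;

	return atomic_compare_exchange_strong(write, &expected, tag | new_index);
}

int main(void)
{
	atomic_uint write = (1u << 20) | 128;	/* tag 1, write index 128 */

	/* The last event occupied bytes 96..128; try to give them back. */
	printf("discarded=%d\n", try_discard(&write, 128, 96));
	return 0;
}
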
@@ -1482,7 +2073,8 @@ static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
}
static struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
+rb_reserve_next_event(struct ring_buffer *buffer,
+ struct ring_buffer_per_cpu *cpu_buffer,
unsigned long length)
{
struct ring_buffer_event *event;
@@ -1492,6 +2084,21 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
rb_start_commit(cpu_buffer);
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
+ /*
+ * Due to the ability to swap a cpu buffer from a buffer
+ * it is possible it was swapped before we committed.
+ * (committing stops a swap). We check for it here and
+ * if it happened, we have to fail the write.
+ */
+ barrier();
+ if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+ local_dec(&cpu_buffer->committing);
+ local_dec(&cpu_buffer->commits);
+ return NULL;
+ }
+#endif
+
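
The guard above works by raising the per-cpu committing count first and only then re-checking cpu_buffer->buffer: ring_buffer_swap_cpu() below refuses to swap while committing is non-zero, so a pointer that still matches means no swap can slip in underneath the write. A small single-threaded userspace model of that handshake (names and atomics are illustrative; the real code additionally disables recording around the swap):

#include <stdio.h>
#include <stdatomic.h>

struct cpu_buf {
	atomic_int committing;		/* writers in the middle of a commit */
	_Atomic(void *) owner;		/* which ring buffer this cpu_buf belongs to */
};

/* Writer side: claim a commit, then make sure we were not swapped away. */
static int start_commit(struct cpu_buf *cb, void *expected_owner)
{
	atomic_fetch_add(&cb->committing, 1);
	if (atomic_load(&cb->owner) != expected_owner) {
		atomic_fetch_sub(&cb->committing, 1);	/* back out, fail the write */
		return 0;
	}
	return 1;
}

/* Swapper side: only swap when no commit is in flight. */
static int try_swap(struct cpu_buf *cb, void *new_owner)
{
	if (atomic_load(&cb->committing))
		return -1;			/* -EBUSY in the real code */
	atomic_store(&cb->owner, new_owner);
	return 0;
}

int main(void)
{
	static int buf_a, buf_b;
	struct cpu_buf cb = { .owner = &buf_a };

	printf("commit ok=%d\n", start_commit(&cb, &buf_a));
	printf("swap while committing=%d\n", try_swap(&cb, &buf_b));
	return 0;
}
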
length = rb_calculate_event_length(length);
again:
/*
@@ -1652,7 +2259,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
if (length > BUF_MAX_DATA_SIZE)
goto out;
- event = rb_reserve_next_event(cpu_buffer, length);
+ event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
goto out;
@@ -1675,18 +2282,23 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
-static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+static void
+rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
- local_inc(&cpu_buffer->entries);
-
/*
* The event first in the commit queue updates the
* time stamp.
*/
if (rb_event_is_commit(cpu_buffer, event))
cpu_buffer->write_stamp += event->time_delta;
+}
+static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+ struct ring_buffer_event *event)
+{
+ local_inc(&cpu_buffer->entries);
+ rb_update_write_stamp(cpu_buffer, event);
rb_end_commit(cpu_buffer);
}
@@ -1733,32 +2345,57 @@ static inline void rb_event_discard(struct ring_buffer_event *event)
event->time_delta = 1;
}
-/**
- * ring_buffer_event_discard - discard any event in the ring buffer
- * @event: the event to discard
- *
- * Sometimes a event that is in the ring buffer needs to be ignored.
- * This function lets the user discard an event in the ring buffer
- * and then that event will not be read later.
- *
- * Note, it is up to the user to be careful with this, and protect
- * against races. If the user discards an event that has been consumed
- * it is possible that it could corrupt the ring buffer.
+/*
+ * Decrement the entries to the page that an event is on.
+ * The event does not even need to exist, only the pointer
+ * to the page it is on. This may only be called before the commit
+ * takes place.
*/
-void ring_buffer_event_discard(struct ring_buffer_event *event)
+static inline void
+rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+ struct ring_buffer_event *event)
{
- rb_event_discard(event);
+ unsigned long addr = (unsigned long)event;
+ struct buffer_page *bpage = cpu_buffer->commit_page;
+ struct buffer_page *start;
+
+ addr &= PAGE_MASK;
+
+ /* Do the likely case first */
+ if (likely(bpage->page == (void *)addr)) {
+ local_dec(&bpage->entries);
+ return;
+ }
+
+ /*
+ * Because the commit page may be on the reader page we
+ * start with the next page and check the end loop there.
+ */
+ rb_inc_page(cpu_buffer, &bpage);
+ start = bpage;
+ do {
+ if (bpage->page == (void *)addr) {
+ local_dec(&bpage->entries);
+ return;
+ }
+ rb_inc_page(cpu_buffer, &bpage);
+ } while (bpage != start);
+
+ /* commit not part of this buffer?? */
+ RB_WARN_ON(cpu_buffer, 1);
}
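
rb_decrement_entry() locates the page an event lives on purely from the event's address: masking with PAGE_MASK gives the start of its page, which is then compared against each buffer page's data pointer. A userspace sketch of that address-to-page lookup, with the page size and the small array of pages made up for the demo:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define SK_PAGE_SIZE 4096UL
#define SK_PAGE_MASK (~(SK_PAGE_SIZE - 1))
#define SK_NR_PAGES  4

/* Find which buffer page contains `event` by masking its address down
 * to a page boundary and comparing with each page's data pointer. */
static int page_of(void *pages[SK_NR_PAGES], void *event)
{
	uintptr_t addr = (uintptr_t)event & SK_PAGE_MASK;

	for (int i = 0; i < SK_NR_PAGES; i++)
		if ((uintptr_t)pages[i] == addr)
			return i;
	return -1;	/* "commit not part of this buffer??" */
}

int main(void)
{
	void *pages[SK_NR_PAGES];

	for (int i = 0; i < SK_NR_PAGES; i++)
		pages[i] = aligned_alloc(SK_PAGE_SIZE, SK_PAGE_SIZE);

	void *event = (char *)pages[2] + 100;	/* an event 100 bytes into page 2 */
	printf("event lives on page %d\n", page_of(pages, event));

	for (int i = 0; i < SK_NR_PAGES; i++)
		free(pages[i]);
	return 0;
}
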
-EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
/**
* ring_buffer_commit_discard - discard an event that has not been committed
* @buffer: the ring buffer
* @event: non committed event to discard
*
- * This is similar to ring_buffer_event_discard but must only be
- * performed on an event that has not been committed yet. The difference
- * is that this will also try to free the event from the ring buffer
+ * Sometimes an event that is in the ring buffer needs to be ignored.
+ * This function lets the user discard an event in the ring buffer
+ * and then that event will not be read later.
+ *
+ * This function only works if it is called before the item has been
+ * committed. It will try to free the event from the ring buffer
* if another event has not been added behind it.
*
* If another event has been added behind it, it will set the event
@@ -1786,14 +2423,15 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
*/
RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
+ rb_decrement_entry(cpu_buffer, event);
if (rb_try_to_discard(cpu_buffer, event))
goto out;
/*
* The commit is still visible by the reader, so we
- * must increment entries.
+ * must still update the timestamp.
*/
- local_inc(&cpu_buffer->entries);
+ rb_update_write_stamp(cpu_buffer, event);
out:
rb_end_commit(cpu_buffer);
@@ -1854,7 +2492,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
if (length > BUF_MAX_DATA_SIZE)
goto out;
- event = rb_reserve_next_event(cpu_buffer, length);
+ event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
goto out;
@@ -1875,9 +2513,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_write);
static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = cpu_buffer->reader_page;
- struct buffer_page *head = cpu_buffer->head_page;
+ struct buffer_page *head = rb_set_head_page(cpu_buffer);
struct buffer_page *commit = cpu_buffer->commit_page;
+ /* In case of error, head will be NULL */
+ if (unlikely(!head))
+ return 1;
+
return reader->read == rb_page_commit(reader) &&
(commit == reader ||
(commit == head &&
@@ -1968,7 +2610,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
- ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
+ ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
- cpu_buffer->read;
return ret;
@@ -1989,33 +2631,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
- ret = cpu_buffer->overrun;
+ ret = local_read(&cpu_buffer->overrun);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
- * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
- * @buffer: The ring buffer
- * @cpu: The per CPU buffer to get the number of overruns from
- */
-unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
-{
- struct ring_buffer_per_cpu *cpu_buffer;
- unsigned long ret;
-
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return 0;
-
- cpu_buffer = buffer->buffers[cpu];
- ret = cpu_buffer->nmi_dropped;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
-
-/**
* ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
@@ -2030,7 +2652,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
- ret = cpu_buffer->commit_overrun;
+ ret = local_read(&cpu_buffer->commit_overrun);
return ret;
}
@@ -2053,7 +2675,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
entries += (local_read(&cpu_buffer->entries) -
- cpu_buffer->overrun) - cpu_buffer->read;
+ local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
}
return entries;
@@ -2076,7 +2698,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- overruns += cpu_buffer->overrun;
+ overruns += local_read(&cpu_buffer->overrun);
}
return overruns;
@@ -2089,8 +2711,10 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
/* Iterator usage is expected to have record disabled */
if (list_empty(&cpu_buffer->reader_page->list)) {
- iter->head_page = cpu_buffer->head_page;
- iter->head = cpu_buffer->head_page->read;
+ iter->head_page = rb_set_head_page(cpu_buffer);
+ if (unlikely(!iter->head_page))
+ return;
+ iter->head = iter->head_page->read;
} else {
iter->head_page = cpu_buffer->reader_page;
iter->head = cpu_buffer->reader_page->read;
@@ -2207,6 +2831,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
struct buffer_page *reader = NULL;
unsigned long flags;
int nr_loops = 0;
+ int ret;
local_irq_save(flags);
__raw_spin_lock(&cpu_buffer->lock);
@@ -2240,30 +2865,56 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
goto out;
/*
- * Splice the empty reader page into the list around the head.
* Reset the reader page to size zero.
*/
+ local_set(&cpu_buffer->reader_page->write, 0);
+ local_set(&cpu_buffer->reader_page->entries, 0);
+ local_set(&cpu_buffer->reader_page->page->commit, 0);
- reader = cpu_buffer->head_page;
+ spin:
+ /*
+ * Splice the empty reader page into the list around the head.
+ */
+ reader = rb_set_head_page(cpu_buffer);
cpu_buffer->reader_page->list.next = reader->list.next;
cpu_buffer->reader_page->list.prev = reader->list.prev;
- local_set(&cpu_buffer->reader_page->write, 0);
- local_set(&cpu_buffer->reader_page->entries, 0);
- local_set(&cpu_buffer->reader_page->page->commit, 0);
+ /*
+ * cpu_buffer->pages just needs to point to the buffer, it
+ * has no specific buffer page to point to. Lets move it out
+ * of our way so we don't accidentally swap it.
+ */
+ cpu_buffer->pages = reader->list.prev;
- /* Make the reader page now replace the head */
- reader->list.prev->next = &cpu_buffer->reader_page->list;
- reader->list.next->prev = &cpu_buffer->reader_page->list;
+ /* The reader page will be pointing to the new head */
+ rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
/*
- * If the tail is on the reader, then we must set the head
- * to the inserted page, otherwise we set it one before.
+ * Here's the tricky part.
+ *
+ * We need to move the pointer past the header page.
+ * But we can only do that if a writer is not currently
+ * moving it. The page before the header page has the
+ * flag bit '1' set if it is pointing to the page we want,
+ * but if the writer is in the process of moving it
+ * then it will be '2' or already moved '0'.
*/
- cpu_buffer->head_page = cpu_buffer->reader_page;
- if (cpu_buffer->commit_page != reader)
- rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
+ ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
+
+ /*
+ * If we did not convert it, then we must try again.
+ */
+ if (!ret)
+ goto spin;
+
+ /*
+ * Yeah! We succeeded in replacing the page.
+ *
+ * Now make the new head point back to the reader page.
+ */
+ reader->list.next->prev = &cpu_buffer->reader_page->list;
+ rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
/* Finally update the reader page to the new head */
cpu_buffer->reader_page = reader;
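
The reader path above primes the reader page once, then loops from the spin: label: pick the current head, attempt rb_head_page_replace(), and retry if a writer moved the head in the meantime. A stripped-down userspace version of that retry loop, modelling the head as a single atomic pointer instead of flag bits in the list:

#include <stdio.h>
#include <stdatomic.h>

/* Swap our private reader page in for whatever page is currently the head.
 * Retries until the compare-and-swap wins against concurrent head moves. */
static void *take_head(_Atomic(void *) *head, void *reader_page)
{
	void *old;

	do {
		old = atomic_load(head);	/* the "spin:" label in the patch */
	} while (!atomic_compare_exchange_weak(head, &old, reader_page));

	return old;	/* the page the reader now owns */
}

int main(void)
{
	static int page_a, page_b;
	_Atomic(void *) head = &page_a;

	void *got = take_head(&head, &page_b);
	printf("reader got old head %p, new head is %p\n", got, atomic_load(&head));
	return 0;
}
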
@@ -2292,8 +2943,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
event = rb_reader_event(cpu_buffer);
- if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
- || rb_discarded_event(event))
+ if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
cpu_buffer->read++;
rb_update_read_stamp(cpu_buffer, event);
@@ -2525,10 +3175,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
spin_unlock(&cpu_buffer->reader_lock);
local_irq_restore(flags);
- if (event && event->type_len == RINGBUF_TYPE_PADDING) {
- cpu_relax();
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
- }
return event;
}
@@ -2553,10 +3201,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
event = rb_iter_peek(iter, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- if (event && event->type_len == RINGBUF_TYPE_PADDING) {
- cpu_relax();
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
- }
return event;
}
@@ -2602,10 +3248,8 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
out:
preempt_enable();
- if (event && event->type_len == RINGBUF_TYPE_PADDING) {
- cpu_relax();
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
- }
return event;
}
@@ -2685,21 +3329,19 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
- again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ again:
event = rb_iter_peek(iter, ts);
if (!event)
goto out;
+ if (event->type_len == RINGBUF_TYPE_PADDING)
+ goto again;
+
rb_advance_iter(iter);
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- if (event && event->type_len == RINGBUF_TYPE_PADDING) {
- cpu_relax();
- goto again;
- }
-
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
@@ -2717,8 +3359,10 @@ EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
+ rb_head_page_deactivate(cpu_buffer);
+
cpu_buffer->head_page
- = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
+ = list_entry(cpu_buffer->pages, struct buffer_page, list);
local_set(&cpu_buffer->head_page->write, 0);
local_set(&cpu_buffer->head_page->entries, 0);
local_set(&cpu_buffer->head_page->page->commit, 0);
@@ -2734,16 +3378,17 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
- cpu_buffer->nmi_dropped = 0;
- cpu_buffer->commit_overrun = 0;
- cpu_buffer->overrun = 0;
- cpu_buffer->read = 0;
+ local_set(&cpu_buffer->commit_overrun, 0);
+ local_set(&cpu_buffer->overrun, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
+ cpu_buffer->read = 0;
cpu_buffer->write_stamp = 0;
cpu_buffer->read_stamp = 0;
+
+ rb_head_page_activate(cpu_buffer);
}
/**
@@ -2763,12 +3408,16 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
+ goto out;
+
__raw_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer);
__raw_spin_unlock(&cpu_buffer->lock);
+ out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
@@ -2851,6 +3500,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
* @buffer_a: One buffer to swap with
@@ -2905,20 +3555,28 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
atomic_inc(&cpu_buffer_a->record_disabled);
atomic_inc(&cpu_buffer_b->record_disabled);
+ ret = -EBUSY;
+ if (local_read(&cpu_buffer_a->committing))
+ goto out_dec;
+ if (local_read(&cpu_buffer_b->committing))
+ goto out_dec;
+
buffer_a->buffers[cpu] = cpu_buffer_b;
buffer_b->buffers[cpu] = cpu_buffer_a;
cpu_buffer_b->buffer = buffer_a;
cpu_buffer_a->buffer = buffer_b;
+ ret = 0;
+
+out_dec:
atomic_dec(&cpu_buffer_a->record_disabled);
atomic_dec(&cpu_buffer_b->record_disabled);
-
- ret = 0;
out:
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
+#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
/**
* ring_buffer_alloc_read_page - allocate a page to read from buffer
@@ -3091,7 +3749,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
read = 0;
} else {
/* update the entry counter */
- cpu_buffer->read += local_read(&reader->entries);
+ cpu_buffer->read += rb_page_entries(reader);
/* swap the pages */
rb_init_page(bpage);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8c358395d338..5c75deeefe30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,14 +43,11 @@
#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
-unsigned long __read_mostly tracing_max_latency;
-unsigned long __read_mostly tracing_thresh;
-
/*
* On boot up, the ring buffer is set to the minimum size, so that
* we do not waste memory on systems that are not using tracing.
*/
-static int ring_buffer_expanded;
+int ring_buffer_expanded;
/*
* We need to change this state when a selftest is running.
@@ -64,7 +61,7 @@ static bool __read_mostly tracing_selftest_running;
/*
* If a tracer is running, we do not want to run SELFTEST.
*/
-static bool __read_mostly tracing_selftest_disabled;
+bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
@@ -89,7 +86,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
*/
static int tracing_disabled = 1;
-static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
{
@@ -172,10 +169,11 @@ static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
-int filter_current_check_discard(struct ftrace_event_call *call, void *rec,
+int filter_current_check_discard(struct ring_buffer *buffer,
+ struct ftrace_event_call *call, void *rec,
struct ring_buffer_event *event)
{
- return filter_check_discard(call, rec, global_trace.buffer, event);
+ return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);
@@ -266,6 +264,9 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
TRACE_ITER_GRAPH_TIME;
+static int trace_stop_count;
+static DEFINE_SPINLOCK(tracing_start_lock);
+
/**
* trace_wake_up - wake up tasks waiting for trace input
*
@@ -323,50 +324,20 @@ static const char *trace_options[] = {
"printk-msg-only",
"context-info",
"latency-format",
- "global-clock",
"sleep-time",
"graph-time",
NULL
};
-/*
- * ftrace_max_lock is used to protect the swapping of buffers
- * when taking a max snapshot. The buffers themselves are
- * protected by per_cpu spinlocks. But the action of the swap
- * needs its own lock.
- *
- * This is defined as a raw_spinlock_t in order to help
- * with performance when lockdep debugging is enabled.
- */
-static raw_spinlock_t ftrace_max_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-
-/*
- * Copy the new maximum trace into the separate maximum-trace
- * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
- */
-static void
-__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
-{
- struct trace_array_cpu *data = tr->data[cpu];
-
- max_tr.cpu = cpu;
- max_tr.time_start = data->preempt_timestamp;
+static struct {
+ u64 (*func)(void);
+ const char *name;
+} trace_clocks[] = {
+ { trace_clock_local, "local" },
+ { trace_clock_global, "global" },
+};
- data = max_tr.data[cpu];
- data->saved_latency = tracing_max_latency;
-
- memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
- data->pid = tsk->pid;
- data->uid = task_uid(tsk);
- data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
- data->policy = tsk->policy;
- data->rt_priority = tsk->rt_priority;
-
- /* record this tasks comm */
- tracing_record_cmdline(tsk);
-}
+int trace_clock_id;
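
The trace_clocks[] table replaces the old global-clock option with a simple name-to-function mapping indexed by trace_clock_id; the trace_clock read/write handlers added further down just search it by name. A userspace sketch of the same dispatch (the clock implementations here are stand-ins, not the kernel's trace_clock_local/trace_clock_global):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <time.h>

static uint64_t clock_local(void)  { return (uint64_t)clock(); }
static uint64_t clock_global(void) { return (uint64_t)time(NULL); }

/* Name -> function table, mirroring the trace_clocks[] idea. */
static struct {
	uint64_t (*func)(void);
	const char *name;
} clocks[] = {
	{ clock_local,  "local"  },
	{ clock_global, "global" },
};

static int clock_id;	/* index of the currently selected clock */

/* Select a clock by name, as the trace_clock write handler does. */
static int set_clock(const char *name)
{
	for (size_t i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		if (strcmp(clocks[i].name, name) == 0) {
			clock_id = (int)i;
			return 0;
		}
	}
	return -1;	/* -EINVAL in the kernel */
}

int main(void)
{
	set_clock("global");
	printf("[%s] now=%llu\n", clocks[clock_id].name,
	       (unsigned long long)clocks[clock_id].func());
	return 0;
}
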
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
@@ -411,6 +382,56 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
return cnt;
}
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ *
+ * It is also used in other places outside the update_max_tr
+ * so it needs to be defined outside of the
+ * CONFIG_TRACER_MAX_TRACE.
+ */
+static raw_spinlock_t ftrace_max_lock =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+unsigned long __read_mostly tracing_max_latency;
+unsigned long __read_mostly tracing_thresh;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+ struct trace_array_cpu *data = tr->data[cpu];
+ struct trace_array_cpu *max_data = tr->data[cpu];
+
+ max_tr.cpu = cpu;
+ max_tr.time_start = data->preempt_timestamp;
+
+ max_data = max_tr.data[cpu];
+ max_data->saved_latency = tracing_max_latency;
+ max_data->critical_start = data->critical_start;
+ max_data->critical_end = data->critical_end;
+
+ memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
+ max_data->pid = tsk->pid;
+ max_data->uid = task_uid(tsk);
+ max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+ max_data->policy = tsk->policy;
+ max_data->rt_priority = tsk->rt_priority;
+
+ /* record this task's comm */
+ tracing_record_cmdline(tsk);
+}
+
/**
* update_max_tr - snapshot all trace buffers from global_trace to max_tr
* @tr: tracer
@@ -425,16 +446,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct ring_buffer *buf = tr->buffer;
+ if (trace_stop_count)
+ return;
+
WARN_ON_ONCE(!irqs_disabled());
__raw_spin_lock(&ftrace_max_lock);
tr->buffer = max_tr.buffer;
max_tr.buffer = buf;
- ftrace_disable_cpu();
- ring_buffer_reset(tr->buffer);
- ftrace_enable_cpu();
-
__update_max_tr(tr, tsk, cpu);
__raw_spin_unlock(&ftrace_max_lock);
}
@@ -452,21 +472,35 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
int ret;
+ if (trace_stop_count)
+ return;
+
WARN_ON_ONCE(!irqs_disabled());
__raw_spin_lock(&ftrace_max_lock);
ftrace_disable_cpu();
- ring_buffer_reset(max_tr.buffer);
ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
+ if (ret == -EBUSY) {
+ /*
+ * We failed to swap the buffer due to a commit taking
+ * place on this CPU. We fail to record, but we reset
+ * the max trace buffer (no one writes directly to it)
+ * and flag that it failed.
+ */
+ trace_array_printk(&max_tr, _THIS_IP_,
+ "Failed to swap buffers due to commit in progress\n");
+ }
+
ftrace_enable_cpu();
- WARN_ON_ONCE(ret && ret != -EAGAIN);
+ WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
__update_max_tr(tr, tsk, cpu);
__raw_spin_unlock(&ftrace_max_lock);
}
+#endif /* CONFIG_TRACER_MAX_TRACE */
/**
* register_tracer - register a tracer with the ftrace system.
@@ -523,7 +557,6 @@ __acquires(kernel_lock)
if (type->selftest && !tracing_selftest_disabled) {
struct tracer *saved_tracer = current_trace;
struct trace_array *tr = &global_trace;
- int i;
/*
* Run a selftest on this tracer.
@@ -532,8 +565,7 @@ __acquires(kernel_lock)
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
- for_each_tracing_cpu(i)
- tracing_reset(tr, i);
+ tracing_reset_online_cpus(tr);
current_trace = type;
/* the test is responsible for initializing and enabling */
@@ -546,8 +578,7 @@ __acquires(kernel_lock)
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
- for_each_tracing_cpu(i)
- tracing_reset(tr, i);
+ tracing_reset_online_cpus(tr);
printk(KERN_CONT "PASSED\n");
}
@@ -622,21 +653,42 @@ void unregister_tracer(struct tracer *type)
mutex_unlock(&trace_types_lock);
}
-void tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct trace_array *tr, int cpu)
{
ftrace_disable_cpu();
ring_buffer_reset_cpu(tr->buffer, cpu);
ftrace_enable_cpu();
}
+void tracing_reset(struct trace_array *tr, int cpu)
+{
+ struct ring_buffer *buffer = tr->buffer;
+
+ ring_buffer_record_disable(buffer);
+
+ /* Make sure all commits have finished */
+ synchronize_sched();
+ __tracing_reset(tr, cpu);
+
+ ring_buffer_record_enable(buffer);
+}
+
void tracing_reset_online_cpus(struct trace_array *tr)
{
+ struct ring_buffer *buffer = tr->buffer;
int cpu;
+ ring_buffer_record_disable(buffer);
+
+ /* Make sure all commits have finished */
+ synchronize_sched();
+
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
+ __tracing_reset(tr, cpu);
+
+ ring_buffer_record_enable(buffer);
}
void tracing_reset_current(int cpu)
@@ -667,9 +719,6 @@ static void trace_init_cmdlines(void)
cmdline_idx = 0;
}
-static int trace_stop_count;
-static DEFINE_SPINLOCK(tracing_start_lock);
-
/**
* ftrace_off_permanent - disable all ftrace code permanently
*
@@ -850,14 +899,15 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
- int type,
- unsigned long len,
- unsigned long flags, int pc)
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+ int type,
+ unsigned long len,
+ unsigned long flags, int pc)
{
struct ring_buffer_event *event;
- event = ring_buffer_lock_reserve(tr->buffer, len);
+ event = ring_buffer_lock_reserve(buffer, len);
if (event != NULL) {
struct trace_entry *ent = ring_buffer_event_data(event);
@@ -867,58 +917,60 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
return event;
}
-static void ftrace_trace_stack(struct trace_array *tr,
- unsigned long flags, int skip, int pc);
-static void ftrace_trace_userstack(struct trace_array *tr,
- unsigned long flags, int pc);
-static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
- struct ring_buffer_event *event,
- unsigned long flags, int pc,
- int wake)
+static inline void
+__trace_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ int wake)
{
- ring_buffer_unlock_commit(tr->buffer, event);
+ ring_buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, flags, 6, pc);
- ftrace_trace_userstack(tr, flags, pc);
+ ftrace_trace_stack(buffer, flags, 6, pc);
+ ftrace_trace_userstack(buffer, flags, pc);
if (wake)
trace_wake_up();
}
-void trace_buffer_unlock_commit(struct trace_array *tr,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
+ int type, unsigned long len,
unsigned long flags, int pc)
{
- return trace_buffer_lock_reserve(&global_trace,
+ *current_rb = global_trace.buffer;
+ return trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
- unsigned long flags, int pc)
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event)
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event)
{
- ring_buffer_discard_commit(global_trace.buffer, event);
+ ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
@@ -928,6 +980,7 @@ trace_function(struct trace_array *tr,
int pc)
{
struct ftrace_event_call *call = &event_function;
+ struct ring_buffer *buffer = tr->buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
@@ -935,7 +988,7 @@ trace_function(struct trace_array *tr,
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+ event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
flags, pc);
if (!event)
return;
@@ -943,58 +996,10 @@ trace_function(struct trace_array *tr,
entry->ip = ip;
entry->parent_ip = parent_ip;
- if (!filter_check_discard(call, entry, tr->buffer, event))
- ring_buffer_unlock_commit(tr->buffer, event);
-}
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int __trace_graph_entry(struct trace_array *tr,
- struct ftrace_graph_ent *trace,
- unsigned long flags,
- int pc)
-{
- struct ftrace_event_call *call = &event_funcgraph_entry;
- struct ring_buffer_event *event;
- struct ftrace_graph_ent_entry *entry;
-
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
- return 0;
-
- event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
- sizeof(*entry), flags, pc);
- if (!event)
- return 0;
- entry = ring_buffer_event_data(event);
- entry->graph_ent = *trace;
- if (!filter_current_check_discard(call, entry, event))
- ring_buffer_unlock_commit(global_trace.buffer, event);
-
- return 1;
+ if (!filter_check_discard(call, entry, buffer, event))
+ ring_buffer_unlock_commit(buffer, event);
}
-static void __trace_graph_return(struct trace_array *tr,
- struct ftrace_graph_ret *trace,
- unsigned long flags,
- int pc)
-{
- struct ftrace_event_call *call = &event_funcgraph_exit;
- struct ring_buffer_event *event;
- struct ftrace_graph_ret_entry *entry;
-
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
- return;
-
- event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
- sizeof(*entry), flags, pc);
- if (!event)
- return;
- entry = ring_buffer_event_data(event);
- entry->ret = *trace;
- if (!filter_current_check_discard(call, entry, event))
- ring_buffer_unlock_commit(global_trace.buffer, event);
-}
-#endif
-
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -1004,17 +1009,17 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
trace_function(tr, ip, parent_ip, flags, pc);
}
-static void __ftrace_trace_stack(struct trace_array *tr,
+#ifdef CONFIG_STACKTRACE
+static void __ftrace_trace_stack(struct ring_buffer *buffer,
unsigned long flags,
int skip, int pc)
{
-#ifdef CONFIG_STACKTRACE
struct ftrace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
struct stack_entry *entry;
struct stack_trace trace;
- event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+ event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
sizeof(*entry), flags, pc);
if (!event)
return;
@@ -1027,32 +1032,28 @@ static void __ftrace_trace_stack(struct trace_array *tr,
trace.entries = entry->caller;
save_stack_trace(&trace);
- if (!filter_check_discard(call, entry, tr->buffer, event))
- ring_buffer_unlock_commit(tr->buffer, event);
-#endif
+ if (!filter_check_discard(call, entry, buffer, event))
+ ring_buffer_unlock_commit(buffer, event);
}
-static void ftrace_trace_stack(struct trace_array *tr,
- unsigned long flags,
- int skip, int pc)
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+ int skip, int pc)
{
if (!(trace_flags & TRACE_ITER_STACKTRACE))
return;
- __ftrace_trace_stack(tr, flags, skip, pc);
+ __ftrace_trace_stack(buffer, flags, skip, pc);
}
-void __trace_stack(struct trace_array *tr,
- unsigned long flags,
- int skip, int pc)
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+ int pc)
{
- __ftrace_trace_stack(tr, flags, skip, pc);
+ __ftrace_trace_stack(tr->buffer, flags, skip, pc);
}
-static void ftrace_trace_userstack(struct trace_array *tr,
- unsigned long flags, int pc)
+void
+ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
-#ifdef CONFIG_STACKTRACE
struct ftrace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
@@ -1061,7 +1062,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
- event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+ event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
sizeof(*entry), flags, pc);
if (!event)
return;
@@ -1075,9 +1076,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
trace.entries = entry->caller;
save_stack_trace_user(&trace);
- if (!filter_check_discard(call, entry, tr->buffer, event))
- ring_buffer_unlock_commit(tr->buffer, event);
-#endif
+ if (!filter_check_discard(call, entry, buffer, event))
+ ring_buffer_unlock_commit(buffer, event);
}
#ifdef UNUSED
@@ -1087,6 +1087,8 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
}
#endif /* UNUSED */
+#endif /* CONFIG_STACKTRACE */
+
static void
ftrace_trace_special(void *__tr,
unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -1094,9 +1096,10 @@ ftrace_trace_special(void *__tr,
{
struct ring_buffer_event *event;
struct trace_array *tr = __tr;
+ struct ring_buffer *buffer = tr->buffer;
struct special_entry *entry;
- event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+ event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
sizeof(*entry), 0, pc);
if (!event)
return;
@@ -1104,7 +1107,7 @@ ftrace_trace_special(void *__tr,
entry->arg1 = arg1;
entry->arg2 = arg2;
entry->arg3 = arg3;
- trace_buffer_unlock_commit(tr, event, 0, pc);
+ trace_buffer_unlock_commit(buffer, event, 0, pc);
}
void
@@ -1115,62 +1118,6 @@ __trace_special(void *__tr, void *__data,
}
void
-tracing_sched_switch_trace(struct trace_array *tr,
- struct task_struct *prev,
- struct task_struct *next,
- unsigned long flags, int pc)
-{
- struct ftrace_event_call *call = &event_context_switch;
- struct ring_buffer_event *event;
- struct ctx_switch_entry *entry;
-
- event = trace_buffer_lock_reserve(tr, TRACE_CTX,
- sizeof(*entry), flags, pc);
- if (!event)
- return;
- entry = ring_buffer_event_data(event);
- entry->prev_pid = prev->pid;
- entry->prev_prio = prev->prio;
- entry->prev_state = prev->state;
- entry->next_pid = next->pid;
- entry->next_prio = next->prio;
- entry->next_state = next->state;
- entry->next_cpu = task_cpu(next);
-
- if (!filter_check_discard(call, entry, tr->buffer, event))
- trace_buffer_unlock_commit(tr, event, flags, pc);
-}
-
-void
-tracing_sched_wakeup_trace(struct trace_array *tr,
- struct task_struct *wakee,
- struct task_struct *curr,
- unsigned long flags, int pc)
-{
- struct ftrace_event_call *call = &event_wakeup;
- struct ring_buffer_event *event;
- struct ctx_switch_entry *entry;
-
- event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
- sizeof(*entry), flags, pc);
- if (!event)
- return;
- entry = ring_buffer_event_data(event);
- entry->prev_pid = curr->pid;
- entry->prev_prio = curr->prio;
- entry->prev_state = curr->state;
- entry->next_pid = wakee->pid;
- entry->next_prio = wakee->prio;
- entry->next_state = wakee->state;
- entry->next_cpu = task_cpu(wakee);
-
- if (!filter_check_discard(call, entry, tr->buffer, event))
- ring_buffer_unlock_commit(tr->buffer, event);
- ftrace_trace_stack(tr, flags, 6, pc);
- ftrace_trace_userstack(tr, flags, pc);
-}
-
-void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
struct trace_array *tr = &global_trace;
@@ -1194,68 +1141,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
local_irq_restore(flags);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int trace_graph_entry(struct ftrace_graph_ent *trace)
-{
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- long disabled;
- int ret;
- int cpu;
- int pc;
-
- if (!ftrace_trace_task(current))
- return 0;
-
- if (!ftrace_graph_addr(trace->func))
- return 0;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1)) {
- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
- } else {
- ret = 0;
- }
- /* Only do the atomic if it is not already set */
- if (!test_tsk_trace_graph(current))
- set_tsk_trace_graph(current);
-
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
-
- return ret;
-}
-
-void trace_graph_return(struct ftrace_graph_ret *trace)
-{
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- long disabled;
- int cpu;
- int pc;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1)) {
- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
- }
- if (!trace->depth)
- clear_tsk_trace_graph(current);
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-
/**
* trace_vbprintk - write binary msg to tracing buffer
*
@@ -1268,6 +1153,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
struct ftrace_event_call *call = &event_bprint;
struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
struct bprint_entry *entry;
@@ -1300,7 +1186,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
goto out_unlock;
size = sizeof(*entry) + sizeof(u32) * len;
- event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+ buffer = tr->buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+ flags, pc);
if (!event)
goto out_unlock;
entry = ring_buffer_event_data(event);
@@ -1308,8 +1196,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
entry->fmt = fmt;
memcpy(entry->buf, trace_buf, sizeof(u32) * len);
- if (!filter_check_discard(call, entry, tr->buffer, event))
- ring_buffer_unlock_commit(tr->buffer, event);
+ if (!filter_check_discard(call, entry, buffer, event))
+ ring_buffer_unlock_commit(buffer, event);
out_unlock:
__raw_spin_unlock(&trace_buf_lock);
@@ -1324,14 +1212,30 @@ out:
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_array_printk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ va_start(ap, fmt);
+ ret = trace_array_vprintk(tr, ip, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+
+int trace_array_vprintk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, va_list args)
{
static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_print;
struct ring_buffer_event *event;
- struct trace_array *tr = &global_trace;
+ struct ring_buffer *buffer;
struct trace_array_cpu *data;
int cpu, len = 0, size, pc;
struct print_entry *entry;
@@ -1359,7 +1263,9 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
trace_buf[len] = 0;
size = sizeof(*entry) + len + 1;
- event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+ buffer = tr->buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+ irq_flags, pc);
if (!event)
goto out_unlock;
entry = ring_buffer_event_data(event);
@@ -1367,8 +1273,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(&entry->buf, trace_buf, len);
entry->buf[len] = 0;
- if (!filter_check_discard(call, entry, tr->buffer, event))
- ring_buffer_unlock_commit(tr->buffer, event);
+ if (!filter_check_discard(call, entry, buffer, event))
+ ring_buffer_unlock_commit(buffer, event);
out_unlock:
__raw_spin_unlock(&trace_buf_lock);
@@ -1380,6 +1286,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
return len;
}
+
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+{
+ return trace_array_vprintk(&global_trace, ip, fmt, args);
+}
EXPORT_SYMBOL_GPL(trace_vprintk);
enum trace_file_type {
@@ -1519,6 +1430,37 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
return ent;
}
+static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+{
+ struct trace_array *tr = iter->tr;
+ struct ring_buffer_event *event;
+ struct ring_buffer_iter *buf_iter;
+ unsigned long entries = 0;
+ u64 ts;
+
+ tr->data[cpu]->skipped_entries = 0;
+
+ if (!iter->buffer_iter[cpu])
+ return;
+
+ buf_iter = iter->buffer_iter[cpu];
+ ring_buffer_iter_reset(buf_iter);
+
+ /*
+ * We could have the case with the max latency tracers
+ * that a reset never took place on a cpu. This is evident
+ * by the timestamp being before the start of the buffer.
+ */
+ while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
+ if (ts >= iter->tr->time_start)
+ break;
+ entries++;
+ ring_buffer_read(buf_iter, NULL);
+ }
+
+ tr->data[cpu]->skipped_entries = entries;
+}
+
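
tracing_iter_reset() walks the per-cpu iterator forward, counting and dropping every event whose timestamp predates the trace's time_start, so latency tracers that never reset a cpu do not display stale data. The same filtering loop as a userspace sketch over a plain array of timestamped events:

#include <stdio.h>
#include <stdint.h>

struct event { uint64_t ts; int payload; };

/* Advance past events recorded before `time_start`; return how many were
 * skipped so the caller can subtract them from the entry count. */
static unsigned skip_stale(const struct event *ev, unsigned n,
			   uint64_t time_start, unsigned *start)
{
	unsigned skipped = 0;

	while (skipped < n && ev[skipped].ts < time_start)
		skipped++;

	*start = skipped;	/* first event worth printing */
	return skipped;
}

int main(void)
{
	struct event ev[] = { {90, 1}, {95, 2}, {120, 3}, {130, 4} };
	unsigned start;

	unsigned skipped = skip_stale(ev, 4, 100, &start);
	printf("skipped %u stale events, first good ts=%llu\n",
	       skipped, (unsigned long long)ev[start].ts);
	return 0;
}
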
/*
* No necessary locking here. The worst thing which can
* happen is losing events consumed at the same time
@@ -1557,10 +1499,9 @@ static void *s_start(struct seq_file *m, loff_t *pos)
if (cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu)
- ring_buffer_iter_reset(iter->buffer_iter[cpu]);
+ tracing_iter_reset(iter, cpu);
} else
- ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);
-
+ tracing_iter_reset(iter, cpu_file);
ftrace_enable_cpu();
@@ -1609,16 +1550,32 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
struct trace_array *tr = iter->tr;
struct trace_array_cpu *data = tr->data[tr->cpu];
struct tracer *type = current_trace;
- unsigned long total;
- unsigned long entries;
+ unsigned long entries = 0;
+ unsigned long total = 0;
+ unsigned long count;
const char *name = "preemption";
+ int cpu;
if (type)
name = type->name;
- entries = ring_buffer_entries(iter->tr->buffer);
- total = entries +
- ring_buffer_overruns(iter->tr->buffer);
+
+ for_each_tracing_cpu(cpu) {
+ count = ring_buffer_entries_cpu(tr->buffer, cpu);
+ /*
+ * If this buffer has skipped entries, then we hold all
+ * entries for the trace and we need to ignore the
+ * ones before the time stamp.
+ */
+ if (tr->data[cpu]->skipped_entries) {
+ count -= tr->data[cpu]->skipped_entries;
+ /* total is the same as the entries */
+ total += count;
+ } else
+ total += count +
+ ring_buffer_overrun_cpu(tr->buffer, cpu);
+ entries += count;
+ }
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
@@ -1660,7 +1617,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
seq_puts(m, "\n# => ended at: ");
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
trace_print_seq(m, &iter->seq);
- seq_puts(m, "#\n");
+ seq_puts(m, "\n#\n");
}
seq_puts(m, "#\n");
@@ -1679,6 +1636,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
if (cpumask_test_cpu(iter->cpu, iter->started))
return;
+ if (iter->tr->data[iter->cpu]->skipped_entries)
+ return;
+
cpumask_set_cpu(iter->cpu, iter->started);
/* Don't print started cpu buffer for the first entry of the trace */
@@ -1941,19 +1901,23 @@ __tracing_open(struct inode *inode, struct file *file)
if (ring_buffer_overruns(iter->tr->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
+ /* stop the trace while dumping */
+ tracing_stop();
+
if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
ring_buffer_read_start(iter->tr->buffer, cpu);
+ tracing_iter_reset(iter, cpu);
}
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
ring_buffer_read_start(iter->tr->buffer, cpu);
+ tracing_iter_reset(iter, cpu);
}
- /* TODO stop tracer */
ret = seq_open(file, &tracer_seq_ops);
if (ret < 0) {
fail_ret = ERR_PTR(ret);
@@ -1963,9 +1927,6 @@ __tracing_open(struct inode *inode, struct file *file)
m = file->private_data;
m->private = iter;
- /* stop the trace while dumping */
- tracing_stop();
-
mutex_unlock(&trace_types_lock);
return iter;
@@ -1976,6 +1937,7 @@ __tracing_open(struct inode *inode, struct file *file)
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
free_cpumask_var(iter->started);
+ tracing_start();
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
@@ -2257,8 +2219,8 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
len += 3; /* "no" and newline */
}
- /* +2 for \n and \0 */
- buf = kmalloc(len + 2, GFP_KERNEL);
+ /* +1 for \0 */
+ buf = kmalloc(len + 1, GFP_KERNEL);
if (!buf) {
mutex_unlock(&trace_types_lock);
return -ENOMEM;
@@ -2281,7 +2243,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
}
mutex_unlock(&trace_types_lock);
- WARN_ON(r >= len + 2);
+ WARN_ON(r >= len + 1);
r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -2292,23 +2254,23 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
- struct tracer_flags *trace_flags = trace->flags;
+ struct tracer_flags *tracer_flags = trace->flags;
struct tracer_opt *opts = NULL;
int ret = 0, i = 0;
int len;
- for (i = 0; trace_flags->opts[i].name; i++) {
- opts = &trace_flags->opts[i];
+ for (i = 0; tracer_flags->opts[i].name; i++) {
+ opts = &tracer_flags->opts[i];
len = strlen(opts->name);
if (strncmp(cmp, opts->name, len) == 0) {
- ret = trace->set_flag(trace_flags->val,
+ ret = trace->set_flag(tracer_flags->val,
opts->bit, !neg);
break;
}
}
/* Not found */
- if (!trace_flags->opts[i].name)
+ if (!tracer_flags->opts[i].name)
return -EINVAL;
/* Refused to handle */
@@ -2316,9 +2278,9 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
return ret;
if (neg)
- trace_flags->val &= ~opts->bit;
+ tracer_flags->val &= ~opts->bit;
else
- trace_flags->val |= opts->bit;
+ tracer_flags->val |= opts->bit;
return 0;
}
@@ -2333,22 +2295,6 @@ static void set_tracer_flags(unsigned int mask, int enabled)
trace_flags |= mask;
else
trace_flags &= ~mask;
-
- if (mask == TRACE_ITER_GLOBAL_CLK) {
- u64 (*func)(void);
-
- if (enabled)
- func = trace_clock_global;
- else
- func = trace_clock_local;
-
- mutex_lock(&trace_types_lock);
- ring_buffer_set_clock(global_trace.buffer, func);
-
- if (max_tr.buffer)
- ring_buffer_set_clock(max_tr.buffer, func);
- mutex_unlock(&trace_types_lock);
- }
}
static ssize_t
@@ -3316,6 +3262,62 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
return cnt;
}
+static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int bufiter = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
+ bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+ "%s%s%s%s", i ? " " : "",
+ i == trace_clock_id ? "[" : "", trace_clocks[i].name,
+ i == trace_clock_id ? "]" : "");
+ bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+}
+
+static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *fpos)
+{
+ char buf[64];
+ const char *clockstr;
+ int i;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ clockstr = strstrip(buf);
+
+ for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
+ if (strcmp(trace_clocks[i].name, clockstr) == 0)
+ break;
+ }
+ if (i == ARRAY_SIZE(trace_clocks))
+ return -EINVAL;
+
+ trace_clock_id = i;
+
+ mutex_lock(&trace_types_lock);
+
+ ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
+ if (max_tr.buffer)
+ ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
+
+ mutex_unlock(&trace_types_lock);
+
+ *fpos += cnt;
+
+ return cnt;
+}
+
static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
@@ -3353,6 +3355,12 @@ static const struct file_operations tracing_mark_fops = {
.write = tracing_mark_write,
};
+static const struct file_operations trace_clock_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_clock_read,
+ .write = tracing_clock_write,
+};
+
struct ftrace_buffer_info {
struct trace_array *tr;
void *spare;
@@ -3633,9 +3641,6 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
trace_seq_printf(s, "commit overrun: %ld\n", cnt);
- cnt = ring_buffer_nmi_dropped_cpu(tr->buffer, cpu);
- trace_seq_printf(s, "nmi dropped: %ld\n", cnt);
-
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
kfree(s);
@@ -4066,11 +4071,13 @@ static __init int tracer_init_debugfs(void)
trace_create_file("current_tracer", 0644, d_tracer,
&global_trace, &set_tracer_fops);
+#ifdef CONFIG_TRACER_MAX_TRACE
trace_create_file("tracing_max_latency", 0644, d_tracer,
&tracing_max_latency, &tracing_max_lat_fops);
trace_create_file("tracing_thresh", 0644, d_tracer,
&tracing_thresh, &tracing_max_lat_fops);
+#endif
trace_create_file("README", 0444, d_tracer,
NULL, &tracing_readme_fops);
@@ -4087,6 +4094,9 @@ static __init int tracer_init_debugfs(void)
trace_create_file("saved_cmdlines", 0444, d_tracer,
NULL, &tracing_saved_cmdlines_fops);
+ trace_create_file("trace_clock", 0644, d_tracer, NULL,
+ &trace_clock_fops);
+
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4265,7 +4275,6 @@ void ftrace_dump(void)
__init static int tracer_alloc_buffers(void)
{
- struct trace_array_cpu *data;
int ring_buf_size;
int i;
int ret = -ENOMEM;
@@ -4315,7 +4324,7 @@ __init static int tracer_alloc_buffers(void)
/* Allocate the first page for all buffers */
for_each_tracing_cpu(i) {
- data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
+ global_trace.data[i] = &per_cpu(global_trace_cpu, i);
max_tr.data[i] = &per_cpu(max_data, i);
}
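
The trace.c hunks above drop the global-clock option bit and instead expose a trace_clock debugfs file: reads bracket the currently selected clock name, writes strip the input and look it up in the trace_clocks table before switching both ring buffers. A minimal stand-alone sketch of that lookup pattern, assuming an illustrative two-entry table rather than the kernel one:

#include <stdio.h>
#include <string.h>

struct clock_entry { const char *name; };

static struct clock_entry clocks[] = { { "local" }, { "global" } };
#define NR_CLOCKS (sizeof(clocks) / sizeof(clocks[0]))

static int clock_id;            /* index of the currently selected clock */

/* Mirror of the read handler: bracket the active entry, e.g. "local [global]". */
static size_t show_clocks(char *buf, size_t len)
{
        size_t pos = 0;
        size_t i;

        for (i = 0; i < NR_CLOCKS; i++)
                pos += snprintf(buf + pos, len - pos, "%s%s%s%s",
                                i ? " " : "",
                                i == clock_id ? "[" : "",
                                clocks[i].name,
                                i == clock_id ? "]" : "");
        pos += snprintf(buf + pos, len - pos, "\n");
        return pos;
}

/* Mirror of the write handler: match the name, else reject. */
static int select_clock(const char *name)
{
        size_t i;

        for (i = 0; i < NR_CLOCKS; i++) {
                if (strcmp(clocks[i].name, name) == 0) {
                        clock_id = i;
                        return 0;
                }
        }
        return -1;              /* stands in for -EINVAL */
}

int main(void)
{
        char buf[64];

        select_clock("global");
        show_clocks(buf, sizeof(buf));
        fputs(buf, stdout);     /* prints: local [global] */
        return 0;
}

In the kernel the same selection is typically driven from user space with something like "echo global > trace_clock" in the tracing debugfs directory.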
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8b9f4f6e9559..fa1dccb579d5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -34,8 +34,6 @@ enum trace_type {
TRACE_GRAPH_ENT,
TRACE_USER_STACK,
TRACE_HW_BRANCHES,
- TRACE_SYSCALL_ENTER,
- TRACE_SYSCALL_EXIT,
TRACE_KMEM_ALLOC,
TRACE_KMEM_FREE,
TRACE_POWER,
@@ -236,9 +234,6 @@ struct trace_array_cpu {
atomic_t disabled;
void *buffer_page; /* ring buffer spare */
- /* these fields get copied into max-trace: */
- unsigned long trace_idx;
- unsigned long overrun;
unsigned long saved_latency;
unsigned long critical_start;
unsigned long critical_end;
@@ -246,6 +241,7 @@ struct trace_array_cpu {
unsigned long nice;
unsigned long policy;
unsigned long rt_priority;
+ unsigned long skipped_entries;
cycle_t preempt_timestamp;
pid_t pid;
uid_t uid;
@@ -319,10 +315,6 @@ extern void __ftrace_bad_type(void);
TRACE_KMEM_ALLOC); \
IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
TRACE_KMEM_FREE); \
- IF_ASSIGN(var, ent, struct syscall_trace_enter, \
- TRACE_SYSCALL_ENTER); \
- IF_ASSIGN(var, ent, struct syscall_trace_exit, \
- TRACE_SYSCALL_EXIT); \
__ftrace_bad_type(); \
} while (0)
@@ -423,12 +415,13 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
struct ring_buffer_event;
-struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
- int type,
- unsigned long len,
- unsigned long flags,
- int pc);
-void trace_buffer_unlock_commit(struct trace_array *tr,
+struct ring_buffer_event *
+trace_buffer_lock_reserve(struct ring_buffer *buffer,
+ int type,
+ unsigned long len,
+ unsigned long flags,
+ int pc);
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
@@ -467,6 +460,7 @@ void trace_function(struct trace_array *tr,
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
+void set_graph_array(struct trace_array *tr);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
@@ -478,16 +472,40 @@ void unregister_tracer(struct tracer *type);
extern unsigned long nsecs_to_usecs(unsigned long nsecs);
+#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
+#endif /* CONFIG_TRACER_MAX_TRACE */
-void __trace_stack(struct trace_array *tr,
- unsigned long flags,
- int skip, int pc);
+#ifdef CONFIG_STACKTRACE
+void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
+ int skip, int pc);
+
+void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
+ int pc);
+
+void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
+ int pc);
+#else
+static inline void ftrace_trace_stack(struct trace_array *tr,
+ unsigned long flags, int skip, int pc)
+{
+}
+
+static inline void ftrace_trace_userstack(struct trace_array *tr,
+ unsigned long flags, int pc)
+{
+}
+
+static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
+ int skip, int pc)
+{
+}
+#endif /* CONFIG_STACKTRACE */
extern cycle_t ftrace_now(int cpu);
@@ -513,6 +531,10 @@ extern unsigned long ftrace_update_tot_cnt;
extern int DYN_FTRACE_TEST_NAME(void);
#endif
+extern int ring_buffer_expanded;
+extern bool tracing_selftest_disabled;
+DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
struct trace_array *tr);
@@ -544,9 +566,16 @@ extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
+extern int
+trace_array_vprintk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, va_list args);
+int trace_array_printk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, ...);
extern unsigned long trace_flags;
+extern int trace_clock_id;
+
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
@@ -635,9 +664,8 @@ enum trace_iterator_flags {
TRACE_ITER_PRINTK_MSGONLY = 0x10000,
TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
TRACE_ITER_LATENCY_FMT = 0x40000,
- TRACE_ITER_GLOBAL_CLK = 0x80000,
- TRACE_ITER_SLEEP_TIME = 0x100000,
- TRACE_ITER_GRAPH_TIME = 0x200000,
+ TRACE_ITER_SLEEP_TIME = 0x80000,
+ TRACE_ITER_GRAPH_TIME = 0x100000,
};
/*
@@ -734,6 +762,7 @@ struct ftrace_event_field {
struct list_head link;
char *name;
char *type;
+ int filter_type;
int offset;
int size;
int is_signed;
@@ -743,13 +772,15 @@ struct event_filter {
int n_preds;
struct filter_pred **preds;
char *filter_string;
+ bool no_reset;
};
struct event_subsystem {
struct list_head list;
const char *name;
struct dentry *entry;
- void *filter;
+ struct event_filter *filter;
+ int nr_events;
};
struct filter_pred;
@@ -777,6 +808,7 @@ extern int apply_subsystem_event_filter(struct event_subsystem *system,
char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
struct trace_seq *s);
+extern int filter_assign_type(const char *type);
static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a29ef23ffb47..19bfc75d467e 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -41,14 +41,12 @@ void disable_boot_trace(void)
static int boot_trace_init(struct trace_array *tr)
{
- int cpu;
boot_trace = tr;
if (!tr)
return 0;
- for_each_cpu(cpu, cpu_possible_mask)
- tracing_reset(tr, cpu);
+ tracing_reset_online_cpus(tr);
tracing_sched_switch_assign_trace(tr);
return 0;
@@ -132,6 +130,7 @@ struct tracer boot_tracer __read_mostly =
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
struct trace_boot_call *entry;
struct trace_array *tr = boot_trace;
@@ -144,13 +143,14 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable();
- event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
+ buffer = tr->buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->boot_call = *bt;
- trace_buffer_unlock_commit(tr, event, 0, 0);
+ trace_buffer_unlock_commit(buffer, event, 0, 0);
out:
preempt_enable();
}
@@ -158,6 +158,7 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
struct trace_boot_ret *entry;
struct trace_array *tr = boot_trace;
@@ -167,13 +168,14 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable();
- event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
+ buffer = tr->buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->boot_ret = *bt;
- trace_buffer_unlock_commit(tr, event, 0, 0);
+ trace_buffer_unlock_commit(buffer, event, 0, 0);
out:
preempt_enable();
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e75276a49cf5..78b1ed230177 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -17,6 +17,8 @@
#include <linux/ctype.h>
#include <linux/delay.h>
+#include <asm/setup.h>
+
#include "trace_output.h"
#define TRACE_SYSTEM "TRACE_SYSTEM"
@@ -25,8 +27,9 @@ DEFINE_MUTEX(event_mutex);
LIST_HEAD(ftrace_events);
-int trace_define_field(struct ftrace_event_call *call, char *type,
- char *name, int offset, int size, int is_signed)
+int trace_define_field(struct ftrace_event_call *call, const char *type,
+ const char *name, int offset, int size, int is_signed,
+ int filter_type)
{
struct ftrace_event_field *field;
@@ -42,9 +45,15 @@ int trace_define_field(struct ftrace_event_call *call, char *type,
if (!field->type)
goto err;
+ if (filter_type == FILTER_OTHER)
+ field->filter_type = filter_assign_type(type);
+ else
+ field->filter_type = filter_type;
+
field->offset = offset;
field->size = size;
field->is_signed = is_signed;
+
list_add(&field->link, &call->fields);
return 0;
@@ -60,6 +69,29 @@ err:
}
EXPORT_SYMBOL_GPL(trace_define_field);
+#define __common_field(type, item) \
+ ret = trace_define_field(call, #type, "common_" #item, \
+ offsetof(typeof(ent), item), \
+ sizeof(ent.item), \
+ is_signed_type(type), FILTER_OTHER); \
+ if (ret) \
+ return ret;
+
+int trace_define_common_fields(struct ftrace_event_call *call)
+{
+ int ret;
+ struct trace_entry ent;
+
+ __common_field(unsigned short, type);
+ __common_field(unsigned char, flags);
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
+ __common_field(int, tgid);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(trace_define_common_fields);
+
#ifdef CONFIG_MODULES
static void trace_destroy_fields(struct ftrace_event_call *call)
@@ -84,14 +116,14 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call,
if (call->enabled) {
call->enabled = 0;
tracing_stop_cmdline_record();
- call->unregfunc();
+ call->unregfunc(call->data);
}
break;
case 1:
if (!call->enabled) {
call->enabled = 1;
tracing_start_cmdline_record();
- call->regfunc();
+ call->regfunc(call->data);
}
break;
}
@@ -574,7 +606,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
trace_seq_printf(s, "format:\n");
trace_write_header(s);
- r = call->show_format(s);
+ r = call->show_format(call, s);
if (!r) {
/*
* ug! The format output is bigger than a PAGE!!
@@ -849,8 +881,10 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
/* First see if we did not already create this dir */
list_for_each_entry(system, &event_subsystems, list) {
- if (strcmp(system->name, name) == 0)
+ if (strcmp(system->name, name) == 0) {
+ system->nr_events++;
return system->entry;
+ }
}
/* need to create new entry */
@@ -869,6 +903,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
return d_events;
}
+ system->nr_events = 1;
system->name = kstrdup(name, GFP_KERNEL);
if (!system->name) {
debugfs_remove(system->entry);
@@ -920,15 +955,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
if (strcmp(call->system, TRACE_SYSTEM) != 0)
d_events = event_subsystem_dir(call->system, d_events);
- if (call->raw_init) {
- ret = call->raw_init();
- if (ret < 0) {
- pr_warning("Could not initialize trace point"
- " events/%s\n", call->name);
- return ret;
- }
- }
-
call->dir = debugfs_create_dir(call->name, d_events);
if (!call->dir) {
pr_warning("Could not create debugfs "
@@ -945,7 +971,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
id);
if (call->define_fields) {
- ret = call->define_fields();
+ ret = call->define_fields(call);
if (ret < 0) {
pr_warning("Could not initialize trace point"
" events/%s\n", call->name);
@@ -987,6 +1013,32 @@ struct ftrace_module_file_ops {
struct file_operations filter;
};
+static void remove_subsystem_dir(const char *name)
+{
+ struct event_subsystem *system;
+
+ if (strcmp(name, TRACE_SYSTEM) == 0)
+ return;
+
+ list_for_each_entry(system, &event_subsystems, list) {
+ if (strcmp(system->name, name) == 0) {
+ if (!--system->nr_events) {
+ struct event_filter *filter = system->filter;
+
+ debugfs_remove_recursive(system->entry);
+ list_del(&system->list);
+ if (filter) {
+ kfree(filter->filter_string);
+ kfree(filter);
+ }
+ kfree(system->name);
+ kfree(system);
+ }
+ break;
+ }
+ }
+}
+
static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
@@ -1027,6 +1079,7 @@ static void trace_module_add_events(struct module *mod)
struct ftrace_module_file_ops *file_ops = NULL;
struct ftrace_event_call *call, *start, *end;
struct dentry *d_events;
+ int ret;
start = mod->trace_events;
end = mod->trace_events + mod->num_trace_events;
@@ -1042,7 +1095,15 @@ static void trace_module_add_events(struct module *mod)
/* The linker may leave blanks */
if (!call->name)
continue;
-
+ if (call->raw_init) {
+ ret = call->raw_init();
+ if (ret < 0) {
+ if (ret != -ENOSYS)
+ pr_warning("Could not initialize trace "
+ "point events/%s\n", call->name);
+ continue;
+ }
+ }
/*
* This module has events, create file ops for this module
* if not already done.
@@ -1077,6 +1138,7 @@ static void trace_module_remove_events(struct module *mod)
list_del(&call->list);
trace_destroy_fields(call);
destroy_preds(call);
+ remove_subsystem_dir(call->system);
}
}
@@ -1133,6 +1195,18 @@ struct notifier_block trace_module_nb = {
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
+static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
+
+static __init int setup_trace_event(char *str)
+{
+ strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
+ ring_buffer_expanded = 1;
+ tracing_selftest_disabled = 1;
+
+ return 1;
+}
+__setup("trace_event=", setup_trace_event);
+
static __init int event_trace_init(void)
{
struct ftrace_event_call *call;
@@ -1140,6 +1214,8 @@ static __init int event_trace_init(void)
struct dentry *entry;
struct dentry *d_events;
int ret;
+ char *buf = bootup_event_buf;
+ char *token;
d_tracer = tracing_init_dentry();
if (!d_tracer)
@@ -1179,12 +1255,34 @@ static __init int event_trace_init(void)
/* The linker may leave blanks */
if (!call->name)
continue;
+ if (call->raw_init) {
+ ret = call->raw_init();
+ if (ret < 0) {
+ if (ret != -ENOSYS)
+ pr_warning("Could not initialize trace "
+ "point events/%s\n", call->name);
+ continue;
+ }
+ }
list_add(&call->list, &ftrace_events);
event_create_dir(call, d_events, &ftrace_event_id_fops,
&ftrace_enable_fops, &ftrace_event_filter_fops,
&ftrace_event_format_fops);
}
+ while (true) {
+ token = strsep(&buf, ",");
+
+ if (!token)
+ break;
+ if (!*token)
+ continue;
+
+ ret = ftrace_set_clr_event(token, 1);
+ if (ret)
+ pr_warning("Failed to enable trace event: %s\n", token);
+ }
+
ret = register_module_notifier(&trace_module_nb);
if (ret)
pr_warning("Failed to register trace events module notifier\n");
@@ -1340,6 +1438,7 @@ static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
struct ftrace_entry *entry;
unsigned long flags;
long disabled;
@@ -1357,7 +1456,8 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
local_save_flags(flags);
- event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
+ event = trace_current_buffer_lock_reserve(&buffer,
+ TRACE_FN, sizeof(*entry),
flags, pc);
if (!event)
goto out;
@@ -1365,7 +1465,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
entry->ip = ip;
entry->parent_ip = parent_ip;
- trace_nowake_buffer_unlock_commit(event, flags, pc);
+ trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
out:
atomic_dec(&per_cpu(test_event_disable, cpu));
@@ -1392,10 +1492,10 @@ static __init void event_trace_self_test_with_function(void)
static __init int event_trace_self_tests_init(void)
{
-
- event_trace_self_tests();
-
- event_trace_self_test_with_function();
+ if (!tracing_selftest_disabled) {
+ event_trace_self_tests();
+ event_trace_self_test_with_function();
+ }
return 0;
}
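
The trace_events.c changes add a trace_event= boot parameter: the value is stashed at setup time and later split on commas, each token being passed to ftrace_set_clr_event(). A stand-alone sketch of that strsep() loop; enable_event() here is a hypothetical stand-in for the kernel call:

#define _DEFAULT_SOURCE         /* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

static int enable_event(const char *name)
{
        /* placeholder for ftrace_set_clr_event(name, 1) */
        printf("enabling %s\n", name);
        return 0;
}

int main(void)
{
        char cmdline[] = "sched:sched_switch,,irq:irq_handler_entry";
        char *buf = cmdline;
        char *token;

        while (1) {
                token = strsep(&buf, ",");
                if (!token)
                        break;          /* string exhausted */
                if (!*token)
                        continue;       /* skip empty fields such as ",," */
                if (enable_event(token))
                        fprintf(stderr, "Failed to enable trace event: %s\n",
                                token);
        }
        return 0;
}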
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index f32dc9d1ea7b..93660fbbf629 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -163,6 +163,20 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
return match;
}
+/* Filter predicate for char * pointers */
+static int filter_pred_pchar(struct filter_pred *pred, void *event,
+ int val1, int val2)
+{
+ char **addr = (char **)(event + pred->offset);
+ int cmp, match;
+
+ cmp = strncmp(*addr, pred->str_val, pred->str_len);
+
+ match = (!cmp) ^ pred->not;
+
+ return match;
+}
+
/*
* Filter predicate for dynamic sized arrays of characters.
* These are implemented through a list of strings at the end
@@ -176,11 +190,13 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
static int filter_pred_strloc(struct filter_pred *pred, void *event,
int val1, int val2)
{
- unsigned short str_loc = *(unsigned short *)(event + pred->offset);
+ u32 str_item = *(u32 *)(event + pred->offset);
+ int str_loc = str_item & 0xffff;
+ int str_len = str_item >> 16;
char *addr = (char *)(event + str_loc);
int cmp, match;
- cmp = strncmp(addr, pred->str_val, pred->str_len);
+ cmp = strncmp(addr, pred->str_val, str_len);
match = (!cmp) ^ pred->not;
@@ -293,7 +309,7 @@ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
struct event_filter *filter = call->filter;
mutex_lock(&event_mutex);
- if (filter->filter_string)
+ if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_printf(s, "none\n");
@@ -306,7 +322,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
struct event_filter *filter = system->filter;
mutex_lock(&event_mutex);
- if (filter->filter_string)
+ if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_printf(s, "none\n");
@@ -374,6 +390,9 @@ void destroy_preds(struct ftrace_event_call *call)
struct event_filter *filter = call->filter;
int i;
+ if (!filter)
+ return;
+
for (i = 0; i < MAX_FILTER_PRED; i++) {
if (filter->preds[i])
filter_free_pred(filter->preds[i]);
@@ -384,17 +403,19 @@ void destroy_preds(struct ftrace_event_call *call)
call->filter = NULL;
}
-int init_preds(struct ftrace_event_call *call)
+static int init_preds(struct ftrace_event_call *call)
{
struct event_filter *filter;
struct filter_pred *pred;
int i;
+ if (call->filter)
+ return 0;
+
filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!call->filter)
return -ENOMEM;
- call->filter_active = 0;
filter->n_preds = 0;
filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), GFP_KERNEL);
@@ -416,30 +437,55 @@ oom:
return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(init_preds);
-static void filter_free_subsystem_preds(struct event_subsystem *system)
+static int init_subsystem_preds(struct event_subsystem *system)
{
- struct event_filter *filter = system->filter;
struct ftrace_event_call *call;
- int i;
+ int err;
- if (filter->n_preds) {
- for (i = 0; i < filter->n_preds; i++)
- filter_free_pred(filter->preds[i]);
- kfree(filter->preds);
- filter->preds = NULL;
- filter->n_preds = 0;
+ list_for_each_entry(call, &ftrace_events, list) {
+ if (!call->define_fields)
+ continue;
+
+ if (strcmp(call->system, system->name) != 0)
+ continue;
+
+ err = init_preds(call);
+ if (err)
+ return err;
}
+ return 0;
+}
+
+enum {
+ FILTER_DISABLE_ALL,
+ FILTER_INIT_NO_RESET,
+ FILTER_SKIP_NO_RESET,
+};
+
+static void filter_free_subsystem_preds(struct event_subsystem *system,
+ int flag)
+{
+ struct ftrace_event_call *call;
+
list_for_each_entry(call, &ftrace_events, list) {
if (!call->define_fields)
continue;
- if (!strcmp(call->system, system->name)) {
- filter_disable_preds(call);
- remove_filter_string(call->filter);
+ if (strcmp(call->system, system->name) != 0)
+ continue;
+
+ if (flag == FILTER_INIT_NO_RESET) {
+ call->filter->no_reset = false;
+ continue;
}
+
+ if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset)
+ continue;
+
+ filter_disable_preds(call);
+ remove_filter_string(call->filter);
}
}
@@ -468,12 +514,7 @@ static int filter_add_pred_fn(struct filter_parse_state *ps,
return 0;
}
-enum {
- FILTER_STATIC_STRING = 1,
- FILTER_DYN_STRING
-};
-
-static int is_string_field(const char *type)
+int filter_assign_type(const char *type)
{
if (strstr(type, "__data_loc") && strstr(type, "char"))
return FILTER_DYN_STRING;
@@ -481,12 +522,19 @@ static int is_string_field(const char *type)
if (strchr(type, '[') && strstr(type, "char"))
return FILTER_STATIC_STRING;
- return 0;
+ return FILTER_OTHER;
+}
+
+static bool is_string_field(struct ftrace_event_field *field)
+{
+ return field->filter_type == FILTER_DYN_STRING ||
+ field->filter_type == FILTER_STATIC_STRING ||
+ field->filter_type == FILTER_PTR_STRING;
}
static int is_legal_op(struct ftrace_event_field *field, int op)
{
- if (is_string_field(field->type) && (op != OP_EQ && op != OP_NE))
+ if (is_string_field(field) && (op != OP_EQ && op != OP_NE))
return 0;
return 1;
@@ -537,22 +585,24 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size,
static int filter_add_pred(struct filter_parse_state *ps,
struct ftrace_event_call *call,
- struct filter_pred *pred)
+ struct filter_pred *pred,
+ bool dry_run)
{
struct ftrace_event_field *field;
filter_pred_fn_t fn;
unsigned long long val;
- int string_type;
int ret;
pred->fn = filter_pred_none;
if (pred->op == OP_AND) {
pred->pop_n = 2;
- return filter_add_pred_fn(ps, call, pred, filter_pred_and);
+ fn = filter_pred_and;
+ goto add_pred_fn;
} else if (pred->op == OP_OR) {
pred->pop_n = 2;
- return filter_add_pred_fn(ps, call, pred, filter_pred_or);
+ fn = filter_pred_or;
+ goto add_pred_fn;
}
field = find_event_field(call, pred->field_name);
@@ -568,16 +618,17 @@ static int filter_add_pred(struct filter_parse_state *ps,
return -EINVAL;
}
- string_type = is_string_field(field->type);
- if (string_type) {
- if (string_type == FILTER_STATIC_STRING)
+ if (is_string_field(field)) {
+ pred->str_len = field->size;
+
+ if (field->filter_type == FILTER_STATIC_STRING)
fn = filter_pred_string;
- else
+ else if (field->filter_type == FILTER_DYN_STRING)
fn = filter_pred_strloc;
- pred->str_len = field->size;
- if (pred->op == OP_NE)
- pred->not = 1;
- return filter_add_pred_fn(ps, call, pred, fn);
+ else {
+ fn = filter_pred_pchar;
+ pred->str_len = strlen(pred->str_val);
+ }
} else {
if (field->is_signed)
ret = strict_strtoll(pred->str_val, 0, &val);
@@ -588,41 +639,33 @@ static int filter_add_pred(struct filter_parse_state *ps,
return -EINVAL;
}
pred->val = val;
- }
- fn = select_comparison_fn(pred->op, field->size, field->is_signed);
- if (!fn) {
- parse_error(ps, FILT_ERR_INVALID_OP, 0);
- return -EINVAL;
+ fn = select_comparison_fn(pred->op, field->size,
+ field->is_signed);
+ if (!fn) {
+ parse_error(ps, FILT_ERR_INVALID_OP, 0);
+ return -EINVAL;
+ }
}
if (pred->op == OP_NE)
pred->not = 1;
- return filter_add_pred_fn(ps, call, pred, fn);
+add_pred_fn:
+ if (!dry_run)
+ return filter_add_pred_fn(ps, call, pred, fn);
+ return 0;
}
static int filter_add_subsystem_pred(struct filter_parse_state *ps,
struct event_subsystem *system,
struct filter_pred *pred,
- char *filter_string)
+ char *filter_string,
+ bool dry_run)
{
- struct event_filter *filter = system->filter;
struct ftrace_event_call *call;
int err = 0;
-
- if (!filter->preds) {
- filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
- GFP_KERNEL);
-
- if (!filter->preds)
- return -ENOMEM;
- }
-
- if (filter->n_preds == MAX_FILTER_PRED) {
- parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
- return -ENOSPC;
- }
+ bool fail = true;
list_for_each_entry(call, &ftrace_events, list) {
@@ -632,19 +675,24 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
if (strcmp(call->system, system->name))
continue;
- err = filter_add_pred(ps, call, pred);
- if (err) {
- filter_free_subsystem_preds(system);
- parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
- goto out;
- }
- replace_filter_string(call->filter, filter_string);
+ if (call->filter->no_reset)
+ continue;
+
+ err = filter_add_pred(ps, call, pred, dry_run);
+ if (err)
+ call->filter->no_reset = true;
+ else
+ fail = false;
+
+ if (!dry_run)
+ replace_filter_string(call->filter, filter_string);
}
- filter->preds[filter->n_preds] = pred;
- filter->n_preds++;
-out:
- return err;
+ if (fail) {
+ parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
+ return err;
+ }
+ return 0;
}
static void parse_init(struct filter_parse_state *ps,
@@ -1003,12 +1051,14 @@ static int check_preds(struct filter_parse_state *ps)
static int replace_preds(struct event_subsystem *system,
struct ftrace_event_call *call,
struct filter_parse_state *ps,
- char *filter_string)
+ char *filter_string,
+ bool dry_run)
{
char *operand1 = NULL, *operand2 = NULL;
struct filter_pred *pred;
struct postfix_elt *elt;
int err;
+ int n_preds = 0;
err = check_preds(ps);
if (err)
@@ -1027,24 +1077,14 @@ static int replace_preds(struct event_subsystem *system,
continue;
}
+ if (n_preds++ == MAX_FILTER_PRED) {
+ parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
+ return -ENOSPC;
+ }
+
if (elt->op == OP_AND || elt->op == OP_OR) {
pred = create_logical_pred(elt->op);
- if (!pred)
- return -ENOMEM;
- if (call) {
- err = filter_add_pred(ps, call, pred);
- filter_free_pred(pred);
- } else {
- err = filter_add_subsystem_pred(ps, system,
- pred, filter_string);
- if (err)
- filter_free_pred(pred);
- }
- if (err)
- return err;
-
- operand1 = operand2 = NULL;
- continue;
+ goto add_pred;
}
if (!operand1 || !operand2) {
@@ -1053,17 +1093,15 @@ static int replace_preds(struct event_subsystem *system,
}
pred = create_pred(elt->op, operand1, operand2);
+add_pred:
if (!pred)
return -ENOMEM;
- if (call) {
- err = filter_add_pred(ps, call, pred);
- filter_free_pred(pred);
- } else {
+ if (call)
+ err = filter_add_pred(ps, call, pred, false);
+ else
err = filter_add_subsystem_pred(ps, system, pred,
- filter_string);
- if (err)
- filter_free_pred(pred);
- }
+ filter_string, dry_run);
+ filter_free_pred(pred);
if (err)
return err;
@@ -1081,6 +1119,10 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
mutex_lock(&event_mutex);
+ err = init_preds(call);
+ if (err)
+ goto out_unlock;
+
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable_preds(call);
remove_filter_string(call->filter);
@@ -1103,7 +1145,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
goto out;
}
- err = replace_preds(NULL, call, ps, filter_string);
+ err = replace_preds(NULL, call, ps, filter_string, false);
if (err)
append_filter_err(ps, call->filter);
@@ -1126,8 +1168,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
mutex_lock(&event_mutex);
+ err = init_subsystem_preds(system);
+ if (err)
+ goto out_unlock;
+
if (!strcmp(strstrip(filter_string), "0")) {
- filter_free_subsystem_preds(system);
+ filter_free_subsystem_preds(system, FILTER_DISABLE_ALL);
remove_filter_string(system->filter);
mutex_unlock(&event_mutex);
return 0;
@@ -1138,7 +1184,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
if (!ps)
goto out_unlock;
- filter_free_subsystem_preds(system);
replace_filter_string(system->filter, filter_string);
parse_init(ps, filter_ops, filter_string);
@@ -1148,9 +1193,23 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
goto out;
}
- err = replace_preds(system, NULL, ps, filter_string);
- if (err)
+ filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET);
+
+ /* see which events the filter can be applied to */
+ err = replace_preds(system, NULL, ps, filter_string, true);
+ if (err) {
append_filter_err(ps, system->filter);
+ goto out;
+ }
+
+ filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET);
+
+ /* really apply the filter to the events */
+ err = replace_preds(system, NULL, ps, filter_string, false);
+ if (err) {
+ append_filter_err(ps, system->filter);
+ filter_free_subsystem_preds(system, 2);
+ }
out:
filter_opstack_clear(ps);
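
Subsystem filters are now applied in two passes: a dry run marks events whose fields reject the filter (no_reset), the surviving events have their old predicates cleared, and only then is the filter really installed. A schematic sketch of that flow with an invented event list:

#include <stdbool.h>
#include <stdio.h>

struct event {
        const char *name;
        bool can_filter;        /* stands in for the field/type checks */
        bool no_reset;          /* skip this event on the real pass */
};

static int apply_one(struct event *ev, bool dry_run)
{
        if (!ev->can_filter)
                return -1;
        if (!dry_run)
                printf("filter installed on %s\n", ev->name);
        return 0;
}

static int apply_to_subsystem(struct event *evs, int n)
{
        bool fail = true;
        int i;

        /* pass 1: dry run, remember which events reject the filter */
        for (i = 0; i < n; i++) {
                evs[i].no_reset = false;
                if (apply_one(&evs[i], true))
                        evs[i].no_reset = true;
                else
                        fail = false;
        }
        if (fail)
                return -1;      /* no event in the subsystem accepted it */

        /* pass 2: really install it, skipping the marked events */
        for (i = 0; i < n; i++) {
                if (evs[i].no_reset)
                        continue;
                apply_one(&evs[i], false);
        }
        return 0;
}

int main(void)
{
        struct event evs[] = {
                { "kmalloc", true,  false },
                { "kfree",   false, false },
        };

        return apply_to_subsystem(evs, 2) ? 1 : 0;
}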
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index d06cf898dc86..df1bf6e48bb9 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -60,7 +60,8 @@ extern void __bad_type_size(void);
#undef TRACE_EVENT_FORMAT
#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
static int \
-ftrace_format_##call(struct trace_seq *s) \
+ftrace_format_##call(struct ftrace_event_call *unused, \
+ struct trace_seq *s) \
{ \
struct args field; \
int ret; \
@@ -76,7 +77,8 @@ ftrace_format_##call(struct trace_seq *s) \
#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \
tpfmt) \
static int \
-ftrace_format_##call(struct trace_seq *s) \
+ftrace_format_##call(struct ftrace_event_call *unused, \
+ struct trace_seq *s) \
{ \
struct args field; \
int ret; \
@@ -117,7 +119,7 @@ ftrace_format_##call(struct trace_seq *s) \
#undef TRACE_EVENT_FORMAT
#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
-int ftrace_define_fields_##call(void); \
+int ftrace_define_fields_##call(struct ftrace_event_call *event_call); \
static int ftrace_raw_init_event_##call(void); \
\
struct ftrace_event_call __used \
@@ -133,7 +135,6 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
static int ftrace_raw_init_event_##call(void) \
{ \
INIT_LIST_HEAD(&event_##call.fields); \
- init_preds(&event_##call); \
return 0; \
} \
@@ -156,7 +157,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#define TRACE_FIELD(type, item, assign) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), item), \
- sizeof(field.item), is_signed_type(type)); \
+ sizeof(field.item), \
+ is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret;
@@ -164,7 +166,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#define TRACE_FIELD_SPECIAL(type, item, len, cmd) \
ret = trace_define_field(event_call, #type "[" #len "]", #item, \
offsetof(typeof(field), item), \
- sizeof(field.item), 0); \
+ sizeof(field.item), 0, FILTER_OTHER); \
if (ret) \
return ret;
@@ -172,7 +174,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#define TRACE_FIELD_SIGN(type, item, assign, is_signed) \
ret = trace_define_field(event_call, #type, #item, \
offsetof(typeof(field), item), \
- sizeof(field.item), is_signed); \
+ sizeof(field.item), is_signed, \
+ FILTER_OTHER); \
if (ret) \
return ret;
@@ -182,17 +185,14 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#undef TRACE_EVENT_FORMAT
#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
int \
-ftrace_define_fields_##call(void) \
+ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
- struct ftrace_event_call *event_call = &event_##call; \
struct args field; \
int ret; \
\
- __common_field(unsigned char, type, 0); \
- __common_field(unsigned char, flags, 0); \
- __common_field(unsigned char, preempt_count, 0); \
- __common_field(int, pid, 1); \
- __common_field(int, tgid, 1); \
+ ret = trace_define_common_fields(event_call); \
+ if (ret) \
+ return ret; \
\
tstruct; \
\
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 75ef000613c3..5b01b94518fc 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -288,11 +288,9 @@ static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data)
{
- char str[KSYM_SYMBOL_LEN];
long count = (long)data;
- kallsyms_lookup(ip, NULL, NULL, NULL, str);
- seq_printf(m, "%s:", str);
+ seq_printf(m, "%pf:", (void *)ip);
if (ops == &traceon_probe_ops)
seq_printf(m, "traceon");
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 420ec3487579..b3749a2c3132 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -52,7 +52,7 @@ static struct tracer_flags tracer_flags = {
.opts = trace_opts
};
-/* pid on the last trace processed */
+static struct trace_array *graph_array;
/* Add a function return address to the trace stack on thread info.*/
@@ -166,10 +166,123 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
return ret;
}
+static int __trace_graph_entry(struct trace_array *tr,
+ struct ftrace_graph_ent *trace,
+ unsigned long flags,
+ int pc)
+{
+ struct ftrace_event_call *call = &event_funcgraph_entry;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer = tr->buffer;
+ struct ftrace_graph_ent_entry *entry;
+
+ if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ return 0;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
+ sizeof(*entry), flags, pc);
+ if (!event)
+ return 0;
+ entry = ring_buffer_event_data(event);
+ entry->graph_ent = *trace;
+ if (!filter_current_check_discard(buffer, call, entry, event))
+ ring_buffer_unlock_commit(buffer, event);
+
+ return 1;
+}
+
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+ struct trace_array *tr = graph_array;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int ret;
+ int cpu;
+ int pc;
+
+ if (unlikely(!tr))
+ return 0;
+
+ if (!ftrace_trace_task(current))
+ return 0;
+
+ if (!ftrace_graph_addr(trace->func))
+ return 0;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ ret = __trace_graph_entry(tr, trace, flags, pc);
+ } else {
+ ret = 0;
+ }
+ /* Only do the atomic if it is not already set */
+ if (!test_tsk_trace_graph(current))
+ set_tsk_trace_graph(current);
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+ struct ftrace_graph_ret *trace,
+ unsigned long flags,
+ int pc)
+{
+ struct ftrace_event_call *call = &event_funcgraph_exit;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer = tr->buffer;
+ struct ftrace_graph_ret_entry *entry;
+
+ if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ return;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
+ sizeof(*entry), flags, pc);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->ret = *trace;
+ if (!filter_current_check_discard(buffer, call, entry, event))
+ ring_buffer_unlock_commit(buffer, event);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+ struct trace_array *tr = graph_array;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ __trace_graph_return(tr, trace, flags, pc);
+ }
+ if (!trace->depth)
+ clear_tsk_trace_graph(current);
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
+
static int graph_trace_init(struct trace_array *tr)
{
- int ret = register_ftrace_graph(&trace_graph_return,
- &trace_graph_entry);
+ int ret;
+
+ graph_array = tr;
+ ret = register_ftrace_graph(&trace_graph_return,
+ &trace_graph_entry);
if (ret)
return ret;
tracing_start_cmdline_record();
@@ -177,49 +290,30 @@ static int graph_trace_init(struct trace_array *tr)
return 0;
}
+void set_graph_array(struct trace_array *tr)
+{
+ graph_array = tr;
+}
+
static void graph_trace_reset(struct trace_array *tr)
{
tracing_stop_cmdline_record();
unregister_ftrace_graph();
}
-static inline int log10_cpu(int nb)
-{
- if (nb / 100)
- return 3;
- if (nb / 10)
- return 2;
- return 1;
-}
+static int max_bytes_for_cpu;
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
- int i;
int ret;
- int log10_this = log10_cpu(cpu);
- int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
-
/*
* Start with a space character - to make it stand out
* to the right a bit when trace output is pasted into
* email:
*/
- ret = trace_seq_printf(s, " ");
-
- /*
- * Tricky - we space the CPU field according to the max
- * number of online CPUs. On a 2-cpu system it would take
- * a maximum of 1 digit - on a 128 cpu system it would
- * take up to 3 digits:
- */
- for (i = 0; i < log10_all - log10_this; i++) {
- ret = trace_seq_printf(s, " ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
- ret = trace_seq_printf(s, "%d) ", cpu);
+ ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -565,11 +659,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
return TRACE_TYPE_PARTIAL_LINE;
}
- ret = seq_print_ip_sym(s, call->func, 0);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- ret = trace_seq_printf(s, "();\n");
+ ret = trace_seq_printf(s, "%pf();\n", (void *)call->func);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -612,11 +702,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
return TRACE_TYPE_PARTIAL_LINE;
}
- ret = seq_print_ip_sym(s, call->func, 0);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- ret = trace_seq_printf(s, "() {\n");
+ ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -934,6 +1020,8 @@ static struct tracer graph_trace __read_mostly = {
static __init int init_graph_trace(void)
{
+ max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
+
return register_tracer(&graph_trace);
}
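
The function-graph tracer now sizes its CPU column once at init time with snprintf(NULL, 0, ...) and prints it with a "%*d" field width, replacing the old log10_cpu() arithmetic. The same idiom in isolation, with an assumed example value for nr_cpu_ids:

#include <stdio.h>

int main(void)
{
        int nr_cpu_ids = 128;                   /* example value */
        /* snprintf with a NULL buffer returns the length it would need */
        int max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        /* the width argument pads every cpu number to the same column */
        printf(" %*d) \n", max_bytes_for_cpu, 127);     /* " 127) " */
        printf(" %*d) \n", max_bytes_for_cpu, 3);       /* "   3) " */
        return 0;
}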
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b923d13e2fad..5555b75a0d12 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -178,7 +178,6 @@ out_unlock:
out:
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
- tracing_reset(tr, cpu);
trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
@@ -208,7 +207,6 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
data->critical_start = parent_ip ? : ip;
- tracing_reset(tr, cpu);
local_save_flags(flags);
@@ -379,6 +377,7 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
irqsoff_trace = tr;
/* make sure that the tracer is visible */
smp_wmb();
+ tracing_reset_online_cpus(tr);
start_irqsoff_tracer(tr);
}
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index d53b45ed0806..c4c9bbda53d3 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,11 +307,12 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct trace_array_cpu *data,
struct mmiotrace_rw *rw)
{
+ struct ring_buffer *buffer = tr->buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
int pc = preempt_count();
- event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
@@ -319,7 +320,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
}
entry = ring_buffer_event_data(event);
entry->rw = *rw;
- trace_buffer_unlock_commit(tr, event, 0, pc);
+ trace_buffer_unlock_commit(buffer, event, 0, pc);
}
void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -333,11 +334,12 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct trace_array_cpu *data,
struct mmiotrace_map *map)
{
+ struct ring_buffer *buffer = tr->buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
int pc = preempt_count();
- event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+ event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
@@ -345,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
}
entry = ring_buffer_event_data(event);
entry->map = *map;
- trace_buffer_unlock_commit(tr, event, 0, pc);
+ trace_buffer_unlock_commit(buffer, event, 0, pc);
}
void mmio_trace_mapping(struct mmiotrace_map *map)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 8a30d9874cd4..fe1a00f1445a 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -38,6 +38,7 @@ static void probe_power_end(struct power_trace *it)
{
struct ftrace_event_call *call = &event_power;
struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
struct trace_power *entry;
struct trace_array_cpu *data;
struct trace_array *tr = power_trace;
@@ -45,18 +46,20 @@ static void probe_power_end(struct power_trace *it)
if (!trace_power_enabled)
return;
+ buffer = tr->buffer;
+
preempt_disable();
it->end = ktime_get();
data = tr->data[smp_processor_id()];
- event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+ event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->state_data = *it;
- if (!filter_check_discard(call, entry, tr->buffer, event))
- trace_buffer_unlock_commit(tr, event, 0, 0);
+ if (!filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit(buffer, event, 0, 0);
out:
preempt_enable();
}
@@ -66,6 +69,7 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
{
struct ftrace_event_call *call = &event_power;
struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
struct trace_power *entry;
struct trace_array_cpu *data;
struct trace_array *tr = power_trace;
@@ -73,6 +77,8 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
if (!trace_power_enabled)
return;
+ buffer = tr->buffer;
+
memset(it, 0, sizeof(struct power_trace));
it->state = level;
it->type = type;
@@ -81,14 +87,14 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
it->end = it->stamp;
data = tr->data[smp_processor_id()];
- event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+ event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->state_data = *it;
- if (!filter_check_discard(call, entry, tr->buffer, event))
- trace_buffer_unlock_commit(tr, event, 0, 0);
+ if (!filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit(buffer, event, 0, 0);
out:
preempt_enable();
}
@@ -144,14 +150,12 @@ static void power_trace_reset(struct trace_array *tr)
static int power_trace_init(struct trace_array *tr)
{
- int cpu;
power_trace = tr;
trace_power_enabled = 1;
tracing_power_register();
- for_each_cpu(cpu, cpu_possible_mask)
- tracing_reset(tr, cpu);
+ tracing_reset_online_cpus(tr);
return 0;
}
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a98106dd979c..5fca0f51fde4 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -20,6 +20,35 @@ static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int sched_stopped;
+
+void
+tracing_sched_switch_trace(struct trace_array *tr,
+ struct task_struct *prev,
+ struct task_struct *next,
+ unsigned long flags, int pc)
+{
+ struct ftrace_event_call *call = &event_context_switch;
+ struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer_event *event;
+ struct ctx_switch_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
+ sizeof(*entry), flags, pc);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->prev_pid = prev->pid;
+ entry->prev_prio = prev->prio;
+ entry->prev_state = prev->state;
+ entry->next_pid = next->pid;
+ entry->next_prio = next->prio;
+ entry->next_state = next->state;
+ entry->next_cpu = task_cpu(next);
+
+ if (!filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit(buffer, event, flags, pc);
+}
+
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
struct task_struct *next)
@@ -49,6 +78,36 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
local_irq_restore(flags);
}
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+ struct task_struct *wakee,
+ struct task_struct *curr,
+ unsigned long flags, int pc)
+{
+ struct ftrace_event_call *call = &event_wakeup;
+ struct ring_buffer_event *event;
+ struct ctx_switch_entry *entry;
+ struct ring_buffer *buffer = tr->buffer;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
+ sizeof(*entry), flags, pc);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->prev_pid = curr->pid;
+ entry->prev_prio = curr->prio;
+ entry->prev_state = curr->state;
+ entry->next_pid = wakee->pid;
+ entry->next_prio = wakee->prio;
+ entry->next_state = wakee->state;
+ entry->next_cpu = task_cpu(wakee);
+
+ if (!filter_check_discard(call, entry, buffer, event))
+ ring_buffer_unlock_commit(buffer, event);
+ ftrace_trace_stack(tr->buffer, flags, 6, pc);
+ ftrace_trace_userstack(tr->buffer, flags, pc);
+}
+
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index eacb27225173..ad69f105a7c6 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -186,11 +186,6 @@ out:
static void __wakeup_reset(struct trace_array *tr)
{
- int cpu;
-
- for_each_possible_cpu(cpu)
- tracing_reset(tr, cpu);
-
wakeup_cpu = -1;
wakeup_prio = -1;
@@ -204,6 +199,8 @@ static void wakeup_reset(struct trace_array *tr)
{
unsigned long flags;
+ tracing_reset_online_cpus(tr);
+
local_irq_save(flags);
__raw_spin_lock(&wakeup_lock);
__wakeup_reset(tr);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 00dd6485bdd7..d2cdbabb4ead 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -288,6 +288,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
* to detect and recover from possible hangs
*/
tracing_reset_online_cpus(tr);
+ set_graph_array(tr);
ret = register_ftrace_graph(&trace_graph_return,
&trace_graph_entry_watchdog);
if (ret) {
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 6a2a9d484cd6..0f6facb050a1 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -186,43 +186,33 @@ static const struct file_operations stack_max_size_fops = {
};
static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+__next(struct seq_file *m, loff_t *pos)
{
- long i;
+ long n = *pos - 1;
- (*pos)++;
-
- if (v == SEQ_START_TOKEN)
- i = 0;
- else {
- i = *(long *)v;
- i++;
- }
-
- if (i >= max_stack_trace.nr_entries ||
- stack_dump_trace[i] == ULONG_MAX)
+ if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
return NULL;
- m->private = (void *)i;
-
+ m->private = (void *)n;
return &m->private;
}
-static void *t_start(struct seq_file *m, loff_t *pos)
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
{
- void *t = SEQ_START_TOKEN;
- loff_t l = 0;
+ (*pos)++;
+ return __next(m, pos);
+}
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
local_irq_disable();
__raw_spin_lock(&max_stack_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
- for (; t && l < *pos; t = t_next(m, t, &l))
- ;
-
- return t;
+ return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
@@ -234,15 +224,8 @@ static void t_stop(struct seq_file *m, void *p)
static int trace_lookup_stack(struct seq_file *m, long i)
{
unsigned long addr = stack_dump_trace[i];
-#ifdef CONFIG_KALLSYMS
- char str[KSYM_SYMBOL_LEN];
-
- sprint_symbol(str, addr);
- return seq_printf(m, "%s\n", str);
-#else
- return seq_printf(m, "%p\n", (void*)addr);
-#endif
+ return seq_printf(m, "%pF\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index aea321c82fa0..a4bb239eb987 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -49,7 +49,8 @@ static struct dentry *stat_dir;
* but it will at least advance closer to the next one
* to be released.
*/
-static struct rb_node *release_next(struct rb_node *node)
+static struct rb_node *release_next(struct tracer_stat *ts,
+ struct rb_node *node)
{
struct stat_node *snode;
struct rb_node *parent = rb_parent(node);
@@ -67,6 +68,8 @@ static struct rb_node *release_next(struct rb_node *node)
parent->rb_right = NULL;
snode = container_of(node, struct stat_node, node);
+ if (ts->stat_release)
+ ts->stat_release(snode->stat);
kfree(snode);
return parent;
@@ -78,7 +81,7 @@ static void __reset_stat_session(struct stat_session *session)
struct rb_node *node = session->stat_root.rb_node;
while (node)
- node = release_next(node);
+ node = release_next(session->ts, node);
session->stat_root = RB_ROOT;
}
@@ -200,17 +203,21 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
struct stat_session *session = s->private;
struct rb_node *node;
+ int n = *pos;
int i;
/* Prevent from tracer switch or rbtree modification */
mutex_lock(&session->stat_mutex);
/* If we are in the beginning of the file, print the headers */
- if (!*pos && session->ts->stat_headers)
- return SEQ_START_TOKEN;
+ if (session->ts->stat_headers) {
+ if (n == 0)
+ return SEQ_START_TOKEN;
+ n--;
+ }
node = rb_first(&session->stat_root);
- for (i = 0; node && i < *pos; i++)
+ for (i = 0; node && i < n; i++)
node = rb_next(node);
return node;
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
index f3546a2cd826..8f03914b9a6a 100644
--- a/kernel/trace/trace_stat.h
+++ b/kernel/trace/trace_stat.h
@@ -18,6 +18,8 @@ struct tracer_stat {
int (*stat_cmp)(void *p1, void *p2);
/* Print a stat entry */
int (*stat_show)(struct seq_file *s, void *p);
+ /* Release an entry */
+ void (*stat_release)(void *stat);
/* Print the headers of your stat entries */
int (*stat_headers)(struct seq_file *s);
};
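
tracer_stat gains an optional stat_release() callback that __reset_stat_session() calls for every node it frees, so tracers that allocate their stat entries can release them during teardown. A toy sketch of that ownership model, using a flat list in place of the kernel rbtree:

#include <stdio.h>
#include <stdlib.h>

struct stat_node {
        struct stat_node *next; /* flat list instead of the kernel rbtree */
        void *stat;
};

struct tracer_stat {
        void (*stat_release)(void *stat);       /* optional per-entry destructor */
};

static void reset_session(struct tracer_stat *ts, struct stat_node *head)
{
        while (head) {
                struct stat_node *next = head->next;

                if (ts->stat_release)
                        ts->stat_release(head->stat);   /* free what the tracer allocated */
                free(head);
                head = next;
        }
}

static void release_stat(void *stat)
{
        free(stat);
}

int main(void)
{
        struct tracer_stat ts = { .stat_release = release_stat };
        struct stat_node *n = calloc(1, sizeof(*n));

        n->stat = malloc(16);
        reset_session(&ts, n);
        return 0;
}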
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 5e579645ac86..8712ce3c6a0e 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -1,30 +1,18 @@
#include <trace/syscall.h>
+#include <trace/events/syscalls.h>
#include <linux/kernel.h>
+#include <linux/ftrace.h>
+#include <linux/perf_counter.h>
#include <asm/syscall.h>
#include "trace_output.h"
#include "trace.h"
-/* Keep a counter of the syscall tracing users */
-static int refcount;
-
-/* Prevent from races on thread flags toggling */
static DEFINE_MUTEX(syscall_trace_lock);
-
-/* Option to display the parameters types */
-enum {
- TRACE_SYSCALLS_OPT_TYPES = 0x1,
-};
-
-static struct tracer_opt syscalls_opts[] = {
- { TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
- { }
-};
-
-static struct tracer_flags syscalls_flags = {
- .val = 0, /* By default: no parameters types */
- .opts = syscalls_opts
-};
+static int sys_refcount_enter;
+static int sys_refcount_exit;
+static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
@@ -35,35 +23,46 @@ print_syscall_enter(struct trace_iterator *iter, int flags)
struct syscall_metadata *entry;
int i, ret, syscall;
- trace_assign_type(trace, ent);
-
+ trace = (typeof(trace))ent;
syscall = trace->nr;
-
entry = syscall_nr_to_meta(syscall);
+
if (!entry)
goto end;
+ if (entry->enter_id != ent->type) {
+ WARN_ON_ONCE(1);
+ goto end;
+ }
+
ret = trace_seq_printf(s, "%s(", entry->name);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
for (i = 0; i < entry->nb_args; i++) {
/* parameter types */
- if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
+ if (trace_flags & TRACE_ITER_VERBOSE) {
ret = trace_seq_printf(s, "%s ", entry->types[i]);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* parameter values */
- ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
+ ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
trace->args[i],
- i == entry->nb_args - 1 ? ")" : ",");
+ i == entry->nb_args - 1 ? "" : ", ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
+ ret = trace_seq_putc(s, ')');
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
end:
- trace_seq_printf(s, "\n");
+ ret = trace_seq_putc(s, '\n');
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
return TRACE_TYPE_HANDLED;
}
@@ -77,16 +76,20 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
struct syscall_metadata *entry;
int ret;
- trace_assign_type(trace, ent);
-
+ trace = (typeof(trace))ent;
syscall = trace->nr;
-
entry = syscall_nr_to_meta(syscall);
+
if (!entry) {
trace_seq_printf(s, "\n");
return TRACE_TYPE_HANDLED;
}
+ if (entry->exit_id != ent->type) {
+ WARN_ON_ONCE(1);
+ return TRACE_TYPE_UNHANDLED;
+ }
+
ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
trace->ret);
if (!ret)
@@ -95,62 +98,140 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
return TRACE_TYPE_HANDLED;
}
-void start_ftrace_syscalls(void)
+extern char *__bad_type_size(void);
+
+#define SYSCALL_FIELD(type, name) \
+ sizeof(type) != sizeof(trace.name) ? \
+ __bad_type_size() : \
+ #type, #name, offsetof(typeof(trace), name), sizeof(trace.name)
+
+int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
- unsigned long flags;
- struct task_struct *g, *t;
+ int i;
+ int nr;
+ int ret;
+ struct syscall_metadata *entry;
+ struct syscall_trace_enter trace;
+ int offset = offsetof(struct syscall_trace_enter, args);
- mutex_lock(&syscall_trace_lock);
+ nr = syscall_name_to_nr(call->data);
+ entry = syscall_nr_to_meta(nr);
- /* Don't enable the flag on the tasks twice */
- if (++refcount != 1)
- goto unlock;
+ if (!entry)
+ return 0;
- arch_init_ftrace_syscalls();
- read_lock_irqsave(&tasklist_lock, flags);
+ ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
+ SYSCALL_FIELD(int, nr));
+ if (!ret)
+ return 0;
- do_each_thread(g, t) {
- set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
- } while_each_thread(g, t);
+ for (i = 0; i < entry->nb_args; i++) {
+ ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
+ entry->args[i]);
+ if (!ret)
+ return 0;
+ ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset,
+ sizeof(unsigned long));
+ if (!ret)
+ return 0;
+ offset += sizeof(unsigned long);
+ }
- read_unlock_irqrestore(&tasklist_lock, flags);
+ trace_seq_puts(s, "\nprint fmt: \"");
+ for (i = 0; i < entry->nb_args; i++) {
+ ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
+ sizeof(unsigned long),
+ i == entry->nb_args - 1 ? "" : ", ");
+ if (!ret)
+ return 0;
+ }
+ trace_seq_putc(s, '"');
-unlock:
- mutex_unlock(&syscall_trace_lock);
+ for (i = 0; i < entry->nb_args; i++) {
+ ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
+ entry->args[i]);
+ if (!ret)
+ return 0;
+ }
+
+ return trace_seq_putc(s, '\n');
}
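For orientation only (not emitted verbatim by this hunk): for a hypothetical two-argument syscall on a 64-bit kernel, the text built by syscall_enter_format() would look roughly like the lines below. The field names and offsets are assumptions; the shape follows from the trace_seq_printf() calls above, and every argument is reported with size 8 because the arguments are stored as unsigned long.

	field:int nr;	offset:12;	size:4;
	field:unsigned int fd;	offset:16;	size:8;
	field:const char * buf;	offset:24;	size:8;

	print fmt: "fd: 0x%08lx, buf: 0x%08lx", ((unsigned long)(REC->fd)), ((unsigned long)(REC->buf))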
-void stop_ftrace_syscalls(void)
+int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
- unsigned long flags;
- struct task_struct *g, *t;
+ int ret;
+ struct syscall_trace_exit trace;
- mutex_lock(&syscall_trace_lock);
+ ret = trace_seq_printf(s,
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
+ SYSCALL_FIELD(int, nr),
+ SYSCALL_FIELD(unsigned long, ret));
+ if (!ret)
+ return 0;
- /* There are perhaps still some users */
- if (--refcount)
- goto unlock;
+ return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
+}
- read_lock_irqsave(&tasklist_lock, flags);
+int syscall_enter_define_fields(struct ftrace_event_call *call)
+{
+ struct syscall_trace_enter trace;
+ struct syscall_metadata *meta;
+ int ret;
+ int nr;
+ int i;
+ int offset = offsetof(typeof(trace), args);
+
+ nr = syscall_name_to_nr(call->data);
+ meta = syscall_nr_to_meta(nr);
+
+ if (!meta)
+ return 0;
+
+ ret = trace_define_common_fields(call);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < meta->nb_args; i++) {
+ ret = trace_define_field(call, meta->types[i],
+ meta->args[i], offset,
+ sizeof(unsigned long), 0,
+ FILTER_OTHER);
+ offset += sizeof(unsigned long);
+ }
- do_each_thread(g, t) {
- clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
- } while_each_thread(g, t);
+ return ret;
+}
- read_unlock_irqrestore(&tasklist_lock, flags);
+int syscall_exit_define_fields(struct ftrace_event_call *call)
+{
+ struct syscall_trace_exit trace;
+ int ret;
-unlock:
- mutex_unlock(&syscall_trace_lock);
+ ret = trace_define_common_fields(call);
+ if (ret)
+ return ret;
+
+ ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0,
+ FILTER_OTHER);
+
+ return ret;
}
-void ftrace_syscall_enter(struct pt_regs *regs)
+void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
int size;
int syscall_nr;
syscall_nr = syscall_get_nr(current, regs);
+ if (syscall_nr < 0)
+ return;
+ if (!test_bit(syscall_nr, enabled_enter_syscalls))
+ return;
sys_data = syscall_nr_to_meta(syscall_nr);
if (!sys_data)
@@ -158,8 +239,8 @@ void ftrace_syscall_enter(struct pt_regs *regs)
size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
- event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_ENTER, size,
- 0, 0);
+ event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
+ size, 0, 0);
if (!event)
return;
@@ -167,24 +248,30 @@ void ftrace_syscall_enter(struct pt_regs *regs)
entry->nr = syscall_nr;
syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
- trace_current_buffer_unlock_commit(event, 0, 0);
- trace_wake_up();
+ if (!filter_current_check_discard(buffer, sys_data->enter_event,
+ entry, event))
+ trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
-void ftrace_syscall_exit(struct pt_regs *regs)
+void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
int syscall_nr;
syscall_nr = syscall_get_nr(current, regs);
+ if (syscall_nr < 0)
+ return;
+ if (!test_bit(syscall_nr, enabled_exit_syscalls))
+ return;
sys_data = syscall_nr_to_meta(syscall_nr);
if (!sys_data)
return;
- event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_EXIT,
+ event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
sizeof(*entry), 0, 0);
if (!event)
return;
@@ -193,58 +280,244 @@ void ftrace_syscall_exit(struct pt_regs *regs)
entry->nr = syscall_nr;
entry->ret = syscall_get_return_value(current, regs);
- trace_current_buffer_unlock_commit(event, 0, 0);
- trace_wake_up();
+ if (!filter_current_check_discard(buffer, sys_data->exit_event,
+ entry, event))
+ trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
-static int init_syscall_tracer(struct trace_array *tr)
+int reg_event_syscall_enter(void *ptr)
{
- start_ftrace_syscalls();
+ int ret = 0;
+ int num;
+ char *name;
+
+ name = (char *)ptr;
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return -ENOSYS;
+ mutex_lock(&syscall_trace_lock);
+ if (!sys_refcount_enter)
+ ret = register_trace_sys_enter(ftrace_syscall_enter);
+ if (ret) {
+ pr_info("event trace: Could not activate"
+ "syscall entry trace point");
+ } else {
+ set_bit(num, enabled_enter_syscalls);
+ sys_refcount_enter++;
+ }
+ mutex_unlock(&syscall_trace_lock);
+ return ret;
+}
+
+void unreg_event_syscall_enter(void *ptr)
+{
+ int num;
+ char *name;
- return 0;
+ name = (char *)ptr;
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return;
+ mutex_lock(&syscall_trace_lock);
+ sys_refcount_enter--;
+ clear_bit(num, enabled_enter_syscalls);
+ if (!sys_refcount_enter)
+ unregister_trace_sys_enter(ftrace_syscall_enter);
+ mutex_unlock(&syscall_trace_lock);
}
-static void reset_syscall_tracer(struct trace_array *tr)
+int reg_event_syscall_exit(void *ptr)
{
- stop_ftrace_syscalls();
- tracing_reset_online_cpus(tr);
+ int ret = 0;
+ int num;
+ char *name;
+
+ name = (char *)ptr;
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return -ENOSYS;
+ mutex_lock(&syscall_trace_lock);
+ if (!sys_refcount_exit)
+ ret = register_trace_sys_exit(ftrace_syscall_exit);
+ if (ret) {
+ pr_info("event trace: Could not activate"
+ "syscall exit trace point");
+ } else {
+ set_bit(num, enabled_exit_syscalls);
+ sys_refcount_exit++;
+ }
+ mutex_unlock(&syscall_trace_lock);
+ return ret;
}
-static struct trace_event syscall_enter_event = {
- .type = TRACE_SYSCALL_ENTER,
- .trace = print_syscall_enter,
-};
+void unreg_event_syscall_exit(void *ptr)
+{
+ int num;
+ char *name;
+
+ name = (char *)ptr;
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return;
+ mutex_lock(&syscall_trace_lock);
+ sys_refcount_exit--;
+ clear_bit(num, enabled_exit_syscalls);
+ if (!sys_refcount_exit)
+ unregister_trace_sys_exit(ftrace_syscall_exit);
+ mutex_unlock(&syscall_trace_lock);
+}
-static struct trace_event syscall_exit_event = {
- .type = TRACE_SYSCALL_EXIT,
- .trace = print_syscall_exit,
+struct trace_event event_syscall_enter = {
+ .trace = print_syscall_enter,
};
-static struct tracer syscall_tracer __read_mostly = {
- .name = "syscall",
- .init = init_syscall_tracer,
- .reset = reset_syscall_tracer,
- .flags = &syscalls_flags,
+struct trace_event event_syscall_exit = {
+ .trace = print_syscall_exit,
};
-__init int register_ftrace_syscalls(void)
+#ifdef CONFIG_EVENT_PROFILE
+
+static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
+static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
+static int sys_prof_refcount_enter;
+static int sys_prof_refcount_exit;
+
+static void prof_syscall_enter(struct pt_regs *regs, long id)
{
- int ret;
+ struct syscall_trace_enter *rec;
+ struct syscall_metadata *sys_data;
+ int syscall_nr;
+ int size;
- ret = register_ftrace_event(&syscall_enter_event);
- if (!ret) {
- printk(KERN_WARNING "event %d failed to register\n",
- syscall_enter_event.type);
- WARN_ON_ONCE(1);
+ syscall_nr = syscall_get_nr(current, regs);
+ if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
+ return;
+
+ sys_data = syscall_nr_to_meta(syscall_nr);
+ if (!sys_data)
+ return;
+
+ /* get the size after alignment with the u32 buffer size field */
+ size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
+ size = ALIGN(size + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
+
+ do {
+ char raw_data[size];
+
+ /* zero the dead bytes from the alignment so we don't leak stack data to user space */
+ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+ rec = (struct syscall_trace_enter *) raw_data;
+ tracing_generic_entry_update(&rec->ent, 0, 0);
+ rec->ent.type = sys_data->enter_id;
+ rec->nr = syscall_nr;
+ syscall_get_arguments(current, regs, 0, sys_data->nb_args,
+ (unsigned long *)&rec->args);
+ perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
+ } while (0);
+}
+
+int reg_prof_syscall_enter(char *name)
+{
+ int ret = 0;
+ int num;
+
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return -ENOSYS;
+
+ mutex_lock(&syscall_trace_lock);
+ if (!sys_prof_refcount_enter)
+ ret = register_trace_sys_enter(prof_syscall_enter);
+ if (ret) {
+ pr_info("event trace: Could not activate"
+ "syscall entry trace point");
+ } else {
+ set_bit(num, enabled_prof_enter_syscalls);
+ sys_prof_refcount_enter++;
}
+ mutex_unlock(&syscall_trace_lock);
+ return ret;
+}
- ret = register_ftrace_event(&syscall_exit_event);
- if (!ret) {
- printk(KERN_WARNING "event %d failed to register\n",
- syscall_exit_event.type);
- WARN_ON_ONCE(1);
+void unreg_prof_syscall_enter(char *name)
+{
+ int num;
+
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return;
+
+ mutex_lock(&syscall_trace_lock);
+ sys_prof_refcount_enter--;
+ clear_bit(num, enabled_prof_enter_syscalls);
+ if (!sys_prof_refcount_enter)
+ unregister_trace_sys_enter(prof_syscall_enter);
+ mutex_unlock(&syscall_trace_lock);
+}
+
+static void prof_syscall_exit(struct pt_regs *regs, long ret)
+{
+ struct syscall_metadata *sys_data;
+ struct syscall_trace_exit rec;
+ int syscall_nr;
+
+ syscall_nr = syscall_get_nr(current, regs);
+ if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
+ return;
+
+ sys_data = syscall_nr_to_meta(syscall_nr);
+ if (!sys_data)
+ return;
+
+ tracing_generic_entry_update(&rec.ent, 0, 0);
+ rec.ent.type = sys_data->exit_id;
+ rec.nr = syscall_nr;
+ rec.ret = syscall_get_return_value(current, regs);
+
+ perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
+}
+
+int reg_prof_syscall_exit(char *name)
+{
+ int ret = 0;
+ int num;
+
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return -ENOSYS;
+
+ mutex_lock(&syscall_trace_lock);
+ if (!sys_prof_refcount_exit)
+ ret = register_trace_sys_exit(prof_syscall_exit);
+ if (ret) {
+ pr_info("event trace: Could not activate"
+ "syscall entry trace point");
+ } else {
+ set_bit(num, enabled_prof_exit_syscalls);
+ sys_prof_refcount_exit++;
}
+ mutex_unlock(&syscall_trace_lock);
+ return ret;
+}
- return register_tracer(&syscall_tracer);
+void unreg_prof_syscall_exit(char *name)
+{
+ int num;
+
+ num = syscall_name_to_nr(name);
+ if (num < 0 || num >= NR_syscalls)
+ return;
+
+ mutex_lock(&syscall_trace_lock);
+ sys_prof_refcount_exit--;
+ clear_bit(num, enabled_prof_exit_syscalls);
+ if (!sys_prof_refcount_exit)
+ unregister_trace_sys_exit(prof_syscall_exit);
+ mutex_unlock(&syscall_trace_lock);
}
-device_initcall(register_ftrace_syscalls);
+
+#endif
+
+
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 97fcea4acce1..40cafb07dffd 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -9,6 +9,7 @@
#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
+#include <linux/kref.h>
#include "trace_stat.h"
#include "trace.h"
@@ -16,6 +17,7 @@
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
struct list_head list;
+ struct kref kref;
int cpu;
pid_t pid;
/* Can be inserted from interrupt or user context, need to be atomic */
@@ -39,6 +41,11 @@ struct workqueue_global_stats {
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
+static void cpu_workqueue_stat_free(struct kref *kref)
+{
+ kfree(container_of(kref, struct cpu_workqueue_stats, kref));
+}
+
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
@@ -96,8 +103,8 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
return;
}
INIT_LIST_HEAD(&cws->list);
+ kref_init(&cws->kref);
cws->cpu = cpu;
-
cws->pid = wq_thread->pid;
spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
@@ -118,7 +125,7 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
list) {
if (node->pid == wq_thread->pid) {
list_del(&node->list);
- kfree(node);
+ kref_put(&node->kref, cpu_workqueue_stat_free);
goto found;
}
}
@@ -137,9 +144,11 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
- if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+ if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
struct cpu_workqueue_stats, list);
+ kref_get(&ret->kref);
+ }
spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
@@ -162,9 +171,9 @@ static void *workqueue_stat_start(struct tracer_stat *trace)
static void *workqueue_stat_next(void *prev, int idx)
{
struct cpu_workqueue_stats *prev_cws = prev;
+ struct cpu_workqueue_stats *ret;
int cpu = prev_cws->cpu;
unsigned long flags;
- void *ret = NULL;
spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
@@ -175,11 +184,14 @@ static void *workqueue_stat_next(void *prev, int idx)
return NULL;
} while (!(ret = workqueue_stat_start_cpu(cpu)));
return ret;
+ } else {
+ ret = list_entry(prev_cws->list.next,
+ struct cpu_workqueue_stats, list);
+ kref_get(&ret->kref);
}
spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
- return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
- list);
+ return ret;
}
static int workqueue_stat_show(struct seq_file *s, void *p)
@@ -203,6 +215,13 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
return 0;
}
+static void workqueue_stat_release(void *stat)
+{
+ struct cpu_workqueue_stats *node = stat;
+
+ kref_put(&node->kref, cpu_workqueue_stat_free);
+}
+
static int workqueue_stat_headers(struct seq_file *s)
{
seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
@@ -215,6 +234,7 @@ struct tracer_stat workqueue_stats __read_mostly = {
.stat_start = workqueue_stat_start,
.stat_next = workqueue_stat_next,
.stat_show = workqueue_stat_show,
+ .stat_release = workqueue_stat_release,
.stat_headers = workqueue_stat_headers
};
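To summarize the lifetime rules the kref above introduces (a sketch restating the patch, not new behaviour): probe_workqueue_creation() starts each cpu_workqueue_stats with one reference, workqueue_stat_start_cpu()/workqueue_stat_next() take an extra reference for every entry handed to the seq_file iterator, and both workqueue_stat_release() and probe_workqueue_destruction() drop theirs with kref_put(), so the kfree() in cpu_workqueue_stat_free() only runs once the stat reader and the workqueue teardown have both let go. The same pattern in isolation, with hypothetical my_* names:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct my_entry {			/* stand-in for cpu_workqueue_stats */
		struct kref kref;
	};

	static void my_entry_free(struct kref *kref)
	{
		kfree(container_of(kref, struct my_entry, kref));
	}

	static struct my_entry *my_entry_grab(struct my_entry *e)
	{
		kref_get(&e->kref);		/* reference for the reader */
		return e;
	}

	static void my_entry_put(struct my_entry *e)
	{
		kref_put(&e->kref, my_entry_free);	/* frees on the last put */
	}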
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 1ef5d3a601c7..9489a0a9b1be 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -24,6 +24,7 @@
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/sched.h>
extern struct tracepoint __start___tracepoints[];
extern struct tracepoint __stop___tracepoints[];
@@ -242,6 +243,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
{
WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+ if (elem->regfunc && !elem->state && active)
+ elem->regfunc();
+ else if (elem->unregfunc && elem->state && !active)
+ elem->unregfunc();
+
/*
* rcu_assign_pointer has a smp_wmb() which makes sure that the new
* probe callbacks array is consistent before setting a pointer to it.
@@ -261,6 +267,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
*/
static void disable_tracepoint(struct tracepoint *elem)
{
+ if (elem->unregfunc && elem->state)
+ elem->unregfunc();
+
elem->state = 0;
rcu_assign_pointer(elem->funcs, NULL);
}
@@ -554,9 +563,6 @@ int tracepoint_module_notify(struct notifier_block *self,
switch (val) {
case MODULE_STATE_COMING:
- tracepoint_update_probe_range(mod->tracepoints,
- mod->tracepoints + mod->num_tracepoints);
- break;
case MODULE_STATE_GOING:
tracepoint_update_probe_range(mod->tracepoints,
mod->tracepoints + mod->num_tracepoints);
@@ -577,3 +583,41 @@ static int init_tracepoints(void)
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+
+/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
+static int sys_tracepoint_refcount;
+
+void syscall_regfunc(void)
+{
+ unsigned long flags;
+ struct task_struct *g, *t;
+
+ if (!sys_tracepoint_refcount) {
+ read_lock_irqsave(&tasklist_lock, flags);
+ do_each_thread(g, t) {
+ /* Skip kernel threads. */
+ if (t->mm)
+ set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
+ } while_each_thread(g, t);
+ read_unlock_irqrestore(&tasklist_lock, flags);
+ }
+ sys_tracepoint_refcount++;
+}
+
+void syscall_unregfunc(void)
+{
+ unsigned long flags;
+ struct task_struct *g, *t;
+
+ sys_tracepoint_refcount--;
+ if (!sys_tracepoint_refcount) {
+ read_lock_irqsave(&tasklist_lock, flags);
+ do_each_thread(g, t) {
+ clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
+ } while_each_thread(g, t);
+ read_unlock_irqrestore(&tasklist_lock, flags);
+ }
+}
+#endif
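With the set_tracepoint()/disable_tracepoint() changes above, a tracepoint's regfunc runs when its first probe is attached and its unregfunc when the last probe goes away; syscall_regfunc()/syscall_unregfunc() use that to flip TIF_SYSCALL_TRACEPOINT on user tasks only while someone is actually tracing syscalls. The sys_enter/sys_exit events in trace/events/syscalls.h are presumably declared through a callback-taking TRACE_EVENT variant; assuming a TRACE_EVENT_FN-style macro (the name and argument order here are an assumption, not quoted from the patch), the hookup would look roughly like:

	TRACE_EVENT_FN(sys_enter,
		TP_PROTO(struct pt_regs *regs, long id),
		TP_ARGS(regs, id),
		/* TP_STRUCT__entry / TP_fast_assign / TP_printk elided */
		syscall_regfunc, syscall_unregfunc
	);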
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0668795d8818..addfe2df93b1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -317,8 +317,6 @@ static int worker_thread(void *__cwq)
if (cwq->wq->freezeable)
set_freezable();
- set_user_nice(current, -5);
-
for (;;) {
prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
if (!freezing(current) &&
@@ -600,7 +598,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
* schedule_work - put work task in global workqueue
* @work: job to be done
*
- * This puts a job in the kernel-global workqueue.
+ * Returns zero if @work was already on the kernel-global workqueue and
+ * non-zero otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
*/
int schedule_work(struct work_struct *work)
{
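A hedged caller sketch of the return convention documented above (kick_my_work and my_work are assumed names; the return value only says whether the work was newly queued, not whether it has run yet):

	static struct work_struct my_work;	/* assumed initialized elsewhere with INIT_WORK() */

	static void kick_my_work(void)
	{
		if (!schedule_work(&my_work))
			pr_debug("my_work already pending, it keeps its place in the queue\n");
	}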
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 12327b2bb785..7dbd5d9c29a4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -653,6 +653,21 @@ config DEBUG_NOTIFIERS
This is a relatively cheap check but if you care about maximum
performance, say N.
+config DEBUG_CREDENTIALS
+ bool "Debug credential management"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on some debug checking for credential
+ management. The additional code keeps track of the number of
+ pointers from task_structs to any given cred struct, and checks to
+ see that this number never exceeds the usage count of the cred
+ struct.
+
+ Furthermore, if SELinux is enabled, this also checks that the
+ security pointer in the cred struct is never seen to be invalid.
+
+ If unsure, say N.
+
#
# Select this config option from the architecture Kconfig, if
# it is preferred to always offer frame pointers as a config
@@ -725,7 +740,7 @@ config RCU_TORTURE_TEST_RUNNABLE
config RCU_CPU_STALL_DETECTOR
bool "Check for stalled CPUs delaying RCU grace periods"
- depends on CLASSIC_RCU || TREE_RCU
+ depends on TREE_RCU || TREE_PREEMPT_RCU
default n
help
This option causes RCU to printk information on which
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
#include <linux/sched.h>
-/**
- * is_single_threaded - Determine if a thread group is single-threaded or not
- * @p: A task in the thread group in question
- *
- * This returns true if the thread group to which a task belongs is single
- * threaded, false if it is not.
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
*/
-bool is_single_threaded(struct task_struct *p)
+bool current_is_single_threaded(void)
{
- struct task_struct *g, *t;
- struct mm_struct *mm = p->mm;
+ struct task_struct *task = current;
+ struct mm_struct *mm = task->mm;
+ struct task_struct *p, *t;
+ bool ret;
- if (atomic_read(&p->signal->count) != 1)
- goto no;
+ if (atomic_read(&task->signal->live) != 1)
+ return false;
- if (atomic_read(&p->mm->mm_users) != 1) {
- read_lock(&tasklist_lock);
- do_each_thread(g, t) {
- if (t->mm == mm && t != p)
- goto no_unlock;
- } while_each_thread(g, t);
- read_unlock(&tasklist_lock);
- }
+ if (atomic_read(&mm->mm_users) == 1)
+ return true;
- return true;
+ ret = false;
+ rcu_read_lock();
+ for_each_process(p) {
+ if (unlikely(p->flags & PF_KTHREAD))
+ continue;
+ if (unlikely(p == task->group_leader))
+ continue;
+
+ t = p;
+ do {
+ if (unlikely(t->mm == mm))
+ goto found;
+ if (likely(t->mm))
+ break;
+ /*
+ * t->mm == NULL. Make sure next_thread/next_task
+ * will see other CLONE_VM tasks which might be
+ * forked before exiting.
+ */
+ smp_rmb();
+ } while_each_thread(p, t);
+ }
+ ret = true;
+found:
+ rcu_read_unlock();
-no_unlock:
- read_unlock(&tasklist_lock);
-no:
- return false;
+ return ret;
}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bffe6d7ef9d9..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -114,46 +114,11 @@ setup_io_tlb_npages(char *str)
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
-void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
-{
- return alloc_bootmem_low_pages(size);
-}
-
-void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
-{
- return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
-}
-
-dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-{
- return baddr;
-}
-
+/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
{
- return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
-}
-
-void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
-{
- return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
-}
-
-int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
- dma_addr_t addr, size_t size)
-{
- return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
-}
-
-int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
-{
- return 0;
+ return phys_to_dma(hwdev, virt_to_phys(address));
}
static void swiotlb_print_info(unsigned long bytes)
@@ -189,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
+ io_tlb_start = alloc_bootmem_low_pages(bytes);
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
io_tlb_end = io_tlb_start + bytes;
@@ -245,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
+ io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+ order);
if (io_tlb_start)
break;
order--;
@@ -315,20 +281,10 @@ cleanup1:
return -ENOMEM;
}
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
+static int is_swiotlb_buffer(phys_addr_t paddr)
{
- return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
-}
-
-static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
-{
- return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
-}
-
-static int is_swiotlb_buffer(char *addr)
-{
- return addr >= io_tlb_start && addr < io_tlb_end;
+ return paddr >= virt_to_phys(io_tlb_start) &&
+ paddr < virt_to_phys(io_tlb_end);
}
/*
@@ -561,9 +517,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
- if (ret &&
- !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
- size)) {
+ if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
@@ -585,7 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
/* Confirm address can be DMA'd by device */
- if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
+ if (dev_addr + size > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
@@ -601,11 +555,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+ dma_addr_t dev_addr)
{
+ phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
WARN_ON(irqs_disabled());
- if (!is_swiotlb_buffer(vaddr))
- free_pages((unsigned long) vaddr, get_order(size));
+ if (!is_swiotlb_buffer(paddr))
+ free_pages((unsigned long)vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
@@ -625,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
"device %s\n", size, dev ? dev_name(dev) : "?");
- if (size > io_tlb_overflow && do_panic) {
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
- panic("DMA: Memory would be corrupted\n");
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
- panic("DMA: Random memory would be DMAed\n");
- }
+ if (size <= io_tlb_overflow || !do_panic)
+ return;
+
+ if (dir == DMA_BIDIRECTIONAL)
+ panic("DMA: Random memory could be DMA accessed\n");
+ if (dir == DMA_FROM_DEVICE)
+ panic("DMA: Random memory could be DMA written\n");
+ if (dir == DMA_TO_DEVICE)
+ panic("DMA: Random memory could be DMA read\n");
}
/*
@@ -646,7 +605,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
struct dma_attrs *attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
+ dma_addr_t dev_addr = phys_to_dma(dev, phys);
void *map;
BUG_ON(dir == DMA_NONE);
@@ -655,8 +614,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(dev, dev_addr, size) &&
- !range_needs_mapping(phys, size))
+ if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
return dev_addr;
/*
@@ -673,7 +631,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (address_needs_mapping(dev, dev_addr, size))
+ if (!dma_capable(dev, dev_addr, size))
panic("map_single: bounce buffer is not DMA'ble");
return dev_addr;
@@ -691,19 +649,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir)
{
- char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+ phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(dma_addr)) {
- do_unmap_single(hwdev, dma_addr, size, dir);
+ if (is_swiotlb_buffer(paddr)) {
+ do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
return;
}
if (dir != DMA_FROM_DEVICE)
return;
- dma_mark_clean(dma_addr, size);
+ /*
+ * phys_to_virt doesn't work with hihgmem page but we could
+ * call dma_mark_clean() with hihgmem page here. However, we
+ * are fine since dma_mark_clean() is null on POWERPC. We can
+ * make dma_mark_clean() take a physical address if necessary.
+ */
+ dma_mark_clean(phys_to_virt(paddr), size);
}
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -728,19 +692,19 @@ static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, int target)
{
- char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+ phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(dma_addr)) {
- sync_single(hwdev, dma_addr, size, dir, target);
+ if (is_swiotlb_buffer(paddr)) {
+ sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
return;
}
if (dir != DMA_FROM_DEVICE)
return;
- dma_mark_clean(dma_addr, size);
+ dma_mark_clean(phys_to_virt(paddr), size);
}
void
@@ -817,10 +781,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
for_each_sg(sgl, sg, nelems, i) {
phys_addr_t paddr = sg_phys(sg);
- dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);
+ dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
- if (range_needs_mapping(paddr, sg->length) ||
- address_needs_mapping(hwdev, dev_addr, sg->length)) {
+ if (swiotlb_force ||
+ !dma_capable(hwdev, dev_addr, sg->length)) {
void *map = map_single(hwdev, sg_phys(sg),
sg->length, dir);
if (!map) {
diff --git a/mm/Makefile b/mm/Makefile
index 5e0bd6426693..147a7a7873c4 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -8,7 +8,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
vmalloc.o
obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
- maccess.o page_alloc.o page-writeback.o pdflush.o \
+ maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
page_isolation.o mm_init.o $(mmu-y)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index c86edd244294..d3ca0dac1111 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -1,8 +1,11 @@
#include <linux/wait.h>
#include <linux/backing-dev.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
@@ -14,6 +17,7 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
+ .name = "default",
.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
.state = 0,
.capabilities = BDI_CAP_MAP_COPY,
@@ -22,6 +26,18 @@ struct backing_dev_info default_backing_dev_info = {
EXPORT_SYMBOL_GPL(default_backing_dev_info);
static struct class *bdi_class;
+DEFINE_SPINLOCK(bdi_lock);
+LIST_HEAD(bdi_list);
+LIST_HEAD(bdi_pending_list);
+
+static struct task_struct *sync_supers_tsk;
+static struct timer_list sync_supers_timer;
+
+static int bdi_sync_supers(void *);
+static void sync_supers_timer_fn(unsigned long);
+static void arm_supers_timer(void);
+
+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -37,9 +53,29 @@ static void bdi_debug_init(void)
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
struct backing_dev_info *bdi = m->private;
+ struct bdi_writeback *wb;
unsigned long background_thresh;
unsigned long dirty_thresh;
unsigned long bdi_thresh;
+ unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
+ struct inode *inode;
+
+ /*
+ * inode_lock is enough here; the bdi->wb_list is protected by
+ * RCU on the reader side
+ */
+ nr_wb = nr_dirty = nr_io = nr_more_io = 0;
+ spin_lock(&inode_lock);
+ list_for_each_entry(wb, &bdi->wb_list, list) {
+ nr_wb++;
+ list_for_each_entry(inode, &wb->b_dirty, i_list)
+ nr_dirty++;
+ list_for_each_entry(inode, &wb->b_io, i_list)
+ nr_io++;
+ list_for_each_entry(inode, &wb->b_more_io, i_list)
+ nr_more_io++;
+ }
+ spin_unlock(&inode_lock);
get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
@@ -49,12 +85,22 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
"BdiReclaimable: %8lu kB\n"
"BdiDirtyThresh: %8lu kB\n"
"DirtyThresh: %8lu kB\n"
- "BackgroundThresh: %8lu kB\n",
+ "BackgroundThresh: %8lu kB\n"
+ "WriteBack threads:%8lu\n"
+ "b_dirty: %8lu\n"
+ "b_io: %8lu\n"
+ "b_more_io: %8lu\n"
+ "bdi_list: %8u\n"
+ "state: %8lx\n"
+ "wb_mask: %8lx\n"
+ "wb_list: %8u\n"
+ "wb_cnt: %8u\n",
(unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
(unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
- K(bdi_thresh),
- K(dirty_thresh),
- K(background_thresh));
+ K(bdi_thresh), K(dirty_thresh),
+ K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
+ !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
+ !list_empty(&bdi->wb_list), bdi->wb_cnt);
#undef K
return 0;
@@ -185,6 +231,13 @@ static int __init default_bdi_init(void)
{
int err;
+ sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
+ BUG_ON(IS_ERR(sync_supers_tsk));
+
+ init_timer(&sync_supers_timer);
+ setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
+ arm_supers_timer();
+
err = bdi_init(&default_backing_dev_info);
if (!err)
bdi_register(&default_backing_dev_info, NULL, "default");
@@ -193,6 +246,248 @@ static int __init default_bdi_init(void)
}
subsys_initcall(default_bdi_init);
+static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+{
+ memset(wb, 0, sizeof(*wb));
+
+ wb->bdi = bdi;
+ wb->last_old_flush = jiffies;
+ INIT_LIST_HEAD(&wb->b_dirty);
+ INIT_LIST_HEAD(&wb->b_io);
+ INIT_LIST_HEAD(&wb->b_more_io);
+}
+
+static void bdi_task_init(struct backing_dev_info *bdi,
+ struct bdi_writeback *wb)
+{
+ struct task_struct *tsk = current;
+
+ spin_lock(&bdi->wb_lock);
+ list_add_tail_rcu(&wb->list, &bdi->wb_list);
+ spin_unlock(&bdi->wb_lock);
+
+ tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ set_freezable();
+
+ /*
+ * Our parent may run at a different priority, just set us to normal
+ */
+ set_user_nice(tsk, 0);
+}
+
+static int bdi_start_fn(void *ptr)
+{
+ struct bdi_writeback *wb = ptr;
+ struct backing_dev_info *bdi = wb->bdi;
+ int ret;
+
+ /*
+ * Add us to the active bdi_list
+ */
+ spin_lock(&bdi_lock);
+ list_add(&bdi->bdi_list, &bdi_list);
+ spin_unlock(&bdi_lock);
+
+ bdi_task_init(bdi, wb);
+
+ /*
+ * Clear pending bit and wakeup anybody waiting to tear us down
+ */
+ clear_bit(BDI_pending, &bdi->state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&bdi->state, BDI_pending);
+
+ ret = bdi_writeback_task(wb);
+
+ /*
+ * Remove us from the list
+ */
+ spin_lock(&bdi->wb_lock);
+ list_del_rcu(&wb->list);
+ spin_unlock(&bdi->wb_lock);
+
+ /*
+ * Flush any work that raced with us exiting. No new work
+ * will be added, since this bdi isn't discoverable anymore.
+ */
+ if (!list_empty(&bdi->work_list))
+ wb_do_writeback(wb, 1);
+
+ wb->task = NULL;
+ return ret;
+}
+
+int bdi_has_dirty_io(struct backing_dev_info *bdi)
+{
+ return wb_has_dirty_io(&bdi->wb);
+}
+
+static void bdi_flush_io(struct backing_dev_info *bdi)
+{
+ struct writeback_control wbc = {
+ .bdi = bdi,
+ .sync_mode = WB_SYNC_NONE,
+ .older_than_this = NULL,
+ .range_cyclic = 1,
+ .nr_to_write = 1024,
+ };
+
+ writeback_inodes_wbc(&wbc);
+}
+
+/*
+ * kupdated() used to do this. We cannot do it from the bdi_forker_task()
+ * or we risk deadlocking on ->s_umount. The longer term solution would be
+ * to implement sync_supers_bdi() or similar and simply do it from the
+ * bdi writeback tasks individually.
+ */
+static int bdi_sync_supers(void *unused)
+{
+ set_user_nice(current, 0);
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+
+ /*
+ * Do this periodically, like kupdated() did before.
+ */
+ sync_supers();
+ }
+
+ return 0;
+}
+
+static void arm_supers_timer(void)
+{
+ unsigned long next;
+
+ next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
+ mod_timer(&sync_supers_timer, round_jiffies_up(next));
+}
+
+static void sync_supers_timer_fn(unsigned long unused)
+{
+ wake_up_process(sync_supers_tsk);
+ arm_supers_timer();
+}
+
+static int bdi_forker_task(void *ptr)
+{
+ struct bdi_writeback *me = ptr;
+
+ bdi_task_init(me->bdi, me);
+
+ for (;;) {
+ struct backing_dev_info *bdi, *tmp;
+ struct bdi_writeback *wb;
+
+ /*
+ * Temporary measure, we want to make sure we don't see
+ * dirty data on the default backing_dev_info
+ */
+ if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
+ wb_do_writeback(me, 0);
+
+ spin_lock(&bdi_lock);
+
+ /*
+ * Check if any existing bdi's have dirty data without
+ * a thread registered. If so, set that up.
+ */
+ list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
+ if (bdi->wb.task)
+ continue;
+ if (list_empty(&bdi->work_list) &&
+ !bdi_has_dirty_io(bdi))
+ continue;
+
+ bdi_add_default_flusher_task(bdi);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (list_empty(&bdi_pending_list)) {
+ unsigned long wait;
+
+ spin_unlock(&bdi_lock);
+ wait = msecs_to_jiffies(dirty_writeback_interval * 10);
+ schedule_timeout(wait);
+ try_to_freeze();
+ continue;
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+ /*
+ * This is our real job - check for pending entries in
+ * bdi_pending_list, and create the tasks that got added
+ */
+ bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
+ bdi_list);
+ list_del_init(&bdi->bdi_list);
+ spin_unlock(&bdi_lock);
+
+ wb = &bdi->wb;
+ wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
+ dev_name(bdi->dev));
+ /*
+ * If task creation fails, then re-add the bdi to
+ * the pending list and force writeout of the bdi
+ * from this forker thread. That will free some memory
+ * and we can try again.
+ */
+ if (IS_ERR(wb->task)) {
+ wb->task = NULL;
+
+ /*
+ * Add this 'bdi' to the back, so we get
+ * a chance to flush other bdi's to free
+ * memory.
+ */
+ spin_lock(&bdi_lock);
+ list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+ spin_unlock(&bdi_lock);
+
+ bdi_flush_io(bdi);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Add the default flusher task that gets created for any bdi
+ * that has dirty data pending writeout
+ */
+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
+{
+ if (!bdi_cap_writeback_dirty(bdi))
+ return;
+
+ if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
+ printk(KERN_ERR "bdi %p/%s is not registered!\n",
+ bdi, bdi->name);
+ return;
+ }
+
+ /*
+ * Check with the helper whether to proceed adding a task. Will only
+ * abort if two or more simultaneous calls to
+ * bdi_add_default_flusher_task() occurred; further additions will block
+ * waiting for previous additions to finish.
+ */
+ if (!test_and_set_bit(BDI_pending, &bdi->state)) {
+ list_move_tail(&bdi->bdi_list, &bdi_pending_list);
+
+ /*
+ * We are now on the pending list, wake up bdi_forker_task()
+ * to finish the job and add us back to the active bdi_list
+ */
+ wake_up_process(default_backing_dev_info.wb.task);
+ }
+}
+
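The BDI_pending bit is the handshake between the three parties above; the fragments below only restate, side by side, what the patch itself does:

	/* bdi_add_default_flusher_task(): only the first caller queues the bdi */
	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
		list_move_tail(&bdi->bdi_list, &bdi_pending_list);
		wake_up_process(default_backing_dev_info.wb.task);
	}

	/* bdi_start_fn(): the flusher task is running, clear the bit and wake waiters */
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);

	/* bdi_wb_shutdown(): wait for any pending setup to finish before tearing down */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait, TASK_UNINTERRUPTIBLE);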
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
@@ -211,9 +506,35 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
goto exit;
}
+ spin_lock(&bdi_lock);
+ list_add_tail(&bdi->bdi_list, &bdi_list);
+ spin_unlock(&bdi_lock);
+
bdi->dev = dev;
- bdi_debug_register(bdi, dev_name(dev));
+ /*
+ * Just start the forker thread for our default backing_dev_info,
+ * and add other bdi's to the list. They will get a thread created
+ * on-demand when they need it.
+ */
+ if (bdi_cap_flush_forker(bdi)) {
+ struct bdi_writeback *wb = &bdi->wb;
+
+ wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
+ dev_name(dev));
+ if (IS_ERR(wb->task)) {
+ wb->task = NULL;
+ ret = -ENOMEM;
+
+ spin_lock(&bdi_lock);
+ list_del(&bdi->bdi_list);
+ spin_unlock(&bdi_lock);
+ goto exit;
+ }
+ }
+
+ bdi_debug_register(bdi, dev_name(dev));
+ set_bit(BDI_registered, &bdi->state);
exit:
return ret;
}
@@ -225,9 +546,42 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
}
EXPORT_SYMBOL(bdi_register_dev);
+/*
+ * Remove bdi from the global list and shutdown any threads we have running
+ */
+static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+{
+ struct bdi_writeback *wb;
+
+ if (!bdi_cap_writeback_dirty(bdi))
+ return;
+
+ /*
+ * If setup is pending, wait for that to complete first
+ */
+ wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
+ TASK_UNINTERRUPTIBLE);
+
+ /*
+ * Make sure nobody finds us on the bdi_list anymore
+ */
+ spin_lock(&bdi_lock);
+ list_del(&bdi->bdi_list);
+ spin_unlock(&bdi_lock);
+
+ /*
+ * Finally, kill the kernel threads. We don't need to be RCU
+ * safe anymore, since the bdi is gone from visibility.
+ */
+ list_for_each_entry(wb, &bdi->wb_list, list)
+ kthread_stop(wb->task);
+}
+
void bdi_unregister(struct backing_dev_info *bdi)
{
if (bdi->dev) {
+ if (!bdi_cap_flush_forker(bdi))
+ bdi_wb_shutdown(bdi);
bdi_debug_unregister(bdi);
device_unregister(bdi->dev);
bdi->dev = NULL;
@@ -237,14 +591,25 @@ EXPORT_SYMBOL(bdi_unregister);
int bdi_init(struct backing_dev_info *bdi)
{
- int i;
- int err;
+ int i, err;
bdi->dev = NULL;
bdi->min_ratio = 0;
bdi->max_ratio = 100;
bdi->max_prop_frac = PROP_FRAC_BASE;
+ spin_lock_init(&bdi->wb_lock);
+ INIT_LIST_HEAD(&bdi->bdi_list);
+ INIT_LIST_HEAD(&bdi->wb_list);
+ INIT_LIST_HEAD(&bdi->work_list);
+
+ bdi_wb_init(&bdi->wb, bdi);
+
+ /*
+ * Just one thread support for now, hard code mask and count
+ */
+ bdi->wb_mask = 1;
+ bdi->wb_cnt = 1;
for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
err = percpu_counter_init(&bdi->bdi_stat[i], 0);
@@ -269,6 +634,8 @@ void bdi_destroy(struct backing_dev_info *bdi)
{
int i;
+ WARN_ON(bdi_has_dirty_io(bdi));
+
bdi_unregister(bdi);
for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 701740c9e81b..555d5d2731c6 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -521,7 +521,11 @@ find_block:
region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
start_off);
memset(region, 0, size);
- kmemleak_alloc(region, size, 1, 0);
+ /*
+ * The min_count is set to 0 so that bootmem allocated blocks
+ * are never reported as leaks.
+ */
+ kmemleak_alloc(region, size, 0, 0);
return region;
}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 487267310a84..4ea4510e2996 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -92,11 +92,13 @@
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/workqueue.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>
+#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
/*
@@ -107,6 +109,7 @@
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
+#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
#define BYTES_PER_POINTER sizeof(void *)
@@ -120,6 +123,9 @@ struct kmemleak_scan_area {
size_t length;
};
+#define KMEMLEAK_GREY 0
+#define KMEMLEAK_BLACK -1
+
/*
* Structure holding the metadata for each allocated memory block.
* Modifications to such objects should be made while holding the
@@ -161,6 +167,15 @@ struct kmemleak_object {
/* flag set on newly allocated objects */
#define OBJECT_NEW (1 << 3)
+/* number of bytes to print per line; must be 16 or 32 */
+#define HEX_ROW_SIZE 16
+/* number of bytes to print at a time (1, 2, 4, 8) */
+#define HEX_GROUP_SIZE 1
+/* include ASCII after the hex output */
+#define HEX_ASCII 1
+/* max number of lines to be printed */
+#define HEX_MAX_LINES 2
+
/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
@@ -228,11 +243,14 @@ struct early_log {
int min_count; /* minimum reference count */
unsigned long offset; /* scan area offset */
size_t length; /* scan area length */
+ unsigned long trace[MAX_TRACE]; /* stack trace */
+ unsigned int trace_len; /* stack trace length */
};
/* early logging buffer and current position */
-static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
-static int crt_early_log;
+static struct early_log
+ early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
+static int crt_early_log __initdata;
static void kmemleak_disable(void);
@@ -255,6 +273,35 @@ static void kmemleak_disable(void);
} while (0)
/*
+ * Print the object's hex dump to the seq file. The number of lines to be
+ * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
+ * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
+ * with the object->lock held.
+ */
+static void hex_dump_object(struct seq_file *seq,
+ struct kmemleak_object *object)
+{
+ const u8 *ptr = (const u8 *)object->pointer;
+ int i, len, remaining;
+ unsigned char linebuf[HEX_ROW_SIZE * 5];
+
+ /* limit the number of lines to HEX_MAX_LINES */
+ remaining = len =
+ min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
+
+ seq_printf(seq, " hex dump (first %d bytes):\n", len);
+ for (i = 0; i < len; i += HEX_ROW_SIZE) {
+ int linelen = min(remaining, HEX_ROW_SIZE);
+
+ remaining -= HEX_ROW_SIZE;
+ hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
+ HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
+ HEX_ASCII);
+ seq_printf(seq, " %s\n", linebuf);
+ }
+}
+
+/*
* Object colors, encoded with count and min_count:
* - white - orphan object, not enough references to it (count < min_count)
* - gray - not orphan, not marked as false positive (min_count == 0) or
@@ -264,19 +311,21 @@ static void kmemleak_disable(void);
* Newly created objects don't have any color assigned (object->count == -1)
* before the next memory scan when they become white.
*/
-static int color_white(const struct kmemleak_object *object)
+static bool color_white(const struct kmemleak_object *object)
{
- return object->count != -1 && object->count < object->min_count;
+ return object->count != KMEMLEAK_BLACK &&
+ object->count < object->min_count;
}
-static int color_gray(const struct kmemleak_object *object)
+static bool color_gray(const struct kmemleak_object *object)
{
- return object->min_count != -1 && object->count >= object->min_count;
+ return object->min_count != KMEMLEAK_BLACK &&
+ object->count >= object->min_count;
}
-static int color_black(const struct kmemleak_object *object)
+static bool color_black(const struct kmemleak_object *object)
{
- return object->min_count == -1;
+ return object->min_count == KMEMLEAK_BLACK;
}
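In terms of the public calls used elsewhere in this patch, the colours map onto the API roughly as follows (obj is a hypothetical allocation, shown only to illustrate the scheme):

	obj = kmalloc(size, GFP_KERNEL);	/* tracked with min_count == 1: white until enough references are found */
	kmemleak_not_leak(obj);			/* paint it grey: still scanned, never reported as a leak */
	kmemleak_ignore(obj);			/* paint it black: neither scanned nor reported */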
/*
@@ -284,7 +333,7 @@ static int color_black(const struct kmemleak_object *object)
* not be deleted and have a minimum age to avoid false positives caused by
* pointers temporarily stored in CPU registers.
*/
-static int unreferenced_object(struct kmemleak_object *object)
+static bool unreferenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
time_before_eq(object->jiffies + jiffies_min_age,
@@ -304,6 +353,7 @@ static void print_unreferenced(struct seq_file *seq,
object->pointer, object->size);
seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
+ hex_dump_object(seq, object);
seq_printf(seq, " backtrace:\n");
for (i = 0; i < object->trace_len; i++) {
@@ -330,6 +380,7 @@ static void dump_object_info(struct kmemleak_object *object)
object->comm, object->pid, object->jiffies);
pr_notice(" min_count = %d\n", object->min_count);
pr_notice(" count = %d\n", object->count);
+ pr_notice(" flags = 0x%lx\n", object->flags);
pr_notice(" backtrace:\n");
print_stack_trace(&trace, 4);
}
@@ -434,21 +485,36 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
}
/*
+ * Save stack trace to the given array of MAX_TRACE size.
+ */
+static int __save_stack_trace(unsigned long *trace)
+{
+ struct stack_trace stack_trace;
+
+ stack_trace.max_entries = MAX_TRACE;
+ stack_trace.nr_entries = 0;
+ stack_trace.entries = trace;
+ stack_trace.skip = 2;
+ save_stack_trace(&stack_trace);
+
+ return stack_trace.nr_entries;
+}
+
+/*
* Create the metadata (struct kmemleak_object) corresponding to an allocated
* memory block and add it to the object_list and object_tree_root.
*/
-static void create_object(unsigned long ptr, size_t size, int min_count,
- gfp_t gfp)
+static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp)
{
unsigned long flags;
struct kmemleak_object *object;
struct prio_tree_node *node;
- struct stack_trace trace;
object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
if (!object) {
kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
- return;
+ return NULL;
}
INIT_LIST_HEAD(&object->object_list);
@@ -482,18 +548,14 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
}
/* kernel backtrace */
- trace.max_entries = MAX_TRACE;
- trace.nr_entries = 0;
- trace.entries = object->trace;
- trace.skip = 1;
- save_stack_trace(&trace);
- object->trace_len = trace.nr_entries;
+ object->trace_len = __save_stack_trace(object->trace);
INIT_PRIO_TREE_NODE(&object->tree_node);
object->tree_node.start = ptr;
object->tree_node.last = ptr + size - 1;
write_lock_irqsave(&kmemleak_lock, flags);
+
min_addr = min(min_addr, ptr);
max_addr = max(max_addr, ptr + size);
node = prio_tree_insert(&object_tree_root, &object->tree_node);
@@ -504,20 +566,19 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
* random memory blocks.
*/
if (node != &object->tree_node) {
- unsigned long flags;
-
kmemleak_stop("Cannot insert 0x%lx into the object search tree "
"(already existing)\n", ptr);
object = lookup_object(ptr, 1);
- spin_lock_irqsave(&object->lock, flags);
+ spin_lock(&object->lock);
dump_object_info(object);
- spin_unlock_irqrestore(&object->lock, flags);
+ spin_unlock(&object->lock);
goto out;
}
list_add_tail_rcu(&object->object_list, &object_list);
out:
write_unlock_irqrestore(&kmemleak_lock, flags);
+ return object;
}
/*
@@ -604,46 +665,55 @@ static void delete_object_part(unsigned long ptr, size_t size)
put_object(object);
}
-/*
- * Make a object permanently as gray-colored so that it can no longer be
- * reported as a leak. This is used in general to mark a false positive.
- */
-static void make_gray_object(unsigned long ptr)
+
+static void __paint_it(struct kmemleak_object *object, int color)
+{
+ object->min_count = color;
+ if (color == KMEMLEAK_BLACK)
+ object->flags |= OBJECT_NO_SCAN;
+}
+
+static void paint_it(struct kmemleak_object *object, int color)
{
unsigned long flags;
+
+ spin_lock_irqsave(&object->lock, flags);
+ __paint_it(object, color);
+ spin_unlock_irqrestore(&object->lock, flags);
+}
+
+static void paint_ptr(unsigned long ptr, int color)
+{
struct kmemleak_object *object;
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
+ kmemleak_warn("Trying to color unknown object "
+ "at 0x%08lx as %s\n", ptr,
+ (color == KMEMLEAK_GREY) ? "Grey" :
+ (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
return;
}
-
- spin_lock_irqsave(&object->lock, flags);
- object->min_count = 0;
- spin_unlock_irqrestore(&object->lock, flags);
+ paint_it(object, color);
put_object(object);
}
/*
+ * Mark an object permanently as gray-colored so that it can no longer be
+ * reported as a leak. This is used in general to mark a false positive.
+ */
+static void make_gray_object(unsigned long ptr)
+{
+ paint_ptr(ptr, KMEMLEAK_GREY);
+}
+
+/*
* Mark the object as black-colored so that it is ignored from scans and
* reporting.
*/
static void make_black_object(unsigned long ptr)
{
- unsigned long flags;
- struct kmemleak_object *object;
-
- object = find_and_get_object(ptr, 0);
- if (!object) {
- kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
- return;
- }
-
- spin_lock_irqsave(&object->lock, flags);
- object->min_count = -1;
- spin_unlock_irqrestore(&object->lock, flags);
- put_object(object);
+ paint_ptr(ptr, KMEMLEAK_BLACK);
}
/*
@@ -715,14 +785,15 @@ static void object_no_scan(unsigned long ptr)
* Log an early kmemleak_* call to the early_log buffer. These calls will be
* processed later once kmemleak is fully initialized.
*/
-static void log_early(int op_type, const void *ptr, size_t size,
- int min_count, unsigned long offset, size_t length)
+static void __init log_early(int op_type, const void *ptr, size_t size,
+ int min_count, unsigned long offset, size_t length)
{
unsigned long flags;
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
- pr_warning("Early log buffer exceeded\n");
+ pr_warning("Early log buffer exceeded, "
+ "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
kmemleak_disable();
return;
}
@@ -739,16 +810,45 @@ static void log_early(int op_type, const void *ptr, size_t size,
log->min_count = min_count;
log->offset = offset;
log->length = length;
+ if (op_type == KMEMLEAK_ALLOC)
+ log->trace_len = __save_stack_trace(log->trace);
crt_early_log++;
local_irq_restore(flags);
}
/*
+ * Log an early allocated block and populate the stack trace.
+ */
+static void early_alloc(struct early_log *log)
+{
+ struct kmemleak_object *object;
+ unsigned long flags;
+ int i;
+
+ if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+ return;
+
+ /*
+ * RCU locking needed to ensure object is not freed via put_object().
+ */
+ rcu_read_lock();
+ object = create_object((unsigned long)log->ptr, log->size,
+ log->min_count, GFP_KERNEL);
+ spin_lock_irqsave(&object->lock, flags);
+ for (i = 0; i < log->trace_len; i++)
+ object->trace[i] = log->trace[i];
+ object->trace_len = log->trace_len;
+ spin_unlock_irqrestore(&object->lock, flags);
+ rcu_read_unlock();
+}
+
+/*
* Memory allocation function callback. This function is called from the
* kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
* vmalloc etc.).
*/
-void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
+void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp)
{
pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
@@ -763,7 +863,7 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
* Memory freeing function callback. This function is called from the kernel
* allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
*/
-void kmemleak_free(const void *ptr)
+void __ref kmemleak_free(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
@@ -778,7 +878,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free);
* Partial memory freeing function callback. This function is usually called
* from bootmem allocator when (part of) a memory block is freed.
*/
-void kmemleak_free_part(const void *ptr, size_t size)
+void __ref kmemleak_free_part(const void *ptr, size_t size)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
@@ -793,7 +893,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part);
* Mark an already allocated memory block as a false positive. This will cause
* the block to no longer be reported as leak and always be scanned.
*/
-void kmemleak_not_leak(const void *ptr)
+void __ref kmemleak_not_leak(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
@@ -809,7 +909,7 @@ EXPORT_SYMBOL(kmemleak_not_leak);
* corresponding block is not a leak and does not contain any references to
* other allocated memory blocks.
*/
-void kmemleak_ignore(const void *ptr)
+void __ref kmemleak_ignore(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
@@ -823,8 +923,8 @@ EXPORT_SYMBOL(kmemleak_ignore);
/*
* Limit the range to be scanned in an allocated memory block.
*/
-void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
- gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
+ size_t length, gfp_t gfp)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
@@ -838,7 +938,7 @@ EXPORT_SYMBOL(kmemleak_scan_area);
/*
* Inform kmemleak not to scan the given memory block.
*/
-void kmemleak_no_scan(const void *ptr)
+void __ref kmemleak_no_scan(const void *ptr)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
@@ -882,15 +982,22 @@ static void scan_block(void *_start, void *_end,
unsigned long *end = _end - (BYTES_PER_POINTER - 1);
for (ptr = start; ptr < end; ptr++) {
- unsigned long flags;
- unsigned long pointer = *ptr;
struct kmemleak_object *object;
+ unsigned long flags;
+ unsigned long pointer;
if (allow_resched)
cond_resched();
if (scan_should_stop())
break;
+ /* don't scan uninitialized memory */
+ if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
+ BYTES_PER_POINTER))
+ continue;
+
+ pointer = *ptr;
+
object = find_and_get_object(pointer, 1);
if (!object)
continue;
@@ -949,10 +1056,21 @@ static void scan_object(struct kmemleak_object *object)
if (!(object->flags & OBJECT_ALLOCATED))
/* already freed object */
goto out;
- if (hlist_empty(&object->area_list))
- scan_block((void *)object->pointer,
- (void *)(object->pointer + object->size), object, 0);
- else
+ if (hlist_empty(&object->area_list)) {
+ void *start = (void *)object->pointer;
+ void *end = (void *)(object->pointer + object->size);
+
+ while (start < end && (object->flags & OBJECT_ALLOCATED) &&
+ !(object->flags & OBJECT_NO_SCAN)) {
+ scan_block(start, min(start + MAX_SCAN_SIZE, end),
+ object, 0);
+ start += MAX_SCAN_SIZE;
+
+ spin_unlock_irqrestore(&object->lock, flags);
+ cond_resched();
+ spin_lock_irqsave(&object->lock, flags);
+ }
+ } else
hlist_for_each_entry(area, elem, &object->area_list, node)
scan_block((void *)(object->pointer + area->offset),
(void *)(object->pointer + area->offset
@@ -970,7 +1088,6 @@ static void kmemleak_scan(void)
{
unsigned long flags;
struct kmemleak_object *object, *tmp;
- struct task_struct *task;
int i;
int new_leaks = 0;
int gray_list_pass = 0;
@@ -1037,15 +1154,16 @@ static void kmemleak_scan(void)
}
/*
- * Scanning the task stacks may introduce false negatives and it is
- * not enabled by default.
+ * Scanning the task stacks (may introduce false negatives).
*/
if (kmemleak_stack_scan) {
+ struct task_struct *p, *g;
+
read_lock(&tasklist_lock);
- for_each_process(task)
- scan_block(task_stack_page(task),
- task_stack_page(task) + THREAD_SIZE,
- NULL, 0);
+ do_each_thread(g, p) {
+ scan_block(task_stack_page(p), task_stack_page(p) +
+ THREAD_SIZE, NULL, 0);
+ } while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
@@ -1170,7 +1288,7 @@ static int kmemleak_scan_thread(void *arg)
* Start the automatic memory scanning thread. This function must be called
* with the scan_mutex held.
*/
-void start_scan_thread(void)
+static void start_scan_thread(void)
{
if (scan_thread)
return;
@@ -1185,7 +1303,7 @@ void start_scan_thread(void)
* Stop the automatic memory scanning thread. This function must be called
* with the scan_mutex held.
*/
-void stop_scan_thread(void)
+static void stop_scan_thread(void)
{
if (scan_thread) {
kthread_stop(scan_thread);
@@ -1294,6 +1412,49 @@ static int kmemleak_release(struct inode *inode, struct file *file)
return seq_release(inode, file);
}
+static int dump_str_object_info(const char *str)
+{
+ unsigned long flags;
+ struct kmemleak_object *object;
+ unsigned long addr;
+
+ addr = simple_strtoul(str, NULL, 0);
+ object = find_and_get_object(addr, 0);
+ if (!object) {
+ pr_info("Unknown object at 0x%08lx\n", addr);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&object->lock, flags);
+ dump_object_info(object);
+ spin_unlock_irqrestore(&object->lock, flags);
+
+ put_object(object);
+ return 0;
+}
+
+/*
+ * We use grey instead of black to ensure we can do future scans on the same
+ * objects. If we did not do future scans, these black objects could
+ * potentially contain references to newly allocated objects in the future and
+ * we'd end up with false positives.
+ */
+static void kmemleak_clear(void)
+{
+ struct kmemleak_object *object;
+ unsigned long flags;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+ spin_lock_irqsave(&object->lock, flags);
+ if ((object->flags & OBJECT_REPORTED) &&
+ unreferenced_object(object))
+ __paint_it(object, KMEMLEAK_GREY);
+ spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+}
+
/*
* File write operation to configure kmemleak at run-time. The following
* commands can be written to the /sys/kernel/debug/kmemleak file:
@@ -1305,6 +1466,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
* scan=... - set the automatic memory scanning period in seconds (0 to
* disable it)
* scan - trigger a memory scan
+ * clear - mark all currently reported unreferenced kmemleak objects as
+ * grey so that they are no longer printed
+ * dump=... - dump information about the object found at the given address
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *ppos)
@@ -1345,6 +1509,10 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
}
} else if (strncmp(buf, "scan", 4) == 0)
kmemleak_scan();
+ else if (strncmp(buf, "clear", 5) == 0)
+ kmemleak_clear();
+ else if (strncmp(buf, "dump=", 5) == 0)
+ ret = dump_str_object_info(buf + 5);
else
ret = -EINVAL;
@@ -1371,7 +1539,7 @@ static const struct file_operations kmemleak_fops = {
* Perform the freeing of the kmemleak internal objects after waiting for any
* current memory scan to complete.
*/
-static int kmemleak_cleanup_thread(void *arg)
+static void kmemleak_do_cleanup(struct work_struct *work)
{
struct kmemleak_object *object;
@@ -1383,22 +1551,9 @@ static int kmemleak_cleanup_thread(void *arg)
delete_object_full(object->pointer);
rcu_read_unlock();
mutex_unlock(&scan_mutex);
-
- return 0;
}
-/*
- * Start the clean-up thread.
- */
-static void kmemleak_cleanup(void)
-{
- struct task_struct *cleanup_thread;
-
- cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
- "kmemleak-clean");
- if (IS_ERR(cleanup_thread))
- pr_warning("Failed to create the clean-up thread\n");
-}
+static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
/*
* Disable kmemleak. No memory allocation/freeing will be traced once this
@@ -1416,7 +1571,7 @@ static void kmemleak_disable(void)
/* check whether it is too early for a kernel thread */
if (atomic_read(&kmemleak_initialized))
- kmemleak_cleanup();
+ schedule_work(&cleanup_work);
pr_info("Kernel memory leak detector disabled\n");
}
@@ -1469,8 +1624,7 @@ void __init kmemleak_init(void)
switch (log->op_type) {
case KMEMLEAK_ALLOC:
- kmemleak_alloc(log->ptr, log->size, log->min_count,
- GFP_KERNEL);
+ early_alloc(log);
break;
case KMEMLEAK_FREE:
kmemleak_free(log->ptr);
@@ -1513,7 +1667,7 @@ static int __init kmemleak_late_init(void)
* after setting kmemleak_initialized and we may end up with
* two clean-up threads but serialized by scan_mutex.
*/
- kmemleak_cleanup();
+ schedule_work(&cleanup_work);
return -ENOMEM;
}
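
The kmemleak.c hunks above add two debugfs commands, "clear" and "dump=<addr>", alongside the existing "scan". Below is a minimal userspace sketch of how these commands might be driven; it assumes debugfs is mounted at /sys/kernel/debug, that kmemleak is built in and enabled, and the dump address is purely illustrative. It is not part of the patch.

#include <stdio.h>
#include <errno.h>

static int kmemleak_cmd(const char *cmd)
{
	/* the kmemleak control file lives in debugfs */
	FILE *f = fopen("/sys/kernel/debug/kmemleak", "w");
	int err;

	if (!f) {
		err = errno;
		perror("kmemleak open");
		return -err;
	}
	if (fputs(cmd, f) == EOF)
		perror("kmemleak write");
	fclose(f);
	return 0;
}

int main(void)
{
	kmemleak_cmd("scan");			/* trigger an immediate memory scan */
	kmemleak_cmd("clear");			/* grey out currently reported leaks */
	kmemleak_cmd("dump=0xdeadbeef");	/* hypothetical address, illustration only */
	return 0;
}
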
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 81627ebcd313..25e7770309b8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -36,15 +36,6 @@
#include <linux/pagevec.h>
/*
- * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation. We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode. Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES 1024
-
-/*
* After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
* will look to see if it needs to force writeback or throttling.
*/
@@ -117,8 +108,6 @@ EXPORT_SYMBOL(laptop_mode);
/* End of sysctl-exported parameters */
-static void background_writeout(unsigned long _min_pages);
-
/*
* Scale the writeback cache size proportional to the relative writeout speeds.
*
@@ -320,15 +309,13 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
/*
*
*/
-static DEFINE_SPINLOCK(bdi_lock);
static unsigned int bdi_min_ratio;
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&bdi_lock, flags);
+ spin_lock(&bdi_lock);
if (min_ratio > bdi->max_ratio) {
ret = -EINVAL;
} else {
@@ -340,27 +327,26 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
ret = -EINVAL;
}
}
- spin_unlock_irqrestore(&bdi_lock, flags);
+ spin_unlock(&bdi_lock);
return ret;
}
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
- unsigned long flags;
int ret = 0;
if (max_ratio > 100)
return -EINVAL;
- spin_lock_irqsave(&bdi_lock, flags);
+ spin_lock(&bdi_lock);
if (bdi->min_ratio > max_ratio) {
ret = -EINVAL;
} else {
bdi->max_ratio = max_ratio;
bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
}
- spin_unlock_irqrestore(&bdi_lock, flags);
+ spin_unlock(&bdi_lock);
return ret;
}
@@ -546,7 +532,7 @@ static void balance_dirty_pages(struct address_space *mapping)
* up.
*/
if (bdi_nr_reclaimable > bdi_thresh) {
- writeback_inodes(&wbc);
+ writeback_inodes_wbc(&wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
&bdi_thresh, bdi);
@@ -575,7 +561,7 @@ static void balance_dirty_pages(struct address_space *mapping)
if (pages_written >= write_chunk)
break; /* We've done our duty */
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ schedule_timeout(1);
}
if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
@@ -594,10 +580,18 @@ static void balance_dirty_pages(struct address_space *mapping)
* background_thresh, to keep the amount of dirty memory low.
*/
if ((laptop_mode && pages_written) ||
- (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
- + global_page_state(NR_UNSTABLE_NFS)
- > background_thresh)))
- pdflush_operation(background_writeout, 0);
+ (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
+ + global_page_state(NR_UNSTABLE_NFS))
+ > background_thresh))) {
+ struct writeback_control wbc = {
+ .bdi = bdi,
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = nr_writeback,
+ };
+
+ bdi_start_writeback(&wbc);
+ }
}
void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -681,153 +675,35 @@ void throttle_vm_writeout(gfp_t gfp_mask)
}
}
-/*
- * writeback at least _min_pages, and keep writing until the amount of dirty
- * memory is less than the background threshold, or until we're all clean.
- */
-static void background_writeout(unsigned long _min_pages)
-{
- long min_pages = _min_pages;
- struct writeback_control wbc = {
- .bdi = NULL,
- .sync_mode = WB_SYNC_NONE,
- .older_than_this = NULL,
- .nr_to_write = 0,
- .nonblocking = 1,
- .range_cyclic = 1,
- };
-
- for ( ; ; ) {
- unsigned long background_thresh;
- unsigned long dirty_thresh;
-
- get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
- if (global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) < background_thresh
- && min_pages <= 0)
- break;
- wbc.more_io = 0;
- wbc.encountered_congestion = 0;
- wbc.nr_to_write = MAX_WRITEBACK_PAGES;
- wbc.pages_skipped = 0;
- writeback_inodes(&wbc);
- min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
- if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
- /* Wrote less than expected */
- if (wbc.encountered_congestion || wbc.more_io)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
- else
- break;
- }
- }
-}
-
-/*
- * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
- * the whole world. Returns 0 if a pdflush thread was dispatched. Returns
- * -1 if all pdflush threads were busy.
- */
-int wakeup_pdflush(long nr_pages)
-{
- if (nr_pages == 0)
- nr_pages = global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS);
- return pdflush_operation(background_writeout, nr_pages);
-}
-
-static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);
-static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
/*
- * Periodic writeback of "old" data.
- *
- * Define "old": the first time one of an inode's pages is dirtied, we mark the
- * dirtying-time in the inode's address_space. So this periodic writeback code
- * just walks the superblock inode list, writing back any inodes which are
- * older than a specific point in time.
- *
- * Try to run once per dirty_writeback_interval. But if a writeback event
- * takes longer than a dirty_writeback_interval interval, then leave a
- * one-second gap.
- *
- * older_than_this takes precedence over nr_to_write. So we'll only write back
- * all dirty pages if they are all attached to "old" mappings.
- */
-static void wb_kupdate(unsigned long arg)
-{
- unsigned long oldest_jif;
- unsigned long start_jif;
- unsigned long next_jif;
- long nr_to_write;
- struct writeback_control wbc = {
- .bdi = NULL,
- .sync_mode = WB_SYNC_NONE,
- .older_than_this = &oldest_jif,
- .nr_to_write = 0,
- .nonblocking = 1,
- .for_kupdate = 1,
- .range_cyclic = 1,
- };
-
- sync_supers();
-
- oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
- start_jif = jiffies;
- next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
- nr_to_write = global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused);
- while (nr_to_write > 0) {
- wbc.more_io = 0;
- wbc.encountered_congestion = 0;
- wbc.nr_to_write = MAX_WRITEBACK_PAGES;
- writeback_inodes(&wbc);
- if (wbc.nr_to_write > 0) {
- if (wbc.encountered_congestion || wbc.more_io)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
- else
- break; /* All the old data is written */
- }
- nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
- }
- if (time_before(next_jif, jiffies + HZ))
- next_jif = jiffies + HZ;
- if (dirty_writeback_interval)
- mod_timer(&wb_timer, next_jif);
-}
-
-/*
* sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
*/
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, file, buffer, length, ppos);
- if (dirty_writeback_interval)
- mod_timer(&wb_timer, jiffies +
- msecs_to_jiffies(dirty_writeback_interval * 10));
- else
- del_timer(&wb_timer);
return 0;
}
-static void wb_timer_fn(unsigned long unused)
-{
- if (pdflush_operation(wb_kupdate, 0) < 0)
- mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
-}
-
-static void laptop_flush(unsigned long unused)
+static void do_laptop_sync(struct work_struct *work)
{
- sys_sync();
+ wakeup_flusher_threads(0);
+ kfree(work);
}
static void laptop_timer_fn(unsigned long unused)
{
- pdflush_operation(laptop_flush, 0);
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_laptop_sync);
+ schedule_work(work);
+ }
}
/*
@@ -910,8 +786,6 @@ void __init page_writeback_init(void)
{
int shift;
- mod_timer(&wb_timer,
- jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
writeback_set_ratelimit();
register_cpu_notifier(&ratelimit_nb);
diff --git a/mm/pdflush.c b/mm/pdflush.c
deleted file mode 100644
index 235ac440c44e..000000000000
--- a/mm/pdflush.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * mm/pdflush.c - worker threads for writing back filesystem data
- *
- * Copyright (C) 2002, Linus Torvalds.
- *
- * 09Apr2002 Andrew Morton
- * Initial version
- * 29Feb2004 kaos@sgi.com
- * Move worker thread creation to kthread to avoid chewing
- * up stack space with nested calls to kernel_thread.
- */
-
-#include <linux/sched.h>
-#include <linux/list.h>
-#include <linux/signal.h>
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h> /* Needed by writeback.h */
-#include <linux/writeback.h> /* Prototypes pdflush_operation() */
-#include <linux/kthread.h>
-#include <linux/cpuset.h>
-#include <linux/freezer.h>
-
-
-/*
- * Minimum and maximum number of pdflush instances
- */
-#define MIN_PDFLUSH_THREADS 2
-#define MAX_PDFLUSH_THREADS 8
-
-static void start_one_pdflush_thread(void);
-
-
-/*
- * The pdflush threads are worker threads for writing back dirty data.
- * Ideally, we'd like one thread per active disk spindle. But the disk
- * topology is very hard to divine at this level. Instead, we take
- * care in various places to prevent more than one pdflush thread from
- * performing writeback against a single filesystem. pdflush threads
- * have the PF_FLUSHER flag set in current->flags to aid in this.
- */
-
-/*
- * All the pdflush threads. Protected by pdflush_lock
- */
-static LIST_HEAD(pdflush_list);
-static DEFINE_SPINLOCK(pdflush_lock);
-
-/*
- * The count of currently-running pdflush threads. Protected
- * by pdflush_lock.
- *
- * Readable by sysctl, but not writable. Published to userspace at
- * /proc/sys/vm/nr_pdflush_threads.
- */
-int nr_pdflush_threads = 0;
-
-/*
- * The time at which the pdflush thread pool last went empty
- */
-static unsigned long last_empty_jifs;
-
-/*
- * The pdflush thread.
- *
- * Thread pool management algorithm:
- *
- * - The minimum and maximum number of pdflush instances are bound
- * by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
- *
- * - If there have been no idle pdflush instances for 1 second, create
- * a new one.
- *
- * - If the least-recently-went-to-sleep pdflush thread has been asleep
- * for more than one second, terminate a thread.
- */
-
-/*
- * A structure for passing work to a pdflush thread. Also for passing
- * state information between pdflush threads. Protected by pdflush_lock.
- */
-struct pdflush_work {
- struct task_struct *who; /* The thread */
- void (*fn)(unsigned long); /* A callback function */
- unsigned long arg0; /* An argument to the callback */
- struct list_head list; /* On pdflush_list, when idle */
- unsigned long when_i_went_to_sleep;
-};
-
-static int __pdflush(struct pdflush_work *my_work)
-{
- current->flags |= PF_FLUSHER | PF_SWAPWRITE;
- set_freezable();
- my_work->fn = NULL;
- my_work->who = current;
- INIT_LIST_HEAD(&my_work->list);
-
- spin_lock_irq(&pdflush_lock);
- for ( ; ; ) {
- struct pdflush_work *pdf;
-
- set_current_state(TASK_INTERRUPTIBLE);
- list_move(&my_work->list, &pdflush_list);
- my_work->when_i_went_to_sleep = jiffies;
- spin_unlock_irq(&pdflush_lock);
- schedule();
- try_to_freeze();
- spin_lock_irq(&pdflush_lock);
- if (!list_empty(&my_work->list)) {
- /*
- * Someone woke us up, but without removing our control
- * structure from the global list. swsusp will do this
- * in try_to_freeze()->refrigerator(). Handle it.
- */
- my_work->fn = NULL;
- continue;
- }
- if (my_work->fn == NULL) {
- printk("pdflush: bogus wakeup\n");
- continue;
- }
- spin_unlock_irq(&pdflush_lock);
-
- (*my_work->fn)(my_work->arg0);
-
- spin_lock_irq(&pdflush_lock);
-
- /*
- * Thread creation: For how long have there been zero
- * available threads?
- *
- * To throttle creation, we reset last_empty_jifs.
- */
- if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
- if (list_empty(&pdflush_list)) {
- if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
- last_empty_jifs = jiffies;
- nr_pdflush_threads++;
- spin_unlock_irq(&pdflush_lock);
- start_one_pdflush_thread();
- spin_lock_irq(&pdflush_lock);
- }
- }
- }
-
- my_work->fn = NULL;
-
- /*
- * Thread destruction: For how long has the sleepiest
- * thread slept?
- */
- if (list_empty(&pdflush_list))
- continue;
- if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
- continue;
- pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
- if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
- /* Limit exit rate */
- pdf->when_i_went_to_sleep = jiffies;
- break; /* exeunt */
- }
- }
- nr_pdflush_threads--;
- spin_unlock_irq(&pdflush_lock);
- return 0;
-}
-
-/*
- * Of course, my_work wants to be just a local in __pdflush(). It is
- * separated out in this manner to hopefully prevent the compiler from
- * performing unfortunate optimisations against the auto variables. Because
- * these are visible to other tasks and CPUs. (No problem has actually
- * been observed. This is just paranoia).
- */
-static int pdflush(void *dummy)
-{
- struct pdflush_work my_work;
- cpumask_var_t cpus_allowed;
-
- /*
- * Since the caller doesn't even check kthread_run() worked, let's not
- * freak out too much if this fails.
- */
- if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
- printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
- return 0;
- }
-
- /*
- * pdflush can spend a lot of time doing encryption via dm-crypt. We
- * don't want to do that at keventd's priority.
- */
- set_user_nice(current, 0);
-
- /*
- * Some configs put our parent kthread in a limited cpuset,
- * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
- * Our needs are more modest - cut back to our cpusets cpus_allowed.
- * This is needed as pdflush's are dynamically created and destroyed.
- * The boottime pdflush's are easily placed w/o these 2 lines.
- */
- cpuset_cpus_allowed(current, cpus_allowed);
- set_cpus_allowed_ptr(current, cpus_allowed);
- free_cpumask_var(cpus_allowed);
-
- return __pdflush(&my_work);
-}
-
-/*
- * Attempt to wake up a pdflush thread, and get it to do some work for you.
- * Returns zero if it indeed managed to find a worker thread, and passed your
- * payload to it.
- */
-int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
-{
- unsigned long flags;
- int ret = 0;
-
- BUG_ON(fn == NULL); /* Hard to diagnose if it's deferred */
-
- spin_lock_irqsave(&pdflush_lock, flags);
- if (list_empty(&pdflush_list)) {
- ret = -1;
- } else {
- struct pdflush_work *pdf;
-
- pdf = list_entry(pdflush_list.next, struct pdflush_work, list);
- list_del_init(&pdf->list);
- if (list_empty(&pdflush_list))
- last_empty_jifs = jiffies;
- pdf->fn = fn;
- pdf->arg0 = arg0;
- wake_up_process(pdf->who);
- }
- spin_unlock_irqrestore(&pdflush_lock, flags);
-
- return ret;
-}
-
-static void start_one_pdflush_thread(void)
-{
- struct task_struct *k;
-
- k = kthread_run(pdflush, NULL, "pdflush");
- if (unlikely(IS_ERR(k))) {
- spin_lock_irq(&pdflush_lock);
- nr_pdflush_threads--;
- spin_unlock_irq(&pdflush_lock);
- }
-}
-
-static int __init pdflush_init(void)
-{
- int i;
-
- /*
- * Pre-set nr_pdflush_threads... If we fail to create,
- * the count will be decremented.
- */
- nr_pdflush_threads = MIN_PDFLUSH_THREADS;
-
- for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
- start_one_pdflush_thread();
- return 0;
-}
-
-module_init(pdflush_init);
diff --git a/mm/shmem.c b/mm/shmem.c
index d713239ce2ce..5a0b3d4055f3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2446,7 +2446,7 @@ static const struct inode_operations shmem_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = shmem_check_acl,
#endif
};
@@ -2469,7 +2469,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = shmem_check_acl,
#endif
};
@@ -2480,7 +2480,7 @@ static const struct inode_operations shmem_special_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = shmem_check_acl,
#endif
};
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index 606a8e757a42..df2c87fdae50 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -157,7 +157,7 @@ shmem_acl_init(struct inode *inode, struct inode *dir)
/**
* shmem_check_acl - check_acl() callback for generic_permission()
*/
-static int
+int
shmem_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl = shmem_get_acl(inode, ACL_TYPE_ACCESS);
@@ -169,12 +169,3 @@ shmem_check_acl(struct inode *inode, int mask)
}
return -EAGAIN;
}
-
-/**
- * shmem_permission - permission() inode operation
- */
-int
-shmem_permission(struct inode *inode, int mask)
-{
- return generic_permission(inode, mask, shmem_check_acl);
-}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 42cd38eba79f..5ae6b8b78c80 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -34,6 +34,7 @@ static const struct address_space_operations swap_aops = {
};
static struct backing_dev_info swap_backing_dev_info = {
+ .name = "swap",
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
.unplug_io_fn = swap_unplug_io_fn,
};
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 94e86dd6954c..ba8228e0a806 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1720,7 +1720,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
*/
if (total_scanned > sc->swap_cluster_max +
sc->swap_cluster_max / 2) {
- wakeup_pdflush(laptop_mode ? 0 : total_scanned);
+ wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
sc->may_writepage = 1;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 6a94475aee85..278d489aad3b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1031,7 +1031,7 @@ void dev_load(struct net *net, const char *name)
dev = __dev_get_by_name(net, name);
read_unlock(&dev_base_lock);
- if (!dev && capable(CAP_SYS_MODULE))
+ if (!dev && capable(CAP_NET_ADMIN))
request_module("%s", name);
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index e92beb9e55e0..6428b342b164 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -116,7 +116,7 @@ int tcp_set_default_congestion_control(const char *name)
spin_lock(&tcp_cong_list_lock);
ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
- if (!ca && capable(CAP_SYS_MODULE)) {
+ if (!ca && capable(CAP_NET_ADMIN)) {
spin_unlock(&tcp_cong_list_lock);
request_module("tcp_%s", name);
@@ -246,7 +246,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
#ifdef CONFIG_MODULES
/* not found attempt to autoload module */
- if (!ca && capable(CAP_SYS_MODULE)) {
+ if (!ca && capable(CAP_NET_ADMIN)) {
rcu_read_unlock();
request_module("tcp_%s", name);
rcu_read_lock();
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index db73fd2a3f0e..9d2fca5ad14a 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/
sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
auth.o auth_null.o auth_unix.o auth_generic.o \
svc.o svcsock.o svcauth.o svcauth_unix.o \
- rpcb_clnt.o timer.o xdr.o \
+ addr.o rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o \
svc_xprt.o
sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
new file mode 100644
index 000000000000..22e8fd89477f
--- /dev/null
+++ b/net/sunrpc/addr.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2009, Oracle. All rights reserved.
+ *
+ * Convert socket addresses to presentation addresses and universal
+ * addresses, and vice versa.
+ *
+ * Universal addresses are introduced by RFC 1833 and further refined by
+ * recent RFCs describing NFSv4. The universal address format is part
+ * of the external (network) interface provided by rpcbind version 3
+ * and 4, and by NFSv4. Such an address is a string containing a
+ * presentation format IP address followed by a port number in
+ * "hibyte.lobyte" format.
+ *
+ * IPv6 addresses can also include a scope ID, typically denoted by
+ * a '%' followed by a device name or a non-negative integer. Refer to
+ * RFC 4291, Section 2.2 for details on IPv6 presentation formats.
+ */
+
+#include <net/ipv6.h>
+#include <linux/sunrpc/clnt.h>
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
+ char *buf, const int buflen)
+{
+ const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+ const struct in6_addr *addr = &sin6->sin6_addr;
+
+ /*
+ * RFC 4291, Section 2.2.2
+ *
+ * Shorthanded ANY address
+ */
+ if (ipv6_addr_any(addr))
+ return snprintf(buf, buflen, "::");
+
+ /*
+ * RFC 4291, Section 2.2.2
+ *
+ * Shorthanded loopback address
+ */
+ if (ipv6_addr_loopback(addr))
+ return snprintf(buf, buflen, "::1");
+
+ /*
+ * RFC 4291, Section 2.2.3
+ *
+ * Special presentation address format for mapped v4
+ * addresses.
+ */
+ if (ipv6_addr_v4mapped(addr))
+ return snprintf(buf, buflen, "::ffff:%pI4",
+ &addr->s6_addr32[3]);
+
+ /*
+ * RFC 4291, Section 2.2.1
+ *
+ * To keep the result as short as possible, especially
+ * since we don't shorthand, we don't want leading zeros
+ * in each halfword, so avoid %pI6.
+ */
+ return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x",
+ ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]),
+ ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]),
+ ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]),
+ ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7]));
+}
+
+static size_t rpc_ntop6(const struct sockaddr *sap,
+ char *buf, const size_t buflen)
+{
+ const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+ char scopebuf[IPV6_SCOPE_ID_LEN];
+ size_t len;
+ int rc;
+
+ len = rpc_ntop6_noscopeid(sap, buf, buflen);
+ if (unlikely(len == 0))
+ return len;
+
+ if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL))
+ return len;
+
+ rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
+ IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id);
+ if (unlikely((size_t)rc > sizeof(scopebuf)))
+ return 0;
+
+ len += rc;
+ if (unlikely(len > buflen))
+ return 0;
+
+ strcat(buf, scopebuf);
+ return len;
+}
+
+#else /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+
+static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
+ char *buf, const int buflen)
+{
+ return 0;
+}
+
+static size_t rpc_ntop6(const struct sockaddr *sap,
+ char *buf, const size_t buflen)
+{
+ return 0;
+}
+
+#endif /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+
+static int rpc_ntop4(const struct sockaddr *sap,
+ char *buf, const size_t buflen)
+{
+ const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+
+ return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
+}
+
+/**
+ * rpc_ntop - construct a presentation address in @buf
+ * @sap: socket address
+ * @buf: construction area
+ * @buflen: size of @buf, in bytes
+ *
+ * Plants a %NUL-terminated string in @buf and returns the length
+ * of the string, excluding the %NUL, if successful; otherwise zero
+ * is returned.
+ */
+size_t rpc_ntop(const struct sockaddr *sap, char *buf, const size_t buflen)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ return rpc_ntop4(sap, buf, buflen);
+ case AF_INET6:
+ return rpc_ntop6(sap, buf, buflen);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rpc_ntop);
+
+static size_t rpc_pton4(const char *buf, const size_t buflen,
+ struct sockaddr *sap, const size_t salen)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+ u8 *addr = (u8 *)&sin->sin_addr.s_addr;
+
+ if (buflen > INET_ADDRSTRLEN || salen < sizeof(struct sockaddr_in))
+ return 0;
+
+ memset(sap, 0, sizeof(struct sockaddr_in));
+
+ if (in4_pton(buf, buflen, addr, '\0', NULL) == 0)
+ return 0;
+
+ sin->sin_family = AF_INET;
+ return sizeof(struct sockaddr_in);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int rpc_parse_scope_id(const char *buf, const size_t buflen,
+ const char *delim, struct sockaddr_in6 *sin6)
+{
+ char *p;
+ size_t len;
+
+ if ((buf + buflen) == delim)
+ return 1;
+
+ if (*delim != IPV6_SCOPE_DELIMITER)
+ return 0;
+
+ if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL))
+ return 0;
+
+ len = (buf + buflen) - delim - 1;
+ p = kstrndup(delim + 1, len, GFP_KERNEL);
+ if (p) {
+ unsigned long scope_id = 0;
+ struct net_device *dev;
+
+ dev = dev_get_by_name(&init_net, p);
+ if (dev != NULL) {
+ scope_id = dev->ifindex;
+ dev_put(dev);
+ } else {
+ if (strict_strtoul(p, 10, &scope_id) == 0) {
+ kfree(p);
+ return 0;
+ }
+ }
+
+ kfree(p);
+
+ sin6->sin6_scope_id = scope_id;
+ return 1;
+ }
+
+ return 0;
+}
+
+static size_t rpc_pton6(const char *buf, const size_t buflen,
+ struct sockaddr *sap, const size_t salen)
+{
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+ u8 *addr = (u8 *)&sin6->sin6_addr.in6_u;
+ const char *delim;
+
+ if (buflen > (INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN) ||
+ salen < sizeof(struct sockaddr_in6))
+ return 0;
+
+ memset(sap, 0, sizeof(struct sockaddr_in6));
+
+ if (in6_pton(buf, buflen, addr, IPV6_SCOPE_DELIMITER, &delim) == 0)
+ return 0;
+
+ if (!rpc_parse_scope_id(buf, buflen, delim, sin6))
+ return 0;
+
+ sin6->sin6_family = AF_INET6;
+ return sizeof(struct sockaddr_in6);
+}
+#else
+static size_t rpc_pton6(const char *buf, const size_t buflen,
+ struct sockaddr *sap, const size_t salen)
+{
+ return 0;
+}
+#endif
+
+/**
+ * rpc_pton - Construct a sockaddr in @sap
+ * @buf: C string containing presentation format IP address
+ * @buflen: length of presentation address in bytes
+ * @sap: buffer into which to plant socket address
+ * @salen: size of buffer in bytes
+ *
+ * Plants a socket address in @sap and returns the size of the
+ * socket address if successful; otherwise zero is returned.
+ */
+size_t rpc_pton(const char *buf, const size_t buflen,
+ struct sockaddr *sap, const size_t salen)
+{
+ unsigned int i;
+
+ for (i = 0; i < buflen; i++)
+ if (buf[i] == ':')
+ return rpc_pton6(buf, buflen, sap, salen);
+ return rpc_pton4(buf, buflen, sap, salen);
+}
+EXPORT_SYMBOL_GPL(rpc_pton);
+
+/**
+ * rpc_sockaddr2uaddr - Construct a universal address string from @sap.
+ * @sap: socket address
+ *
+ * Returns a %NUL-terminated string in dynamically allocated memory;
+ * otherwise NULL is returned if an error occurred. Caller must
+ * free the returned string.
+ */
+char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
+{
+ char portbuf[RPCBIND_MAXUADDRPLEN];
+ char addrbuf[RPCBIND_MAXUADDRLEN];
+ unsigned short port;
+
+ switch (sap->sa_family) {
+ case AF_INET:
+ if (rpc_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
+ return NULL;
+ port = ntohs(((struct sockaddr_in *)sap)->sin_port);
+ break;
+ case AF_INET6:
+ if (rpc_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
+ return NULL;
+ port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+ break;
+ default:
+ return NULL;
+ }
+
+ if (snprintf(portbuf, sizeof(portbuf),
+ ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
+ return NULL;
+
+ if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
+ return NULL;
+
+ return kstrdup(addrbuf, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
+
+/**
+ * rpc_uaddr2sockaddr - convert a universal address to a socket address.
+ * @uaddr: C string containing universal address to convert
+ * @uaddr_len: length of universal address string
+ * @sap: buffer into which to plant socket address
+ * @salen: size of buffer
+ *
+ * Returns the size of the socket address if successful; otherwise
+ * zero is returned.
+ */
+size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
+ struct sockaddr *sap, const size_t salen)
+{
+ char *c, buf[RPCBIND_MAXUADDRLEN];
+ unsigned long portlo, porthi;
+ unsigned short port;
+
+ if (uaddr_len > sizeof(buf))
+ return 0;
+
+ memcpy(buf, uaddr, uaddr_len);
+
+ buf[uaddr_len] = '\n';
+ buf[uaddr_len + 1] = '\0';
+
+ c = strrchr(buf, '.');
+ if (unlikely(c == NULL))
+ return 0;
+ if (unlikely(strict_strtoul(c + 1, 10, &portlo) != 0))
+ return 0;
+ if (unlikely(portlo > 255))
+ return 0;
+
+ c[0] = '\n';
+ c[1] = '\0';
+
+ c = strrchr(buf, '.');
+ if (unlikely(c == NULL))
+ return 0;
+ if (unlikely(strict_strtoul(c + 1, 10, &porthi) != 0))
+ return 0;
+ if (unlikely(porthi > 255))
+ return 0;
+
+ port = (unsigned short)((porthi << 8) | portlo);
+
+ c[0] = '\0';
+
+ if (rpc_pton(buf, strlen(buf), sap, salen) == 0)
+ return 0;
+
+ switch (sap->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)sap)->sin_port = htons(port);
+ return sizeof(struct sockaddr_in);
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
+ return sizeof(struct sockaddr_in6);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rpc_uaddr2sockaddr);
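
The addr.c header above describes the universal address format: a presentation IP address followed by the port in "hibyte.lobyte" form, which is what rpc_sockaddr2uaddr() emits. The sketch below is a standalone userspace illustration of that encoding; it mirrors the port arithmetic rather than calling the kernel helpers, and the address and port are example values only.

#include <stdio.h>

int main(void)
{
	const char *addr = "192.168.1.5";	/* example presentation address */
	unsigned short port = 2049;		/* example port (2049 == 8 * 256 + 1) */
	char uaddr[64];

	/* append ".hibyte.lobyte", as rpc_sockaddr2uaddr() does */
	snprintf(uaddr, sizeof(uaddr), "%s.%u.%u",
		 addr, port >> 8, port & 0xff);
	printf("%s\n", uaddr);			/* prints 192.168.1.5.8.1 */
	return 0;
}
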
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 66d458fc6920..fc6a43ccd950 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -89,8 +89,8 @@ static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
static void gss_free_ctx(struct gss_cl_ctx *);
-static struct rpc_pipe_ops gss_upcall_ops_v0;
-static struct rpc_pipe_ops gss_upcall_ops_v1;
+static const struct rpc_pipe_ops gss_upcall_ops_v0;
+static const struct rpc_pipe_ops gss_upcall_ops_v1;
static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
@@ -777,7 +777,7 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
* that we supported only the old pipe. So we instead create
* the new pipe first.
*/
- gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_dentry,
+ gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_path.dentry,
"gssd",
clnt, &gss_upcall_ops_v1,
RPC_PIPE_WAIT_FOR_OPEN);
@@ -786,7 +786,7 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
goto err_put_mech;
}
- gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_dentry,
+ gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_path.dentry,
gss_auth->mech->gm_name,
clnt, &gss_upcall_ops_v0,
RPC_PIPE_WAIT_FOR_OPEN);
@@ -1507,7 +1507,7 @@ static const struct rpc_credops gss_nullops = {
.crunwrap_resp = gss_unwrap_resp,
};
-static struct rpc_pipe_ops gss_upcall_ops_v0 = {
+static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
.upcall = gss_pipe_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
@@ -1515,7 +1515,7 @@ static struct rpc_pipe_ops gss_upcall_ops_v0 = {
.release_pipe = gss_pipe_release,
};
-static struct rpc_pipe_ops gss_upcall_ops_v1 = {
+static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
.upcall = gss_pipe_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 2278a50c6444..2e6a148d277c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -181,6 +181,11 @@ static void rsi_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
+}
+
static int rsi_parse(struct cache_detail *cd,
char *mesg, int mlen)
@@ -270,7 +275,7 @@ static struct cache_detail rsi_cache = {
.hash_table = rsi_table,
.name = "auth.rpcsec.init",
.cache_put = rsi_put,
- .cache_request = rsi_request,
+ .cache_upcall = rsi_upcall,
.cache_parse = rsi_parse,
.match = rsi_match,
.init = rsi_init,
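
The svcauth_gss.c change above shows the new upcall convention: instead of exposing a raw .cache_request formatter, a cache now supplies a .cache_upcall method that hands its formatter to sunrpc_cache_pipe_upcall(), as rsi_upcall() does for rsi_cache. A hedged sketch of the same pattern for some other cache follows; the names my_cache, my_request and my_upcall are hypothetical and not part of the patch.

/* Sketch only: a hypothetical cache following the rsi_upcall() pattern above. */
static void my_request(struct cache_detail *cd, struct cache_head *h,
		       char **bpp, int *blen)
{
	/* format the upcall payload into the buffer, as the old
	 * .cache_request callback did */
}

static int my_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, my_request);
}

static struct cache_detail my_cache = {
	/* ...other fields as before... */
	.cache_upcall	= my_upcall,	/* replaces .cache_request = my_request */
};
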
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index ff0c23053d2f..45cdaff9b361 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -27,10 +27,12 @@
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
#define RPCDBG_FACILITY RPCDBG_CACHE
@@ -175,7 +177,13 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
-static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
+static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ if (!cd->cache_upcall)
+ return -EINVAL;
+ return cd->cache_upcall(cd, h);
+}
+
/*
* This is the generic cache management routine for all
* the authentication caches.
@@ -284,76 +292,11 @@ static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;
-static const struct file_operations cache_file_operations;
-static const struct file_operations content_file_operations;
-static const struct file_operations cache_flush_operations;
-
static void do_cache_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
-static void remove_cache_proc_entries(struct cache_detail *cd)
-{
- if (cd->proc_ent == NULL)
- return;
- if (cd->flush_ent)
- remove_proc_entry("flush", cd->proc_ent);
- if (cd->channel_ent)
- remove_proc_entry("channel", cd->proc_ent);
- if (cd->content_ent)
- remove_proc_entry("content", cd->proc_ent);
- cd->proc_ent = NULL;
- remove_proc_entry(cd->name, proc_net_rpc);
-}
-
-#ifdef CONFIG_PROC_FS
-static int create_cache_proc_entries(struct cache_detail *cd)
-{
- struct proc_dir_entry *p;
-
- cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
- if (cd->proc_ent == NULL)
- goto out_nomem;
- cd->channel_ent = cd->content_ent = NULL;
-
- p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
- cd->proc_ent, &cache_flush_operations, cd);
- cd->flush_ent = p;
- if (p == NULL)
- goto out_nomem;
-
- if (cd->cache_request || cd->cache_parse) {
- p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
- cd->proc_ent, &cache_file_operations, cd);
- cd->channel_ent = p;
- if (p == NULL)
- goto out_nomem;
- }
- if (cd->cache_show) {
- p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
- cd->proc_ent, &content_file_operations, cd);
- cd->content_ent = p;
- if (p == NULL)
- goto out_nomem;
- }
- return 0;
-out_nomem:
- remove_cache_proc_entries(cd);
- return -ENOMEM;
-}
-#else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd)
-{
- return 0;
-}
-#endif
-
-int cache_register(struct cache_detail *cd)
+static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
- int ret;
-
- ret = create_cache_proc_entries(cd);
- if (ret)
- return ret;
rwlock_init(&cd->hash_lock);
INIT_LIST_HEAD(&cd->queue);
spin_lock(&cache_list_lock);
@@ -367,11 +310,9 @@ int cache_register(struct cache_detail *cd)
/* start the cleaning process */
schedule_delayed_work(&cache_cleaner, 0);
- return 0;
}
-EXPORT_SYMBOL_GPL(cache_register);
-void cache_unregister(struct cache_detail *cd)
+static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
cache_purge(cd);
spin_lock(&cache_list_lock);
@@ -386,7 +327,6 @@ void cache_unregister(struct cache_detail *cd)
list_del_init(&cd->others);
write_unlock(&cd->hash_lock);
spin_unlock(&cache_list_lock);
- remove_cache_proc_entries(cd);
if (list_empty(&cache_list)) {
/* module must be being unloaded so its safe to kill the worker */
cancel_delayed_work_sync(&cache_cleaner);
@@ -395,7 +335,6 @@ void cache_unregister(struct cache_detail *cd)
out:
printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}
-EXPORT_SYMBOL_GPL(cache_unregister);
/* clean cache tries to find something to clean
* and cleans it.
@@ -687,18 +626,18 @@ struct cache_reader {
int offset; /* if non-0, we have a refcnt on next request */
};
-static ssize_t
-cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *ppos, struct cache_detail *cd)
{
struct cache_reader *rp = filp->private_data;
struct cache_request *rq;
- struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+ struct inode *inode = filp->f_path.dentry->d_inode;
int err;
if (count == 0)
return 0;
- mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
+ mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
* readers on this file */
again:
spin_lock(&queue_lock);
@@ -711,7 +650,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
}
if (rp->q.list.next == &cd->queue) {
spin_unlock(&queue_lock);
- mutex_unlock(&queue_io_mutex);
+ mutex_unlock(&inode->i_mutex);
BUG_ON(rp->offset);
return 0;
}
@@ -758,49 +697,90 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
}
if (err == -EAGAIN)
goto again;
- mutex_unlock(&queue_io_mutex);
+ mutex_unlock(&inode->i_mutex);
return err ? err : count;
}
-static char write_buf[8192]; /* protected by queue_io_mutex */
+static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
+ size_t count, struct cache_detail *cd)
+{
+ ssize_t ret;
-static ssize_t
-cache_write(struct file *filp, const char __user *buf, size_t count,
- loff_t *ppos)
+ if (copy_from_user(kaddr, buf, count))
+ return -EFAULT;
+ kaddr[count] = '\0';
+ ret = cd->cache_parse(cd, kaddr, count);
+ if (!ret)
+ ret = count;
+ return ret;
+}
+
+static ssize_t cache_slow_downcall(const char __user *buf,
+ size_t count, struct cache_detail *cd)
{
- int err;
- struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+ static char write_buf[8192]; /* protected by queue_io_mutex */
+ ssize_t ret = -EINVAL;
- if (count == 0)
- return 0;
if (count >= sizeof(write_buf))
- return -EINVAL;
-
+ goto out;
mutex_lock(&queue_io_mutex);
+ ret = cache_do_downcall(write_buf, buf, count, cd);
+ mutex_unlock(&queue_io_mutex);
+out:
+ return ret;
+}
- if (copy_from_user(write_buf, buf, count)) {
- mutex_unlock(&queue_io_mutex);
- return -EFAULT;
- }
- write_buf[count] = '\0';
- if (cd->cache_parse)
- err = cd->cache_parse(cd, write_buf, count);
- else
- err = -EINVAL;
+static ssize_t cache_downcall(struct address_space *mapping,
+ const char __user *buf,
+ size_t count, struct cache_detail *cd)
+{
+ struct page *page;
+ char *kaddr;
+ ssize_t ret = -ENOMEM;
+
+ if (count >= PAGE_CACHE_SIZE)
+ goto out_slow;
+
+ page = find_or_create_page(mapping, 0, GFP_KERNEL);
+ if (!page)
+ goto out_slow;
+
+ kaddr = kmap(page);
+ ret = cache_do_downcall(kaddr, buf, count, cd);
+ kunmap(page);
+ unlock_page(page);
+ page_cache_release(page);
+ return ret;
+out_slow:
+ return cache_slow_downcall(buf, count, cd);
+}
- mutex_unlock(&queue_io_mutex);
- return err ? err : count;
+static ssize_t cache_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos,
+ struct cache_detail *cd)
+{
+ struct address_space *mapping = filp->f_mapping;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ ssize_t ret = -EINVAL;
+
+ if (!cd->cache_parse)
+ goto out;
+
+ mutex_lock(&inode->i_mutex);
+ ret = cache_downcall(mapping, buf, count, cd);
+ mutex_unlock(&inode->i_mutex);
+out:
+ return ret;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
-static unsigned int
-cache_poll(struct file *filp, poll_table *wait)
+static unsigned int cache_poll(struct file *filp, poll_table *wait,
+ struct cache_detail *cd)
{
unsigned int mask;
struct cache_reader *rp = filp->private_data;
struct cache_queue *cq;
- struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
poll_wait(filp, &queue_wait, wait);
@@ -822,14 +802,13 @@ cache_poll(struct file *filp, poll_table *wait)
return mask;
}
-static int
-cache_ioctl(struct inode *ino, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int cache_ioctl(struct inode *ino, struct file *filp,
+ unsigned int cmd, unsigned long arg,
+ struct cache_detail *cd)
{
int len = 0;
struct cache_reader *rp = filp->private_data;
struct cache_queue *cq;
- struct cache_detail *cd = PDE(ino)->data;
if (cmd != FIONREAD || !rp)
return -EINVAL;
@@ -852,15 +831,15 @@ cache_ioctl(struct inode *ino, struct file *filp,
return put_user(len, (int __user *)arg);
}
-static int
-cache_open(struct inode *inode, struct file *filp)
+static int cache_open(struct inode *inode, struct file *filp,
+ struct cache_detail *cd)
{
struct cache_reader *rp = NULL;
+ if (!cd || !try_module_get(cd->owner))
+ return -EACCES;
nonseekable_open(inode, filp);
if (filp->f_mode & FMODE_READ) {
- struct cache_detail *cd = PDE(inode)->data;
-
rp = kmalloc(sizeof(*rp), GFP_KERNEL);
if (!rp)
return -ENOMEM;
@@ -875,11 +854,10 @@ cache_open(struct inode *inode, struct file *filp)
return 0;
}
-static int
-cache_release(struct inode *inode, struct file *filp)
+static int cache_release(struct inode *inode, struct file *filp,
+ struct cache_detail *cd)
{
struct cache_reader *rp = filp->private_data;
- struct cache_detail *cd = PDE(inode)->data;
if (rp) {
spin_lock(&queue_lock);
@@ -903,23 +881,12 @@ cache_release(struct inode *inode, struct file *filp)
cd->last_close = get_seconds();
atomic_dec(&cd->readers);
}
+ module_put(cd->owner);
return 0;
}
-static const struct file_operations cache_file_operations = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = cache_read,
- .write = cache_write,
- .poll = cache_poll,
- .ioctl = cache_ioctl, /* for FIONREAD */
- .open = cache_open,
- .release = cache_release,
-};
-
-
static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
struct cache_queue *cq;
@@ -1020,15 +987,21 @@ static void warn_no_listener(struct cache_detail *detail)
if (detail->last_warn != detail->last_close) {
detail->last_warn = detail->last_close;
if (detail->warn_no_listener)
- detail->warn_no_listener(detail);
+ detail->warn_no_listener(detail, detail->last_close != 0);
}
}
/*
- * register an upcall request to user-space.
+ * register an upcall request to user-space and queue it up for read() by the
+ * upcall daemon.
+ *
* Each request is at most one page long.
*/
-static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
+int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
+ void (*cache_request)(struct cache_detail *,
+ struct cache_head *,
+ char **,
+ int *))
{
char *buf;
@@ -1036,9 +1009,6 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
char *bp;
int len;
- if (detail->cache_request == NULL)
- return -EINVAL;
-
if (atomic_read(&detail->readers) == 0 &&
detail->last_close < get_seconds() - 30) {
warn_no_listener(detail);
@@ -1057,7 +1027,7 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
bp = buf; len = PAGE_SIZE;
- detail->cache_request(detail, h, &bp, &len);
+ cache_request(detail, h, &bp, &len);
if (len < 0) {
kfree(buf);
@@ -1075,6 +1045,7 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
wake_up(&queue_wait);
return 0;
}
+EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
/*
* parse a message from user-space and pass it
@@ -1242,11 +1213,13 @@ static const struct seq_operations cache_content_op = {
.show = c_show,
};
-static int content_open(struct inode *inode, struct file *file)
+static int content_open(struct inode *inode, struct file *file,
+ struct cache_detail *cd)
{
struct handle *han;
- struct cache_detail *cd = PDE(inode)->data;
+ if (!cd || !try_module_get(cd->owner))
+ return -EACCES;
han = __seq_open_private(file, &cache_content_op, sizeof(*han));
if (han == NULL)
return -ENOMEM;
@@ -1255,17 +1228,33 @@ static int content_open(struct inode *inode, struct file *file)
return 0;
}
-static const struct file_operations content_file_operations = {
- .open = content_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
-};
+static int content_release(struct inode *inode, struct file *file,
+ struct cache_detail *cd)
+{
+ int ret = seq_release_private(inode, file);
+ module_put(cd->owner);
+ return ret;
+}
+
+static int open_flush(struct inode *inode, struct file *file,
+ struct cache_detail *cd)
+{
+ if (!cd || !try_module_get(cd->owner))
+ return -EACCES;
+ return nonseekable_open(inode, file);
+}
+
+static int release_flush(struct inode *inode, struct file *file,
+ struct cache_detail *cd)
+{
+ module_put(cd->owner);
+ return 0;
+}
static ssize_t read_flush(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos,
+ struct cache_detail *cd)
{
- struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
char tbuf[20];
unsigned long p = *ppos;
size_t len;
@@ -1283,10 +1272,10 @@ static ssize_t read_flush(struct file *file, char __user *buf,
return len;
}
-static ssize_t write_flush(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
+static ssize_t write_flush(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos,
+ struct cache_detail *cd)
{
- struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
char tbuf[20];
char *ep;
long flushtime;
@@ -1307,8 +1296,343 @@ static ssize_t write_flush(struct file * file, const char __user * buf,
return count;
}
-static const struct file_operations cache_flush_operations = {
- .open = nonseekable_open,
- .read = read_flush,
- .write = write_flush,
+static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+
+ return cache_read(filp, buf, count, ppos, cd);
+}
+
+static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+
+ return cache_write(filp, buf, count, ppos, cd);
+}
+
+static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
+{
+ struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+
+ return cache_poll(filp, wait, cd);
+}
+
+static int cache_ioctl_procfs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cache_detail *cd = PDE(inode)->data;
+
+ return cache_ioctl(inode, filp, cmd, arg, cd);
+}
+
+static int cache_open_procfs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = PDE(inode)->data;
+
+ return cache_open(inode, filp, cd);
+}
+
+static int cache_release_procfs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = PDE(inode)->data;
+
+ return cache_release(inode, filp, cd);
+}
+
+static const struct file_operations cache_file_operations_procfs = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = cache_read_procfs,
+ .write = cache_write_procfs,
+ .poll = cache_poll_procfs,
+ .ioctl = cache_ioctl_procfs, /* for FIONREAD */
+ .open = cache_open_procfs,
+ .release = cache_release_procfs,
};
+
+static int content_open_procfs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = PDE(inode)->data;
+
+ return content_open(inode, filp, cd);
+}
+
+static int content_release_procfs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = PDE(inode)->data;
+
+ return content_release(inode, filp, cd);
+}
+
+static const struct file_operations content_file_operations_procfs = {
+ .open = content_open_procfs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = content_release_procfs,
+};
+
+static int open_flush_procfs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = PDE(inode)->data;
+
+ return open_flush(inode, filp, cd);
+}
+
+static int release_flush_procfs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = PDE(inode)->data;
+
+ return release_flush(inode, filp, cd);
+}
+
+static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+
+ return read_flush(filp, buf, count, ppos, cd);
+}
+
+static ssize_t write_flush_procfs(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
+
+ return write_flush(filp, buf, count, ppos, cd);
+}
+
+static const struct file_operations cache_flush_operations_procfs = {
+ .open = open_flush_procfs,
+ .read = read_flush_procfs,
+ .write = write_flush_procfs,
+ .release = release_flush_procfs,
+};
+
+static void remove_cache_proc_entries(struct cache_detail *cd)
+{
+ if (cd->u.procfs.proc_ent == NULL)
+ return;
+ if (cd->u.procfs.flush_ent)
+ remove_proc_entry("flush", cd->u.procfs.proc_ent);
+ if (cd->u.procfs.channel_ent)
+ remove_proc_entry("channel", cd->u.procfs.proc_ent);
+ if (cd->u.procfs.content_ent)
+ remove_proc_entry("content", cd->u.procfs.proc_ent);
+ cd->u.procfs.proc_ent = NULL;
+ remove_proc_entry(cd->name, proc_net_rpc);
+}
+
+#ifdef CONFIG_PROC_FS
+static int create_cache_proc_entries(struct cache_detail *cd)
+{
+ struct proc_dir_entry *p;
+
+ cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
+ if (cd->u.procfs.proc_ent == NULL)
+ goto out_nomem;
+ cd->u.procfs.channel_ent = NULL;
+ cd->u.procfs.content_ent = NULL;
+
+ p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
+ cd->u.procfs.proc_ent,
+ &cache_flush_operations_procfs, cd);
+ cd->u.procfs.flush_ent = p;
+ if (p == NULL)
+ goto out_nomem;
+
+ if (cd->cache_upcall || cd->cache_parse) {
+ p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
+ cd->u.procfs.proc_ent,
+ &cache_file_operations_procfs, cd);
+ cd->u.procfs.channel_ent = p;
+ if (p == NULL)
+ goto out_nomem;
+ }
+ if (cd->cache_show) {
+ p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
+ cd->u.procfs.proc_ent,
+ &content_file_operations_procfs, cd);
+ cd->u.procfs.content_ent = p;
+ if (p == NULL)
+ goto out_nomem;
+ }
+ return 0;
+out_nomem:
+ remove_cache_proc_entries(cd);
+ return -ENOMEM;
+}
+#else /* CONFIG_PROC_FS */
+static int create_cache_proc_entries(struct cache_detail *cd)
+{
+ return 0;
+}
+#endif
+
+int cache_register(struct cache_detail *cd)
+{
+ int ret;
+
+ sunrpc_init_cache_detail(cd);
+ ret = create_cache_proc_entries(cd);
+ if (ret)
+ sunrpc_destroy_cache_detail(cd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cache_register);
+
+void cache_unregister(struct cache_detail *cd)
+{
+ remove_cache_proc_entries(cd);
+ sunrpc_destroy_cache_detail(cd);
+}
+EXPORT_SYMBOL_GPL(cache_unregister);
+
+static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+
+ return cache_read(filp, buf, count, ppos, cd);
+}
+
+static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+
+ return cache_write(filp, buf, count, ppos, cd);
+}
+
+static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
+{
+ struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+
+ return cache_poll(filp, wait, cd);
+}
+
+static int cache_ioctl_pipefs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cache_detail *cd = RPC_I(inode)->private;
+
+ return cache_ioctl(inode, filp, cmd, arg, cd);
+}
+
+static int cache_open_pipefs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = RPC_I(inode)->private;
+
+ return cache_open(inode, filp, cd);
+}
+
+static int cache_release_pipefs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = RPC_I(inode)->private;
+
+ return cache_release(inode, filp, cd);
+}
+
+const struct file_operations cache_file_operations_pipefs = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = cache_read_pipefs,
+ .write = cache_write_pipefs,
+ .poll = cache_poll_pipefs,
+ .ioctl = cache_ioctl_pipefs, /* for FIONREAD */
+ .open = cache_open_pipefs,
+ .release = cache_release_pipefs,
+};
+
+static int content_open_pipefs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = RPC_I(inode)->private;
+
+ return content_open(inode, filp, cd);
+}
+
+static int content_release_pipefs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = RPC_I(inode)->private;
+
+ return content_release(inode, filp, cd);
+}
+
+const struct file_operations content_file_operations_pipefs = {
+ .open = content_open_pipefs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = content_release_pipefs,
+};
+
+static int open_flush_pipefs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = RPC_I(inode)->private;
+
+ return open_flush(inode, filp, cd);
+}
+
+static int release_flush_pipefs(struct inode *inode, struct file *filp)
+{
+ struct cache_detail *cd = RPC_I(inode)->private;
+
+ return release_flush(inode, filp, cd);
+}
+
+static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+
+ return read_flush(filp, buf, count, ppos, cd);
+}
+
+static ssize_t write_flush_pipefs(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
+
+ return write_flush(filp, buf, count, ppos, cd);
+}
+
+const struct file_operations cache_flush_operations_pipefs = {
+ .open = open_flush_pipefs,
+ .read = read_flush_pipefs,
+ .write = write_flush_pipefs,
+ .release = release_flush_pipefs,
+};
+
+int sunrpc_cache_register_pipefs(struct dentry *parent,
+ const char *name, mode_t umode,
+ struct cache_detail *cd)
+{
+ struct qstr q;
+ struct dentry *dir;
+ int ret = 0;
+
+ sunrpc_init_cache_detail(cd);
+ q.name = name;
+ q.len = strlen(name);
+ q.hash = full_name_hash(q.name, q.len);
+ dir = rpc_create_cache_dir(parent, &q, umode, cd);
+ if (!IS_ERR(dir))
+ cd->u.pipefs.dir = dir;
+ else {
+ sunrpc_destroy_cache_detail(cd);
+ ret = PTR_ERR(dir);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
+
+void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
+{
+ rpc_remove_cache_dir(cd->u.pipefs.dir);
+ cd->u.pipefs.dir = NULL;
+ sunrpc_destroy_cache_detail(cd);
+}
+EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
+
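The hunk above splits the old /proc-only cache interface into two sets of thin wrappers around a common core: the *_procfs entry points recover the cache_detail from the proc_dir_entry private data, the *_pipefs ones recover it from the rpc_inode, and cache_register()/sunrpc_cache_register_pipefs() create the matching "channel", "content" and "flush" nodes. A minimal sketch of how a cache owner would hook into the new pipefs variant, assuming a hypothetical foo_cache cache_detail and a pre-existing rpc_pipefs directory dentry (neither is part of this patch):

static struct cache_detail foo_cache;	/* assumed, initialised elsewhere */

static int foo_cache_init(struct dentry *rpc_foo_dir)
{
	/* cache_register(&foo_cache) would instead create the same three
	 * files under /proc/net/rpc/<name>/ */
	return sunrpc_cache_register_pipefs(rpc_foo_dir, "foo",
					    S_IRUGO | S_IXUGO, &foo_cache);
}

static void foo_cache_exit(void)
{
	sunrpc_cache_unregister_pipefs(&foo_cache);
}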
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index df1039f077c2..fac0ca93f06b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -27,6 +27,8 @@
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
@@ -97,33 +99,49 @@ static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
static uint32_t clntid;
+ struct nameidata nd;
+ struct path path;
+ char name[15];
+ struct qstr q = {
+ .name = name,
+ };
int error;
- clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
- clnt->cl_dentry = ERR_PTR(-ENOENT);
+ clnt->cl_path.mnt = ERR_PTR(-ENOENT);
+ clnt->cl_path.dentry = ERR_PTR(-ENOENT);
if (dir_name == NULL)
return 0;
- clnt->cl_vfsmnt = rpc_get_mount();
- if (IS_ERR(clnt->cl_vfsmnt))
- return PTR_ERR(clnt->cl_vfsmnt);
+ path.mnt = rpc_get_mount();
+ if (IS_ERR(path.mnt))
+ return PTR_ERR(path.mnt);
+ error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd);
+ if (error)
+ goto err;
for (;;) {
- snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
- "%s/clnt%x", dir_name,
- (unsigned int)clntid++);
- clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
- clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
- if (!IS_ERR(clnt->cl_dentry))
- return 0;
- error = PTR_ERR(clnt->cl_dentry);
+ q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
+ name[sizeof(name) - 1] = '\0';
+ q.hash = full_name_hash(q.name, q.len);
+ path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt);
+ if (!IS_ERR(path.dentry))
+ break;
+ error = PTR_ERR(path.dentry);
if (error != -EEXIST) {
- printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
- clnt->cl_pathname, error);
- rpc_put_mount();
- return error;
+ printk(KERN_INFO "RPC: Couldn't create pipefs entry"
+ " %s/%s, error %d\n",
+ dir_name, name, error);
+ goto err_path_put;
}
}
+ path_put(&nd.path);
+ clnt->cl_path = path;
+ return 0;
+err_path_put:
+ path_put(&nd.path);
+err:
+ rpc_put_mount();
+ return error;
}
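rpc_setup_pipedir() now resolves dir_name inside rpc_pipefs with vfs_path_lookup() and hands rpc_create_client_dir() a pre-hashed qstr rather than a flat pathname string. The same three-line qstr initialisation recurs throughout this series; a hypothetical helper (not in the patch) makes the pattern explicit:

static void rpc_init_qstr(struct qstr *q, const char *name)
{
	q->name = name;
	q->len = strlen(name);
	q->hash = full_name_hash(q->name, q->len);
}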
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
@@ -231,8 +249,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
return clnt;
out_no_auth:
- if (!IS_ERR(clnt->cl_dentry)) {
- rpc_rmdir(clnt->cl_dentry);
+ if (!IS_ERR(clnt->cl_path.dentry)) {
+ rpc_remove_client_dir(clnt->cl_path.dentry);
rpc_put_mount();
}
out_no_path:
@@ -423,8 +441,8 @@ rpc_free_client(struct kref *kref)
dprintk("RPC: destroying %s client for %s\n",
clnt->cl_protname, clnt->cl_server);
- if (!IS_ERR(clnt->cl_dentry)) {
- rpc_rmdir(clnt->cl_dentry);
+ if (!IS_ERR(clnt->cl_path.dentry)) {
+ rpc_remove_client_dir(clnt->cl_path.dentry);
rpc_put_mount();
}
if (clnt->cl_parent != clnt) {
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9ced0628d69c..7f676bdf70d3 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -26,6 +26,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/sunrpc/cache.h>
static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;
@@ -125,7 +126,7 @@ static void
rpc_close_pipes(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
- struct rpc_pipe_ops *ops;
+ const struct rpc_pipe_ops *ops;
int need_release;
mutex_lock(&inode->i_mutex);
@@ -398,66 +399,12 @@ static const struct file_operations rpc_info_operations = {
/*
- * We have a single directory with 1 node in it.
- */
-enum {
- RPCAUTH_Root = 1,
- RPCAUTH_lockd,
- RPCAUTH_mount,
- RPCAUTH_nfs,
- RPCAUTH_portmap,
- RPCAUTH_statd,
- RPCAUTH_nfsd4_cb,
- RPCAUTH_RootEOF
-};
-
-/*
* Description of fs contents.
*/
struct rpc_filelist {
- char *name;
+ const char *name;
const struct file_operations *i_fop;
- int mode;
-};
-
-static struct rpc_filelist files[] = {
- [RPCAUTH_lockd] = {
- .name = "lockd",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
- },
- [RPCAUTH_mount] = {
- .name = "mount",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
- },
- [RPCAUTH_nfs] = {
- .name = "nfs",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
- },
- [RPCAUTH_portmap] = {
- .name = "portmap",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
- },
- [RPCAUTH_statd] = {
- .name = "statd",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
- },
- [RPCAUTH_nfsd4_cb] = {
- .name = "nfsd4_cb",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
- },
-};
-
-enum {
- RPCAUTH_info = 2,
- RPCAUTH_EOF
-};
-
-static struct rpc_filelist authfiles[] = {
- [RPCAUTH_info] = {
- .name = "info",
- .i_fop = &rpc_info_operations,
- .mode = S_IFREG | S_IRUSR,
- },
+ umode_t mode;
};
struct vfsmount *rpc_get_mount(void)
@@ -469,11 +416,13 @@ struct vfsmount *rpc_get_mount(void)
return ERR_PTR(err);
return rpc_mount;
}
+EXPORT_SYMBOL_GPL(rpc_get_mount);
void rpc_put_mount(void)
{
simple_release_fs(&rpc_mount, &rpc_mount_count);
}
+EXPORT_SYMBOL_GPL(rpc_put_mount);
static int rpc_delete_dentry(struct dentry *dentry)
{
@@ -484,39 +433,8 @@ static const struct dentry_operations rpc_dentry_operations = {
.d_delete = rpc_delete_dentry,
};
-static int
-rpc_lookup_parent(char *path, struct nameidata *nd)
-{
- struct vfsmount *mnt;
-
- if (path[0] == '\0')
- return -ENOENT;
-
- mnt = rpc_get_mount();
- if (IS_ERR(mnt)) {
- printk(KERN_WARNING "%s: %s failed to mount "
- "pseudofilesystem \n", __FILE__, __func__);
- return PTR_ERR(mnt);
- }
-
- if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
- printk(KERN_WARNING "%s: %s failed to find path %s\n",
- __FILE__, __func__, path);
- rpc_put_mount();
- return -ENOENT;
- }
- return 0;
-}
-
-static void
-rpc_release_path(struct nameidata *nd)
-{
- path_put(&nd->path);
- rpc_put_mount();
-}
-
static struct inode *
-rpc_get_inode(struct super_block *sb, int mode)
+rpc_get_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode = new_inode(sb);
if (!inode)
@@ -534,212 +452,274 @@ rpc_get_inode(struct super_block *sb, int mode)
return inode;
}
-/*
- * FIXME: This probably has races.
- */
-static void rpc_depopulate(struct dentry *parent,
- unsigned long start, unsigned long eof)
+static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
+ umode_t mode,
+ const struct file_operations *i_fop,
+ void *private)
{
- struct inode *dir = parent->d_inode;
- struct list_head *pos, *next;
- struct dentry *dentry, *dvec[10];
- int n = 0;
+ struct inode *inode;
- mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
-repeat:
- spin_lock(&dcache_lock);
- list_for_each_safe(pos, next, &parent->d_subdirs) {
- dentry = list_entry(pos, struct dentry, d_u.d_child);
- if (!dentry->d_inode ||
- dentry->d_inode->i_ino < start ||
- dentry->d_inode->i_ino >= eof)
- continue;
- spin_lock(&dentry->d_lock);
- if (!d_unhashed(dentry)) {
- dget_locked(dentry);
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
- dvec[n++] = dentry;
- if (n == ARRAY_SIZE(dvec))
- break;
- } else
- spin_unlock(&dentry->d_lock);
- }
- spin_unlock(&dcache_lock);
- if (n) {
- do {
- dentry = dvec[--n];
- if (S_ISREG(dentry->d_inode->i_mode))
- simple_unlink(dir, dentry);
- else if (S_ISDIR(dentry->d_inode->i_mode))
- simple_rmdir(dir, dentry);
- d_delete(dentry);
- dput(dentry);
- } while (n);
- goto repeat;
- }
- mutex_unlock(&dir->i_mutex);
+ BUG_ON(!d_unhashed(dentry));
+ inode = rpc_get_inode(dir->i_sb, mode);
+ if (!inode)
+ goto out_err;
+ inode->i_ino = iunique(dir->i_sb, 100);
+ if (i_fop)
+ inode->i_fop = i_fop;
+ if (private)
+ rpc_inode_setowner(inode, private);
+ d_add(dentry, inode);
+ return 0;
+out_err:
+ printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
+ __FILE__, __func__, dentry->d_name.name);
+ dput(dentry);
+ return -ENOMEM;
}
-static int
-rpc_populate(struct dentry *parent,
- struct rpc_filelist *files,
- int start, int eof)
+static int __rpc_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode,
+ const struct file_operations *i_fop,
+ void *private)
{
- struct inode *inode, *dir = parent->d_inode;
- void *private = RPC_I(dir)->private;
- struct dentry *dentry;
- int mode, i;
+ int err;
- mutex_lock(&dir->i_mutex);
- for (i = start; i < eof; i++) {
- dentry = d_alloc_name(parent, files[i].name);
- if (!dentry)
- goto out_bad;
- dentry->d_op = &rpc_dentry_operations;
- mode = files[i].mode;
- inode = rpc_get_inode(dir->i_sb, mode);
- if (!inode) {
- dput(dentry);
- goto out_bad;
- }
- inode->i_ino = i;
- if (files[i].i_fop)
- inode->i_fop = files[i].i_fop;
- if (private)
- rpc_inode_setowner(inode, private);
- if (S_ISDIR(mode))
- inc_nlink(dir);
- d_add(dentry, inode);
- fsnotify_create(dir, dentry);
- }
- mutex_unlock(&dir->i_mutex);
+ err = __rpc_create_common(dir, dentry, S_IFREG | mode, i_fop, private);
+ if (err)
+ return err;
+ fsnotify_create(dir, dentry);
return 0;
-out_bad:
- mutex_unlock(&dir->i_mutex);
- printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
- __FILE__, __func__, parent->d_name.name);
- return -ENOMEM;
}
-static int
-__rpc_mkdir(struct inode *dir, struct dentry *dentry)
+static int __rpc_mkdir(struct inode *dir, struct dentry *dentry,
+ umode_t mode,
+ const struct file_operations *i_fop,
+ void *private)
{
- struct inode *inode;
+ int err;
- inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
- if (!inode)
- goto out_err;
- inode->i_ino = iunique(dir->i_sb, 100);
- d_instantiate(dentry, inode);
+ err = __rpc_create_common(dir, dentry, S_IFDIR | mode, i_fop, private);
+ if (err)
+ return err;
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
return 0;
-out_err:
- printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
- __FILE__, __func__, dentry->d_name.name);
- return -ENOMEM;
}
-static int
-__rpc_rmdir(struct inode *dir, struct dentry *dentry)
+static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry,
+ umode_t mode,
+ const struct file_operations *i_fop,
+ void *private,
+ const struct rpc_pipe_ops *ops,
+ int flags)
{
- int error;
- error = simple_rmdir(dir, dentry);
- if (!error)
- d_delete(dentry);
- return error;
+ struct rpc_inode *rpci;
+ int err;
+
+ err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private);
+ if (err)
+ return err;
+ rpci = RPC_I(dentry->d_inode);
+ rpci->nkern_readwriters = 1;
+ rpci->private = private;
+ rpci->flags = flags;
+ rpci->ops = ops;
+ fsnotify_create(dir, dentry);
+ return 0;
}
-static struct dentry *
-rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive)
+static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ int ret;
+
+ dget(dentry);
+ ret = simple_rmdir(dir, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ return ret;
+}
+
+static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int ret;
+
+ dget(dentry);
+ ret = simple_unlink(dir, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ return ret;
+}
+
+static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct rpc_inode *rpci = RPC_I(inode);
+
+ rpci->nkern_readwriters--;
+ if (rpci->nkern_readwriters != 0)
+ return 0;
+ rpc_close_pipes(inode);
+ return __rpc_unlink(dir, dentry);
+}
+
+static struct dentry *__rpc_lookup_create(struct dentry *parent,
+ struct qstr *name)
{
- struct inode *dir = parent->d_inode;
struct dentry *dentry;
- mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
- dentry = lookup_one_len(name, parent, len);
- if (IS_ERR(dentry))
- goto out_err;
+ dentry = d_lookup(parent, name);
+ if (!dentry) {
+ dentry = d_alloc(parent, name);
+ if (!dentry) {
+ dentry = ERR_PTR(-ENOMEM);
+ goto out_err;
+ }
+ }
if (!dentry->d_inode)
dentry->d_op = &rpc_dentry_operations;
- else if (exclusive) {
- dput(dentry);
- dentry = ERR_PTR(-EEXIST);
- goto out_err;
- }
- return dentry;
out_err:
- mutex_unlock(&dir->i_mutex);
return dentry;
}
-static struct dentry *
-rpc_lookup_negative(char *path, struct nameidata *nd)
+static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
+ struct qstr *name)
{
struct dentry *dentry;
- int error;
- if ((error = rpc_lookup_parent(path, nd)) != 0)
- return ERR_PTR(error);
- dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
- 1);
- if (IS_ERR(dentry))
- rpc_release_path(nd);
- return dentry;
+ dentry = __rpc_lookup_create(parent, name);
+ if (dentry->d_inode == NULL)
+ return dentry;
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
}
-/**
- * rpc_mkdir - Create a new directory in rpc_pipefs
- * @path: path from the rpc_pipefs root to the new directory
- * @rpc_client: rpc client to associate with this directory
- *
- * This creates a directory at the given @path associated with
- * @rpc_clnt, which will contain a file named "info" with some basic
- * information about the client, together with any "pipes" that may
- * later be created using rpc_mkpipe().
+/*
+ * FIXME: This probably has races.
*/
-struct dentry *
-rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
+static void __rpc_depopulate(struct dentry *parent,
+ const struct rpc_filelist *files,
+ int start, int eof)
{
- struct nameidata nd;
+ struct inode *dir = parent->d_inode;
struct dentry *dentry;
- struct inode *dir;
+ struct qstr name;
+ int i;
+
+ for (i = start; i < eof; i++) {
+ name.name = files[i].name;
+ name.len = strlen(files[i].name);
+ name.hash = full_name_hash(name.name, name.len);
+ dentry = d_lookup(parent, &name);
+
+ if (dentry == NULL)
+ continue;
+ if (dentry->d_inode == NULL)
+ goto next;
+ switch (dentry->d_inode->i_mode & S_IFMT) {
+ default:
+ BUG();
+ case S_IFREG:
+ __rpc_unlink(dir, dentry);
+ break;
+ case S_IFDIR:
+ __rpc_rmdir(dir, dentry);
+ }
+next:
+ dput(dentry);
+ }
+}
+
+static void rpc_depopulate(struct dentry *parent,
+ const struct rpc_filelist *files,
+ int start, int eof)
+{
+ struct inode *dir = parent->d_inode;
+
+ mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
+ __rpc_depopulate(parent, files, start, eof);
+ mutex_unlock(&dir->i_mutex);
+}
+
+static int rpc_populate(struct dentry *parent,
+ const struct rpc_filelist *files,
+ int start, int eof,
+ void *private)
+{
+ struct inode *dir = parent->d_inode;
+ struct dentry *dentry;
+ int i, err;
+
+ mutex_lock(&dir->i_mutex);
+ for (i = start; i < eof; i++) {
+ struct qstr q;
+
+ q.name = files[i].name;
+ q.len = strlen(files[i].name);
+ q.hash = full_name_hash(q.name, q.len);
+ dentry = __rpc_lookup_create_exclusive(parent, &q);
+ err = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ goto out_bad;
+ switch (files[i].mode & S_IFMT) {
+ default:
+ BUG();
+ case S_IFREG:
+ err = __rpc_create(dir, dentry,
+ files[i].mode,
+ files[i].i_fop,
+ private);
+ break;
+ case S_IFDIR:
+ err = __rpc_mkdir(dir, dentry,
+ files[i].mode,
+ NULL,
+ private);
+ }
+ if (err != 0)
+ goto out_bad;
+ }
+ mutex_unlock(&dir->i_mutex);
+ return 0;
+out_bad:
+ __rpc_depopulate(parent, files, start, eof);
+ mutex_unlock(&dir->i_mutex);
+ printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
+ __FILE__, __func__, parent->d_name.name);
+ return err;
+}
+
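rpc_populate() and rpc_depopulate() are now driven by a caller-supplied rpc_filelist table plus a private pointer, instead of the old global tables and fixed inode-number ranges. The patch adds such tables itself further down (authfiles, cache_pipefs_files); as a sketch, a hypothetical "foo" consumer would look like this, where foo_info_fops is an assumed file_operations:

enum { FOO_info, FOO_EOF };

static const struct rpc_filelist foo_files[] = {
	[FOO_info] = {
		.name	= "info",
		.i_fop	= &foo_info_fops,
		.mode	= S_IFREG | S_IRUSR,
	},
};

static int foo_dir_populate(struct dentry *dentry, void *private)
{
	/* each created inode receives @private via rpc_inode_setowner() */
	return rpc_populate(dentry, foo_files, FOO_info, FOO_EOF, private);
}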
+static struct dentry *rpc_mkdir_populate(struct dentry *parent,
+ struct qstr *name, umode_t mode, void *private,
+ int (*populate)(struct dentry *, void *), void *args_populate)
+{
+ struct dentry *dentry;
+ struct inode *dir = parent->d_inode;
int error;
- dentry = rpc_lookup_negative(path, &nd);
+ mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
+ dentry = __rpc_lookup_create_exclusive(parent, name);
if (IS_ERR(dentry))
- return dentry;
- dir = nd.path.dentry->d_inode;
- if ((error = __rpc_mkdir(dir, dentry)) != 0)
- goto err_dput;
- RPC_I(dentry->d_inode)->private = rpc_client;
- error = rpc_populate(dentry, authfiles,
- RPCAUTH_info, RPCAUTH_EOF);
- if (error)
- goto err_depopulate;
- dget(dentry);
+ goto out;
+ error = __rpc_mkdir(dir, dentry, mode, NULL, private);
+ if (error != 0)
+ goto out_err;
+ if (populate != NULL) {
+ error = populate(dentry, args_populate);
+ if (error)
+ goto err_rmdir;
+ }
out:
mutex_unlock(&dir->i_mutex);
- rpc_release_path(&nd);
return dentry;
-err_depopulate:
- rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
+err_rmdir:
__rpc_rmdir(dir, dentry);
-err_dput:
- dput(dentry);
- printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
- __FILE__, __func__, path, error);
+out_err:
dentry = ERR_PTR(error);
goto out;
}
-/**
- * rpc_rmdir - Remove a directory created with rpc_mkdir()
- * @dentry: directory to remove
- */
-int
-rpc_rmdir(struct dentry *dentry)
+static int rpc_rmdir_depopulate(struct dentry *dentry,
+ void (*depopulate)(struct dentry *))
{
struct dentry *parent;
struct inode *dir;
@@ -748,9 +728,9 @@ rpc_rmdir(struct dentry *dentry)
parent = dget_parent(dentry);
dir = parent->d_inode;
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
- rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
+ if (depopulate != NULL)
+ depopulate(dentry);
error = __rpc_rmdir(dir, dentry);
- dput(dentry);
mutex_unlock(&dir->i_mutex);
dput(parent);
return error;
@@ -776,50 +756,54 @@ rpc_rmdir(struct dentry *dentry)
* The @private argument passed here will be available to all these methods
* from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
*/
-struct dentry *
-rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
+struct dentry *rpc_mkpipe(struct dentry *parent, const char *name,
+ void *private, const struct rpc_pipe_ops *ops,
+ int flags)
{
struct dentry *dentry;
- struct inode *dir, *inode;
- struct rpc_inode *rpci;
+ struct inode *dir = parent->d_inode;
+ umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR;
+ struct qstr q;
+ int err;
+
+ if (ops->upcall == NULL)
+ umode &= ~S_IRUGO;
+ if (ops->downcall == NULL)
+ umode &= ~S_IWUGO;
+
+ q.name = name;
+ q.len = strlen(name);
+ q.hash = full_name_hash(q.name, q.len);
- dentry = rpc_lookup_create(parent, name, strlen(name), 0);
+ mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
+ dentry = __rpc_lookup_create(parent, &q);
if (IS_ERR(dentry))
- return dentry;
- dir = parent->d_inode;
+ goto out;
if (dentry->d_inode) {
- rpci = RPC_I(dentry->d_inode);
+ struct rpc_inode *rpci = RPC_I(dentry->d_inode);
if (rpci->private != private ||
rpci->ops != ops ||
rpci->flags != flags) {
dput (dentry);
- dentry = ERR_PTR(-EBUSY);
+ err = -EBUSY;
+ goto out_err;
}
rpci->nkern_readwriters++;
goto out;
}
- inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
- if (!inode)
- goto err_dput;
- inode->i_ino = iunique(dir->i_sb, 100);
- inode->i_fop = &rpc_pipe_fops;
- d_instantiate(dentry, inode);
- rpci = RPC_I(inode);
- rpci->private = private;
- rpci->flags = flags;
- rpci->ops = ops;
- rpci->nkern_readwriters = 1;
- fsnotify_create(dir, dentry);
- dget(dentry);
+
+ err = __rpc_mkpipe(dir, dentry, umode, &rpc_pipe_fops,
+ private, ops, flags);
+ if (err)
+ goto out_err;
out:
mutex_unlock(&dir->i_mutex);
return dentry;
-err_dput:
- dput(dentry);
- dentry = ERR_PTR(-ENOMEM);
+out_err:
+ dentry = ERR_PTR(err);
printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
__FILE__, __func__, parent->d_name.name, name,
- -ENOMEM);
+ err);
goto out;
}
EXPORT_SYMBOL_GPL(rpc_mkpipe);
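rpc_mkpipe() keeps its external behaviour but now derives the node's mode from the ops: a pipe with no ->upcall is not readable and one with no ->downcall is not writable. A sketch of a caller, assuming the rpc_pipe_ops callback prototypes used by the existing in-tree pipe consumers (only rpc_mkpipe() and RPC_PIPE_WAIT_FOR_OPEN below come from this file):

static ssize_t foo_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
			       char __user *dst, size_t buflen)
{
	/* copy the queued request out to the userspace daemon; elided */
	return -EAGAIN;
}

static ssize_t foo_pipe_downcall(struct file *filp, const char __user *src,
				 size_t len)
{
	/* parse the daemon's reply; elided */
	return len;
}

static const struct rpc_pipe_ops foo_pipe_ops = {
	.upcall		= foo_pipe_upcall,
	.downcall	= foo_pipe_downcall,
};

static struct dentry *foo_create_pipe(struct dentry *parent, void *foo_data)
{
	/* RPC_PIPE_WAIT_FOR_OPEN queues upcalls until a reader opens it */
	return rpc_mkpipe(parent, "foo", foo_data, &foo_pipe_ops,
			  RPC_PIPE_WAIT_FOR_OPEN);
}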
@@ -842,19 +826,107 @@ rpc_unlink(struct dentry *dentry)
parent = dget_parent(dentry);
dir = parent->d_inode;
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
- if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) {
- rpc_close_pipes(dentry->d_inode);
- error = simple_unlink(dir, dentry);
- if (!error)
- d_delete(dentry);
- }
- dput(dentry);
+ error = __rpc_rmpipe(dir, dentry);
mutex_unlock(&dir->i_mutex);
dput(parent);
return error;
}
EXPORT_SYMBOL_GPL(rpc_unlink);
+enum {
+ RPCAUTH_info,
+ RPCAUTH_EOF
+};
+
+static const struct rpc_filelist authfiles[] = {
+ [RPCAUTH_info] = {
+ .name = "info",
+ .i_fop = &rpc_info_operations,
+ .mode = S_IFREG | S_IRUSR,
+ },
+};
+
+static int rpc_clntdir_populate(struct dentry *dentry, void *private)
+{
+ return rpc_populate(dentry,
+ authfiles, RPCAUTH_info, RPCAUTH_EOF,
+ private);
+}
+
+static void rpc_clntdir_depopulate(struct dentry *dentry)
+{
+ rpc_depopulate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF);
+}
+
+/**
+ * rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs
+ * @dentry: parent directory in rpc_pipefs under which to create it
+ * @name: name of the new directory
+ * @rpc_client: rpc client to associate with this directory
+ *
+ * This creates a directory named @name under @dentry, associated with
+ * @rpc_client, which will contain a file named "info" with some basic
+ * information about the client, together with any "pipes" that may
+ * later be created using rpc_mkpipe().
+ */
+struct dentry *rpc_create_client_dir(struct dentry *dentry,
+ struct qstr *name,
+ struct rpc_clnt *rpc_client)
+{
+ return rpc_mkdir_populate(dentry, name, S_IRUGO | S_IXUGO, NULL,
+ rpc_clntdir_populate, rpc_client);
+}
+
+/**
+ * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
+ * @dentry: directory to remove
+ */
+int rpc_remove_client_dir(struct dentry *dentry)
+{
+ return rpc_rmdir_depopulate(dentry, rpc_clntdir_depopulate);
+}
+
+static const struct rpc_filelist cache_pipefs_files[3] = {
+ [0] = {
+ .name = "channel",
+ .i_fop = &cache_file_operations_pipefs,
+ .mode = S_IFREG|S_IRUSR|S_IWUSR,
+ },
+ [1] = {
+ .name = "content",
+ .i_fop = &content_file_operations_pipefs,
+ .mode = S_IFREG|S_IRUSR,
+ },
+ [2] = {
+ .name = "flush",
+ .i_fop = &cache_flush_operations_pipefs,
+ .mode = S_IFREG|S_IRUSR|S_IWUSR,
+ },
+};
+
+static int rpc_cachedir_populate(struct dentry *dentry, void *private)
+{
+ return rpc_populate(dentry,
+ cache_pipefs_files, 0, 3,
+ private);
+}
+
+static void rpc_cachedir_depopulate(struct dentry *dentry)
+{
+ rpc_depopulate(dentry, cache_pipefs_files, 0, 3);
+}
+
+struct dentry *rpc_create_cache_dir(struct dentry *parent, struct qstr *name,
+ mode_t umode, struct cache_detail *cd)
+{
+ return rpc_mkdir_populate(parent, name, umode, NULL,
+ rpc_cachedir_populate, cd);
+}
+
+void rpc_remove_cache_dir(struct dentry *dentry)
+{
+ rpc_rmdir_depopulate(dentry, rpc_cachedir_depopulate);
+}
+
/*
* populate the filesystem
*/
@@ -866,6 +938,51 @@ static struct super_operations s_ops = {
#define RPCAUTH_GSSMAGIC 0x67596969
+/*
+ * The fixed set of top-level directories created when rpc_pipefs is mounted.
+ */
+enum {
+ RPCAUTH_lockd,
+ RPCAUTH_mount,
+ RPCAUTH_nfs,
+ RPCAUTH_portmap,
+ RPCAUTH_statd,
+ RPCAUTH_nfsd4_cb,
+ RPCAUTH_cache,
+ RPCAUTH_RootEOF
+};
+
+static const struct rpc_filelist files[] = {
+ [RPCAUTH_lockd] = {
+ .name = "lockd",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
+ [RPCAUTH_mount] = {
+ .name = "mount",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
+ [RPCAUTH_nfs] = {
+ .name = "nfs",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
+ [RPCAUTH_portmap] = {
+ .name = "portmap",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
+ [RPCAUTH_statd] = {
+ .name = "statd",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
+ [RPCAUTH_nfsd4_cb] = {
+ .name = "nfsd4_cb",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
+ [RPCAUTH_cache] = {
+ .name = "cache",
+ .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ },
+};
+
static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
@@ -886,7 +1003,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
iput(inode);
return -ENOMEM;
}
- if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
+ if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
goto out;
sb->s_root = root;
return 0;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index beee6da33035..830faf4d9997 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -75,6 +75,37 @@ enum {
#define RPCB_OWNER_STRING "0"
#define RPCB_MAXOWNERLEN sizeof(RPCB_OWNER_STRING)
+/*
+ * XDR data type sizes
+ */
+#define RPCB_program_sz (1)
+#define RPCB_version_sz (1)
+#define RPCB_protocol_sz (1)
+#define RPCB_port_sz (1)
+#define RPCB_boolean_sz (1)
+
+#define RPCB_netid_sz (1 + XDR_QUADLEN(RPCBIND_MAXNETIDLEN))
+#define RPCB_addr_sz (1 + XDR_QUADLEN(RPCBIND_MAXUADDRLEN))
+#define RPCB_ownerstring_sz (1 + XDR_QUADLEN(RPCB_MAXOWNERLEN))
+
+/*
+ * XDR argument and result sizes
+ */
+#define RPCB_mappingargs_sz (RPCB_program_sz + RPCB_version_sz + \
+ RPCB_protocol_sz + RPCB_port_sz)
+#define RPCB_getaddrargs_sz (RPCB_program_sz + RPCB_version_sz + \
+ RPCB_netid_sz + RPCB_addr_sz + \
+ RPCB_ownerstring_sz)
+
+#define RPCB_getportres_sz RPCB_port_sz
+#define RPCB_setres_sz RPCB_boolean_sz
+
+/*
+ * Note that RFC 1833 does not put any size restrictions on the
+ * address string returned by the remote rpcbind database.
+ */
+#define RPCB_getaddrres_sz RPCB_addr_sz
+
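These _sz macros count 32-bit XDR words, not bytes: XDR_QUADLEN(n) is (n + 3) >> 2, so a variable-length string of at most n bytes costs one length word plus XDR_QUADLEN(n) word-padded data words. As a worked example, RPCB_mappingargs_sz is 1 + 1 + 1 + 1 = 4 words (16 bytes on the wire), and a 10-byte string would need 1 + XDR_QUADLEN(10) = 1 + 3 = 4 words, the last one carrying 2 bytes of padding.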
static void rpcb_getport_done(struct rpc_task *, void *);
static void rpcb_map_release(void *data);
static struct rpc_program rpcb_program;
@@ -122,6 +153,7 @@ static void rpcb_map_release(void *data)
rpcb_wake_rpcbind_waiters(map->r_xprt, map->r_status);
xprt_put(map->r_xprt);
+ kfree(map->r_addr);
kfree(map);
}
@@ -268,12 +300,9 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin->sin_port);
- char buf[32];
+ int result;
- /* Construct AF_INET universal address */
- snprintf(buf, sizeof(buf), "%pI4.%u.%u",
- &sin->sin_addr.s_addr, port >> 8, port & 0xff);
- map->r_addr = buf;
+ map->r_addr = rpc_sockaddr2uaddr(sap);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
@@ -284,7 +313,9 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
- return rpcb_register_call(RPCBVERS_4, msg);
+ result = rpcb_register_call(RPCBVERS_4, msg);
+ kfree(map->r_addr);
+ return result;
}
/*
@@ -296,16 +327,9 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin6->sin6_port);
- char buf[64];
+ int result;
- /* Construct AF_INET6 universal address */
- if (ipv6_addr_any(&sin6->sin6_addr))
- snprintf(buf, sizeof(buf), "::.%u.%u",
- port >> 8, port & 0xff);
- else
- snprintf(buf, sizeof(buf), "%pI6.%u.%u",
- &sin6->sin6_addr, port >> 8, port & 0xff);
- map->r_addr = buf;
+ map->r_addr = rpc_sockaddr2uaddr(sap);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
@@ -316,7 +340,9 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
- return rpcb_register_call(RPCBVERS_4, msg);
+ result = rpcb_register_call(RPCBVERS_4, msg);
+ kfree(map->r_addr);
+ return result;
}
static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
@@ -428,7 +454,7 @@ int rpcb_getport_sync(struct sockaddr_in *sin, u32 prog, u32 vers, int prot)
struct rpc_message msg = {
.rpc_proc = &rpcb_procedures2[RPCBPROC_GETPORT],
.rpc_argp = &map,
- .rpc_resp = &map.r_port,
+ .rpc_resp = &map,
};
struct rpc_clnt *rpcb_clnt;
int status;
@@ -458,7 +484,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi
struct rpc_message msg = {
.rpc_proc = proc,
.rpc_argp = map,
- .rpc_resp = &map->r_port,
+ .rpc_resp = map,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = rpcb_clnt,
@@ -539,6 +565,7 @@ void rpcb_getport_async(struct rpc_task *task)
goto bailout_nofree;
}
+ /* Parent transport's destination address */
salen = rpc_peeraddr(clnt, sap, sizeof(addr));
/* Don't ever use rpcbind v2 for AF_INET6 requests */
@@ -589,11 +616,22 @@ void rpcb_getport_async(struct rpc_task *task)
map->r_prot = xprt->prot;
map->r_port = 0;
map->r_xprt = xprt_get(xprt);
- map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
- map->r_addr = rpc_peeraddr2str(rpcb_clnt, RPC_DISPLAY_UNIVERSAL_ADDR);
- map->r_owner = "";
map->r_status = -EIO;
+ switch (bind_version) {
+ case RPCBVERS_4:
+ case RPCBVERS_3:
+ map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
+ map->r_addr = rpc_sockaddr2uaddr(sap);
+ map->r_owner = "";
+ break;
+ case RPCBVERS_2:
+ map->r_addr = NULL;
+ break;
+ default:
+ BUG();
+ }
+
child = rpcb_call_async(rpcb_clnt, map, proc);
rpc_release_client(rpcb_clnt);
if (IS_ERR(child)) {
@@ -656,176 +694,278 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
* XDR functions for rpcbind
*/
-static int rpcb_encode_mapping(struct rpc_rqst *req, __be32 *p,
- struct rpcbind_args *rpcb)
+static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p,
+ const struct rpcbind_args *rpcb)
{
- dprintk("RPC: encoding rpcb request (%u, %u, %d, %u)\n",
+ struct rpc_task *task = req->rq_task;
+ struct xdr_stream xdr;
+
+ dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
+ task->tk_pid, task->tk_msg.rpc_proc->p_name,
rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+
+ p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
+ if (unlikely(p == NULL))
+ return -EIO;
+
*p++ = htonl(rpcb->r_prog);
*p++ = htonl(rpcb->r_vers);
*p++ = htonl(rpcb->r_prot);
- *p++ = htonl(rpcb->r_port);
+ *p = htonl(rpcb->r_port);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
-static int rpcb_decode_getport(struct rpc_rqst *req, __be32 *p,
- unsigned short *portp)
+static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
+ struct rpcbind_args *rpcb)
{
- *portp = (unsigned short) ntohl(*p++);
- dprintk("RPC: rpcb getport result: %u\n",
- *portp);
+ struct rpc_task *task = req->rq_task;
+ struct xdr_stream xdr;
+ unsigned long port;
+
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+
+ rpcb->r_port = 0;
+
+ p = xdr_inline_decode(&xdr, sizeof(__be32));
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ port = ntohl(*p);
+ dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
+ task->tk_msg.rpc_proc->p_name, port);
+ if (unlikely(port > USHORT_MAX))
+ return -EIO;
+
+ rpcb->r_port = port;
return 0;
}
-static int rpcb_decode_set(struct rpc_rqst *req, __be32 *p,
- unsigned int *boolp)
+static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
+ unsigned int *boolp)
{
- *boolp = (unsigned int) ntohl(*p++);
- dprintk("RPC: rpcb set/unset call %s\n",
+ struct rpc_task *task = req->rq_task;
+ struct xdr_stream xdr;
+
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+
+ p = xdr_inline_decode(&xdr, sizeof(__be32));
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ *boolp = 0;
+ if (*p)
+ *boolp = 1;
+
+ dprintk("RPC: %5u RPCB_%s call %s\n",
+ task->tk_pid, task->tk_msg.rpc_proc->p_name,
(*boolp ? "succeeded" : "failed"));
return 0;
}
-static int rpcb_encode_getaddr(struct rpc_rqst *req, __be32 *p,
- struct rpcbind_args *rpcb)
+static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
+ const u32 maxstrlen)
{
- dprintk("RPC: encoding rpcb request (%u, %u, %s)\n",
- rpcb->r_prog, rpcb->r_vers, rpcb->r_addr);
- *p++ = htonl(rpcb->r_prog);
- *p++ = htonl(rpcb->r_vers);
+ u32 len;
+ __be32 *p;
- p = xdr_encode_string(p, rpcb->r_netid);
- p = xdr_encode_string(p, rpcb->r_addr);
- p = xdr_encode_string(p, rpcb->r_owner);
+ if (unlikely(string == NULL))
+ return -EIO;
+ len = strlen(string);
+ if (unlikely(len > maxstrlen))
+ return -EIO;
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ p = xdr_reserve_space(xdr, sizeof(__be32) + len);
+ if (unlikely(p == NULL))
+ return -EIO;
+ xdr_encode_opaque(p, string, len);
return 0;
}
-static int rpcb_decode_getaddr(struct rpc_rqst *req, __be32 *p,
- unsigned short *portp)
+static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
+ const struct rpcbind_args *rpcb)
{
- char *addr;
- u32 addr_len;
- int c, i, f, first, val;
+ struct rpc_task *task = req->rq_task;
+ struct xdr_stream xdr;
- *portp = 0;
- addr_len = ntohl(*p++);
+ dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
+ task->tk_pid, task->tk_msg.rpc_proc->p_name,
+ rpcb->r_prog, rpcb->r_vers,
+ rpcb->r_netid, rpcb->r_addr);
- if (addr_len == 0) {
- dprintk("RPC: rpcb_decode_getaddr: "
- "service is not registered\n");
- return 0;
- }
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- /*
- * Simple sanity check.
- */
- if (addr_len > RPCBIND_MAXUADDRLEN)
- goto out_err;
-
- /*
- * Start at the end and walk backwards until the first dot
- * is encountered. When the second dot is found, we have
- * both parts of the port number.
- */
- addr = (char *)p;
- val = 0;
- first = 1;
- f = 1;
- for (i = addr_len - 1; i > 0; i--) {
- c = addr[i];
- if (c >= '0' && c <= '9') {
- val += (c - '0') * f;
- f *= 10;
- } else if (c == '.') {
- if (first) {
- *portp = val;
- val = first = 0;
- f = 1;
- } else {
- *portp |= (val << 8);
- break;
- }
- }
- }
+ p = xdr_reserve_space(&xdr,
+ sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
+ if (unlikely(p == NULL))
+ return -EIO;
+ *p++ = htonl(rpcb->r_prog);
+ *p = htonl(rpcb->r_vers);
- /*
- * Simple sanity check. If we never saw a dot in the reply,
- * then this was probably just garbage.
- */
- if (first)
- goto out_err;
+ if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
+ return -EIO;
+ if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
+ return -EIO;
+ if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
+ return -EIO;
- dprintk("RPC: rpcb_decode_getaddr port=%u\n", *portp);
return 0;
-
-out_err:
- dprintk("RPC: rpcbind server returned malformed reply\n");
- return -EIO;
}
-#define RPCB_program_sz (1u)
-#define RPCB_version_sz (1u)
-#define RPCB_protocol_sz (1u)
-#define RPCB_port_sz (1u)
-#define RPCB_boolean_sz (1u)
+static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
+ struct rpcbind_args *rpcb)
+{
+ struct sockaddr_storage address;
+ struct sockaddr *sap = (struct sockaddr *)&address;
+ struct rpc_task *task = req->rq_task;
+ struct xdr_stream xdr;
+ u32 len;
-#define RPCB_netid_sz (1+XDR_QUADLEN(RPCBIND_MAXNETIDLEN))
-#define RPCB_addr_sz (1+XDR_QUADLEN(RPCBIND_MAXUADDRLEN))
-#define RPCB_ownerstring_sz (1+XDR_QUADLEN(RPCB_MAXOWNERLEN))
+ rpcb->r_port = 0;
-#define RPCB_mappingargs_sz RPCB_program_sz+RPCB_version_sz+ \
- RPCB_protocol_sz+RPCB_port_sz
-#define RPCB_getaddrargs_sz RPCB_program_sz+RPCB_version_sz+ \
- RPCB_netid_sz+RPCB_addr_sz+ \
- RPCB_ownerstring_sz
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-#define RPCB_setres_sz RPCB_boolean_sz
-#define RPCB_getportres_sz RPCB_port_sz
-
-/*
- * Note that RFC 1833 does not put any size restrictions on the
- * address string returned by the remote rpcbind database.
- */
-#define RPCB_getaddrres_sz RPCB_addr_sz
+ p = xdr_inline_decode(&xdr, sizeof(__be32));
+ if (unlikely(p == NULL))
+ goto out_fail;
+ len = ntohl(*p);
-#define PROC(proc, argtype, restype) \
- [RPCBPROC_##proc] = { \
- .p_proc = RPCBPROC_##proc, \
- .p_encode = (kxdrproc_t) rpcb_encode_##argtype, \
- .p_decode = (kxdrproc_t) rpcb_decode_##restype, \
- .p_arglen = RPCB_##argtype##args_sz, \
- .p_replen = RPCB_##restype##res_sz, \
- .p_statidx = RPCBPROC_##proc, \
- .p_timer = 0, \
- .p_name = #proc, \
+ /*
+ * If the returned universal address is a null string,
+ * the requested RPC service was not registered.
+ */
+ if (len == 0) {
+ dprintk("RPC: %5u RPCB reply: program not registered\n",
+ task->tk_pid);
+ return 0;
}
+ if (unlikely(len > RPCBIND_MAXUADDRLEN))
+ goto out_fail;
+
+ p = xdr_inline_decode(&xdr, len);
+ if (unlikely(p == NULL))
+ goto out_fail;
+ dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
+ task->tk_msg.rpc_proc->p_name, (char *)p);
+
+ if (rpc_uaddr2sockaddr((char *)p, len, sap, sizeof(address)) == 0)
+ goto out_fail;
+ rpcb->r_port = rpc_get_port(sap);
+
+ return 0;
+
+out_fail:
+ dprintk("RPC: %5u malformed RPCB_%s reply\n",
+ task->tk_pid, task->tk_msg.rpc_proc->p_name);
+ return -EIO;
+}
+
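The decoder above replaces the old backwards scan over the dotted string with rpc_uaddr2sockaddr() and rpc_get_port(). As a worked example of the universal address form (shown here as an assumption about the reply contents, following the RFC 1833 convention): "192.0.2.5.0.111" names IPv4 host 192.0.2.5, and the trailing two octets give the port, 0 * 256 + 111 = 111.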
/*
* Not all rpcbind procedures described in RFC 1833 are implemented
* since the Linux kernel RPC code requires only these.
*/
+
static struct rpc_procinfo rpcb_procedures2[] = {
- PROC(SET, mapping, set),
- PROC(UNSET, mapping, set),
- PROC(GETPORT, mapping, getport),
+ [RPCBPROC_SET] = {
+ .p_proc = RPCBPROC_SET,
+ .p_encode = (kxdrproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_arglen = RPCB_mappingargs_sz,
+ .p_replen = RPCB_setres_sz,
+ .p_statidx = RPCBPROC_SET,
+ .p_timer = 0,
+ .p_name = "SET",
+ },
+ [RPCBPROC_UNSET] = {
+ .p_proc = RPCBPROC_UNSET,
+ .p_encode = (kxdrproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_arglen = RPCB_mappingargs_sz,
+ .p_replen = RPCB_setres_sz,
+ .p_statidx = RPCBPROC_UNSET,
+ .p_timer = 0,
+ .p_name = "UNSET",
+ },
+ [RPCBPROC_GETPORT] = {
+ .p_proc = RPCBPROC_GETPORT,
+ .p_encode = (kxdrproc_t)rpcb_enc_mapping,
+ .p_decode = (kxdrproc_t)rpcb_dec_getport,
+ .p_arglen = RPCB_mappingargs_sz,
+ .p_replen = RPCB_getportres_sz,
+ .p_statidx = RPCBPROC_GETPORT,
+ .p_timer = 0,
+ .p_name = "GETPORT",
+ },
};
static struct rpc_procinfo rpcb_procedures3[] = {
- PROC(SET, getaddr, set),
- PROC(UNSET, getaddr, set),
- PROC(GETADDR, getaddr, getaddr),
+ [RPCBPROC_SET] = {
+ .p_proc = RPCBPROC_SET,
+ .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_arglen = RPCB_getaddrargs_sz,
+ .p_replen = RPCB_setres_sz,
+ .p_statidx = RPCBPROC_SET,
+ .p_timer = 0,
+ .p_name = "SET",
+ },
+ [RPCBPROC_UNSET] = {
+ .p_proc = RPCBPROC_UNSET,
+ .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_arglen = RPCB_getaddrargs_sz,
+ .p_replen = RPCB_setres_sz,
+ .p_statidx = RPCBPROC_UNSET,
+ .p_timer = 0,
+ .p_name = "UNSET",
+ },
+ [RPCBPROC_GETADDR] = {
+ .p_proc = RPCBPROC_GETADDR,
+ .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+ .p_arglen = RPCB_getaddrargs_sz,
+ .p_replen = RPCB_getaddrres_sz,
+ .p_statidx = RPCBPROC_GETADDR,
+ .p_timer = 0,
+ .p_name = "GETADDR",
+ },
};
static struct rpc_procinfo rpcb_procedures4[] = {
- PROC(SET, getaddr, set),
- PROC(UNSET, getaddr, set),
- PROC(GETADDR, getaddr, getaddr),
- PROC(GETVERSADDR, getaddr, getaddr),
+ [RPCBPROC_SET] = {
+ .p_proc = RPCBPROC_SET,
+ .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_arglen = RPCB_getaddrargs_sz,
+ .p_replen = RPCB_setres_sz,
+ .p_statidx = RPCBPROC_SET,
+ .p_timer = 0,
+ .p_name = "SET",
+ },
+ [RPCBPROC_UNSET] = {
+ .p_proc = RPCBPROC_UNSET,
+ .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrproc_t)rpcb_dec_set,
+ .p_arglen = RPCB_getaddrargs_sz,
+ .p_replen = RPCB_setres_sz,
+ .p_statidx = RPCBPROC_UNSET,
+ .p_timer = 0,
+ .p_name = "UNSET",
+ },
+ [RPCBPROC_GETADDR] = {
+ .p_proc = RPCBPROC_GETADDR,
+ .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
+ .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+ .p_arglen = RPCB_getaddrargs_sz,
+ .p_replen = RPCB_getaddrres_sz,
+ .p_statidx = RPCBPROC_GETADDR,
+ .p_timer = 0,
+ .p_name = "GETADDR",
+ },
};
static struct rpcb_info rpcb_next_version[] = {
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index adaa81982f74..8cce92189019 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -69,5 +69,5 @@ cleanup_sunrpc(void)
rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
MODULE_LICENSE("GPL");
-module_init(init_sunrpc);
+fs_initcall(init_sunrpc); /* Ensure we're initialised before nfs */
module_exit(cleanup_sunrpc);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 5c865e2d299e..6caffa34ac01 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -171,6 +171,11 @@ static void ip_map_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
+static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
+}
+
static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
@@ -289,7 +294,7 @@ struct cache_detail ip_map_cache = {
.hash_table = ip_table,
.name = "auth.unix.ip",
.cache_put = ip_map_put,
- .cache_request = ip_map_request,
+ .cache_upcall = ip_map_upcall,
.cache_parse = ip_map_parse,
.cache_show = ip_map_show,
.match = ip_map_match,
@@ -523,6 +528,11 @@ static void unix_gid_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
+static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
+}
+
static struct unix_gid *unix_gid_lookup(uid_t uid);
extern struct cache_detail unix_gid_cache;
@@ -622,7 +632,7 @@ struct cache_detail unix_gid_cache = {
.hash_table = gid_table,
.name = "auth.unix.gid",
.cache_put = unix_gid_put,
- .cache_request = unix_gid_request,
+ .cache_upcall = unix_gid_upcall,
.cache_parse = unix_gid_parse,
.cache_show = unix_gid_show,
.match = unix_gid_match,
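With cache_request dropped from struct cache_detail, each cache now supplies a cache_upcall method that simply forwards to sunrpc_cache_pipe_upcall() together with its own request formatter, as ip_map_upcall() and unix_gid_upcall() do above. The same shape for a hypothetical cache, where foo_map_request is assumed to have the usual request-formatting signature:

static int foo_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, foo_map_request);
}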
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index 31becbf09263..dd824341c349 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -25,8 +25,13 @@
#define RPC_RTO_INIT (HZ/5)
#define RPC_RTO_MIN (HZ/10)
-void
-rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
+/**
+ * rpc_init_rtt - Initialize an RPC RTT estimator context
+ * @rt: context to initialize
+ * @timeo: initial timeout value, in jiffies
+ *
+ */
+void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
{
unsigned long init = 0;
unsigned i;
@@ -43,12 +48,16 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
}
EXPORT_SYMBOL_GPL(rpc_init_rtt);
-/*
+/**
+ * rpc_update_rtt - Update an RPC RTT estimator context
+ * @rt: context to update
+ * @timer: timer array index (request type)
+ * @m: recent actual RTT, in jiffies
+ *
* NB: When computing the smoothed RTT and standard deviation,
* be careful not to produce negative intermediate results.
*/
-void
-rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
+void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
{
long *srtt, *sdrtt;
@@ -79,21 +88,25 @@ rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
}
EXPORT_SYMBOL_GPL(rpc_update_rtt);
-/*
- * Estimate rto for an nfs rpc sent via. an unreliable datagram.
- * Use the mean and mean deviation of rtt for the appropriate type of rpc
- * for the frequent rpcs and a default for the others.
- * The justification for doing "other" this way is that these rpcs
- * happen so infrequently that timer est. would probably be stale.
- * Also, since many of these rpcs are
- * non-idempotent, a conservative timeout is desired.
+/**
+ * rpc_calc_rto - Provide an estimated timeout value
+ * @rt: context to use for calculation
+ * @timer: timer array index (request type)
+ *
+ * Estimate RTO for an NFS RPC sent via an unreliable datagram. Use
+ * the mean and mean deviation of RTT for the appropriate type of RPC
+ * for frequently issued RPCs, and a fixed default for the others.
+ *
+ * The justification for doing "other" this way is that these RPCs
+ * happen so infrequently that timer estimation would probably be
+ * stale. Also, since many of these RPCs are non-idempotent, a
+ * conservative timeout is desired.
+ *
* getattr, lookup,
* read, write, commit - A+4D
* other - timeo
*/
-
-unsigned long
-rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
+unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
{
unsigned long res;
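A worked example of the A + 4D rule documented above: with a smoothed RTT estimate of 100 ms and a mean deviation of 20 ms, the estimator returns roughly 100 + 4 * 20 = 180 ms for getattr, lookup, read, write and commit, while every other request class keeps the transport's configured timeo.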
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 406e26de584e..8bd690c48b69 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -24,7 +24,7 @@ xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
unsigned int quadlen = XDR_QUADLEN(obj->len);
p[quadlen] = 0; /* zero trailing bytes */
- *p++ = htonl(obj->len);
+ *p++ = cpu_to_be32(obj->len);
memcpy(p, obj->data, obj->len);
return p + XDR_QUADLEN(obj->len);
}
@@ -35,7 +35,7 @@ xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
unsigned int len;
- if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
+ if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
return NULL;
obj->len = len;
obj->data = (u8 *) p;
@@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
*/
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
- *p++ = htonl(nbytes);
+ *p++ = cpu_to_be32(nbytes);
return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
@@ -101,7 +101,7 @@ xdr_decode_string_inplace(__be32 *p, char **sp,
{
u32 len;
- len = ntohl(*p++);
+ len = be32_to_cpu(*p++);
if (len > maxlen)
return NULL;
*lenp = len;
@@ -771,7 +771,7 @@ xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
if (status)
return status;
- *obj = ntohl(raw);
+ *obj = be32_to_cpu(raw);
return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);
@@ -779,7 +779,7 @@ EXPORT_SYMBOL_GPL(xdr_decode_word);
int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
- __be32 raw = htonl(obj);
+ __be32 raw = cpu_to_be32(obj);
return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
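The htonl()/ntohl() to cpu_to_be32()/be32_to_cpu() switch changes no byte on the wire; the helpers produce identical values, but carry the __be32 type so sparse can flag missing conversions. A minimal sketch (not from the patch, assuming the usual kernel byteorder helpers from <linux/types.h>):

static inline bool be32_roundtrip_ok(u32 host)
{
	__be32 wire = cpu_to_be32(host);	/* same bits htonl(host) would produce */

	return be32_to_cpu(wire) == host;	/* always true */
}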
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 1dd6123070e9..9a63f669ece4 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -168,47 +168,25 @@ static struct rpc_xprt_ops xprt_rdma_procs; /* forward reference */
static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
- struct sockaddr_in *addr = (struct sockaddr_in *)
+ struct sockaddr *sap = (struct sockaddr *)
&rpcx_to_rdmad(xprt).addr;
- char *buf;
+ struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+ char buf[64];
- buf = kzalloc(20, GFP_KERNEL);
- if (buf)
- snprintf(buf, 20, "%pI4", &addr->sin_addr.s_addr);
- xprt->address_strings[RPC_DISPLAY_ADDR] = buf;
+ (void)rpc_ntop(sap, buf, sizeof(buf));
+ xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
- buf = kzalloc(8, GFP_KERNEL);
- if (buf)
- snprintf(buf, 8, "%u", ntohs(addr->sin_port));
- xprt->address_strings[RPC_DISPLAY_PORT] = buf;
+ (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
+ xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
- buf = kzalloc(48, GFP_KERNEL);
- if (buf)
- snprintf(buf, 48, "addr=%pI4 port=%u proto=%s",
- &addr->sin_addr.s_addr,
- ntohs(addr->sin_port), "rdma");
- xprt->address_strings[RPC_DISPLAY_ALL] = buf;
-
- buf = kzalloc(10, GFP_KERNEL);
- if (buf)
- snprintf(buf, 10, "%02x%02x%02x%02x",
- NIPQUAD(addr->sin_addr.s_addr));
- xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf;
-
- buf = kzalloc(8, GFP_KERNEL);
- if (buf)
- snprintf(buf, 8, "%4hx", ntohs(addr->sin_port));
- xprt->address_strings[RPC_DISPLAY_HEX_PORT] = buf;
-
- buf = kzalloc(30, GFP_KERNEL);
- if (buf)
- snprintf(buf, 30, "%pI4.%u.%u",
- &addr->sin_addr.s_addr,
- ntohs(addr->sin_port) >> 8,
- ntohs(addr->sin_port) & 0xff);
- xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR] = buf;
+ (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
+ NIPQUAD(sin->sin_addr.s_addr));
+ xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
+
+ (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
+ xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
/* netid */
xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
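xprt_rdma_format_addresses() now builds each string on the stack and kstrdup()s the result, sharing rpc_ntop() and rpc_get_port() with the socket transport instead of hand-formatting IPv4 only. A condensed sketch of the shared pattern (error handling for a failed kstrdup() is omitted, as in the patch itself); constant entries such as RPC_DISPLAY_PROTO and RPC_DISPLAY_NETID remain string literals and are never freed:

static void example_format(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	char buf[64];

	(void)rpc_ntop(sap, buf, sizeof(buf));		/* presentation address */
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	(void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
}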
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 83c73c4d017a..62438f3a914d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -248,8 +248,8 @@ struct sock_xprt {
* Connection of transports
*/
struct delayed_work connect_worker;
- struct sockaddr_storage addr;
- unsigned short port;
+ struct sockaddr_storage srcaddr;
+ unsigned short srcport;
/*
* UDP socket buffer size parameters
@@ -296,117 +296,60 @@ static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
return (struct sockaddr_in6 *) &xprt->addr;
}
-static void xs_format_ipv4_peer_addresses(struct rpc_xprt *xprt,
- const char *protocol,
- const char *netid)
+static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
- struct sockaddr_in *addr = xs_addr_in(xprt);
- char *buf;
+ struct sockaddr *sap = xs_addr(xprt);
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ char buf[128];
- buf = kzalloc(20, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 20, "%pI4", &addr->sin_addr.s_addr);
- }
- xprt->address_strings[RPC_DISPLAY_ADDR] = buf;
-
- buf = kzalloc(8, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 8, "%u",
- ntohs(addr->sin_port));
- }
- xprt->address_strings[RPC_DISPLAY_PORT] = buf;
-
- xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
-
- buf = kzalloc(48, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 48, "addr=%pI4 port=%u proto=%s",
- &addr->sin_addr.s_addr,
- ntohs(addr->sin_port),
- protocol);
- }
- xprt->address_strings[RPC_DISPLAY_ALL] = buf;
-
- buf = kzalloc(10, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 10, "%02x%02x%02x%02x",
- NIPQUAD(addr->sin_addr.s_addr));
- }
- xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf;
+ (void)rpc_ntop(sap, buf, sizeof(buf));
+ xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
- buf = kzalloc(8, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 8, "%4hx",
- ntohs(addr->sin_port));
- }
- xprt->address_strings[RPC_DISPLAY_HEX_PORT] = buf;
-
- buf = kzalloc(30, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 30, "%pI4.%u.%u",
- &addr->sin_addr.s_addr,
- ntohs(addr->sin_port) >> 8,
- ntohs(addr->sin_port) & 0xff);
+ switch (sap->sa_family) {
+ case AF_INET:
+ sin = xs_addr_in(xprt);
+ (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
+ NIPQUAD(sin->sin_addr.s_addr));
+ break;
+ case AF_INET6:
+ sin6 = xs_addr_in6(xprt);
+ (void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
+ break;
+ default:
+ BUG();
}
- xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR] = buf;
-
- xprt->address_strings[RPC_DISPLAY_NETID] = netid;
+ xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}
-static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt,
- const char *protocol,
- const char *netid)
+static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
- struct sockaddr_in6 *addr = xs_addr_in6(xprt);
- char *buf;
+ struct sockaddr *sap = xs_addr(xprt);
+ char buf[128];
- buf = kzalloc(40, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 40, "%pI6",&addr->sin6_addr);
- }
- xprt->address_strings[RPC_DISPLAY_ADDR] = buf;
+ (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
+ xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
- buf = kzalloc(8, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 8, "%u",
- ntohs(addr->sin6_port));
- }
- xprt->address_strings[RPC_DISPLAY_PORT] = buf;
+ (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
+ xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
+}
+static void xs_format_peer_addresses(struct rpc_xprt *xprt,
+ const char *protocol,
+ const char *netid)
+{
xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
+ xprt->address_strings[RPC_DISPLAY_NETID] = netid;
+ xs_format_common_peer_addresses(xprt);
+ xs_format_common_peer_ports(xprt);
+}
- buf = kzalloc(64, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 64, "addr=%pI6 port=%u proto=%s",
- &addr->sin6_addr,
- ntohs(addr->sin6_port),
- protocol);
- }
- xprt->address_strings[RPC_DISPLAY_ALL] = buf;
-
- buf = kzalloc(36, GFP_KERNEL);
- if (buf)
- snprintf(buf, 36, "%pi6", &addr->sin6_addr);
-
- xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf;
-
- buf = kzalloc(8, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 8, "%4hx",
- ntohs(addr->sin6_port));
- }
- xprt->address_strings[RPC_DISPLAY_HEX_PORT] = buf;
-
- buf = kzalloc(50, GFP_KERNEL);
- if (buf) {
- snprintf(buf, 50, "%pI6.%u.%u",
- &addr->sin6_addr,
- ntohs(addr->sin6_port) >> 8,
- ntohs(addr->sin6_port) & 0xff);
- }
- xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR] = buf;
+static void xs_update_peer_port(struct rpc_xprt *xprt)
+{
+ kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
+ kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
- xprt->address_strings[RPC_DISPLAY_NETID] = netid;
+ xs_format_common_peer_ports(xprt);
}
static void xs_free_peer_addresses(struct rpc_xprt *xprt)
@@ -1587,25 +1530,15 @@ static unsigned short xs_get_random_port(void)
*/
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
- struct sockaddr *addr = xs_addr(xprt);
-
dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
- switch (addr->sa_family) {
- case AF_INET:
- ((struct sockaddr_in *)addr)->sin_port = htons(port);
- break;
- case AF_INET6:
- ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
- break;
- default:
- BUG();
- }
+ rpc_set_port(xs_addr(xprt), port);
+ xs_update_peer_port(xprt);
}
static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock)
{
- unsigned short port = transport->port;
+ unsigned short port = transport->srcport;
if (port == 0 && transport->xprt.resvport)
port = xs_get_random_port();
@@ -1614,8 +1547,8 @@ static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket
static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port)
{
- if (transport->port != 0)
- transport->port = 0;
+ if (transport->srcport != 0)
+ transport->srcport = 0;
if (!transport->xprt.resvport)
return 0;
if (port <= xprt_min_resvport || port > xprt_max_resvport)
@@ -1633,7 +1566,7 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
unsigned short port = xs_get_srcport(transport, sock);
unsigned short last;
- sa = (struct sockaddr_in *)&transport->addr;
+ sa = (struct sockaddr_in *)&transport->srcaddr;
myaddr.sin_addr = sa->sin_addr;
do {
myaddr.sin_port = htons(port);
@@ -1642,7 +1575,7 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
if (port == 0)
break;
if (err == 0) {
- transport->port = port;
+ transport->srcport = port;
break;
}
last = port;
@@ -1666,7 +1599,7 @@ static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
unsigned short port = xs_get_srcport(transport, sock);
unsigned short last;
- sa = (struct sockaddr_in6 *)&transport->addr;
+ sa = (struct sockaddr_in6 *)&transport->srcaddr;
myaddr.sin6_addr = sa->sin6_addr;
do {
myaddr.sin6_port = htons(port);
@@ -1675,7 +1608,7 @@ static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
if (port == 0)
break;
if (err == 0) {
- transport->port = port;
+ transport->srcport = port;
break;
}
last = port;
@@ -1780,8 +1713,11 @@ static void xs_udp_connect_worker4(struct work_struct *work)
goto out;
}
- dprintk("RPC: worker connecting xprt %p to address: %s\n",
- xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
+ dprintk("RPC: worker connecting xprt %p via %s to "
+ "%s (port %s)\n", xprt,
+ xprt->address_strings[RPC_DISPLAY_PROTO],
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT]);
xs_udp_finish_connecting(xprt, sock);
status = 0;
@@ -1822,8 +1758,11 @@ static void xs_udp_connect_worker6(struct work_struct *work)
goto out;
}
- dprintk("RPC: worker connecting xprt %p to address: %s\n",
- xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
+ dprintk("RPC: worker connecting xprt %p via %s to "
+ "%s (port %s)\n", xprt,
+ xprt->address_strings[RPC_DISPLAY_PROTO],
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT]);
xs_udp_finish_connecting(xprt, sock);
status = 0;
@@ -1948,8 +1887,11 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
goto out_eagain;
}
- dprintk("RPC: worker connecting xprt %p to address: %s\n",
- xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
+ dprintk("RPC: worker connecting xprt %p via %s to "
+ "%s (port %s)\n", xprt,
+ xprt->address_strings[RPC_DISPLAY_PROTO],
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT]);
status = xs_tcp_finish_connecting(xprt, sock);
dprintk("RPC: %p connect status %d connected %d sock state %d\n",
@@ -2120,7 +2062,7 @@ static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
- transport->port,
+ transport->srcport,
xprt->stat.bind_count,
xprt->stat.sends,
xprt->stat.recvs,
@@ -2144,7 +2086,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
idle_time = (long)(jiffies - xprt->last_used) / HZ;
seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
- transport->port,
+ transport->srcport,
xprt->stat.bind_count,
xprt->stat.connect_count,
xprt->stat.connect_time,
@@ -2223,7 +2165,7 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
memcpy(&xprt->addr, args->dstaddr, args->addrlen);
xprt->addrlen = args->addrlen;
if (args->srcaddr)
- memcpy(&new->addr, args->srcaddr, args->addrlen);
+ memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
return xprt;
}
@@ -2272,7 +2214,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
INIT_DELAYED_WORK(&transport->connect_worker,
xs_udp_connect_worker4);
- xs_format_ipv4_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
+ xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
break;
case AF_INET6:
if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
@@ -2280,15 +2222,22 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
INIT_DELAYED_WORK(&transport->connect_worker,
xs_udp_connect_worker6);
- xs_format_ipv6_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
+ xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
break;
default:
kfree(xprt);
return ERR_PTR(-EAFNOSUPPORT);
}
- dprintk("RPC: set up transport to address %s\n",
- xprt->address_strings[RPC_DISPLAY_ALL]);
+ if (xprt_bound(xprt))
+ dprintk("RPC: set up xprt to %s (port %s) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
+ else
+ dprintk("RPC: set up xprt to %s (autobind) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
if (try_module_get(THIS_MODULE))
return xprt;
@@ -2337,23 +2286,33 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
if (((struct sockaddr_in *)addr)->sin_port != htons(0))
xprt_set_bound(xprt);
- INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker4);
- xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
+ INIT_DELAYED_WORK(&transport->connect_worker,
+ xs_tcp_connect_worker4);
+ xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
break;
case AF_INET6:
if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
xprt_set_bound(xprt);
- INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker6);
- xs_format_ipv6_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
+ INIT_DELAYED_WORK(&transport->connect_worker,
+ xs_tcp_connect_worker6);
+ xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
break;
default:
kfree(xprt);
return ERR_PTR(-EAFNOSUPPORT);
}
- dprintk("RPC: set up transport to address %s\n",
- xprt->address_strings[RPC_DISPLAY_ALL]);
+ if (xprt_bound(xprt))
+ dprintk("RPC: set up xprt to %s (port %s) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
+ else
+ dprintk("RPC: set up xprt to %s (autobind) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
+
if (try_module_get(THIS_MODULE))
return xprt;
@@ -2412,3 +2371,55 @@ void cleanup_socket_xprt(void)
xprt_unregister_transport(&xs_udp_transport);
xprt_unregister_transport(&xs_tcp_transport);
}
+
+static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
+ unsigned int min, unsigned int max)
+{
+ unsigned long num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+ ret = strict_strtoul(val, 0, &num);
+ if (ret == -EINVAL || num < min || num > max)
+ return -EINVAL;
+ *((unsigned int *)kp->arg) = num;
+ return 0;
+}
+
+static int param_set_portnr(const char *val, struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp,
+ RPC_MIN_RESVPORT,
+ RPC_MAX_RESVPORT);
+}
+
+static int param_get_portnr(char *buffer, struct kernel_param *kp)
+{
+ return param_get_uint(buffer, kp);
+}
+#define param_check_portnr(name, p) \
+ __param_check(name, p, unsigned int);
+
+module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
+module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
+
+static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp,
+ RPC_MIN_SLOT_TABLE,
+ RPC_MAX_SLOT_TABLE);
+}
+
+static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
+{
+ return param_get_uint(buffer, kp);
+}
+#define param_check_slot_table_size(name, p) \
+ __param_check(name, p, unsigned int);
+
+module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
+ slot_table_size, 0644);
+module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
+ slot_table_size, 0644);
+
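
Note: the min_resvport/max_resvport and slot-table parameters above now route every write through param_set_uint_minmax(), so out-of-range values are rejected with -EINVAL at set time rather than being stored. A standalone userspace sketch of the same bounded-parse idea, kept separate from the patch; the helper name and the 665..1023 window are assumptions for the demo:

/* Illustration only -- not part of the patch.  A standalone userspace
 * sketch of the bounded-parse idea behind param_set_uint_minmax(); the
 * helper name and the 665..1023 window are assumptions for the demo. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_uint_minmax(const char *val, unsigned int min,
                             unsigned int max, unsigned int *out)
{
        unsigned long num;
        char *end;

        if (!val || !*val)
                return -EINVAL;
        errno = 0;
        num = strtoul(val, &end, 0);
        if (errno || *end || num < min || num > max)
                return -EINVAL;
        *out = num;
        return 0;
}

int main(void)
{
        unsigned int port;

        if (parse_uint_minmax("800", 665, 1023, &port) == 0)
                printf("accepted %u\n", port);
        if (parse_uint_minmax("70000", 665, 1023, &port) != 0)
                printf("rejected out-of-range value\n");
        return 0;
}
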
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 911ba7ffab84..090d300d7394 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -57,7 +57,6 @@
# call mcount (offset: 0x5)
# [...]
# ret
-# .globl my_func
# other_func:
# [...]
# call mcount (offset: 0x1b)
diff --git a/security/Makefile b/security/Makefile
index b56e7f9ecbc2..95ecc06392d7 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -16,9 +16,7 @@ obj-$(CONFIG_SECURITYFS) += inode.o
# Must precede capability.o in order to stack properly.
obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
-ifeq ($(CONFIG_AUDIT),y)
-obj-$(CONFIG_SECURITY_SMACK) += lsm_audit.o
-endif
+obj-$(CONFIG_AUDIT) += lsm_audit.o
obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
diff --git a/security/capability.c b/security/capability.c
index 88f752e8152c..fce07a7bc825 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -373,6 +373,11 @@ static int cap_task_create(unsigned long clone_flags)
return 0;
}
+static int cap_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ return 0;
+}
+
static void cap_cred_free(struct cred *cred)
{
}
@@ -386,6 +391,10 @@ static void cap_cred_commit(struct cred *new, const struct cred *old)
{
}
+static void cap_cred_transfer(struct cred *new, const struct cred *old)
+{
+}
+
static int cap_kernel_act_as(struct cred *new, u32 secid)
{
return 0;
@@ -396,6 +405,11 @@ static int cap_kernel_create_files_as(struct cred *new, struct inode *inode)
return 0;
}
+static int cap_kernel_module_request(void)
+{
+ return 0;
+}
+
static int cap_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
{
return 0;
@@ -701,10 +715,26 @@ static void cap_inet_conn_established(struct sock *sk, struct sk_buff *skb)
{
}
+
+
static void cap_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
{
}
+
+static int cap_tun_dev_create(void)
+{
+ return 0;
+}
+
+static void cap_tun_dev_post_create(struct sock *sk)
+{
+}
+
+static int cap_tun_dev_attach(struct sock *sk)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -792,6 +822,20 @@ static void cap_release_secctx(char *secdata, u32 seclen)
{
}
+static int cap_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return 0;
+}
+
+static int cap_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return 0;
+}
+
+static int cap_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ return 0;
+}
#ifdef CONFIG_KEYS
static int cap_key_alloc(struct key *key, const struct cred *cred,
unsigned long flags)
@@ -815,6 +859,13 @@ static int cap_key_getsecurity(struct key *key, char **_buffer)
return 0;
}
+static int cap_key_session_to_parent(const struct cred *cred,
+ const struct cred *parent_cred,
+ struct key *key)
+{
+ return 0;
+}
+
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
@@ -854,7 +905,7 @@ struct security_operations default_security_ops = {
void security_fixup_ops(struct security_operations *ops)
{
- set_to_cap_if_null(ops, ptrace_may_access);
+ set_to_cap_if_null(ops, ptrace_access_check);
set_to_cap_if_null(ops, ptrace_traceme);
set_to_cap_if_null(ops, capget);
set_to_cap_if_null(ops, capset);
@@ -940,11 +991,14 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, file_receive);
set_to_cap_if_null(ops, dentry_open);
set_to_cap_if_null(ops, task_create);
+ set_to_cap_if_null(ops, cred_alloc_blank);
set_to_cap_if_null(ops, cred_free);
set_to_cap_if_null(ops, cred_prepare);
set_to_cap_if_null(ops, cred_commit);
+ set_to_cap_if_null(ops, cred_transfer);
set_to_cap_if_null(ops, kernel_act_as);
set_to_cap_if_null(ops, kernel_create_files_as);
+ set_to_cap_if_null(ops, kernel_module_request);
set_to_cap_if_null(ops, task_setuid);
set_to_cap_if_null(ops, task_fix_setuid);
set_to_cap_if_null(ops, task_setgid);
@@ -992,6 +1046,9 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, secid_to_secctx);
set_to_cap_if_null(ops, secctx_to_secid);
set_to_cap_if_null(ops, release_secctx);
+ set_to_cap_if_null(ops, inode_notifysecctx);
+ set_to_cap_if_null(ops, inode_setsecctx);
+ set_to_cap_if_null(ops, inode_getsecctx);
#ifdef CONFIG_SECURITY_NETWORK
set_to_cap_if_null(ops, unix_stream_connect);
set_to_cap_if_null(ops, unix_may_send);
@@ -1020,6 +1077,9 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, inet_csk_clone);
set_to_cap_if_null(ops, inet_conn_established);
set_to_cap_if_null(ops, req_classify_flow);
+ set_to_cap_if_null(ops, tun_dev_create);
+ set_to_cap_if_null(ops, tun_dev_post_create);
+ set_to_cap_if_null(ops, tun_dev_attach);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
set_to_cap_if_null(ops, xfrm_policy_alloc_security);
@@ -1038,6 +1098,7 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, key_free);
set_to_cap_if_null(ops, key_permission);
set_to_cap_if_null(ops, key_getsecurity);
+ set_to_cap_if_null(ops, key_session_to_parent);
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
set_to_cap_if_null(ops, audit_rule_init);
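
Note: the new stubs above only matter because security_fixup_ops() backfills every NULL hook with the capability default, letting callers invoke security_ops->hook() unconditionally. A hedged userspace mock of that backfill pattern; all names below are hypothetical, not the kernel API:

/* Illustration only -- a userspace mock of the "backfill missing hooks
 * with a default" idea behind security_fixup_ops()/set_to_cap_if_null().
 * All names here are hypothetical, not the kernel API. */
#include <stdio.h>

struct ops {
        int (*tun_dev_create)(void);
        int (*kernel_module_request)(void);
};

static int default_allow(void) { return 0; }

#define set_if_null(ops, hook)                          \
        do {                                            \
                if (!(ops)->hook)                       \
                        (ops)->hook = default_allow;    \
        } while (0)

int main(void)
{
        struct ops lsm = { 0 };         /* an LSM that implements nothing */

        set_if_null(&lsm, tun_dev_create);
        set_if_null(&lsm, kernel_module_request);

        /* callers may now invoke every hook unconditionally */
        printf("tun_dev_create -> %d\n", lsm.tun_dev_create());
        printf("kernel_module_request -> %d\n", lsm.kernel_module_request());
        return 0;
}
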
diff --git a/security/commoncap.c b/security/commoncap.c
index e3097c0a1311..fe30751a6cd9 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -101,7 +101,7 @@ int cap_settime(struct timespec *ts, struct timezone *tz)
}
/**
- * cap_ptrace_may_access - Determine whether the current process may access
+ * cap_ptrace_access_check - Determine whether the current process may access
* another
* @child: The process to be accessed
* @mode: The mode of attachment.
@@ -109,7 +109,7 @@ int cap_settime(struct timespec *ts, struct timezone *tz)
* Determine whether a process may access another, returning 0 if permission
* granted, -ve if denied.
*/
-int cap_ptrace_may_access(struct task_struct *child, unsigned int mode)
+int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
int ret = 0;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 4732f5e5d127..b85e61bcf246 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -249,7 +249,11 @@ void ima_counts_put(struct path *path, int mask)
struct inode *inode = path->dentry->d_inode;
struct ima_iint_cache *iint;
- if (!ima_initialized || !S_ISREG(inode->i_mode))
+ /* The inode may already have been freed, freeing the iint
+ * with it. Verify the inode is not NULL before dereferencing
+ * it.
+ */
+ if (!ima_initialized || !inode || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_insert_get(inode);
if (!iint)
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 747a464943af..74d5447d7df7 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -3,6 +3,7 @@
#
obj-y := \
+ gc.o \
key.o \
keyring.o \
keyctl.o \
diff --git a/security/keys/compat.c b/security/keys/compat.c
index c766c68a63bc..792c0a611a6d 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -82,6 +82,9 @@ asmlinkage long compat_sys_keyctl(u32 option,
case KEYCTL_GET_SECURITY:
return keyctl_get_security(arg2, compat_ptr(arg3), arg4);
+ case KEYCTL_SESSION_TO_PARENT:
+ return keyctl_session_to_parent();
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/gc.c b/security/keys/gc.c
new file mode 100644
index 000000000000..1e616aef55fd
--- /dev/null
+++ b/security/keys/gc.c
@@ -0,0 +1,194 @@
+/* Key garbage collector
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <keys/keyring-type.h>
+#include "internal.h"
+
+/*
+ * Delay between key revocation/expiry in seconds
+ */
+unsigned key_gc_delay = 5 * 60;
+
+/*
+ * Reaper
+ */
+static void key_gc_timer_func(unsigned long);
+static void key_garbage_collector(struct work_struct *);
+static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
+static DECLARE_WORK(key_gc_work, key_garbage_collector);
+static key_serial_t key_gc_cursor; /* the last key the gc considered */
+static unsigned long key_gc_executing;
+static time_t key_gc_next_run = LONG_MAX;
+
+/*
+ * Schedule a garbage collection run
+ * - precision isn't particularly important
+ */
+void key_schedule_gc(time_t gc_at)
+{
+ unsigned long expires;
+ time_t now = current_kernel_time().tv_sec;
+
+ kenter("%ld", gc_at - now);
+
+ gc_at += key_gc_delay;
+
+ if (now >= gc_at) {
+ schedule_work(&key_gc_work);
+ } else if (gc_at < key_gc_next_run) {
+ key_gc_next_run = gc_at;
+ expires = jiffies + (gc_at - now) * HZ;
+ mod_timer(&key_gc_timer, expires);
+ }
+}
+
+/*
+ * The garbage collector timer kicked off
+ */
+static void key_gc_timer_func(unsigned long data)
+{
+ kenter("");
+ key_gc_next_run = LONG_MAX;
+ schedule_work(&key_gc_work);
+}
+
+/*
+ * Garbage collect pointers from a keyring
+ * - return true if we altered the keyring
+ */
+static bool key_gc_keyring(struct key *keyring, time_t limit)
+ __releases(key_serial_lock)
+{
+ struct keyring_list *klist;
+ struct key *key;
+ int loop;
+
+ kenter("%x", key_serial(keyring));
+
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ goto dont_gc;
+
+ /* scan the keyring looking for dead keys */
+ klist = rcu_dereference(keyring->payload.subscriptions);
+ if (!klist)
+ goto dont_gc;
+
+ for (loop = klist->nkeys - 1; loop >= 0; loop--) {
+ key = klist->keys[loop];
+ if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
+ (key->expiry > 0 && key->expiry <= limit))
+ goto do_gc;
+ }
+
+dont_gc:
+ kleave(" = false");
+ return false;
+
+do_gc:
+ key_gc_cursor = keyring->serial;
+ key_get(keyring);
+ spin_unlock(&key_serial_lock);
+ keyring_gc(keyring, limit);
+ key_put(keyring);
+ kleave(" = true");
+ return true;
+}
+
+/*
+ * Garbage collector for keys
+ * - this involves scanning the keyrings for dead, expired and revoked keys
+ * that have overstayed their welcome
+ */
+static void key_garbage_collector(struct work_struct *work)
+{
+ struct rb_node *rb;
+ key_serial_t cursor;
+ struct key *key, *xkey;
+ time_t new_timer = LONG_MAX, limit;
+
+ kenter("");
+
+ if (test_and_set_bit(0, &key_gc_executing)) {
+ key_schedule_gc(current_kernel_time().tv_sec);
+ return;
+ }
+
+ limit = current_kernel_time().tv_sec;
+ if (limit > key_gc_delay)
+ limit -= key_gc_delay;
+ else
+ limit = key_gc_delay;
+
+ spin_lock(&key_serial_lock);
+
+ if (RB_EMPTY_ROOT(&key_serial_tree))
+ goto reached_the_end;
+
+ cursor = key_gc_cursor;
+ if (cursor < 0)
+ cursor = 0;
+
+ /* find the first key above the cursor */
+ key = NULL;
+ rb = key_serial_tree.rb_node;
+ while (rb) {
+ xkey = rb_entry(rb, struct key, serial_node);
+ if (cursor < xkey->serial) {
+ key = xkey;
+ rb = rb->rb_left;
+ } else if (cursor > xkey->serial) {
+ rb = rb->rb_right;
+ } else {
+ rb = rb_next(rb);
+ if (!rb)
+ goto reached_the_end;
+ key = rb_entry(rb, struct key, serial_node);
+ break;
+ }
+ }
+
+ if (!key)
+ goto reached_the_end;
+
+ /* trawl through the keys looking for keyrings */
+ for (;;) {
+ if (key->expiry > 0 && key->expiry < new_timer)
+ new_timer = key->expiry;
+
+ if (key->type == &key_type_keyring &&
+ key_gc_keyring(key, limit)) {
+ /* the gc ate our lock */
+ schedule_work(&key_gc_work);
+ goto no_unlock;
+ }
+
+ rb = rb_next(&key->serial_node);
+ if (!rb) {
+ key_gc_cursor = 0;
+ break;
+ }
+ key = rb_entry(rb, struct key, serial_node);
+ }
+
+out:
+ spin_unlock(&key_serial_lock);
+no_unlock:
+ clear_bit(0, &key_gc_executing);
+ if (new_timer < LONG_MAX)
+ key_schedule_gc(new_timer);
+
+ kleave("");
+ return;
+
+reached_the_end:
+ key_gc_cursor = 0;
+ goto out;
+}
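
Note: the collector's timing is plain seconds-to-jiffies arithmetic: key_schedule_gc() arms the timer key_gc_delay seconds after the earliest expiry, and key_garbage_collector() only reaps keys whose expiry is at least key_gc_delay in the past. A small standalone sketch of that arithmetic; HZ and the timestamps are made up:

/* Illustration only -- the seconds-to-jiffies arithmetic used by
 * key_schedule_gc()/key_garbage_collector().  HZ and the timestamps are
 * made-up values for the demo. */
#include <stdio.h>

#define HZ              250             /* assumed tick rate */
#define KEY_GC_DELAY    (5 * 60)        /* default gc delay, as above */

int main(void)
{
        long now = 1000000;                     /* "seconds since boot" */
        long expiry = now + 30;                 /* a key expiring in 30s */
        long gc_at = expiry + KEY_GC_DELAY;     /* reap ~5min after expiry */
        long timer_jiffies = (gc_at - now) * HZ;
        long limit = now - KEY_GC_DELAY;        /* collector's cut-off */

        printf("timer fires %ld jiffies (%lds) from now\n",
               timer_jiffies, gc_at - now);
        printf("keys expired before t=%ld are eligible for collection\n",
               limit);
        return 0;
}
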
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 9fb679c66b8a..24ba0307b7ad 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -124,11 +124,18 @@ extern struct key *request_key_and_link(struct key_type *type,
struct key *dest_keyring,
unsigned long flags);
-extern key_ref_t lookup_user_key(key_serial_t id, int create, int partial,
+extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
key_perm_t perm);
+#define KEY_LOOKUP_CREATE 0x01
+#define KEY_LOOKUP_PARTIAL 0x02
+#define KEY_LOOKUP_FOR_UNLINK 0x04
extern long join_session_keyring(const char *name);
+extern unsigned key_gc_delay;
+extern void keyring_gc(struct key *keyring, time_t limit);
+extern void key_schedule_gc(time_t expiry_at);
+
/*
* check to see whether permission is granted to use a key in the desired way
*/
@@ -194,6 +201,7 @@ extern long keyctl_set_timeout(key_serial_t, unsigned);
extern long keyctl_assume_authority(key_serial_t);
extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
size_t buflen);
+extern long keyctl_session_to_parent(void);
/*
* debugging key validation
diff --git a/security/keys/key.c b/security/keys/key.c
index 4a1297d1ada4..08531ad0f252 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -500,6 +500,7 @@ int key_negate_and_link(struct key *key,
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
+ key_schedule_gc(key->expiry);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
@@ -642,10 +643,8 @@ struct key *key_lookup(key_serial_t id)
goto error;
found:
- /* pretend it doesn't exist if it's dead */
- if (atomic_read(&key->usage) == 0 ||
- test_bit(KEY_FLAG_DEAD, &key->flags) ||
- key->type == &key_type_dead)
+ /* pretend it doesn't exist if it is awaiting deletion */
+ if (atomic_read(&key->usage) == 0)
goto not_found;
/* this races with key_put(), but that doesn't matter since key_put()
@@ -890,6 +889,9 @@ EXPORT_SYMBOL(key_update);
*/
void key_revoke(struct key *key)
{
+ struct timespec now;
+ time_t time;
+
key_check(key);
/* make sure no one's trying to change or use the key when we mark it
@@ -902,6 +904,14 @@ void key_revoke(struct key *key)
key->type->revoke)
key->type->revoke(key);
+ /* set the death time to no more than the expiry time */
+ now = current_kernel_time();
+ time = now.tv_sec;
+ if (key->revoked_at == 0 || key->revoked_at > time) {
+ key->revoked_at = time;
+ key_schedule_gc(key->revoked_at);
+ }
+
up_write(&key->sem);
} /* end key_revoke() */
@@ -958,8 +968,10 @@ void unregister_key_type(struct key_type *ktype)
for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
key = rb_entry(_n, struct key, serial_node);
- if (key->type == ktype)
+ if (key->type == ktype) {
key->type = &key_type_dead;
+ set_bit(KEY_FLAG_DEAD, &key->flags);
+ }
}
spin_unlock(&key_serial_lock);
@@ -984,6 +996,8 @@ void unregister_key_type(struct key_type *ktype)
spin_unlock(&key_serial_lock);
up_write(&key_types_sem);
+ key_schedule_gc(0);
+
} /* end unregister_key_type() */
EXPORT_SYMBOL(unregister_key_type);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 7f09fb897d2b..74c968524592 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -103,7 +103,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
}
/* find the target keyring (which must be writable) */
- keyring_ref = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error3;
@@ -185,7 +185,8 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
/* get the destination keyring if specified */
dest_ref = NULL;
if (destringid) {
- dest_ref = lookup_user_key(destringid, 1, 0, KEY_WRITE);
+ dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
+ KEY_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
@@ -233,9 +234,11 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
long keyctl_get_keyring_ID(key_serial_t id, int create)
{
key_ref_t key_ref;
+ unsigned long lflags;
long ret;
- key_ref = lookup_user_key(id, create, 0, KEY_SEARCH);
+ lflags = create ? KEY_LOOKUP_CREATE : 0;
+ key_ref = lookup_user_key(id, lflags, KEY_SEARCH);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -309,7 +312,7 @@ long keyctl_update_key(key_serial_t id,
}
/* find the target key (which must be writable) */
- key_ref = lookup_user_key(id, 0, 0, KEY_WRITE);
+ key_ref = lookup_user_key(id, 0, KEY_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
@@ -337,10 +340,16 @@ long keyctl_revoke_key(key_serial_t id)
key_ref_t key_ref;
long ret;
- key_ref = lookup_user_key(id, 0, 0, KEY_WRITE);
+ key_ref = lookup_user_key(id, 0, KEY_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
- goto error;
+ if (ret != -EACCES)
+ goto error;
+ key_ref = lookup_user_key(id, 0, KEY_SETATTR);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
}
key_revoke(key_ref_to_ptr(key_ref));
@@ -363,7 +372,7 @@ long keyctl_keyring_clear(key_serial_t ringid)
key_ref_t keyring_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
@@ -389,13 +398,13 @@ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
key_ref_t keyring_ref, key_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
- key_ref = lookup_user_key(id, 1, 0, KEY_LINK);
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_LINK);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
@@ -423,13 +432,13 @@ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
key_ref_t keyring_ref, key_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, 0, 0, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, 0, KEY_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
- key_ref = lookup_user_key(id, 0, 0, 0);
+ key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
@@ -465,7 +474,7 @@ long keyctl_describe_key(key_serial_t keyid,
char *tmpbuf;
long ret;
- key_ref = lookup_user_key(keyid, 0, 1, KEY_VIEW);
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW);
if (IS_ERR(key_ref)) {
/* viewing a key under construction is permitted if we have the
* authorisation token handy */
@@ -474,7 +483,8 @@ long keyctl_describe_key(key_serial_t keyid,
if (!IS_ERR(instkey)) {
key_put(instkey);
key_ref = lookup_user_key(keyid,
- 0, 1, 0);
+ KEY_LOOKUP_PARTIAL,
+ 0);
if (!IS_ERR(key_ref))
goto okay;
}
@@ -558,7 +568,7 @@ long keyctl_keyring_search(key_serial_t ringid,
}
/* get the keyring at which to begin the search */
- keyring_ref = lookup_user_key(ringid, 0, 0, KEY_SEARCH);
+ keyring_ref = lookup_user_key(ringid, 0, KEY_SEARCH);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error2;
@@ -567,7 +577,8 @@ long keyctl_keyring_search(key_serial_t ringid,
/* get the destination keyring if specified */
dest_ref = NULL;
if (destringid) {
- dest_ref = lookup_user_key(destringid, 1, 0, KEY_WRITE);
+ dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
+ KEY_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
@@ -637,7 +648,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
long ret;
/* find the key first */
- key_ref = lookup_user_key(keyid, 0, 0, 0);
+ key_ref = lookup_user_key(keyid, 0, 0);
if (IS_ERR(key_ref)) {
ret = -ENOKEY;
goto error;
@@ -700,7 +711,8 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
if (uid == (uid_t) -1 && gid == (gid_t) -1)
goto error;
- key_ref = lookup_user_key(id, 1, 1, KEY_SETATTR);
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -805,7 +817,8 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
goto error;
- key_ref = lookup_user_key(id, 1, 1, KEY_SETATTR);
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -847,7 +860,7 @@ static long get_instantiation_keyring(key_serial_t ringid,
/* if a specific keyring is nominated by ID, then use that */
if (ringid > 0) {
- dkref = lookup_user_key(ringid, 1, 0, KEY_WRITE);
+ dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
if (IS_ERR(dkref))
return PTR_ERR(dkref);
*_dest_keyring = key_ref_to_ptr(dkref);
@@ -1083,7 +1096,8 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout)
time_t expiry;
long ret;
- key_ref = lookup_user_key(id, 1, 1, KEY_SETATTR);
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -1101,6 +1115,7 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout)
}
key->expiry = expiry;
+ key_schedule_gc(key->expiry);
up_write(&key->sem);
key_put(key);
@@ -1170,7 +1185,7 @@ long keyctl_get_security(key_serial_t keyid,
char *context;
long ret;
- key_ref = lookup_user_key(keyid, 0, 1, KEY_VIEW);
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW);
if (IS_ERR(key_ref)) {
if (PTR_ERR(key_ref) != -EACCES)
return PTR_ERR(key_ref);
@@ -1182,7 +1197,7 @@ long keyctl_get_security(key_serial_t keyid,
return PTR_ERR(key_ref);
key_put(instkey);
- key_ref = lookup_user_key(keyid, 0, 1, 0);
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0);
if (IS_ERR(key_ref))
return PTR_ERR(key_ref);
}
@@ -1213,6 +1228,105 @@ long keyctl_get_security(key_serial_t keyid,
return ret;
}
+/*
+ * attempt to install the calling process's session keyring on the process's
+ * parent process
+ * - the keyring must exist and must grant us LINK permission
+ * - implements keyctl(KEYCTL_SESSION_TO_PARENT)
+ */
+long keyctl_session_to_parent(void)
+{
+ struct task_struct *me, *parent;
+ const struct cred *mycred, *pcred;
+ struct cred *cred, *oldcred;
+ key_ref_t keyring_r;
+ int ret;
+
+ keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
+ if (IS_ERR(keyring_r))
+ return PTR_ERR(keyring_r);
+
+ /* our parent is going to need a new cred struct, a new tgcred struct
+ * and new security data, so we allocate them here to prevent ENOMEM in
+ * our parent */
+ ret = -ENOMEM;
+ cred = cred_alloc_blank();
+ if (!cred)
+ goto error_keyring;
+
+ cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
+ keyring_r = NULL;
+
+ me = current;
+ write_lock_irq(&tasklist_lock);
+
+ parent = me->real_parent;
+ ret = -EPERM;
+
+ /* the parent mustn't be init and mustn't be a kernel thread */
+ if (parent->pid <= 1 || !parent->mm)
+ goto not_permitted;
+
+ /* the parent must be single threaded */
+ if (atomic_read(&parent->signal->count) != 1)
+ goto not_permitted;
+
+ /* the parent and the child must have different session keyrings or
+ * there's no point */
+ mycred = current_cred();
+ pcred = __task_cred(parent);
+ if (mycred == pcred ||
+ mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
+ goto already_same;
+
+ /* the parent must have the same effective ownership and mustn't be
+ * SUID/SGID */
+ if (pcred-> uid != mycred->euid ||
+ pcred->euid != mycred->euid ||
+ pcred->suid != mycred->euid ||
+ pcred-> gid != mycred->egid ||
+ pcred->egid != mycred->egid ||
+ pcred->sgid != mycred->egid)
+ goto not_permitted;
+
+ /* the keyrings must have the same UID */
+ if (pcred ->tgcred->session_keyring->uid != mycred->euid ||
+ mycred->tgcred->session_keyring->uid != mycred->euid)
+ goto not_permitted;
+
+ /* the LSM must permit the replacement of the parent's keyring with the
+ * keyring from this process */
+ ret = security_key_session_to_parent(mycred, pcred,
+ key_ref_to_ptr(keyring_r));
+ if (ret < 0)
+ goto not_permitted;
+
+ /* if there's an already pending keyring replacement, then we replace
+ * that */
+ oldcred = parent->replacement_session_keyring;
+
+ /* the replacement session keyring is applied just prior to userspace
+ * restarting */
+ parent->replacement_session_keyring = cred;
+ cred = NULL;
+ set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
+
+ write_unlock_irq(&tasklist_lock);
+ if (oldcred)
+ put_cred(oldcred);
+ return 0;
+
+already_same:
+ ret = 0;
+not_permitted:
+ put_cred(cred);
+ return ret;
+
+error_keyring:
+ key_ref_put(keyring_r);
+ return ret;
+}
+
/*****************************************************************************/
/*
* the key control system call
@@ -1298,6 +1412,9 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
(char __user *) arg3,
(size_t) arg4);
+ case KEYCTL_SESSION_TO_PARENT:
+ return keyctl_session_to_parent();
+
default:
return -EOPNOTSUPP;
}
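
Note: a single-threaded helper (for example a shell builtin) would use the new operation roughly as below to hand a fresh session keyring back to its parent; the swap is applied when the parent next returns to userspace via TIF_NOTIFY_RESUME. This is a minimal sketch, assuming kernel headers that already define KEYCTL_SESSION_TO_PARENT:

/* Illustration only -- how a single-threaded helper might hand a fresh
 * session keyring back to its parent with the KEYCTL_SESSION_TO_PARENT
 * operation added above.  Requires kernel headers that define the new
 * constant; error handling is minimal. */
#include <linux/keyctl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        /* join (or create) a named session keyring for this process */
        long ring = syscall(__NR_keyctl, KEYCTL_JOIN_SESSION_KEYRING, "demo");
        if (ring < 0) {
                perror("join_session_keyring");
                return 1;
        }

        /* ask the kernel to install it on our parent; the swap happens
         * when the parent next returns to userspace (TIF_NOTIFY_RESUME) */
        if (syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT) < 0) {
                perror("keyctl_session_to_parent");
                return 1;
        }
        return 0;
}
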
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 3dba81c2eba3..ac977f661a79 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1000,3 +1000,88 @@ static void keyring_revoke(struct key *keyring)
}
} /* end keyring_revoke() */
+
+/*
+ * Determine whether a key is dead
+ */
+static bool key_is_dead(struct key *key, time_t limit)
+{
+ return test_bit(KEY_FLAG_DEAD, &key->flags) ||
+ (key->expiry > 0 && key->expiry <= limit);
+}
+
+/*
+ * Collect garbage from the contents of a keyring
+ */
+void keyring_gc(struct key *keyring, time_t limit)
+{
+ struct keyring_list *klist, *new;
+ struct key *key;
+ int loop, keep, max;
+
+ kenter("%x", key_serial(keyring));
+
+ down_write(&keyring->sem);
+
+ klist = keyring->payload.subscriptions;
+ if (!klist)
+ goto just_return;
+
+ /* work out how many subscriptions we're keeping */
+ keep = 0;
+ for (loop = klist->nkeys - 1; loop >= 0; loop--)
+ if (!key_is_dead(klist->keys[loop], limit))
+ keep++;
+
+ if (keep == klist->nkeys)
+ goto just_return;
+
+ /* allocate a new keyring payload */
+ max = roundup(keep, 4);
+ new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
+ GFP_KERNEL);
+ if (!new)
+ goto just_return;
+ new->maxkeys = max;
+ new->nkeys = 0;
+ new->delkey = 0;
+
+ /* install the live keys
+ * - must take care as expired keys may be updated back to life
+ */
+ keep = 0;
+ for (loop = klist->nkeys - 1; loop >= 0; loop--) {
+ key = klist->keys[loop];
+ if (!key_is_dead(key, limit)) {
+ if (keep >= max)
+ goto discard_new;
+ new->keys[keep++] = key_get(key);
+ }
+ }
+ new->nkeys = keep;
+
+ /* adjust the quota */
+ key_payload_reserve(keyring,
+ sizeof(struct keyring_list) +
+ KEYQUOTA_LINK_BYTES * keep);
+
+ if (keep == 0) {
+ rcu_assign_pointer(keyring->payload.subscriptions, NULL);
+ kfree(new);
+ } else {
+ rcu_assign_pointer(keyring->payload.subscriptions, new);
+ }
+
+ up_write(&keyring->sem);
+
+ call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
+ kleave(" [yes]");
+ return;
+
+discard_new:
+ new->nkeys = keep;
+ keyring_clear_rcu_disposal(&new->rcu);
+just_return:
+ up_write(&keyring->sem);
+ kleave(" [no]");
+}
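
Note: keyring_gc() follows the usual copy-and-publish shape: count the live entries, build a right-sized replacement list, publish it with rcu_assign_pointer(), and free the old list via call_rcu() once readers have moved on. A userspace mock of just the compaction step, with hypothetical names and data:

/* Illustration only -- the compaction step of keyring_gc() mocked up in
 * userspace: copy live entries into a right-sized new array; the kernel
 * then publishes it with rcu_assign_pointer() and frees the old list via
 * call_rcu().  Names and data are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct entry { int serial; int dead; };

static struct entry *compact(const struct entry *old, int n, int *out_n)
{
        struct entry *new;
        int keep = 0, i;

        for (i = 0; i < n; i++)
                if (!old[i].dead)
                        keep++;

        new = malloc(sizeof(*new) * (keep ? keep : 1));
        if (!new)
                return NULL;

        *out_n = 0;
        for (i = 0; i < n; i++)
                if (!old[i].dead)
                        new[(*out_n)++] = old[i];
        return new;     /* kernel: rcu_assign_pointer(), then call_rcu(old) */
}

int main(void)
{
        struct entry old[] = { { 1, 0 }, { 2, 1 }, { 3, 0 } };
        int n = 0;
        struct entry *live = compact(old, 3, &n);

        printf("kept %d of 3 entries\n", n);
        free(live);
        return 0;
}
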
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 769f9bdfd2b3..9d01021ca0c8 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -91,59 +91,94 @@ __initcall(key_proc_init);
*/
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
-static struct rb_node *__key_serial_next(struct rb_node *n)
+static struct rb_node *key_serial_next(struct rb_node *n)
{
+ struct user_namespace *user_ns = current_user_ns();
+
+ n = rb_next(n);
while (n) {
struct key *key = rb_entry(n, struct key, serial_node);
- if (key->user->user_ns == current_user_ns())
+ if (key->user->user_ns == user_ns)
break;
n = rb_next(n);
}
return n;
}
-static struct rb_node *key_serial_next(struct rb_node *n)
+static int proc_keys_open(struct inode *inode, struct file *file)
{
- return __key_serial_next(rb_next(n));
+ return seq_open(file, &proc_keys_ops);
}
-static struct rb_node *key_serial_first(struct rb_root *r)
+static struct key *find_ge_key(key_serial_t id)
{
- struct rb_node *n = rb_first(r);
- return __key_serial_next(n);
-}
+ struct user_namespace *user_ns = current_user_ns();
+ struct rb_node *n = key_serial_tree.rb_node;
+ struct key *minkey = NULL;
-static int proc_keys_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &proc_keys_ops);
+ while (n) {
+ struct key *key = rb_entry(n, struct key, serial_node);
+ if (id < key->serial) {
+ if (!minkey || minkey->serial > key->serial)
+ minkey = key;
+ n = n->rb_left;
+ } else if (id > key->serial) {
+ n = n->rb_right;
+ } else {
+ minkey = key;
+ break;
+ }
+ key = NULL;
+ }
+ if (!minkey)
+ return NULL;
+
+ for (;;) {
+ if (minkey->user->user_ns == user_ns)
+ return minkey;
+ n = rb_next(&minkey->serial_node);
+ if (!n)
+ return NULL;
+ minkey = rb_entry(n, struct key, serial_node);
+ }
}
static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
+ __acquires(key_serial_lock)
{
- struct rb_node *_p;
- loff_t pos = *_pos;
+ key_serial_t pos = *_pos;
+ struct key *key;
spin_lock(&key_serial_lock);
- _p = key_serial_first(&key_serial_tree);
- while (pos > 0 && _p) {
- pos--;
- _p = key_serial_next(_p);
- }
-
- return _p;
+ if (*_pos > INT_MAX)
+ return NULL;
+ key = find_ge_key(pos);
+ if (!key)
+ return NULL;
+ *_pos = key->serial;
+ return &key->serial_node;
+}
+static inline key_serial_t key_node_serial(struct rb_node *n)
+{
+ struct key *key = rb_entry(n, struct key, serial_node);
+ return key->serial;
}
static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
{
- (*_pos)++;
- return key_serial_next((struct rb_node *) v);
+ struct rb_node *n;
+ n = key_serial_next(v);
+ if (n)
+ *_pos = key_node_serial(n);
+ return n;
}
static void proc_keys_stop(struct seq_file *p, void *v)
+ __releases(key_serial_lock)
{
spin_unlock(&key_serial_lock);
}
@@ -174,11 +209,9 @@ static int proc_keys_show(struct seq_file *m, void *v)
/* come up with a suitable timeout value */
if (key->expiry == 0) {
memcpy(xbuf, "perm", 5);
- }
- else if (now.tv_sec >= key->expiry) {
+ } else if (now.tv_sec >= key->expiry) {
memcpy(xbuf, "expd", 5);
- }
- else {
+ } else {
timo = key->expiry - now.tv_sec;
if (timo < 60)
@@ -218,9 +251,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
seq_putc(m, '\n');
rcu_read_unlock();
-
return 0;
-
}
#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
@@ -246,6 +277,7 @@ static struct rb_node *key_user_first(struct rb_root *r)
struct rb_node *n = rb_first(r);
return __key_user_next(n);
}
+
/*****************************************************************************/
/*
* implement "/proc/key-users" to provides a list of the key users
@@ -253,10 +285,10 @@ static struct rb_node *key_user_first(struct rb_root *r)
static int proc_key_users_open(struct inode *inode, struct file *file)
{
return seq_open(file, &proc_key_users_ops);
-
}
static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
+ __acquires(key_user_lock)
{
struct rb_node *_p;
loff_t pos = *_pos;
@@ -270,17 +302,16 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
}
return _p;
-
}
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
(*_pos)++;
return key_user_next((struct rb_node *) v);
-
}
static void proc_key_users_stop(struct seq_file *p, void *v)
+ __releases(key_user_lock)
{
spin_unlock(&key_user_lock);
}
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 276d27882ce8..5c23afb31ece 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/security.h>
#include <linux/user_namespace.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -487,7 +488,7 @@ static int lookup_user_key_possessed(const struct key *key, const void *target)
* - don't create special keyrings unless so requested
* - partially constructed keys aren't found unless requested
*/
-key_ref_t lookup_user_key(key_serial_t id, int create, int partial,
+key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
struct request_key_auth *rka;
@@ -503,7 +504,7 @@ try_again:
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
if (!cred->thread_keyring) {
- if (!create)
+ if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_thread_keyring();
@@ -521,7 +522,7 @@ try_again:
case KEY_SPEC_PROCESS_KEYRING:
if (!cred->tgcred->process_keyring) {
- if (!create)
+ if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_process_keyring();
@@ -642,7 +643,14 @@ try_again:
break;
}
- if (!partial) {
+ /* unlink does not use the nominated key in any way, so can skip all
+ * the permission checks as it is only concerned with the keyring */
+ if (lflags & KEY_LOOKUP_FOR_UNLINK) {
+ ret = 0;
+ goto error;
+ }
+
+ if (!(lflags & KEY_LOOKUP_PARTIAL)) {
ret = wait_for_key_construction(key, true);
switch (ret) {
case -ERESTARTSYS:
@@ -660,7 +668,8 @@ try_again:
}
ret = -EIO;
- if (!partial && !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ if (!(lflags & KEY_LOOKUP_PARTIAL) &&
+ !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
goto invalid_key;
/* check the permissions */
@@ -702,7 +711,7 @@ long join_session_keyring(const char *name)
/* only permit this if there's a single thread in the thread group -
* this avoids us having to adjust the creds on all threads and risking
* ENOMEM */
- if (!is_single_threaded(current))
+ if (!current_is_single_threaded())
return -EMLINK;
new = prepare_creds();
@@ -760,3 +769,51 @@ error:
abort_creds(new);
return ret;
}
+
+/*
+ * Replace a process's session keyring when that process resumes userspace on
+ * behalf of one of its children
+ */
+void key_replace_session_keyring(void)
+{
+ const struct cred *old;
+ struct cred *new;
+
+ if (!current->replacement_session_keyring)
+ return;
+
+ write_lock_irq(&tasklist_lock);
+ new = current->replacement_session_keyring;
+ current->replacement_session_keyring = NULL;
+ write_unlock_irq(&tasklist_lock);
+
+ if (!new)
+ return;
+
+ old = current_cred();
+ new-> uid = old-> uid;
+ new-> euid = old-> euid;
+ new-> suid = old-> suid;
+ new->fsuid = old->fsuid;
+ new-> gid = old-> gid;
+ new-> egid = old-> egid;
+ new-> sgid = old-> sgid;
+ new->fsgid = old->fsgid;
+ new->user = get_uid(old->user);
+ new->group_info = get_group_info(old->group_info);
+
+ new->securebits = old->securebits;
+ new->cap_inheritable = old->cap_inheritable;
+ new->cap_permitted = old->cap_permitted;
+ new->cap_effective = old->cap_effective;
+ new->cap_bset = old->cap_bset;
+
+ new->jit_keyring = old->jit_keyring;
+ new->thread_keyring = key_get(old->thread_keyring);
+ new->tgcred->tgid = old->tgcred->tgid;
+ new->tgcred->process_keyring = key_get(old->tgcred->process_keyring);
+
+ security_transfer_creds(new, old);
+
+ commit_creds(new);
+}
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index b611d493c2d8..5e05dc09e2db 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -13,6 +13,8 @@
#include <linux/sysctl.h>
#include "internal.h"
+static const int zero, one = 1, max = INT_MAX;
+
ctl_table key_sysctls[] = {
{
.ctl_name = CTL_UNNUMBERED,
@@ -20,7 +22,9 @@ ctl_table key_sysctls[] = {
.data = &key_quota_maxkeys,
.maxlen = sizeof(unsigned),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *) &one,
+ .extra2 = (void *) &max,
},
{
.ctl_name = CTL_UNNUMBERED,
@@ -28,7 +32,9 @@ ctl_table key_sysctls[] = {
.data = &key_quota_maxbytes,
.maxlen = sizeof(unsigned),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *) &one,
+ .extra2 = (void *) &max,
},
{
.ctl_name = CTL_UNNUMBERED,
@@ -36,7 +42,9 @@ ctl_table key_sysctls[] = {
.data = &key_quota_root_maxkeys,
.maxlen = sizeof(unsigned),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *) &one,
+ .extra2 = (void *) &max,
},
{
.ctl_name = CTL_UNNUMBERED,
@@ -44,7 +52,19 @@ ctl_table key_sysctls[] = {
.data = &key_quota_root_maxbytes,
.maxlen = sizeof(unsigned),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *) &one,
+ .extra2 = (void *) &max,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "gc_delay",
+ .data = &key_gc_delay,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *) &zero,
+ .extra2 = (void *) &max,
},
{ .ctl_name = 0 }
};
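
Note: switching these entries to proc_dointvec_minmax() with extra1/extra2 means a write outside the allowed window is rejected with EINVAL instead of being stored. A short userspace check of that behaviour, assuming the keys sysctls live under /proc/sys/kernel/keys/ as in mainline and that the caller has root:

/* Illustration only -- with proc_dointvec_minmax() and extra1/extra2 set
 * as above, an out-of-range write to the sysctl file fails with EINVAL
 * instead of being stored.  Assumes the keys sysctls live under
 * /proc/sys/kernel/keys/ (as in mainline) and requires root. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_sysctl(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0)
                return -errno;
        n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -errno : 0;
}

int main(void)
{
        const char *path = "/proc/sys/kernel/keys/gc_delay";

        printf("set 300: %d\n", write_sysctl(path, "300")); /* accepted */
        printf("set -1 : %d\n", write_sysctl(path, "-1"));  /* now -EINVAL */
        return 0;
}
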
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 94b868494b31..500aad0ebd6a 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -220,6 +220,8 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
switch (a->type) {
+ case LSM_AUDIT_NO_AUDIT:
+ return;
case LSM_AUDIT_DATA_IPC:
audit_log_format(ab, " key=%d ", a->u.ipc_id);
break;
diff --git a/security/security.c b/security/security.c
index dc7674fbfc7a..c4c673240c1c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -124,9 +124,9 @@ int register_security(struct security_operations *ops)
/* Security operations */
-int security_ptrace_may_access(struct task_struct *child, unsigned int mode)
+int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
- return security_ops->ptrace_may_access(child, mode);
+ return security_ops->ptrace_access_check(child, mode);
}
int security_ptrace_traceme(struct task_struct *parent)
@@ -684,6 +684,11 @@ int security_task_create(unsigned long clone_flags)
return security_ops->task_create(clone_flags);
}
+int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ return security_ops->cred_alloc_blank(cred, gfp);
+}
+
void security_cred_free(struct cred *cred)
{
security_ops->cred_free(cred);
@@ -699,6 +704,11 @@ void security_commit_creds(struct cred *new, const struct cred *old)
security_ops->cred_commit(new, old);
}
+void security_transfer_creds(struct cred *new, const struct cred *old)
+{
+ security_ops->cred_transfer(new, old);
+}
+
int security_kernel_act_as(struct cred *new, u32 secid)
{
return security_ops->kernel_act_as(new, secid);
@@ -709,6 +719,11 @@ int security_kernel_create_files_as(struct cred *new, struct inode *inode)
return security_ops->kernel_create_files_as(new, inode);
}
+int security_kernel_module_request(void)
+{
+ return security_ops->kernel_module_request();
+}
+
int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
{
return security_ops->task_setuid(id0, id1, id2, flags);
@@ -959,6 +974,24 @@ void security_release_secctx(char *secdata, u32 seclen)
}
EXPORT_SYMBOL(security_release_secctx);
+int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return security_ops->inode_notifysecctx(inode, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_notifysecctx);
+
+int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return security_ops->inode_setsecctx(dentry, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_setsecctx);
+
+int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ return security_ops->inode_getsecctx(inode, ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_inode_getsecctx);
+
#ifdef CONFIG_SECURITY_NETWORK
int security_unix_stream_connect(struct socket *sock, struct socket *other,
@@ -1112,6 +1145,24 @@ void security_inet_conn_established(struct sock *sk,
security_ops->inet_conn_established(sk, skb);
}
+int security_tun_dev_create(void)
+{
+ return security_ops->tun_dev_create();
+}
+EXPORT_SYMBOL(security_tun_dev_create);
+
+void security_tun_dev_post_create(struct sock *sk)
+{
+ return security_ops->tun_dev_post_create(sk);
+}
+EXPORT_SYMBOL(security_tun_dev_post_create);
+
+int security_tun_dev_attach(struct sock *sk)
+{
+ return security_ops->tun_dev_attach(sk);
+}
+EXPORT_SYMBOL(security_tun_dev_attach);
+
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1218,6 +1269,13 @@ int security_key_getsecurity(struct key *key, char **_buffer)
return security_ops->key_getsecurity(key, _buffer);
}
+int security_key_session_to_parent(const struct cred *cred,
+ const struct cred *parent_cred,
+ struct key *key)
+{
+ return security_ops->key_session_to_parent(cred, parent_cred, key);
+}
+
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index b2ab60859832..e3d19014259b 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -137,7 +137,7 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
* @tclass: target security class
* @av: access vector
*/
-void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
+static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
{
const char **common_pts = NULL;
u32 common_base = 0;
@@ -492,23 +492,35 @@ out:
return node;
}
-static inline void avc_print_ipv6_addr(struct audit_buffer *ab,
- struct in6_addr *addr, __be16 port,
- char *name1, char *name2)
+/**
+ * avc_audit_pre_callback - SELinux specific information
+ * will be called by generic audit code
+ * @ab: the audit buffer
+ * @a: audit_data
+ */
+static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
{
- if (!ipv6_addr_any(addr))
- audit_log_format(ab, " %s=%pI6", name1, addr);
- if (port)
- audit_log_format(ab, " %s=%d", name2, ntohs(port));
+ struct common_audit_data *ad = a;
+ audit_log_format(ab, "avc: %s ",
+ ad->selinux_audit_data.denied ? "denied" : "granted");
+ avc_dump_av(ab, ad->selinux_audit_data.tclass,
+ ad->selinux_audit_data.audited);
+ audit_log_format(ab, " for ");
}
-static inline void avc_print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
- __be16 port, char *name1, char *name2)
+/**
+ * avc_audit_post_callback - SELinux specific information
+ * will be called by generic audit code
+ * @ab: the audit buffer
+ * @a: audit_data
+ */
+static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
- if (addr)
- audit_log_format(ab, " %s=%pI4", name1, &addr);
- if (port)
- audit_log_format(ab, " %s=%d", name2, ntohs(port));
+ struct common_audit_data *ad = a;
+ audit_log_format(ab, " ");
+ avc_dump_query(ab, ad->selinux_audit_data.ssid,
+ ad->selinux_audit_data.tsid,
+ ad->selinux_audit_data.tclass);
}
/**
@@ -532,13 +544,10 @@ static inline void avc_print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
*/
void avc_audit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
- struct av_decision *avd, int result, struct avc_audit_data *a)
+ struct av_decision *avd, int result, struct common_audit_data *a)
{
- struct task_struct *tsk = current;
- struct inode *inode = NULL;
+ struct common_audit_data stack_data;
u32 denied, audited;
- struct audit_buffer *ab;
-
denied = requested & ~avd->allowed;
if (denied) {
audited = denied;
@@ -551,144 +560,20 @@ void avc_audit(u32 ssid, u32 tsid,
if (!(audited & avd->auditallow))
return;
}
-
- ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC);
- if (!ab)
- return; /* audit_panic has been called */
- audit_log_format(ab, "avc: %s ", denied ? "denied" : "granted");
- avc_dump_av(ab, tclass, audited);
- audit_log_format(ab, " for ");
- if (a && a->tsk)
- tsk = a->tsk;
- if (tsk && tsk->pid) {
- audit_log_format(ab, " pid=%d comm=", tsk->pid);
- audit_log_untrustedstring(ab, tsk->comm);
+ if (!a) {
+ a = &stack_data;
+ memset(a, 0, sizeof(*a));
+ a->type = LSM_AUDIT_NO_AUDIT;
}
- if (a) {
- switch (a->type) {
- case AVC_AUDIT_DATA_IPC:
- audit_log_format(ab, " key=%d", a->u.ipc_id);
- break;
- case AVC_AUDIT_DATA_CAP:
- audit_log_format(ab, " capability=%d", a->u.cap);
- break;
- case AVC_AUDIT_DATA_FS:
- if (a->u.fs.path.dentry) {
- struct dentry *dentry = a->u.fs.path.dentry;
- if (a->u.fs.path.mnt) {
- audit_log_d_path(ab, "path=",
- &a->u.fs.path);
- } else {
- audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab, dentry->d_name.name);
- }
- inode = dentry->d_inode;
- } else if (a->u.fs.inode) {
- struct dentry *dentry;
- inode = a->u.fs.inode;
- dentry = d_find_alias(inode);
- if (dentry) {
- audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab, dentry->d_name.name);
- dput(dentry);
- }
- }
- if (inode)
- audit_log_format(ab, " dev=%s ino=%lu",
- inode->i_sb->s_id,
- inode->i_ino);
- break;
- case AVC_AUDIT_DATA_NET:
- if (a->u.net.sk) {
- struct sock *sk = a->u.net.sk;
- struct unix_sock *u;
- int len = 0;
- char *p = NULL;
-
- switch (sk->sk_family) {
- case AF_INET: {
- struct inet_sock *inet = inet_sk(sk);
-
- avc_print_ipv4_addr(ab, inet->rcv_saddr,
- inet->sport,
- "laddr", "lport");
- avc_print_ipv4_addr(ab, inet->daddr,
- inet->dport,
- "faddr", "fport");
- break;
- }
- case AF_INET6: {
- struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *inet6 = inet6_sk(sk);
-
- avc_print_ipv6_addr(ab, &inet6->rcv_saddr,
- inet->sport,
- "laddr", "lport");
- avc_print_ipv6_addr(ab, &inet6->daddr,
- inet->dport,
- "faddr", "fport");
- break;
- }
- case AF_UNIX:
- u = unix_sk(sk);
- if (u->dentry) {
- struct path path = {
- .dentry = u->dentry,
- .mnt = u->mnt
- };
- audit_log_d_path(ab, "path=",
- &path);
- break;
- }
- if (!u->addr)
- break;
- len = u->addr->len-sizeof(short);
- p = &u->addr->name->sun_path[0];
- audit_log_format(ab, " path=");
- if (*p)
- audit_log_untrustedstring(ab, p);
- else
- audit_log_n_hex(ab, p, len);
- break;
- }
- }
-
- switch (a->u.net.family) {
- case AF_INET:
- avc_print_ipv4_addr(ab, a->u.net.v4info.saddr,
- a->u.net.sport,
- "saddr", "src");
- avc_print_ipv4_addr(ab, a->u.net.v4info.daddr,
- a->u.net.dport,
- "daddr", "dest");
- break;
- case AF_INET6:
- avc_print_ipv6_addr(ab, &a->u.net.v6info.saddr,
- a->u.net.sport,
- "saddr", "src");
- avc_print_ipv6_addr(ab, &a->u.net.v6info.daddr,
- a->u.net.dport,
- "daddr", "dest");
- break;
- }
- if (a->u.net.netif > 0) {
- struct net_device *dev;
-
- /* NOTE: we always use init's namespace */
- dev = dev_get_by_index(&init_net,
- a->u.net.netif);
- if (dev) {
- audit_log_format(ab, " netif=%s",
- dev->name);
- dev_put(dev);
- }
- }
- break;
- }
- }
- audit_log_format(ab, " ");
- avc_dump_query(ab, ssid, tsid, tclass);
- audit_log_end(ab);
+ a->selinux_audit_data.tclass = tclass;
+ a->selinux_audit_data.requested = requested;
+ a->selinux_audit_data.ssid = ssid;
+ a->selinux_audit_data.tsid = tsid;
+ a->selinux_audit_data.audited = audited;
+ a->selinux_audit_data.denied = denied;
+ a->lsm_pre_audit = avc_audit_pre_callback;
+ a->lsm_post_audit = avc_audit_post_callback;
+ common_lsm_audit(a);
}
/**
@@ -956,7 +841,7 @@ out:
* another -errno upon other errors.
*/
int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, struct avc_audit_data *auditdata)
+ u32 requested, struct common_audit_data *auditdata)
{
struct av_decision avd;
int rc;
@@ -970,3 +855,9 @@ u32 avc_policy_seqno(void)
{
return avc_cache.latest_notif;
}
+
+void avc_disable(void)
+{
+ if (avc_node_cachep)
+ kmem_cache_destroy(avc_node_cachep);
+}
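
Note: the avc_audit() rewrite above moves all generic output into common_lsm_audit() and keeps only the SELinux-specific prefix and suffix in pre/post callbacks. A userspace mock of that callback split; everything below is hypothetical, not the kernel API:

/* Illustration only -- a userspace mock of the callback split introduced
 * above: common_lsm_audit() owns the buffer and the common fields, while
 * the LSM supplies pre/post callbacks for its module-specific prefix and
 * suffix.  Everything below is hypothetical, not the kernel API. */
#include <stdio.h>

struct mock_audit_data {
        const char *op;
        int denied;
        void (*pre)(const struct mock_audit_data *);
        void (*post)(const struct mock_audit_data *);
};

static void mock_common_audit(const struct mock_audit_data *a)
{
        a->pre(a);                              /* LSM-specific prefix */
        printf("pid=%d comm=demo", 1234);       /* common fields */
        a->post(a);                             /* LSM-specific suffix */
        printf("\n");
}

static void selinux_pre(const struct mock_audit_data *a)
{
        printf("avc: %s { %s } for ", a->denied ? "denied" : "granted", a->op);
}

static void selinux_post(const struct mock_audit_data *a)
{
        (void)a;
        printf(" tclass=file");
}

int main(void)
{
        struct mock_audit_data a = {
                .op = "read", .denied = 1,
                .pre = selinux_pre, .post = selinux_post,
        };

        mock_common_audit(&a);
        return 0;
}
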
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 8d8b69c5664e..417f7c994522 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -13,8 +13,8 @@
* Eric Paris <eparis@redhat.com>
* Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
* <dgoeddel@trustedcs.com>
- * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
- * Paul Moore <paul.moore@hp.com>
+ * Copyright (C) 2006, 2007, 2009 Hewlett-Packard Development Company, L.P.
+ * Paul Moore <paul.moore@hp.com>
* Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
* Yuichi Nakamura <ynakam@hitachisoft.jp>
*
@@ -448,6 +448,10 @@ static int sb_finish_set_opts(struct super_block *sb)
sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
sbsec->flags &= ~SE_SBLABELSUPP;
+ /* Special handling for sysfs. Is genfs but also has setxattr handler*/
+ if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
+ sbsec->flags |= SE_SBLABELSUPP;
+
/* Initialize the root inode. */
rc = inode_doinit_with_dentry(root_inode, root);
@@ -1479,14 +1483,14 @@ static int task_has_capability(struct task_struct *tsk,
const struct cred *cred,
int cap, int audit)
{
- struct avc_audit_data ad;
+ struct common_audit_data ad;
struct av_decision avd;
u16 sclass;
u32 sid = cred_sid(cred);
u32 av = CAP_TO_MASK(cap);
int rc;
- AVC_AUDIT_DATA_INIT(&ad, CAP);
+ COMMON_AUDIT_DATA_INIT(&ad, CAP);
ad.tsk = tsk;
ad.u.cap = cap;
@@ -1525,12 +1529,14 @@ static int task_has_system(struct task_struct *tsk,
static int inode_has_perm(const struct cred *cred,
struct inode *inode,
u32 perms,
- struct avc_audit_data *adp)
+ struct common_audit_data *adp)
{
struct inode_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid;
+ validate_creds(cred);
+
if (unlikely(IS_PRIVATE(inode)))
return 0;
@@ -1539,7 +1545,7 @@ static int inode_has_perm(const struct cred *cred,
if (!adp) {
adp = &ad;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.inode = inode;
}
@@ -1555,9 +1561,9 @@ static inline int dentry_has_perm(const struct cred *cred,
u32 av)
{
struct inode *inode = dentry->d_inode;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path.mnt = mnt;
ad.u.fs.path.dentry = dentry;
return inode_has_perm(cred, inode, av, &ad);
@@ -1577,11 +1583,11 @@ static int file_has_perm(const struct cred *cred,
{
struct file_security_struct *fsec = file->f_security;
struct inode *inode = file->f_path.dentry->d_inode;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = cred_sid(cred);
int rc;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path = file->f_path;
if (sid != fsec->sid) {
@@ -1612,7 +1618,7 @@ static int may_create(struct inode *dir,
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
u32 sid, newsid;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
int rc;
dsec = dir->i_security;
@@ -1621,7 +1627,7 @@ static int may_create(struct inode *dir,
sid = tsec->sid;
newsid = tsec->create_sid;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path.dentry = dentry;
rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
@@ -1665,7 +1671,7 @@ static int may_link(struct inode *dir,
{
struct inode_security_struct *dsec, *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
u32 av;
int rc;
@@ -1673,7 +1679,7 @@ static int may_link(struct inode *dir,
dsec = dir->i_security;
isec = dentry->d_inode->i_security;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path.dentry = dentry;
av = DIR__SEARCH;
@@ -1708,7 +1714,7 @@ static inline int may_rename(struct inode *old_dir,
struct dentry *new_dentry)
{
struct inode_security_struct *old_dsec, *new_dsec, *old_isec, *new_isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
u32 av;
int old_is_dir, new_is_dir;
@@ -1719,7 +1725,7 @@ static inline int may_rename(struct inode *old_dir,
old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
new_dsec = new_dir->i_security;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path.dentry = old_dentry;
rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
@@ -1761,7 +1767,7 @@ static inline int may_rename(struct inode *old_dir,
static int superblock_has_perm(const struct cred *cred,
struct super_block *sb,
u32 perms,
- struct avc_audit_data *ad)
+ struct common_audit_data *ad)
{
struct superblock_security_struct *sbsec;
u32 sid = cred_sid(cred);
@@ -1855,12 +1861,12 @@ static inline u32 open_file_to_av(struct file *file)
/* Hook functions begin here. */
-static int selinux_ptrace_may_access(struct task_struct *child,
+static int selinux_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
int rc;
- rc = cap_ptrace_may_access(child, mode);
+ rc = cap_ptrace_access_check(child, mode);
if (rc)
return rc;
@@ -2101,7 +2107,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
const struct task_security_struct *old_tsec;
struct task_security_struct *new_tsec;
struct inode_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
struct inode *inode = bprm->file->f_path.dentry->d_inode;
int rc;
@@ -2139,7 +2145,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
return rc;
}
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path = bprm->file->f_path;
if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
@@ -2232,7 +2238,7 @@ extern struct dentry *selinux_null;
static inline void flush_unauthorized_files(const struct cred *cred,
struct files_struct *files)
{
- struct avc_audit_data ad;
+ struct common_audit_data ad;
struct file *file, *devnull = NULL;
struct tty_struct *tty;
struct fdtable *fdt;
@@ -2266,7 +2272,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
/* Revalidate access to inherited open files. */
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
spin_lock(&files->file_lock);
for (;;) {
@@ -2515,7 +2521,7 @@ out:
static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
const struct cred *cred = current_cred();
- struct avc_audit_data ad;
+ struct common_audit_data ad;
int rc;
rc = superblock_doinit(sb, data);
@@ -2526,7 +2532,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
if (flags & MS_KERNMOUNT)
return 0;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path.dentry = sb->s_root;
return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
}
@@ -2534,9 +2540,9 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
static int selinux_sb_statfs(struct dentry *dentry)
{
const struct cred *cred = current_cred();
- struct avc_audit_data ad;
+ struct common_audit_data ad;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path.dentry = dentry->d_sb->s_root;
return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
@@ -2711,12 +2717,18 @@ static int selinux_inode_permission(struct inode *inode, int mask)
static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
const struct cred *cred = current_cred();
+ unsigned int ia_valid = iattr->ia_valid;
+
+ /* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */
+ if (ia_valid & ATTR_FORCE) {
+ ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_MODE |
+ ATTR_FORCE);
+ if (!ia_valid)
+ return 0;
+ }
- if (iattr->ia_valid & ATTR_FORCE)
- return 0;
-
- if (iattr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
- ATTR_ATIME_SET | ATTR_MTIME_SET))
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
+ ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR);
return dentry_has_perm(cred, NULL, dentry, FILE__WRITE);
@@ -2756,7 +2768,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
struct inode *inode = dentry->d_inode;
struct inode_security_struct *isec = inode->i_security;
struct superblock_security_struct *sbsec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 newsid, sid = current_sid();
int rc = 0;
@@ -2770,7 +2782,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
if (!is_owner_or_cap(inode))
return -EPERM;
- AVC_AUDIT_DATA_INIT(&ad, FS);
+ COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path.dentry = dentry;
rc = avc_has_perm(sid, isec->sid, isec->sclass,
@@ -2915,6 +2927,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
return rc;
isec->sid = newsid;
+ isec->initialized = 1;
return 0;
}
@@ -2939,11 +2952,6 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
const struct cred *cred = current_cred();
struct inode *inode = file->f_path.dentry->d_inode;
- if (!mask) {
- /* No permission to check. Existence test. */
- return 0;
- }
-
/* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
mask |= MAY_APPEND;
@@ -2954,10 +2962,20 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
static int selinux_file_permission(struct file *file, int mask)
{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct file_security_struct *fsec = file->f_security;
+ struct inode_security_struct *isec = inode->i_security;
+ u32 sid = current_sid();
+
if (!mask)
/* No permission to check. Existence test. */
return 0;
+ if (sid == fsec->sid && fsec->isid == isec->sid &&
+ fsec->pseqno == avc_policy_seqno())
+ /* No change since dentry_open check. */
+ return 0;
+
return selinux_revalidate_file_permission(file, mask);
}
@@ -3220,12 +3238,29 @@ static int selinux_task_create(unsigned long clone_flags)
}
/*
+ * allocate the SELinux part of blank credentials
+ */
+static int selinux_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ struct task_security_struct *tsec;
+
+ tsec = kzalloc(sizeof(struct task_security_struct), gfp);
+ if (!tsec)
+ return -ENOMEM;
+
+ cred->security = tsec;
+ return 0;
+}
+
+/*
* detach and free the LSM part of a set of credentials
*/
static void selinux_cred_free(struct cred *cred)
{
struct task_security_struct *tsec = cred->security;
- cred->security = NULL;
+
+ BUG_ON((unsigned long) cred->security < PAGE_SIZE);
+ cred->security = (void *) 0x7UL;
kfree(tsec);
}
@@ -3249,6 +3284,17 @@ static int selinux_cred_prepare(struct cred *new, const struct cred *old,
}
/*
+ * transfer the SELinux data to a blank set of creds
+ */
+static void selinux_cred_transfer(struct cred *new, const struct cred *old)
+{
+ const struct task_security_struct *old_tsec = old->security;
+ struct task_security_struct *tsec = new->security;
+
+ *tsec = *old_tsec;
+}
+
+/*
* set the security data for a kernel service
* - all the creation contexts are set to unlabelled
*/
@@ -3292,6 +3338,11 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
return 0;
}
+static int selinux_kernel_module_request(void)
+{
+ return task_has_system(current, SYSTEM__MODULE_REQUEST);
+}
+
static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
{
return current_has_perm(p, PROCESS__SETPGID);
@@ -3409,7 +3460,7 @@ static void selinux_task_to_inode(struct task_struct *p,
/* Returns error only if unable to parse addresses */
static int selinux_parse_skb_ipv4(struct sk_buff *skb,
- struct avc_audit_data *ad, u8 *proto)
+ struct common_audit_data *ad, u8 *proto)
{
int offset, ihlen, ret = -EINVAL;
struct iphdr _iph, *ih;
@@ -3490,7 +3541,7 @@ out:
/* Returns error only if unable to parse addresses */
static int selinux_parse_skb_ipv6(struct sk_buff *skb,
- struct avc_audit_data *ad, u8 *proto)
+ struct common_audit_data *ad, u8 *proto)
{
u8 nexthdr;
int ret = -EINVAL, offset;
@@ -3561,7 +3612,7 @@ out:
#endif /* IPV6 */
-static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad,
+static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
char **_addrp, int src, u8 *proto)
{
char *addrp;
@@ -3643,7 +3694,7 @@ static int socket_has_perm(struct task_struct *task, struct socket *sock,
u32 perms)
{
struct inode_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid;
int err = 0;
@@ -3653,7 +3704,7 @@ static int socket_has_perm(struct task_struct *task, struct socket *sock,
goto out;
sid = task_sid(task);
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sk = sock->sk;
err = avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
@@ -3740,7 +3791,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (family == PF_INET || family == PF_INET6) {
char *addrp;
struct inode_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
@@ -3769,7 +3820,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
snum, &sid);
if (err)
goto out;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sport = htons(snum);
ad.u.net.family = family;
err = avc_has_perm(isec->sid, sid,
@@ -3802,7 +3853,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (err)
goto out;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sport = htons(snum);
ad.u.net.family = family;
@@ -3836,7 +3887,7 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
isec = SOCK_INODE(sock)->i_security;
if (isec->sclass == SECCLASS_TCP_SOCKET ||
isec->sclass == SECCLASS_DCCP_SOCKET) {
- struct avc_audit_data ad;
+ struct common_audit_data ad;
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
@@ -3861,7 +3912,7 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
perm = (isec->sclass == SECCLASS_TCP_SOCKET) ?
TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.dport = htons(snum);
ad.u.net.family = sk->sk_family;
err = avc_has_perm(isec->sid, sid, isec->sclass, perm, &ad);
@@ -3951,13 +4002,13 @@ static int selinux_socket_unix_stream_connect(struct socket *sock,
struct sk_security_struct *ssec;
struct inode_security_struct *isec;
struct inode_security_struct *other_isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
int err;
isec = SOCK_INODE(sock)->i_security;
other_isec = SOCK_INODE(other)->i_security;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sk = other->sk;
err = avc_has_perm(isec->sid, other_isec->sid,
@@ -3983,13 +4034,13 @@ static int selinux_socket_unix_may_send(struct socket *sock,
{
struct inode_security_struct *isec;
struct inode_security_struct *other_isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
int err;
isec = SOCK_INODE(sock)->i_security;
other_isec = SOCK_INODE(other)->i_security;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.sk = other->sk;
err = avc_has_perm(isec->sid, other_isec->sid,
@@ -4002,7 +4053,7 @@ static int selinux_socket_unix_may_send(struct socket *sock,
static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family,
u32 peer_sid,
- struct avc_audit_data *ad)
+ struct common_audit_data *ad)
{
int err;
u32 if_sid;
@@ -4030,10 +4081,10 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
struct sk_security_struct *sksec = sk->sk_security;
u32 peer_sid;
u32 sk_sid = sksec->sid;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
char *addrp;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = skb->iif;
ad.u.net.family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
@@ -4071,7 +4122,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
struct sk_security_struct *sksec = sk->sk_security;
u16 family = sk->sk_family;
u32 sk_sid = sksec->sid;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
@@ -4095,7 +4146,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (!secmark_active && !peerlbl_active)
return 0;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = skb->iif;
ad.u.net.family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
@@ -4309,6 +4360,59 @@ static void selinux_req_classify_flow(const struct request_sock *req,
fl->secid = req->secid;
}
+static int selinux_tun_dev_create(void)
+{
+ u32 sid = current_sid();
+
+ /* we aren't taking into account the "sockcreate" SID since the socket
+ * that is being created here is not a socket in the traditional sense;
+ * instead it is a private sock, accessible only to the kernel, and
+ * representing a wide range of network traffic spanning multiple
+ * connections unlike traditional sockets - check the TUN driver to
+ * get a better understanding of why this socket is special */
+
+ return avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET, TUN_SOCKET__CREATE,
+ NULL);
+}
+
+static void selinux_tun_dev_post_create(struct sock *sk)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ /* we don't currently perform any NetLabel based labeling here and it
+ * isn't clear that we would want to do so anyway; while we could apply
+ * labeling without the support of the TUN user, the resulting labeled
+ * traffic from the other end of the connection would almost certainly
+ * cause confusion to the TUN user, who had no idea network labeling
+ * protocols were being used */
+
+ /* see the comments in selinux_tun_dev_create() about why we don't use
+ * the sockcreate SID here */
+
+ sksec->sid = current_sid();
+ sksec->sclass = SECCLASS_TUN_SOCKET;
+}
+
+static int selinux_tun_dev_attach(struct sock *sk)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ u32 sid = current_sid();
+ int err;
+
+ err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET,
+ TUN_SOCKET__RELABELFROM, NULL);
+ if (err)
+ return err;
+ err = avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET,
+ TUN_SOCKET__RELABELTO, NULL);
+ if (err)
+ return err;
+
+ sksec->sid = sid;
+
+ return 0;
+}
+
static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
{
int err = 0;
@@ -4353,7 +4457,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
int err;
char *addrp;
u32 peer_sid;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u8 secmark_active;
u8 netlbl_active;
u8 peerlbl_active;
@@ -4370,7 +4474,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
return NF_DROP;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = ifindex;
ad.u.net.family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
@@ -4458,7 +4562,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
{
struct sock *sk = skb->sk;
struct sk_security_struct *sksec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
char *addrp;
u8 proto;
@@ -4466,7 +4570,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
return NF_ACCEPT;
sksec = sk->sk_security;
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = ifindex;
ad.u.net.family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
@@ -4490,7 +4594,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
u32 secmark_perm;
u32 peer_sid;
struct sock *sk;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
@@ -4549,7 +4653,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
secmark_perm = PACKET__SEND;
}
- AVC_AUDIT_DATA_INIT(&ad, NET);
+ COMMON_AUDIT_DATA_INIT(&ad, NET);
ad.u.net.netif = ifindex;
ad.u.net.family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
@@ -4619,13 +4723,13 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
static int selinux_netlink_recv(struct sk_buff *skb, int capability)
{
int err;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
err = cap_netlink_recv(skb, capability);
if (err)
return err;
- AVC_AUDIT_DATA_INIT(&ad, CAP);
+ COMMON_AUDIT_DATA_INIT(&ad, CAP);
ad.u.cap = capability;
return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid,
@@ -4684,12 +4788,12 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
u32 perms)
{
struct ipc_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
isec = ipc_perms->security;
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = ipc_perms->key;
return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
@@ -4709,7 +4813,7 @@ static void selinux_msg_msg_free_security(struct msg_msg *msg)
static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
{
struct ipc_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
int rc;
@@ -4719,7 +4823,7 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
isec = msq->q_perm.security;
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4739,12 +4843,12 @@ static void selinux_msg_queue_free_security(struct msg_queue *msq)
static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg)
{
struct ipc_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
isec = msq->q_perm.security;
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = msq->q_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4783,7 +4887,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
{
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
int rc;
@@ -4804,7 +4908,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
return rc;
}
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = msq->q_perm.key;
/* Can this process write to the queue? */
@@ -4828,14 +4932,14 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
{
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = task_sid(target);
int rc;
isec = msq->q_perm.security;
msec = msg->security;
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid,
@@ -4850,7 +4954,7 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
static int selinux_shm_alloc_security(struct shmid_kernel *shp)
{
struct ipc_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
int rc;
@@ -4860,7 +4964,7 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp)
isec = shp->shm_perm.security;
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = shp->shm_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -4880,12 +4984,12 @@ static void selinux_shm_free_security(struct shmid_kernel *shp)
static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg)
{
struct ipc_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
isec = shp->shm_perm.security;
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = shp->shm_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -4942,7 +5046,7 @@ static int selinux_shm_shmat(struct shmid_kernel *shp,
static int selinux_sem_alloc_security(struct sem_array *sma)
{
struct ipc_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
int rc;
@@ -4952,7 +5056,7 @@ static int selinux_sem_alloc_security(struct sem_array *sma)
isec = sma->sem_perm.security;
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = sma->sem_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -4972,12 +5076,12 @@ static void selinux_sem_free_security(struct sem_array *sma)
static int selinux_sem_associate(struct sem_array *sma, int semflg)
{
struct ipc_security_struct *isec;
- struct avc_audit_data ad;
+ struct common_audit_data ad;
u32 sid = current_sid();
isec = sma->sem_perm.security;
- AVC_AUDIT_DATA_INIT(&ad, IPC);
+ COMMON_AUDIT_DATA_INIT(&ad, IPC);
ad.u.ipc_id = sma->sem_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -5195,7 +5299,7 @@ static int selinux_setprocattr(struct task_struct *p,
/* Only allow single threaded processes to change context */
error = -EPERM;
- if (!is_single_threaded(p)) {
+ if (!current_is_single_threaded()) {
error = security_bounded_transition(tsec->sid, sid);
if (error)
goto abort_change;
@@ -5252,6 +5356,32 @@ static void selinux_release_secctx(char *secdata, u32 seclen)
kfree(secdata);
}
+/*
+ * called with inode->i_mutex locked
+ */
+static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
+}
+
+/*
+ * called with inode->i_mutex locked
+ */
+static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return __vfs_setxattr_noperm(dentry, XATTR_NAME_SELINUX, ctx, ctxlen, 0);
+}
+
+static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ int len = 0;
+ len = selinux_inode_getsecurity(inode, XATTR_SELINUX_SUFFIX,
+ ctx, true);
+ if (len < 0)
+ return len;
+ *ctxlen = len;
+ return 0;
+}
#ifdef CONFIG_KEYS
static int selinux_key_alloc(struct key *k, const struct cred *cred,
@@ -5323,7 +5453,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
static struct security_operations selinux_ops = {
.name = "selinux",
- .ptrace_may_access = selinux_ptrace_may_access,
+ .ptrace_access_check = selinux_ptrace_access_check,
.ptrace_traceme = selinux_ptrace_traceme,
.capget = selinux_capget,
.capset = selinux_capset,
@@ -5396,10 +5526,13 @@ static struct security_operations selinux_ops = {
.dentry_open = selinux_dentry_open,
.task_create = selinux_task_create,
+ .cred_alloc_blank = selinux_cred_alloc_blank,
.cred_free = selinux_cred_free,
.cred_prepare = selinux_cred_prepare,
+ .cred_transfer = selinux_cred_transfer,
.kernel_act_as = selinux_kernel_act_as,
.kernel_create_files_as = selinux_kernel_create_files_as,
+ .kernel_module_request = selinux_kernel_module_request,
.task_setpgid = selinux_task_setpgid,
.task_getpgid = selinux_task_getpgid,
.task_getsid = selinux_task_getsid,
@@ -5448,6 +5581,9 @@ static struct security_operations selinux_ops = {
.secid_to_secctx = selinux_secid_to_secctx,
.secctx_to_secid = selinux_secctx_to_secid,
.release_secctx = selinux_release_secctx,
+ .inode_notifysecctx = selinux_inode_notifysecctx,
+ .inode_setsecctx = selinux_inode_setsecctx,
+ .inode_getsecctx = selinux_inode_getsecctx,
.unix_stream_connect = selinux_socket_unix_stream_connect,
.unix_may_send = selinux_socket_unix_may_send,
@@ -5477,6 +5613,9 @@ static struct security_operations selinux_ops = {
.inet_csk_clone = selinux_inet_csk_clone,
.inet_conn_established = selinux_inet_conn_established,
.req_classify_flow = selinux_req_classify_flow,
+ .tun_dev_create = selinux_tun_dev_create,
+ .tun_dev_post_create = selinux_tun_dev_post_create,
+ .tun_dev_attach = selinux_tun_dev_attach,
#ifdef CONFIG_SECURITY_NETWORK_XFRM
.xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
@@ -5691,6 +5830,9 @@ int selinux_disable(void)
selinux_disabled = 1;
selinux_enabled = 0;
+ /* Try to destroy the avc node cache */
+ avc_disable();
+
/* Reset security_ops to the secondary module, dummy or capability. */
security_ops = secondary_ops;
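
The tun_dev hooks added above cover three events: creating a new tun device (tun_socket { create } checked against the caller's own context), labeling the freshly created sock, and attaching to an already labeled persistent device (relabelfrom/relabelto). A rough driver-side sketch follows; the security_tun_dev_*() wrapper names are an assumption here, chosen to mirror the hook names registered in selinux_ops, and are not part of this hunk.

#include <linux/security.h>

/* Illustrative only; wrapper names assumed to mirror the hooks above. */
static int example_tun_open(struct sock *sk, bool attach_to_persistent)
{
	int err;

	if (!attach_to_persistent) {
		/* new device: needs tun_socket { create } on the caller */
		err = security_tun_dev_create();
		if (err)
			return err;
		/* label the backing sock with the creator's context */
		security_tun_dev_post_create(sk);
		return 0;
	}

	/* existing device: needs tun_socket { relabelfrom relabelto } */
	return security_tun_dev_attach(sk);
}
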
diff --git a/security/selinux/include/av_inherit.h b/security/selinux/include/av_inherit.h
index 8377a4ba3b95..abedcd704dae 100644
--- a/security/selinux/include/av_inherit.h
+++ b/security/selinux/include/av_inherit.h
@@ -15,6 +15,7 @@
S_(SECCLASS_KEY_SOCKET, socket, 0x00400000UL)
S_(SECCLASS_UNIX_STREAM_SOCKET, socket, 0x00400000UL)
S_(SECCLASS_UNIX_DGRAM_SOCKET, socket, 0x00400000UL)
+ S_(SECCLASS_TUN_SOCKET, socket, 0x00400000UL)
S_(SECCLASS_IPC, ipc, 0x00000200UL)
S_(SECCLASS_SEM, ipc, 0x00000200UL)
S_(SECCLASS_MSGQ, ipc, 0x00000200UL)
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index 31df1d7c1aee..2b683ad83d21 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -107,6 +107,7 @@
S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_READ, "syslog_read")
S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_MOD, "syslog_mod")
S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_CONSOLE, "syslog_console")
+ S_(SECCLASS_SYSTEM, SYSTEM__MODULE_REQUEST, "module_request")
S_(SECCLASS_CAPABILITY, CAPABILITY__CHOWN, "chown")
S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_OVERRIDE, "dac_override")
S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_READ_SEARCH, "dac_read_search")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index d645192ee950..0546d616ccac 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -423,6 +423,28 @@
#define UNIX_DGRAM_SOCKET__RECV_MSG 0x00080000UL
#define UNIX_DGRAM_SOCKET__SEND_MSG 0x00100000UL
#define UNIX_DGRAM_SOCKET__NAME_BIND 0x00200000UL
+#define TUN_SOCKET__IOCTL 0x00000001UL
+#define TUN_SOCKET__READ 0x00000002UL
+#define TUN_SOCKET__WRITE 0x00000004UL
+#define TUN_SOCKET__CREATE 0x00000008UL
+#define TUN_SOCKET__GETATTR 0x00000010UL
+#define TUN_SOCKET__SETATTR 0x00000020UL
+#define TUN_SOCKET__LOCK 0x00000040UL
+#define TUN_SOCKET__RELABELFROM 0x00000080UL
+#define TUN_SOCKET__RELABELTO 0x00000100UL
+#define TUN_SOCKET__APPEND 0x00000200UL
+#define TUN_SOCKET__BIND 0x00000400UL
+#define TUN_SOCKET__CONNECT 0x00000800UL
+#define TUN_SOCKET__LISTEN 0x00001000UL
+#define TUN_SOCKET__ACCEPT 0x00002000UL
+#define TUN_SOCKET__GETOPT 0x00004000UL
+#define TUN_SOCKET__SETOPT 0x00008000UL
+#define TUN_SOCKET__SHUTDOWN 0x00010000UL
+#define TUN_SOCKET__RECVFROM 0x00020000UL
+#define TUN_SOCKET__SENDTO 0x00040000UL
+#define TUN_SOCKET__RECV_MSG 0x00080000UL
+#define TUN_SOCKET__SEND_MSG 0x00100000UL
+#define TUN_SOCKET__NAME_BIND 0x00200000UL
#define PROCESS__FORK 0x00000001UL
#define PROCESS__TRANSITION 0x00000002UL
#define PROCESS__SIGCHLD 0x00000004UL
@@ -508,6 +530,7 @@
#define SYSTEM__SYSLOG_READ 0x00000002UL
#define SYSTEM__SYSLOG_MOD 0x00000004UL
#define SYSTEM__SYSLOG_CONSOLE 0x00000008UL
+#define SYSTEM__MODULE_REQUEST 0x00000010UL
#define CAPABILITY__CHOWN 0x00000001UL
#define CAPABILITY__DAC_OVERRIDE 0x00000002UL
#define CAPABILITY__DAC_READ_SEARCH 0x00000004UL
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index d12ff1a9c0aa..e94e82f73818 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/audit.h>
+#include <linux/lsm_audit.h>
#include <linux/in6.h>
#include <linux/path.h>
#include <asm/system.h>
@@ -36,48 +37,6 @@ struct inode;
struct sock;
struct sk_buff;
-/* Auxiliary data to use in generating the audit record. */
-struct avc_audit_data {
- char type;
-#define AVC_AUDIT_DATA_FS 1
-#define AVC_AUDIT_DATA_NET 2
-#define AVC_AUDIT_DATA_CAP 3
-#define AVC_AUDIT_DATA_IPC 4
- struct task_struct *tsk;
- union {
- struct {
- struct path path;
- struct inode *inode;
- } fs;
- struct {
- int netif;
- struct sock *sk;
- u16 family;
- __be16 dport;
- __be16 sport;
- union {
- struct {
- __be32 daddr;
- __be32 saddr;
- } v4;
- struct {
- struct in6_addr daddr;
- struct in6_addr saddr;
- } v6;
- } fam;
- } net;
- int cap;
- int ipc_id;
- } u;
-};
-
-#define v4info fam.v4
-#define v6info fam.v6
-
-/* Initialize an AVC audit data structure. */
-#define AVC_AUDIT_DATA_INIT(_d,_t) \
- { memset((_d), 0, sizeof(struct avc_audit_data)); (_d)->type = AVC_AUDIT_DATA_##_t; }
-
/*
* AVC statistics
*/
@@ -98,7 +57,9 @@ void __init avc_init(void);
void avc_audit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
- struct av_decision *avd, int result, struct avc_audit_data *auditdata);
+ struct av_decision *avd,
+ int result,
+ struct common_audit_data *a);
#define AVC_STRICT 1 /* Ignore permissive mode. */
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
@@ -108,7 +69,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
int avc_has_perm(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
- struct avc_audit_data *auditdata);
+ struct common_audit_data *auditdata);
u32 avc_policy_seqno(void);
@@ -127,13 +88,13 @@ int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
u32 events, u32 ssid, u32 tsid,
u16 tclass, u32 perms);
-/* Shows permission in human readable form */
-void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av);
-
/* Exported to selinuxfs */
int avc_get_hash_stats(char *page);
extern unsigned int avc_cache_threshold;
+/* Attempt to free avc node cache */
+void avc_disable(void);
+
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
#endif
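
With struct avc_audit_data and AVC_AUDIT_DATA_INIT() removed from this header, callers build a struct common_audit_data from <linux/lsm_audit.h> instead; the union members used by SELinux (u.fs, u.net, u.cap, u.ipc_id) keep their old shape, so conversions are mostly mechanical, as the hooks.c hunks above show. A minimal converted caller might look like the sketch below (illustrative only; example_check() is not a real function).

#include "avc.h"	/* pulls in <linux/lsm_audit.h> and the flask classes */

/* Illustrative only: a caller converted to the new audit-data type. */
static int example_check(struct dentry *dentry, u32 ssid, u32 tsid,
			 u32 requested)
{
	struct common_audit_data ad;

	COMMON_AUDIT_DATA_INIT(&ad, FS);	/* was AVC_AUDIT_DATA_INIT */
	ad.u.fs.path.dentry = dentry;

	/* avc_has_perm() now takes struct common_audit_data */
	return avc_has_perm(ssid, tsid, SECCLASS_FILE, requested, &ad);
}
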
diff --git a/security/selinux/include/class_to_string.h b/security/selinux/include/class_to_string.h
index 21ec786611d4..7ab9299bfb6b 100644
--- a/security/selinux/include/class_to_string.h
+++ b/security/selinux/include/class_to_string.h
@@ -77,3 +77,4 @@
S_(NULL)
S_(NULL)
S_("kernel_service")
+ S_("tun_socket")
diff --git a/security/selinux/include/flask.h b/security/selinux/include/flask.h
index 882f27d66fac..f248500a1e3c 100644
--- a/security/selinux/include/flask.h
+++ b/security/selinux/include/flask.h
@@ -53,6 +53,7 @@
#define SECCLASS_PEER 68
#define SECCLASS_CAPABILITY2 69
#define SECCLASS_KERNEL_SERVICE 74
+#define SECCLASS_TUN_SOCKET 75
/*
* Security identifier indices for initial entities
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index b4b5b9b2f0be..8d7384280a7a 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -59,7 +59,7 @@ int selinux_netlbl_socket_post_create(struct sock *sk, u16 family);
int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
struct sk_buff *skb,
u16 family,
- struct avc_audit_data *ad);
+ struct common_audit_data *ad);
int selinux_netlbl_socket_setsockopt(struct socket *sock,
int level,
int optname);
@@ -129,7 +129,7 @@ static inline int selinux_netlbl_socket_post_create(struct sock *sk,
static inline int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
struct sk_buff *skb,
u16 family,
- struct avc_audit_data *ad)
+ struct common_audit_data *ad)
{
return 0;
}
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 289e24b39e3e..13128f9a3e5a 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -41,9 +41,9 @@ static inline int selinux_xfrm_enabled(void)
}
int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
- struct avc_audit_data *ad);
+ struct common_audit_data *ad);
int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct avc_audit_data *ad, u8 proto);
+ struct common_audit_data *ad, u8 proto);
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
static inline void selinux_xfrm_notify_policyload(void)
@@ -57,13 +57,13 @@ static inline int selinux_xfrm_enabled(void)
}
static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct avc_audit_data *ad)
+ struct common_audit_data *ad)
{
return 0;
}
static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct avc_audit_data *ad, u8 proto)
+ struct common_audit_data *ad, u8 proto)
{
return 0;
}
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 2e984413c7b2..e68823741ad5 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -342,7 +342,7 @@ int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
struct sk_buff *skb,
u16 family,
- struct avc_audit_data *ad)
+ struct common_audit_data *ad)
{
int rc;
u32 nlbl_sid;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 500e6f78e115..ff17820d35ec 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -22,6 +22,11 @@
*
* Added validation of kernel classes and permissions
*
+ * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Added support for bounds domain and audit messages on masked permissions
+ *
+ * Copyright (C) 2008, 2009 NEC Corporation
* Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
* Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC
@@ -279,6 +284,95 @@ mls_ops:
}
/*
+ * security_dump_masked_av - dumps masked permissions during
+ * security_compute_av due to RBAC, MLS/Constraint and Type bounds.
+ */
+static int dump_masked_av_helper(void *k, void *d, void *args)
+{
+ struct perm_datum *pdatum = d;
+ char **permission_names = args;
+
+ BUG_ON(pdatum->value < 1 || pdatum->value > 32);
+
+ permission_names[pdatum->value - 1] = (char *)k;
+
+ return 0;
+}
+
+static void security_dump_masked_av(struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ u32 permissions,
+ const char *reason)
+{
+ struct common_datum *common_dat;
+ struct class_datum *tclass_dat;
+ struct audit_buffer *ab;
+ char *tclass_name;
+ char *scontext_name = NULL;
+ char *tcontext_name = NULL;
+ char *permission_names[32];
+ int index, length;
+ bool need_comma = false;
+
+ if (!permissions)
+ return;
+
+ tclass_name = policydb.p_class_val_to_name[tclass - 1];
+ tclass_dat = policydb.class_val_to_struct[tclass - 1];
+ common_dat = tclass_dat->comdatum;
+
+ /* init permission_names */
+ if (common_dat &&
+ hashtab_map(common_dat->permissions.table,
+ dump_masked_av_helper, permission_names) < 0)
+ goto out;
+
+ if (hashtab_map(tclass_dat->permissions.table,
+ dump_masked_av_helper, permission_names) < 0)
+ goto out;
+
+ /* get scontext/tcontext in text form */
+ if (context_struct_to_string(scontext,
+ &scontext_name, &length) < 0)
+ goto out;
+
+ if (context_struct_to_string(tcontext,
+ &tcontext_name, &length) < 0)
+ goto out;
+
+ /* audit a message */
+ ab = audit_log_start(current->audit_context,
+ GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ if (!ab)
+ goto out;
+
+ audit_log_format(ab, "op=security_compute_av reason=%s "
+ "scontext=%s tcontext=%s tclass=%s perms=",
+ reason, scontext_name, tcontext_name, tclass_name);
+
+ for (index = 0; index < 32; index++) {
+ u32 mask = (1 << index);
+
+ if ((mask & permissions) == 0)
+ continue;
+
+ audit_log_format(ab, "%s%s",
+ need_comma ? "," : "",
+ permission_names[index]
+ ? permission_names[index] : "????");
+ need_comma = true;
+ }
+ audit_log_end(ab);
+out:
+ /* release scontext/tcontext */
+ kfree(tcontext_name);
+ kfree(scontext_name);
+
+ return;
+}
+
+/*
* security_boundary_permission - drops violated permissions
* on boundary constraint.
*/
@@ -347,28 +441,12 @@ static void type_attribute_bounds_av(struct context *scontext,
}
if (masked) {
- struct audit_buffer *ab;
- char *stype_name
- = policydb.p_type_val_to_name[source->value - 1];
- char *ttype_name
- = policydb.p_type_val_to_name[target->value - 1];
- char *tclass_name
- = policydb.p_class_val_to_name[tclass - 1];
-
/* mask violated permissions */
avd->allowed &= ~masked;
- /* notice to userspace via audit message */
- ab = audit_log_start(current->audit_context,
- GFP_ATOMIC, AUDIT_SELINUX_ERR);
- if (!ab)
- return;
-
- audit_log_format(ab, "av boundary violation: "
- "source=%s target=%s tclass=%s",
- stype_name, ttype_name, tclass_name);
- avc_dump_av(ab, tclass, masked);
- audit_log_end(ab);
+ /* audit masked permissions */
+ security_dump_masked_av(scontext, tcontext,
+ tclass, masked, "bounds");
}
}
@@ -480,7 +558,7 @@ static int context_struct_compute_av(struct context *scontext,
if ((constraint->permissions & (avd->allowed)) &&
!constraint_expr_eval(scontext, tcontext, NULL,
constraint->expr)) {
- avd->allowed = (avd->allowed) & ~(constraint->permissions);
+ avd->allowed &= ~(constraint->permissions);
}
constraint = constraint->next;
}
@@ -499,8 +577,8 @@ static int context_struct_compute_av(struct context *scontext,
break;
}
if (!ra)
- avd->allowed = (avd->allowed) & ~(PROCESS__TRANSITION |
- PROCESS__DYNTRANSITION);
+ avd->allowed &= ~(PROCESS__TRANSITION |
+ PROCESS__DYNTRANSITION);
}
/*
@@ -687,6 +765,26 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
}
index = type->bounds;
}
+
+ if (rc) {
+ char *old_name = NULL;
+ char *new_name = NULL;
+ int length;
+
+ if (!context_struct_to_string(old_context,
+ &old_name, &length) &&
+ !context_struct_to_string(new_context,
+ &new_name, &length)) {
+ audit_log(current->audit_context,
+ GFP_ATOMIC, AUDIT_SELINUX_ERR,
+ "op=security_bounded_transition "
+ "result=denied "
+ "oldcontext=%s newcontext=%s",
+ old_name, new_name);
+ }
+ kfree(new_name);
+ kfree(old_name);
+ }
out:
read_unlock(&policy_rwlock);
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 72b18452e1a1..f3cb9ed731a9 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -401,7 +401,7 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
* gone thru the IPSec process.
*/
int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct avc_audit_data *ad)
+ struct common_audit_data *ad)
{
int i, rc = 0;
struct sec_path *sp;
@@ -442,7 +442,7 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
* checked in the selinux_xfrm_state_pol_flow_match hook above.
*/
int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct avc_audit_data *ad, u8 proto)
+ struct common_audit_data *ad, u8 proto)
{
struct dst_entry *dst;
int rc = 0;
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 243bec175be0..c6e9acae72e4 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -275,7 +275,7 @@ static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
{
memset(a, 0, sizeof(*a));
a->a.type = type;
- a->a.function = func;
+ a->a.smack_audit_data.function = func;
}
static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 513dc1aa16dd..0f9ac8146900 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -240,8 +240,9 @@ static inline void smack_str_from_perm(char *string, int access)
static void smack_log_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
- struct smack_audit_data *sad = &ad->lsm_priv.smack_audit_data;
- audit_log_format(ab, "lsm=SMACK fn=%s action=%s", ad->function,
+ struct smack_audit_data *sad = &ad->smack_audit_data;
+ audit_log_format(ab, "lsm=SMACK fn=%s action=%s",
+ ad->smack_audit_data.function,
sad->result ? "denied" : "granted");
audit_log_format(ab, " subject=");
audit_log_untrustedstring(ab, sad->subject);
@@ -274,11 +275,11 @@ void smack_log(char *subject_label, char *object_label, int request,
if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
return;
- if (a->function == NULL)
- a->function = "unknown";
+ if (a->smack_audit_data.function == NULL)
+ a->smack_audit_data.function = "unknown";
/* end preparing the audit data */
- sad = &a->lsm_priv.smack_audit_data;
+ sad = &a->smack_audit_data;
smack_str_from_perm(request_buffer, request);
sad->subject = subject_label;
sad->object = object_label;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 0023182078c7..acae7ef4092d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -91,7 +91,7 @@ struct inode_smack *new_inode_smack(char *smack)
*/
/**
- * smack_ptrace_may_access - Smack approval on PTRACE_ATTACH
+ * smack_ptrace_access_check - Smack approval on PTRACE_ATTACH
* @ctp: child task pointer
* @mode: ptrace attachment mode
*
@@ -99,13 +99,13 @@ struct inode_smack *new_inode_smack(char *smack)
*
* Do the capability checks, and require read and write.
*/
-static int smack_ptrace_may_access(struct task_struct *ctp, unsigned int mode)
+static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
{
int rc;
struct smk_audit_info ad;
char *sp, *tsp;
- rc = cap_ptrace_may_access(ctp, mode);
+ rc = cap_ptrace_access_check(ctp, mode);
if (rc != 0)
return rc;
@@ -1080,6 +1080,22 @@ static int smack_file_receive(struct file *file)
*/
/**
+ * smack_cred_alloc_blank - "allocate" blank task-level security credentials
+ * @cred: the new credentials
+ * @gfp: the atomicity of any memory allocations
+ *
+ * Prepare a blank set of credentials for modification. This must allocate all
+ * the memory the LSM module might require such that cred_transfer() can
+ * complete without error.
+ */
+static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ cred->security = NULL;
+ return 0;
+}
+
+/**
* smack_cred_free - "free" task-level security credentials
* @cred: the credentials in question
*
@@ -1117,6 +1133,18 @@ static void smack_cred_commit(struct cred *new, const struct cred *old)
}
/**
+ * smack_cred_transfer - Transfer the old credentials to the new credentials
+ * @new: the new credentials
+ * @old: the original credentials
+ *
+ * Fill in a set of blank credentials from another set of credentials.
+ */
+static void smack_cred_transfer(struct cred *new, const struct cred *old)
+{
+ new->security = old->security;
+}
+
+/**
* smack_kernel_act_as - Set the subjective context in a set of credentials
* @new: points to the set of credentials to be modified.
* @secid: specifies the security ID to be set
@@ -1638,6 +1666,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
nsp->smk_inode = sp;
+ nsp->smk_flags |= SMK_INODE_INSTANT;
return 0;
}
/*
@@ -2464,7 +2493,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
/*
* Perfectly reasonable for this to be NULL
*/
- if (sip == NULL || sip->sin_family != PF_INET)
+ if (sip == NULL || sip->sin_family != AF_INET)
return 0;
return smack_netlabel_send(sock->sk, sip);
@@ -3029,10 +3058,31 @@ static void smack_release_secctx(char *secdata, u32 seclen)
{
}
+static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx, ctxlen, 0);
+}
+
+static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return __vfs_setxattr_noperm(dentry, XATTR_NAME_SMACK, ctx, ctxlen, 0);
+}
+
+static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ int len = 0;
+ len = smack_inode_getsecurity(inode, XATTR_SMACK_SUFFIX, ctx, true);
+
+ if (len < 0)
+ return len;
+ *ctxlen = len;
+ return 0;
+}
+
struct security_operations smack_ops = {
.name = "smack",
- .ptrace_may_access = smack_ptrace_may_access,
+ .ptrace_access_check = smack_ptrace_access_check,
.ptrace_traceme = smack_ptrace_traceme,
.syslog = smack_syslog,
@@ -3073,9 +3123,11 @@ struct security_operations smack_ops = {
.file_send_sigiotask = smack_file_send_sigiotask,
.file_receive = smack_file_receive,
+ .cred_alloc_blank = smack_cred_alloc_blank,
.cred_free = smack_cred_free,
.cred_prepare = smack_cred_prepare,
.cred_commit = smack_cred_commit,
+ .cred_transfer = smack_cred_transfer,
.kernel_act_as = smack_kernel_act_as,
.kernel_create_files_as = smack_kernel_create_files_as,
.task_setpgid = smack_task_setpgid,
@@ -3155,6 +3207,9 @@ struct security_operations smack_ops = {
.secid_to_secctx = smack_secid_to_secctx,
.secctx_to_secid = smack_secctx_to_secid,
.release_secctx = smack_release_secctx,
+ .inode_notifysecctx = smack_inode_notifysecctx,
+ .inode_setsecctx = smack_inode_setsecctx,
+ .inode_getsecctx = smack_inode_getsecctx,
};
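
Smack now implements the same inode_notifysecctx/inode_setsecctx/inode_getsecctx trio that SELinux gained earlier in this diff: notify pushes a label received from elsewhere into the in-core inode, setsecctx writes the label through the xattr path without a permission check, and getsecctx hands back an opaque (context, length) pair. The fragment below sketches a possible filesystem-side consumer; the security_inode_*secctx() and security_release_secctx() wrapper names are assumptions taken to mirror the hook names and should be checked against include/linux/security.h.

#include <linux/security.h>

/* Sketch of a filesystem-side consumer of the new secctx hooks; the
 * security_*() wrapper names are assumptions, not part of this diff. */
static int example_push_label(struct inode *inode, void *label, u32 len)
{
	/* record the label reported for this inode by a remote server */
	return security_inode_notifysecctx(inode, label, len);
}

static int example_read_label(struct inode *inode)
{
	void *ctx;
	u32 ctxlen;
	int err;

	err = security_inode_getsecctx(inode, &ctx, &ctxlen);
	if (err)
		return err;
	/* ... copy ctx/ctxlen out to the caller ... */
	security_release_secctx(ctx, ctxlen);
	return 0;
}
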
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index fdd1f4b8c448..3c8bd8ee0b95 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1285,6 +1285,36 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
}
/**
+ * tomoyo_delete_domain - Delete a domain.
+ *
+ * @domainname: The name of the domain.
+ *
+ * Returns 0.
+ */
+static int tomoyo_delete_domain(char *domainname)
+{
+ struct tomoyo_domain_info *domain;
+ struct tomoyo_path_info name;
+
+ name.name = domainname;
+ tomoyo_fill_path_info(&name);
+ down_write(&tomoyo_domain_list_lock);
+ /* Is there an active domain? */
+ list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ /* Never delete tomoyo_kernel_domain */
+ if (domain == &tomoyo_kernel_domain)
+ continue;
+ if (domain->is_deleted ||
+ tomoyo_pathcmp(domain->domainname, &name))
+ continue;
+ domain->is_deleted = true;
+ break;
+ }
+ up_write(&tomoyo_domain_list_lock);
+ return 0;
+}
+
+/**
* tomoyo_write_domain_policy - Write domain policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 6d6ba09af457..31df541911f7 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -339,8 +339,6 @@ const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain);
const char *tomoyo_get_msg(const bool is_enforce);
/* Convert single path operation to operation name. */
const char *tomoyo_sp2keyword(const u8 operation);
-/* Delete a domain. */
-int tomoyo_delete_domain(char *data);
/* Create "alias" entry in exception policy. */
int tomoyo_write_alias_policy(char *data, const bool is_delete);
/*
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 1d8b16960576..fcf52accce2b 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -717,38 +717,6 @@ int tomoyo_write_alias_policy(char *data, const bool is_delete)
return tomoyo_update_alias_entry(data, cp, is_delete);
}
-/* Domain create/delete handler. */
-
-/**
- * tomoyo_delete_domain - Delete a domain.
- *
- * @domainname: The name of domain.
- *
- * Returns 0.
- */
-int tomoyo_delete_domain(char *domainname)
-{
- struct tomoyo_domain_info *domain;
- struct tomoyo_path_info name;
-
- name.name = domainname;
- tomoyo_fill_path_info(&name);
- down_write(&tomoyo_domain_list_lock);
- /* Is there an active domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
- /* Never delete tomoyo_kernel_domain */
- if (domain == &tomoyo_kernel_domain)
- continue;
- if (domain->is_deleted ||
- tomoyo_pathcmp(domain->domainname, &name))
- continue;
- domain->is_deleted = true;
- break;
- }
- up_write(&tomoyo_domain_list_lock);
- return 0;
-}
-
/**
* tomoyo_find_or_assign_new_domain - Create a domain.
*
@@ -818,13 +786,11 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
/**
* tomoyo_find_next_domain - Find a domain.
*
- * @bprm: Pointer to "struct linux_binprm".
- * @next_domain: Pointer to pointer to "struct tomoyo_domain_info".
+ * @bprm: Pointer to "struct linux_binprm".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_find_next_domain(struct linux_binprm *bprm,
- struct tomoyo_domain_info **next_domain)
+int tomoyo_find_next_domain(struct linux_binprm *bprm)
{
/*
* This function assumes that the size of buffer returned by
@@ -946,9 +912,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm,
tomoyo_set_domain_flag(old_domain, false,
TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED);
out:
+ if (!domain)
+ domain = old_domain;
+ bprm->cred->security = domain;
tomoyo_free(real_program_name);
tomoyo_free(symlink_program_name);
- *next_domain = domain ? domain : old_domain;
tomoyo_free(tmp);
return retval;
}
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 3194d09fe0f4..9548a0984cc4 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -14,6 +14,12 @@
#include "tomoyo.h"
#include "realpath.h"
+static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
+{
+ new->security = NULL;
+ return 0;
+}
+
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
@@ -25,6 +31,15 @@ static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
return 0;
}
+static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
+{
+ /*
+ * Since "struct tomoyo_domain_info *" is a sharable pointer,
+ * we don't need to duplicate.
+ */
+ new->security = old->security;
+}
+
static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
{
int rc;
@@ -61,14 +76,8 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
- if (!domain) {
- struct tomoyo_domain_info *next_domain = NULL;
- int retval = tomoyo_find_next_domain(bprm, &next_domain);
-
- if (!retval)
- bprm->cred->security = next_domain;
- return retval;
- }
+ if (!domain)
+ return tomoyo_find_next_domain(bprm);
/*
* Read permission is checked against interpreters using next domain.
* '1' is the result of open_to_namei_flags(O_RDONLY).
@@ -268,7 +277,9 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
*/
static struct security_operations tomoyo_security_ops = {
.name = "tomoyo",
+ .cred_alloc_blank = tomoyo_cred_alloc_blank,
.cred_prepare = tomoyo_cred_prepare,
+ .cred_transfer = tomoyo_cred_transfer,
.bprm_set_creds = tomoyo_bprm_set_creds,
.bprm_check_security = tomoyo_bprm_check_security,
#ifdef CONFIG_SYSCTL
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
index 0fd588a629cf..cd6ba0bf7069 100644
--- a/security/tomoyo/tomoyo.h
+++ b/security/tomoyo/tomoyo.h
@@ -31,8 +31,7 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
struct path *path2);
int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
struct file *filp);
-int tomoyo_find_next_domain(struct linux_binprm *bprm,
- struct tomoyo_domain_info **next_domain);
+int tomoyo_find_next_domain(struct linux_binprm *bprm);
/* Index numbers for Access Controls. */
diff --git a/sound/Kconfig b/sound/Kconfig
index 1eceb85287c5..439e15c8faa3 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -32,6 +32,34 @@ config SOUND_OSS_CORE
bool
default n
+config SOUND_OSS_CORE_PRECLAIM
+ bool "Preclaim OSS device numbers"
+ depends on SOUND_OSS_CORE
+ default y
+ help
+ With this option enabled, the kernel will claim all OSS device
+ numbers if any OSS support (native or emulation) is enabled
+ whether the respective module is loaded or not and try to load the
+ appropriate module using sound-slot/service-* and char-major-*
+ module aliases when one of the device numbers is opened. With
+ this option disabled, the kernel will only claim actually in-use
+ device numbers and opening a missing device will generate only the
+ standard char-major-* aliases.
+
+ The only visible difference is the use of additional module aliases
+ and whether OSS sound devices appear multiple times in
+ /proc/devices. sound-slot/service-* module aliases are scheduled
+ to be removed (i.e. PRECLAIM won't be available) and this option is
+ to make the transition easier. This option can be overridden
+ during boot using the kernel parameter soundcore.preclaim_oss.
+
+ Disabling this allows alternative OSS implementations.
+
+ Please read Documentation/feature-removal-schedule.txt for
+ details.
+
+ If unsure, say Y.
+
source "sound/oss/dmasound/Kconfig"
if !M68K
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index c570ebd9d177..4e34d19ddbc0 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -170,6 +170,13 @@ static int __devinit pxa2xx_ac97_probe(struct platform_device *dev)
struct snd_ac97_bus *ac97_bus;
struct snd_ac97_template ac97_template;
int ret;
+ pxa2xx_audio_ops_t *pdata = dev->dev.platform_data;
+
+ if (dev->id >= 0) {
+ dev_err(&dev->dev, "PXA2xx has only one AC97 port.\n");
+ ret = -ENXIO;
+ goto err_dev;
+ }
ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
THIS_MODULE, 0, &card);
@@ -200,6 +207,8 @@ static int __devinit pxa2xx_ac97_probe(struct platform_device *dev)
snprintf(card->longname, sizeof(card->longname),
"%s (%s)", dev->dev.driver->name, card->mixername);
+ if (pdata && pdata->codec_pdata[0])
+ snd_ac97_dev_add_pdata(ac97_bus->codec[0], pdata->codec_pdata[0]);
snd_card_set_dev(card, &dev->dev);
ret = snd_card_register(card);
if (ret == 0) {
@@ -212,6 +221,7 @@ err_remove:
err:
if (card)
snd_card_free(card);
+err_dev:
return ret;
}
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index 6205f37d547c..743ac6a29065 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -136,6 +136,9 @@ int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct pxa2xx_runtime_data *prtd = substream->runtime->private_data;
+ if (!prtd || !prtd->params)
+ return 0;
+
DCSR(prtd->dma_ch) &= ~DCSR_RUN;
DCSR(prtd->dma_ch) = 0;
DCMD(prtd->dma_ch) = 0;
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 6061fb5f4e1c..c15682a2f9db 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -206,4 +206,8 @@ config SND_PCM_XRUN_DEBUG
config SND_VMASTER
bool
+config SND_DMA_SGBUF
+ def_bool y
+ depends on X86
+
source "sound/core/seq/Kconfig"
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 4229052e7b91..350a08d277f4 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -13,7 +13,7 @@ snd-pcm-objs := pcm.o pcm_native.o pcm_lib.o pcm_timer.o pcm_misc.o \
pcm_memory.o
snd-page-alloc-y := memalloc.o
-snd-page-alloc-$(CONFIG_HAS_DMA) += sgbuf.o
+snd-page-alloc-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
snd-rawmidi-objs := rawmidi.o
snd-timer-objs := timer.o
diff --git a/sound/core/control.c b/sound/core/control.c
index 17b8d47a5cd0..a8b7fabe645e 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -414,7 +414,7 @@ int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id)
EXPORT_SYMBOL(snd_ctl_remove_id);
/**
- * snd_ctl_remove_unlocked_id - remove the unlocked control of the given id and release it
+ * snd_ctl_remove_user_ctl - remove and release the unlocked user control
* @file: active control handle
* @id: the control id to remove
*
@@ -423,8 +423,8 @@ EXPORT_SYMBOL(snd_ctl_remove_id);
*
* Returns 0 if successful, or a negative error code on failure.
*/
-static int snd_ctl_remove_unlocked_id(struct snd_ctl_file * file,
- struct snd_ctl_elem_id *id)
+static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file,
+ struct snd_ctl_elem_id *id)
{
struct snd_card *card = file->card;
struct snd_kcontrol *kctl;
@@ -433,15 +433,23 @@ static int snd_ctl_remove_unlocked_id(struct snd_ctl_file * file,
down_write(&card->controls_rwsem);
kctl = snd_ctl_find_id(card, id);
if (kctl == NULL) {
- up_write(&card->controls_rwsem);
- return -ENOENT;
+ ret = -ENOENT;
+ goto error;
+ }
+ if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) {
+ ret = -EINVAL;
+ goto error;
}
for (idx = 0; idx < kctl->count; idx++)
if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) {
- up_write(&card->controls_rwsem);
- return -EBUSY;
+ ret = -EBUSY;
+ goto error;
}
ret = snd_ctl_remove(card, kctl);
+ if (ret < 0)
+ goto error;
+ card->user_ctl_count--;
+error:
up_write(&card->controls_rwsem);
return ret;
}
@@ -951,7 +959,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
if (card->user_ctl_count >= MAX_USER_CONTROLS)
return -ENOMEM;
- if (info->count > 1024)
+ if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
@@ -1052,18 +1060,10 @@ static int snd_ctl_elem_remove(struct snd_ctl_file *file,
struct snd_ctl_elem_id __user *_id)
{
struct snd_ctl_elem_id id;
- int err;
if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
- err = snd_ctl_remove_unlocked_id(file, &id);
- if (! err) {
- struct snd_card *card = file->card;
- down_write(&card->controls_rwsem);
- card->user_ctl_count--;
- up_write(&card->controls_rwsem);
- }
- return err;
+ return snd_ctl_remove_user_ctl(file, &id);
}
static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr)
diff --git a/sound/core/info.c b/sound/core/info.c
index 35df614f6c55..d749a0d394a7 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -88,12 +88,10 @@ static int resize_info_buffer(struct snd_info_buffer *buffer,
char *nbuf;
nsize = PAGE_ALIGN(nsize);
- nbuf = kmalloc(nsize, GFP_KERNEL);
+ nbuf = krealloc(buffer->buffer, nsize, GFP_KERNEL);
if (! nbuf)
return -ENOMEM;
- memcpy(nbuf, buffer->buffer, buffer->len);
- kfree(buffer->buffer);
buffer->buffer = nbuf;
buffer->len = nsize;
return 0;
@@ -108,7 +106,7 @@ static int resize_info_buffer(struct snd_info_buffer *buffer,
*
* Returns the size of output string.
*/
-int snd_iprintf(struct snd_info_buffer *buffer, char *fmt,...)
+int snd_iprintf(struct snd_info_buffer *buffer, const char *fmt, ...)
{
va_list args;
int len, res;
@@ -727,7 +725,7 @@ EXPORT_SYMBOL(snd_info_get_line);
* Returns the updated pointer of the original string so that
* it can be used for the next call.
*/
-char *snd_info_get_str(char *dest, char *src, int len)
+const char *snd_info_get_str(char *dest, const char *src, int len)
{
int c;
diff --git a/sound/core/init.c b/sound/core/init.c
index d5d40d78c409..ec4a50ce5656 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -31,6 +31,14 @@
#include <sound/control.h>
#include <sound/info.h>
+/* monitor files for graceful shutdown (hotplug) */
+struct snd_monitor_file {
+ struct file *file;
+ const struct file_operations *disconnected_f_op;
+ struct list_head shutdown_list; /* still needs to be shut down */
+ struct list_head list; /* link of monitor files */
+};
+
static DEFINE_SPINLOCK(shutdown_lock);
static LIST_HEAD(shutdown_files);
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 1b3534d67686..9e92441f9b78 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -199,6 +199,8 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
case SNDRV_DMA_TYPE_DEV:
dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
break;
+#endif
+#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_malloc_sgbuf_pages(device, size, dmab, NULL);
break;
@@ -269,6 +271,8 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
case SNDRV_DMA_TYPE_DEV:
snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
break;
+#endif
+#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_free_sgbuf_pages(dmab);
break;
diff --git a/sound/core/misc.c b/sound/core/misc.c
index a9710e0c97af..23a032c6d487 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -24,6 +24,20 @@
#include <linux/ioport.h>
#include <sound/core.h>
+#ifdef CONFIG_SND_DEBUG
+
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+#define DEFAULT_DEBUG_LEVEL 2
+#else
+#define DEFAULT_DEBUG_LEVEL 1
+#endif
+
+static int debug = DEFAULT_DEBUG_LEVEL;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0 = disable)");
+
+#endif /* CONFIG_SND_DEBUG */
+
void release_and_free_resource(struct resource *res)
{
if (res) {
@@ -35,46 +49,53 @@ void release_and_free_resource(struct resource *res)
EXPORT_SYMBOL(release_and_free_resource);
#ifdef CONFIG_SND_VERBOSE_PRINTK
-void snd_verbose_printk(const char *file, int line, const char *format, ...)
+/* strip the leading path if the given path is absolute */
+static const char *sanity_file_name(const char *path)
{
- va_list args;
-
- if (format[0] == '<' && format[1] >= '0' && format[1] <= '7' && format[2] == '>') {
- char tmp[] = "<0>";
+ if (*path == '/')
+ return strrchr(path, '/') + 1;
+ else
+ return path;
+}
+
+/* print file and line with a certain printk prefix */
+static int print_snd_pfx(unsigned int level, const char *path, int line,
+ const char *format)
+{
+ const char *file = sanity_file_name(path);
+ char tmp[] = "<0>";
+ const char *pfx = level ? KERN_DEBUG : KERN_DEFAULT;
+ int ret = 0;
+
+ if (format[0] == '<' && format[2] == '>') {
tmp[1] = format[1];
- printk("%sALSA %s:%d: ", tmp, file, line);
- format += 3;
- } else {
- printk("ALSA %s:%d: ", file, line);
+ pfx = tmp;
+ ret = 1;
}
- va_start(args, format);
- vprintk(format, args);
- va_end(args);
+ printk("%sALSA %s:%d: ", pfx, file, line);
+ return ret;
}
-
-EXPORT_SYMBOL(snd_verbose_printk);
+#else
+#define print_snd_pfx(level, path, line, format) 0
#endif
-#if defined(CONFIG_SND_DEBUG) && defined(CONFIG_SND_VERBOSE_PRINTK)
-void snd_verbose_printd(const char *file, int line, const char *format, ...)
+#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
+void __snd_printk(unsigned int level, const char *path, int line,
+ const char *format, ...)
{
va_list args;
- if (format[0] == '<' && format[1] >= '0' && format[1] <= '7' && format[2] == '>') {
- char tmp[] = "<0>";
- tmp[1] = format[1];
- printk("%sALSA %s:%d: ", tmp, file, line);
- format += 3;
- } else {
- printk(KERN_DEBUG "ALSA %s:%d: ", file, line);
- }
+#ifdef CONFIG_SND_DEBUG
+ if (debug < level)
+ return;
+#endif
va_start(args, format);
+ if (print_snd_pfx(level, path, line, format))
+ format += 3; /* skip the printk level-prefix */
vprintk(format, args);
va_end(args);
-
}
-
-EXPORT_SYMBOL(snd_verbose_printd);
+EXPORT_SYMBOL_GPL(__snd_printk);
#endif
#ifdef CONFIG_PCI
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 5dcd8a526970..772423889eb3 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -1154,7 +1154,8 @@ static void snd_mixer_oss_proc_write(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_mixer_oss *mixer = entry->private_data;
- char line[128], str[32], idxstr[16], *cptr;
+ char line[128], str[32], idxstr[16];
+ const char *cptr;
int ch, idx;
struct snd_mixer_oss_assign_table *tbl;
struct slot *slot;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index dbe406b82591..d9c96353121a 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1043,10 +1043,15 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
runtime->oss.channels = params_channels(params);
runtime->oss.rate = params_rate(params);
- runtime->oss.params = 0;
- runtime->oss.prepare = 1;
vfree(runtime->oss.buffer);
runtime->oss.buffer = vmalloc(runtime->oss.period_bytes);
+ if (!runtime->oss.buffer) {
+ err = -ENOMEM;
+ goto failure;
+ }
+
+ runtime->oss.params = 0;
+ runtime->oss.prepare = 1;
runtime->oss.buffer_used = 0;
if (runtime->dma_area)
snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes));
@@ -2836,7 +2841,8 @@ static void snd_pcm_oss_proc_write(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_pcm_str *pstr = entry->private_data;
- char line[128], str[32], task_name[32], *ptr;
+ char line[128], str[32], task_name[32];
+ const char *ptr;
int idx1;
struct snd_pcm_oss_setup *setup, *setup1, template;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 145931a9ff30..0c1440121c22 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -162,18 +162,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
return -ENOIOCTLCMD;
}
-#ifdef CONFIG_SND_VERBOSE_PROCFS
-
-#define STATE(v) [SNDRV_PCM_STATE_##v] = #v
-#define STREAM(v) [SNDRV_PCM_STREAM_##v] = #v
-#define READY(v) [SNDRV_PCM_READY_##v] = #v
-#define XRUN(v) [SNDRV_PCM_XRUN_##v] = #v
-#define SILENCE(v) [SNDRV_PCM_SILENCE_##v] = #v
-#define TSTAMP(v) [SNDRV_PCM_TSTAMP_##v] = #v
-#define ACCESS(v) [SNDRV_PCM_ACCESS_##v] = #v
-#define START(v) [SNDRV_PCM_START_##v] = #v
#define FORMAT(v) [SNDRV_PCM_FORMAT_##v] = #v
-#define SUBFORMAT(v) [SNDRV_PCM_SUBFORMAT_##v] = #v
static char *snd_pcm_format_names[] = {
FORMAT(S8),
@@ -216,10 +205,23 @@ static char *snd_pcm_format_names[] = {
FORMAT(U18_3BE),
};
-static const char *snd_pcm_format_name(snd_pcm_format_t format)
+const char *snd_pcm_format_name(snd_pcm_format_t format)
{
return snd_pcm_format_names[format];
}
+EXPORT_SYMBOL_GPL(snd_pcm_format_name);
+
+#ifdef CONFIG_SND_VERBOSE_PROCFS
+
+#define STATE(v) [SNDRV_PCM_STATE_##v] = #v
+#define STREAM(v) [SNDRV_PCM_STREAM_##v] = #v
+#define READY(v) [SNDRV_PCM_READY_##v] = #v
+#define XRUN(v) [SNDRV_PCM_XRUN_##v] = #v
+#define SILENCE(v) [SNDRV_PCM_SILENCE_##v] = #v
+#define TSTAMP(v) [SNDRV_PCM_TSTAMP_##v] = #v
+#define ACCESS(v) [SNDRV_PCM_ACCESS_##v] = #v
+#define START(v) [SNDRV_PCM_START_##v] = #v
+#define SUBFORMAT(v) [SNDRV_PCM_SUBFORMAT_##v] = #v
static char *snd_pcm_stream_names[] = {
STREAM(PLAYBACK),
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 9db60d831bb2..30f410832a25 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -197,12 +197,16 @@ static int snd_pcm_update_hw_ptr_post(struct snd_pcm_substream *substream,
avail = snd_pcm_capture_avail(runtime);
if (avail > runtime->avail_max)
runtime->avail_max = avail;
- if (avail >= runtime->stop_threshold) {
- if (substream->runtime->status->state == SNDRV_PCM_STATE_DRAINING)
+ if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
+ if (avail >= runtime->buffer_size) {
snd_pcm_drain_done(substream);
- else
+ return -EPIPE;
+ }
+ } else {
+ if (avail >= runtime->stop_threshold) {
xrun(substream);
- return -EPIPE;
+ return -EPIPE;
+ }
}
if (avail >= runtime->control->avail_min)
wake_up(&runtime->sleep);
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index a6d42808828c..caa7796bc2f5 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -304,6 +304,7 @@ int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all);
+#ifdef CONFIG_SND_DMA_SGBUF
/**
* snd_pcm_sgbuf_ops_page - get the page struct at the given offset
* @substream: the pcm substream instance
@@ -349,6 +350,7 @@ unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
return size;
}
EXPORT_SYMBOL(snd_pcm_sgbuf_get_chunk_size);
+#endif /* CONFIG_SND_DMA_SGBUF */
/**
* snd_pcm_lib_malloc_pages - allocate the DMA buffer
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index ac2150e0670d..59e5fbe6af51 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1343,8 +1343,6 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream,
static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
{
- if (substream->f_flags & O_NONBLOCK)
- return -EAGAIN;
substream->runtime->trigger_master = substream;
return 0;
}
@@ -1392,7 +1390,6 @@ static struct action_ops snd_pcm_action_drain_init = {
struct drain_rec {
struct snd_pcm_substream *substream;
wait_queue_t wait;
- snd_pcm_uframes_t stop_threshold;
};
static int snd_pcm_drop(struct snd_pcm_substream *substream);
@@ -1404,13 +1401,15 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream);
* After this call, all streams are supposed to be either SETUP or DRAINING
* (capture only) state.
*/
-static int snd_pcm_drain(struct snd_pcm_substream *substream)
+static int snd_pcm_drain(struct snd_pcm_substream *substream,
+ struct file *file)
{
struct snd_card *card;
struct snd_pcm_runtime *runtime;
struct snd_pcm_substream *s;
int result = 0;
int i, num_drecs;
+ int nonblock = 0;
struct drain_rec *drec, drec_tmp, *d;
card = substream->pcm->card;
@@ -1428,6 +1427,15 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
}
}
+ if (file) {
+ if (file->f_flags & O_NONBLOCK)
+ nonblock = 1;
+ } else if (substream->f_flags & O_NONBLOCK)
+ nonblock = 1;
+
+ if (nonblock)
+ goto lock; /* no need to allocate waitqueues */
+
/* allocate temporary record for drain sync */
down_read(&snd_pcm_link_rwsem);
if (snd_pcm_stream_linked(substream)) {
@@ -1449,16 +1457,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
d->substream = s;
init_waitqueue_entry(&d->wait, current);
add_wait_queue(&runtime->sleep, &d->wait);
- /* stop_threshold fixup to avoid endless loop when
- * stop_threshold > buffer_size
- */
- d->stop_threshold = runtime->stop_threshold;
- if (runtime->stop_threshold > runtime->buffer_size)
- runtime->stop_threshold = runtime->buffer_size;
}
}
up_read(&snd_pcm_link_rwsem);
+ lock:
snd_pcm_stream_lock_irq(substream);
/* resume pause */
if (substream->runtime->status->state == SNDRV_PCM_STATE_PAUSED)
@@ -1466,9 +1469,12 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
/* pre-start/stop - all running streams are changed to DRAINING state */
result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
- if (result < 0) {
- snd_pcm_stream_unlock_irq(substream);
- goto _error;
+ if (result < 0)
+ goto unlock;
+ /* in non-blocking mode, we don't wait in the ioctl but let the caller poll */
+ if (nonblock) {
+ result = -EAGAIN;
+ goto unlock;
}
for (;;) {
@@ -1504,18 +1510,18 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
}
}
+ unlock:
snd_pcm_stream_unlock_irq(substream);
- _error:
- for (i = 0; i < num_drecs; i++) {
- d = &drec[i];
- runtime = d->substream->runtime;
- remove_wait_queue(&runtime->sleep, &d->wait);
- runtime->stop_threshold = d->stop_threshold;
+ if (!nonblock) {
+ for (i = 0; i < num_drecs; i++) {
+ d = &drec[i];
+ runtime = d->substream->runtime;
+ remove_wait_queue(&runtime->sleep, &d->wait);
+ }
+ if (drec != &drec_tmp)
+ kfree(drec);
}
-
- if (drec != &drec_tmp)
- kfree(drec);
snd_power_unlock(card);
return result;
@@ -2208,6 +2214,9 @@ static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *subst
case SNDRV_PCM_STATE_XRUN:
ret = -EPIPE;
goto __end;
+ case SNDRV_PCM_STATE_SUSPENDED:
+ ret = -ESTRPIPE;
+ goto __end;
default:
ret = -EBADFD;
goto __end;
@@ -2253,6 +2262,9 @@ static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substr
case SNDRV_PCM_STATE_XRUN:
ret = -EPIPE;
goto __end;
+ case SNDRV_PCM_STATE_SUSPENDED:
+ ret = -ESTRPIPE;
+ goto __end;
default:
ret = -EBADFD;
goto __end;
@@ -2299,6 +2311,9 @@ static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *subs
case SNDRV_PCM_STATE_XRUN:
ret = -EPIPE;
goto __end;
+ case SNDRV_PCM_STATE_SUSPENDED:
+ ret = -ESTRPIPE;
+ goto __end;
default:
ret = -EBADFD;
goto __end;
@@ -2345,6 +2360,9 @@ static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *subst
case SNDRV_PCM_STATE_XRUN:
ret = -EPIPE;
goto __end;
+ case SNDRV_PCM_STATE_SUSPENDED:
+ ret = -ESTRPIPE;
+ goto __end;
default:
ret = -EBADFD;
goto __end;
@@ -2544,7 +2562,7 @@ static int snd_pcm_common_ioctl1(struct file *file,
return snd_pcm_hw_params_old_user(substream, arg);
#endif
case SNDRV_PCM_IOCTL_DRAIN:
- return snd_pcm_drain(substream);
+ return snd_pcm_drain(substream, file);
case SNDRV_PCM_IOCTL_DROP:
return snd_pcm_drop(substream);
case SNDRV_PCM_IOCTL_PAUSE:
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 473247c8e6d3..c0adc14c91f0 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -274,7 +274,7 @@ static int open_substream(struct snd_rawmidi *rmidi,
return err;
substream->opened = 1;
if (substream->use_count++ == 0)
- substream->active_sensing = 1;
+ substream->active_sensing = 0;
if (mode & SNDRV_RAWMIDI_LFLG_APPEND)
substream->append = 1;
rmidi->streams[substream->stream].substream_opened++;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 0a711d2d04f0..9dfb2f77be60 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <sound/asoundef.h>
#include "seq_oss_midi.h"
#include "seq_oss_readq.h"
#include "seq_oss_timer.h"
@@ -476,19 +477,20 @@ snd_seq_oss_midi_reset(struct seq_oss_devinfo *dp, int dev)
ev.source.port = dp->port;
if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_SYNTH) {
ev.type = SNDRV_SEQ_EVENT_SENSING;
- snd_seq_oss_dispatch(dp, &ev, 0, 0); /* active sensing */
+ snd_seq_oss_dispatch(dp, &ev, 0, 0);
}
for (c = 0; c < 16; c++) {
ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
ev.data.control.channel = c;
- ev.data.control.param = 123;
- snd_seq_oss_dispatch(dp, &ev, 0, 0); /* all notes off */
+ ev.data.control.param = MIDI_CTL_ALL_NOTES_OFF;
+ snd_seq_oss_dispatch(dp, &ev, 0, 0);
if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_MUSIC) {
- ev.data.control.param = 121;
- snd_seq_oss_dispatch(dp, &ev, 0, 0); /* reset all controllers */
+ ev.data.control.param =
+ MIDI_CTL_RESET_CONTROLLERS;
+ snd_seq_oss_dispatch(dp, &ev, 0, 0);
ev.type = SNDRV_SEQ_EVENT_PITCHBEND;
ev.data.control.value = 0;
- snd_seq_oss_dispatch(dp, &ev, 0, 0); /* bender off */
+ snd_seq_oss_dispatch(dp, &ev, 0, 0);
}
}
}
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
index 4d26146a62cc..ebaf1b541dcd 100644
--- a/sound/core/seq/seq_midi.c
+++ b/sound/core/seq/seq_midi.c
@@ -120,7 +120,8 @@ static int dump_midi(struct snd_rawmidi_substream *substream, const char *buf, i
return -EINVAL;
runtime = substream->runtime;
if ((tmp = runtime->avail) < count) {
- snd_printd("warning, output event was lost (count = %i, available = %i)\n", count, tmp);
+ if (printk_ratelimit())
+ snd_printk(KERN_ERR "MIDI output buffer overrun\n");
return -ENOMEM;
}
if (snd_rawmidi_kernel_write(substream, buf, count) < count)
@@ -236,6 +237,7 @@ static int midisynth_use(void *private_data, struct snd_seq_port_subscribe *info
memset(&params, 0, sizeof(params));
params.avail_min = 1;
params.buffer_size = output_buffer_size;
+ params.no_active_sensing = 1;
if ((err = snd_rawmidi_output_params(msynth->output_rfile.output, &params)) < 0) {
snd_rawmidi_kernel_release(&msynth->output_rfile);
return err;
@@ -248,12 +250,9 @@ static int midisynth_use(void *private_data, struct snd_seq_port_subscribe *info
static int midisynth_unuse(void *private_data, struct snd_seq_port_subscribe *info)
{
struct seq_midisynth *msynth = private_data;
- unsigned char buf = 0xff; /* MIDI reset */
if (snd_BUG_ON(!msynth->output_rfile.output))
return -EINVAL;
- /* sending single MIDI reset message to shut the device up */
- snd_rawmidi_kernel_write(msynth->output_rfile.output, &buf, 1);
snd_rawmidi_drain_output(msynth->output_rfile.output);
return snd_rawmidi_kernel_release(&msynth->output_rfile);
}
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 257624bd1997..3b9b550109cb 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -353,7 +353,8 @@ static void master_free(struct snd_kcontrol *kcontrol)
*
* The optional argument @tlv can be used to specify the TLV information
* for dB scale of the master control. It should be a single element
- * with #SNDRV_CTL_TLVT_DB_SCALE type, and should be the max 0dB.
+ * with #SNDRV_CTL_TLVT_DB_SCALE, #SNDRV_CTL_TLVT_DB_MINMAX or
+ * #SNDRV_CTL_TLVT_DB_MINMAX_MUTE type, and should be the max 0dB.
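+ *
+ * A minimal illustrative use (dB range chosen arbitrarily):
+ *   static const DECLARE_TLV_DB_SCALE(master_tlv, -6000, 100, 1);
+ *   kctl = snd_ctl_make_virtual_master("Master Playback Volume",
+ *                                      master_tlv);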
*/
struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
const unsigned int *tlv)
@@ -384,7 +385,10 @@ struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
kctl->private_free = master_free;
/* additional (constant) TLV read */
- if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
+ if (tlv &&
+ (tlv[0] == SNDRV_CTL_TLVT_DB_SCALE ||
+ tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX ||
+ tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX_MUTE)) {
kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
memcpy(master->tlv, tlv, sizeof(master->tlv));
kctl->tlv.p = master->tlv;
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 54239d2e0997..6ba066c41d2e 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -25,12 +25,15 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/wait.h>
+#include <linux/hrtimer.h>
+#include <linux/math64.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/rawmidi.h>
+#include <sound/info.h>
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
@@ -39,7 +42,7 @@ MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{ALSA,Dummy soundcard}}");
#define MAX_PCM_DEVICES 4
-#define MAX_PCM_SUBSTREAMS 16
+#define MAX_PCM_SUBSTREAMS 128
#define MAX_MIDI_DEVICES 2
#if 0 /* emu10k1 emulation */
@@ -148,6 +151,10 @@ static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0};
static int pcm_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8};
//static int midi_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
+#ifdef CONFIG_HIGH_RES_TIMERS
+static int hrtimer = 1;
+#endif
+static int fake_buffer = 1;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for dummy soundcard.");
@@ -161,6 +168,12 @@ module_param_array(pcm_substreams, int, NULL, 0444);
MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-16) for dummy driver.");
//module_param_array(midi_devs, int, NULL, 0444);
//MODULE_PARM_DESC(midi_devs, "MIDI devices # (0-2) for dummy driver.");
+module_param(fake_buffer, bool, 0444);
+MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
+#ifdef CONFIG_HIGH_RES_TIMERS
+module_param(hrtimer, bool, 0644);
+MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
+#endif
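+
+/*
+ * Illustrative parameter usage (module name assumed to be snd-dummy):
+ * "modprobe snd-dummy hrtimer=0 fake_buffer=0" selects the system
+ * timer source and real buffer allocations instead of the defaults.
+ */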
static struct platform_device *devices[SNDRV_CARDS];
@@ -171,137 +184,324 @@ static struct platform_device *devices[SNDRV_CARDS];
#define MIXER_ADDR_CD 4
#define MIXER_ADDR_LAST 4
+struct dummy_timer_ops {
+ int (*create)(struct snd_pcm_substream *);
+ void (*free)(struct snd_pcm_substream *);
+ int (*prepare)(struct snd_pcm_substream *);
+ int (*start)(struct snd_pcm_substream *);
+ int (*stop)(struct snd_pcm_substream *);
+ snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
+};
+
struct snd_dummy {
struct snd_card *card;
struct snd_pcm *pcm;
spinlock_t mixer_lock;
int mixer_volume[MIXER_ADDR_LAST+1][2];
int capture_source[MIXER_ADDR_LAST+1][2];
+ const struct dummy_timer_ops *timer_ops;
};
-struct snd_dummy_pcm {
- struct snd_dummy *dummy;
+/*
+ * system timer interface
+ */
+
+struct dummy_systimer_pcm {
spinlock_t lock;
struct timer_list timer;
- unsigned int pcm_buffer_size;
- unsigned int pcm_period_size;
- unsigned int pcm_bps; /* bytes per second */
- unsigned int pcm_hz; /* HZ */
- unsigned int pcm_irq_pos; /* IRQ position */
- unsigned int pcm_buf_pos; /* position in buffer */
+ unsigned long base_time;
+ unsigned int frac_pos; /* fractional sample position (scaled by HZ) */
+ unsigned int frac_period_rest;
+ unsigned int frac_buffer_size; /* buffer_size * HZ */
+ unsigned int frac_period_size; /* period_size * HZ */
+ unsigned int rate;
+ int elapsed;
struct snd_pcm_substream *substream;
};
-
-static inline void snd_card_dummy_pcm_timer_start(struct snd_dummy_pcm *dpcm)
+static void dummy_systimer_rearm(struct dummy_systimer_pcm *dpcm)
{
- dpcm->timer.expires = 1 + jiffies;
+ dpcm->timer.expires = jiffies +
+ (dpcm->frac_period_rest + dpcm->rate - 1) / dpcm->rate;
add_timer(&dpcm->timer);
}
-static inline void snd_card_dummy_pcm_timer_stop(struct snd_dummy_pcm *dpcm)
+static void dummy_systimer_update(struct dummy_systimer_pcm *dpcm)
{
- del_timer(&dpcm->timer);
+ unsigned long delta;
+
+ delta = jiffies - dpcm->base_time;
+ if (!delta)
+ return;
+ dpcm->base_time += delta;
+ delta *= dpcm->rate;
+ dpcm->frac_pos += delta;
+ while (dpcm->frac_pos >= dpcm->frac_buffer_size)
+ dpcm->frac_pos -= dpcm->frac_buffer_size;
+ while (dpcm->frac_period_rest <= delta) {
+ dpcm->elapsed++;
+ dpcm->frac_period_rest += dpcm->frac_period_size;
+ }
+ dpcm->frac_period_rest -= delta;
}
-static int snd_card_dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int dummy_systimer_start(struct snd_pcm_substream *substream)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_dummy_pcm *dpcm = runtime->private_data;
- int err = 0;
+ struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
+ spin_lock(&dpcm->lock);
+ dpcm->base_time = jiffies;
+ dummy_systimer_rearm(dpcm);
+ spin_unlock(&dpcm->lock);
+ return 0;
+}
+static int dummy_systimer_stop(struct snd_pcm_substream *substream)
+{
+ struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
spin_lock(&dpcm->lock);
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- snd_card_dummy_pcm_timer_start(dpcm);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- snd_card_dummy_pcm_timer_stop(dpcm);
- break;
- default:
- err = -EINVAL;
- break;
- }
+ del_timer(&dpcm->timer);
spin_unlock(&dpcm->lock);
return 0;
}
-static int snd_card_dummy_pcm_prepare(struct snd_pcm_substream *substream)
+static int dummy_systimer_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_dummy_pcm *dpcm = runtime->private_data;
- int bps;
-
- bps = snd_pcm_format_width(runtime->format) * runtime->rate *
- runtime->channels / 8;
-
- if (bps <= 0)
- return -EINVAL;
-
- dpcm->pcm_bps = bps;
- dpcm->pcm_hz = HZ;
- dpcm->pcm_buffer_size = snd_pcm_lib_buffer_bytes(substream);
- dpcm->pcm_period_size = snd_pcm_lib_period_bytes(substream);
- dpcm->pcm_irq_pos = 0;
- dpcm->pcm_buf_pos = 0;
+ struct dummy_systimer_pcm *dpcm = runtime->private_data;
- snd_pcm_format_set_silence(runtime->format, runtime->dma_area,
- bytes_to_samples(runtime, runtime->dma_bytes));
+ dpcm->frac_pos = 0;
+ dpcm->rate = runtime->rate;
+ dpcm->frac_buffer_size = runtime->buffer_size * HZ;
+ dpcm->frac_period_size = runtime->period_size * HZ;
+ dpcm->frac_period_rest = dpcm->frac_period_size;
+ dpcm->elapsed = 0;
return 0;
}
-static void snd_card_dummy_pcm_timer_function(unsigned long data)
+static void dummy_systimer_callback(unsigned long data)
{
- struct snd_dummy_pcm *dpcm = (struct snd_dummy_pcm *)data;
+ struct dummy_systimer_pcm *dpcm = (struct dummy_systimer_pcm *)data;
unsigned long flags;
+ int elapsed = 0;
spin_lock_irqsave(&dpcm->lock, flags);
- dpcm->timer.expires = 1 + jiffies;
- add_timer(&dpcm->timer);
- dpcm->pcm_irq_pos += dpcm->pcm_bps;
- dpcm->pcm_buf_pos += dpcm->pcm_bps;
- dpcm->pcm_buf_pos %= dpcm->pcm_buffer_size * dpcm->pcm_hz;
- if (dpcm->pcm_irq_pos >= dpcm->pcm_period_size * dpcm->pcm_hz) {
- dpcm->pcm_irq_pos %= dpcm->pcm_period_size * dpcm->pcm_hz;
- spin_unlock_irqrestore(&dpcm->lock, flags);
+ dummy_systimer_update(dpcm);
+ dummy_systimer_rearm(dpcm);
+ elapsed = dpcm->elapsed;
+ dpcm->elapsed = 0;
+ spin_unlock_irqrestore(&dpcm->lock, flags);
+ if (elapsed)
+ snd_pcm_period_elapsed(dpcm->substream);
+}
+
+static snd_pcm_uframes_t
+dummy_systimer_pointer(struct snd_pcm_substream *substream)
+{
+ struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
+ snd_pcm_uframes_t pos;
+
+ spin_lock(&dpcm->lock);
+ dummy_systimer_update(dpcm);
+ pos = dpcm->frac_pos / HZ;
+ spin_unlock(&dpcm->lock);
+ return pos;
+}
+
+static int dummy_systimer_create(struct snd_pcm_substream *substream)
+{
+ struct dummy_systimer_pcm *dpcm;
+
+ dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
+ if (!dpcm)
+ return -ENOMEM;
+ substream->runtime->private_data = dpcm;
+ init_timer(&dpcm->timer);
+ dpcm->timer.data = (unsigned long) dpcm;
+ dpcm->timer.function = dummy_systimer_callback;
+ spin_lock_init(&dpcm->lock);
+ dpcm->substream = substream;
+ return 0;
+}
+
+static void dummy_systimer_free(struct snd_pcm_substream *substream)
+{
+ kfree(substream->runtime->private_data);
+}
+
+static struct dummy_timer_ops dummy_systimer_ops = {
+ .create = dummy_systimer_create,
+ .free = dummy_systimer_free,
+ .prepare = dummy_systimer_prepare,
+ .start = dummy_systimer_start,
+ .stop = dummy_systimer_stop,
+ .pointer = dummy_systimer_pointer,
+};
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+/*
+ * hrtimer interface
+ */
+
+struct dummy_hrtimer_pcm {
+ ktime_t base_time;
+ ktime_t period_time;
+ atomic_t running;
+ struct hrtimer timer;
+ struct tasklet_struct tasklet;
+ struct snd_pcm_substream *substream;
+};
+
+static void dummy_hrtimer_pcm_elapsed(unsigned long priv)
+{
+ struct dummy_hrtimer_pcm *dpcm = (struct dummy_hrtimer_pcm *)priv;
+ if (atomic_read(&dpcm->running))
snd_pcm_period_elapsed(dpcm->substream);
- } else
- spin_unlock_irqrestore(&dpcm->lock, flags);
}
-static snd_pcm_uframes_t snd_card_dummy_pcm_pointer(struct snd_pcm_substream *substream)
+static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
+{
+ struct dummy_hrtimer_pcm *dpcm;
+
+ dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer);
+ if (!atomic_read(&dpcm->running))
+ return HRTIMER_NORESTART;
+ tasklet_schedule(&dpcm->tasklet);
+ hrtimer_forward_now(timer, dpcm->period_time);
+ return HRTIMER_RESTART;
+}
+
+static int dummy_hrtimer_start(struct snd_pcm_substream *substream)
+{
+ struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
+
+ dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
+ hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL);
+ atomic_set(&dpcm->running, 1);
+ return 0;
+}
+
+static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
+{
+ struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
+
+ atomic_set(&dpcm->running, 0);
+ hrtimer_cancel(&dpcm->timer);
+ return 0;
+}
+
+static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
+{
+ tasklet_kill(&dpcm->tasklet);
+}
+
+static snd_pcm_uframes_t
+dummy_hrtimer_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_dummy_pcm *dpcm = runtime->private_data;
+ struct dummy_hrtimer_pcm *dpcm = runtime->private_data;
+ u64 delta;
+ u32 pos;
+
+ delta = ktime_us_delta(hrtimer_cb_get_time(&dpcm->timer),
+ dpcm->base_time);
+ delta = div_u64(delta * runtime->rate + 999999, 1000000);
+ div_u64_rem(delta, runtime->buffer_size, &pos);
+ return pos;
+}
- return bytes_to_frames(runtime, dpcm->pcm_buf_pos / dpcm->pcm_hz);
+static int dummy_hrtimer_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dummy_hrtimer_pcm *dpcm = runtime->private_data;
+ unsigned int period, rate;
+ long sec;
+ unsigned long nsecs;
+
+ dummy_hrtimer_sync(dpcm);
+ period = runtime->period_size;
+ rate = runtime->rate;
+ sec = period / rate;
+ period %= rate;
+ nsecs = div_u64((u64)period * 1000000000UL + rate - 1, rate);
+ dpcm->period_time = ktime_set(sec, nsecs);
+
+ return 0;
}
-static struct snd_pcm_hardware snd_card_dummy_playback =
+static int dummy_hrtimer_create(struct snd_pcm_substream *substream)
{
- .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
- .formats = USE_FORMATS,
- .rates = USE_RATE,
- .rate_min = USE_RATE_MIN,
- .rate_max = USE_RATE_MAX,
- .channels_min = USE_CHANNELS_MIN,
- .channels_max = USE_CHANNELS_MAX,
- .buffer_bytes_max = MAX_BUFFER_SIZE,
- .period_bytes_min = 64,
- .period_bytes_max = MAX_PERIOD_SIZE,
- .periods_min = USE_PERIODS_MIN,
- .periods_max = USE_PERIODS_MAX,
- .fifo_size = 0,
+ struct dummy_hrtimer_pcm *dpcm;
+
+ dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
+ if (!dpcm)
+ return -ENOMEM;
+ substream->runtime->private_data = dpcm;
+ hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ dpcm->timer.function = dummy_hrtimer_callback;
+ dpcm->substream = substream;
+ atomic_set(&dpcm->running, 0);
+ tasklet_init(&dpcm->tasklet, dummy_hrtimer_pcm_elapsed,
+ (unsigned long)dpcm);
+ return 0;
+}
+
+static void dummy_hrtimer_free(struct snd_pcm_substream *substream)
+{
+ struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
+ dummy_hrtimer_sync(dpcm);
+ kfree(dpcm);
+}
+
+static struct dummy_timer_ops dummy_hrtimer_ops = {
+ .create = dummy_hrtimer_create,
+ .free = dummy_hrtimer_free,
+ .prepare = dummy_hrtimer_prepare,
+ .start = dummy_hrtimer_start,
+ .stop = dummy_hrtimer_stop,
+ .pointer = dummy_hrtimer_pointer,
};
-static struct snd_pcm_hardware snd_card_dummy_capture =
+#endif /* CONFIG_HIGH_RES_TIMERS */
+
+/*
+ * PCM interface
+ */
+
+static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
- .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID),
+ struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ return dummy->timer_ops->start(substream);
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ return dummy->timer_ops->stop(substream);
+ }
+ return -EINVAL;
+}
+
+static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+
+ return dummy->timer_ops->prepare(substream);
+}
+
+static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+
+ return dummy->timer_ops->pointer(substream);
+}
+
+static struct snd_pcm_hardware dummy_pcm_hardware = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_MMAP_VALID),
.formats = USE_FORMATS,
.rates = USE_RATE,
.rate_min = USE_RATE_MIN,
@@ -316,123 +516,152 @@ static struct snd_pcm_hardware snd_card_dummy_capture =
.fifo_size = 0,
};
-static void snd_card_dummy_runtime_free(struct snd_pcm_runtime *runtime)
-{
- kfree(runtime->private_data);
-}
-
-static int snd_card_dummy_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int dummy_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+ if (fake_buffer) {
+ /* runtime->dma_bytes has to be set manually to allow mmap */
+ substream->runtime->dma_bytes = params_buffer_bytes(hw_params);
+ return 0;
+ }
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
}
-static int snd_card_dummy_hw_free(struct snd_pcm_substream *substream)
+static int dummy_pcm_hw_free(struct snd_pcm_substream *substream)
{
+ if (fake_buffer)
+ return 0;
return snd_pcm_lib_free_pages(substream);
}
-static struct snd_dummy_pcm *new_pcm_stream(struct snd_pcm_substream *substream)
-{
- struct snd_dummy_pcm *dpcm;
-
- dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
- if (! dpcm)
- return dpcm;
- init_timer(&dpcm->timer);
- dpcm->timer.data = (unsigned long) dpcm;
- dpcm->timer.function = snd_card_dummy_pcm_timer_function;
- spin_lock_init(&dpcm->lock);
- dpcm->substream = substream;
- return dpcm;
-}
-
-static int snd_card_dummy_playback_open(struct snd_pcm_substream *substream)
+static int dummy_pcm_open(struct snd_pcm_substream *substream)
{
+ struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_dummy_pcm *dpcm;
int err;
- if ((dpcm = new_pcm_stream(substream)) == NULL)
- return -ENOMEM;
- runtime->private_data = dpcm;
- /* makes the infrastructure responsible for freeing dpcm */
- runtime->private_free = snd_card_dummy_runtime_free;
- runtime->hw = snd_card_dummy_playback;
+ dummy->timer_ops = &dummy_systimer_ops;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ if (hrtimer)
+ dummy->timer_ops = &dummy_hrtimer_ops;
+#endif
+
+ err = dummy->timer_ops->create(substream);
+ if (err < 0)
+ return err;
+
+ runtime->hw = dummy_pcm_hardware;
if (substream->pcm->device & 1) {
runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED;
runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED;
}
if (substream->pcm->device & 2)
- runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP|SNDRV_PCM_INFO_MMAP_VALID);
- err = add_playback_constraints(runtime);
- if (err < 0)
+ runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ err = add_playback_constraints(substream->runtime);
+ else
+ err = add_capture_constraints(substream->runtime);
+ if (err < 0) {
+ dummy->timer_ops->free(substream);
return err;
-
+ }
return 0;
}
-static int snd_card_dummy_capture_open(struct snd_pcm_substream *substream)
+static int dummy_pcm_close(struct snd_pcm_substream *substream)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_dummy_pcm *dpcm;
- int err;
+ struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+ dummy->timer_ops->free(substream);
+ return 0;
+}
- if ((dpcm = new_pcm_stream(substream)) == NULL)
- return -ENOMEM;
- runtime->private_data = dpcm;
- /* makes the infrastructure responsible for freeing dpcm */
- runtime->private_free = snd_card_dummy_runtime_free;
- runtime->hw = snd_card_dummy_capture;
- if (substream->pcm->device == 1) {
- runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED;
- runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED;
+/*
+ * dummy buffer handling
+ */
+
+static void *dummy_page[2];
+
+static void free_fake_buffer(void)
+{
+ if (fake_buffer) {
+ int i;
+ for (i = 0; i < 2; i++)
+ if (dummy_page[i]) {
+ free_page((unsigned long)dummy_page[i]);
+ dummy_page[i] = NULL;
+ }
}
- if (substream->pcm->device & 2)
- runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP|SNDRV_PCM_INFO_MMAP_VALID);
- err = add_capture_constraints(runtime);
- if (err < 0)
- return err;
+}
+static int alloc_fake_buffer(void)
+{
+ int i;
+
+ if (!fake_buffer)
+ return 0;
+ for (i = 0; i < 2; i++) {
+ dummy_page[i] = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!dummy_page[i]) {
+ free_fake_buffer();
+ return -ENOMEM;
+ }
+ }
return 0;
}
-static int snd_card_dummy_playback_close(struct snd_pcm_substream *substream)
+static int dummy_pcm_copy(struct snd_pcm_substream *substream,
+ int channel, snd_pcm_uframes_t pos,
+ void __user *dst, snd_pcm_uframes_t count)
{
- return 0;
+ return 0; /* do nothing */
}
-static int snd_card_dummy_capture_close(struct snd_pcm_substream *substream)
+static int dummy_pcm_silence(struct snd_pcm_substream *substream,
+ int channel, snd_pcm_uframes_t pos,
+ snd_pcm_uframes_t count)
{
- return 0;
+ return 0; /* do nothing */
+}
+
+static struct page *dummy_pcm_page(struct snd_pcm_substream *substream,
+ unsigned long offset)
+{
+ return virt_to_page(dummy_page[substream->stream]); /* the same page */
}
-static struct snd_pcm_ops snd_card_dummy_playback_ops = {
- .open = snd_card_dummy_playback_open,
- .close = snd_card_dummy_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_card_dummy_hw_params,
- .hw_free = snd_card_dummy_hw_free,
- .prepare = snd_card_dummy_pcm_prepare,
- .trigger = snd_card_dummy_pcm_trigger,
- .pointer = snd_card_dummy_pcm_pointer,
+static struct snd_pcm_ops dummy_pcm_ops = {
+ .open = dummy_pcm_open,
+ .close = dummy_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = dummy_pcm_hw_params,
+ .hw_free = dummy_pcm_hw_free,
+ .prepare = dummy_pcm_prepare,
+ .trigger = dummy_pcm_trigger,
+ .pointer = dummy_pcm_pointer,
};
-static struct snd_pcm_ops snd_card_dummy_capture_ops = {
- .open = snd_card_dummy_capture_open,
- .close = snd_card_dummy_capture_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_card_dummy_hw_params,
- .hw_free = snd_card_dummy_hw_free,
- .prepare = snd_card_dummy_pcm_prepare,
- .trigger = snd_card_dummy_pcm_trigger,
- .pointer = snd_card_dummy_pcm_pointer,
+static struct snd_pcm_ops dummy_pcm_ops_no_buf = {
+ .open = dummy_pcm_open,
+ .close = dummy_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = dummy_pcm_hw_params,
+ .hw_free = dummy_pcm_hw_free,
+ .prepare = dummy_pcm_prepare,
+ .trigger = dummy_pcm_trigger,
+ .pointer = dummy_pcm_pointer,
+ .copy = dummy_pcm_copy,
+ .silence = dummy_pcm_silence,
+ .page = dummy_pcm_page,
};
static int __devinit snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
int substreams)
{
struct snd_pcm *pcm;
+ struct snd_pcm_ops *ops;
int err;
err = snd_pcm_new(dummy->card, "Dummy PCM", device,
@@ -440,17 +669,28 @@ static int __devinit snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
if (err < 0)
return err;
dummy->pcm = pcm;
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_card_dummy_playback_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_card_dummy_capture_ops);
+ if (fake_buffer)
+ ops = &dummy_pcm_ops_no_buf;
+ else
+ ops = &dummy_pcm_ops;
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ops);
pcm->private_data = dummy;
pcm->info_flags = 0;
strcpy(pcm->name, "Dummy PCM");
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- 0, 64*1024);
+ if (!fake_buffer) {
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ 0, 64*1024);
+ }
return 0;
}
+/*
+ * mixer interface
+ */
+
#define DUMMY_VOLUME(xname, xindex, addr) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
@@ -581,6 +821,131 @@ static int __devinit snd_card_dummy_new_mixer(struct snd_dummy *dummy)
return 0;
}
+#if defined(CONFIG_SND_DEBUG) && defined(CONFIG_PROC_FS)
+/*
+ * proc interface
+ */
+static void print_formats(struct snd_info_buffer *buffer)
+{
+ int i;
+
+ for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
+ if (dummy_pcm_hardware.formats & (1ULL << i))
+ snd_iprintf(buffer, " %s", snd_pcm_format_name(i));
+ }
+}
+
+static void print_rates(struct snd_info_buffer *buffer)
+{
+ static int rates[] = {
+ 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000,
+ 64000, 88200, 96000, 176400, 192000,
+ };
+ int i;
+
+ if (dummy_pcm_hardware.rates & SNDRV_PCM_RATE_CONTINUOUS)
+ snd_iprintf(buffer, " continuous");
+ if (dummy_pcm_hardware.rates & SNDRV_PCM_RATE_KNOT)
+ snd_iprintf(buffer, " knot");
+ for (i = 0; i < ARRAY_SIZE(rates); i++)
+ if (dummy_pcm_hardware.rates & (1 << i))
+ snd_iprintf(buffer, " %d", rates[i]);
+}
+
+#define get_dummy_int_ptr(ofs) \
+ (unsigned int *)((char *)&dummy_pcm_hardware + (ofs))
+#define get_dummy_ll_ptr(ofs) \
+ (unsigned long long *)((char *)&dummy_pcm_hardware + (ofs))
+
+struct dummy_hw_field {
+ const char *name;
+ const char *format;
+ unsigned int offset;
+ unsigned int size;
+};
+#define FIELD_ENTRY(item, fmt) { \
+ .name = #item, \
+ .format = fmt, \
+ .offset = offsetof(struct snd_pcm_hardware, item), \
+ .size = sizeof(dummy_pcm_hardware.item) }
+
+static struct dummy_hw_field fields[] = {
+ FIELD_ENTRY(formats, "%#llx"),
+ FIELD_ENTRY(rates, "%#x"),
+ FIELD_ENTRY(rate_min, "%d"),
+ FIELD_ENTRY(rate_max, "%d"),
+ FIELD_ENTRY(channels_min, "%d"),
+ FIELD_ENTRY(channels_max, "%d"),
+ FIELD_ENTRY(buffer_bytes_max, "%ld"),
+ FIELD_ENTRY(period_bytes_min, "%ld"),
+ FIELD_ENTRY(period_bytes_max, "%ld"),
+ FIELD_ENTRY(periods_min, "%d"),
+ FIELD_ENTRY(periods_max, "%d"),
+};
+
+static void dummy_proc_read(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ snd_iprintf(buffer, "%s ", fields[i].name);
+ if (fields[i].size == sizeof(int))
+ snd_iprintf(buffer, fields[i].format,
+ *get_dummy_int_ptr(fields[i].offset));
+ else
+ snd_iprintf(buffer, fields[i].format,
+ *get_dummy_ll_ptr(fields[i].offset));
+ if (!strcmp(fields[i].name, "formats"))
+ print_formats(buffer);
+ else if (!strcmp(fields[i].name, "rates"))
+ print_rates(buffer);
+ snd_iprintf(buffer, "\n");
+ }
+}
+
+static void dummy_proc_write(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ char line[64];
+
+ while (!snd_info_get_line(buffer, line, sizeof(line))) {
+ char item[20];
+ const char *ptr;
+ unsigned long long val;
+ int i;
+
+ ptr = snd_info_get_str(item, line, sizeof(item));
+ for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ if (!strcmp(item, fields[i].name))
+ break;
+ }
+ if (i >= ARRAY_SIZE(fields))
+ continue;
+ snd_info_get_str(item, ptr, sizeof(item));
+ if (strict_strtoull(item, 0, &val))
+ continue;
+ if (fields[i].size == sizeof(int))
+ *get_dummy_int_ptr(fields[i].offset) = val;
+ else
+ *get_dummy_ll_ptr(fields[i].offset) = val;
+ }
+}
+
+static void __devinit dummy_proc_init(struct snd_dummy *chip)
+{
+ struct snd_info_entry *entry;
+
+ if (!snd_card_proc_new(chip->card, "dummy_pcm", &entry)) {
+ snd_info_set_text_ops(entry, chip, dummy_proc_read);
+ entry->c.text.write = dummy_proc_write;
+ entry->mode |= S_IWUSR;
+ }
+}
+#else
+#define dummy_proc_init(x)
+#endif /* CONFIG_SND_DEBUG && CONFIG_PROC_FS */
+
static int __devinit snd_dummy_probe(struct platform_device *devptr)
{
struct snd_card *card;
@@ -610,6 +975,8 @@ static int __devinit snd_dummy_probe(struct platform_device *devptr)
strcpy(card->shortname, "Dummy");
sprintf(card->longname, "Dummy %i", dev + 1);
+ dummy_proc_init(dummy);
+
snd_card_set_dev(card, &devptr->dev);
err = snd_card_register(card);
@@ -670,6 +1037,7 @@ static void snd_dummy_unregister_all(void)
for (i = 0; i < ARRAY_SIZE(devices); ++i)
platform_device_unregister(devices[i]);
platform_driver_unregister(&snd_dummy_driver);
+ free_fake_buffer();
}
static int __init alsa_card_dummy_init(void)
@@ -680,6 +1048,12 @@ static int __init alsa_card_dummy_init(void)
if (err < 0)
return err;
+ err = alloc_fake_buffer();
+ if (err < 0) {
+ platform_driver_unregister(&snd_dummy_driver);
+ return err;
+ }
+
cards = 0;
for (i = 0; i < SNDRV_CARDS; i++) {
struct platform_device *device;
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index 3ee0269e5bd0..02f79d252718 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -1,5 +1,5 @@
/*
- * Driver for C-Media's CMI8330 soundcards.
+ * Driver for C-Media's CMI8330 and CMI8329 soundcards.
* Copyright (c) by George Talusan <gstalusan@uwaterloo.ca>
* http://www.undergrad.math.uwaterloo.ca/~gstalusa
*
@@ -35,7 +35,7 @@
*
* This card has two mixers and two PCM devices. I've cheesed it such
* that recording and playback can be done through the same device.
- * The driver "magically" routes the capturing to the CMI8330 codec,
+ * The driver "magically" routes the capturing to the AD1848 codec,
* and playback to the SB16 codec. This allows for full-duplex mode
* to some extent.
* The utilities in alsa-utils are aware of both devices, so passing
@@ -64,7 +64,7 @@
/*
*/
MODULE_AUTHOR("George Talusan <gstalusan@uwaterloo.ca>");
-MODULE_DESCRIPTION("C-Media CMI8330");
+MODULE_DESCRIPTION("C-Media CMI8330/CMI8329");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8330,isapnp:{CMI0001,@@@0001,@X@0001}}}");
@@ -86,38 +86,38 @@ static long mpuport[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
static int mpuirq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "Index value for CMI8330 soundcard.");
+MODULE_PARM_DESC(index, "Index value for CMI8330/CMI8329 soundcard.");
module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string for CMI8330 soundcard.");
+MODULE_PARM_DESC(id, "ID string for CMI8330/CMI8329 soundcard.");
module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "Enable CMI8330 soundcard.");
+MODULE_PARM_DESC(enable, "Enable CMI8330/CMI8329 soundcard.");
#ifdef CONFIG_PNP
module_param_array(isapnp, bool, NULL, 0444);
MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard.");
#endif
module_param_array(sbport, long, NULL, 0444);
-MODULE_PARM_DESC(sbport, "Port # for CMI8330 SB driver.");
+MODULE_PARM_DESC(sbport, "Port # for CMI8330/CMI8329 SB driver.");
module_param_array(sbirq, int, NULL, 0444);
-MODULE_PARM_DESC(sbirq, "IRQ # for CMI8330 SB driver.");
+MODULE_PARM_DESC(sbirq, "IRQ # for CMI8330/CMI8329 SB driver.");
module_param_array(sbdma8, int, NULL, 0444);
-MODULE_PARM_DESC(sbdma8, "DMA8 for CMI8330 SB driver.");
+MODULE_PARM_DESC(sbdma8, "DMA8 for CMI8330/CMI8329 SB driver.");
module_param_array(sbdma16, int, NULL, 0444);
-MODULE_PARM_DESC(sbdma16, "DMA16 for CMI8330 SB driver.");
+MODULE_PARM_DESC(sbdma16, "DMA16 for CMI8330/CMI8329 SB driver.");
module_param_array(wssport, long, NULL, 0444);
-MODULE_PARM_DESC(wssport, "Port # for CMI8330 WSS driver.");
+MODULE_PARM_DESC(wssport, "Port # for CMI8330/CMI8329 WSS driver.");
module_param_array(wssirq, int, NULL, 0444);
-MODULE_PARM_DESC(wssirq, "IRQ # for CMI8330 WSS driver.");
+MODULE_PARM_DESC(wssirq, "IRQ # for CMI8330/CMI8329 WSS driver.");
module_param_array(wssdma, int, NULL, 0444);
-MODULE_PARM_DESC(wssdma, "DMA for CMI8330 WSS driver.");
+MODULE_PARM_DESC(wssdma, "DMA for CMI8330/CMI8329 WSS driver.");
module_param_array(fmport, long, NULL, 0444);
-MODULE_PARM_DESC(fmport, "FM port # for CMI8330 driver.");
+MODULE_PARM_DESC(fmport, "FM port # for CMI8330/CMI8329 driver.");
module_param_array(mpuport, long, NULL, 0444);
-MODULE_PARM_DESC(mpuport, "MPU-401 port # for CMI8330 driver.");
+MODULE_PARM_DESC(mpuport, "MPU-401 port # for CMI8330/CMI8329 driver.");
module_param_array(mpuirq, int, NULL, 0444);
-MODULE_PARM_DESC(mpuirq, "IRQ # for CMI8330 MPU-401 port.");
+MODULE_PARM_DESC(mpuirq, "IRQ # for CMI8330/CMI8329 MPU-401 port.");
#ifdef CONFIG_PNP
static int isa_registered;
static int pnp_registered;
@@ -156,6 +156,11 @@ static unsigned char snd_cmi8330_image[((CMI8330_CDINGAIN)-16) + 1] =
typedef int (*snd_pcm_open_callback_t)(struct snd_pcm_substream *);
+enum card_type {
+ CMI8330,
+ CMI8329
+};
+
struct snd_cmi8330 {
#ifdef CONFIG_PNP
struct pnp_dev *cap;
@@ -172,11 +177,14 @@ struct snd_cmi8330 {
snd_pcm_open_callback_t open;
void *private_data; /* sb or wss */
} streams[2];
+
+ enum card_type type;
};
#ifdef CONFIG_PNP
static struct pnp_card_device_id snd_cmi8330_pnpids[] = {
+ { .id = "CMI0001", .devs = { { "@X@0001" }, { "@@@0001" }, { "@H@0001" }, { "A@@0001" } } },
{ .id = "CMI0001", .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } } },
{ .id = "" }
};
@@ -304,7 +312,7 @@ static int __devinit snd_cmi8330_mixer(struct snd_card *card, struct snd_cmi8330
unsigned int idx;
int err;
- strcpy(card->mixername, "CMI8330/C3D");
+ strcpy(card->mixername, (acard->type == CMI8329) ? "CMI8329" : "CMI8330/C3D");
for (idx = 0; idx < ARRAY_SIZE(snd_cmi8330_controls); idx++) {
err = snd_ctl_add(card,
@@ -329,6 +337,9 @@ static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
struct pnp_dev *pdev;
int err;
+ /* CMI8329 has a device with ID A@@0001, CMI8330 does not */
+ acard->type = (id->devs[3].id[0]) ? CMI8329 : CMI8330;
+
acard->cap = pnp_request_card_device(card, id->devs[0].id, NULL);
if (acard->cap == NULL)
return -EBUSY;
@@ -345,38 +356,45 @@ static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
err = pnp_activate_dev(pdev);
if (err < 0) {
- snd_printk(KERN_ERR "CMI8330/C3D PnP configure failure\n");
+ snd_printk(KERN_ERR "AD1848 PnP configure failure\n");
return -EBUSY;
}
wssport[dev] = pnp_port_start(pdev, 0);
wssdma[dev] = pnp_dma(pdev, 0);
wssirq[dev] = pnp_irq(pdev, 0);
- fmport[dev] = pnp_port_start(pdev, 1);
+ if (pnp_port_start(pdev, 1))
+ fmport[dev] = pnp_port_start(pdev, 1);
/* allocate SB16 resources */
pdev = acard->play;
err = pnp_activate_dev(pdev);
if (err < 0) {
- snd_printk(KERN_ERR "CMI8330/C3D (SB16) PnP configure failure\n");
+ snd_printk(KERN_ERR "SB16 PnP configure failure\n");
return -EBUSY;
}
sbport[dev] = pnp_port_start(pdev, 0);
sbdma8[dev] = pnp_dma(pdev, 0);
sbdma16[dev] = pnp_dma(pdev, 1);
sbirq[dev] = pnp_irq(pdev, 0);
+ /* On CMI8329, the OPL3 port might be present in SB16 PnP resources */
+ if (fmport[dev] == SNDRV_AUTO_PORT) {
+ if (pnp_port_start(pdev, 1))
+ fmport[dev] = pnp_port_start(pdev, 1);
+ else
+ fmport[dev] = 0x388; /* Or hardwired */
+ }
/* allocate MPU-401 resources */
pdev = acard->mpu;
err = pnp_activate_dev(pdev);
- if (err < 0) {
- snd_printk(KERN_ERR
- "CMI8330/C3D (MPU-401) PnP configure failure\n");
- return -EBUSY;
+ if (err < 0)
+ snd_printk(KERN_ERR "MPU-401 PnP configure failure: will be disabled\n");
+ else {
+ mpuport[dev] = pnp_port_start(pdev, 0);
+ mpuirq[dev] = pnp_irq(pdev, 0);
}
- mpuport[dev] = pnp_port_start(pdev, 0);
- mpuirq[dev] = pnp_irq(pdev, 0);
return 0;
}
#endif
@@ -430,9 +448,9 @@ static int __devinit snd_cmi8330_pcm(struct snd_card *card, struct snd_cmi8330 *
snd_cmi8330_capture_open
};
- if ((err = snd_pcm_new(card, "CMI8330", 0, 1, 1, &pcm)) < 0)
+ if ((err = snd_pcm_new(card, (chip->type == CMI8329) ? "CMI8329" : "CMI8330", 0, 1, 1, &pcm)) < 0)
return err;
- strcpy(pcm->name, "CMI8330");
+ strcpy(pcm->name, (chip->type == CMI8329) ? "CMI8329" : "CMI8330");
pcm->private_data = chip;
/* SB16 */
@@ -527,11 +545,11 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
wssdma[dev], -1,
WSS_HW_DETECT, 0, &acard->wss);
if (err < 0) {
- snd_printk(KERN_ERR PFX "(CMI8330) device busy??\n");
+ snd_printk(KERN_ERR PFX "AD1848 device busy??\n");
return err;
}
if (acard->wss->hardware != WSS_HW_CMI8330) {
- snd_printk(KERN_ERR PFX "(CMI8330) not found during probe\n");
+ snd_printk(KERN_ERR PFX "AD1848 not found during probe\n");
return -ENODEV;
}
@@ -541,11 +559,11 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
sbdma8[dev],
sbdma16[dev],
SB_HW_AUTO, &acard->sb)) < 0) {
- snd_printk(KERN_ERR PFX "(SB16) device busy??\n");
+ snd_printk(KERN_ERR PFX "SB16 device busy??\n");
return err;
}
if (acard->sb->hardware != SB_HW_16) {
- snd_printk(KERN_ERR PFX "(SB16) not found during probe\n");
+ snd_printk(KERN_ERR PFX "SB16 not found during probe\n");
return err;
}
@@ -585,8 +603,8 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
mpuport[dev]);
}
- strcpy(card->driver, "CMI8330/C3D");
- strcpy(card->shortname, "C-Media CMI8330/C3D");
+ strcpy(card->driver, (acard->type == CMI8329) ? "CMI8329" : "CMI8330/C3D");
+ strcpy(card->shortname, (acard->type == CMI8329) ? "C-Media CMI8329" : "C-Media CMI8330/C3D");
sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
card->shortname,
acard->wss->port,
diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c
index a40be0cf1d97..782b3b84dac6 100644
--- a/sound/oss/midibuf.c
+++ b/sound/oss/midibuf.c
@@ -127,15 +127,16 @@ static void midi_poll(unsigned long dummy)
for (dev = 0; dev < num_midis; dev++)
if (midi_devs[dev] != NULL && midi_out_buf[dev] != NULL)
{
- int ok = 1;
-
- while (DATA_AVAIL(midi_out_buf[dev]) && ok)
+ while (DATA_AVAIL(midi_out_buf[dev]))
{
+ int ok;
int c = midi_out_buf[dev]->queue[midi_out_buf[dev]->head];
spin_unlock_irqrestore(&lock,flags);/* Give some time to others */
ok = midi_devs[dev]->outputc(dev, c);
spin_lock_irqsave(&lock, flags);
+ if (!ok)
+ break;
midi_out_buf[dev]->head = (midi_out_buf[dev]->head + 1) % MAX_QUEUE_SIZE;
midi_out_buf[dev]->len--;
}
diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c
index 187f72750e8f..6713110bdc75 100644
--- a/sound/oss/vwsnd.c
+++ b/sound/oss/vwsnd.c
@@ -628,7 +628,7 @@ static void li_setup_dma(dma_chan_t *chan,
ASSERT(!(buffer_paddr & 0xFF));
chan->baseval = (buffer_paddr >> 8) | 1 << (37 - 8);
- chan->cfgval = (!LI_CCFG_LOCK |
+ chan->cfgval = ((chan->cfgval & ~LI_CCFG_LOCK) |
SHIFT_FIELD(desc->ad1843_slot, LI_CCFG_SLOT) |
desc->direction |
mode |
@@ -638,9 +638,9 @@ static void li_setup_dma(dma_chan_t *chan,
tmask = 13 - fragshift; /* See Lithium DMA Notes above. */
ASSERT(size >= 2 && size <= 7);
ASSERT(tmask >= 1 && tmask <= 7);
- chan->ctlval = (!LI_CCTL_RESET |
+ chan->ctlval = ((chan->ctlval & ~LI_CCTL_RESET) |
SHIFT_FIELD(size, LI_CCTL_SIZE) |
- !LI_CCTL_DMA_ENABLE |
+ (chan->ctlval & ~LI_CCTL_DMA_ENABLE) |
SHIFT_FIELD(tmask, LI_CCTL_TMASK) |
SHIFT_FIELD(0, LI_CCTL_TPTR));
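Both hunks above repair the same class of bug: applying logical NOT to a non-zero bitmask, as in !LI_CCFG_LOCK, always yields 0, so the expression never contributed anything; the intent - "this flag deasserted while keeping the rest of the value" - needs the bitwise complement instead. In isolation:

	unsigned int old = 0xff, flag = 0x10;
	unsigned int wrong = old | !flag;	/* !0x10 == 0, contributes nothing */
	unsigned int right = old & ~flag;	/* 0xef: the flag bit really is off */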
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 748f6b7d90b7..fb5ee3cc3968 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -135,11 +135,11 @@ config SND_AW2
config SND_AZT3328
- tristate "Aztech AZF3328 / PCI168 (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ tristate "Aztech AZF3328 / PCI168"
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_PCM
+ select SND_RAWMIDI
help
Say Y here to include support for Aztech AZF3328 (PCI168)
soundcards.
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 76d76c08339b..b458d208720b 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -478,45 +478,6 @@ static int snd_ali_reset_5451(struct snd_ali *codec)
return 0;
}
-#ifdef CODEC_RESET
-
-static int snd_ali_reset_codec(struct snd_ali *codec)
-{
- struct pci_dev *pci_dev;
- unsigned char bVal;
- unsigned int dwVal;
- unsigned short wCount, wReg;
-
- pci_dev = codec->pci_m1533;
-
- pci_read_config_dword(pci_dev, 0x7c, &dwVal);
- pci_write_config_dword(pci_dev, 0x7c, dwVal | 0x08000000);
- udelay(5000);
- pci_read_config_dword(pci_dev, 0x7c, &dwVal);
- pci_write_config_dword(pci_dev, 0x7c, dwVal & 0xf7ffffff);
- udelay(5000);
-
- bVal = inb(ALI_REG(codec,ALI_SCTRL));
- bVal |= 0x02;
- outb(ALI_REG(codec,ALI_SCTRL),bVal);
- udelay(5000);
- bVal = inb(ALI_REG(codec,ALI_SCTRL));
- bVal &= 0xfd;
- outb(ALI_REG(codec,ALI_SCTRL),bVal);
- udelay(15000);
-
- wCount = 200;
- while (wCount--) {
- wReg = snd_ali_codec_read(codec->ac97, AC97_POWERDOWN);
- if ((wReg & 0x000f) == 0x000f)
- return 0;
- udelay(5000);
- }
- return -1;
-}
-
-#endif
-
/*
* ALI 5451 Controller
*/
@@ -561,22 +522,6 @@ static void snd_ali_disable_address_interrupt(struct snd_ali *codec)
outl(gc, ALI_REG(codec, ALI_GC_CIR));
}
-#if 0 /* not used */
-static void snd_ali_enable_voice_irq(struct snd_ali *codec,
- unsigned int channel)
-{
- unsigned int mask;
- struct snd_ali_channel_control *pchregs = &(codec->chregs);
-
- snd_ali_printk("enable_voice_irq channel=%d\n",channel);
-
- mask = 1 << (channel & 0x1f);
- pchregs->data.ainten = inl(ALI_REG(codec, pchregs->regs.ainten));
- pchregs->data.ainten |= mask;
- outl(pchregs->data.ainten, ALI_REG(codec, pchregs->regs.ainten));
-}
-#endif
-
static void snd_ali_disable_voice_irq(struct snd_ali *codec,
unsigned int channel)
{
@@ -677,16 +622,6 @@ static void snd_ali_free_channel_pcm(struct snd_ali *codec, int channel)
}
}
-#if 0 /* not used */
-static void snd_ali_start_voice(struct snd_ali *codec, unsigned int channel)
-{
- unsigned int mask = 1 << (channel & 0x1f);
-
- snd_ali_printk("start_voice: channel=%d\n",channel);
- outl(mask, ALI_REG(codec,codec->chregs.regs.start));
-}
-#endif
-
static void snd_ali_stop_voice(struct snd_ali *codec, unsigned int channel)
{
unsigned int mask = 1 << (channel & 0x1f);
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index f290bc56178f..8451a0169f32 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1,6 +1,6 @@
/*
* azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168).
- * Copyright (C) 2002, 2005 - 2008 by Andreas Mohr <andi AT lisas.de>
+ * Copyright (C) 2002, 2005 - 2009 by Andreas Mohr <andi AT lisas.de>
*
* Framework borrowed from Bart Hartgers's als4000.c.
* Driver developed on PCI168 AP(W) version (PCI rev. 10, subsystem ID 1801),
@@ -10,6 +10,13 @@
* PCI168 A/AP, sub ID 8000
* Please give me feedback in case you try my driver with one of these!!
*
+ * Keywords: Windows XP Vista 168nt4-125.zip 168win95-125.zip PCI 168 download
+ * (XP/Vista do not support this card at all, but every Linux distribution
+ * supports it very well out of the box; these keywords are here so that
+ * people searching for a Windows driver find this file and learn - despite
+ * the usual level of Internet ignorance :-P - that the card is fully
+ * supported on Linux!)
+ *
* GPL LICENSE
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -71,10 +78,11 @@
* - built-in General DirectX timer having a 20 bits counter
* with 1us resolution (see below!)
* - I2S serial output port for external DAC
+ * [FIXME: 3.3V or 5V level? maximum rate is 66.2kHz right?]
* - supports 33MHz PCI spec 2.1, PCI power management 1.0, compliant with ACPI
* - supports hardware volume control
* - single chip low cost solution (128 pin QFP)
- * - supports programmable Sub-vendor and Sub-system ID
+ * - supports programmable Sub-vendor and Sub-system ID [24C02 SEEPROM chip]
* required for Microsoft's logo compliance (FIXME: where?)
* At least the Trident 4D Wave DX has one bit somewhere
* to enable writes to PCI subsystem VID registers, that should be it.
@@ -82,6 +90,7 @@
* some custom data starting at 0x80. What kind of config settings
* are located in our extended PCI space anyway??
* - PCI168 AP(W) card: power amplifier with 4 Watts/channel at 4 Ohms
+ * [TDA1517P chip]
*
* Note that this driver now is actually *better* than the Windows driver,
* since it additionally supports the card's 1MHz DirectX timer - just try
@@ -146,10 +155,15 @@
* to read the Digital Enhanced Game Port. Not sure whether it is fixable.
*
* TODO
+ * - use PCI_VDEVICE
+ * - verify driver status on x86_64
+ * - test multi-card driver operation
+ * - (ab)use 1MHz DirectX timer as kernel clocksource
* - test MPU401 MIDI playback etc.
* - add more power micro-management (disable various units of the card
- * as long as they're unused). However this requires more I/O ports which I
- * haven't figured out yet and which thus might not even exist...
+ * as long as they're unused, to improve audio quality and save power).
+ * However this requires more I/O ports which I haven't figured out yet
+ * and which thus might not even exist...
* The standard suspend/resume functionality could probably make use of
* some improvement, too...
* - figure out what all unknown port bits are responsible for
@@ -185,25 +199,46 @@ MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
#define SUPPORT_GAMEPORT 1
#endif
+/* === Debug settings ===
+ Further diagnostic functionality than the settings below
+ does not need to be provided, since one can easily write a bash script
+ to dump the card's I/O ports (those listed in lspci -v -v):
+ function dump()
+ {
+ local descr=$1; local addr=$2; local count=$3
+
+ echo "${descr}: ${count} @ ${addr}:"
+ dd if=/dev/port skip=$[${addr}] count=${count} bs=1 2>/dev/null| hexdump -C
+ }
+ and then use something like
+ "dump joy200 0x200 8", "dump mpu388 0x388 4", "dump joy 0xb400 8",
+ "dump codec00 0xa800 32", "dump mixer 0xb800 64", "dump synth 0xbc00 8",
+ possibly within a "while true; do ... sleep 1; done" loop.
+ Tweaking ports could be done using
+ VALSTRING="`printf "%02x" $value`"
+ printf "\x""$VALSTRING"|dd of=/dev/port seek=$[${addr}] bs=1 2>/dev/null
+*/
+
#define DEBUG_MISC 0
#define DEBUG_CALLS 0
#define DEBUG_MIXER 0
-#define DEBUG_PLAY_REC 0
+#define DEBUG_CODEC 0
#define DEBUG_IO 0
#define DEBUG_TIMER 0
#define DEBUG_GAME 0
+#define DEBUG_PM 0
#define MIXER_TESTING 0
#if DEBUG_MISC
-#define snd_azf3328_dbgmisc(format, args...) printk(KERN_ERR format, ##args)
+#define snd_azf3328_dbgmisc(format, args...) printk(KERN_DEBUG format, ##args)
#else
#define snd_azf3328_dbgmisc(format, args...)
#endif
#if DEBUG_CALLS
#define snd_azf3328_dbgcalls(format, args...) printk(format, ##args)
-#define snd_azf3328_dbgcallenter() printk(KERN_ERR "--> %s\n", __func__)
-#define snd_azf3328_dbgcallleave() printk(KERN_ERR "<-- %s\n", __func__)
+#define snd_azf3328_dbgcallenter() printk(KERN_DEBUG "--> %s\n", __func__)
+#define snd_azf3328_dbgcallleave() printk(KERN_DEBUG "<-- %s\n", __func__)
#else
#define snd_azf3328_dbgcalls(format, args...)
#define snd_azf3328_dbgcallenter()
@@ -216,10 +251,10 @@ MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
#define snd_azf3328_dbgmixer(format, args...)
#endif
-#if DEBUG_PLAY_REC
-#define snd_azf3328_dbgplay(format, args...) printk(KERN_DEBUG format, ##args)
+#if DEBUG_CODEC
+#define snd_azf3328_dbgcodec(format, args...) printk(KERN_DEBUG format, ##args)
#else
-#define snd_azf3328_dbgplay(format, args...)
+#define snd_azf3328_dbgcodec(format, args...)
#endif
#if DEBUG_MISC
@@ -234,6 +269,12 @@ MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
#define snd_azf3328_dbggame(format, args...)
#endif
+#if DEBUG_PM
+#define snd_azf3328_dbgpm(format, args...) printk(KERN_DEBUG format, ##args)
+#else
+#define snd_azf3328_dbgpm(format, args...)
+#endif
+
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for AZF3328 soundcard.");
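The block above keeps the driver's established compile-time debug scheme, now with per-subsystem DEBUG_CODEC/DEBUG_PM switches and a consistent KERN_DEBUG (instead of KERN_ERR) severity: each flag gates a printk-wrapping macro that compiles away to nothing when disabled. The pattern in generic form:

	#define DEBUG_FOO 0

	#if DEBUG_FOO
	#define dbg_foo(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
	#else
	#define dbg_foo(fmt, args...)	/* compiled out */
	#endif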
@@ -250,22 +291,23 @@ static int seqtimer_scaling = 128;
module_param(seqtimer_scaling, int, 0444);
MODULE_PARM_DESC(seqtimer_scaling, "Set 1024000Hz sequencer timer scale factor (lockup danger!). Default 128.");
-struct snd_azf3328_audio_stream {
+struct snd_azf3328_codec_data {
+ unsigned long io_base;
struct snd_pcm_substream *substream;
- int enabled;
- int running;
- unsigned long portbase;
+ bool running;
+ const char *name;
};
-enum snd_azf3328_stream_index {
- AZF_PLAYBACK = 0,
- AZF_CAPTURE = 1,
+enum snd_azf3328_codec_type {
+ AZF_CODEC_PLAYBACK = 0,
+ AZF_CODEC_CAPTURE = 1,
+ AZF_CODEC_I2S_OUT = 2,
};
struct snd_azf3328 {
/* often-used fields towards beginning, then grouped */
- unsigned long codec_io; /* usually 0xb000, size 128 */
+ unsigned long ctrl_io; /* usually 0xb000, size 128 */
unsigned long game_io; /* usually 0xb400, size 8 */
unsigned long mpu_io; /* usually 0xb800, size 4 */
unsigned long opl3_io; /* usually 0xbc00, size 8 */
@@ -275,15 +317,17 @@ struct snd_azf3328 {
struct snd_timer *timer;
- struct snd_pcm *pcm;
- struct snd_azf3328_audio_stream audio_stream[2];
+ struct snd_pcm *pcm[3];
+
+ /* playback, recording and I2S out codecs */
+ struct snd_azf3328_codec_data codecs[3];
struct snd_card *card;
struct snd_rawmidi *rmidi;
#ifdef SUPPORT_GAMEPORT
struct gameport *gameport;
- int axes[4];
+ u16 axes[4];
#endif
struct pci_dev *pci;
@@ -293,16 +337,16 @@ struct snd_azf3328 {
* If we need to add more registers here, then we might try to fold this
* into some transparent combined shadow register handling with
* CONFIG_PM register storage below, but that's slightly difficult. */
- u16 shadow_reg_codec_6AH;
+ u16 shadow_reg_ctrl_6AH;
#ifdef CONFIG_PM
/* register value containers for power management
- * Note: not always full I/O range preserved (just like Win driver!) */
- u16 saved_regs_codec[AZF_IO_SIZE_CODEC_PM / 2];
- u16 saved_regs_game [AZF_IO_SIZE_GAME_PM / 2];
- u16 saved_regs_mpu [AZF_IO_SIZE_MPU_PM / 2];
- u16 saved_regs_opl3 [AZF_IO_SIZE_OPL3_PM / 2];
- u16 saved_regs_mixer[AZF_IO_SIZE_MIXER_PM / 2];
+ * Note: not always full I/O range preserved (similar to Win driver!) */
+ u32 saved_regs_ctrl[AZF_ALIGN(AZF_IO_SIZE_CTRL_PM) / 4];
+ u32 saved_regs_game[AZF_ALIGN(AZF_IO_SIZE_GAME_PM) / 4];
+ u32 saved_regs_mpu[AZF_ALIGN(AZF_IO_SIZE_MPU_PM) / 4];
+ u32 saved_regs_opl3[AZF_ALIGN(AZF_IO_SIZE_OPL3_PM) / 4];
+ u32 saved_regs_mixer[AZF_ALIGN(AZF_IO_SIZE_MIXER_PM) / 4];
#endif
};
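This is the structural core of the refactor: the two-element audio_stream array and single pcm pointer give way to one snd_azf3328_codec_data entry per codec (playback, capture, and the newly supported I2S out), each carrying its own I/O window, substream, running flag and name, while the shared register space is renamed from codec_io to ctrl_io. A simplified sketch of the resulting bookkeeping (not the literal declarations):

	enum codec_type { PLAYBACK, CAPTURE, I2S_OUT, NUM_CODECS };

	struct codec_data {
		unsigned long io_base;		/* this codec's register window */
		struct snd_pcm_substream *substream;
		bool running;
		const char *name;
	};

	struct chip {
		unsigned long ctrl_io;		/* shared control/IRQ window */
		struct snd_pcm *pcm[NUM_CODECS];
		struct codec_data codecs[NUM_CODECS];
	};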
@@ -316,7 +360,7 @@ MODULE_DEVICE_TABLE(pci, snd_azf3328_ids);
static int
-snd_azf3328_io_reg_setb(unsigned reg, u8 mask, int do_set)
+snd_azf3328_io_reg_setb(unsigned reg, u8 mask, bool do_set)
{
u8 prev = inb(reg), new;
@@ -331,39 +375,72 @@ snd_azf3328_io_reg_setb(unsigned reg, u8 mask, int do_set)
}
static inline void
-snd_azf3328_codec_outb(const struct snd_azf3328 *chip, unsigned reg, u8 value)
+snd_azf3328_codec_outb(const struct snd_azf3328_codec_data *codec,
+ unsigned reg,
+ u8 value
+)
{
- outb(value, chip->codec_io + reg);
+ outb(value, codec->io_base + reg);
}
static inline u8
-snd_azf3328_codec_inb(const struct snd_azf3328 *chip, unsigned reg)
+snd_azf3328_codec_inb(const struct snd_azf3328_codec_data *codec, unsigned reg)
{
- return inb(chip->codec_io + reg);
+ return inb(codec->io_base + reg);
}
static inline void
-snd_azf3328_codec_outw(const struct snd_azf3328 *chip, unsigned reg, u16 value)
+snd_azf3328_codec_outw(const struct snd_azf3328_codec_data *codec,
+ unsigned reg,
+ u16 value
+)
{
- outw(value, chip->codec_io + reg);
+ outw(value, codec->io_base + reg);
}
static inline u16
-snd_azf3328_codec_inw(const struct snd_azf3328 *chip, unsigned reg)
+snd_azf3328_codec_inw(const struct snd_azf3328_codec_data *codec, unsigned reg)
{
- return inw(chip->codec_io + reg);
+ return inw(codec->io_base + reg);
}
static inline void
-snd_azf3328_codec_outl(const struct snd_azf3328 *chip, unsigned reg, u32 value)
+snd_azf3328_codec_outl(const struct snd_azf3328_codec_data *codec,
+ unsigned reg,
+ u32 value
+)
{
- outl(value, chip->codec_io + reg);
+ outl(value, codec->io_base + reg);
}
static inline u32
-snd_azf3328_codec_inl(const struct snd_azf3328 *chip, unsigned reg)
+snd_azf3328_codec_inl(const struct snd_azf3328_codec_data *codec, unsigned reg)
+{
+ return inl(codec->io_base + reg);
+}
+
+static inline void
+snd_azf3328_ctrl_outb(const struct snd_azf3328 *chip, unsigned reg, u8 value)
+{
+ outb(value, chip->ctrl_io + reg);
+}
+
+static inline u8
+snd_azf3328_ctrl_inb(const struct snd_azf3328 *chip, unsigned reg)
+{
+ return inb(chip->ctrl_io + reg);
+}
+
+static inline void
+snd_azf3328_ctrl_outw(const struct snd_azf3328 *chip, unsigned reg, u16 value)
+{
+ outw(value, chip->ctrl_io + reg);
+}
+
+static inline void
+snd_azf3328_ctrl_outl(const struct snd_azf3328 *chip, unsigned reg, u32 value)
{
- return inl(chip->codec_io + reg);
+ outl(value, chip->ctrl_io + reg);
}
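With per-codec io_base in place, the accessors split into two families: the snd_azf3328_codec_*() helpers now address one codec's window through the codec_data pointer, while the new snd_azf3328_ctrl_*() helpers keep serving the shared control window at chip->ctrl_io. Call sites in the rest of the patch therefore look roughly like:

	snd_azf3328_codec_outw(&chip->codecs[AZF_CODEC_PLAYBACK],
			       IDX_IO_CODEC_DMA_FLAGS, flags1);
	snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0x07);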
static inline void
@@ -404,13 +481,13 @@ snd_azf3328_mixer_inw(const struct snd_azf3328 *chip, unsigned reg)
#define AZF_MUTE_BIT 0x80
-static int
+static bool
snd_azf3328_mixer_set_mute(const struct snd_azf3328 *chip,
- unsigned reg, int do_mute
+ unsigned reg, bool do_mute
)
{
unsigned long portbase = chip->mixer_io + reg + 1;
- int updated;
+ bool updated;
/* the mute bit is on the *second* (i.e. right) register of a
* left/right channel setting */
@@ -569,7 +646,7 @@ snd_azf3328_get_mixer(struct snd_kcontrol *kcontrol,
{
struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol);
struct azf3328_mixer_reg reg;
- unsigned int oreg, val;
+ u16 oreg, val;
snd_azf3328_dbgcallenter();
snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
@@ -600,7 +677,7 @@ snd_azf3328_put_mixer(struct snd_kcontrol *kcontrol,
{
struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol);
struct azf3328_mixer_reg reg;
- unsigned int oreg, nreg, val;
+ u16 oreg, nreg, val;
snd_azf3328_dbgcallenter();
snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
@@ -709,7 +786,7 @@ snd_azf3328_put_mixer_enum(struct snd_kcontrol *kcontrol,
{
struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol);
struct azf3328_mixer_reg reg;
- unsigned int oreg, nreg, val;
+ u16 oreg, nreg, val;
snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
oreg = snd_azf3328_mixer_inw(chip, reg.reg);
@@ -867,14 +944,15 @@ snd_azf3328_hw_free(struct snd_pcm_substream *substream)
static void
snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
- unsigned reg,
+ enum snd_azf3328_codec_type codec_type,
enum azf_freq_t bitrate,
unsigned int format_width,
unsigned int channels
)
{
- u16 val = 0xff00;
unsigned long flags;
+ const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
+ u16 val = 0xff00;
snd_azf3328_dbgcallenter();
switch (bitrate) {
@@ -917,7 +995,7 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
spin_lock_irqsave(&chip->reg_lock, flags);
/* set bitrate/format */
- snd_azf3328_codec_outw(chip, reg, val);
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_SOUNDFORMAT, val);
/* changing the bitrate/format settings switches off the
* audio output with an annoying click in case of 8/16bit format change
@@ -926,11 +1004,11 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
* (FIXME: yes, it works, but what exactly am I doing here?? :)
* FIXME: does this have some side effects for full-duplex
* or other dramatic side effects? */
- if (reg == IDX_IO_PLAY_SOUNDFORMAT) /* only do it for playback */
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS,
- snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS) |
- DMA_PLAY_SOMETHING1 |
- DMA_PLAY_SOMETHING2 |
+ if (codec_type == AZF_CODEC_PLAYBACK) /* only do it for playback */
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
+ snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS) |
+ DMA_RUN_SOMETHING1 |
+ DMA_RUN_SOMETHING2 |
SOMETHING_ALMOST_ALWAYS_SET |
DMA_EPILOGUE_SOMETHING |
DMA_SOMETHING_ELSE
@@ -942,112 +1020,134 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
static inline void
snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328 *chip,
- unsigned reg
+ enum snd_azf3328_codec_type codec_type
)
{
/* choose lowest frequency for low power consumption.
* While this will cause louder noise due to rather coarse frequency,
* it should never matter since output should always
* get disabled properly when idle anyway. */
- snd_azf3328_codec_setfmt(chip, reg, AZF_FREQ_4000, 8, 1);
+ snd_azf3328_codec_setfmt(chip, codec_type, AZF_FREQ_4000, 8, 1);
}
static void
-snd_azf3328_codec_reg_6AH_update(struct snd_azf3328 *chip,
+snd_azf3328_ctrl_reg_6AH_update(struct snd_azf3328 *chip,
unsigned bitmask,
- int enable
+ bool enable
)
{
- if (enable)
- chip->shadow_reg_codec_6AH &= ~bitmask;
+ bool do_mask = !enable;
+ if (do_mask)
+ chip->shadow_reg_ctrl_6AH |= bitmask;
else
- chip->shadow_reg_codec_6AH |= bitmask;
- snd_azf3328_dbgplay("6AH_update mask 0x%04x enable %d: val 0x%04x\n",
- bitmask, enable, chip->shadow_reg_codec_6AH);
- snd_azf3328_codec_outw(chip, IDX_IO_6AH, chip->shadow_reg_codec_6AH);
+ chip->shadow_reg_ctrl_6AH &= ~bitmask;
+ snd_azf3328_dbgcodec("6AH_update mask 0x%04x do_mask %d: val 0x%04x\n",
+ bitmask, do_mask, chip->shadow_reg_ctrl_6AH);
+ snd_azf3328_ctrl_outw(chip, IDX_IO_6AH, chip->shadow_reg_ctrl_6AH);
}
static inline void
-snd_azf3328_codec_enable(struct snd_azf3328 *chip, int enable)
+snd_azf3328_ctrl_enable_codecs(struct snd_azf3328 *chip, bool enable)
{
- snd_azf3328_dbgplay("codec_enable %d\n", enable);
+ snd_azf3328_dbgcodec("codec_enable %d\n", enable);
/* no idea what exactly is being done here, but I strongly assume it's
* PM related */
- snd_azf3328_codec_reg_6AH_update(
+ snd_azf3328_ctrl_reg_6AH_update(
chip, IO_6A_PAUSE_PLAYBACK_BIT8, enable
);
}
static void
-snd_azf3328_codec_activity(struct snd_azf3328 *chip,
- enum snd_azf3328_stream_index stream_type,
- int enable
+snd_azf3328_ctrl_codec_activity(struct snd_azf3328 *chip,
+ enum snd_azf3328_codec_type codec_type,
+ bool enable
)
{
- int need_change = (chip->audio_stream[stream_type].running != enable);
+ struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
+ bool need_change = (codec->running != enable);
- snd_azf3328_dbgplay(
- "codec_activity: type %d, enable %d, need_change %d\n",
- stream_type, enable, need_change
+ snd_azf3328_dbgcodec(
+ "codec_activity: %s codec, enable %d, need_change %d\n",
+ codec->name, enable, need_change
);
if (need_change) {
- enum snd_azf3328_stream_index other =
- (stream_type == AZF_PLAYBACK) ?
- AZF_CAPTURE : AZF_PLAYBACK;
- /* small check to prevent shutting down the other party
- * in case it's active */
- if ((enable) || !(chip->audio_stream[other].running))
- snd_azf3328_codec_enable(chip, enable);
+ static const struct {
+ enum snd_azf3328_codec_type other1;
+ enum snd_azf3328_codec_type other2;
+ } peer_codecs[3] =
+ { { AZF_CODEC_CAPTURE, AZF_CODEC_I2S_OUT },
+ { AZF_CODEC_PLAYBACK, AZF_CODEC_I2S_OUT },
+ { AZF_CODEC_PLAYBACK, AZF_CODEC_CAPTURE } };
+ bool call_function;
+
+ if (enable)
+ /* if enable codec, call enable_codecs func
+ to enable codec supply... */
+ call_function = 1;
+ else {
+ /* ...otherwise call enable_codecs func
+ (which globally shuts down operation of codecs)
+ only in case the other codecs are currently
+ not active either! */
+ call_function =
+ ((!chip->codecs[peer_codecs[codec_type].other1]
+ .running)
+ && (!chip->codecs[peer_codecs[codec_type].other2]
+ .running));
+ }
+ if (call_function)
+ snd_azf3328_ctrl_enable_codecs(chip, enable);
/* ...and adjust clock, too
* (reduce noise and power consumption) */
if (!enable)
snd_azf3328_codec_setfmt_lowpower(
chip,
- chip->audio_stream[stream_type].portbase
- + IDX_IO_PLAY_SOUNDFORMAT
+ codec_type
);
+ codec->running = enable;
}
- chip->audio_stream[stream_type].running = enable;
}
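The rework above replaces the old "is the other stream running?" test with a small peer table: the shared codec supply is only switched off once both other codecs are idle, which becomes necessary now that there are three of them. Condensed, using the simplified names from the earlier sketch (the helper itself is invented):

	static const struct { int other1, other2; } peers[NUM_CODECS] = {
		{ CAPTURE,  I2S_OUT },	/* peers of PLAYBACK */
		{ PLAYBACK, I2S_OUT },	/* peers of CAPTURE  */
		{ PLAYBACK, CAPTURE },	/* peers of I2S_OUT  */
	};

	static bool may_toggle_supply(const struct chip *chip, int self, bool enable)
	{
		if (enable)
			return true;	/* switching on is always safe */
		return !chip->codecs[peers[self].other1].running &&
		       !chip->codecs[peers[self].other2].running;
	}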
static void
-snd_azf3328_setdmaa(struct snd_azf3328 *chip,
- long unsigned int addr,
- unsigned int count,
- unsigned int size,
- enum snd_azf3328_stream_index stream_type
+snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip,
+ enum snd_azf3328_codec_type codec_type,
+ unsigned long addr,
+ unsigned int count,
+ unsigned int size
)
{
+ const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
snd_azf3328_dbgcallenter();
- if (!chip->audio_stream[stream_type].running) {
- /* AZF3328 uses a two buffer pointer DMA playback approach */
+ if (!codec->running) {
+ /* AZF3328 uses a two buffer pointer DMA transfer approach */
- unsigned long flags, portbase, addr_area2;
+ unsigned long flags, addr_area2;
/* width 32bit (prevent overflow): */
- unsigned long count_areas, count_tmp;
+ u32 count_areas, lengths;
- portbase = chip->audio_stream[stream_type].portbase;
count_areas = size/2;
addr_area2 = addr+count_areas;
count_areas--; /* max. index */
- snd_azf3328_dbgplay("set DMA: buf1 %08lx[%lu], buf2 %08lx[%lu]\n", addr, count_areas, addr_area2, count_areas);
+ snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n",
+ addr, count_areas, addr_area2, count_areas);
/* build combined I/O buffer length word */
- count_tmp = count_areas;
- count_areas |= (count_tmp << 16);
+ lengths = (count_areas << 16) | (count_areas);
spin_lock_irqsave(&chip->reg_lock, flags);
- outl(addr, portbase + IDX_IO_PLAY_DMA_START_1);
- outl(addr_area2, portbase + IDX_IO_PLAY_DMA_START_2);
- outl(count_areas, portbase + IDX_IO_PLAY_DMA_LEN_1);
+ snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_1, addr);
+ snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_2,
+ addr_area2);
+ snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_LENGTHS,
+ lengths);
spin_unlock_irqrestore(&chip->reg_lock, flags);
}
snd_azf3328_dbgcallleave();
}
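The rewritten setdmaa keeps the chip's two-buffer-pointer DMA scheme but programs it through the codec's own register window: the buffer is halved, each half gets its own start address register, and the two identical per-half lengths (as maximum indices) are packed into a single 32-bit lengths register. The packing step on its own, with the hunk's variable names:

	u32 count_areas = size / 2;			/* bytes per half-buffer */
	unsigned long addr_area2 = addr + count_areas;	/* start of second half */
	count_areas--;					/* hardware wants the max. index */
	u32 lengths = (count_areas << 16) | count_areas;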
static int
-snd_azf3328_playback_prepare(struct snd_pcm_substream *substream)
+snd_azf3328_codec_prepare(struct snd_pcm_substream *substream)
{
#if 0
struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
@@ -1058,157 +1158,161 @@ snd_azf3328_playback_prepare(struct snd_pcm_substream *substream)
snd_azf3328_dbgcallenter();
#if 0
- snd_azf3328_codec_setfmt(chip, IDX_IO_PLAY_SOUNDFORMAT,
+ snd_azf3328_codec_setfmt(chip, AZF_CODEC_...,
runtime->rate,
snd_pcm_format_width(runtime->format),
runtime->channels);
- snd_azf3328_setdmaa(chip, runtime->dma_addr, count, size, AZF_PLAYBACK);
+ snd_azf3328_codec_setdmaa(chip, AZF_CODEC_...,
+ runtime->dma_addr, count, size);
#endif
snd_azf3328_dbgcallleave();
return 0;
}
static int
-snd_azf3328_capture_prepare(struct snd_pcm_substream *substream)
-{
-#if 0
- struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned int size = snd_pcm_lib_buffer_bytes(substream);
- unsigned int count = snd_pcm_lib_period_bytes(substream);
-#endif
-
- snd_azf3328_dbgcallenter();
-#if 0
- snd_azf3328_codec_setfmt(chip, IDX_IO_REC_SOUNDFORMAT,
- runtime->rate,
- snd_pcm_format_width(runtime->format),
- runtime->channels);
- snd_azf3328_setdmaa(chip, runtime->dma_addr, count, size, AZF_CAPTURE);
-#endif
- snd_azf3328_dbgcallleave();
- return 0;
-}
-
-static int
-snd_azf3328_playback_trigger(struct snd_pcm_substream *substream, int cmd)
+snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
+ const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
struct snd_pcm_runtime *runtime = substream->runtime;
int result = 0;
- unsigned int status1;
- int previously_muted;
+ u16 flags1;
+ bool previously_muted = 0;
+ bool is_playback_codec = (AZF_CODEC_PLAYBACK == codec_type);
- snd_azf3328_dbgcalls("snd_azf3328_playback_trigger cmd %d\n", cmd);
+ snd_azf3328_dbgcalls("snd_azf3328_codec_trigger cmd %d\n", cmd);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- snd_azf3328_dbgplay("START PLAYBACK\n");
-
- /* mute WaveOut (avoid clicking during setup) */
- previously_muted =
- snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1);
+ snd_azf3328_dbgcodec("START %s\n", codec->name);
+
+ if (is_playback_codec) {
+ /* mute WaveOut (avoid clicking during setup) */
+ previously_muted =
+ snd_azf3328_mixer_set_mute(
+ chip, IDX_MIXER_WAVEOUT, 1
+ );
+ }
- snd_azf3328_codec_setfmt(chip, IDX_IO_PLAY_SOUNDFORMAT,
+ snd_azf3328_codec_setfmt(chip, codec_type,
runtime->rate,
snd_pcm_format_width(runtime->format),
runtime->channels);
spin_lock(&chip->reg_lock);
/* first, remember current value: */
- status1 = snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS);
+ flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
- /* stop playback */
- status1 &= ~DMA_RESUME;
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1);
+ /* stop transfer */
+ flags1 &= ~DMA_RESUME;
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
/* FIXME: clear interrupts or what??? */
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_IRQTYPE, 0xffff);
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_IRQTYPE, 0xffff);
spin_unlock(&chip->reg_lock);
- snd_azf3328_setdmaa(chip, runtime->dma_addr,
+ snd_azf3328_codec_setdmaa(chip, codec_type, runtime->dma_addr,
snd_pcm_lib_period_bytes(substream),
- snd_pcm_lib_buffer_bytes(substream),
- AZF_PLAYBACK);
+ snd_pcm_lib_buffer_bytes(substream)
+ );
spin_lock(&chip->reg_lock);
#ifdef WIN9X
/* FIXME: enable playback/recording??? */
- status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1);
+ flags1 |= DMA_RUN_SOMETHING1 | DMA_RUN_SOMETHING2;
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
- /* start playback again */
+ /* start transfer again */
/* FIXME: what is this value (0x0010)??? */
- status1 |= DMA_RESUME | DMA_EPILOGUE_SOMETHING;
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1);
+ flags1 |= DMA_RESUME | DMA_EPILOGUE_SOMETHING;
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
#else /* NT4 */
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS,
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
0x0000);
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS,
- DMA_PLAY_SOMETHING1);
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS,
- DMA_PLAY_SOMETHING1 |
- DMA_PLAY_SOMETHING2);
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS,
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
+ DMA_RUN_SOMETHING1);
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
+ DMA_RUN_SOMETHING1 |
+ DMA_RUN_SOMETHING2);
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
DMA_RESUME |
SOMETHING_ALMOST_ALWAYS_SET |
DMA_EPILOGUE_SOMETHING |
DMA_SOMETHING_ELSE);
#endif
spin_unlock(&chip->reg_lock);
- snd_azf3328_codec_activity(chip, AZF_PLAYBACK, 1);
-
- /* now unmute WaveOut */
- if (!previously_muted)
- snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 0);
+ snd_azf3328_ctrl_codec_activity(chip, codec_type, 1);
+
+ if (is_playback_codec) {
+ /* now unmute WaveOut */
+ if (!previously_muted)
+ snd_azf3328_mixer_set_mute(
+ chip, IDX_MIXER_WAVEOUT, 0
+ );
+ }
- snd_azf3328_dbgplay("STARTED PLAYBACK\n");
+ snd_azf3328_dbgcodec("STARTED %s\n", codec->name);
break;
case SNDRV_PCM_TRIGGER_RESUME:
- snd_azf3328_dbgplay("RESUME PLAYBACK\n");
- /* resume playback if we were active */
+ snd_azf3328_dbgcodec("RESUME %s\n", codec->name);
+ /* resume codec if we were active */
spin_lock(&chip->reg_lock);
- if (chip->audio_stream[AZF_PLAYBACK].running)
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS,
- snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS) | DMA_RESUME);
+ if (codec->running)
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
+ snd_azf3328_codec_inw(
+ codec, IDX_IO_CODEC_DMA_FLAGS
+ ) | DMA_RESUME
+ );
spin_unlock(&chip->reg_lock);
break;
case SNDRV_PCM_TRIGGER_STOP:
- snd_azf3328_dbgplay("STOP PLAYBACK\n");
-
- /* mute WaveOut (avoid clicking during setup) */
- previously_muted =
- snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1);
+ snd_azf3328_dbgcodec("STOP %s\n", codec->name);
+
+ if (is_playback_codec) {
+ /* mute WaveOut (avoid clicking during setup) */
+ previously_muted =
+ snd_azf3328_mixer_set_mute(
+ chip, IDX_MIXER_WAVEOUT, 1
+ );
+ }
spin_lock(&chip->reg_lock);
/* first, remember current value: */
- status1 = snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS);
+ flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
- /* stop playback */
- status1 &= ~DMA_RESUME;
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1);
+ /* stop transfer */
+ flags1 &= ~DMA_RESUME;
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
/* hmm, is this really required? we're resetting the same bit
* immediately thereafter... */
- status1 |= DMA_PLAY_SOMETHING1;
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1);
+ flags1 |= DMA_RUN_SOMETHING1;
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
- status1 &= ~DMA_PLAY_SOMETHING1;
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1);
+ flags1 &= ~DMA_RUN_SOMETHING1;
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
spin_unlock(&chip->reg_lock);
- snd_azf3328_codec_activity(chip, AZF_PLAYBACK, 0);
-
- /* now unmute WaveOut */
- if (!previously_muted)
- snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 0);
+ snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
+
+ if (is_playback_codec) {
+ /* now unmute WaveOut */
+ if (!previously_muted)
+ snd_azf3328_mixer_set_mute(
+ chip, IDX_MIXER_WAVEOUT, 0
+ );
+ }
- snd_azf3328_dbgplay("STOPPED PLAYBACK\n");
+ snd_azf3328_dbgcodec("STOPPED %s\n", codec->name);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
- snd_azf3328_dbgplay("SUSPEND PLAYBACK\n");
- /* make sure playback is stopped */
- snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS,
- snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS) & ~DMA_RESUME);
+ snd_azf3328_dbgcodec("SUSPEND %s\n", codec->name);
+ /* make sure codec is stopped */
+ snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
+ snd_azf3328_codec_inw(
+ codec, IDX_IO_CODEC_DMA_FLAGS
+ ) & ~DMA_RESUME
+ );
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_PUSH NIY!\n");
@@ -1217,7 +1321,7 @@ snd_azf3328_playback_trigger(struct snd_pcm_substream *substream, int cmd)
snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_RELEASE NIY!\n");
break;
default:
- printk(KERN_ERR "FIXME: unknown trigger mode!\n");
+ snd_printk(KERN_ERR "FIXME: unknown trigger mode!\n");
return -EINVAL;
}
@@ -1225,172 +1329,74 @@ snd_azf3328_playback_trigger(struct snd_pcm_substream *substream, int cmd)
return result;
}
-/* this is just analogous to playback; I'm not quite sure whether recording
- * should actually be triggered like that */
static int
-snd_azf3328_capture_trigger(struct snd_pcm_substream *substream, int cmd)
+snd_azf3328_codec_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
- struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- int result = 0;
- unsigned int status1;
-
- snd_azf3328_dbgcalls("snd_azf3328_capture_trigger cmd %d\n", cmd);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
-
- snd_azf3328_dbgplay("START CAPTURE\n");
-
- snd_azf3328_codec_setfmt(chip, IDX_IO_REC_SOUNDFORMAT,
- runtime->rate,
- snd_pcm_format_width(runtime->format),
- runtime->channels);
-
- spin_lock(&chip->reg_lock);
- /* first, remember current value: */
- status1 = snd_azf3328_codec_inw(chip, IDX_IO_REC_FLAGS);
-
- /* stop recording */
- status1 &= ~DMA_RESUME;
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
-
- /* FIXME: clear interrupts or what??? */
- snd_azf3328_codec_outw(chip, IDX_IO_REC_IRQTYPE, 0xffff);
- spin_unlock(&chip->reg_lock);
-
- snd_azf3328_setdmaa(chip, runtime->dma_addr,
- snd_pcm_lib_period_bytes(substream),
- snd_pcm_lib_buffer_bytes(substream),
- AZF_CAPTURE);
-
- spin_lock(&chip->reg_lock);
-#ifdef WIN9X
- /* FIXME: enable playback/recording??? */
- status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
-
- /* start capture again */
- /* FIXME: what is this value (0x0010)??? */
- status1 |= DMA_RESUME | DMA_EPILOGUE_SOMETHING;
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
-#else
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
- 0x0000);
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
- DMA_PLAY_SOMETHING1);
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
- DMA_PLAY_SOMETHING1 |
- DMA_PLAY_SOMETHING2);
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
- DMA_RESUME |
- SOMETHING_ALMOST_ALWAYS_SET |
- DMA_EPILOGUE_SOMETHING |
- DMA_SOMETHING_ELSE);
-#endif
- spin_unlock(&chip->reg_lock);
- snd_azf3328_codec_activity(chip, AZF_CAPTURE, 1);
-
- snd_azf3328_dbgplay("STARTED CAPTURE\n");
- break;
- case SNDRV_PCM_TRIGGER_RESUME:
- snd_azf3328_dbgplay("RESUME CAPTURE\n");
- /* resume recording if we were active */
- spin_lock(&chip->reg_lock);
- if (chip->audio_stream[AZF_CAPTURE].running)
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
- snd_azf3328_codec_inw(chip, IDX_IO_REC_FLAGS) | DMA_RESUME);
- spin_unlock(&chip->reg_lock);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- snd_azf3328_dbgplay("STOP CAPTURE\n");
-
- spin_lock(&chip->reg_lock);
- /* first, remember current value: */
- status1 = snd_azf3328_codec_inw(chip, IDX_IO_REC_FLAGS);
-
- /* stop recording */
- status1 &= ~DMA_RESUME;
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
-
- status1 |= DMA_PLAY_SOMETHING1;
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
-
- status1 &= ~DMA_PLAY_SOMETHING1;
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
- spin_unlock(&chip->reg_lock);
- snd_azf3328_codec_activity(chip, AZF_CAPTURE, 0);
+ return snd_azf3328_codec_trigger(AZF_CODEC_PLAYBACK, substream, cmd);
+}
- snd_azf3328_dbgplay("STOPPED CAPTURE\n");
- break;
- case SNDRV_PCM_TRIGGER_SUSPEND:
- snd_azf3328_dbgplay("SUSPEND CAPTURE\n");
- /* make sure recording is stopped */
- snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
- snd_azf3328_codec_inw(chip, IDX_IO_REC_FLAGS) & ~DMA_RESUME);
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_PUSH NIY!\n");
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_RELEASE NIY!\n");
- break;
- default:
- printk(KERN_ERR "FIXME: unknown trigger mode!\n");
- return -EINVAL;
- }
+static int
+snd_azf3328_codec_capture_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ return snd_azf3328_codec_trigger(AZF_CODEC_CAPTURE, substream, cmd);
+}
- snd_azf3328_dbgcallleave();
- return result;
+static int
+snd_azf3328_codec_i2s_out_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ return snd_azf3328_codec_trigger(AZF_CODEC_I2S_OUT, substream, cmd);
}
static snd_pcm_uframes_t
-snd_azf3328_playback_pointer(struct snd_pcm_substream *substream)
+snd_azf3328_codec_pointer(struct snd_pcm_substream *substream,
+ enum snd_azf3328_codec_type codec_type
+)
{
- struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
+ const struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
+ const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
unsigned long bufptr, result;
snd_pcm_uframes_t frmres;
#ifdef QUERY_HARDWARE
- bufptr = snd_azf3328_codec_inl(chip, IDX_IO_PLAY_DMA_START_1);
+ bufptr = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1);
#else
bufptr = substream->runtime->dma_addr;
#endif
- result = snd_azf3328_codec_inl(chip, IDX_IO_PLAY_DMA_CURRPOS);
+ result = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_CURRPOS);
/* calculate offset */
result -= bufptr;
frmres = bytes_to_frames( substream->runtime, result);
- snd_azf3328_dbgplay("PLAY @ 0x%8lx, frames %8ld\n", result, frmres);
+ snd_azf3328_dbgcodec("%s @ 0x%8lx, frames %8ld\n",
+ codec->name, result, frmres);
return frmres;
}
static snd_pcm_uframes_t
-snd_azf3328_capture_pointer(struct snd_pcm_substream *substream)
+snd_azf3328_codec_playback_pointer(struct snd_pcm_substream *substream)
{
- struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
- unsigned long bufptr, result;
- snd_pcm_uframes_t frmres;
+ return snd_azf3328_codec_pointer(substream, AZF_CODEC_PLAYBACK);
+}
-#ifdef QUERY_HARDWARE
- bufptr = snd_azf3328_codec_inl(chip, IDX_IO_REC_DMA_START_1);
-#else
- bufptr = substream->runtime->dma_addr;
-#endif
- result = snd_azf3328_codec_inl(chip, IDX_IO_REC_DMA_CURRPOS);
+static snd_pcm_uframes_t
+snd_azf3328_codec_capture_pointer(struct snd_pcm_substream *substream)
+{
+ return snd_azf3328_codec_pointer(substream, AZF_CODEC_CAPTURE);
+}
- /* calculate offset */
- result -= bufptr;
- frmres = bytes_to_frames( substream->runtime, result);
- snd_azf3328_dbgplay("REC @ 0x%8lx, frames %8ld\n", result, frmres);
- return frmres;
+static snd_pcm_uframes_t
+snd_azf3328_codec_i2s_out_pointer(struct snd_pcm_substream *substream)
+{
+ return snd_azf3328_codec_pointer(substream, AZF_CODEC_I2S_OUT);
}
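All the trigger and pointer callbacks above follow one scheme: the formerly duplicated playback/capture bodies are merged into a single function parameterized by codec type, and because the fixed snd_pcm_ops callback signatures cannot carry that extra argument, every codec keeps a one-line wrapper that forwards its own enum value. In outline:

	static int do_trigger(int codec_type,
			      struct snd_pcm_substream *ss, int cmd);

	static int playback_trigger(struct snd_pcm_substream *ss, int cmd)
	{
		return do_trigger(PLAYBACK, ss, cmd);	/* capture/I2S out: ditto */
	}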
/******************************************************************/
#ifdef SUPPORT_GAMEPORT
static inline void
-snd_azf3328_gameport_irq_enable(struct snd_azf3328 *chip, int enable)
+snd_azf3328_gameport_irq_enable(struct snd_azf3328 *chip,
+ bool enable
+)
{
snd_azf3328_io_reg_setb(
chip->game_io+IDX_GAME_HWCONFIG,
@@ -1400,7 +1406,9 @@ snd_azf3328_gameport_irq_enable(struct snd_azf3328 *chip, int enable)
}
static inline void
-snd_azf3328_gameport_legacy_address_enable(struct snd_azf3328 *chip, int enable)
+snd_azf3328_gameport_legacy_address_enable(struct snd_azf3328 *chip,
+ bool enable
+)
{
snd_azf3328_io_reg_setb(
chip->game_io+IDX_GAME_HWCONFIG,
@@ -1409,10 +1417,27 @@ snd_azf3328_gameport_legacy_address_enable(struct snd_azf3328 *chip, int enable)
);
}
+static void
+snd_azf3328_gameport_set_counter_frequency(struct snd_azf3328 *chip,
+ unsigned int freq_cfg
+)
+{
+ snd_azf3328_io_reg_setb(
+ chip->game_io+IDX_GAME_HWCONFIG,
+ 0x02,
+ (freq_cfg & 1) != 0
+ );
+ snd_azf3328_io_reg_setb(
+ chip->game_io+IDX_GAME_HWCONFIG,
+ 0x04,
+ (freq_cfg & 2) != 0
+ );
+}
+
static inline void
-snd_azf3328_gameport_axis_circuit_enable(struct snd_azf3328 *chip, int enable)
+snd_azf3328_gameport_axis_circuit_enable(struct snd_azf3328 *chip, bool enable)
{
- snd_azf3328_codec_reg_6AH_update(
+ snd_azf3328_ctrl_reg_6AH_update(
chip, IO_6A_SOMETHING2_GAMEPORT, enable
);
}
@@ -1447,6 +1472,8 @@ snd_azf3328_gameport_open(struct gameport *gameport, int mode)
break;
}
+ snd_azf3328_gameport_set_counter_frequency(chip,
+ GAME_HWCFG_ADC_COUNTER_FREQ_STD);
snd_azf3328_gameport_axis_circuit_enable(chip, (res == 0));
return res;
@@ -1458,6 +1485,8 @@ snd_azf3328_gameport_close(struct gameport *gameport)
struct snd_azf3328 *chip = gameport_get_port_data(gameport);
snd_azf3328_dbggame("gameport_close\n");
+ snd_azf3328_gameport_set_counter_frequency(chip,
+ GAME_HWCFG_ADC_COUNTER_FREQ_1_200);
snd_azf3328_gameport_axis_circuit_enable(chip, 0);
}
@@ -1491,7 +1520,7 @@ snd_azf3328_gameport_cooked_read(struct gameport *gameport,
val = snd_azf3328_game_inb(chip, IDX_GAME_AXES_CONFIG);
if (val & GAME_AXES_SAMPLING_READY) {
- for (i = 0; i < 4; ++i) {
+ for (i = 0; i < ARRAY_SIZE(chip->axes); ++i) {
/* configure the axis to read */
val = (i << 4) | 0x0f;
snd_azf3328_game_outb(chip, IDX_GAME_AXES_CONFIG, val);
@@ -1514,7 +1543,7 @@ snd_azf3328_gameport_cooked_read(struct gameport *gameport,
snd_azf3328_game_outw(chip, IDX_GAME_AXIS_VALUE, 0xffff);
spin_unlock_irqrestore(&chip->reg_lock, flags);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < ARRAY_SIZE(chip->axes); i++) {
axes[i] = chip->axes[i];
if (axes[i] == 0xffff)
axes[i] = -1;
@@ -1552,6 +1581,8 @@ snd_azf3328_gameport(struct snd_azf3328 *chip, int dev)
/* DISABLE legacy address: we don't need it! */
snd_azf3328_gameport_legacy_address_enable(chip, 0);
+ snd_azf3328_gameport_set_counter_frequency(chip,
+ GAME_HWCFG_ADC_COUNTER_FREQ_1_200);
snd_azf3328_gameport_axis_circuit_enable(chip, 0);
gameport_register_port(chip->gameport);
@@ -1585,40 +1616,77 @@ snd_azf3328_gameport_interrupt(struct snd_azf3328 *chip)
static inline void
snd_azf3328_irq_log_unknown_type(u8 which)
{
- snd_azf3328_dbgplay(
+ snd_azf3328_dbgcodec(
"azt3328: unknown IRQ type (%x) occurred, please report!\n",
which
);
}
+static inline void
+snd_azf3328_codec_interrupt(struct snd_azf3328 *chip, u8 status)
+{
+ u8 which;
+ enum snd_azf3328_codec_type codec_type;
+ const struct snd_azf3328_codec_data *codec;
+
+ for (codec_type = AZF_CODEC_PLAYBACK;
+ codec_type <= AZF_CODEC_I2S_OUT;
+ ++codec_type) {
+
+ /* skip codec if there's no interrupt for it */
+ if (!(status & (1 << codec_type)))
+ continue;
+
+ codec = &chip->codecs[codec_type];
+
+ spin_lock(&chip->reg_lock);
+ which = snd_azf3328_codec_inb(codec, IDX_IO_CODEC_IRQTYPE);
+ /* ack all IRQ types immediately */
+ snd_azf3328_codec_outb(codec, IDX_IO_CODEC_IRQTYPE, which);
+ spin_unlock(&chip->reg_lock);
+
+ if ((chip->pcm[codec_type]) && (codec->substream)) {
+ snd_pcm_period_elapsed(codec->substream);
+ snd_azf3328_dbgcodec("%s period done (#%x), @ %x\n",
+ codec->name,
+ which,
+ snd_azf3328_codec_inl(
+ codec, IDX_IO_CODEC_DMA_CURRPOS
+ )
+ );
+ } else
+ printk(KERN_WARNING "azt3328: irq handler problem!\n");
+ if (which & IRQ_SOMETHING)
+ snd_azf3328_irq_log_unknown_type(which);
+ }
+}
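snd_azf3328_codec_interrupt() folds the formerly copy-pasted playback and recording IRQ branches into one loop; it relies on the per-codec status bits matching the enum order, so (1 << codec_type) selects the right codec, and each codec acknowledges its own IRQTYPE register. Stripped down (the ack helper is invented):

	for (type = PLAYBACK; type <= I2S_OUT; ++type) {
		if (!(status & (1 << type)))
			continue;			/* no IRQ from this codec */
		ack_codec_irq(chip, type);		/* read and write back IRQTYPE */
		if (chip->codecs[type].substream)
			snd_pcm_period_elapsed(chip->codecs[type].substream);
	}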
+
static irqreturn_t
snd_azf3328_interrupt(int irq, void *dev_id)
{
struct snd_azf3328 *chip = dev_id;
- u8 status, which;
-#if DEBUG_PLAY_REC
+ u8 status;
+#if DEBUG_CODEC
static unsigned long irq_count;
#endif
- status = snd_azf3328_codec_inb(chip, IDX_IO_IRQSTATUS);
+ status = snd_azf3328_ctrl_inb(chip, IDX_IO_IRQSTATUS);
/* fast path out, to ease interrupt sharing */
if (!(status &
- (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_GAMEPORT|IRQ_MPU401|IRQ_TIMER)
+ (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_I2S_OUT
+ |IRQ_GAMEPORT|IRQ_MPU401|IRQ_TIMER)
))
return IRQ_NONE; /* must be interrupt for another device */
- snd_azf3328_dbgplay(
- "irq_count %ld! IDX_IO_PLAY_FLAGS %04x, "
- "IDX_IO_PLAY_IRQTYPE %04x, IDX_IO_IRQSTATUS %04x\n",
+ snd_azf3328_dbgcodec(
+ "irq_count %ld! IDX_IO_IRQSTATUS %04x\n",
irq_count++ /* debug-only */,
- snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS),
- snd_azf3328_codec_inw(chip, IDX_IO_PLAY_IRQTYPE),
status
);
if (status & IRQ_TIMER) {
- /* snd_azf3328_dbgplay("timer %ld\n",
+ /* snd_azf3328_dbgcodec("timer %ld\n",
snd_azf3328_codec_inl(chip, IDX_IO_TIMER_VALUE)
& TIMER_VALUE_MASK
); */
@@ -1626,71 +1694,36 @@ snd_azf3328_interrupt(int irq, void *dev_id)
snd_timer_interrupt(chip->timer, chip->timer->sticks);
/* ACK timer */
spin_lock(&chip->reg_lock);
- snd_azf3328_codec_outb(chip, IDX_IO_TIMER_VALUE + 3, 0x07);
+ snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0x07);
spin_unlock(&chip->reg_lock);
- snd_azf3328_dbgplay("azt3328: timer IRQ\n");
+ snd_azf3328_dbgcodec("azt3328: timer IRQ\n");
}
- if (status & IRQ_PLAYBACK) {
- spin_lock(&chip->reg_lock);
- which = snd_azf3328_codec_inb(chip, IDX_IO_PLAY_IRQTYPE);
- /* ack all IRQ types immediately */
- snd_azf3328_codec_outb(chip, IDX_IO_PLAY_IRQTYPE, which);
- spin_unlock(&chip->reg_lock);
- if (chip->pcm && chip->audio_stream[AZF_PLAYBACK].substream) {
- snd_pcm_period_elapsed(
- chip->audio_stream[AZF_PLAYBACK].substream
- );
- snd_azf3328_dbgplay("PLAY period done (#%x), @ %x\n",
- which,
- snd_azf3328_codec_inl(
- chip, IDX_IO_PLAY_DMA_CURRPOS
- )
- );
- } else
- printk(KERN_WARNING "azt3328: irq handler problem!\n");
- if (which & IRQ_PLAY_SOMETHING)
- snd_azf3328_irq_log_unknown_type(which);
- }
- if (status & IRQ_RECORDING) {
- spin_lock(&chip->reg_lock);
- which = snd_azf3328_codec_inb(chip, IDX_IO_REC_IRQTYPE);
- /* ack all IRQ types immediately */
- snd_azf3328_codec_outb(chip, IDX_IO_REC_IRQTYPE, which);
- spin_unlock(&chip->reg_lock);
+ if (status & (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_I2S_OUT))
+ snd_azf3328_codec_interrupt(chip, status);
- if (chip->pcm && chip->audio_stream[AZF_CAPTURE].substream) {
- snd_pcm_period_elapsed(
- chip->audio_stream[AZF_CAPTURE].substream
- );
- snd_azf3328_dbgplay("REC period done (#%x), @ %x\n",
- which,
- snd_azf3328_codec_inl(
- chip, IDX_IO_REC_DMA_CURRPOS
- )
- );
- } else
- printk(KERN_WARNING "azt3328: irq handler problem!\n");
- if (which & IRQ_REC_SOMETHING)
- snd_azf3328_irq_log_unknown_type(which);
- }
if (status & IRQ_GAMEPORT)
snd_azf3328_gameport_interrupt(chip);
+
/* MPU401 has less critical IRQ requirements
* than timer and playback/recording, right? */
if (status & IRQ_MPU401) {
snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data);
/* hmm, do we have to ack the IRQ here somehow?
- * If so, then I don't know how... */
- snd_azf3328_dbgplay("azt3328: MPU401 IRQ\n");
+ * If so, then I don't know how yet... */
+ snd_azf3328_dbgcodec("azt3328: MPU401 IRQ\n");
}
return IRQ_HANDLED;
}
/*****************************************************************/
-static const struct snd_pcm_hardware snd_azf3328_playback =
+/* as long as we think we have identical snd_pcm_hardware parameters
+ for playback, capture and i2s out, we can use the same physical struct
+ since the struct is simply being copied into a member.
+*/
+static const struct snd_pcm_hardware snd_azf3328_hardware =
{
/* FIXME!! Correct? */
.info = SNDRV_PCM_INFO_MMAP |
@@ -1718,31 +1751,6 @@ static const struct snd_pcm_hardware snd_azf3328_playback =
.fifo_size = 0,
};
-static const struct snd_pcm_hardware snd_azf3328_capture =
-{
- /* FIXME */
- .info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_MMAP_VALID,
- .formats = SNDRV_PCM_FMTBIT_S8 |
- SNDRV_PCM_FMTBIT_U8 |
- SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_U16_LE,
- .rates = SNDRV_PCM_RATE_5512 |
- SNDRV_PCM_RATE_8000_48000 |
- SNDRV_PCM_RATE_KNOT,
- .rate_min = AZF_FREQ_4000,
- .rate_max = AZF_FREQ_66200,
- .channels_min = 1,
- .channels_max = 2,
- .buffer_bytes_max = 65536,
- .period_bytes_min = 64,
- .period_bytes_max = 65536,
- .periods_min = 1,
- .periods_max = 1024,
- .fifo_size = 0,
-};
-
static unsigned int snd_azf3328_fixed_rates[] = {
AZF_FREQ_4000,
@@ -1770,14 +1778,19 @@ static struct snd_pcm_hw_constraint_list snd_azf3328_hw_constraints_rates = {
/*****************************************************************/
static int
-snd_azf3328_playback_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_open(struct snd_pcm_substream *substream,
+ enum snd_azf3328_codec_type codec_type
+)
{
struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
snd_azf3328_dbgcallenter();
- chip->audio_stream[AZF_PLAYBACK].substream = substream;
- runtime->hw = snd_azf3328_playback;
+ chip->codecs[codec_type].substream = substream;
+
+ /* same parameters for all our codecs - at least we think so... */
+ runtime->hw = snd_azf3328_hardware;
+
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&snd_azf3328_hw_constraints_rates);
snd_azf3328_dbgcallleave();
@@ -1785,40 +1798,52 @@ snd_azf3328_playback_open(struct snd_pcm_substream *substream)
}
static int
+snd_azf3328_playback_open(struct snd_pcm_substream *substream)
+{
+ return snd_azf3328_pcm_open(substream, AZF_CODEC_PLAYBACK);
+}
+
+static int
snd_azf3328_capture_open(struct snd_pcm_substream *substream)
{
- struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
+ return snd_azf3328_pcm_open(substream, AZF_CODEC_CAPTURE);
+}
- snd_azf3328_dbgcallenter();
- chip->audio_stream[AZF_CAPTURE].substream = substream;
- runtime->hw = snd_azf3328_capture;
- snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- &snd_azf3328_hw_constraints_rates);
- snd_azf3328_dbgcallleave();
- return 0;
+static int
+snd_azf3328_i2s_out_open(struct snd_pcm_substream *substream)
+{
+ return snd_azf3328_pcm_open(substream, AZF_CODEC_I2S_OUT);
}
static int
-snd_azf3328_playback_close(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_close(struct snd_pcm_substream *substream,
+ enum snd_azf3328_codec_type codec_type
+)
{
struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
snd_azf3328_dbgcallenter();
- chip->audio_stream[AZF_PLAYBACK].substream = NULL;
+ chip->codecs[codec_type].substream = NULL;
snd_azf3328_dbgcallleave();
return 0;
}
static int
+snd_azf3328_playback_close(struct snd_pcm_substream *substream)
+{
+ return snd_azf3328_pcm_close(substream, AZF_CODEC_PLAYBACK);
+}
+
+static int
snd_azf3328_capture_close(struct snd_pcm_substream *substream)
{
- struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
+ return snd_azf3328_pcm_close(substream, AZF_CODEC_CAPTURE);
+}
- snd_azf3328_dbgcallenter();
- chip->audio_stream[AZF_CAPTURE].substream = NULL;
- snd_azf3328_dbgcallleave();
- return 0;
+static int
+snd_azf3328_i2s_out_close(struct snd_pcm_substream *substream)
+{
+ return snd_azf3328_pcm_close(substream, AZF_CODEC_I2S_OUT);
}
/******************************************************************/
@@ -1829,9 +1854,9 @@ static struct snd_pcm_ops snd_azf3328_playback_ops = {
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_azf3328_hw_params,
.hw_free = snd_azf3328_hw_free,
- .prepare = snd_azf3328_playback_prepare,
- .trigger = snd_azf3328_playback_trigger,
- .pointer = snd_azf3328_playback_pointer
+ .prepare = snd_azf3328_codec_prepare,
+ .trigger = snd_azf3328_codec_playback_trigger,
+ .pointer = snd_azf3328_codec_playback_pointer
};
static struct snd_pcm_ops snd_azf3328_capture_ops = {
@@ -1840,30 +1865,67 @@ static struct snd_pcm_ops snd_azf3328_capture_ops = {
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_azf3328_hw_params,
.hw_free = snd_azf3328_hw_free,
- .prepare = snd_azf3328_capture_prepare,
- .trigger = snd_azf3328_capture_trigger,
- .pointer = snd_azf3328_capture_pointer
+ .prepare = snd_azf3328_codec_prepare,
+ .trigger = snd_azf3328_codec_capture_trigger,
+ .pointer = snd_azf3328_codec_capture_pointer
+};
+
+static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
+ .open = snd_azf3328_i2s_out_open,
+ .close = snd_azf3328_i2s_out_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_azf3328_hw_params,
+ .hw_free = snd_azf3328_hw_free,
+ .prepare = snd_azf3328_codec_prepare,
+ .trigger = snd_azf3328_codec_i2s_out_trigger,
+ .pointer = snd_azf3328_codec_i2s_out_pointer
};
static int __devinit
-snd_azf3328_pcm(struct snd_azf3328 *chip, int device)
+snd_azf3328_pcm(struct snd_azf3328 *chip)
{
+enum { AZF_PCMDEV_STD, AZF_PCMDEV_I2S_OUT, NUM_AZF_PCMDEVS }; /* pcm devices */
+
struct snd_pcm *pcm;
int err;
snd_azf3328_dbgcallenter();
- if ((err = snd_pcm_new(chip->card, "AZF3328 DSP", device, 1, 1, &pcm)) < 0)
+
+ err = snd_pcm_new(chip->card, "AZF3328 DSP", AZF_PCMDEV_STD,
+ 1, 1, &pcm);
+ if (err < 0)
return err;
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_azf3328_playback_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_azf3328_capture_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_azf3328_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ &snd_azf3328_capture_ops);
pcm->private_data = chip;
pcm->info_flags = 0;
strcpy(pcm->name, chip->card->shortname);
- chip->pcm = pcm;
+ /* same pcm object for playback/capture (see snd_pcm_new() above) */
+ chip->pcm[AZF_CODEC_PLAYBACK] = pcm;
+ chip->pcm[AZF_CODEC_CAPTURE] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 64*1024);
+ snd_dma_pci_data(chip->pci),
+ 64*1024, 64*1024);
+
+ err = snd_pcm_new(chip->card, "AZF3328 I2S OUT", AZF_PCMDEV_I2S_OUT,
+ 1, 0, &pcm);
+ if (err < 0)
+ return err;
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_azf3328_i2s_out_ops);
+
+ pcm->private_data = chip;
+ pcm->info_flags = 0;
+ strcpy(pcm->name, chip->card->shortname);
+ chip->pcm[AZF_CODEC_I2S_OUT] = pcm;
+
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(chip->pci),
+ 64*1024, 64*1024);
snd_azf3328_dbgcallleave();
return 0;
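Note how three codecs end up on only two ALSA PCM devices: AZF_PCMDEV_STD keeps one playback plus one capture substream and is shared by the PLAYBACK and CAPTURE codecs (both chip->pcm[] slots point at the same object), while the I2S output gets its own playback-only device. Schematically, mirroring the hunk with error handling omitted:

	/* device 0: 1 playback + 1 capture substream, shared pcm object */
	snd_pcm_new(card, "AZF3328 DSP", AZF_PCMDEV_STD, 1, 1, &pcm);
	chip->pcm[AZF_CODEC_PLAYBACK] = pcm;
	chip->pcm[AZF_CODEC_CAPTURE]  = pcm;

	/* device 1: playback only, feeding the external I2S DAC */
	snd_pcm_new(card, "AZF3328 I2S OUT", AZF_PCMDEV_I2S_OUT, 1, 0, &pcm);
	chip->pcm[AZF_CODEC_I2S_OUT] = pcm;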
@@ -1902,7 +1964,7 @@ snd_azf3328_timer_start(struct snd_timer *timer)
snd_azf3328_dbgtimer("setting timer countdown value %d, add COUNTDOWN|IRQ\n", delay);
delay |= TIMER_COUNTDOWN_ENABLE | TIMER_IRQ_ENABLE;
spin_lock_irqsave(&chip->reg_lock, flags);
- snd_azf3328_codec_outl(chip, IDX_IO_TIMER_VALUE, delay);
+ snd_azf3328_ctrl_outl(chip, IDX_IO_TIMER_VALUE, delay);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_azf3328_dbgcallleave();
return 0;
@@ -1919,7 +1981,7 @@ snd_azf3328_timer_stop(struct snd_timer *timer)
spin_lock_irqsave(&chip->reg_lock, flags);
/* disable timer countdown and interrupt */
/* FIXME: should we write TIMER_IRQ_ACK here? */
- snd_azf3328_codec_outb(chip, IDX_IO_TIMER_VALUE + 3, 0);
+ snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0);
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_azf3328_dbgcallleave();
return 0;
@@ -2035,7 +2097,7 @@ snd_azf3328_test_bit(unsigned unsigned reg, int bit)
outb(val, reg);
- printk(KERN_ERR "reg %04x bit %d: %02x %02x %02x\n",
+ printk(KERN_DEBUG "reg %04x bit %d: %02x %02x %02x\n",
reg, bit, val, valoff, valon
);
}
@@ -2048,9 +2110,9 @@ snd_azf3328_debug_show_ports(const struct snd_azf3328 *chip)
u16 tmp;
snd_azf3328_dbgmisc(
- "codec_io 0x%lx, game_io 0x%lx, mpu_io 0x%lx, "
+ "ctrl_io 0x%lx, game_io 0x%lx, mpu_io 0x%lx, "
"opl3_io 0x%lx, mixer_io 0x%lx, irq %d\n",
- chip->codec_io, chip->game_io, chip->mpu_io,
+ chip->ctrl_io, chip->game_io, chip->mpu_io,
chip->opl3_io, chip->mixer_io, chip->irq
);
@@ -2083,9 +2145,9 @@ snd_azf3328_debug_show_ports(const struct snd_azf3328 *chip)
inb(0x38c + tmp)
);
- for (tmp = 0; tmp < AZF_IO_SIZE_CODEC; tmp += 2)
- snd_azf3328_dbgmisc("codec 0x%02x: 0x%04x\n",
- tmp, snd_azf3328_codec_inw(chip, tmp)
+ for (tmp = 0; tmp < AZF_IO_SIZE_CTRL; tmp += 2)
+ snd_azf3328_dbgmisc("ctrl 0x%02x: 0x%04x\n",
+ tmp, snd_azf3328_ctrl_inw(chip, tmp)
);
for (tmp = 0; tmp < AZF_IO_SIZE_MIXER; tmp += 2)
@@ -2106,7 +2168,8 @@ snd_azf3328_create(struct snd_card *card,
static struct snd_device_ops ops = {
.dev_free = snd_azf3328_dev_free,
};
- u16 tmp;
+ u8 dma_init;
+ enum snd_azf3328_codec_type codec_type;
*rchip = NULL;
@@ -2138,14 +2201,21 @@ snd_azf3328_create(struct snd_card *card,
if (err < 0)
goto out_err;
- chip->codec_io = pci_resource_start(pci, 0);
+ chip->ctrl_io = pci_resource_start(pci, 0);
chip->game_io = pci_resource_start(pci, 1);
chip->mpu_io = pci_resource_start(pci, 2);
- chip->opl3_io = pci_resource_start(pci, 3);
+ chip->opl3_io = pci_resource_start(pci, 3);
chip->mixer_io = pci_resource_start(pci, 4);
- chip->audio_stream[AZF_PLAYBACK].portbase = chip->codec_io + 0x00;
- chip->audio_stream[AZF_CAPTURE].portbase = chip->codec_io + 0x20;
+ chip->codecs[AZF_CODEC_PLAYBACK].io_base =
+ chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK;
+ chip->codecs[AZF_CODEC_PLAYBACK].name = "PLAYBACK";
+ chip->codecs[AZF_CODEC_CAPTURE].io_base =
+ chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE;
+ chip->codecs[AZF_CODEC_CAPTURE].name = "CAPTURE";
+ chip->codecs[AZF_CODEC_I2S_OUT].io_base =
+ chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT;
+ chip->codecs[AZF_CODEC_I2S_OUT].name = "I2S_OUT";
if (request_irq(pci->irq, snd_azf3328_interrupt,
IRQF_SHARED, card->shortname, chip)) {
@@ -2168,20 +2238,25 @@ snd_azf3328_create(struct snd_card *card,
if (err < 0)
goto out_err;
- /* shutdown codecs to save power */
- /* have snd_azf3328_codec_activity() act properly */
- chip->audio_stream[AZF_PLAYBACK].running = 1;
- snd_azf3328_codec_activity(chip, AZF_PLAYBACK, 0);
+ /* standard codec init stuff */
+ /* default DMA init value */
+ dma_init = DMA_RUN_SOMETHING2|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE;
- /* standard chip init stuff */
- /* default IRQ init value */
- tmp = DMA_PLAY_SOMETHING2|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE;
+ for (codec_type = AZF_CODEC_PLAYBACK;
+ codec_type <= AZF_CODEC_I2S_OUT; ++codec_type) {
+ struct snd_azf3328_codec_data *codec =
+ &chip->codecs[codec_type];
- spin_lock_irq(&chip->reg_lock);
- snd_azf3328_codec_outb(chip, IDX_IO_PLAY_FLAGS, tmp);
- snd_azf3328_codec_outb(chip, IDX_IO_REC_FLAGS, tmp);
- snd_azf3328_codec_outb(chip, IDX_IO_SOMETHING_FLAGS, tmp);
- spin_unlock_irq(&chip->reg_lock);
+ /* shutdown codecs to save power */
+ /* have ...ctrl_codec_activity() act properly */
+ codec->running = 1;
+ snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
+
+ spin_lock_irq(&chip->reg_lock);
+ snd_azf3328_codec_outb(codec, IDX_IO_CODEC_DMA_FLAGS,
+ dma_init);
+ spin_unlock_irq(&chip->reg_lock);
+ }
snd_card_set_dev(card, &pci->dev);
@@ -2229,8 +2304,11 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
card->private_data = chip;
+ /* chose to use MPU401_HW_AZT2320 ID instead of MPU401_HW_MPU401,
+	   since our hardware ought to be similar, thus use the same ID. */
err = snd_mpu401_uart_new(
- card, 0, MPU401_HW_MPU401, chip->mpu_io, MPU401_INFO_INTEGRATED,
+ card, 0,
+ MPU401_HW_AZT2320, chip->mpu_io, MPU401_INFO_INTEGRATED,
pci->irq, 0, &chip->rmidi
);
if (err < 0) {
@@ -2244,7 +2322,7 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
if (err < 0)
goto out_err;
- err = snd_azf3328_pcm(chip, 0);
+ err = snd_azf3328_pcm(chip);
if (err < 0)
goto out_err;
@@ -2266,14 +2344,14 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
opl3->private_data = chip;
sprintf(card->longname, "%s at 0x%lx, irq %i",
- card->shortname, chip->codec_io, chip->irq);
+ card->shortname, chip->ctrl_io, chip->irq);
err = snd_card_register(card);
if (err < 0)
goto out_err;
#ifdef MODULE
- printk(
+ printk(KERN_INFO
"azt3328: Sound driver for Aztech AZF3328-based soundcards such as PCI168.\n"
"azt3328: Hardware was completely undocumented, unfortunately.\n"
"azt3328: Feel free to contact andi AT lisas.de for bug reports etc.!\n"
@@ -2308,36 +2386,52 @@ snd_azf3328_remove(struct pci_dev *pci)
}
#ifdef CONFIG_PM
+static inline void
+snd_azf3328_suspend_regs(unsigned long io_addr, unsigned count, u32 *saved_regs)
+{
+ unsigned reg;
+
+ for (reg = 0; reg < count; ++reg) {
+ *saved_regs = inl(io_addr);
+ snd_azf3328_dbgpm("suspend: io 0x%04lx: 0x%08x\n",
+ io_addr, *saved_regs);
+ ++saved_regs;
+ io_addr += sizeof(*saved_regs);
+ }
+}
+
static int
snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
{
struct snd_card *card = pci_get_drvdata(pci);
struct snd_azf3328 *chip = card->private_data;
- unsigned reg;
+ u16 *saved_regs_ctrl_u16;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
+ snd_pcm_suspend_all(chip->pcm[AZF_CODEC_PLAYBACK]);
+ snd_pcm_suspend_all(chip->pcm[AZF_CODEC_I2S_OUT]);
- for (reg = 0; reg < AZF_IO_SIZE_MIXER_PM / 2; ++reg)
- chip->saved_regs_mixer[reg] = inw(chip->mixer_io + reg * 2);
+ snd_azf3328_suspend_regs(chip->mixer_io,
+ ARRAY_SIZE(chip->saved_regs_mixer), chip->saved_regs_mixer);
/* make sure to disable master volume etc. to prevent looping sound */
snd_azf3328_mixer_set_mute(chip, IDX_MIXER_PLAY_MASTER, 1);
snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1);
- for (reg = 0; reg < AZF_IO_SIZE_CODEC_PM / 2; ++reg)
- chip->saved_regs_codec[reg] = inw(chip->codec_io + reg * 2);
+ snd_azf3328_suspend_regs(chip->ctrl_io,
+ ARRAY_SIZE(chip->saved_regs_ctrl), chip->saved_regs_ctrl);
/* manually store the one currently relevant write-only reg, too */
- chip->saved_regs_codec[IDX_IO_6AH / 2] = chip->shadow_reg_codec_6AH;
+ saved_regs_ctrl_u16 = (u16 *)chip->saved_regs_ctrl;
+ saved_regs_ctrl_u16[IDX_IO_6AH / 2] = chip->shadow_reg_ctrl_6AH;
- for (reg = 0; reg < AZF_IO_SIZE_GAME_PM / 2; ++reg)
- chip->saved_regs_game[reg] = inw(chip->game_io + reg * 2);
- for (reg = 0; reg < AZF_IO_SIZE_MPU_PM / 2; ++reg)
- chip->saved_regs_mpu[reg] = inw(chip->mpu_io + reg * 2);
- for (reg = 0; reg < AZF_IO_SIZE_OPL3_PM / 2; ++reg)
- chip->saved_regs_opl3[reg] = inw(chip->opl3_io + reg * 2);
+ snd_azf3328_suspend_regs(chip->game_io,
+ ARRAY_SIZE(chip->saved_regs_game), chip->saved_regs_game);
+ snd_azf3328_suspend_regs(chip->mpu_io,
+ ARRAY_SIZE(chip->saved_regs_mpu), chip->saved_regs_mpu);
+ snd_azf3328_suspend_regs(chip->opl3_io,
+ ARRAY_SIZE(chip->saved_regs_opl3), chip->saved_regs_opl3);
pci_disable_device(pci);
pci_save_state(pci);
@@ -2345,12 +2439,28 @@ snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
return 0;
}
+static inline void
+snd_azf3328_resume_regs(const u32 *saved_regs,
+ unsigned long io_addr,
+ unsigned count
+)
+{
+ unsigned reg;
+
+ for (reg = 0; reg < count; ++reg) {
+ outl(*saved_regs, io_addr);
+ snd_azf3328_dbgpm("resume: io 0x%04lx: 0x%08x --> 0x%08x\n",
+ io_addr, *saved_regs, inl(io_addr));
+ ++saved_regs;
+ io_addr += sizeof(*saved_regs);
+ }
+}
+
static int
snd_azf3328_resume(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
- struct snd_azf3328 *chip = card->private_data;
- unsigned reg;
+ const struct snd_azf3328 *chip = card->private_data;
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
@@ -2362,16 +2472,24 @@ snd_azf3328_resume(struct pci_dev *pci)
}
pci_set_master(pci);
- for (reg = 0; reg < AZF_IO_SIZE_GAME_PM / 2; ++reg)
- outw(chip->saved_regs_game[reg], chip->game_io + reg * 2);
- for (reg = 0; reg < AZF_IO_SIZE_MPU_PM / 2; ++reg)
- outw(chip->saved_regs_mpu[reg], chip->mpu_io + reg * 2);
- for (reg = 0; reg < AZF_IO_SIZE_OPL3_PM / 2; ++reg)
- outw(chip->saved_regs_opl3[reg], chip->opl3_io + reg * 2);
- for (reg = 0; reg < AZF_IO_SIZE_MIXER_PM / 2; ++reg)
- outw(chip->saved_regs_mixer[reg], chip->mixer_io + reg * 2);
- for (reg = 0; reg < AZF_IO_SIZE_CODEC_PM / 2; ++reg)
- outw(chip->saved_regs_codec[reg], chip->codec_io + reg * 2);
+ snd_azf3328_resume_regs(chip->saved_regs_game, chip->game_io,
+ ARRAY_SIZE(chip->saved_regs_game));
+ snd_azf3328_resume_regs(chip->saved_regs_mpu, chip->mpu_io,
+ ARRAY_SIZE(chip->saved_regs_mpu));
+ snd_azf3328_resume_regs(chip->saved_regs_opl3, chip->opl3_io,
+ ARRAY_SIZE(chip->saved_regs_opl3));
+
+ snd_azf3328_resume_regs(chip->saved_regs_mixer, chip->mixer_io,
+ ARRAY_SIZE(chip->saved_regs_mixer));
+
+ /* unfortunately with 32bit transfers, IDX_MIXER_PLAY_MASTER (0x02)
+ and IDX_MIXER_RESET (offset 0x00) get touched at the same time,
+ resulting in a mixer reset condition persisting until _after_
+ master vol was restored. Thus master vol needs an extra restore. */
+ outw(((u16 *)chip->saved_regs_mixer)[1], chip->mixer_io + 2);
+
+ snd_azf3328_resume_regs(chip->saved_regs_ctrl, chip->ctrl_io,
+ ARRAY_SIZE(chip->saved_regs_ctrl));
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
diff --git a/sound/pci/azt3328.h b/sound/pci/azt3328.h
index 974e05122f00..6f46b97650cc 100644
--- a/sound/pci/azt3328.h
+++ b/sound/pci/azt3328.h
@@ -6,50 +6,59 @@
/*** main I/O area port indices ***/
/* (only 0x70 of 0x80 bytes saved/restored by Windows driver) */
-#define AZF_IO_SIZE_CODEC 0x80
-#define AZF_IO_SIZE_CODEC_PM 0x70
+#define AZF_IO_SIZE_CTRL 0x80
+#define AZF_IO_SIZE_CTRL_PM 0x70
-/* the driver initialisation suggests a layout of 4 main areas:
- * from 0x00 (playback), from 0x20 (recording) and from 0x40 (maybe MPU401??).
+/* the driver initialisation suggests a layout of 4 areas
+ * within the main card control I/O:
+ * from 0x00 (playback codec), from 0x20 (recording codec)
+ * and from 0x40 (most certainly I2S out codec).
* And another area from 0x60 to 0x6f (DirectX timer, IRQ management,
* power management etc.???). */
-/** playback area **/
-#define IDX_IO_PLAY_FLAGS 0x00 /* PU:0x0000 */
+#define AZF_IO_OFFS_CODEC_PLAYBACK 0x00
+#define AZF_IO_OFFS_CODEC_CAPTURE 0x20
+#define AZF_IO_OFFS_CODEC_I2S_OUT 0x40
+
+#define IDX_IO_CODEC_DMA_FLAGS 0x00 /* PU:0x0000 */
/* able to reactivate output after output muting due to 8/16bit
* output change, just like 0x0002.
* 0x0001 is the only bit that's able to start the DMA counter */
- #define DMA_RESUME 0x0001 /* paused if cleared ? */
+ #define DMA_RESUME 0x0001 /* paused if cleared? */
/* 0x0002 *temporarily* set during DMA stopping. hmm
* both 0x0002 and 0x0004 set in playback setup. */
/* able to reactivate output after output muting due to 8/16bit
* output change, just like 0x0001. */
- #define DMA_PLAY_SOMETHING1 0x0002 /* \ alternated (toggled) */
+ #define DMA_RUN_SOMETHING1 0x0002 /* \ alternated (toggled) */
/* 0x0004: NOT able to reactivate output */
- #define DMA_PLAY_SOMETHING2 0x0004 /* / bits */
+ #define DMA_RUN_SOMETHING2 0x0004 /* / bits */
#define SOMETHING_ALMOST_ALWAYS_SET 0x0008 /* ???; can be modified */
#define DMA_EPILOGUE_SOMETHING 0x0010
#define DMA_SOMETHING_ELSE 0x0020 /* ??? */
- #define SOMETHING_UNMODIFIABLE 0xffc0 /* unused ? not modifiable */
-#define IDX_IO_PLAY_IRQTYPE 0x02 /* PU:0x0001 */
+ #define SOMETHING_UNMODIFIABLE 0xffc0 /* unused? not modifiable */
+#define IDX_IO_CODEC_IRQTYPE 0x02 /* PU:0x0001 */
/* write back to flags in case flags are set, in order to ACK IRQ in handler
* (bit 1 of port 0x64 indicates interrupt for one of these three types)
* sometimes in this case it just writes 0xffff to globally ACK all IRQs
* settings written are not reflected when reading back, though.
- * seems to be IRQ, too (frequently used: port |= 0x07 !), but who knows ? */
- #define IRQ_PLAY_SOMETHING 0x0001 /* something & ACK */
- #define IRQ_FINISHED_PLAYBUF_1 0x0002 /* 1st dmabuf finished & ACK */
- #define IRQ_FINISHED_PLAYBUF_2 0x0004 /* 2nd dmabuf finished & ACK */
+ * seems to be IRQ, too (frequently used: port |= 0x07 !), but who knows? */
+ #define IRQ_SOMETHING 0x0001 /* something & ACK */
+ #define IRQ_FINISHED_DMABUF_1 0x0002 /* 1st dmabuf finished & ACK */
+ #define IRQ_FINISHED_DMABUF_2 0x0004 /* 2nd dmabuf finished & ACK */
#define IRQMASK_SOME_STATUS_1 0x0008 /* \ related bits */
#define IRQMASK_SOME_STATUS_2 0x0010 /* / (checked together in loop) */
- #define IRQMASK_UNMODIFIABLE 0xffe0 /* unused ? not modifiable */
-#define IDX_IO_PLAY_DMA_START_1 0x04 /* start address of 1st DMA play area, PU:0x00000000 */
-#define IDX_IO_PLAY_DMA_START_2 0x08 /* start address of 2nd DMA play area, PU:0x00000000 */
-#define IDX_IO_PLAY_DMA_LEN_1 0x0c /* length of 1st DMA play area, PU:0x0000 */
-#define IDX_IO_PLAY_DMA_LEN_2 0x0e /* length of 2nd DMA play area, PU:0x0000 */
-#define IDX_IO_PLAY_DMA_CURRPOS 0x10 /* current DMA position, PU:0x00000000 */
-#define IDX_IO_PLAY_DMA_CURROFS 0x14 /* offset within current DMA play area, PU:0x0000 */
-#define IDX_IO_PLAY_SOUNDFORMAT 0x16 /* PU:0x0010 */
+ #define IRQMASK_UNMODIFIABLE 0xffe0 /* unused? not modifiable */
+ /* start address of 1st DMA transfer area, PU:0x00000000 */
+#define IDX_IO_CODEC_DMA_START_1 0x04
+ /* start address of 2nd DMA transfer area, PU:0x00000000 */
+#define IDX_IO_CODEC_DMA_START_2 0x08
+ /* both lengths of DMA transfer areas, PU:0x00000000
+ length1: offset 0x0c, length2: offset 0x0e */
+#define IDX_IO_CODEC_DMA_LENGTHS 0x0c
+#define IDX_IO_CODEC_DMA_CURRPOS 0x10 /* current DMA position, PU:0x00000000 */
+ /* offset within current DMA transfer area, PU:0x0000 */
+#define IDX_IO_CODEC_DMA_CURROFS 0x14
+#define IDX_IO_CODEC_SOUNDFORMAT 0x16 /* PU:0x0010 */
/* all unspecified bits can't be modified */
#define SOUNDFORMAT_FREQUENCY_MASK 0x000f
#define SOUNDFORMAT_XTAL1 0x00
@@ -76,6 +85,7 @@
#define SOUNDFORMAT_FLAG_16BIT 0x0010
#define SOUNDFORMAT_FLAG_2CHANNELS 0x0020
+
/* define frequency helpers, for maximum value safety */
enum azf_freq_t {
#define AZF_FREQ(rate) AZF_FREQ_##rate = rate
@@ -96,29 +106,6 @@ enum azf_freq_t {
#undef AZF_FREQ
};
-/** recording area (see also: playback bit flag definitions) **/
-#define IDX_IO_REC_FLAGS 0x20 /* ??, PU:0x0000 */
-#define IDX_IO_REC_IRQTYPE 0x22 /* ??, PU:0x0000 */
- #define IRQ_REC_SOMETHING 0x0001 /* something & ACK */
- #define IRQ_FINISHED_RECBUF_1 0x0002 /* 1st dmabuf finished & ACK */
- #define IRQ_FINISHED_RECBUF_2 0x0004 /* 2nd dmabuf finished & ACK */
- /* hmm, maybe these are just the corresponding *recording* flags ?
- * but OTOH they are most likely at port 0x22 instead */
- #define IRQMASK_SOME_STATUS_1 0x0008 /* \ related bits */
- #define IRQMASK_SOME_STATUS_2 0x0010 /* / (checked together in loop) */
-#define IDX_IO_REC_DMA_START_1 0x24 /* PU:0x00000000 */
-#define IDX_IO_REC_DMA_START_2 0x28 /* PU:0x00000000 */
-#define IDX_IO_REC_DMA_LEN_1 0x2c /* PU:0x0000 */
-#define IDX_IO_REC_DMA_LEN_2 0x2e /* PU:0x0000 */
-#define IDX_IO_REC_DMA_CURRPOS 0x30 /* PU:0x00000000 */
-#define IDX_IO_REC_DMA_CURROFS 0x34 /* PU:0x00000000 */
-#define IDX_IO_REC_SOUNDFORMAT 0x36 /* PU:0x0000 */
-
-/** hmm, what is this I/O area for? MPU401?? or external DAC via I2S?? (after playback, recording, ???, timer) **/
-#define IDX_IO_SOMETHING_FLAGS 0x40 /* gets set to 0x34 just like port 0x0 and 0x20 on card init, PU:0x0000 */
-/* general */
-#define IDX_IO_42H 0x42 /* PU:0x0001 */
-
/** DirectX timer, main interrupt area (FIXME: and something else?) **/
#define IDX_IO_TIMER_VALUE 0x60 /* found this timer area by pure luck :-) */
/* timer countdown value; triggers IRQ when timer is finished */
@@ -133,17 +120,19 @@ enum azf_freq_t {
#define IDX_IO_IRQSTATUS 0x64
/* some IRQ bit in here might also be used to signal a power-management timer
* timeout, to request shutdown of the chip (e.g. AD1815JS has such a thing).
- * Some OPL3 hardware (e.g. in LM4560) has some special timer hardware which
- * can trigger an OPL3 timer IRQ, so maybe there's such a thing as well... */
+ * OPL3 hardware contains several timers which confusingly in most cases
+ * are NOT routed to an IRQ, but some designs (e.g. LM4560) DO support that,
+ * so I wouldn't be surprised at all to discover that AZF3328
+ * supports that thing as well... */
#define IRQ_PLAYBACK 0x0001
#define IRQ_RECORDING 0x0002
- #define IRQ_UNKNOWN1 0x0004 /* most probably I2S port */
+ #define IRQ_I2S_OUT 0x0004 /* this IS I2S, right!? (untested) */
#define IRQ_GAMEPORT 0x0008 /* Interrupt of Digital(ly) Enhanced Game Port */
#define IRQ_MPU401 0x0010
#define IRQ_TIMER 0x0020 /* DirectX timer */
- #define IRQ_UNKNOWN2 0x0040 /* probably unused, or possibly I2S port? */
- #define IRQ_UNKNOWN3 0x0080 /* probably unused, or possibly I2S port? */
+ #define IRQ_UNKNOWN2 0x0040 /* probably unused, or possibly OPL3 timer? */
+ #define IRQ_UNKNOWN3 0x0080 /* probably unused, or possibly OPL3 timer? */
#define IDX_IO_66H 0x66 /* writing 0xffff returns 0x0000 */
/* this is set to e.g. 0x3ff or 0x300, and writable;
* maybe some buffer limit, but I couldn't find out more, PU:0x00ff: */
@@ -206,7 +195,7 @@ enum azf_freq_t {
/*** Gameport area port indices ***/
/* (only 0x06 of 0x08 bytes saved/restored by Windows driver) */
#define AZF_IO_SIZE_GAME 0x08
-#define AZF_IO_SIZE_GAME_PM 0x06
+#define AZF_IO_SIZE_GAME_PM 0x06
enum {
AZF_GAME_LEGACY_IO_PORT = 0x200
@@ -272,6 +261,12 @@ enum {
* 11 --> 1/200: */
#define GAME_HWCFG_ADC_COUNTER_FREQ_MASK 0x06
+ /* FIXME: these values might be reversed... */
+ #define GAME_HWCFG_ADC_COUNTER_FREQ_STD 0
+ #define GAME_HWCFG_ADC_COUNTER_FREQ_1_2 1
+ #define GAME_HWCFG_ADC_COUNTER_FREQ_1_20 2
+ #define GAME_HWCFG_ADC_COUNTER_FREQ_1_200 3
+
/* enable gameport legacy I/O address (0x200)
* I was unable to locate any configurability for a different address: */
#define GAME_HWCFG_LEGACY_ADDRESS_ENABLE 0x08
@@ -281,6 +276,7 @@ enum {
#define AZF_IO_SIZE_MPU_PM 0x04
/*** OPL3 synth ***/
+/* (only 0x06 of 0x08 bytes saved/restored by Windows driver) */
#define AZF_IO_SIZE_OPL3 0x08
#define AZF_IO_SIZE_OPL3_PM 0x06
/* hmm, given that a standard OPL3 has 4 registers only,
@@ -340,4 +336,7 @@ enum {
#define SET_CHAN_LEFT 1
#define SET_CHAN_RIGHT 2
+/* helper macro to align I/O port ranges to 32bit I/O width */
+#define AZF_ALIGN(x) (((x) + 3) & (~3))
+
#endif /* __SOUND_AZT3328_H */
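
The ARRAY_SIZE()-driven loops in snd_azf3328_suspend_regs()/_resume_regs() transfer whole
32-bit words via inl()/outl(), which is why the new AZF_ALIGN() helper rounds the
(possibly odd-sized) *_PM ranges up to 32-bit I/O width. A minimal sketch of how the
per-area save buffers could be dimensioned under that assumption (the actual
struct snd_azf3328 layout lives in azt3328.c and is not part of this hunk):

	/* sketch only: size each PM save buffer in whole 32-bit words */
	u32 saved_regs_ctrl [AZF_ALIGN(AZF_IO_SIZE_CTRL_PM) / sizeof(u32)]; /* 0x70 -> 28 words */
	u32 saved_regs_game [AZF_ALIGN(AZF_IO_SIZE_GAME_PM) / sizeof(u32)]; /* 0x06 -> 0x08 -> 2 words */
	u32 saved_regs_mpu  [AZF_ALIGN(AZF_IO_SIZE_MPU_PM)  / sizeof(u32)]; /* 0x04 -> 1 word */
	u32 saved_regs_opl3 [AZF_ALIGN(AZF_IO_SIZE_OPL3_PM) / sizeof(u32)]; /* 0x06 -> 0x08 -> 2 words */
	u32 saved_regs_mixer[AZF_ALIGN(AZF_IO_SIZE_MIXER_PM)/ sizeof(u32)];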
diff --git a/sound/pci/cs46xx/cs46xx_lib.h b/sound/pci/cs46xx/cs46xx_lib.h
index 4eb55aa33612..b5189495d58a 100644
--- a/sound/pci/cs46xx/cs46xx_lib.h
+++ b/sound/pci/cs46xx/cs46xx_lib.h
@@ -35,7 +35,7 @@
#ifdef CONFIG_SND_CS46XX_NEW_DSP
-#define CS46XX_MIN_PERIOD_SIZE 1
+#define CS46XX_MIN_PERIOD_SIZE 64
#define CS46XX_MAX_PERIOD_SIZE 1024*1024
#else
#define CS46XX_MIN_PERIOD_SIZE 2048
diff --git a/sound/pci/ctxfi/ct20k2reg.h b/sound/pci/ctxfi/ct20k2reg.h
index 2d07986f57cc..e0394e3996e8 100644
--- a/sound/pci/ctxfi/ct20k2reg.h
+++ b/sound/pci/ctxfi/ct20k2reg.h
@@ -11,9 +11,12 @@
/* Timer Registers */
-#define TIMER_TIMR 0x1B7004
-#define INTERRUPT_GIP 0x1B7010
-#define INTERRUPT_GIE 0x1B7014
+#define WC 0x1b7000
+#define TIMR 0x1b7004
+# define TIMR_IE (1<<15)
+# define TIMR_IP (1<<14)
+#define GIP 0x1b7010
+#define GIE 0x1b7014
/* I2C Registers */
#define I2C_IF_ADDRESS 0x1B9000
diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
index a7f4a671f7b7..fee35cfc0c7f 100644
--- a/sound/pci/ctxfi/ctamixer.c
+++ b/sound/pci/ctxfi/ctamixer.c
@@ -63,7 +63,7 @@ static int amixer_set_input(struct amixer *amixer, struct rsc *rsc)
hw = amixer->rsc.hw;
hw->amixer_set_mode(amixer->rsc.ctrl_blk, AMIXER_Y_IMMEDIATE);
amixer->input = rsc;
- if (NULL == rsc)
+ if (!rsc)
hw->amixer_set_x(amixer->rsc.ctrl_blk, BLANK_SLOT);
else
hw->amixer_set_x(amixer->rsc.ctrl_blk,
@@ -99,7 +99,7 @@ static int amixer_set_sum(struct amixer *amixer, struct sum *sum)
hw = amixer->rsc.hw;
amixer->sum = sum;
- if (NULL == sum) {
+ if (!sum) {
hw->amixer_set_se(amixer->rsc.ctrl_blk, 0);
} else {
hw->amixer_set_se(amixer->rsc.ctrl_blk, 1);
@@ -124,20 +124,20 @@ static int amixer_commit_write(struct amixer *amixer)
/* Program master and conjugate resources */
amixer->rsc.ops->master(&amixer->rsc);
- if (NULL != input)
+ if (input)
input->ops->master(input);
- if (NULL != sum)
+ if (sum)
sum->rsc.ops->master(&sum->rsc);
for (i = 0; i < amixer->rsc.msr; i++) {
hw->amixer_set_dirty_all(amixer->rsc.ctrl_blk);
- if (NULL != input) {
+ if (input) {
hw->amixer_set_x(amixer->rsc.ctrl_blk,
input->ops->output_slot(input));
input->ops->next_conj(input);
}
- if (NULL != sum) {
+ if (sum) {
hw->amixer_set_sadr(amixer->rsc.ctrl_blk,
sum->rsc.ops->index(&sum->rsc));
sum->rsc.ops->next_conj(&sum->rsc);
@@ -147,10 +147,10 @@ static int amixer_commit_write(struct amixer *amixer)
amixer->rsc.ops->next_conj(&amixer->rsc);
}
amixer->rsc.ops->master(&amixer->rsc);
- if (NULL != input)
+ if (input)
input->ops->master(input);
- if (NULL != sum)
+ if (sum)
sum->rsc.ops->master(&sum->rsc);
return 0;
@@ -303,7 +303,7 @@ int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr)
*ramixer_mgr = NULL;
amixer_mgr = kzalloc(sizeof(*amixer_mgr), GFP_KERNEL);
- if (NULL == amixer_mgr)
+ if (!amixer_mgr)
return -ENOMEM;
err = rsc_mgr_init(&amixer_mgr->mgr, AMIXER, AMIXER_RESOURCE_NUM, hw);
@@ -456,7 +456,7 @@ int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr)
*rsum_mgr = NULL;
sum_mgr = kzalloc(sizeof(*sum_mgr), GFP_KERNEL);
- if (NULL == sum_mgr)
+ if (!sum_mgr)
return -ENOMEM;
err = rsc_mgr_init(&sum_mgr->mgr, SUM, SUM_RESOURCE_NUM, hw);
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index a49c76647307..b1b3a644f738 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -136,7 +136,7 @@ static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
struct snd_pcm_runtime *runtime;
struct ct_vm *vm;
- if (NULL == apcm->substream)
+ if (!apcm->substream)
return 0;
runtime = apcm->substream->runtime;
@@ -144,7 +144,7 @@ static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
apcm->vm_block = vm->map(vm, apcm->substream, runtime->dma_bytes);
- if (NULL == apcm->vm_block)
+ if (!apcm->vm_block)
return -ENOENT;
return 0;
@@ -154,7 +154,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
{
struct ct_vm *vm;
- if (NULL == apcm->vm_block)
+ if (!apcm->vm_block)
return;
vm = atc->vm;
@@ -231,16 +231,16 @@ atc_get_pitch(unsigned int input_rate, unsigned int output_rate)
static int select_rom(unsigned int pitch)
{
- if ((pitch > 0x00428f5c) && (pitch < 0x01b851ec)) {
+ if (pitch > 0x00428f5c && pitch < 0x01b851ec) {
/* 0.26 <= pitch <= 1.72 */
return 1;
- } else if ((0x01d66666 == pitch) || (0x01d66667 == pitch)) {
+ } else if (pitch == 0x01d66666 || pitch == 0x01d66667) {
/* pitch == 1.8375 */
return 2;
- } else if (0x02000000 == pitch) {
+ } else if (pitch == 0x02000000) {
/* pitch == 2 */
return 3;
- } else if ((pitch >= 0x0) && (pitch <= 0x08000000)) {
+ } else if (pitch >= 0x0 && pitch <= 0x08000000) {
/* 0 <= pitch <= 8 */
return 0;
} else {
@@ -283,7 +283,7 @@ static int atc_pcm_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
/* Get AMIXER resource */
n_amixer = (n_amixer < 2) ? 2 : n_amixer;
apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
- if (NULL == apcm->amixers) {
+ if (!apcm->amixers) {
err = -ENOMEM;
goto error1;
}
@@ -311,7 +311,7 @@ static int atc_pcm_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
INIT_VOL, atc->pcm[i+device*2]);
mutex_unlock(&atc->atc_mutex);
src = src->ops->next_interleave(src);
- if (NULL == src)
+ if (!src)
src = apcm->src;
}
@@ -334,7 +334,7 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
struct srcimp *srcimp;
int i;
- if (NULL != apcm->srcimps) {
+ if (apcm->srcimps) {
for (i = 0; i < apcm->n_srcimp; i++) {
srcimp = apcm->srcimps[i];
srcimp->ops->unmap(srcimp);
@@ -345,7 +345,7 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
apcm->srcimps = NULL;
}
- if (NULL != apcm->srccs) {
+ if (apcm->srccs) {
for (i = 0; i < apcm->n_srcc; i++) {
src_mgr->put_src(src_mgr, apcm->srccs[i]);
apcm->srccs[i] = NULL;
@@ -354,7 +354,7 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
apcm->srccs = NULL;
}
- if (NULL != apcm->amixers) {
+ if (apcm->amixers) {
for (i = 0; i < apcm->n_amixer; i++) {
amixer_mgr->put_amixer(amixer_mgr, apcm->amixers[i]);
apcm->amixers[i] = NULL;
@@ -363,17 +363,17 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
apcm->amixers = NULL;
}
- if (NULL != apcm->mono) {
+ if (apcm->mono) {
sum_mgr->put_sum(sum_mgr, apcm->mono);
apcm->mono = NULL;
}
- if (NULL != apcm->src) {
+ if (apcm->src) {
src_mgr->put_src(src_mgr, apcm->src);
apcm->src = NULL;
}
- if (NULL != apcm->vm_block) {
+ if (apcm->vm_block) {
/* Undo device virtual mem map */
ct_unmap_audio_buffer(atc, apcm);
apcm->vm_block = NULL;
@@ -419,7 +419,7 @@ static int atc_pcm_stop(struct ct_atc *atc, struct ct_atc_pcm *apcm)
src->ops->set_state(src, SRC_STATE_OFF);
src->ops->commit_write(src);
- if (NULL != apcm->srccs) {
+ if (apcm->srccs) {
for (i = 0; i < apcm->n_srcc; i++) {
src = apcm->srccs[i];
src->ops->set_bm(src, 0);
@@ -544,18 +544,18 @@ atc_pcm_capture_get_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
if (n_srcc) {
apcm->srccs = kzalloc(sizeof(void *)*n_srcc, GFP_KERNEL);
- if (NULL == apcm->srccs)
+ if (!apcm->srccs)
return -ENOMEM;
}
if (n_amixer) {
apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
- if (NULL == apcm->amixers) {
+ if (!apcm->amixers) {
err = -ENOMEM;
goto error1;
}
}
apcm->srcimps = kzalloc(sizeof(void *)*n_srcimp, GFP_KERNEL);
- if (NULL == apcm->srcimps) {
+ if (!apcm->srcimps) {
err = -ENOMEM;
goto error1;
}
@@ -818,7 +818,7 @@ static int spdif_passthru_playback_get_resources(struct ct_atc *atc,
/* Get AMIXER resource */
n_amixer = (n_amixer < 2) ? 2 : n_amixer;
apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
- if (NULL == apcm->amixers) {
+ if (!apcm->amixers) {
err = -ENOMEM;
goto error1;
}
@@ -919,7 +919,7 @@ spdif_passthru_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
amixer = apcm->amixers[i];
amixer->ops->setup(amixer, &src->rsc, INIT_VOL, NULL);
src = src->ops->next_interleave(src);
- if (NULL == src)
+ if (!src)
src = apcm->src;
}
/* Connect to SPDIFOO */
@@ -1121,7 +1121,7 @@ static int atc_release_resources(struct ct_atc *atc)
struct ct_mixer *mixer = NULL;
/* disconnect internal mixer objects */
- if (NULL != atc->mixer) {
+ if (atc->mixer) {
mixer = atc->mixer;
mixer->set_input_left(mixer, MIX_LINE_IN, NULL);
mixer->set_input_right(mixer, MIX_LINE_IN, NULL);
@@ -1131,7 +1131,7 @@ static int atc_release_resources(struct ct_atc *atc)
mixer->set_input_right(mixer, MIX_SPDIF_IN, NULL);
}
- if (NULL != atc->daios) {
+ if (atc->daios) {
daio_mgr = (struct daio_mgr *)atc->rsc_mgrs[DAIO];
for (i = 0; i < atc->n_daio; i++) {
daio = atc->daios[i];
@@ -1149,7 +1149,7 @@ static int atc_release_resources(struct ct_atc *atc)
atc->daios = NULL;
}
- if (NULL != atc->pcm) {
+ if (atc->pcm) {
sum_mgr = atc->rsc_mgrs[SUM];
for (i = 0; i < atc->n_pcm; i++)
sum_mgr->put_sum(sum_mgr, atc->pcm[i]);
@@ -1158,7 +1158,7 @@ static int atc_release_resources(struct ct_atc *atc)
atc->pcm = NULL;
}
- if (NULL != atc->srcs) {
+ if (atc->srcs) {
src_mgr = atc->rsc_mgrs[SRC];
for (i = 0; i < atc->n_src; i++)
src_mgr->put_src(src_mgr, atc->srcs[i]);
@@ -1167,7 +1167,7 @@ static int atc_release_resources(struct ct_atc *atc)
atc->srcs = NULL;
}
- if (NULL != atc->srcimps) {
+ if (atc->srcimps) {
srcimp_mgr = atc->rsc_mgrs[SRCIMP];
for (i = 0; i < atc->n_srcimp; i++) {
srcimp = atc->srcimps[i];
@@ -1185,7 +1185,7 @@ static int ct_atc_destroy(struct ct_atc *atc)
{
int i = 0;
- if (NULL == atc)
+ if (!atc)
return 0;
if (atc->timer) {
@@ -1196,21 +1196,20 @@ static int ct_atc_destroy(struct ct_atc *atc)
atc_release_resources(atc);
/* Destroy internal mixer objects */
- if (NULL != atc->mixer)
+ if (atc->mixer)
ct_mixer_destroy(atc->mixer);
for (i = 0; i < NUM_RSCTYP; i++) {
- if ((NULL != rsc_mgr_funcs[i].destroy) &&
- (NULL != atc->rsc_mgrs[i]))
+ if (rsc_mgr_funcs[i].destroy && atc->rsc_mgrs[i])
rsc_mgr_funcs[i].destroy(atc->rsc_mgrs[i]);
}
- if (NULL != atc->hw)
+ if (atc->hw)
destroy_hw_obj((struct hw *)atc->hw);
/* Destroy device virtual memory manager object */
- if (NULL != atc->vm) {
+ if (atc->vm) {
ct_vm_destroy(atc->vm);
atc->vm = NULL;
}
@@ -1275,7 +1274,7 @@ int __devinit ct_atc_create_alsa_devs(struct ct_atc *atc)
alsa_dev_funcs[MIXER].public_name = atc->chip_name;
for (i = 0; i < NUM_CTALSADEVS; i++) {
- if (NULL == alsa_dev_funcs[i].create)
+ if (!alsa_dev_funcs[i].create)
continue;
err = alsa_dev_funcs[i].create(atc, i,
@@ -1312,7 +1311,7 @@ static int __devinit atc_create_hw_devs(struct ct_atc *atc)
return err;
for (i = 0; i < NUM_RSCTYP; i++) {
- if (NULL == rsc_mgr_funcs[i].create)
+ if (!rsc_mgr_funcs[i].create)
continue;
err = rsc_mgr_funcs[i].create(atc->hw, &atc->rsc_mgrs[i]);
@@ -1339,19 +1338,19 @@ static int atc_get_resources(struct ct_atc *atc)
int err, i;
atc->daios = kzalloc(sizeof(void *)*(DAIONUM), GFP_KERNEL);
- if (NULL == atc->daios)
+ if (!atc->daios)
return -ENOMEM;
atc->srcs = kzalloc(sizeof(void *)*(2*2), GFP_KERNEL);
- if (NULL == atc->srcs)
+ if (!atc->srcs)
return -ENOMEM;
atc->srcimps = kzalloc(sizeof(void *)*(2*2), GFP_KERNEL);
- if (NULL == atc->srcimps)
+ if (!atc->srcimps)
return -ENOMEM;
atc->pcm = kzalloc(sizeof(void *)*(2*4), GFP_KERNEL);
- if (NULL == atc->pcm)
+ if (!atc->pcm)
return -ENOMEM;
daio_mgr = (struct daio_mgr *)atc->rsc_mgrs[DAIO];
@@ -1648,7 +1647,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
*ratc = NULL;
atc = kzalloc(sizeof(*atc), GFP_KERNEL);
- if (NULL == atc)
+ if (!atc)
return -ENOMEM;
/* Set operations */
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index deb6cfa73600..af56eb949bde 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -173,7 +173,7 @@ static int dao_set_left_input(struct dao *dao, struct rsc *input)
int i;
entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL);
- if (NULL == entry)
+ if (!entry)
return -ENOMEM;
/* Program master and conjugate resources */
@@ -201,7 +201,7 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input)
int i;
entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL);
- if (NULL == entry)
+ if (!entry)
return -ENOMEM;
/* Program master and conjugate resources */
@@ -228,7 +228,7 @@ static int dao_clear_left_input(struct dao *dao)
struct daio *daio = &dao->daio;
int i;
- if (NULL == dao->imappers[0])
+ if (!dao->imappers[0])
return 0;
entry = dao->imappers[0];
@@ -252,7 +252,7 @@ static int dao_clear_right_input(struct dao *dao)
struct daio *daio = &dao->daio;
int i;
- if (NULL == dao->imappers[daio->rscl.msr])
+ if (!dao->imappers[daio->rscl.msr])
return 0;
entry = dao->imappers[daio->rscl.msr];
@@ -408,7 +408,7 @@ static int dao_rsc_init(struct dao *dao,
return err;
dao->imappers = kzalloc(sizeof(void *)*desc->msr*2, GFP_KERNEL);
- if (NULL == dao->imappers) {
+ if (!dao->imappers) {
err = -ENOMEM;
goto error1;
}
@@ -442,11 +442,11 @@ error1:
static int dao_rsc_uninit(struct dao *dao)
{
- if (NULL != dao->imappers) {
- if (NULL != dao->imappers[0])
+ if (dao->imappers) {
+ if (dao->imappers[0])
dao_clear_left_input(dao);
- if (NULL != dao->imappers[dao->daio.rscl.msr])
+ if (dao->imappers[dao->daio.rscl.msr])
dao_clear_right_input(dao);
kfree(dao->imappers);
@@ -555,7 +555,7 @@ static int get_daio_rsc(struct daio_mgr *mgr,
/* Allocate mem for daio resource */
if (desc->type <= DAIO_OUT_MAX) {
dao = kzalloc(sizeof(*dao), GFP_KERNEL);
- if (NULL == dao) {
+ if (!dao) {
err = -ENOMEM;
goto error;
}
@@ -566,7 +566,7 @@ static int get_daio_rsc(struct daio_mgr *mgr,
*rdaio = &dao->daio;
} else {
dai = kzalloc(sizeof(*dai), GFP_KERNEL);
- if (NULL == dai) {
+ if (!dai) {
err = -ENOMEM;
goto error;
}
@@ -583,9 +583,9 @@ static int get_daio_rsc(struct daio_mgr *mgr,
return 0;
error:
- if (NULL != dao)
+ if (dao)
kfree(dao);
- else if (NULL != dai)
+ else if (dai)
kfree(dai);
spin_lock_irqsave(&mgr->mgr_lock, flags);
@@ -663,7 +663,7 @@ static int daio_imap_add(struct daio_mgr *mgr, struct imapper *entry)
int err;
spin_lock_irqsave(&mgr->imap_lock, flags);
- if ((0 == entry->addr) && (mgr->init_imap_added)) {
+ if (!entry->addr && mgr->init_imap_added) {
input_mapper_delete(&mgr->imappers, mgr->init_imap,
daio_map_op, mgr);
mgr->init_imap_added = 0;
@@ -707,7 +707,7 @@ int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
*rdaio_mgr = NULL;
daio_mgr = kzalloc(sizeof(*daio_mgr), GFP_KERNEL);
- if (NULL == daio_mgr)
+ if (!daio_mgr)
return -ENOMEM;
err = rsc_mgr_init(&daio_mgr->mgr, DAIO, DAIO_RESOURCE_NUM, hw);
@@ -718,7 +718,7 @@ int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
spin_lock_init(&daio_mgr->imap_lock);
INIT_LIST_HEAD(&daio_mgr->imappers);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (NULL == entry) {
+ if (!entry) {
err = -ENOMEM;
goto error2;
}
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index ad3e1d144464..0cf400f879f9 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -168,7 +168,7 @@ static int src_get_rsc_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -494,7 +494,7 @@ static int src_mgr_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -515,7 +515,7 @@ static int srcimp_mgr_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -702,7 +702,7 @@ static int amixer_rsc_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -723,7 +723,7 @@ static int amixer_mgr_get_ctrl_blk(void **rblk)
*rblk = NULL;
/*blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;*/
@@ -909,7 +909,7 @@ static int dai_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -958,7 +958,7 @@ static int dao_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -1152,7 +1152,7 @@ static int daio_mgr_get_ctrl_blk(struct hw *hw, void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
blk->i2sctl = hw_read_20kx(hw, I2SCTL);
@@ -1808,7 +1808,7 @@ static int uaa_to_xfi(struct pci_dev *pci)
/* By default, Hendrix card UAA Bar0 should be using memory... */
io_base = pci_resource_start(pci, 0);
mem_base = ioremap(io_base, pci_resource_len(pci, 0));
- if (NULL == mem_base)
+ if (!mem_base)
return -ENOENT;
/* Read current mode from Mode Change Register */
@@ -1977,7 +1977,7 @@ static int hw_card_shutdown(struct hw *hw)
hw->irq = -1;
- if (NULL != ((void *)hw->mem_base))
+ if (hw->mem_base)
iounmap((void *)hw->mem_base);
hw->mem_base = (unsigned long)NULL;
@@ -2274,7 +2274,7 @@ int __devinit create_20k1_hw_obj(struct hw **rhw)
*rhw = NULL;
hw20k1 = kzalloc(sizeof(*hw20k1), GFP_KERNEL);
- if (NULL == hw20k1)
+ if (!hw20k1)
return -ENOMEM;
spin_lock_init(&hw20k1->reg_20k1_lock);
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index dec46d04b041..b6b11bfe7574 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -166,7 +166,7 @@ static int src_get_rsc_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -492,7 +492,7 @@ static int src_mgr_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -513,7 +513,7 @@ static int srcimp_mgr_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -702,7 +702,7 @@ static int amixer_rsc_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -891,7 +891,7 @@ static int dai_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -941,7 +941,7 @@ static int dao_get_ctrl_blk(void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
*rblk = blk;
@@ -1092,7 +1092,7 @@ static int daio_mgr_get_ctrl_blk(struct hw *hw, void **rblk)
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
- if (NULL == blk)
+ if (!blk)
return -ENOMEM;
for (i = 0; i < 8; i++) {
@@ -1112,6 +1112,26 @@ static int daio_mgr_put_ctrl_blk(void *blk)
return 0;
}
+/* Timer interrupt */
+static int set_timer_irq(struct hw *hw, int enable)
+{
+ hw_write_20kx(hw, GIE, enable ? IT_INT : 0);
+ return 0;
+}
+
+static int set_timer_tick(struct hw *hw, unsigned int ticks)
+{
+ if (ticks)
+ ticks |= TIMR_IE | TIMR_IP;
+ hw_write_20kx(hw, TIMR, ticks);
+ return 0;
+}
+
+static unsigned int get_wc(struct hw *hw)
+{
+ return hw_read_20kx(hw, WC);
+}
+
/* Card hardware initialization block */
struct dac_conf {
unsigned int msr; /* master sample rate in rsrs */
@@ -1841,6 +1861,22 @@ static int hw_have_digit_io_switch(struct hw *hw)
return 0;
}
+static irqreturn_t ct_20k2_interrupt(int irq, void *dev_id)
+{
+ struct hw *hw = dev_id;
+ unsigned int status;
+
+ status = hw_read_20kx(hw, GIP);
+ if (!status)
+ return IRQ_NONE;
+
+ if (hw->irq_callback)
+ hw->irq_callback(hw->irq_callback_data, status);
+
+ hw_write_20kx(hw, GIP, status);
+ return IRQ_HANDLED;
+}
+
static int hw_card_start(struct hw *hw)
{
int err = 0;
@@ -1868,7 +1904,7 @@ static int hw_card_start(struct hw *hw)
hw->io_base = pci_resource_start(hw->pci, 2);
hw->mem_base = (unsigned long)ioremap(hw->io_base,
pci_resource_len(hw->pci, 2));
- if (NULL == (void *)hw->mem_base) {
+ if (!hw->mem_base) {
err = -ENOENT;
goto error2;
}
@@ -1879,12 +1915,15 @@ static int hw_card_start(struct hw *hw)
set_field(&gctl, GCTL_UAA, 0);
hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
- /*if ((err = request_irq(pci->irq, ct_atc_interrupt, IRQF_SHARED,
- atc->chip_details->nm_card, hw))) {
- goto error3;
+ if (hw->irq < 0) {
+ err = request_irq(pci->irq, ct_20k2_interrupt, IRQF_SHARED,
+ "ctxfi", hw);
+ if (err < 0) {
+ printk(KERN_ERR "XFi: Cannot get irq %d\n", pci->irq);
+ goto error2;
+ }
+ hw->irq = pci->irq;
}
- hw->irq = pci->irq;
- */
pci_set_master(pci);
@@ -1923,7 +1962,7 @@ static int hw_card_shutdown(struct hw *hw)
hw->irq = -1;
- if (NULL != ((void *)hw->mem_base))
+ if (hw->mem_base)
iounmap((void *)hw->mem_base);
hw->mem_base = (unsigned long)NULL;
@@ -1972,7 +2011,7 @@ static int hw_card_init(struct hw *hw, struct card_conf *info)
hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
/* Reset all global pending interrupts */
- hw_write_20kx(hw, INTERRUPT_GIE, 0);
+ hw_write_20kx(hw, GIE, 0);
/* Reset all SRC pending interrupts */
hw_write_20kx(hw, SRC_IP, 0);
@@ -2149,6 +2188,10 @@ static struct hw ct20k2_preset __devinitdata = {
.daio_mgr_set_imapnxt = daio_mgr_set_imapnxt,
.daio_mgr_set_imapaddr = daio_mgr_set_imapaddr,
.daio_mgr_commit_write = daio_mgr_commit_write,
+
+ .set_timer_irq = set_timer_irq,
+ .set_timer_tick = set_timer_tick,
+ .get_wc = get_wc,
};
int __devinit create_20k2_hw_obj(struct hw **rhw)
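
The 20k2 preset now exports the timer plumbing added above (set_timer_irq(),
set_timer_tick(), get_wc(), serviced by ct_20k2_interrupt()) through the struct hw ops.
A hedged sketch of how a caller might program a periodic tick with these ops, assuming
the wall clock counter (WC) runs at 48 kHz as elsewhere in ctxfi; the real consumer is
the ctxfi timer code, which is not shown here:

	static void program_tick(struct hw *hw, unsigned int hz)
	{
		/* count written to TIMR together with TIMR_IE|TIMR_IP */
		hw->set_timer_tick(hw, 48000 / hz);
		/* GIE <- IT_INT, so pending GIP bits reach ct_20k2_interrupt() */
		hw->set_timer_irq(hw, 1);
	}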
diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c
index f26d7cd9db9f..15c1e7271ea8 100644
--- a/sound/pci/ctxfi/ctmixer.c
+++ b/sound/pci/ctxfi/ctmixer.c
@@ -654,7 +654,7 @@ ct_mixer_kcontrol_new(struct ct_mixer *mixer, struct snd_kcontrol_new *new)
int err;
kctl = snd_ctl_new1(new, mixer->atc);
- if (NULL == kctl)
+ if (!kctl)
return -ENOMEM;
if (SNDRV_CTL_ELEM_IFACE_PCM == kctl->id.iface)
@@ -837,17 +837,17 @@ static int ct_mixer_get_mem(struct ct_mixer **rmixer)
*rmixer = NULL;
/* Allocate mem for mixer obj */
mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
- if (NULL == mixer)
+ if (!mixer)
return -ENOMEM;
mixer->amixers = kzalloc(sizeof(void *)*(NUM_CT_AMIXERS*CHN_NUM),
GFP_KERNEL);
- if (NULL == mixer->amixers) {
+ if (!mixer->amixers) {
err = -ENOMEM;
goto error1;
}
mixer->sums = kzalloc(sizeof(void *)*(NUM_CT_SUMS*CHN_NUM), GFP_KERNEL);
- if (NULL == mixer->sums) {
+ if (!mixer->sums) {
err = -ENOMEM;
goto error2;
}
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
index 60ea23180acb..d0dc227fbdd3 100644
--- a/sound/pci/ctxfi/ctpcm.c
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -97,7 +97,7 @@ static void ct_atc_pcm_interrupt(struct ct_atc_pcm *atc_pcm)
{
struct ct_atc_pcm *apcm = atc_pcm;
- if (NULL == apcm->substream)
+ if (!apcm->substream)
return;
snd_pcm_period_elapsed(apcm->substream);
@@ -123,7 +123,7 @@ static int ct_pcm_playback_open(struct snd_pcm_substream *substream)
int err;
apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
- if (NULL == apcm)
+ if (!apcm)
return -ENOMEM;
apcm->substream = substream;
@@ -271,7 +271,7 @@ static int ct_pcm_capture_open(struct snd_pcm_substream *substream)
int err;
apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
- if (NULL == apcm)
+ if (!apcm)
return -ENOMEM;
apcm->started = 0;
diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
index 889c495bb7d1..7dfaf67344d4 100644
--- a/sound/pci/ctxfi/ctresource.c
+++ b/sound/pci/ctxfi/ctresource.c
@@ -144,7 +144,7 @@ int rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, void *hw)
rsc->msr = msr;
rsc->hw = hw;
rsc->ops = &rsc_generic_ops;
- if (NULL == hw) {
+ if (!hw) {
rsc->ctrl_blk = NULL;
return 0;
}
@@ -216,7 +216,7 @@ int rsc_mgr_init(struct rsc_mgr *mgr, enum RSCTYP type,
mgr->type = NUM_RSCTYP;
mgr->rscs = kzalloc(((amount + 8 - 1) / 8), GFP_KERNEL);
- if (NULL == mgr->rscs)
+ if (!mgr->rscs)
return -ENOMEM;
switch (type) {
diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
index df43a5cd3938..c749fa720889 100644
--- a/sound/pci/ctxfi/ctsrc.c
+++ b/sound/pci/ctxfi/ctsrc.c
@@ -441,7 +441,7 @@ get_src_rsc(struct src_mgr *mgr, const struct src_desc *desc, struct src **rsrc)
else
src = kzalloc(sizeof(*src), GFP_KERNEL);
- if (NULL == src) {
+ if (!src) {
err = -ENOMEM;
goto error1;
}
@@ -550,7 +550,7 @@ int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr)
*rsrc_mgr = NULL;
src_mgr = kzalloc(sizeof(*src_mgr), GFP_KERNEL);
- if (NULL == src_mgr)
+ if (!src_mgr)
return -ENOMEM;
err = rsc_mgr_init(&src_mgr->mgr, SRC, SRC_RESOURCE_NUM, hw);
@@ -679,7 +679,7 @@ static int srcimp_rsc_init(struct srcimp *srcimp,
/* Reserve memory for imapper nodes */
srcimp->imappers = kzalloc(sizeof(struct imapper)*desc->msr,
GFP_KERNEL);
- if (NULL == srcimp->imappers) {
+ if (!srcimp->imappers) {
err = -ENOMEM;
goto error1;
}
@@ -833,7 +833,7 @@ int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrcimp_mgr)
*rsrcimp_mgr = NULL;
srcimp_mgr = kzalloc(sizeof(*srcimp_mgr), GFP_KERNEL);
- if (NULL == srcimp_mgr)
+ if (!srcimp_mgr)
return -ENOMEM;
err = rsc_mgr_init(&srcimp_mgr->mgr, SRCIMP, SRCIMP_RESOURCE_NUM, hw);
@@ -844,7 +844,7 @@ int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrcimp_mgr)
spin_lock_init(&srcimp_mgr->imap_lock);
INIT_LIST_HEAD(&srcimp_mgr->imappers);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (NULL == entry) {
+ if (!entry) {
err = -ENOMEM;
goto error2;
}
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 67665a7e43c6..6b78752e9503 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -60,7 +60,7 @@ get_vm_block(struct ct_vm *vm, unsigned int size)
}
block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (NULL == block)
+ if (!block)
goto out;
block->addr = entry->addr;
@@ -181,7 +181,7 @@ int ct_vm_create(struct ct_vm **rvm)
*rvm = NULL;
vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (NULL == vm)
+ if (!vm)
return -ENOMEM;
mutex_init(&vm->lock);
@@ -189,7 +189,7 @@ int ct_vm_create(struct ct_vm **rvm)
/* Allocate page table pages */
for (i = 0; i < CT_PTP_NUM; i++) {
vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (NULL == vm->ptp[i])
+ if (!vm->ptp[i])
break;
}
if (!i) {
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 04438f1d682d..55545e0818b5 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -46,6 +46,20 @@ config SND_HDA_INPUT_JACK
Say Y here to enable the jack plugging notification via
input layer.
+config SND_HDA_PATCH_LOADER
+ bool "Support initialization patch loading for HD-audio"
+ depends on EXPERIMENTAL
+ select FW_LOADER
+ select SND_HDA_HWDEP
+ select SND_HDA_RECONFIG
+ help
+ Say Y here to allow the HD-audio driver to load a pseudo
+ firmware file ("patch") for overriding the BIOS setup at
+ start up. The "patch" file can be specified via patch module
+ option, such as patch=hda-init.
+
+ This option turns on hwdep and reconfig features automatically.
+
config SND_HDA_CODEC_REALTEK
bool "Build Realtek HD-audio codec support"
default y
@@ -134,6 +148,19 @@ config SND_HDA_ELD
def_bool y
depends on SND_HDA_CODEC_INTELHDMI
+config SND_HDA_CODEC_CIRRUS
+ bool "Build Cirrus Logic codec support"
+ depends on SND_HDA_INTEL
+ default y
+ help
+ Say Y here to include Cirrus Logic codec support in
+ snd-hda-intel driver, such as CS4206.
+
+ When the HD-audio driver is built as a module, the codec
+ support code is also built as another module,
+ snd-hda-codec-cirrus.
+ This module is automatically loaded at probing.
+
config SND_HDA_CODEC_CONEXANT
bool "Build Conexant HD-audio codec support"
default y
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index e3081d4586cc..315a1c4f8998 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -13,6 +13,7 @@ snd-hda-codec-analog-objs := patch_analog.o
snd-hda-codec-idt-objs := patch_sigmatel.o
snd-hda-codec-si3054-objs := patch_si3054.o
snd-hda-codec-atihdmi-objs := patch_atihdmi.o
+snd-hda-codec-cirrus-objs := patch_cirrus.o
snd-hda-codec-ca0110-objs := patch_ca0110.o
snd-hda-codec-conexant-objs := patch_conexant.o
snd-hda-codec-via-objs := patch_via.o
@@ -41,6 +42,9 @@ endif
ifdef CONFIG_SND_HDA_CODEC_ATIHDMI
obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-atihdmi.o
endif
+ifdef CONFIG_SND_HDA_CODEC_CIRRUS
+obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-cirrus.o
+endif
ifdef CONFIG_SND_HDA_CODEC_CA0110
obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-ca0110.o
endif
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index b0275a050870..3f51a981e604 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -24,6 +24,7 @@
#include <linux/workqueue.h>
#include <sound/core.h>
#include "hda_beep.h"
+#include "hda_local.h"
enum {
DIGBEEP_HZ_STEP = 46875, /* 46.875 Hz */
@@ -118,6 +119,9 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
struct hda_beep *beep;
int err;
+ if (!snd_hda_get_bool_hint(codec, "beep"))
+ return 0; /* disabled explicitly */
+
beep = kzalloc(sizeof(*beep), GFP_KERNEL);
if (beep == NULL)
return -ENOMEM;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index c7df01b72cac..af989f660cca 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -44,6 +44,7 @@ struct hda_vendor_id {
/* codec vendor labels */
static struct hda_vendor_id hda_vendor_ids[] = {
{ 0x1002, "ATI" },
+ { 0x1013, "Cirrus Logic" },
{ 0x1057, "Motorola" },
{ 0x1095, "Silicon Image" },
{ 0x10de, "Nvidia" },
@@ -150,7 +151,14 @@ make_codec_cmd(struct hda_codec *codec, hda_nid_t nid, int direct,
{
u32 val;
- val = (u32)(codec->addr & 0x0f) << 28;
+ if ((codec->addr & ~0xf) || (direct & ~1) || (nid & ~0x7f) ||
+ (verb & ~0xfff) || (parm & ~0xffff)) {
+ printk(KERN_ERR "hda-codec: out of range cmd %x:%x:%x:%x:%x\n",
+ codec->addr, direct, nid, verb, parm);
+ return ~0;
+ }
+
+ val = (u32)codec->addr << 28;
val |= (u32)direct << 27;
val |= (u32)nid << 20;
val |= verb << 8;
@@ -167,6 +175,9 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd,
struct hda_bus *bus = codec->bus;
int err;
+ if (cmd == ~0)
+ return -1;
+
if (res)
*res = -1;
again:
@@ -291,11 +302,20 @@ int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
unsigned int parm;
int i, conn_len, conns;
unsigned int shift, num_elems, mask;
+ unsigned int wcaps;
hda_nid_t prev_nid;
if (snd_BUG_ON(!conn_list || max_conns <= 0))
return -EINVAL;
+ wcaps = get_wcaps(codec, nid);
+ if (!(wcaps & AC_WCAP_CONN_LIST) &&
+ get_wcaps_type(wcaps) != AC_WID_VOL_KNB) {
+ snd_printk(KERN_WARNING "hda_codec: "
+ "connection list not available for 0x%x\n", nid);
+ return -EINVAL;
+ }
+
parm = snd_hda_param_read(codec, nid, AC_PAR_CONNLIST_LEN);
if (parm & AC_CLIST_LONG) {
/* long form */
@@ -316,6 +336,8 @@ int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
/* single connection */
parm = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_CONNECT_LIST, 0);
+ if (parm == -1 && codec->bus->rirb_error)
+ return -EIO;
conn_list[0] = parm & mask;
return 1;
}
@@ -327,9 +349,12 @@ int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
int range_val;
hda_nid_t val, n;
- if (i % num_elems == 0)
+ if (i % num_elems == 0) {
parm = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_CONNECT_LIST, i);
+ if (parm == -1 && codec->bus->rirb_error)
+ return -EIO;
+ }
range_val = !!(parm & (1 << (shift-1))); /* ranges */
val = parm & mask;
if (val == 0) {
@@ -727,8 +752,7 @@ static int read_pin_defaults(struct hda_codec *codec)
for (i = 0; i < codec->num_nodes; i++, nid++) {
struct hda_pincfg *pin;
unsigned int wcaps = get_wcaps(codec, nid);
- unsigned int wid_type = (wcaps & AC_WCAP_TYPE) >>
- AC_WCAP_TYPE_SHIFT;
+ unsigned int wid_type = get_wcaps_type(wcaps);
if (wid_type != AC_WID_PIN)
continue;
pin = snd_array_new(&codec->init_pins);
@@ -891,7 +915,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
* Returns 0 if successful, or a negative error code.
*/
int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
- int do_init, struct hda_codec **codecp)
+ struct hda_codec **codecp)
{
struct hda_codec *codec;
char component[31];
@@ -984,11 +1008,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr
codec->afg ? codec->afg : codec->mfg,
AC_PWRST_D0);
- if (do_init) {
- err = snd_hda_codec_configure(codec);
- if (err < 0)
- goto error;
- }
snd_hda_codec_proc_new(codec);
snd_hda_create_hwdep(codec);
@@ -1042,6 +1061,7 @@ int snd_hda_codec_configure(struct hda_codec *codec)
err = init_unsol_queue(codec->bus);
return err;
}
+EXPORT_SYMBOL_HDA(snd_hda_codec_configure);
/**
* snd_hda_codec_setup_stream - set up the codec for streaming
@@ -2356,16 +2376,20 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
hda_nid_t nid;
int i;
- snd_hda_codec_write(codec, fg, 0, AC_VERB_SET_POWER_STATE,
+ /* this delay seems necessary to avoid click noise at power-down */
+ if (power_state == AC_PWRST_D3)
+ msleep(100);
+ snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
power_state);
- msleep(10); /* partial workaround for "azx_get_response timeout" */
+ /* partial workaround for "azx_get_response timeout" */
+ if (power_state == AC_PWRST_D0)
+ msleep(10);
nid = codec->start_nid;
for (i = 0; i < codec->num_nodes; i++, nid++) {
unsigned int wcaps = get_wcaps(codec, nid);
if (wcaps & AC_WCAP_POWER) {
- unsigned int wid_type = (wcaps & AC_WCAP_TYPE) >>
- AC_WCAP_TYPE_SHIFT;
+ unsigned int wid_type = get_wcaps_type(wcaps);
if (power_state == AC_PWRST_D3 &&
wid_type == AC_WID_PIN) {
unsigned int pincap;
@@ -2573,7 +2597,7 @@ unsigned int snd_hda_calc_stream_format(unsigned int rate,
case 20:
case 24:
case 32:
- if (maxbps >= 32)
+ if (maxbps >= 32 || format == SNDRV_PCM_FORMAT_FLOAT_LE)
val |= 0x40;
else if (maxbps >= 24)
val |= 0x30;
@@ -2700,11 +2724,12 @@ static int snd_hda_query_supported_pcm(struct hda_codec *codec, hda_nid_t nid,
bps = 20;
}
}
- else if (streams == AC_SUPFMT_FLOAT32) {
- /* should be exclusive */
+ if (streams & AC_SUPFMT_FLOAT32) {
formats |= SNDRV_PCM_FMTBIT_FLOAT_LE;
- bps = 32;
- } else if (streams == AC_SUPFMT_AC3) {
+ if (!bps)
+ bps = 32;
+ }
+ if (streams == AC_SUPFMT_AC3) {
/* should be exclusive */
/* temporary hack: we have still no proper support
* for the direct AC3 stream...
@@ -3102,7 +3127,7 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
tbl = q;
if (tbl->value >= 0 && tbl->value < num_configs) {
-#ifdef CONFIG_SND_DEBUG_DETECT
+#ifdef CONFIG_SND_DEBUG_VERBOSE
char tmp[10];
const char *model = NULL;
if (models)
@@ -3655,8 +3680,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
end_nid = codec->start_nid + codec->num_nodes;
for (nid = codec->start_nid; nid < end_nid; nid++) {
unsigned int wid_caps = get_wcaps(codec, nid);
- unsigned int wid_type =
- (wid_caps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ unsigned int wid_type = get_wcaps_type(wid_caps);
unsigned int def_conf;
short assoc, loc;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 1b75f28ed092..99552fb5f756 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -830,7 +830,8 @@ enum {
int snd_hda_bus_new(struct snd_card *card, const struct hda_bus_template *temp,
struct hda_bus **busp);
int snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
- int do_init, struct hda_codec **codecp);
+ struct hda_codec **codecp);
+int snd_hda_codec_configure(struct hda_codec *codec);
/*
* low level functions
@@ -938,6 +939,13 @@ static inline void snd_hda_power_down(struct hda_codec *codec) {}
#define snd_hda_codec_needs_resume(codec) 1
#endif
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+/*
+ * patch firmware
+ */
+int snd_hda_load_patch(struct hda_bus *bus, const char *patch);
+#endif
+
/*
* Codec modularization
*/
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 1d5797a96682..b36f6c5a92df 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -121,11 +121,17 @@ static int add_new_node(struct hda_codec *codec, struct hda_gspec *spec, hda_nid
if (node == NULL)
return -ENOMEM;
node->nid = nid;
- nconns = snd_hda_get_connections(codec, nid, conn_list,
- HDA_MAX_CONNECTIONS);
- if (nconns < 0) {
- kfree(node);
- return nconns;
+ node->wid_caps = get_wcaps(codec, nid);
+ node->type = get_wcaps_type(node->wid_caps);
+ if (node->wid_caps & AC_WCAP_CONN_LIST) {
+ nconns = snd_hda_get_connections(codec, nid, conn_list,
+ HDA_MAX_CONNECTIONS);
+ if (nconns < 0) {
+ kfree(node);
+ return nconns;
+ }
+ } else {
+ nconns = 0;
}
if (nconns <= ARRAY_SIZE(node->slist))
node->conn_list = node->slist;
@@ -140,8 +146,6 @@ static int add_new_node(struct hda_codec *codec, struct hda_gspec *spec, hda_nid
}
memcpy(node->conn_list, conn_list, nconns * sizeof(hda_nid_t));
node->nconns = nconns;
- node->wid_caps = get_wcaps(codec, nid);
- node->type = (node->wid_caps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
if (node->type == AC_WID_PIN) {
node->pin_caps = snd_hda_query_pin_caps(codec, node->nid);
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 6812fbe80fa4..cc24e6721d74 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -24,6 +24,7 @@
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
+#include <linux/firmware.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -312,12 +313,8 @@ static ssize_t init_verbs_show(struct device *dev,
return len;
}
-static ssize_t init_verbs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int parse_init_verbs(struct hda_codec *codec, const char *buf)
{
- struct snd_hwdep *hwdep = dev_get_drvdata(dev);
- struct hda_codec *codec = hwdep->private_data;
struct hda_verb *v;
int nid, verb, param;
@@ -331,6 +328,18 @@ static ssize_t init_verbs_store(struct device *dev,
v->nid = nid;
v->verb = verb;
v->param = param;
+ return 0;
+}
+
+static ssize_t init_verbs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct snd_hwdep *hwdep = dev_get_drvdata(dev);
+ struct hda_codec *codec = hwdep->private_data;
+ int err = parse_init_verbs(codec, buf);
+ if (err < 0)
+ return err;
return count;
}
@@ -376,19 +385,15 @@ static void remove_trail_spaces(char *str)
#define MAX_HINTS 1024
-static ssize_t hints_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int parse_hints(struct hda_codec *codec, const char *buf)
{
- struct snd_hwdep *hwdep = dev_get_drvdata(dev);
- struct hda_codec *codec = hwdep->private_data;
char *key, *val;
struct hda_hint *hint;
while (isspace(*buf))
buf++;
if (!*buf || *buf == '#' || *buf == '\n')
- return count;
+ return 0;
if (*buf == '=')
return -EINVAL;
key = kstrndup_noeol(buf, 1024);
@@ -411,7 +416,7 @@ static ssize_t hints_store(struct device *dev,
kfree(hint->key);
hint->key = key;
hint->val = val;
- return count;
+ return 0;
}
/* allocate a new hint entry */
if (codec->hints.used >= MAX_HINTS)
@@ -424,6 +429,18 @@ static ssize_t hints_store(struct device *dev,
}
hint->key = key;
hint->val = val;
+ return 0;
+}
+
+static ssize_t hints_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct snd_hwdep *hwdep = dev_get_drvdata(dev);
+ struct hda_codec *codec = hwdep->private_data;
+ int err = parse_hints(codec, buf);
+ if (err < 0)
+ return err;
return count;
}
@@ -469,20 +486,24 @@ static ssize_t driver_pin_configs_show(struct device *dev,
#define MAX_PIN_CONFIGS 32
-static ssize_t user_pin_configs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int parse_user_pin_configs(struct hda_codec *codec, const char *buf)
{
- struct snd_hwdep *hwdep = dev_get_drvdata(dev);
- struct hda_codec *codec = hwdep->private_data;
int nid, cfg;
- int err;
if (sscanf(buf, "%i %i", &nid, &cfg) != 2)
return -EINVAL;
if (!nid)
return -EINVAL;
- err = snd_hda_add_pincfg(codec, &codec->user_pins, nid, cfg);
+ return snd_hda_add_pincfg(codec, &codec->user_pins, nid, cfg);
+}
+
+static ssize_t user_pin_configs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct snd_hwdep *hwdep = dev_get_drvdata(dev);
+ struct hda_codec *codec = hwdep->private_data;
+ int err = parse_user_pin_configs(codec, buf);
if (err < 0)
return err;
return count;
@@ -553,3 +574,180 @@ int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key)
EXPORT_SYMBOL_HDA(snd_hda_get_bool_hint);
#endif /* CONFIG_SND_HDA_RECONFIG */
+
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+
+/* parser mode */
+enum {
+ LINE_MODE_NONE,
+ LINE_MODE_CODEC,
+ LINE_MODE_MODEL,
+ LINE_MODE_PINCFG,
+ LINE_MODE_VERB,
+ LINE_MODE_HINT,
+ NUM_LINE_MODES,
+};
+
+static inline int strmatch(const char *a, const char *b)
+{
+ return strnicmp(a, b, strlen(b)) == 0;
+}
+
+/* parse the contents after the line "[codec]"
+ * accept only the line with three numbers, and assign the current codec
+ */
+static void parse_codec_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ unsigned int vendorid, subid, caddr;
+ struct hda_codec *codec;
+
+ *codecp = NULL;
+ if (sscanf(buf, "%i %i %i", &vendorid, &subid, &caddr) == 3) {
+ list_for_each_entry(codec, &bus->codec_list, list) {
+ if (codec->addr == caddr) {
+ *codecp = codec;
+ break;
+ }
+ }
+ }
+}
+
+/* parse the contents after the other command tags, [pincfg], [verb],
+ * [hint] and [model]
+ * just pass to the sysfs helper (only when any codec was specified)
+ */
+static void parse_pincfg_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ if (!*codecp)
+ return;
+ parse_user_pin_configs(*codecp, buf);
+}
+
+static void parse_verb_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ if (!*codecp)
+ return;
+ parse_init_verbs(*codecp, buf);
+}
+
+static void parse_hint_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ if (!*codecp)
+ return;
+ parse_hints(*codecp, buf);
+}
+
+static void parse_model_mode(char *buf, struct hda_bus *bus,
+ struct hda_codec **codecp)
+{
+ if (!*codecp)
+ return;
+ kfree((*codecp)->modelname);
+ (*codecp)->modelname = kstrdup(buf, GFP_KERNEL);
+}
+
+struct hda_patch_item {
+ const char *tag;
+ void (*parser)(char *buf, struct hda_bus *bus, struct hda_codec **retc);
+};
+
+static struct hda_patch_item patch_items[NUM_LINE_MODES] = {
+ [LINE_MODE_CODEC] = { "[codec]", parse_codec_mode },
+ [LINE_MODE_MODEL] = { "[model]", parse_model_mode },
+ [LINE_MODE_VERB] = { "[verb]", parse_verb_mode },
+ [LINE_MODE_PINCFG] = { "[pincfg]", parse_pincfg_mode },
+ [LINE_MODE_HINT] = { "[hint]", parse_hint_mode },
+};
+
+/* check the line starting with '[' -- change the parser mode accordingly */
+static int parse_line_mode(char *buf, struct hda_bus *bus)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(patch_items); i++) {
+ if (!patch_items[i].tag)
+ continue;
+ if (strmatch(buf, patch_items[i].tag))
+ return i;
+ }
+ return LINE_MODE_NONE;
+}
+
+/* copy one line from the buffer in fw, and update the fields in fw
+ * return zero if it reaches the end of the buffer, or non-zero
+ * if a line was successfully copied
+ *
+ * the spaces at the beginning and the end of the line are stripped
+ */
+static int get_line_from_fw(char *buf, int size, struct firmware *fw)
+{
+ int len;
+ const char *p = fw->data;
+ while (isspace(*p) && fw->size) {
+ p++;
+ fw->size--;
+ }
+ if (!fw->size)
+ return 0;
+ if (size < fw->size)
+ size = fw->size;
+
+ for (len = 0; len < fw->size; len++) {
+ if (!*p)
+ break;
+ if (*p == '\n') {
+ p++;
+ len++;
+ break;
+ }
+ if (len < size)
+ *buf++ = *p++;
+ }
+ *buf = 0;
+ fw->size -= len;
+ fw->data = p;
+ remove_trail_spaces(buf);
+ return 1;
+}
+
+/*
+ * load a "patch" firmware file and parse it
+ */
+int snd_hda_load_patch(struct hda_bus *bus, const char *patch)
+{
+ int err;
+ const struct firmware *fw;
+ struct firmware tmp;
+ char buf[128];
+ struct hda_codec *codec;
+ int line_mode;
+ struct device *dev = bus->card->dev;
+
+ if (snd_BUG_ON(!dev))
+ return -ENODEV;
+ err = request_firmware(&fw, patch, dev);
+ if (err < 0) {
+ printk(KERN_ERR "hda-codec: Cannot load the patch '%s'\n",
+ patch);
+ return err;
+ }
+
+ tmp = *fw;
+ line_mode = LINE_MODE_NONE;
+ codec = NULL;
+ while (get_line_from_fw(buf, sizeof(buf) - 1, &tmp)) {
+ if (!*buf || *buf == '#' || *buf == '\n')
+ continue;
+ if (*buf == '[')
+ line_mode = parse_line_mode(buf, bus);
+ else if (patch_items[line_mode].parser)
+ patch_items[line_mode].parser(buf, bus, &codec);
+ }
+ release_firmware(fw);
+ return 0;
+}
+EXPORT_SYMBOL_HDA(snd_hda_load_patch);
+#endif /* CONFIG_SND_HDA_PATCH_LOADER */
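As a sketch of the format the loader above accepts (all IDs and values here are placeholders; the [verb] section, handled by parse_init_verbs() and not shown in these hunks, is omitted), a patch firmware file might look like:

    # select the codec at address 0; only the address is actually matched
    [codec]
    0x10134206 0x106b4d00 0

    [model]
    mbp55

    # one "nid value" pair per line, passed to parse_user_pin_configs()
    [pincfg]
    0x0a 0x90100121

    # "key = value" lines, passed to parse_hints()
    [hint]
    some_hint_key = yes

Tags are matched case-insensitively against patch_items[], and content lines seen before a [codec] section has selected a codec are silently ignored.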
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 175f07a381ba..20a66f85f0a4 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -61,6 +61,9 @@ static int probe_mask[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1};
static int probe_only[SNDRV_CARDS];
static int single_cmd;
static int enable_msi;
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+static char *patch[SNDRV_CARDS];
+#endif
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
@@ -84,6 +87,10 @@ MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs "
"(for debugging only).");
module_param(enable_msi, int, 0444);
MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)");
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+module_param_array(patch, charp, NULL, 0444);
+MODULE_PARM_DESC(patch, "Patch file for Intel HD audio interface.");
+#endif
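With this option in place, a per-card patch file is fetched through the normal firmware loader at probe time; as a hypothetical example, a file installed as /lib/firmware/hda-init.fw could be applied via modprobe configuration:

    options snd-hda-intel patch=hda-init.fw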
#ifdef CONFIG_SND_HDA_POWER_SAVE
static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
@@ -1331,8 +1338,7 @@ static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = {
[AZX_DRIVER_TERA] = 1,
};
-static int __devinit azx_codec_create(struct azx *chip, const char *model,
- int no_init)
+static int __devinit azx_codec_create(struct azx *chip, const char *model)
{
struct hda_bus_template bus_temp;
int c, codecs, err;
@@ -1391,7 +1397,7 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model,
for (c = 0; c < max_slots; c++) {
if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
struct hda_codec *codec;
- err = snd_hda_codec_new(chip->bus, c, !no_init, &codec);
+ err = snd_hda_codec_new(chip->bus, c, &codec);
if (err < 0)
continue;
codecs++;
@@ -1401,7 +1407,16 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model,
snd_printk(KERN_ERR SFX "no codecs initialized\n");
return -ENXIO;
}
+ return 0;
+}
+/* configure each codec instance */
+static int __devinit azx_codec_configure(struct azx *chip)
+{
+ struct hda_codec *codec;
+ list_for_each_entry(codec, &chip->bus->codec_list, list) {
+ snd_hda_codec_configure(codec);
+ }
return 0;
}
@@ -2284,6 +2299,30 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
}
}
+/*
+ * white-list for enable_msi
+ */
+static struct snd_pci_quirk msi_white_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x103c, 0x3607, "HP Compaq CQ40", 1),
+ {}
+};
+
+static void __devinit check_msi(struct azx *chip)
+{
+ const struct snd_pci_quirk *q;
+
+ chip->msi = enable_msi;
+ if (chip->msi)
+ return;
+ q = snd_pci_quirk_lookup(chip->pci, msi_white_list);
+ if (q) {
+ printk(KERN_INFO
+ "hda_intel: msi for device %04x:%04x set to %d\n",
+ q->subvendor, q->subdevice, q->value);
+ chip->msi = q->value;
+ }
+}
+
/*
* constructor
@@ -2318,7 +2357,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
chip->pci = pci;
chip->irq = -1;
chip->driver_type = driver_type;
- chip->msi = enable_msi;
+ check_msi(chip);
chip->dev_index = dev;
INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
@@ -2526,15 +2565,32 @@ static int __devinit azx_probe(struct pci_dev *pci,
return err;
}
+ /* set this here since it's referred to in snd_hda_load_patch() */
+ snd_card_set_dev(card, &pci->dev);
+
err = azx_create(card, pci, dev, pci_id->driver_data, &chip);
if (err < 0)
goto out_free;
card->private_data = chip;
/* create codec instances */
- err = azx_codec_create(chip, model[dev], probe_only[dev]);
+ err = azx_codec_create(chip, model[dev]);
if (err < 0)
goto out_free;
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+ if (patch[dev]) {
+ snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n",
+ patch[dev]);
+ err = snd_hda_load_patch(chip->bus, patch[dev]);
+ if (err < 0)
+ goto out_free;
+ }
+#endif
+ if (!probe_only[dev]) {
+ err = azx_codec_configure(chip);
+ if (err < 0)
+ goto out_free;
+ }
/* create PCM streams */
err = snd_hda_build_pcms(chip->bus);
@@ -2546,8 +2602,6 @@ static int __devinit azx_probe(struct pci_dev *pci,
if (err < 0)
goto out_free;
- snd_card_set_dev(card, &pci->dev);
-
err = snd_card_register(card);
if (err < 0)
goto out_free;
@@ -2649,11 +2703,15 @@ static struct pci_device_id azx_ids[] = {
/* this entry seems still valid -- i.e. without emu20kx chip */
{ PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_GENERIC },
#endif
- /* AMD Generic, PCI class code and Vendor ID for HD Audio */
+ /* AMD/ATI Generic, PCI class code and Vendor ID for HD Audio */
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
.class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
.class_mask = 0xffffff,
.driver_data = AZX_DRIVER_GENERIC },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID),
+ .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+ .class_mask = 0xffffff,
+ .driver_data = AZX_DRIVER_GENERIC },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, azx_ids);
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 83349013b4df..5f1dcc59002b 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -99,7 +99,6 @@ struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec,
int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
unsigned int *tlv, const char **slaves);
int snd_hda_codec_reset(struct hda_codec *codec);
-int snd_hda_codec_configure(struct hda_codec *codec);
/* amp value bits */
#define HDA_AMP_MUTE 0x80
@@ -408,6 +407,19 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
return codec->wcaps[nid - codec->start_nid];
}
+/* get the widget type from widget capability bits */
+#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
+
+static inline unsigned int get_wcaps_channels(u32 wcaps)
+{
+ unsigned int chans;
+
+ chans = (wcaps & AC_WCAP_CHAN_CNT_EXT) >> 13;
+ chans = ((chans << 1) | 1) + 1;
+
+ return chans;
+}
+
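/* a worked example of the decoding above: get_wcaps_channels() maps an
 * AC_WCAP_CHAN_CNT_EXT field of 0 to ((0 << 1) | 1) + 1 = 2 channels,
 * 1 to 4, 3 to 8, and the maximum field value 7 to 16 channels
 */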
u32 query_amp_caps(struct hda_codec *codec, hda_nid_t nid, int direction);
int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
unsigned int caps);
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 418c5d1badaa..95f24e4729f8 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -508,17 +508,14 @@ static void print_codec_info(struct snd_info_entry *entry,
unsigned int wid_caps =
snd_hda_param_read(codec, nid,
AC_PAR_AUDIO_WIDGET_CAP);
- unsigned int wid_type =
- (wid_caps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ unsigned int wid_type = get_wcaps_type(wid_caps);
hda_nid_t conn[HDA_MAX_CONNECTIONS];
int conn_len = 0;
snd_iprintf(buffer, "Node 0x%02x [%s] wcaps 0x%x:", nid,
get_wid_type_name(wid_type), wid_caps);
if (wid_caps & AC_WCAP_STEREO) {
- unsigned int chans;
- chans = (wid_caps & AC_WCAP_CHAN_CNT_EXT) >> 13;
- chans = ((chans << 1) | 1) + 1;
+ unsigned int chans = get_wcaps_channels(wid_caps);
if (chans == 2)
snd_iprintf(buffer, " Stereo");
else
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 403588c6e3f6..215e72a87113 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -2982,7 +2982,8 @@ static int patch_ad1988(struct hda_codec *codec)
board_config = snd_hda_check_board_config(codec, AD1988_MODEL_LAST,
ad1988_models, ad1988_cfg_tbl);
if (board_config < 0) {
- printk(KERN_INFO "hda_codec: Unknown model for AD1988, trying auto-probe from BIOS...\n");
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
board_config = AD1988_AUTO;
}
@@ -3702,19 +3703,29 @@ static struct hda_amp_list ad1884a_loopbacks[] = {
* Port F: Internal speakers
*/
-static struct hda_input_mux ad1884a_laptop_capture_source = {
- .num_items = 4,
- .items = {
- { "Mic", 0x0 }, /* port-B */
- { "Internal Mic", 0x1 }, /* port-C */
- { "Dock Mic", 0x4 }, /* port-E */
- { "Mix", 0x3 },
- },
-};
+static int ad1884a_mobile_master_sw_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ int ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
+ int mute = (!ucontrol->value.integer.value[0] &&
+ !ucontrol->value.integer.value[1]);
+ /* toggle GPIO1 according to the mute state */
+ snd_hda_codec_write_cache(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ mute ? 0x02 : 0x0);
+ return ret;
+}
static struct snd_kcontrol_new ad1884a_laptop_mixers[] = {
HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Master Playback Switch",
+ .info = snd_hda_mixer_amp_switch_info,
+ .get = snd_hda_mixer_amp_switch_get,
+ .put = ad1884a_mobile_master_sw_put,
+ .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
+ },
HDA_CODEC_MUTE("Dock Playback Switch", 0x12, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
@@ -3729,36 +3740,9 @@ static struct snd_kcontrol_new ad1884a_laptop_mixers[] = {
HDA_CODEC_VOLUME("Dock Mic Boost", 0x25, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer
- * So call somewhat different..
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 2,
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
{ } /* end */
};
-static int ad1884a_mobile_master_sw_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- int ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
- int mute = (!ucontrol->value.integer.value[0] &&
- !ucontrol->value.integer.value[1]);
- /* toggle GPIO1 according to the mute state */
- snd_hda_codec_write_cache(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
- mute ? 0x02 : 0x0);
- return ret;
-}
-
static struct snd_kcontrol_new ad1884a_mobile_mixers[] = {
HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
/*HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
@@ -3828,6 +3812,63 @@ static int ad1884a_hp_init(struct hda_codec *codec)
return 0;
}
+/* mute internal speaker if HP or docking HP is plugged */
+static void ad1884a_laptop_automute(struct hda_codec *codec)
+{
+ unsigned int present;
+
+ present = snd_hda_codec_read(codec, 0x11, 0, AC_VERB_GET_PIN_SENSE, 0);
+ present &= AC_PINSENSE_PRESENCE;
+ if (!present) {
+ present = snd_hda_codec_read(codec, 0x12, 0,
+ AC_VERB_GET_PIN_SENSE, 0);
+ present &= AC_PINSENSE_PRESENCE;
+ }
+ snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
+ HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
+ snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_EAPD_BTLENABLE,
+ present ? 0x00 : 0x02);
+}
+
+/* switch to external mic if plugged */
+static void ad1884a_laptop_automic(struct hda_codec *codec)
+{
+ unsigned int idx;
+
+ if (snd_hda_codec_read(codec, 0x14, 0, AC_VERB_GET_PIN_SENSE, 0) &
+ AC_PINSENSE_PRESENCE)
+ idx = 0;
+ else if (snd_hda_codec_read(codec, 0x1c, 0, AC_VERB_GET_PIN_SENSE, 0) &
+ AC_PINSENSE_PRESENCE)
+ idx = 4;
+ else
+ idx = 1;
+ snd_hda_codec_write(codec, 0x0c, 0, AC_VERB_SET_CONNECT_SEL, idx);
+}
+
+/* unsolicited event for HP jack sensing */
+static void ad1884a_laptop_unsol_event(struct hda_codec *codec,
+ unsigned int res)
+{
+ switch (res >> 26) {
+ case AD1884A_HP_EVENT:
+ ad1884a_laptop_automute(codec);
+ break;
+ case AD1884A_MIC_EVENT:
+ ad1884a_laptop_automic(codec);
+ break;
+ }
+}
+
+/* initialize jack-sensing, too */
+static int ad1884a_laptop_init(struct hda_codec *codec)
+{
+ ad198x_init(codec);
+ ad1884a_laptop_automute(codec);
+ ad1884a_laptop_automic(codec);
+ return 0;
+}
+
/* additional verbs for laptop model */
static struct hda_verb ad1884a_laptop_verbs[] = {
/* Port-A (HP) pin - always unmuted */
@@ -3844,11 +3885,19 @@ static struct hda_verb ad1884a_laptop_verbs[] = {
{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
{0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
+ /* Port-D (docking line-out) pin - default unmuted */
+ {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
/* analog mix */
{0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
/* unsolicited event for pin-sense */
{0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
+ {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
+ {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
+ /* allow to touch GPIO1 (for mute control) */
+ {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
+ {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
+ {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
{ } /* end */
};
@@ -4008,6 +4057,7 @@ static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30d0, "HP laptop", AD1884A_LAPTOP),
SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30e0, "HP laptop", AD1884A_LAPTOP),
SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP),
+ SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x7010, "HP laptop", AD1884A_MOBILE),
SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
{}
};
@@ -4057,9 +4107,8 @@ static int patch_ad1884a(struct hda_codec *codec)
spec->mixers[0] = ad1884a_laptop_mixers;
spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs;
spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1884a_laptop_capture_source;
- codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
- codec->patch_ops.init = ad1884a_hp_init;
+ codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
+ codec->patch_ops.init = ad1884a_laptop_init;
/* set the upper-limit for mixer amp to 0dB for avoiding the
* possible damage by overloading
*/
diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
index 233e4778bba9..fb684f00156b 100644
--- a/sound/pci/hda/patch_atihdmi.c
+++ b/sound/pci/hda/patch_atihdmi.c
@@ -141,8 +141,7 @@ static int atihdmi_build_pcms(struct hda_codec *codec)
/* FIXME: we must check ELD and change the PCM parameters dynamically
*/
chans = get_wcaps(codec, CVT_NID);
- chans = (chans & AC_WCAP_CHAN_CNT_EXT) >> 13;
- chans = ((chans << 1) | 1) + 1;
+ chans = get_wcaps_channels(chans);
info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max = chans;
return 0;
diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c
index 019ca7cb56d7..d08353d3bb7f 100644
--- a/sound/pci/hda/patch_ca0110.c
+++ b/sound/pci/hda/patch_ca0110.c
@@ -459,8 +459,7 @@ static void parse_input(struct hda_codec *codec)
nid = codec->start_nid;
for (i = 0; i < codec->num_nodes; i++, nid++) {
unsigned int wcaps = get_wcaps(codec, nid);
- unsigned int type = (wcaps & AC_WCAP_TYPE) >>
- AC_WCAP_TYPE_SHIFT;
+ unsigned int type = get_wcaps_type(wcaps);
if (type != AC_WID_AUD_IN)
continue;
if (snd_hda_get_connections(codec, nid, &pin, 1) != 1)
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
new file mode 100644
index 000000000000..8ba306856d38
--- /dev/null
+++ b/sound/pci/hda/patch_cirrus.c
@@ -0,0 +1,1194 @@
+/*
+ * HD audio interface patch for Cirrus Logic CS420x chip
+ *
+ * Copyright (c) 2009 Takashi Iwai <tiwai@suse.de>
+ *
+ * This driver is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <sound/core.h>
+#include "hda_codec.h"
+#include "hda_local.h"
+
+/*
+ */
+
+struct cs_spec {
+ int board_config;
+ struct auto_pin_cfg autocfg;
+ struct hda_multi_out multiout;
+ struct snd_kcontrol *vmaster_sw;
+ struct snd_kcontrol *vmaster_vol;
+
+ hda_nid_t dac_nid[AUTO_CFG_MAX_OUTS];
+ hda_nid_t slave_dig_outs[2];
+
+ unsigned int input_idx[AUTO_PIN_LAST];
+ unsigned int capsrc_idx[AUTO_PIN_LAST];
+ hda_nid_t adc_nid[AUTO_PIN_LAST];
+ unsigned int adc_idx[AUTO_PIN_LAST];
+ unsigned int num_inputs;
+ unsigned int cur_input;
+ unsigned int automic_idx;
+ hda_nid_t cur_adc;
+ unsigned int cur_adc_stream_tag;
+ unsigned int cur_adc_format;
+ hda_nid_t dig_in;
+
+ struct hda_bind_ctls *capture_bind[2];
+
+ unsigned int gpio_mask;
+ unsigned int gpio_dir;
+ unsigned int gpio_data;
+
+ struct hda_pcm pcm_rec[2]; /* PCM information */
+
+ unsigned int hp_detect:1;
+ unsigned int mic_detect:1;
+};
+
+/* available models */
+enum {
+ CS420X_MBP55,
+ CS420X_AUTO,
+ CS420X_MODELS
+};
+
+/* Vendor-specific processing widget */
+#define CS420X_VENDOR_NID 0x11
+#define CS_DIG_OUT1_PIN_NID 0x10
+#define CS_DIG_OUT2_PIN_NID 0x15
+#define CS_DMIC1_PIN_NID 0x12
+#define CS_DMIC2_PIN_NID 0x0e
+
+/* coef indices */
+#define IDX_SPDIF_STAT 0x0000
+#define IDX_SPDIF_CTL 0x0001
+#define IDX_ADC_CFG 0x0002
+/* SZC bitmask, 4 modes below:
+ * 0 = immediate,
+ * 1 = digital immediate, analog zero-cross
+ * 2 = digital & analog soft-ramp
+ * 3 = digital soft-ramp, analog zero-cross
+ */
+#define CS_COEF_ADC_SZC_MASK (3 << 0)
+#define CS_COEF_ADC_MIC_SZC_MODE (3 << 0) /* SZC setup for mic */
+#define CS_COEF_ADC_LI_SZC_MODE (3 << 0) /* SZC setup for line-in */
+/* PGA mode: 0 = differential, 1 = single-ended */
+#define CS_COEF_ADC_MIC_PGA_MODE (1 << 5) /* PGA setup for mic */
+#define CS_COEF_ADC_LI_PGA_MODE (1 << 6) /* PGA setup for line-in */
+#define IDX_DAC_CFG 0x0003
+/* SZC bitmask, 4 modes below:
+ * 0 = Immediate
+ * 1 = zero-cross
+ * 2 = soft-ramp
+ * 3 = soft-ramp on zero-cross
+ */
+#define CS_COEF_DAC_HP_SZC_MODE (3 << 0) /* nid 0x02 */
+#define CS_COEF_DAC_LO_SZC_MODE (3 << 2) /* nid 0x03 */
+#define CS_COEF_DAC_SPK_SZC_MODE (3 << 4) /* nid 0x04 */
+
+#define IDX_BEEP_CFG 0x0004
+/* 0x0008 - test reg key */
+/* 0x0009 - 0x0014 -> 12 test regs */
+/* 0x0015 - visibility reg */
+
+
+static inline int cs_vendor_coef_get(struct hda_codec *codec, unsigned int idx)
+{
+ snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0,
+ AC_VERB_SET_COEF_INDEX, idx);
+ return snd_hda_codec_read(codec, CS420X_VENDOR_NID, 0,
+ AC_VERB_GET_PROC_COEF, 0);
+}
+
+static inline void cs_vendor_coef_set(struct hda_codec *codec, unsigned int idx,
+ unsigned int coef)
+{
+ snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0,
+ AC_VERB_SET_COEF_INDEX, idx);
+ snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0,
+ AC_VERB_SET_PROC_COEF, coef);
+}
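/* illustrative sketch only (not part of this driver): the helpers above give
 * read-modify-write access to the vendor coefficients through the index/data
 * verb pair, e.g. switching the mic PGA to single-ended mode:
 */
static inline void cs_example_mic_single_ended(struct hda_codec *codec)
{
	unsigned int cfg = cs_vendor_coef_get(codec, IDX_ADC_CFG);
	cs_vendor_coef_set(codec, IDX_ADC_CFG, cfg | CS_COEF_ADC_MIC_PGA_MODE);
}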
+
+
+#define HP_EVENT 1
+#define MIC_EVENT 2
+
+/*
+ * PCM callbacks
+ */
+static int cs_playback_pcm_open(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
+ hinfo);
+}
+
+static int cs_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ unsigned int stream_tag,
+ unsigned int format,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
+ stream_tag, format, substream);
+}
+
+static int cs_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
+}
+
+/*
+ * Digital out
+ */
+static int cs_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ return snd_hda_multi_out_dig_open(codec, &spec->multiout);
+}
+
+static int cs_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ return snd_hda_multi_out_dig_close(codec, &spec->multiout);
+}
+
+static int cs_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ unsigned int stream_tag,
+ unsigned int format,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag,
+ format, substream);
+}
+
+static int cs_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
+}
+
+/*
+ * Analog capture
+ */
+static int cs_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ unsigned int stream_tag,
+ unsigned int format,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ spec->cur_adc = spec->adc_nid[spec->cur_input];
+ spec->cur_adc_stream_tag = stream_tag;
+ spec->cur_adc_format = format;
+ snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format);
+ return 0;
+}
+
+static int cs_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
+{
+ struct cs_spec *spec = codec->spec;
+ snd_hda_codec_cleanup_stream(codec, spec->cur_adc);
+ spec->cur_adc = 0;
+ return 0;
+}
+
+/*
+ */
+static struct hda_pcm_stream cs_pcm_analog_playback = {
+ .substreams = 1,
+ .channels_min = 2,
+ .channels_max = 2,
+ .ops = {
+ .open = cs_playback_pcm_open,
+ .prepare = cs_playback_pcm_prepare,
+ .cleanup = cs_playback_pcm_cleanup
+ },
+};
+
+static struct hda_pcm_stream cs_pcm_analog_capture = {
+ .substreams = 1,
+ .channels_min = 2,
+ .channels_max = 2,
+ .ops = {
+ .prepare = cs_capture_pcm_prepare,
+ .cleanup = cs_capture_pcm_cleanup
+ },
+};
+
+static struct hda_pcm_stream cs_pcm_digital_playback = {
+ .substreams = 1,
+ .channels_min = 2,
+ .channels_max = 2,
+ .ops = {
+ .open = cs_dig_playback_pcm_open,
+ .close = cs_dig_playback_pcm_close,
+ .prepare = cs_dig_playback_pcm_prepare,
+ .cleanup = cs_dig_playback_pcm_cleanup
+ },
+};
+
+static struct hda_pcm_stream cs_pcm_digital_capture = {
+ .substreams = 1,
+ .channels_min = 2,
+ .channels_max = 2,
+};
+
+static int cs_build_pcms(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct hda_pcm *info = spec->pcm_rec;
+
+ codec->pcm_info = info;
+ codec->num_pcms = 0;
+
+ info->name = "Cirrus Analog";
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK] = cs_pcm_analog_playback;
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dac_nid[0];
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
+ spec->multiout.max_channels;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE] = cs_pcm_analog_capture;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
+ spec->adc_nid[spec->cur_input];
+ codec->num_pcms++;
+
+ if (!spec->multiout.dig_out_nid && !spec->dig_in)
+ return 0;
+
+ info++;
+ info->name = "Cirrus Digital";
+ info->pcm_type = spec->autocfg.dig_out_type[0];
+ if (!info->pcm_type)
+ info->pcm_type = HDA_PCM_TYPE_SPDIF;
+ if (spec->multiout.dig_out_nid) {
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
+ cs_pcm_digital_playback;
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
+ spec->multiout.dig_out_nid;
+ }
+ if (spec->dig_in) {
+ info->stream[SNDRV_PCM_STREAM_CAPTURE] =
+ cs_pcm_digital_capture;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in;
+ }
+ codec->num_pcms++;
+
+ return 0;
+}
+
+/*
+ * parse codec topology
+ */
+
+static hda_nid_t get_dac(struct hda_codec *codec, hda_nid_t pin)
+{
+ hda_nid_t dac;
+ if (!pin)
+ return 0;
+ if (snd_hda_get_connections(codec, pin, &dac, 1) != 1)
+ return 0;
+ return dac;
+}
+
+static int is_ext_mic(struct hda_codec *codec, unsigned int idx)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ hda_nid_t pin = cfg->input_pins[idx];
+ unsigned int val = snd_hda_query_pin_caps(codec, pin);
+ if (!(val & AC_PINCAP_PRES_DETECT))
+ return 0;
+ val = snd_hda_codec_get_pincfg(codec, pin);
+ return (get_defcfg_connect(val) == AC_JACK_PORT_COMPLEX);
+}
+
+static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin,
+ unsigned int *idxp)
+{
+ int i;
+ hda_nid_t nid;
+
+ nid = codec->start_nid;
+ for (i = 0; i < codec->num_nodes; i++, nid++) {
+ hda_nid_t pins[2];
+ unsigned int type;
+ int j, nums;
+ type = (get_wcaps(codec, nid) & AC_WCAP_TYPE)
+ >> AC_WCAP_TYPE_SHIFT;
+ if (type != AC_WID_AUD_IN)
+ continue;
+ nums = snd_hda_get_connections(codec, nid, pins,
+ ARRAY_SIZE(pins));
+ if (nums <= 0)
+ continue;
+ for (j = 0; j < nums; j++) {
+ if (pins[j] == pin) {
+ *idxp = j;
+ return nid;
+ }
+ }
+ }
+ return 0;
+}
+
+static int is_active_pin(struct hda_codec *codec, hda_nid_t nid)
+{
+ unsigned int val;
+ val = snd_hda_codec_get_pincfg(codec, nid);
+ return (get_defcfg_connect(val) != AC_JACK_PORT_NONE);
+}
+
+static int parse_output(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i, extra_nids;
+ hda_nid_t dac;
+
+ for (i = 0; i < cfg->line_outs; i++) {
+ dac = get_dac(codec, cfg->line_out_pins[i]);
+ if (!dac)
+ break;
+ spec->dac_nid[i] = dac;
+ }
+ spec->multiout.num_dacs = i;
+ spec->multiout.dac_nids = spec->dac_nid;
+ spec->multiout.max_channels = i * 2;
+
+ /* add HP and speakers */
+ extra_nids = 0;
+ for (i = 0; i < cfg->hp_outs; i++) {
+ dac = get_dac(codec, cfg->hp_pins[i]);
+ if (!dac)
+ break;
+ if (!i)
+ spec->multiout.hp_nid = dac;
+ else
+ spec->multiout.extra_out_nid[extra_nids++] = dac;
+ }
+ for (i = 0; i < cfg->speaker_outs; i++) {
+ dac = get_dac(codec, cfg->speaker_pins[i]);
+ if (!dac)
+ break;
+ spec->multiout.extra_out_nid[extra_nids++] = dac;
+ }
+
+ if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
+ cfg->speaker_outs = cfg->line_outs;
+ memcpy(cfg->speaker_pins, cfg->line_out_pins,
+ sizeof(cfg->speaker_pins));
+ cfg->line_outs = 0;
+ }
+
+ return 0;
+}
+
+static int parse_input(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i;
+
+ for (i = 0; i < AUTO_PIN_LAST; i++) {
+ hda_nid_t pin = cfg->input_pins[i];
+ if (!pin)
+ continue;
+ spec->input_idx[spec->num_inputs] = i;
+ spec->capsrc_idx[i] = spec->num_inputs++;
+ spec->cur_input = i;
+ spec->adc_nid[i] = get_adc(codec, pin, &spec->adc_idx[i]);
+ }
+ if (!spec->num_inputs)
+ return 0;
+
+ /* check whether the automatic mic switch is available */
+ if (spec->num_inputs == 2 &&
+ spec->adc_nid[AUTO_PIN_MIC] && spec->adc_nid[AUTO_PIN_FRONT_MIC]) {
+ if (is_ext_mic(codec, cfg->input_pins[AUTO_PIN_FRONT_MIC])) {
+ if (!is_ext_mic(codec, cfg->input_pins[AUTO_PIN_MIC])) {
+ spec->mic_detect = 1;
+ spec->automic_idx = AUTO_PIN_FRONT_MIC;
+ }
+ } else {
+ if (is_ext_mic(codec, cfg->input_pins[AUTO_PIN_MIC])) {
+ spec->mic_detect = 1;
+ spec->automic_idx = AUTO_PIN_MIC;
+ }
+ }
+ }
+ return 0;
+}
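/* note: input_idx[] maps a capture-source enum position back to its
 * AUTO_PIN index, while capsrc_idx[] is the inverse mapping; the
 * cs_capture_source_*() controls below rely on both tables
 */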
+
+
+static int parse_digital_output(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ hda_nid_t nid;
+
+ if (!cfg->dig_outs)
+ return 0;
+ if (snd_hda_get_connections(codec, cfg->dig_out_pins[0], &nid, 1) < 1)
+ return 0;
+ spec->multiout.dig_out_nid = nid;
+ spec->multiout.share_spdif = 1;
+ if (cfg->dig_outs > 1 &&
+ snd_hda_get_connections(codec, cfg->dig_out_pins[1], &nid, 1) > 0) {
+ spec->slave_dig_outs[0] = nid;
+ codec->slave_dig_outs = spec->slave_dig_outs;
+ }
+ return 0;
+}
+
+static int parse_digital_input(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int idx;
+
+ if (cfg->dig_in_pin)
+ spec->dig_in = get_adc(codec, cfg->dig_in_pin, &idx);
+ return 0;
+}
+
+/*
+ * create mixer controls
+ */
+
+static const char *dir_sfx[2] = { "Playback", "Capture" };
+
+static int add_mute(struct hda_codec *codec, const char *name, int index,
+ unsigned int pval, int dir, struct snd_kcontrol **kctlp)
+{
+ char tmp[44];
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_MUTE_IDX(tmp, index, 0, 0, HDA_OUTPUT);
+ knew.private_value = pval;
+ snprintf(tmp, sizeof(tmp), "%s %s Switch", name, dir_sfx[dir]);
+ *kctlp = snd_ctl_new1(&knew, codec);
+ return snd_hda_ctl_add(codec, *kctlp);
+}
+
+static int add_volume(struct hda_codec *codec, const char *name,
+ int index, unsigned int pval, int dir,
+ struct snd_kcontrol **kctlp)
+{
+ char tmp[32];
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);
+ knew.private_value = pval;
+ snprintf(tmp, sizeof(tmp), "%s %s Volume", name, dir_sfx[dir]);
+ *kctlp = snd_ctl_new1(&knew, codec);
+ return snd_hda_ctl_add(codec, *kctlp);
+}
+
+static void fix_volume_caps(struct hda_codec *codec, hda_nid_t dac)
+{
+ unsigned int caps;
+
+ /* set the upper-limit for mixer amp to 0dB */
+ caps = query_amp_caps(codec, dac, HDA_OUTPUT);
+ caps &= ~(0x7f << AC_AMPCAP_NUM_STEPS_SHIFT);
+ caps |= ((caps >> AC_AMPCAP_OFFSET_SHIFT) & 0x7f)
+ << AC_AMPCAP_NUM_STEPS_SHIFT;
+ snd_hda_override_amp_caps(codec, dac, HDA_OUTPUT, caps);
+}
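/* note: AC_AMPCAP_NUM_STEPS holds the highest raw gain value and
 * AC_AMPCAP_OFFSET the raw value corresponding to 0 dB, so copying the
 * offset into the num-steps field above clamps the maximum gain at 0 dB
 */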
+
+static int add_vmaster(struct hda_codec *codec, hda_nid_t dac)
+{
+ struct cs_spec *spec = codec->spec;
+ unsigned int tlv[4];
+ int err;
+
+ spec->vmaster_sw =
+ snd_ctl_make_virtual_master("Master Playback Switch", NULL);
+ err = snd_hda_ctl_add(codec, spec->vmaster_sw);
+ if (err < 0)
+ return err;
+
+ snd_hda_set_vmaster_tlv(codec, dac, HDA_OUTPUT, tlv);
+ spec->vmaster_vol =
+ snd_ctl_make_virtual_master("Master Playback Volume", tlv);
+ err = snd_hda_ctl_add(codec, spec->vmaster_vol);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static int add_output(struct hda_codec *codec, hda_nid_t dac, int idx,
+ int num_ctls, int type)
+{
+ struct cs_spec *spec = codec->spec;
+ const char *name;
+ int err, index;
+ struct snd_kcontrol *kctl;
+ static char *speakers[] = {
+ "Front Speaker", "Surround Speaker", "Bass Speaker"
+ };
+ static char *line_outs[] = {
+ "Front Line-Out", "Surround Line-Out", "Bass Line-Out"
+ };
+
+ fix_volume_caps(codec, dac);
+ if (!spec->vmaster_sw) {
+ err = add_vmaster(codec, dac);
+ if (err < 0)
+ return err;
+ }
+
+ index = 0;
+ switch (type) {
+ case AUTO_PIN_HP_OUT:
+ name = "Headphone";
+ index = idx;
+ break;
+ case AUTO_PIN_SPEAKER_OUT:
+ if (num_ctls > 1)
+ name = speakers[idx];
+ else
+ name = "Speaker";
+ break;
+ default:
+ if (num_ctls > 1)
+ name = line_outs[idx];
+ else
+ name = "Line-Out";
+ break;
+ }
+
+ err = add_mute(codec, name, index,
+ HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
+ if (err < 0)
+ return err;
+ err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
+ if (err < 0)
+ return err;
+
+ err = add_volume(codec, name, index,
+ HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
+ if (err < 0)
+ return err;
+ err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int build_output(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i, err;
+
+ for (i = 0; i < cfg->line_outs; i++) {
+ err = add_output(codec, get_dac(codec, cfg->line_out_pins[i]),
+ i, cfg->line_outs, cfg->line_out_type);
+ if (err < 0)
+ return err;
+ }
+ for (i = 0; i < cfg->hp_outs; i++) {
+ err = add_output(codec, get_dac(codec, cfg->hp_pins[i]),
+ i, cfg->hp_outs, AUTO_PIN_HP_OUT);
+ if (err < 0)
+ return err;
+ }
+ for (i = 0; i < cfg->speaker_outs; i++) {
+ err = add_output(codec, get_dac(codec, cfg->speaker_pins[i]),
+ i, cfg->speaker_outs, AUTO_PIN_SPEAKER_OUT);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ */
+
+static struct snd_kcontrol_new cs_capture_ctls[] = {
+ HDA_BIND_SW("Capture Switch", 0),
+ HDA_BIND_VOL("Capture Volume", 0),
+};
+
+static int change_cur_input(struct hda_codec *codec, unsigned int idx,
+ int force)
+{
+ struct cs_spec *spec = codec->spec;
+
+ if (spec->cur_input == idx && !force)
+ return 0;
+ if (spec->cur_adc && spec->cur_adc != spec->adc_nid[idx]) {
+ /* stream is running, let's swap the current ADC */
+ snd_hda_codec_cleanup_stream(codec, spec->cur_adc);
+ spec->cur_adc = spec->adc_nid[idx];
+ snd_hda_codec_setup_stream(codec, spec->cur_adc,
+ spec->cur_adc_stream_tag, 0,
+ spec->cur_adc_format);
+ }
+ snd_hda_codec_write(codec, spec->cur_adc, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ spec->adc_idx[idx]);
+ spec->cur_input = idx;
+ return 1;
+}
+
+static int cs_capture_source_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct cs_spec *spec = codec->spec;
+ unsigned int idx;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = spec->num_inputs;
+ if (uinfo->value.enumerated.item >= spec->num_inputs)
+ uinfo->value.enumerated.item = spec->num_inputs - 1;
+ idx = spec->input_idx[uinfo->value.enumerated.item];
+ strcpy(uinfo->value.enumerated.name, auto_pin_cfg_labels[idx]);
+ return 0;
+}
+
+static int cs_capture_source_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct cs_spec *spec = codec->spec;
+ ucontrol->value.enumerated.item[0] = spec->capsrc_idx[spec->cur_input];
+ return 0;
+}
+
+static int cs_capture_source_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct cs_spec *spec = codec->spec;
+ unsigned int idx = ucontrol->value.enumerated.item[0];
+
+ if (idx >= spec->num_inputs)
+ return -EINVAL;
+ idx = spec->input_idx[idx];
+ return change_cur_input(codec, idx, 0);
+}
+
+static struct snd_kcontrol_new cs_capture_source = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Capture Source",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = cs_capture_source_info,
+ .get = cs_capture_source_get,
+ .put = cs_capture_source_put,
+};
+
+static struct hda_bind_ctls *make_bind_capture(struct hda_codec *codec,
+ struct hda_ctl_ops *ops)
+{
+ struct cs_spec *spec = codec->spec;
+ struct hda_bind_ctls *bind;
+ int i, n;
+
+ bind = kzalloc(sizeof(*bind) + sizeof(long) * (spec->num_inputs + 1),
+ GFP_KERNEL);
+ if (!bind)
+ return NULL;
+ bind->ops = ops;
+ n = 0;
+ for (i = 0; i < AUTO_PIN_LAST; i++) {
+ if (!spec->adc_nid[i])
+ continue;
+ bind->values[n++] =
+ HDA_COMPOSE_AMP_VAL(spec->adc_nid[i], 3,
+ spec->adc_idx[i], HDA_INPUT);
+ }
+ return bind;
+}
+
+static int build_input(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ int i, err;
+
+ if (!spec->num_inputs)
+ return 0;
+
+ /* make bind-capture */
+ spec->capture_bind[0] = make_bind_capture(codec, &snd_hda_bind_sw);
+ spec->capture_bind[1] = make_bind_capture(codec, &snd_hda_bind_vol);
+ for (i = 0; i < 2; i++) {
+ struct snd_kcontrol *kctl;
+ if (!spec->capture_bind[i])
+ return -ENOMEM;
+ kctl = snd_ctl_new1(&cs_capture_ctls[i], codec);
+ if (!kctl)
+ return -ENOMEM;
+ kctl->private_value = (long)spec->capture_bind[i];
+ err = snd_hda_ctl_add(codec, kctl);
+ if (err < 0)
+ return err;
+ }
+
+ if (spec->num_inputs > 1 && !spec->mic_detect) {
+ err = snd_hda_ctl_add(codec,
+ snd_ctl_new1(&cs_capture_source, codec));
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ */
+
+static int build_digital_output(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ int err;
+
+ if (!spec->multiout.dig_out_nid)
+ return 0;
+
+ err = snd_hda_create_spdif_out_ctls(codec, spec->multiout.dig_out_nid);
+ if (err < 0)
+ return err;
+ err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static int build_digital_input(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ if (spec->dig_in)
+ return snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
+ return 0;
+}
+
+/*
+ * auto-mute and auto-mic switching
+ */
+
+static void cs_automute(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ unsigned int caps, present, hp_present;
+ hda_nid_t nid;
+ int i;
+
+ hp_present = 0;
+ for (i = 0; i < cfg->hp_outs; i++) {
+ nid = cfg->hp_pins[i];
+ caps = snd_hda_query_pin_caps(codec, nid);
+ if (!(caps & AC_PINCAP_PRES_DETECT))
+ continue;
+ if (caps & AC_PINCAP_TRIG_REQ)
+ snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_SET_PIN_SENSE, 0);
+ present = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_PIN_SENSE, 0);
+ hp_present |= (present & AC_PINSENSE_PRESENCE) != 0;
+ if (hp_present)
+ break;
+ }
+ for (i = 0; i < cfg->speaker_outs; i++) {
+ nid = cfg->speaker_pins[i];
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL,
+ hp_present ? 0 : PIN_OUT);
+ }
+ if (spec->board_config == CS420X_MBP55) {
+ unsigned int gpio = hp_present ? 0x02 : 0x08;
+ snd_hda_codec_write(codec, 0x01, 0,
+ AC_VERB_SET_GPIO_DATA, gpio);
+ }
+}
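/* note: on the MBP55 model the GPIO data value 0x02 drives GPIO1 and 0x08
 * drives GPIO3, matching the gpio_mask/gpio_dir of 0x0a set up in
 * patch_cs420x() below (GPIO1 = headphones, GPIO3 = speakers)
 */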
+
+static void cs_automic(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ hda_nid_t nid;
+ unsigned int caps, present;
+
+ nid = cfg->input_pins[spec->automic_idx];
+ caps = snd_hda_query_pin_caps(codec, nid);
+ if (caps & AC_PINCAP_TRIG_REQ)
+ snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
+ present = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_PIN_SENSE, 0);
+ if (present & AC_PINSENSE_PRESENCE)
+ change_cur_input(codec, spec->automic_idx, 0);
+ else {
+ unsigned int imic = (spec->automic_idx == AUTO_PIN_MIC) ?
+ AUTO_PIN_FRONT_MIC : AUTO_PIN_MIC;
+ change_cur_input(codec, imic, 0);
+ }
+}
+
+/*
+ */
+
+static void init_output(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i;
+
+ /* mute first */
+ for (i = 0; i < spec->multiout.num_dacs; i++)
+ snd_hda_codec_write(codec, spec->multiout.dac_nids[i], 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ if (spec->multiout.hp_nid)
+ snd_hda_codec_write(codec, spec->multiout.hp_nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ for (i = 0; i < ARRAY_SIZE(spec->multiout.extra_out_nid); i++) {
+ if (!spec->multiout.extra_out_nid[i])
+ break;
+ snd_hda_codec_write(codec, spec->multiout.extra_out_nid[i], 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ }
+
+ /* set appropriate pin controls */
+ for (i = 0; i < cfg->line_outs; i++)
+ snd_hda_codec_write(codec, cfg->line_out_pins[i], 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ for (i = 0; i < cfg->hp_outs; i++) {
+ hda_nid_t nid = cfg->hp_pins[i];
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
+ if (!cfg->speaker_outs)
+ continue;
+ if (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) {
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_UNSOLICITED_ENABLE,
+ AC_USRSP_EN | HP_EVENT);
+ spec->hp_detect = 1;
+ }
+ }
+ for (i = 0; i < cfg->speaker_outs; i++)
+ snd_hda_codec_write(codec, cfg->speaker_pins[i], 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ if (spec->hp_detect)
+ cs_automute(codec);
+}
+
+static void init_input(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ unsigned int coef;
+ int i;
+
+ for (i = 0; i < AUTO_PIN_LAST; i++) {
+ unsigned int ctl;
+ hda_nid_t pin = cfg->input_pins[i];
+ if (!pin || !spec->adc_nid[i])
+ continue;
+ /* set appropriate pin control and mute first */
+ ctl = PIN_IN;
+ if (i <= AUTO_PIN_FRONT_MIC) {
+ unsigned int caps = snd_hda_query_pin_caps(codec, pin);
+ caps >>= AC_PINCAP_VREF_SHIFT;
+ if (caps & AC_PINCAP_VREF_80)
+ ctl = PIN_VREF80;
+ }
+ snd_hda_codec_write(codec, pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, ctl);
+ snd_hda_codec_write(codec, spec->adc_nid[i], 0,
+ AC_VERB_SET_AMP_GAIN_MUTE,
+ AMP_IN_MUTE(spec->adc_idx[i]));
+ if (spec->mic_detect && spec->automic_idx == i)
+ snd_hda_codec_write(codec, pin, 0,
+ AC_VERB_SET_UNSOLICITED_ENABLE,
+ AC_USRSP_EN | MIC_EVENT);
+ }
+ change_cur_input(codec, spec->cur_input, 1);
+ if (spec->mic_detect)
+ cs_automic(codec);
+
+ coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
+ if (is_active_pin(codec, CS_DMIC2_PIN_NID))
+ coef |= 0x0500; /* DMIC2 enable 2 channels, disable GPIO1 */
+ if (is_active_pin(codec, CS_DMIC1_PIN_NID))
+ coef |= 0x1800; /* DMIC1 enable 2 channels, disable GPIO0
+ * No effect if SPDIF_OUT2 is selected in
+ * IDX_SPDIF_CTL.
+ */
+ cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
+}
+
+static struct hda_verb cs_coef_init_verbs[] = {
+ {0x11, AC_VERB_SET_PROC_STATE, 1},
+ {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
+ {0x11, AC_VERB_SET_PROC_COEF,
+ (0x002a /* DAC1/2/3 SZCMode Soft Ramp */
+ | 0x0040 /* Mute DACs on FIFO error */
+ | 0x1000 /* Enable DACs High Pass Filter */
+ | 0x0400 /* Disable Coefficient Auto increment */
+ )},
+ /* Beep */
+ {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */
+
+ {} /* terminator */
+};
+
+/* SPDIF setup */
+static void init_digital(struct hda_codec *codec)
+{
+ unsigned int coef;
+
+ coef = 0x0002; /* SRC_MUTE soft-mute on SPDIF (if no lock) */
+ coef |= 0x0008; /* Replace with mute on error */
+ if (is_active_pin(codec, CS_DIG_OUT2_PIN_NID))
+ coef |= 0x4000; /* RX to TX1 or TX2 Loopthru / SPDIF2
+ * SPDIF_OUT2 is shared with GPIO1 and
+ * DMIC_SDA2.
+ */
+ cs_vendor_coef_set(codec, IDX_SPDIF_CTL, coef);
+}
+
+static int cs_init(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+
+ snd_hda_sequence_write(codec, cs_coef_init_verbs);
+
+ if (spec->gpio_mask) {
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK,
+ spec->gpio_mask);
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION,
+ spec->gpio_dir);
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ spec->gpio_data);
+ }
+
+ init_output(codec);
+ init_input(codec);
+ init_digital(codec);
+ return 0;
+}
+
+static int cs_build_controls(struct hda_codec *codec)
+{
+ int err;
+
+ err = build_output(codec);
+ if (err < 0)
+ return err;
+ err = build_input(codec);
+ if (err < 0)
+ return err;
+ err = build_digital_output(codec);
+ if (err < 0)
+ return err;
+ err = build_digital_input(codec);
+ if (err < 0)
+ return err;
+ return cs_init(codec);
+}
+
+static void cs_free(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ kfree(spec->capture_bind[0]);
+ kfree(spec->capture_bind[1]);
+ kfree(codec->spec);
+}
+
+static void cs_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+ switch ((res >> 26) & 0x7f) {
+ case HP_EVENT:
+ cs_automute(codec);
+ break;
+ case MIC_EVENT:
+ cs_automic(codec);
+ break;
+ }
+}
+
+static struct hda_codec_ops cs_patch_ops = {
+ .build_controls = cs_build_controls,
+ .build_pcms = cs_build_pcms,
+ .init = cs_init,
+ .free = cs_free,
+ .unsol_event = cs_unsol_event,
+};
+
+static int cs_parse_auto_config(struct hda_codec *codec)
+{
+ struct cs_spec *spec = codec->spec;
+ int err;
+
+ err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
+ if (err < 0)
+ return err;
+
+ err = parse_output(codec);
+ if (err < 0)
+ return err;
+ err = parse_input(codec);
+ if (err < 0)
+ return err;
+ err = parse_digital_output(codec);
+ if (err < 0)
+ return err;
+ err = parse_digital_input(codec);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static const char *cs420x_models[CS420X_MODELS] = {
+ [CS420X_MBP55] = "mbp55",
+ [CS420X_AUTO] = "auto",
+};
+
+
+static struct snd_pci_quirk cs420x_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
+ {} /* terminator */
+};
+
+struct cs_pincfg {
+ hda_nid_t nid;
+ u32 val;
+};
+
+static struct cs_pincfg mbp55_pincfgs[] = {
+ { 0x09, 0x012b4030 },
+ { 0x0a, 0x90100121 },
+ { 0x0b, 0x90100120 },
+ { 0x0c, 0x400000f0 },
+ { 0x0d, 0x90a00110 },
+ { 0x0e, 0x400000f0 },
+ { 0x0f, 0x400000f0 },
+ { 0x10, 0x014be040 },
+ { 0x12, 0x400000f0 },
+ { 0x15, 0x400000f0 },
+ {} /* terminator */
+};
+
+static struct cs_pincfg *cs_pincfgs[CS420X_MODELS] = {
+ [CS420X_MBP55] = mbp55_pincfgs,
+};
+
+static void fix_pincfg(struct hda_codec *codec, int model)
+{
+ const struct cs_pincfg *cfg = cs_pincfgs[model];
+ if (!cfg)
+ return;
+ for (; cfg->nid; cfg++)
+ snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
+}
+
+
+static int patch_cs420x(struct hda_codec *codec)
+{
+ struct cs_spec *spec;
+ int err;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ codec->spec = spec;
+
+ spec->board_config =
+ snd_hda_check_board_config(codec, CS420X_MODELS,
+ cs420x_models, cs420x_cfg_tbl);
+ if (spec->board_config >= 0)
+ fix_pincfg(codec, spec->board_config);
+
+ switch (spec->board_config) {
+ case CS420X_MBP55:
+ /* GPIO1 = headphones */
+ /* GPIO3 = speakers */
+ spec->gpio_mask = 0x0a;
+ spec->gpio_dir = 0x0a;
+ break;
+ }
+
+ err = cs_parse_auto_config(codec);
+ if (err < 0)
+ goto error;
+
+ codec->patch_ops = cs_patch_ops;
+
+ return 0;
+
+ error:
+ kfree(codec->spec);
+ codec->spec = NULL;
+ return err;
+}
+
+
+/*
+ * patch entries
+ */
+static struct hda_codec_preset snd_hda_preset_cirrus[] = {
+ { .id = 0x10134206, .name = "CS4206", .patch = patch_cs420x },
+ { .id = 0x10134207, .name = "CS4207", .patch = patch_cs420x },
+ {} /* terminator */
+};
+
+MODULE_ALIAS("snd-hda-codec-id:10134206");
+MODULE_ALIAS("snd-hda-codec-id:10134207");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cirrus Logic HD-audio codec");
+
+static struct hda_codec_preset_list cirrus_list = {
+ .preset = snd_hda_preset_cirrus,
+ .owner = THIS_MODULE,
+};
+
+static int __init patch_cirrus_init(void)
+{
+ return snd_hda_add_codec_preset(&cirrus_list);
+}
+
+static void __exit patch_cirrus_exit(void)
+{
+ snd_hda_delete_codec_preset(&cirrus_list);
+}
+
+module_init(patch_cirrus_init)
+module_exit(patch_cirrus_exit)
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index c921264bbd71..780e1a72114a 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -635,7 +635,8 @@ static int patch_cmi9880(struct hda_codec *codec)
cmi9880_models,
cmi9880_cfg_tbl);
if (spec->board_config < 0) {
- snd_printdd(KERN_INFO "hda_codec: Unknown model for CMI9880\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
spec->board_config = CMI_AUTO; /* try everything */
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index ac868c59f9e3..9d899eda44d7 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -108,6 +108,8 @@ struct conexant_spec {
struct hda_input_mux private_imux;
hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
+ unsigned int dell_automute;
+ unsigned int port_d_mode;
};
static int conexant_playback_pcm_open(struct hda_pcm_stream *hinfo,
@@ -1908,6 +1910,480 @@ static int patch_cxt5051(struct hda_codec *codec)
return 0;
}
+/* Conexant 5066 specific */
+
+static hda_nid_t cxt5066_dac_nids[1] = { 0x10 };
+static hda_nid_t cxt5066_adc_nids[3] = { 0x14, 0x15, 0x16 };
+static hda_nid_t cxt5066_capsrc_nids[1] = { 0x17 };
+#define CXT5066_SPDIF_OUT 0x21
+
+static struct hda_channel_mode cxt5066_modes[1] = {
+ { 2, NULL },
+};
+
+static void cxt5066_update_speaker(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+ unsigned int pinctl;
+
+ snd_printdd("CXT5066: update speaker, hp_present=%d\n",
+ spec->hp_present);
+
+ /* Port A (HP) */
+ pinctl = ((spec->hp_present & 1) && spec->cur_eapd) ? PIN_HP : 0;
+ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ pinctl);
+
+ /* Port D (HP/LO) */
+ pinctl = ((spec->hp_present & 2) && spec->cur_eapd)
+ ? spec->port_d_mode : 0;
+ snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ pinctl);
+
+ /* CLASS_D AMP */
+ pinctl = (!spec->hp_present && spec->cur_eapd) ? PIN_OUT : 0;
+ snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ pinctl);
+
+ if (spec->dell_automute) {
+ /* DELL AIO Port Rule: PortA > PortD > IntSpk */
+ pinctl = (!(spec->hp_present & 1) && spec->cur_eapd)
+ ? PIN_OUT : 0;
+ snd_hda_codec_write(codec, 0x1c, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl);
+ }
+}
+
+/* turn on/off EAPD (+ mute HP) as a master switch */
+static int cxt5066_hp_master_sw_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+
+ if (!cxt_eapd_put(kcontrol, ucontrol))
+ return 0;
+
+ cxt5066_update_speaker(codec);
+ return 1;
+}
+
+/* toggle input of built-in and mic jack appropriately */
+static void cxt5066_automic(struct hda_codec *codec)
+{
+ static struct hda_verb ext_mic_present[] = {
+ /* enable external mic, port B */
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+
+ /* switch to external mic input */
+ {0x17, AC_VERB_SET_CONNECT_SEL, 0},
+
+ /* disable internal mic, port C */
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {}
+ };
+ static struct hda_verb ext_mic_absent[] = {
+ /* enable internal mic, port C */
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+
+ /* switch to internal mic input */
+ {0x17, AC_VERB_SET_CONNECT_SEL, 1},
+
+ /* disable external mic, port B */
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {}
+ };
+ unsigned int present;
+
+ present = snd_hda_codec_read(codec, 0x1a, 0,
+ AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
+ if (present) {
+ snd_printdd("CXT5066: external microphone detected\n");
+ snd_hda_sequence_write(codec, ext_mic_present);
+ } else {
+ snd_printdd("CXT5066: external microphone absent\n");
+ snd_hda_sequence_write(codec, ext_mic_absent);
+ }
+}
+
+/* mute internal speaker if HP is plugged */
+static void cxt5066_hp_automute(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+ unsigned int portA, portD;
+
+ /* Port A */
+ portA = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0)
+ & AC_PINSENSE_PRESENCE;
+
+ /* Port D */
+ portD = (snd_hda_codec_read(codec, 0x1c, 0, AC_VERB_GET_PIN_SENSE, 0)
+ & AC_PINSENSE_PRESENCE) << 1;
+
+ spec->hp_present = !!(portA | portD);
+ snd_printdd("CXT5066: hp automute portA=%x portD=%x present=%d\n",
+ portA, portD, spec->hp_present);
+ cxt5066_update_speaker(codec);
+}
+
+/* unsolicited event for jack sensing */
+static void cxt5066_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+ snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
+ switch (res >> 26) {
+ case CONEXANT_HP_EVENT:
+ cxt5066_hp_automute(codec);
+ break;
+ case CONEXANT_MIC_EVENT:
+ cxt5066_automic(codec);
+ break;
+ }
+}
+
+static const struct hda_input_mux cxt5066_analog_mic_boost = {
+ .num_items = 5,
+ .items = {
+ { "0dB", 0 },
+ { "10dB", 1 },
+ { "20dB", 2 },
+ { "30dB", 3 },
+ { "40dB", 4 },
+ },
+};
+
+static int cxt5066_mic_boost_mux_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ return snd_hda_input_mux_info(&cxt5066_analog_mic_boost, uinfo);
+}
+
+static int cxt5066_mic_boost_mux_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ int val;
+
+ val = snd_hda_codec_read(codec, 0x17, 0,
+ AC_VERB_GET_AMP_GAIN_MUTE, AC_AMP_GET_OUTPUT);
+
+ ucontrol->value.enumerated.item[0] = val & AC_AMP_GAIN;
+ return 0;
+}
+
+static int cxt5066_mic_boost_mux_enum_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ const struct hda_input_mux *imux = &cxt5066_analog_mic_boost;
+ unsigned int idx;
+
+ if (!imux->num_items)
+ return 0;
+ idx = ucontrol->value.enumerated.item[0];
+ if (idx >= imux->num_items)
+ idx = imux->num_items - 1;
+
+ snd_hda_codec_write_cache(codec, 0x17, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE,
+ AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | AC_AMP_SET_OUTPUT |
+ imux->items[idx].index);
+
+ return 1;
+}
+
+static struct hda_input_mux cxt5066_capture_source = {
+ .num_items = 4,
+ .items = {
+ { "Mic B", 0 },
+ { "Mic C", 1 },
+ { "Mic E", 2 },
+ { "Mic F", 3 },
+ },
+};
+
+static struct hda_bind_ctls cxt5066_bind_capture_vol_others = {
+ .ops = &snd_hda_bind_vol,
+ .values = {
+ HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_INPUT),
+ HDA_COMPOSE_AMP_VAL(0x14, 3, 2, HDA_INPUT),
+ 0
+ },
+};
+
+static struct hda_bind_ctls cxt5066_bind_capture_sw_others = {
+ .ops = &snd_hda_bind_sw,
+ .values = {
+ HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_INPUT),
+ HDA_COMPOSE_AMP_VAL(0x14, 3, 2, HDA_INPUT),
+ 0
+ },
+};
+
+static struct snd_kcontrol_new cxt5066_mixer_master[] = {
+ HDA_CODEC_VOLUME("Master Playback Volume", 0x10, 0x00, HDA_OUTPUT),
+ {}
+};
+
+static struct snd_kcontrol_new cxt5066_mixer_master_olpc[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Master Playback Volume",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+ SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
+ .info = snd_hda_mixer_amp_volume_info,
+ .get = snd_hda_mixer_amp_volume_get,
+ .put = snd_hda_mixer_amp_volume_put,
+ .tlv = { .c = snd_hda_mixer_amp_tlv },
+ /* offset by 28 volume steps to limit minimum gain to -46dB */
+ .private_value =
+ HDA_COMPOSE_AMP_VAL_OFS(0x10, 3, 0, HDA_OUTPUT, 28),
+ },
+ {}
+};
+
+static struct snd_kcontrol_new cxt5066_mixers[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Master Playback Switch",
+ .info = cxt_eapd_info,
+ .get = cxt_eapd_get,
+ .put = cxt5066_hp_master_sw_put,
+ .private_value = 0x1d,
+ },
+
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Analog Mic Boost Capture Enum",
+ .info = cxt5066_mic_boost_mux_enum_info,
+ .get = cxt5066_mic_boost_mux_enum_get,
+ .put = cxt5066_mic_boost_mux_enum_put,
+ },
+
+ HDA_BIND_VOL("Capture Volume", &cxt5066_bind_capture_vol_others),
+ HDA_BIND_SW("Capture Switch", &cxt5066_bind_capture_sw_others),
+ {}
+};
+
+static struct hda_verb cxt5066_init_verbs[] = {
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port B */
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port C */
+ {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port F */
+ {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port E */
+
+ /* Speakers */
+ {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
+
+ /* HP, Amp */
+ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
+
+ {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x1c, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
+
+ /* DAC1 */
+ {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+
+ /* Node 14 connections: 0x17 0x18 0x23 0x24 0x27 */
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2) | 0x50},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
+
+ /* no digital microphone support yet */
+ {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* Audio input selector */
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x3},
+
+ /* SPDIF route: PCM */
+ {0x20, AC_VERB_SET_CONNECT_SEL, 0x0},
+ {0x22, AC_VERB_SET_CONNECT_SEL, 0x0},
+
+ {0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+
+ /* EAPD */
+ {0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
+
+ /* not handling these yet */
+ {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
+ {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
+ {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
+ {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
+ {0x1d, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
+ {0x1e, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
+ {0x20, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
+ {0x22, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
+ { } /* end */
+};
+
+static struct hda_verb cxt5066_init_verbs_olpc[] = {
+ /* Port A: headphones */
+ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
+
+ /* Port B: external microphone */
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+
+ /* Port C: internal microphone */
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+
+ /* Port D: unused */
+ {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* Port E: unused, but has primary EAPD */
+ {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
+
+ /* Port F: unused */
+ {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* Port G: internal speakers */
+ {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
+
+ /* DAC1 */
+ {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+
+ /* DAC2: unused */
+ {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+
+ /* Disable digital microphone port */
+ {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* Audio input selectors */
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x3},
+ {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+
+ /* Disable SPDIF */
+ {0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* enable unsolicited events for Port A and B */
+ {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+ {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+ { } /* end */
+};
+
+static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
+ {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ { } /* end */
+};
+
+/* initialize jack-sensing, too */
+static int cxt5066_init(struct hda_codec *codec)
+{
+ snd_printdd("CXT5066: init\n");
+ conexant_init(codec);
+ if (codec->patch_ops.unsol_event) {
+ cxt5066_hp_automute(codec);
+ cxt5066_automic(codec);
+ }
+ return 0;
+}
+
+enum {
+ CXT5066_LAPTOP, /* Laptops w/ EAPD support */
+ CXT5066_DELL_LAPTOP, /* Dell Laptop */
+ CXT5066_OLPC_XO_1_5, /* OLPC XO 1.5 */
+ CXT5066_MODELS
+};
+
+static const char *cxt5066_models[CXT5066_MODELS] = {
+ [CXT5066_LAPTOP] = "laptop",
+ [CXT5066_DELL_LAPTOP] = "dell-laptop",
+ [CXT5066_OLPC_XO_1_5] = "olpc-xo-1_5",
+};
+
+static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
+ CXT5066_LAPTOP),
+ SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
+ CXT5066_DELL_LAPTOP),
+ {}
+};
+
+static int patch_cxt5066(struct hda_codec *codec)
+{
+ struct conexant_spec *spec;
+ int board_config;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ codec->spec = spec;
+
+ codec->patch_ops = conexant_patch_ops;
+ codec->patch_ops.init = cxt5066_init;
+
+ spec->dell_automute = 0;
+ spec->multiout.max_channels = 2;
+ spec->multiout.num_dacs = ARRAY_SIZE(cxt5066_dac_nids);
+ spec->multiout.dac_nids = cxt5066_dac_nids;
+ spec->multiout.dig_out_nid = CXT5066_SPDIF_OUT;
+ spec->num_adc_nids = 1;
+ spec->adc_nids = cxt5066_adc_nids;
+ spec->capsrc_nids = cxt5066_capsrc_nids;
+ spec->input_mux = &cxt5066_capture_source;
+
+ spec->port_d_mode = PIN_HP;
+
+ spec->num_init_verbs = 1;
+ spec->init_verbs[0] = cxt5066_init_verbs;
+ spec->num_channel_mode = ARRAY_SIZE(cxt5066_modes);
+ spec->channel_mode = cxt5066_modes;
+ spec->cur_adc = 0;
+ spec->cur_adc_idx = 0;
+
+ board_config = snd_hda_check_board_config(codec, CXT5066_MODELS,
+ cxt5066_models, cxt5066_cfg_tbl);
+ switch (board_config) {
+ default:
+ case CXT5066_LAPTOP:
+ spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+ break;
+ case CXT5066_DELL_LAPTOP:
+ spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+
+ spec->port_d_mode = PIN_OUT;
+ spec->init_verbs[spec->num_init_verbs] = cxt5066_init_verbs_portd_lo;
+ spec->num_init_verbs++;
+ spec->dell_automute = 1;
+ break;
+ case CXT5066_OLPC_XO_1_5:
+ codec->patch_ops.unsol_event = cxt5066_unsol_event;
+ spec->init_verbs[0] = cxt5066_init_verbs_olpc;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+ spec->port_d_mode = 0;
+
+ /* no S/PDIF out */
+ spec->multiout.dig_out_nid = 0;
+
+ /* input source automatically selected */
+ spec->input_mux = NULL;
+ break;
+ }
+
+ return 0;
+}
/*
*/
@@ -1919,12 +2395,15 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
.patch = patch_cxt5047 },
{ .id = 0x14f15051, .name = "CX20561 (Hermosa)",
.patch = patch_cxt5051 },
+ { .id = 0x14f15066, .name = "CX20582 (Pebble)",
+ .patch = patch_cxt5066 },
{} /* terminator */
};
MODULE_ALIAS("snd-hda-codec-id:14f15045");
MODULE_ALIAS("snd-hda-codec-id:14f15047");
MODULE_ALIAS("snd-hda-codec-id:14f15051");
+MODULE_ALIAS("snd-hda-codec-id:14f15066");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Conexant HD-audio codec");
diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
index fcc77fec4487..01a18ed475ac 100644
--- a/sound/pci/hda/patch_intelhdmi.c
+++ b/sound/pci/hda/patch_intelhdmi.c
@@ -33,8 +33,8 @@
#include "hda_codec.h"
#include "hda_local.h"
-#define CVT_NID 0x02 /* audio converter */
-#define PIN_NID 0x03 /* HDMI output pin */
+static hda_nid_t cvt_nid; /* audio converter */
+static hda_nid_t pin_nid; /* HDMI output pin */
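+/* note: module-wide NIDs, filled in by the patch_intel_hdmi*() entry points below */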
#define INTEL_HDMI_EVENT_TAG 0x08
@@ -44,30 +44,6 @@ struct intel_hdmi_spec {
struct hdmi_eld sink_eld;
};
-static struct hda_verb pinout_enable_verb[] = {
- {PIN_NID, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {} /* terminator */
-};
-
-static struct hda_verb unsolicited_response_verb[] = {
- {PIN_NID, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN |
- INTEL_HDMI_EVENT_TAG},
- {}
-};
-
-static struct hda_verb def_chan_map[] = {
- {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x00},
- {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x11},
- {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x22},
- {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x33},
- {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x44},
- {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x55},
- {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x66},
- {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x77},
- {}
-};
-
-
struct hdmi_audio_infoframe {
u8 type; /* 0x84 */
u8 ver; /* 0x01 */
@@ -244,11 +220,12 @@ static void hdmi_write_dip_byte(struct hda_codec *codec, hda_nid_t nid,
static void hdmi_enable_output(struct hda_codec *codec)
{
/* Unmute */
- if (get_wcaps(codec, PIN_NID) & AC_WCAP_OUT_AMP)
- snd_hda_codec_write(codec, PIN_NID, 0,
+ if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
+ snd_hda_codec_write(codec, pin_nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
/* Enable pin out */
- snd_hda_sequence_write(codec, pinout_enable_verb);
+ snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
}
/*
@@ -256,8 +233,8 @@ static void hdmi_enable_output(struct hda_codec *codec)
*/
static void hdmi_start_infoframe_trans(struct hda_codec *codec)
{
- hdmi_set_dip_index(codec, PIN_NID, 0x0, 0x0);
- snd_hda_codec_write(codec, PIN_NID, 0, AC_VERB_SET_HDMI_DIP_XMIT,
+ hdmi_set_dip_index(codec, pin_nid, 0x0, 0x0);
+ snd_hda_codec_write(codec, pin_nid, 0, AC_VERB_SET_HDMI_DIP_XMIT,
AC_DIPXMIT_BEST);
}
@@ -266,20 +243,20 @@ static void hdmi_start_infoframe_trans(struct hda_codec *codec)
*/
static void hdmi_stop_infoframe_trans(struct hda_codec *codec)
{
- hdmi_set_dip_index(codec, PIN_NID, 0x0, 0x0);
- snd_hda_codec_write(codec, PIN_NID, 0, AC_VERB_SET_HDMI_DIP_XMIT,
+ hdmi_set_dip_index(codec, pin_nid, 0x0, 0x0);
+ snd_hda_codec_write(codec, pin_nid, 0, AC_VERB_SET_HDMI_DIP_XMIT,
AC_DIPXMIT_DISABLE);
}
static int hdmi_get_channel_count(struct hda_codec *codec)
{
- return 1 + snd_hda_codec_read(codec, CVT_NID, 0,
+ return 1 + snd_hda_codec_read(codec, cvt_nid, 0,
AC_VERB_GET_CVT_CHAN_COUNT, 0);
}
static void hdmi_set_channel_count(struct hda_codec *codec, int chs)
{
- snd_hda_codec_write(codec, CVT_NID, 0,
+ snd_hda_codec_write(codec, cvt_nid, 0,
AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
if (chs != hdmi_get_channel_count(codec))
@@ -294,7 +271,7 @@ static void hdmi_debug_channel_mapping(struct hda_codec *codec)
int slot;
for (i = 0; i < 8; i++) {
- slot = snd_hda_codec_read(codec, CVT_NID, 0,
+ slot = snd_hda_codec_read(codec, cvt_nid, 0,
AC_VERB_GET_HDMI_CHAN_SLOT, i);
printk(KERN_DEBUG "HDMI: ASP channel %d => slot %d\n",
slot >> 4, slot & 0x7);
@@ -307,7 +284,7 @@ static void hdmi_parse_eld(struct hda_codec *codec)
struct intel_hdmi_spec *spec = codec->spec;
struct hdmi_eld *eld = &spec->sink_eld;
- if (!snd_hdmi_get_eld(eld, codec, PIN_NID))
+ if (!snd_hdmi_get_eld(eld, codec, pin_nid))
snd_hdmi_show_eld(eld);
}
@@ -322,11 +299,11 @@ static void hdmi_debug_dip_size(struct hda_codec *codec)
int i;
int size;
- size = snd_hdmi_get_eld_size(codec, PIN_NID);
+ size = snd_hdmi_get_eld_size(codec, pin_nid);
printk(KERN_DEBUG "HDMI: ELD buf size is %d\n", size);
for (i = 0; i < 8; i++) {
- size = snd_hda_codec_read(codec, PIN_NID, 0,
+ size = snd_hda_codec_read(codec, pin_nid, 0,
AC_VERB_GET_HDMI_DIP_SIZE, i);
printk(KERN_DEBUG "HDMI: DIP GP[%d] buf size is %d\n", i, size);
}
@@ -340,15 +317,15 @@ static void hdmi_clear_dip_buffers(struct hda_codec *codec)
int size;
int pi, bi;
for (i = 0; i < 8; i++) {
- size = snd_hda_codec_read(codec, PIN_NID, 0,
+ size = snd_hda_codec_read(codec, pin_nid, 0,
AC_VERB_GET_HDMI_DIP_SIZE, i);
if (size == 0)
continue;
- hdmi_set_dip_index(codec, PIN_NID, i, 0x0);
+ hdmi_set_dip_index(codec, pin_nid, i, 0x0);
for (j = 1; j < 1000; j++) {
- hdmi_write_dip_byte(codec, PIN_NID, 0x0);
- hdmi_get_dip_index(codec, PIN_NID, &pi, &bi);
+ hdmi_write_dip_byte(codec, pin_nid, 0x0);
+ hdmi_get_dip_index(codec, pin_nid, &pi, &bi);
if (pi != i)
snd_printd(KERN_INFO "dip index %d: %d != %d\n",
bi, pi, i);
@@ -376,9 +353,9 @@ static void hdmi_fill_audio_infoframe(struct hda_codec *codec,
sum += params[i];
ai->checksum = - sum;
- hdmi_set_dip_index(codec, PIN_NID, 0x0, 0x0);
+ hdmi_set_dip_index(codec, pin_nid, 0x0, 0x0);
for (i = 0; i < sizeof(ai); i++)
- hdmi_write_dip_byte(codec, PIN_NID, params[i]);
+ hdmi_write_dip_byte(codec, pin_nid, params[i]);
}
/*
@@ -465,6 +442,8 @@ static int hdmi_setup_channel_allocation(struct hda_codec *codec,
static void hdmi_setup_channel_mapping(struct hda_codec *codec,
struct hdmi_audio_infoframe *ai)
{
+ int i;
+
if (!ai->CA)
return;
@@ -473,7 +452,11 @@ static void hdmi_setup_channel_mapping(struct hda_codec *codec,
* ALSA sequence is front/surr/clfe/side?
*/
- snd_hda_sequence_write(codec, def_chan_map);
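+ /* program a one-to-one mapping: ASP channel i goes to converter slot i */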
+ for (i = 0; i < 8; i++)
+ snd_hda_codec_write(codec, cvt_nid, 0,
+ AC_VERB_SET_HDMI_CHAN_SLOT,
+ (i << 4) | i);
+
hdmi_debug_channel_mapping(codec);
}
@@ -597,7 +580,6 @@ static struct hda_pcm_stream intel_hdmi_pcm_playback = {
.substreams = 1,
.channels_min = 2,
.channels_max = 8,
- .nid = CVT_NID, /* NID to query formats and rates and setup streams */
.ops = {
.open = intel_hdmi_playback_pcm_open,
.close = intel_hdmi_playback_pcm_close,
@@ -613,6 +595,9 @@ static int intel_hdmi_build_pcms(struct hda_codec *codec)
codec->num_pcms = 1;
codec->pcm_info = info;
+ /* NID to query formats and rates and setup streams */
+ intel_hdmi_pcm_playback.nid = cvt_nid;
+
info->name = "INTEL HDMI";
info->pcm_type = HDA_PCM_TYPE_HDMI;
info->stream[SNDRV_PCM_STREAM_PLAYBACK] = intel_hdmi_pcm_playback;
@@ -636,8 +621,9 @@ static int intel_hdmi_init(struct hda_codec *codec)
{
hdmi_enable_output(codec);
- snd_hda_sequence_write(codec, unsolicited_response_verb);
-
+ snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_UNSOLICITED_ENABLE,
+ AC_USRSP_EN | INTEL_HDMI_EVENT_TAG);
return 0;
}
@@ -657,7 +643,7 @@ static struct hda_codec_ops intel_hdmi_patch_ops = {
.unsol_event = intel_hdmi_unsol_event,
};
-static int patch_intel_hdmi(struct hda_codec *codec)
+static int do_patch_intel_hdmi(struct hda_codec *codec)
{
struct intel_hdmi_spec *spec;
@@ -667,7 +653,7 @@ static int patch_intel_hdmi(struct hda_codec *codec)
spec->multiout.num_dacs = 0; /* no analog */
spec->multiout.max_channels = 8;
- spec->multiout.dig_out_nid = CVT_NID;
+ spec->multiout.dig_out_nid = cvt_nid;
codec->spec = spec;
codec->patch_ops = intel_hdmi_patch_ops;
@@ -679,12 +665,27 @@ static int patch_intel_hdmi(struct hda_codec *codec)
return 0;
}
+static int patch_intel_hdmi(struct hda_codec *codec)
+{
+ cvt_nid = 0x02;
+ pin_nid = 0x03;
+ return do_patch_intel_hdmi(codec);
+}
+
+static int patch_intel_hdmi_ibexpeak(struct hda_codec *codec)
+{
+ cvt_nid = 0x02;
+ pin_nid = 0x04;
+ return do_patch_intel_hdmi(codec);
+}
+
static struct hda_codec_preset snd_hda_preset_intelhdmi[] = {
{ .id = 0x808629fb, .name = "G45 DEVCL", .patch = patch_intel_hdmi },
{ .id = 0x80862801, .name = "G45 DEVBLC", .patch = patch_intel_hdmi },
{ .id = 0x80862802, .name = "G45 DEVCTG", .patch = patch_intel_hdmi },
{ .id = 0x80862803, .name = "G45 DEVELK", .patch = patch_intel_hdmi },
{ .id = 0x80862804, .name = "G45 DEVIBX", .patch = patch_intel_hdmi },
+ { .id = 0x80860054, .name = "Q57 DEVIBX", .patch = patch_intel_hdmi_ibexpeak },
{ .id = 0x10951392, .name = "SiI1392 HDMI", .patch = patch_intel_hdmi },
{} /* terminator */
};
@@ -694,6 +695,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862801");
MODULE_ALIAS("snd-hda-codec-id:80862802");
MODULE_ALIAS("snd-hda-codec-id:80862803");
MODULE_ALIAS("snd-hda-codec-id:80862804");
+MODULE_ALIAS("snd-hda-codec-id:80860054");
MODULE_ALIAS("snd-hda-codec-id:10951392");
MODULE_LICENSE("GPL");
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index f5792e2eea82..c8435c9a97f9 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -377,6 +377,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
*/
static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
{ .id = 0x10de0002, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
+ { .id = 0x10de0003, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
{ .id = 0x10de0006, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
{ .id = 0x10de0007, .name = "MCP7A HDMI", .patch = patch_nvhdmi_8ch },
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
@@ -385,6 +386,7 @@ static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
};
MODULE_ALIAS("snd-hda-codec-id:10de0002");
+MODULE_ALIAS("snd-hda-codec-id:10de0003");
MODULE_ALIAS("snd-hda-codec-id:10de0006");
MODULE_ALIAS("snd-hda-codec-id:10de0007");
MODULE_ALIAS("snd-hda-codec-id:10de0067");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 30eeb304351c..7ed47f66ddd1 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -208,12 +208,6 @@ enum {
ALC885_MBP3,
ALC885_MB5,
ALC885_IMAC24,
- ALC882_AUTO,
- ALC882_MODEL_LAST,
-};
-
-/* ALC883 models */
-enum {
ALC883_3ST_2ch_DIG,
ALC883_3ST_6ch_DIG,
ALC883_3ST_6ch,
@@ -226,6 +220,7 @@ enum {
ALC888_ACER_ASPIRE_4930G,
ALC888_ACER_ASPIRE_6530G,
ALC888_ACER_ASPIRE_8930G,
+ ALC888_ACER_ASPIRE_7730G,
ALC883_MEDION,
ALC883_MEDION_MD2,
ALC883_LAPTOP_EAPD,
@@ -237,17 +232,20 @@ enum {
ALC888_3ST_HP,
ALC888_6ST_DELL,
ALC883_MITAC,
+ ALC883_CLEVO_M540R,
ALC883_CLEVO_M720,
ALC883_FUJITSU_PI2515,
ALC888_FUJITSU_XA3530,
ALC883_3ST_6ch_INTEL,
+ ALC889A_INTEL,
+ ALC889_INTEL,
ALC888_ASUS_M90V,
ALC888_ASUS_EEE1601,
ALC889A_MB31,
ALC1200_ASUS_P5Q,
ALC883_SONY_VAIO_TT,
- ALC883_AUTO,
- ALC883_MODEL_LAST,
+ ALC882_AUTO,
+ ALC882_MODEL_LAST,
};
/* for GPIO Poll */
@@ -262,6 +260,14 @@ enum {
ALC_INIT_GPIO3,
};
+struct alc_mic_route {
+ hda_nid_t pin;
+ unsigned char mux_idx;
+ unsigned char amix_idx;
+};
+
+#define MUX_IDX_UNDEF ((unsigned char)-1)
+
struct alc_spec {
/* codec parameterization */
struct snd_kcontrol_new *mixers[5]; /* mixer arrays */
@@ -304,6 +310,8 @@ struct alc_spec {
unsigned int num_mux_defs;
const struct hda_input_mux *input_mux;
unsigned int cur_mux[3];
+ struct alc_mic_route ext_mic;
+ struct alc_mic_route int_mic;
/* channel model */
const struct hda_channel_mode *channel_mode;
@@ -320,6 +328,8 @@ struct alc_spec {
struct snd_array kctls;
struct hda_input_mux private_imux[3];
hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
+ hda_nid_t private_adc_nids[AUTO_CFG_MAX_OUTS];
+ hda_nid_t private_capsrc_nids[AUTO_CFG_MAX_OUTS];
/* hooks */
void (*init_hook)(struct hda_codec *codec);
@@ -329,6 +339,7 @@ struct alc_spec {
unsigned int sense_updated: 1;
unsigned int jack_present: 1;
unsigned int master_sw: 1;
+ unsigned int auto_mic:1;
/* other flags */
unsigned int no_analog :1; /* digital I/O only */
@@ -370,6 +381,7 @@ struct alc_config_preset {
unsigned int num_mux_defs;
const struct hda_input_mux *input_mux;
void (*unsol_event)(struct hda_codec *, unsigned int);
+ void (*setup)(struct hda_codec *);
void (*init_hook)(struct hda_codec *);
#ifdef CONFIG_SND_HDA_POWER_SAVE
struct hda_amp_list *loopbacks;
@@ -417,7 +429,7 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol,
mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
imux = &spec->input_mux[mux_idx];
- type = (get_wcaps(codec, nid) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ type = get_wcaps_type(get_wcaps(codec, nid));
if (type == AC_WID_AUD_MIX) {
/* Matrix-mixer style (e.g. ALC882) */
unsigned int *cur_val = &spec->cur_mux[adc_idx];
@@ -842,9 +854,10 @@ static void print_realtek_coef(struct snd_info_buffer *buffer,
/*
* set up from the preset table
*/
-static void setup_preset(struct alc_spec *spec,
+static void setup_preset(struct hda_codec *codec,
const struct alc_config_preset *preset)
{
+ struct alc_spec *spec = codec->spec;
int i;
for (i = 0; i < ARRAY_SIZE(preset->mixers) && preset->mixers[i]; i++)
@@ -886,6 +899,9 @@ static void setup_preset(struct alc_spec *spec,
#ifdef CONFIG_SND_HDA_POWER_SAVE
spec->loopback.amplist = preset->loopbacks;
#endif
+
+ if (preset->setup)
+ preset->setup(codec);
}
/* Enable GPIO mask and set output */
@@ -965,30 +981,64 @@ static void alc_automute_pin(struct hda_codec *codec)
}
}
-#if 0 /* it's broken in some cases -- temporarily disabled */
+static int get_connection_index(struct hda_codec *codec, hda_nid_t mux,
+ hda_nid_t nid)
+{
+ hda_nid_t conn[HDA_MAX_NUM_INPUTS];
+ int i, nums;
+
+ nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn));
+ for (i = 0; i < nums; i++)
+ if (conn[i] == nid)
+ return i;
+ return -1;
+}
+
static void alc_mic_automute(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- unsigned int present;
- unsigned int mic_nid = spec->autocfg.input_pins[AUTO_PIN_MIC];
- unsigned int fmic_nid = spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC];
- unsigned int mix_nid = spec->capsrc_nids[0];
- unsigned int capsrc_idx_mic, capsrc_idx_fmic;
-
- capsrc_idx_mic = mic_nid - 0x18;
- capsrc_idx_fmic = fmic_nid - 0x18;
- present = snd_hda_codec_read(codec, mic_nid, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, mix_nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (capsrc_idx_mic << 8) | (present ? 0 : 0x80));
- snd_hda_codec_write(codec, mix_nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (capsrc_idx_fmic << 8) | (present ? 0x80 : 0));
- snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, capsrc_idx_fmic,
- HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
+ struct alc_mic_route *dead, *alive;
+ unsigned int present, type;
+ hda_nid_t cap_nid;
+
+ if (!spec->auto_mic)
+ return;
+ if (!spec->int_mic.pin || !spec->ext_mic.pin)
+ return;
+ if (snd_BUG_ON(!spec->adc_nids))
+ return;
+
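+ /* operate on the capture-source widget if present, otherwise on the ADC itself */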
+ cap_nid = spec->capsrc_nids ? spec->capsrc_nids[0] : spec->adc_nids[0];
+
+ present = snd_hda_codec_read(codec, spec->ext_mic.pin, 0,
+ AC_VERB_GET_PIN_SENSE, 0);
+ present &= AC_PINSENSE_PRESENCE;
+ if (present) {
+ alive = &spec->ext_mic;
+ dead = &spec->int_mic;
+ } else {
+ alive = &spec->int_mic;
+ dead = &spec->ext_mic;
+ }
+
+ type = get_wcaps_type(get_wcaps(codec, cap_nid));
+ if (type == AC_WID_AUD_MIX) {
+ /* Matrix-mixer style (e.g. ALC882) */
+ snd_hda_codec_amp_stereo(codec, cap_nid, HDA_INPUT,
+ alive->mux_idx,
+ HDA_AMP_MUTE, 0);
+ snd_hda_codec_amp_stereo(codec, cap_nid, HDA_INPUT,
+ dead->mux_idx,
+ HDA_AMP_MUTE, HDA_AMP_MUTE);
+ } else {
+ /* MUX style (e.g. ALC880) */
+ snd_hda_codec_write_cache(codec, cap_nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ alive->mux_idx);
+ }
+
+ /* FIXME: analog mixer */
}
-#else
-#define alc_mic_automute(codec) do {} while(0) /* NOP */
-#endif /* disabled */
/* unsolicited event for HP jack sensing */
static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
@@ -1031,6 +1081,16 @@ static void alc888_coef_init(struct hda_codec *codec)
AC_VERB_SET_PROC_COEF, 0x3030);
}
+static void alc889_coef_init(struct hda_codec *codec)
+{
+ unsigned int tmp;
+
+ snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 7);
+ tmp = snd_hda_codec_read(codec, 0x20, 0, AC_VERB_GET_PROC_COEF, 0);
+ snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 7);
+ snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, tmp|0x2010);
+}
+
static void alc_auto_init_amp(struct hda_codec *codec, int type)
{
unsigned int tmp;
@@ -1088,15 +1148,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
case 0x10ec0885:
case 0x10ec0887:
case 0x10ec0889:
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_COEF_INDEX, 7);
- tmp = snd_hda_codec_read(codec, 0x20, 0,
- AC_VERB_GET_PROC_COEF, 0);
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_COEF_INDEX, 7);
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_PROC_COEF,
- tmp | 0x2010);
+ alc889_coef_init(codec);
break;
case 0x10ec0888:
alc888_coef_init(codec);
@@ -1142,6 +1194,55 @@ static void alc_init_auto_hp(struct hda_codec *codec)
spec->unsol_event = alc_sku_unsol_event;
}
+static void alc_init_auto_mic(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ hda_nid_t fixed, ext;
+ int i;
+
+ /* only the two mic inputs may be present, nothing else */
+ for (i = AUTO_PIN_LINE; i < AUTO_PIN_LAST; i++)
+ if (cfg->input_pins[i])
+ return;
+
+ fixed = ext = 0;
+ for (i = AUTO_PIN_MIC; i <= AUTO_PIN_FRONT_MIC; i++) {
+ hda_nid_t nid = cfg->input_pins[i];
+ unsigned int defcfg;
+ if (!nid)
+ return;
+ defcfg = snd_hda_codec_get_pincfg(codec, nid);
+ switch (get_defcfg_connect(defcfg)) {
+ case AC_JACK_PORT_FIXED:
+ if (fixed)
+ return; /* already occupied */
+ fixed = nid;
+ break;
+ case AC_JACK_PORT_COMPLEX:
+ if (ext)
+ return; /* already occupied */
+ ext = nid;
+ break;
+ default:
+ return; /* invalid entry */
+ }
+ }
+ if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP))
+ return; /* no unsol support */
+ snd_printdd("realtek: Enable auto-mic switch on NID 0x%x/0x%x\n",
+ ext, fixed);
+ spec->ext_mic.pin = ext;
+ spec->int_mic.pin = fixed;
+ spec->ext_mic.mux_idx = MUX_IDX_UNDEF; /* set later */
+ spec->int_mic.mux_idx = MUX_IDX_UNDEF; /* set later */
+ spec->auto_mic = 1;
+ snd_hda_codec_write_cache(codec, spec->ext_mic.pin, 0,
+ AC_VERB_SET_UNSOLICITED_ENABLE,
+ AC_USRSP_EN | ALC880_MIC_EVENT);
+ spec->unsol_event = alc_sku_unsol_event;
+}
+
/* check subsystem ID and set up device-specific initialization;
* return 1 if initialized, 0 if invalid SSID
*/
@@ -1243,6 +1344,7 @@ do_sku:
}
alc_init_auto_hp(codec);
+ alc_init_auto_mic(codec);
return 1;
}
@@ -1255,6 +1357,7 @@ static void alc_ssid_check(struct hda_codec *codec,
"Enable default setup for auto mode as fallback\n");
spec->init_amp = ALC_INIT_DEFAULT;
alc_init_auto_hp(codec);
+ alc_init_auto_mic(codec);
}
}
@@ -1436,7 +1539,25 @@ static void alc_automute_amp_unsol_event(struct hda_codec *codec,
alc_automute_amp(codec);
}
-static void alc888_fujitsu_xa3530_init_hook(struct hda_codec *codec)
+static void alc889_automute_setup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ spec->autocfg.hp_pins[0] = 0x15;
+ spec->autocfg.speaker_pins[0] = 0x14;
+ spec->autocfg.speaker_pins[1] = 0x16;
+ spec->autocfg.speaker_pins[2] = 0x17;
+ spec->autocfg.speaker_pins[3] = 0x19;
+ spec->autocfg.speaker_pins[4] = 0x1a;
+}
+
+static void alc889_intel_init_hook(struct hda_codec *codec)
+{
+ alc889_coef_init(codec);
+ alc_automute_amp(codec);
+}
+
+static void alc888_fujitsu_xa3530_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -1444,7 +1565,6 @@ static void alc888_fujitsu_xa3530_init_hook(struct hda_codec *codec)
spec->autocfg.hp_pins[1] = 0x1b; /* hp */
spec->autocfg.speaker_pins[0] = 0x14; /* speaker */
spec->autocfg.speaker_pins[1] = 0x15; /* bass */
- alc_automute_amp(codec);
}
/*
@@ -1643,16 +1763,15 @@ static struct snd_kcontrol_new alc888_base_mixer[] = {
{ } /* end */
};
-static void alc888_acer_aspire_4930g_init_hook(struct hda_codec *codec)
+static void alc888_acer_aspire_4930g_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
- alc_automute_amp(codec);
}
-static void alc888_acer_aspire_6530g_init_hook(struct hda_codec *codec)
+static void alc888_acer_aspire_6530g_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -1660,10 +1779,9 @@ static void alc888_acer_aspire_6530g_init_hook(struct hda_codec *codec)
spec->autocfg.speaker_pins[0] = 0x14;
spec->autocfg.speaker_pins[1] = 0x16;
spec->autocfg.speaker_pins[2] = 0x17;
- alc_automute_amp(codec);
}
-static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec)
+static void alc889_acer_aspire_8930g_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -1671,7 +1789,6 @@ static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec)
spec->autocfg.speaker_pins[0] = 0x14;
spec->autocfg.speaker_pins[1] = 0x16;
spec->autocfg.speaker_pins[2] = 0x1b;
- alc_automute_amp(codec);
}
/*
@@ -2651,13 +2768,17 @@ static void alc880_uniwill_mic_automute(struct hda_codec *codec)
snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits);
}
-static void alc880_uniwill_init_hook(struct hda_codec *codec)
+static void alc880_uniwill_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x16;
+}
+
+static void alc880_uniwill_init_hook(struct hda_codec *codec)
+{
alc_automute_amp(codec);
alc880_uniwill_mic_automute(codec);
}
@@ -2678,13 +2799,12 @@ static void alc880_uniwill_unsol_event(struct hda_codec *codec,
}
}
-static void alc880_uniwill_p53_init_hook(struct hda_codec *codec)
+static void alc880_uniwill_p53_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x15;
- alc_automute_amp(codec);
}
static void alc880_uniwill_p53_dcvol_automute(struct hda_codec *codec)
@@ -2947,13 +3067,12 @@ static struct hda_verb alc880_lg_init_verbs[] = {
};
/* toggle speaker-output according to the hp-jack state */
-static void alc880_lg_init_hook(struct hda_codec *codec)
+static void alc880_lg_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x1b;
spec->autocfg.speaker_pins[0] = 0x17;
- alc_automute_amp(codec);
}
/*
@@ -3032,13 +3151,12 @@ static struct hda_verb alc880_lg_lw_init_verbs[] = {
};
/* toggle speaker-output according to the hp-jack state */
-static void alc880_lg_lw_init_hook(struct hda_codec *codec)
+static void alc880_lg_lw_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x1b;
spec->autocfg.speaker_pins[0] = 0x14;
- alc_automute_amp(codec);
}
static struct snd_kcontrol_new alc880_medion_rim_mixer[] = {
@@ -3104,13 +3222,12 @@ static void alc880_medion_rim_unsol_event(struct hda_codec *codec,
alc880_medion_rim_automute(codec);
}
-static void alc880_medion_rim_init_hook(struct hda_codec *codec)
+static void alc880_medion_rim_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x1b;
- alc880_medion_rim_automute(codec);
}
#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -3977,7 +4094,8 @@ static struct alc_config_preset alc880_presets[] = {
.channel_mode = alc880_2_jack_modes,
.input_mux = &alc880_f1734_capture_source,
.unsol_event = alc880_uniwill_p53_unsol_event,
- .init_hook = alc880_uniwill_p53_init_hook,
+ .setup = alc880_uniwill_p53_setup,
+ .init_hook = alc_automute_amp,
},
[ALC880_ASUS] = {
.mixers = { alc880_asus_mixer },
@@ -4054,6 +4172,7 @@ static struct alc_config_preset alc880_presets[] = {
.need_dac_fix = 1,
.input_mux = &alc880_capture_source,
.unsol_event = alc880_uniwill_unsol_event,
+ .setup = alc880_uniwill_setup,
.init_hook = alc880_uniwill_init_hook,
},
[ALC880_UNIWILL_P53] = {
@@ -4066,7 +4185,8 @@ static struct alc_config_preset alc880_presets[] = {
.channel_mode = alc880_threestack_modes,
.input_mux = &alc880_capture_source,
.unsol_event = alc880_uniwill_p53_unsol_event,
- .init_hook = alc880_uniwill_p53_init_hook,
+ .setup = alc880_uniwill_p53_setup,
+ .init_hook = alc_automute_amp,
},
[ALC880_FUJITSU] = {
.mixers = { alc880_fujitsu_mixer },
@@ -4080,7 +4200,8 @@ static struct alc_config_preset alc880_presets[] = {
.channel_mode = alc880_2_jack_modes,
.input_mux = &alc880_capture_source,
.unsol_event = alc880_uniwill_p53_unsol_event,
- .init_hook = alc880_uniwill_p53_init_hook,
+ .setup = alc880_uniwill_p53_setup,
+ .init_hook = alc_automute_amp,
},
[ALC880_CLEVO] = {
.mixers = { alc880_three_stack_mixer },
@@ -4106,7 +4227,8 @@ static struct alc_config_preset alc880_presets[] = {
.need_dac_fix = 1,
.input_mux = &alc880_lg_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc880_lg_init_hook,
+ .setup = alc880_lg_setup,
+ .init_hook = alc_automute_amp,
#ifdef CONFIG_SND_HDA_POWER_SAVE
.loopbacks = alc880_lg_loopbacks,
#endif
@@ -4122,7 +4244,8 @@ static struct alc_config_preset alc880_presets[] = {
.channel_mode = alc880_lg_lw_modes,
.input_mux = &alc880_lg_lw_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc880_lg_lw_init_hook,
+ .setup = alc880_lg_lw_setup,
+ .init_hook = alc_automute_amp,
},
[ALC880_MEDION_RIM] = {
.mixers = { alc880_medion_rim_mixer },
@@ -4136,7 +4259,8 @@ static struct alc_config_preset alc880_presets[] = {
.channel_mode = alc880_2_jack_modes,
.input_mux = &alc880_medion_rim_capture_source,
.unsol_event = alc880_medion_rim_unsol_event,
- .init_hook = alc880_medion_rim_init_hook,
+ .setup = alc880_medion_rim_setup,
+ .init_hook = alc880_medion_rim_automute,
},
#ifdef CONFIG_SND_DEBUG
[ALC880_TEST] = {
@@ -4189,8 +4313,6 @@ static int add_control(struct alc_spec *spec, int type, const char *name,
#define alc880_fixed_pin_idx(nid) ((nid) - 0x14)
#define alc880_is_multi_pin(nid) ((nid) >= 0x18)
#define alc880_multi_pin_idx(nid) ((nid) - 0x18)
-#define alc880_is_input_pin(nid) ((nid) >= 0x18)
-#define alc880_input_pin_idx(nid) ((nid) - 0x18)
#define alc880_idx_to_dac(nid) ((nid) + 0x02)
#define alc880_dac_to_idx(nid) ((nid) - 0x02)
#define alc880_idx_to_mixer(nid) ((nid) + 0x0c)
@@ -4278,13 +4400,19 @@ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec,
if (err < 0)
return err;
} else {
- sprintf(name, "%s Playback Volume", chname[i]);
+ const char *pfx;
+ if (cfg->line_outs == 1 &&
+ cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ pfx = "Speaker";
+ else
+ pfx = chname[i];
+ sprintf(name, "%s Playback Volume", pfx);
err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
HDA_COMPOSE_AMP_VAL(nid, 3, 0,
HDA_OUTPUT));
if (err < 0)
return err;
- sprintf(name, "%s Playback Switch", chname[i]);
+ sprintf(name, "%s Playback Switch", pfx);
err = add_control(spec, ALC_CTL_BIND_MUTE, name,
HDA_COMPOSE_AMP_VAL(nid, 3, 2,
HDA_INPUT));
@@ -4358,31 +4486,61 @@ static int new_analog_input(struct alc_spec *spec, hda_nid_t pin,
return 0;
}
+static int alc_is_input_pin(struct hda_codec *codec, hda_nid_t nid)
+{
+ unsigned int pincap = snd_hda_query_pin_caps(codec, nid);
+ return (pincap & AC_PINCAP_IN) != 0;
+}
+
/* create playback/capture controls for input pins */
-static int alc880_auto_create_analog_input_ctls(struct alc_spec *spec,
- const struct auto_pin_cfg *cfg)
+static int alc_auto_create_input_ctls(struct hda_codec *codec,
+ const struct auto_pin_cfg *cfg,
+ hda_nid_t mixer,
+ hda_nid_t cap1, hda_nid_t cap2)
{
+ struct alc_spec *spec = codec->spec;
struct hda_input_mux *imux = &spec->private_imux[0];
int i, err, idx;
for (i = 0; i < AUTO_PIN_LAST; i++) {
- if (alc880_is_input_pin(cfg->input_pins[i])) {
- idx = alc880_input_pin_idx(cfg->input_pins[i]);
- err = new_analog_input(spec, cfg->input_pins[i],
- auto_pin_cfg_labels[i],
- idx, 0x0b);
- if (err < 0)
- return err;
+ hda_nid_t pin;
+
+ pin = cfg->input_pins[i];
+ if (!alc_is_input_pin(codec, pin))
+ continue;
+
+ if (mixer) {
+ idx = get_connection_index(codec, mixer, pin);
+ if (idx >= 0) {
+ err = new_analog_input(spec, pin,
+ auto_pin_cfg_labels[i],
+ idx, mixer);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ if (!cap1)
+ continue;
+ idx = get_connection_index(codec, cap1, pin);
+ if (idx < 0 && cap2)
+ idx = get_connection_index(codec, cap2, pin);
+ if (idx >= 0) {
imux->items[imux->num_items].label =
auto_pin_cfg_labels[i];
- imux->items[imux->num_items].index =
- alc880_input_pin_idx(cfg->input_pins[i]);
+ imux->items[imux->num_items].index = idx;
imux->num_items++;
}
}
return 0;
}
+static int alc880_auto_create_input_ctls(struct hda_codec *codec,
+ const struct auto_pin_cfg *cfg)
+{
+ return alc_auto_create_input_ctls(codec, cfg, 0x0b, 0x08, 0x09);
+}
+
static void alc_set_pin_output(struct hda_codec *codec, hda_nid_t nid,
unsigned int pin_type)
{
@@ -4448,7 +4606,7 @@ static void alc880_auto_init_analog_input(struct hda_codec *codec)
for (i = 0; i < AUTO_PIN_LAST; i++) {
hda_nid_t nid = spec->autocfg.input_pins[i];
- if (alc880_is_input_pin(nid)) {
+ if (alc_is_input_pin(codec, nid)) {
alc_set_input_pin(codec, nid, i);
if (nid != ALC880_PIN_CD_NID &&
(get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
@@ -4491,7 +4649,7 @@ static int alc880_parse_auto_config(struct hda_codec *codec)
"Headphone");
if (err < 0)
return err;
- err = alc880_auto_create_analog_input_ctls(spec, &spec->autocfg);
+ err = alc880_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -4505,12 +4663,6 @@ static int alc880_parse_auto_config(struct hda_codec *codec)
&dig_nid, 1);
if (err < 0)
continue;
- if (dig_nid > 0x7f) {
- printk(KERN_ERR "alc880_auto: invalid dig_nid "
- "connection 0x%x for NID 0x%x\n", dig_nid,
- spec->autocfg.dig_out_pins[i]);
- continue;
- }
if (!i)
spec->multiout.dig_out_nid = dig_nid;
else {
@@ -4547,8 +4699,42 @@ static void alc880_auto_init(struct hda_codec *codec)
alc_inithook(codec);
}
-static void set_capture_mixer(struct alc_spec *spec)
+/* check that the ADC/MUX reaches both mic pins; some ADC/MUXes contain only
+ * one of the two digital mic pins, e.g. on ALC272
+ */
+static void fixup_automic_adc(struct hda_codec *codec)
{
+ struct alc_spec *spec = codec->spec;
+ int i;
+
+ for (i = 0; i < spec->num_adc_nids; i++) {
+ hda_nid_t cap = spec->capsrc_nids ?
+ spec->capsrc_nids[i] : spec->adc_nids[i];
+ int iidx, eidx;
+
+ iidx = get_connection_index(codec, cap, spec->int_mic.pin);
+ if (iidx < 0)
+ continue;
+ eidx = get_connection_index(codec, cap, spec->ext_mic.pin);
+ if (eidx < 0)
+ continue;
+ spec->int_mic.mux_idx = iidx;
+ spec->ext_mic.mux_idx = eidx;
+ if (spec->capsrc_nids)
+ spec->capsrc_nids += i;
+ spec->adc_nids += i;
+ spec->num_adc_nids = 1;
+ return;
+ }
+ snd_printd(KERN_INFO "hda_codec: %s: "
+ "No ADC/MUX containing both 0x%x and 0x%x pins\n",
+ codec->chip_name, spec->int_mic.pin, spec->ext_mic.pin);
+ spec->auto_mic = 0; /* disable auto-mic to be sure */
+}
+
+static void set_capture_mixer(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
static struct snd_kcontrol_new *caps[2][3] = {
{ alc_capture_mixer_nosrc1,
alc_capture_mixer_nosrc2,
@@ -4559,7 +4745,10 @@ static void set_capture_mixer(struct alc_spec *spec)
};
if (spec->num_adc_nids > 0 && spec->num_adc_nids <= 3) {
int mux;
- if (spec->input_mux && spec->input_mux->num_items > 1)
+ if (spec->auto_mic) {
+ mux = 0;
+ fixup_automic_adc(codec);
+ } else if (spec->input_mux && spec->input_mux->num_items > 1)
mux = 1;
else
mux = 0;
@@ -4590,8 +4779,8 @@ static int patch_alc880(struct hda_codec *codec)
alc880_models,
alc880_cfg_tbl);
if (board_config < 0) {
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n", codec->chip_name);
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
board_config = ALC880_AUTO;
}
@@ -4616,7 +4805,7 @@ static int patch_alc880(struct hda_codec *codec)
}
if (board_config != ALC880_AUTO)
- setup_preset(spec, &alc880_presets[board_config]);
+ setup_preset(codec, &alc880_presets[board_config]);
spec->stream_analog_playback = &alc880_pcm_analog_playback;
spec->stream_analog_capture = &alc880_pcm_analog_capture;
@@ -4629,7 +4818,7 @@ static int patch_alc880(struct hda_codec *codec)
/* check whether NID 0x07 is valid */
unsigned int wcap = get_wcaps(codec, alc880_adc_nids[0]);
/* get type */
- wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ wcap = get_wcaps_type(wcap);
if (wcap != AC_WID_AUD_IN) {
spec->adc_nids = alc880_adc_nids_alt;
spec->num_adc_nids = ARRAY_SIZE(alc880_adc_nids_alt);
@@ -4638,7 +4827,7 @@ static int patch_alc880(struct hda_codec *codec)
spec->num_adc_nids = ARRAY_SIZE(alc880_adc_nids);
}
}
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
spec->vmaster_nid = 0x0c;
@@ -5830,7 +6019,14 @@ static int alc260_auto_create_multi_out_ctls(struct alc_spec *spec,
nid = cfg->line_out_pins[0];
if (nid) {
- err = alc260_add_playback_controls(spec, nid, "Front", &vols);
+ const char *pfx;
+ if (!cfg->speaker_pins[0] && !cfg->hp_pins[0])
+ pfx = "Master";
+ else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ pfx = "Speaker";
+ else
+ pfx = "Front";
+ err = alc260_add_playback_controls(spec, nid, pfx, &vols);
if (err < 0)
return err;
}
@@ -5853,39 +6049,10 @@ static int alc260_auto_create_multi_out_ctls(struct alc_spec *spec,
}
/* create playback/capture controls for input pins */
-static int alc260_auto_create_analog_input_ctls(struct alc_spec *spec,
+static int alc260_auto_create_input_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
- struct hda_input_mux *imux = &spec->private_imux[0];
- int i, err, idx;
-
- for (i = 0; i < AUTO_PIN_LAST; i++) {
- if (cfg->input_pins[i] >= 0x12) {
- idx = cfg->input_pins[i] - 0x12;
- err = new_analog_input(spec, cfg->input_pins[i],
- auto_pin_cfg_labels[i], idx,
- 0x07);
- if (err < 0)
- return err;
- imux->items[imux->num_items].label =
- auto_pin_cfg_labels[i];
- imux->items[imux->num_items].index = idx;
- imux->num_items++;
- }
- if (cfg->input_pins[i] >= 0x0f && cfg->input_pins[i] <= 0x10){
- idx = cfg->input_pins[i] - 0x09;
- err = new_analog_input(spec, cfg->input_pins[i],
- auto_pin_cfg_labels[i], idx,
- 0x07);
- if (err < 0)
- return err;
- imux->items[imux->num_items].label =
- auto_pin_cfg_labels[i];
- imux->items[imux->num_items].index = idx;
- imux->num_items++;
- }
- }
- return 0;
+ return alc_auto_create_input_ctls(codec, cfg, 0x07, 0x04, 0x05);
}
static void alc260_auto_set_output_and_unmute(struct hda_codec *codec,
@@ -5999,7 +6166,7 @@ static int alc260_parse_auto_config(struct hda_codec *codec)
return err;
if (!spec->kctls.list)
return 0; /* can't find valid BIOS pin config */
- err = alc260_auto_create_analog_input_ctls(spec, &spec->autocfg);
+ err = alc260_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -6234,8 +6401,7 @@ static int patch_alc260(struct hda_codec *codec)
alc260_models,
alc260_cfg_tbl);
if (board_config < 0) {
- snd_printd(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n",
+ snd_printd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
codec->chip_name);
board_config = ALC260_AUTO;
}
@@ -6261,7 +6427,7 @@ static int patch_alc260(struct hda_codec *codec)
}
if (board_config != ALC260_AUTO)
- setup_preset(spec, &alc260_presets[board_config]);
+ setup_preset(codec, &alc260_presets[board_config]);
spec->stream_analog_playback = &alc260_pcm_analog_playback;
spec->stream_analog_capture = &alc260_pcm_analog_capture;
@@ -6272,7 +6438,7 @@ static int patch_alc260(struct hda_codec *codec)
if (!spec->adc_nids && spec->input_mux) {
/* check whether NID 0x04 is valid */
unsigned int wcap = get_wcaps(codec, 0x04);
- wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ wcap = get_wcaps_type(wcap);
/* get type */
if (wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) {
spec->adc_nids = alc260_adc_nids_alt;
@@ -6282,7 +6448,7 @@ static int patch_alc260(struct hda_codec *codec)
spec->num_adc_nids = ARRAY_SIZE(alc260_adc_nids);
}
}
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
set_beep_amp(spec, 0x07, 0x05, HDA_INPUT);
spec->vmaster_nid = 0x08;
@@ -6301,7 +6467,7 @@ static int patch_alc260(struct hda_codec *codec)
/*
- * ALC882 support
+ * ALC882/883/885/888/889 support
*
* ALC882 is almost identical with ALC880 but has cleaner and more flexible
* configuration. Each pin widget can choose any input DACs and a mixer.
@@ -6313,22 +6479,35 @@ static int patch_alc260(struct hda_codec *codec)
*/
#define ALC882_DIGOUT_NID 0x06
#define ALC882_DIGIN_NID 0x0a
+#define ALC883_DIGOUT_NID ALC882_DIGOUT_NID
+#define ALC883_DIGIN_NID ALC882_DIGIN_NID
+#define ALC1200_DIGOUT_NID 0x10
+
static struct hda_channel_mode alc882_ch_modes[1] = {
{ 8, NULL }
};
+/* DACs */
static hda_nid_t alc882_dac_nids[4] = {
/* front, rear, clfe, rear_surr */
0x02, 0x03, 0x04, 0x05
};
+#define alc883_dac_nids alc882_dac_nids
-/* identical with ALC880 */
+/* ADCs */
#define alc882_adc_nids alc880_adc_nids
#define alc882_adc_nids_alt alc880_adc_nids_alt
+#define alc883_adc_nids alc882_adc_nids_alt
+static hda_nid_t alc883_adc_nids_alt[1] = { 0x08 };
+static hda_nid_t alc883_adc_nids_rev[2] = { 0x09, 0x08 };
+#define alc889_adc_nids alc880_adc_nids
static hda_nid_t alc882_capsrc_nids[3] = { 0x24, 0x23, 0x22 };
static hda_nid_t alc882_capsrc_nids_alt[2] = { 0x23, 0x22 };
+#define alc883_capsrc_nids alc882_capsrc_nids_alt
+static hda_nid_t alc883_capsrc_nids_rev[2] = { 0x22, 0x23 };
+#define alc889_capsrc_nids alc882_capsrc_nids
/* input MUX */
/* FIXME: should be a matrix-type input source selection */
@@ -6343,6 +6522,17 @@ static struct hda_input_mux alc882_capture_source = {
},
};
+#define alc883_capture_source alc882_capture_source
+
+static struct hda_input_mux alc889_capture_source = {
+ .num_items = 3,
+ .items = {
+ { "Front Mic", 0x0 },
+ { "Mic", 0x3 },
+ { "Line", 0x2 },
+ },
+};
+
static struct hda_input_mux mb5_capture_source = {
.num_items = 3,
.items = {
@@ -6352,6 +6542,77 @@ static struct hda_input_mux mb5_capture_source = {
},
};
+static struct hda_input_mux alc883_3stack_6ch_intel = {
+ .num_items = 4,
+ .items = {
+ { "Mic", 0x1 },
+ { "Front Mic", 0x0 },
+ { "Line", 0x2 },
+ { "CD", 0x4 },
+ },
+};
+
+static struct hda_input_mux alc883_lenovo_101e_capture_source = {
+ .num_items = 2,
+ .items = {
+ { "Mic", 0x1 },
+ { "Line", 0x2 },
+ },
+};
+
+static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
+ .num_items = 4,
+ .items = {
+ { "Mic", 0x0 },
+ { "iMic", 0x1 },
+ { "Line", 0x2 },
+ { "CD", 0x4 },
+ },
+};
+
+static struct hda_input_mux alc883_fujitsu_pi2515_capture_source = {
+ .num_items = 2,
+ .items = {
+ { "Mic", 0x0 },
+ { "Int Mic", 0x1 },
+ },
+};
+
+static struct hda_input_mux alc883_lenovo_sky_capture_source = {
+ .num_items = 3,
+ .items = {
+ { "Mic", 0x0 },
+ { "Front Mic", 0x1 },
+ { "Line", 0x4 },
+ },
+};
+
+static struct hda_input_mux alc883_asus_eee1601_capture_source = {
+ .num_items = 2,
+ .items = {
+ { "Mic", 0x0 },
+ { "Line", 0x2 },
+ },
+};
+
+static struct hda_input_mux alc889A_mb31_capture_source = {
+ .num_items = 2,
+ .items = {
+ { "Mic", 0x0 },
+ /* Front Mic (0x01) unused */
+ { "Line", 0x2 },
+ /* Line 2 (0x03) unused */
+ /* CD (0x04) unused? */
+ },
+};
+
+/*
+ * 2ch mode
+ */
+static struct hda_channel_mode alc883_3ST_2ch_modes[1] = {
+ { 2, NULL }
+};
+
/*
* 2ch mode
*/
@@ -6364,6 +6625,18 @@ static struct hda_verb alc882_3ST_ch2_init[] = {
};
/*
+ * 4ch mode
+ */
+static struct hda_verb alc882_3ST_ch4_init[] = {
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
* 6ch mode
*/
static struct hda_verb alc882_3ST_ch6_init[] = {
@@ -6376,11 +6649,60 @@ static struct hda_verb alc882_3ST_ch6_init[] = {
{ } /* end */
};
-static struct hda_channel_mode alc882_3ST_6ch_modes[2] = {
+static struct hda_channel_mode alc882_3ST_6ch_modes[3] = {
{ 2, alc882_3ST_ch2_init },
+ { 4, alc882_3ST_ch4_init },
{ 6, alc882_3ST_ch6_init },
};
+#define alc883_3ST_6ch_modes alc882_3ST_6ch_modes
+
+/*
+ * 2ch mode
+ */
+static struct hda_verb alc883_3ST_ch2_clevo_init[] = {
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { } /* end */
+};
+
+/*
+ * 4ch mode
+ */
+static struct hda_verb alc883_3ST_ch4_clevo_init[] = {
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
+ * 6ch mode
+ */
+static struct hda_verb alc883_3ST_ch6_clevo_init[] = {
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+static struct hda_channel_mode alc883_3ST_6ch_clevo_modes[3] = {
+ { 2, alc883_3ST_ch2_clevo_init },
+ { 4, alc883_3ST_ch4_clevo_init },
+ { 6, alc883_3ST_ch6_clevo_init },
+};
+
+
/*
* 6ch mode
*/
@@ -6468,6 +6790,189 @@ static struct hda_channel_mode alc885_mb5_6ch_modes[2] = {
{ 6, alc885_mb5_ch6_init },
};
+
+/*
+ * 2ch mode
+ */
+static struct hda_verb alc883_4ST_ch2_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { } /* end */
+};
+
+/*
+ * 4ch mode
+ */
+static struct hda_verb alc883_4ST_ch4_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
+ * 6ch mode
+ */
+static struct hda_verb alc883_4ST_ch6_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
+ * 8ch mode
+ */
+static struct hda_verb alc883_4ST_ch8_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
+ { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+static struct hda_channel_mode alc883_4ST_8ch_modes[4] = {
+ { 2, alc883_4ST_ch2_init },
+ { 4, alc883_4ST_ch4_init },
+ { 6, alc883_4ST_ch6_init },
+ { 8, alc883_4ST_ch8_init },
+};
+
+
+/*
+ * 2ch mode
+ */
+static struct hda_verb alc883_3ST_ch2_intel_init[] = {
+ { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { } /* end */
+};
+
+/*
+ * 4ch mode
+ */
+static struct hda_verb alc883_3ST_ch4_intel_init[] = {
+ { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
+ { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+/*
+ * 6ch mode
+ */
+static struct hda_verb alc883_3ST_ch6_intel_init[] = {
+ { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x19, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { } /* end */
+};
+
+static struct hda_channel_mode alc883_3ST_6ch_intel_modes[3] = {
+ { 2, alc883_3ST_ch2_intel_init },
+ { 4, alc883_3ST_ch4_intel_init },
+ { 6, alc883_3ST_ch6_intel_init },
+};
+
+/*
+ * 2ch mode
+ */
+static struct hda_verb alc889_ch2_intel_init[] = {
+ { 0x14, AC_VERB_SET_CONNECT_SEL, 0x00 },
+ { 0x19, AC_VERB_SET_CONNECT_SEL, 0x00 },
+ { 0x16, AC_VERB_SET_CONNECT_SEL, 0x00 },
+ { 0x17, AC_VERB_SET_CONNECT_SEL, 0x00 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { } /* end */
+};
+
+/*
+ * 6ch mode
+ */
+static struct hda_verb alc889_ch6_intel_init[] = {
+ { 0x14, AC_VERB_SET_CONNECT_SEL, 0x00 },
+ { 0x19, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { 0x16, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
+ { } /* end */
+};
+
+/*
+ * 8ch mode
+ */
+static struct hda_verb alc889_ch8_intel_init[] = {
+ { 0x14, AC_VERB_SET_CONNECT_SEL, 0x00 },
+ { 0x19, AC_VERB_SET_CONNECT_SEL, 0x01 },
+ { 0x16, AC_VERB_SET_CONNECT_SEL, 0x02 },
+ { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x03 },
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
+ { } /* end */
+};
+
+static struct hda_channel_mode alc889_8ch_intel_modes[3] = {
+ { 2, alc889_ch2_intel_init },
+ { 6, alc889_ch6_intel_init },
+ { 8, alc889_ch8_intel_init },
+};
+
+/*
+ * 6ch mode
+ */
+static struct hda_verb alc883_sixstack_ch6_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00 },
+ { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { } /* end */
+};
+
+/*
+ * 8ch mode
+ */
+static struct hda_verb alc883_sixstack_ch8_init[] = {
+ { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { } /* end */
+};
+
+static struct hda_channel_mode alc883_sixstack_modes[2] = {
+ { 6, alc883_sixstack_ch6_init },
+ { 8, alc883_sixstack_ch8_init },
+};
+
+
/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17
* Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b
*/
@@ -6604,7 +7109,7 @@ static struct snd_kcontrol_new alc882_chmode_mixer[] = {
{ } /* end */
};
-static struct hda_verb alc882_init_verbs[] = {
+static struct hda_verb alc882_base_init_verbs[] = {
/* Front mixer: unmute input/output amp left and right (volume = 0) */
{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
@@ -6622,6 +7127,13 @@ static struct hda_verb alc882_init_verbs[] = {
{0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ /* mute analog input loopbacks */
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
+
/* Front Pin: output 0 (0x0c) */
{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
@@ -6656,11 +7168,6 @@ static struct hda_verb alc882_init_verbs[] = {
/* FIXME: use matrix-type input source selection */
/* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
- /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
/* Input mixer2 */
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
@@ -6671,9 +7178,6 @@ static struct hda_verb alc882_init_verbs[] = {
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- /* ADC1: mute amp left and right */
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
/* ADC2: mute amp left and right */
{0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -6684,6 +7188,18 @@ static struct hda_verb alc882_init_verbs[] = {
{ }
};
+static struct hda_verb alc882_adc1_init_verbs[] = {
+ /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
+ /* ADC1: mute amp left and right */
+ {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
+ { }
+};
+
static struct hda_verb alc882_eapd_verbs[] = {
/* change to EAPD mode */
{0x20, AC_VERB_SET_COEF_INDEX, 0x07},
@@ -6691,6 +7207,110 @@ static struct hda_verb alc882_eapd_verbs[] = {
{ }
};
+static struct hda_verb alc889_eapd_verbs[] = {
+ {0x14, AC_VERB_SET_EAPD_BTLENABLE, 2},
+ {0x15, AC_VERB_SET_EAPD_BTLENABLE, 2},
+ { }
+};
+
+static struct hda_verb alc_hp15_unsol_verbs[] = {
+ {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {}
+};
+
+static struct hda_verb alc885_init_verbs[] = {
+ /* Front mixer: unmute input/output amp left and right (volume = 0) */
+ {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+ {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ /* Rear mixer */
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ /* CLFE mixer */
+ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ /* Side mixer */
+ {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+ {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+
+ /* mute analog input loopbacks */
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+
+ /* Front HP Pin: output 0 (0x0c) */
+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+ /* Front Pin: output 0 (0x0c) */
+ {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
+ /* Rear Pin: output 1 (0x0d) */
+ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x19, AC_VERB_SET_CONNECT_SEL, 0x01},
+ /* CLFE Pin: output 2 (0x0e) */
+ {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x16, AC_VERB_SET_CONNECT_SEL, 0x02},
+ /* Side Pin: output 3 (0x0f) */
+ {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x17, AC_VERB_SET_CONNECT_SEL, 0x03},
+ /* Mic (rear) pin: input vref at 80% */
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+ {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+ /* Front Mic pin: input vref at 80% */
+ {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+ {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+ /* Line In pin: input */
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+ {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+
+ /* Mixer elements: 0x18, 0x1a, 0x1b */
+ /* Input mixer1 */
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ /* Input mixer2 */
+ {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+ {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ /* Input mixer3 */
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+ /* ADC2: mute amp left and right */
+ {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ /* ADC3: mute amp left and right */
+ {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+
+ { }
+};
+
+static struct hda_verb alc885_init_input_verbs[] = {
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)},
+ { }
+};
+
+
+/* Unmute Selector 0x24 and set the default input to front mic */
+static struct hda_verb alc889_init_input_verbs[] = {
+ {0x24, AC_VERB_SET_CONNECT_SEL, 0x00},
+ {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ { }
+};
+
+
+#define alc883_init_verbs alc882_base_init_verbs
+
/* Mac Pro test */
static struct snd_kcontrol_new alc882_macpro_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
@@ -6898,23 +7518,21 @@ static struct hda_verb alc885_imac24_init_verbs[] = {
};
/* Toggle speaker-output according to the hp-jack state */
-static void alc885_imac24_automute_init_hook(struct hda_codec *codec)
+static void alc885_imac24_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x18;
spec->autocfg.speaker_pins[1] = 0x1a;
- alc_automute_amp(codec);
}
-static void alc885_mbp3_init_hook(struct hda_codec *codec)
+static void alc885_mbp3_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
- alc_automute_amp(codec);
}
@@ -6942,13 +7560,12 @@ static void alc882_targa_automute(struct hda_codec *codec)
spec->jack_present ? 1 : 3);
}
-static void alc882_targa_init_hook(struct hda_codec *codec)
+static void alc882_targa_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x1b;
- alc882_targa_automute(codec);
}
static void alc882_targa_unsol_event(struct hda_codec *codec, unsigned int res)
@@ -7036,18 +7653,16 @@ static void alc885_macpro_init_hook(struct hda_codec *codec)
static void alc885_imac24_init_hook(struct hda_codec *codec)
{
alc885_macpro_init_hook(codec);
- alc885_imac24_automute_init_hook(codec);
+ alc_automute_amp(codec);
}
/*
* generic initialization of ADC, input mixers and output mixers
*/
-static struct hda_verb alc882_auto_init_verbs[] = {
+static struct hda_verb alc883_auto_init_verbs[] = {
/*
* Unmute ADC0-2 and set the default input to mic-in
*/
- {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
{0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -7088,11 +7703,6 @@ static struct hda_verb alc882_auto_init_verbs[] = {
/* FIXME: use matrix-type input source selection */
/* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
- /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x02 << 8))},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x04 << 8))},
/* Input mixer2 */
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
@@ -7107,821 +7717,6 @@ static struct hda_verb alc882_auto_init_verbs[] = {
{ }
};
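As an aside (not part of the patch): the literal payloads in this hunk, such as (0x7000 | (0x00 << 8)) and (0x7080 | (0x03 << 8)), are the expanded forms of the AMP_IN_UNMUTE()/AMP_IN_MUTE() helpers used in the alc882 verb tables above. A small stand-alone decoder under the assumed HDA set-amp payload layout (bit 15 output amp, bit 14 input amp, bits 13/12 left/right, bits 11..8 input index, bit 7 mute, bits 6..0 gain):

#include <stdio.h>

/* Decode a SET_AMP_GAIN_MUTE payload under the assumed bit layout
 * described in the lead-in above.
 */
static void decode(unsigned int val)
{
	printf("0x%04x: %s%s%s%s idx=%u %s gain=%u\n", val,
	       (val & 0x8000) ? "out " : "",
	       (val & 0x4000) ? "in " : "",
	       (val & 0x2000) ? "L" : "",
	       (val & 0x1000) ? "R " : " ",
	       (val >> 8) & 0xf,
	       (val & 0x80) ? "mute" : "unmute",
	       val & 0x7f);
}

int main(void)
{
	decode(0x7000 | (0x00 << 8));	/* the unmuted input, index 0 */
	decode(0x7080 | (0x03 << 8));	/* a muted input, index 3 */
	return 0;
}

Run on the two literals from this hunk, it reports an unmuted input at index 0 and a muted input at index 3, matching the comments in the verb tables.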
-#ifdef CONFIG_SND_HDA_POWER_SAVE
-#define alc882_loopbacks alc880_loopbacks
-#endif
-
-/* pcm configuration: identical with ALC880 */
-#define alc882_pcm_analog_playback alc880_pcm_analog_playback
-#define alc882_pcm_analog_capture alc880_pcm_analog_capture
-#define alc882_pcm_digital_playback alc880_pcm_digital_playback
-#define alc882_pcm_digital_capture alc880_pcm_digital_capture
-
-/*
- * configuration and preset
- */
-static const char *alc882_models[ALC882_MODEL_LAST] = {
- [ALC882_3ST_DIG] = "3stack-dig",
- [ALC882_6ST_DIG] = "6stack-dig",
- [ALC882_ARIMA] = "arima",
- [ALC882_W2JC] = "w2jc",
- [ALC882_TARGA] = "targa",
- [ALC882_ASUS_A7J] = "asus-a7j",
- [ALC882_ASUS_A7M] = "asus-a7m",
- [ALC885_MACPRO] = "macpro",
- [ALC885_MB5] = "mb5",
- [ALC885_MBP3] = "mbp3",
- [ALC885_IMAC24] = "imac24",
- [ALC882_AUTO] = "auto",
-};
-
-static struct snd_pci_quirk alc882_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x1043, 0x060d, "Asus A7J", ALC882_ASUS_A7J),
- SND_PCI_QUIRK(0x1043, 0x1243, "Asus A7J", ALC882_ASUS_A7J),
- SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_ASUS_A7M),
- SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_W2JC),
- SND_PCI_QUIRK(0x1043, 0x817f, "Asus P5LD2", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x1043, 0x81d8, "Asus P5WD", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
- SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
- SND_PCI_QUIRK(0x161f, 0x2054, "Arima W820", ALC882_ARIMA),
- {}
-};
-
-static struct alc_config_preset alc882_presets[] = {
- [ALC882_3ST_DIG] = {
- .mixers = { alc882_base_mixer },
- .init_verbs = { alc882_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
- .channel_mode = alc882_ch_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- },
- [ALC882_6ST_DIG] = {
- .mixers = { alc882_base_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
- .channel_mode = alc882_sixstack_modes,
- .input_mux = &alc882_capture_source,
- },
- [ALC882_ARIMA] = {
- .mixers = { alc882_base_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_eapd_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
- .channel_mode = alc882_sixstack_modes,
- .input_mux = &alc882_capture_source,
- },
- [ALC882_W2JC] = {
- .mixers = { alc882_w2jc_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_eapd_verbs,
- alc880_gpio1_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
- .channel_mode = alc880_threestack_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- .dig_out_nid = ALC882_DIGOUT_NID,
- },
- [ALC885_MBP3] = {
- .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
- .init_verbs = { alc885_mbp3_init_verbs,
- alc880_gpio1_init_verbs },
- .num_dacs = 2,
- .dac_nids = alc882_dac_nids,
- .hp_nid = 0x04,
- .channel_mode = alc885_mbp_4ch_modes,
- .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
- .input_mux = &alc882_capture_source,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc885_mbp3_init_hook,
- },
- [ALC885_MB5] = {
- .mixers = { alc885_mb5_mixer, alc882_chmode_mixer },
- .init_verbs = { alc885_mb5_init_verbs,
- alc880_gpio1_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .channel_mode = alc885_mb5_6ch_modes,
- .num_channel_mode = ARRAY_SIZE(alc885_mb5_6ch_modes),
- .input_mux = &mb5_capture_source,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- },
- [ALC885_MACPRO] = {
- .mixers = { alc882_macpro_mixer },
- .init_verbs = { alc882_macpro_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
- .channel_mode = alc882_ch_modes,
- .input_mux = &alc882_capture_source,
- .init_hook = alc885_macpro_init_hook,
- },
- [ALC885_IMAC24] = {
- .mixers = { alc885_imac24_mixer },
- .init_verbs = { alc885_imac24_init_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .dig_in_nid = ALC882_DIGIN_NID,
- .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
- .channel_mode = alc882_ch_modes,
- .input_mux = &alc882_capture_source,
- .unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc885_imac24_init_hook,
- },
- [ALC882_TARGA] = {
- .mixers = { alc882_targa_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc880_gpio3_init_verbs,
- alc882_targa_verbs},
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
- .adc_nids = alc882_adc_nids,
- .capsrc_nids = alc882_capsrc_nids,
- .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
- .channel_mode = alc882_3ST_6ch_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- .unsol_event = alc882_targa_unsol_event,
- .init_hook = alc882_targa_init_hook,
- },
- [ALC882_ASUS_A7J] = {
- .mixers = { alc882_asus_a7j_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_asus_a7j_verbs},
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
- .adc_nids = alc882_adc_nids,
- .capsrc_nids = alc882_capsrc_nids,
- .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
- .channel_mode = alc882_3ST_6ch_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- },
- [ALC882_ASUS_A7M] = {
- .mixers = { alc882_asus_a7m_mixer, alc882_chmode_mixer },
- .init_verbs = { alc882_init_verbs, alc882_eapd_verbs,
- alc880_gpio1_init_verbs,
- alc882_asus_a7m_verbs },
- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
- .dac_nids = alc882_dac_nids,
- .dig_out_nid = ALC882_DIGOUT_NID,
- .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
- .channel_mode = alc880_threestack_modes,
- .need_dac_fix = 1,
- .input_mux = &alc882_capture_source,
- },
-};
-
-
-/*
- * Pin config fixes
- */
-enum {
- PINFIX_ABIT_AW9D_MAX
-};
-
-static struct alc_pincfg alc882_abit_aw9d_pinfix[] = {
- { 0x15, 0x01080104 }, /* side */
- { 0x16, 0x01011012 }, /* rear */
- { 0x17, 0x01016011 }, /* clfe */
- { }
-};
-
-static const struct alc_pincfg *alc882_pin_fixes[] = {
- [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix,
-};
-
-static struct snd_pci_quirk alc882_pinfix_tbl[] = {
- SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
- {}
-};
-
-/*
- * BIOS auto configuration
- */
-static void alc882_auto_set_output_and_unmute(struct hda_codec *codec,
- hda_nid_t nid, int pin_type,
- int dac_idx)
-{
- /* set as output */
- struct alc_spec *spec = codec->spec;
- int idx;
-
- alc_set_pin_output(codec, nid, pin_type);
- if (spec->multiout.dac_nids[dac_idx] == 0x25)
- idx = 4;
- else
- idx = spec->multiout.dac_nids[dac_idx] - 2;
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx);
-
-}
-
-static void alc882_auto_init_multi_out(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int i;
-
- for (i = 0; i <= HDA_SIDE; i++) {
- hda_nid_t nid = spec->autocfg.line_out_pins[i];
- int pin_type = get_pin_type(spec->autocfg.line_out_type);
- if (nid)
- alc882_auto_set_output_and_unmute(codec, nid, pin_type,
- i);
- }
-}
-
-static void alc882_auto_init_hp_out(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- hda_nid_t pin;
-
- pin = spec->autocfg.hp_pins[0];
- if (pin) /* connect to front */
- /* use dac 0 */
- alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
- pin = spec->autocfg.speaker_pins[0];
- if (pin)
- alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
-}
-
-#define alc882_is_input_pin(nid) alc880_is_input_pin(nid)
-#define ALC882_PIN_CD_NID ALC880_PIN_CD_NID
-
-static void alc882_auto_init_analog_input(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int i;
-
- for (i = 0; i < AUTO_PIN_LAST; i++) {
- hda_nid_t nid = spec->autocfg.input_pins[i];
- if (!nid)
- continue;
- alc_set_input_pin(codec, nid, AUTO_PIN_FRONT_MIC /*i*/);
- if (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_OUT_MUTE);
- }
-}
-
-static void alc882_auto_init_input_src(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int c;
-
- for (c = 0; c < spec->num_adc_nids; c++) {
- hda_nid_t conn_list[HDA_MAX_NUM_INPUTS];
- hda_nid_t nid = spec->capsrc_nids[c];
- unsigned int mux_idx;
- const struct hda_input_mux *imux;
- int conns, mute, idx, item;
-
- conns = snd_hda_get_connections(codec, nid, conn_list,
- ARRAY_SIZE(conn_list));
- if (conns < 0)
- continue;
- mux_idx = c >= spec->num_mux_defs ? 0 : c;
- imux = &spec->input_mux[mux_idx];
- for (idx = 0; idx < conns; idx++) {
- /* if the current connection is the selected one,
- * unmute it as default - otherwise mute it
- */
- mute = AMP_IN_MUTE(idx);
- for (item = 0; item < imux->num_items; item++) {
- if (imux->items[item].index == idx) {
- if (spec->cur_mux[c] == item)
- mute = AMP_IN_UNMUTE(idx);
- break;
- }
- }
- /* check if we have a selector or mixer
- * we could check for the widget type instead, but
- * just check for Amp-In presence (in case of mixer
- * without amp-in there is something wrong, this
- * function shouldn't be used or capsrc nid is wrong)
- */
- if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- mute);
- else if (mute != AMP_IN_MUTE(idx))
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CONNECT_SEL,
- idx);
- }
- }
-}
-
-/* add mic boosts if needed */
-static int alc_auto_add_mic_boost(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int err;
- hda_nid_t nid;
-
- nid = spec->autocfg.input_pins[AUTO_PIN_MIC];
- if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Mic Boost",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
- if (err < 0)
- return err;
- }
- nid = spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC];
- if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Front Mic Boost",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
- if (err < 0)
- return err;
- }
- return 0;
-}
-
-/* almost identical with ALC880 parser... */
-static int alc882_parse_auto_config(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- int err = alc880_parse_auto_config(codec);
-
- if (err < 0)
- return err;
- else if (!err)
- return 0; /* no config found */
-
- err = alc_auto_add_mic_boost(codec);
- if (err < 0)
- return err;
-
- /* hack - override the init verbs */
- spec->init_verbs[0] = alc882_auto_init_verbs;
-
- return 1; /* config found */
-}
-
-/* additional initialization for auto-configuration model */
-static void alc882_auto_init(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- alc882_auto_init_multi_out(codec);
- alc882_auto_init_hp_out(codec);
- alc882_auto_init_analog_input(codec);
- alc882_auto_init_input_src(codec);
- if (spec->unsol_event)
- alc_inithook(codec);
-}
-
-static int patch_alc883(struct hda_codec *codec); /* called in patch_alc882() */
-
-static int patch_alc882(struct hda_codec *codec)
-{
- struct alc_spec *spec;
- int err, board_config;
-
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
-
- board_config = snd_hda_check_board_config(codec, ALC882_MODEL_LAST,
- alc882_models,
- alc882_cfg_tbl);
-
- if (board_config < 0 || board_config >= ALC882_MODEL_LAST) {
- /* Pick up systems that don't supply PCI SSID */
- switch (codec->subsystem_id) {
- case 0x106b0c00: /* Mac Pro */
- board_config = ALC885_MACPRO;
- break;
- case 0x106b1000: /* iMac 24 */
- case 0x106b2800: /* AppleTV */
- case 0x106b3e00: /* iMac 24 Aluminium */
- board_config = ALC885_IMAC24;
- break;
- case 0x106b00a0: /* MacBookPro3,1 - Another revision */
- case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */
- case 0x106b00a4: /* MacbookPro4,1 */
- case 0x106b2c00: /* Macbook Pro rev3 */
- /* Macbook 3.1 (0x106b3600) is handled by patch_alc883() */
- case 0x106b3800: /* MacbookPro4,1 - latter revision */
- board_config = ALC885_MBP3;
- break;
- case 0x106b3f00: /* Macbook 5,1 */
- case 0x106b4000: /* Macbook Pro 5,1 - FIXME: HP jack sense
- * seems not working, so apparently
- * no perfect solution yet
- */
- board_config = ALC885_MB5;
- break;
- default:
- /* ALC889A is handled better as ALC888-compatible */
- if (codec->revision_id == 0x100101 ||
- codec->revision_id == 0x100103) {
- alc_free(codec);
- return patch_alc883(codec);
- }
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n",
- codec->chip_name);
- board_config = ALC882_AUTO;
- }
- }
-
- alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes);
-
- if (board_config == ALC882_AUTO) {
- /* automatic parse from the BIOS config */
- err = alc882_parse_auto_config(codec);
- if (err < 0) {
- alc_free(codec);
- return err;
- } else if (!err) {
- printk(KERN_INFO
- "hda_codec: Cannot set up configuration "
- "from BIOS. Using base mode...\n");
- board_config = ALC882_3ST_DIG;
- }
- }
-
- err = snd_hda_attach_beep_device(codec, 0x1);
- if (err < 0) {
- alc_free(codec);
- return err;
- }
-
- if (board_config != ALC882_AUTO)
- setup_preset(spec, &alc882_presets[board_config]);
-
- spec->stream_analog_playback = &alc882_pcm_analog_playback;
- spec->stream_analog_capture = &alc882_pcm_analog_capture;
- /* FIXME: setup DAC5 */
- /*spec->stream_analog_alt_playback = &alc880_pcm_analog_alt_playback;*/
- spec->stream_analog_alt_capture = &alc880_pcm_analog_alt_capture;
-
- spec->stream_digital_playback = &alc882_pcm_digital_playback;
- spec->stream_digital_capture = &alc882_pcm_digital_capture;
-
- if (!spec->adc_nids && spec->input_mux) {
- /* check whether NID 0x07 is valid */
- unsigned int wcap = get_wcaps(codec, 0x07);
- /* get type */
- wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
- if (wcap != AC_WID_AUD_IN) {
- spec->adc_nids = alc882_adc_nids_alt;
- spec->num_adc_nids = ARRAY_SIZE(alc882_adc_nids_alt);
- spec->capsrc_nids = alc882_capsrc_nids_alt;
- } else {
- spec->adc_nids = alc882_adc_nids;
- spec->num_adc_nids = ARRAY_SIZE(alc882_adc_nids);
- spec->capsrc_nids = alc882_capsrc_nids;
- }
- }
- set_capture_mixer(spec);
- set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
-
- spec->vmaster_nid = 0x0c;
-
- codec->patch_ops = alc_patch_ops;
- if (board_config == ALC882_AUTO)
- spec->init_hook = alc882_auto_init;
-#ifdef CONFIG_SND_HDA_POWER_SAVE
- if (!spec->loopback.amplist)
- spec->loopback.amplist = alc882_loopbacks;
-#endif
- codec->proc_widget_hook = print_realtek_coef;
-
- return 0;
-}
-
-/*
- * ALC883 support
- *
- * ALC883 is almost identical with ALC880 but has cleaner and more flexible
- * configuration. Each pin widget can choose any input DACs and a mixer.
- * Each ADC is connected from a mixer of all inputs. This makes possible
- * 6-channel independent captures.
- *
- * In addition, an independent DAC for the multi-playback (not used in this
- * driver yet).
- */
-#define ALC883_DIGOUT_NID 0x06
-#define ALC883_DIGIN_NID 0x0a
-
-#define ALC1200_DIGOUT_NID 0x10
-
-static hda_nid_t alc883_dac_nids[4] = {
- /* front, rear, clfe, rear_surr */
- 0x02, 0x03, 0x04, 0x05
-};
-
-static hda_nid_t alc883_adc_nids[2] = {
- /* ADC1-2 */
- 0x08, 0x09,
-};
-
-static hda_nid_t alc883_adc_nids_alt[1] = {
- /* ADC1 */
- 0x08,
-};
-
-static hda_nid_t alc883_adc_nids_rev[2] = {
- /* ADC2-1 */
- 0x09, 0x08
-};
-
-#define alc889_adc_nids alc880_adc_nids
-
-static hda_nid_t alc883_capsrc_nids[2] = { 0x23, 0x22 };
-
-static hda_nid_t alc883_capsrc_nids_rev[2] = { 0x22, 0x23 };
-
-#define alc889_capsrc_nids alc882_capsrc_nids
-
-/* input MUX */
-/* FIXME: should be a matrix-type input source selection */
-
-static struct hda_input_mux alc883_capture_source = {
- .num_items = 4,
- .items = {
- { "Mic", 0x0 },
- { "Front Mic", 0x1 },
- { "Line", 0x2 },
- { "CD", 0x4 },
- },
-};
-
-static struct hda_input_mux alc883_3stack_6ch_intel = {
- .num_items = 4,
- .items = {
- { "Mic", 0x1 },
- { "Front Mic", 0x0 },
- { "Line", 0x2 },
- { "CD", 0x4 },
- },
-};
-
-static struct hda_input_mux alc883_lenovo_101e_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x1 },
- { "Line", 0x2 },
- },
-};
-
-static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
- .num_items = 4,
- .items = {
- { "Mic", 0x0 },
- { "iMic", 0x1 },
- { "Line", 0x2 },
- { "CD", 0x4 },
- },
-};
-
-static struct hda_input_mux alc883_fujitsu_pi2515_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x0 },
- { "Int Mic", 0x1 },
- },
-};
-
-static struct hda_input_mux alc883_lenovo_sky_capture_source = {
- .num_items = 3,
- .items = {
- { "Mic", 0x0 },
- { "Front Mic", 0x1 },
- { "Line", 0x4 },
- },
-};
-
-static struct hda_input_mux alc883_asus_eee1601_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x0 },
- { "Line", 0x2 },
- },
-};
-
-static struct hda_input_mux alc889A_mb31_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x0 },
- /* Front Mic (0x01) unused */
- { "Line", 0x2 },
- /* Line 2 (0x03) unused */
- /* CD (0x04) unsused? */
- },
-};
-
-/*
- * 2ch mode
- */
-static struct hda_channel_mode alc883_3ST_2ch_modes[1] = {
- { 2, NULL }
-};
-
-/*
- * 2ch mode
- */
-static struct hda_verb alc883_3ST_ch2_init[] = {
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { } /* end */
-};
-
-/*
- * 4ch mode
- */
-static struct hda_verb alc883_3ST_ch4_init[] = {
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-/*
- * 6ch mode
- */
-static struct hda_verb alc883_3ST_ch6_init[] = {
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-static struct hda_channel_mode alc883_3ST_6ch_modes[3] = {
- { 2, alc883_3ST_ch2_init },
- { 4, alc883_3ST_ch4_init },
- { 6, alc883_3ST_ch6_init },
-};
-
-
-/*
- * 2ch mode
- */
-static struct hda_verb alc883_4ST_ch2_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { } /* end */
-};
-
-/*
- * 4ch mode
- */
-static struct hda_verb alc883_4ST_ch4_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-/*
- * 6ch mode
- */
-static struct hda_verb alc883_4ST_ch6_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-/*
- * 8ch mode
- */
-static struct hda_verb alc883_4ST_ch8_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
- { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-static struct hda_channel_mode alc883_4ST_8ch_modes[4] = {
- { 2, alc883_4ST_ch2_init },
- { 4, alc883_4ST_ch4_init },
- { 6, alc883_4ST_ch6_init },
- { 8, alc883_4ST_ch8_init },
-};
-
-
-/*
- * 2ch mode
- */
-static struct hda_verb alc883_3ST_ch2_intel_init[] = {
- { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { } /* end */
-};
-
-/*
- * 4ch mode
- */
-static struct hda_verb alc883_3ST_ch4_intel_init[] = {
- { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-/*
- * 6ch mode
- */
-static struct hda_verb alc883_3ST_ch6_intel_init[] = {
- { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x19, AC_VERB_SET_CONNECT_SEL, 0x02 },
- { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
- { } /* end */
-};
-
-static struct hda_channel_mode alc883_3ST_6ch_intel_modes[3] = {
- { 2, alc883_3ST_ch2_intel_init },
- { 4, alc883_3ST_ch4_intel_init },
- { 6, alc883_3ST_ch6_intel_init },
-};
-
-/*
- * 6ch mode
- */
-static struct hda_verb alc883_sixstack_ch6_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00 },
- { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { } /* end */
-};
-
-/*
- * 8ch mode
- */
-static struct hda_verb alc883_sixstack_ch8_init[] = {
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { } /* end */
-};
-
-static struct hda_channel_mode alc883_sixstack_modes[2] = {
- { 6, alc883_sixstack_ch6_init },
- { 8, alc883_sixstack_ch8_init },
-};
-
/* 2ch mode (Speaker:front, Subwoofer:CLFE, Line:input, Headphones:front) */
static struct hda_verb alc889A_mb31_ch2_init[] = {
{0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, /* HP as front */
@@ -7972,34 +7767,7 @@ static struct hda_verb alc883_medion_eapd_verbs[] = {
{ }
};
-/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17
- * Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b
- */
-
-static struct snd_kcontrol_new alc883_base_mixer[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
- HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 2, HDA_INPUT),
- HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
- HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
- { } /* end */
-};
+#define alc883_base_mixer alc882_base_mixer
static struct snd_kcontrol_new alc883_mitac_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
@@ -8110,6 +7878,30 @@ static struct snd_kcontrol_new alc883_3ST_6ch_intel_mixer[] = {
{ } /* end */
};
+static struct snd_kcontrol_new alc885_8ch_intel_mixer[] = {
+ HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x0,
+ HDA_OUTPUT),
+ HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 2, HDA_INPUT),
+ HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE("Speaker Playback Switch", 0x0f, 2, HDA_INPUT),
+ HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x3, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x1b, 0, HDA_INPUT),
+ HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x3, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT),
+ HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ { } /* end */
+};
+
static struct snd_kcontrol_new alc883_fivestack_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -8350,93 +8142,14 @@ static struct snd_kcontrol_new alc883_chmode_mixer[] = {
{ } /* end */
};
-static struct hda_verb alc883_init_verbs[] = {
- /* ADC1: mute amp left and right */
- {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* ADC2: mute amp left and right */
- {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* Front mixer: unmute input/output amp left and right (volume = 0) */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* Rear mixer */
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* CLFE mixer */
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* Side mixer */
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-
- /* mute analog input loopbacks */
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-
- /* Front Pin: output 0 (0x0c) */
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* Rear Pin: output 1 (0x0d) */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x15, AC_VERB_SET_CONNECT_SEL, 0x01},
- /* CLFE Pin: output 2 (0x0e) */
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x16, AC_VERB_SET_CONNECT_SEL, 0x02},
- /* Side Pin: output 3 (0x0f) */
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x17, AC_VERB_SET_CONNECT_SEL, 0x03},
- /* Mic (rear) pin: input vref at 80% */
- {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Front Mic pin: input vref at 80% */
- {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Line In pin: input */
- {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Line-2 In: Headphone output (output 0 - 0x0c) */
- {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x1b, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* CD pin widget for input */
- {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-
- /* FIXME: use matrix-type input source selection */
- /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
- /* Input mixer2 */
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- /* Input mixer3 */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- { }
-};
-
/* toggle speaker-output according to the hp-jack state */
-static void alc883_mitac_init_hook(struct hda_codec *codec)
+static void alc883_mitac_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
spec->autocfg.speaker_pins[1] = 0x17;
- alc_automute_amp(codec);
}
/* auto-toggle front mic */
@@ -8468,6 +8181,22 @@ static struct hda_verb alc883_mitac_verbs[] = {
{ } /* end */
};
+static struct hda_verb alc883_clevo_m540r_verbs[] = {
+ /* HP */
+ {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ /* Int speaker */
+ /*{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},*/
+
+ /* enable unsolicited event */
+ /*
+ {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+ {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT | AC_USRSP_EN},
+ */
+
+ { } /* end */
+};
+
static struct hda_verb alc883_clevo_m720_verbs[] = {
/* HP */
{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -8591,7 +8320,7 @@ static struct hda_verb alc883_vaiott_verbs[] = {
{ } /* end */
};
-static void alc888_3st_hp_init_hook(struct hda_codec *codec)
+static void alc888_3st_hp_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -8599,7 +8328,6 @@ static void alc888_3st_hp_init_hook(struct hda_codec *codec)
spec->autocfg.speaker_pins[0] = 0x14;
spec->autocfg.speaker_pins[1] = 0x16;
spec->autocfg.speaker_pins[2] = 0x18;
- alc_automute_amp(codec);
}
static struct hda_verb alc888_3st_hp_verbs[] = {
@@ -8696,13 +8424,12 @@ static struct hda_verb alc883_medion_md2_verbs[] = {
};
/* toggle speaker-output according to the hp-jack state */
-static void alc883_medion_md2_init_hook(struct hda_codec *codec)
+static void alc883_medion_md2_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x15;
- alc_automute_amp(codec);
}
/* toggle speaker-output according to the hp-jack state */
@@ -8719,12 +8446,16 @@ static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
}
-static void alc883_clevo_m720_init_hook(struct hda_codec *codec)
+static void alc883_clevo_m720_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
+}
+
+static void alc883_clevo_m720_init_hook(struct hda_codec *codec)
+{
alc_automute_amp(codec);
alc883_clevo_m720_mic_automute(codec);
}
@@ -8743,22 +8474,20 @@ static void alc883_clevo_m720_unsol_event(struct hda_codec *codec,
}
/* toggle speaker-output according to the hp-jack state */
-static void alc883_2ch_fujitsu_pi2515_init_hook(struct hda_codec *codec)
+static void alc883_2ch_fujitsu_pi2515_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x15;
- alc_automute_amp(codec);
}
-static void alc883_haier_w66_init_hook(struct hda_codec *codec)
+static void alc883_haier_w66_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x1b;
spec->autocfg.speaker_pins[0] = 0x14;
- alc_automute_amp(codec);
}
static void alc883_lenovo_101e_ispeaker_automute(struct hda_codec *codec)
@@ -8797,14 +8526,13 @@ static void alc883_lenovo_101e_unsol_event(struct hda_codec *codec,
}
/* toggle speaker-output according to the hp-jack state */
-static void alc883_acer_aspire_init_hook(struct hda_codec *codec)
+static void alc883_acer_aspire_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x15;
spec->autocfg.speaker_pins[1] = 0x16;
- alc_automute_amp(codec);
}
static struct hda_verb alc883_acer_eapd_verbs[] = {
@@ -8825,7 +8553,14 @@ static struct hda_verb alc883_acer_eapd_verbs[] = {
{ }
};
-static void alc888_6st_dell_init_hook(struct hda_codec *codec)
+static struct hda_verb alc888_acer_aspire_7730G_verbs[] = {
+ {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+ {0x17, AC_VERB_SET_CONNECT_SEL, 0x02},
+ {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+ { } /* end */
+};
+
+static void alc888_6st_dell_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -8834,10 +8569,9 @@ static void alc888_6st_dell_init_hook(struct hda_codec *codec)
spec->autocfg.speaker_pins[1] = 0x15;
spec->autocfg.speaker_pins[2] = 0x16;
spec->autocfg.speaker_pins[3] = 0x17;
- alc_automute_amp(codec);
}
-static void alc888_lenovo_sky_init_hook(struct hda_codec *codec)
+static void alc888_lenovo_sky_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -8847,82 +8581,17 @@ static void alc888_lenovo_sky_init_hook(struct hda_codec *codec)
spec->autocfg.speaker_pins[2] = 0x16;
spec->autocfg.speaker_pins[3] = 0x17;
spec->autocfg.speaker_pins[4] = 0x1a;
- alc_automute_amp(codec);
}
-static void alc883_vaiott_init_hook(struct hda_codec *codec)
+static void alc883_vaiott_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
spec->autocfg.speaker_pins[1] = 0x17;
- alc_automute_amp(codec);
}
-/*
- * generic initialization of ADC, input mixers and output mixers
- */
-static struct hda_verb alc883_auto_init_verbs[] = {
- /*
- * Unmute ADC0-2 and set the default input to mic-in
- */
- {0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
- {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
- {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-
- /* Mute input amps (CD, Line In, Mic 1 & Mic 2) of the analog-loopback
- * mixer widget
- * Note: PASD motherboards uses the Line In 2 as the input for
- * front panel mic (mic 2)
- */
- /* Amp Indices: Mic1 = 0, Mic2 = 1, Line1 = 2, Line2 = 3, CD = 4 */
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-
- /*
- * Set up output mixers (0x0c - 0x0f)
- */
- /* set vol=0 to output mixers */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* set up input amps for analog loopback */
- /* Amp Indices: DAC = 0, mixer = 1 */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-
- /* FIXME: use matrix-type input source selection */
- /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
- /* Input mixer1 */
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
- /* {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)}, */
- {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(4)},
- /* Input mixer2 */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
- /* {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)}, */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(4)},
-
- { }
-};
-
static struct hda_verb alc888_asus_m90v_verbs[] = {
{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
@@ -8933,19 +8602,7 @@ static struct hda_verb alc888_asus_m90v_verbs[] = {
{ } /* end */
};
-static void alc883_nb_mic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
- snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
-}
-
-static void alc883_M90V_init_hook(struct hda_codec *codec)
+static void alc883_mode2_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -8953,26 +8610,11 @@ static void alc883_M90V_init_hook(struct hda_codec *codec)
spec->autocfg.speaker_pins[0] = 0x14;
spec->autocfg.speaker_pins[1] = 0x15;
spec->autocfg.speaker_pins[2] = 0x16;
- alc_automute_pin(codec);
-}
-
-static void alc883_mode2_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- switch (res >> 26) {
- case ALC880_MIC_EVENT:
- alc883_nb_mic_automute(codec);
- break;
- default:
- alc_sku_unsol_event(codec, res);
- break;
- }
-}
-
-static void alc883_mode2_inithook(struct hda_codec *codec)
-{
- alc883_M90V_init_hook(codec);
- alc883_nb_mic_automute(codec);
+ spec->ext_mic.pin = 0x18;
+ spec->int_mic.pin = 0x19;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.mux_idx = 1;
+ spec->auto_mic = 1;
}
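The hunk above drops the hand-rolled alc883_nb_mic_automute()/alc883_mode2_unsol_event() pair and instead fills in spec->ext_mic, spec->int_mic and spec->auto_mic, leaving jack-driven mic switching to the driver's shared auto-mic handling. A rough stand-alone sketch of the behaviour being centralised; the struct and function names here are illustrative only, not the real API:

#include <stdbool.h>
#include <stdio.h>

struct sketch_mic { unsigned int pin; int mux_idx; };

/* stand-in for reading jack presence on the external mic pin */
static bool jack_present(unsigned int pin)
{
	(void)pin;
	return true;
}

/* pick the capture mux index from jack state, as the common code now does */
static void auto_mic_select(const struct sketch_mic *ext,
			    const struct sketch_mic *internal)
{
	int idx = jack_present(ext->pin) ? ext->mux_idx : internal->mux_idx;
	printf("select capture source index %d\n", idx);
}

int main(void)
{
	struct sketch_mic ext = { 0x18, 0 };	/* pins as in the setup above */
	struct sketch_mic internal = { 0x19, 1 };
	auto_mic_select(&ext, &internal);
	return 0;
}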
static struct hda_verb alc888_asus_eee1601_verbs[] = {
@@ -9033,25 +8675,44 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res)
alc889A_mb31_automute(codec);
}
+
#ifdef CONFIG_SND_HDA_POWER_SAVE
-#define alc883_loopbacks alc880_loopbacks
+#define alc882_loopbacks alc880_loopbacks
#endif
/* pcm configuration: identical with ALC880 */
-#define alc883_pcm_analog_playback alc880_pcm_analog_playback
-#define alc883_pcm_analog_capture alc880_pcm_analog_capture
-#define alc883_pcm_analog_alt_capture alc880_pcm_analog_alt_capture
-#define alc883_pcm_digital_playback alc880_pcm_digital_playback
-#define alc883_pcm_digital_capture alc880_pcm_digital_capture
+#define alc882_pcm_analog_playback alc880_pcm_analog_playback
+#define alc882_pcm_analog_capture alc880_pcm_analog_capture
+#define alc882_pcm_digital_playback alc880_pcm_digital_playback
+#define alc882_pcm_digital_capture alc880_pcm_digital_capture
+
+static hda_nid_t alc883_slave_dig_outs[] = {
+ ALC1200_DIGOUT_NID, 0,
+};
+
+static hda_nid_t alc1200_slave_dig_outs[] = {
+ ALC883_DIGOUT_NID, 0,
+};
/*
* configuration and preset
*/
-static const char *alc883_models[ALC883_MODEL_LAST] = {
- [ALC883_3ST_2ch_DIG] = "3stack-dig",
+static const char *alc882_models[ALC882_MODEL_LAST] = {
+ [ALC882_3ST_DIG] = "3stack-dig",
+ [ALC882_6ST_DIG] = "6stack-dig",
+ [ALC882_ARIMA] = "arima",
+ [ALC882_W2JC] = "w2jc",
+ [ALC882_TARGA] = "targa",
+ [ALC882_ASUS_A7J] = "asus-a7j",
+ [ALC882_ASUS_A7M] = "asus-a7m",
+ [ALC885_MACPRO] = "macpro",
+ [ALC885_MB5] = "mb5",
+ [ALC885_MBP3] = "mbp3",
+ [ALC885_IMAC24] = "imac24",
+ [ALC883_3ST_2ch_DIG] = "3stack-2ch-dig",
[ALC883_3ST_6ch_DIG] = "3stack-6ch-dig",
[ALC883_3ST_6ch] = "3stack-6ch",
- [ALC883_6ST_DIG] = "6stack-dig",
+ [ALC883_6ST_DIG] = "alc883-6stack-dig",
[ALC883_TARGA_DIG] = "targa-dig",
[ALC883_TARGA_2ch_DIG] = "targa-2ch-dig",
[ALC883_TARGA_8ch_DIG] = "targa-8ch-dig",
@@ -9060,6 +8721,7 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
[ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g",
[ALC888_ACER_ASPIRE_6530G] = "acer-aspire-6530g",
[ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g",
+ [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g",
[ALC883_MEDION] = "medion",
[ALC883_MEDION_MD2] = "medion-md2",
[ALC883_LAPTOP_EAPD] = "laptop-eapd",
@@ -9071,18 +8733,22 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
[ALC888_3ST_HP] = "3stack-hp",
[ALC888_6ST_DELL] = "6stack-dell",
[ALC883_MITAC] = "mitac",
+ [ALC883_CLEVO_M540R] = "clevo-m540r",
[ALC883_CLEVO_M720] = "clevo-m720",
[ALC883_FUJITSU_PI2515] = "fujitsu-pi2515",
[ALC888_FUJITSU_XA3530] = "fujitsu-xa3530",
[ALC883_3ST_6ch_INTEL] = "3stack-6ch-intel",
+ [ALC889A_INTEL] = "intel-alc889a",
+ [ALC889_INTEL] = "intel-x58",
[ALC1200_ASUS_P5Q] = "asus-p5q",
[ALC889A_MB31] = "mb31",
[ALC883_SONY_VAIO_TT] = "sony-vaio-tt",
- [ALC883_AUTO] = "auto",
+ [ALC882_AUTO] = "auto",
};
-static struct snd_pci_quirk alc883_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC883_3ST_6ch_DIG),
+static struct snd_pci_quirk alc882_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC882_6ST_DIG),
+
SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_ACER_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_ACER_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_ACER_ASPIRE),
@@ -9097,40 +8763,56 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
ALC888_ACER_ASPIRE_8930G),
SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
ALC888_ACER_ASPIRE_8930G),
- SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO),
- SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO),
+ SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC882_AUTO),
+ SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC882_AUTO),
SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
ALC888_ACER_ASPIRE_6530G),
SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
ALC888_ACER_ASPIRE_6530G),
+ SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
+ ALC888_ACER_ASPIRE_7730G),
/* default Acer -- disabled as it causes more problems.
* model=auto should work fine now
*/
/* SND_PCI_QUIRK_VENDOR(0x1025, "Acer laptop", ALC883_ACER), */
+
SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL),
+
SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x103c, 0x2a4f, "HP Samba", ALC888_3ST_HP),
SND_PCI_QUIRK(0x103c, 0x2a60, "HP Lucknow", ALC888_3ST_HP),
SND_PCI_QUIRK(0x103c, 0x2a61, "HP Nettle", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x103c, 0x2a66, "HP Acacia", ALC888_3ST_HP),
SND_PCI_QUIRK(0x103c, 0x2a72, "HP Educ.ar", ALC888_3ST_HP),
+
+ SND_PCI_QUIRK(0x1043, 0x060d, "Asus A7J", ALC882_ASUS_A7J),
+ SND_PCI_QUIRK(0x1043, 0x1243, "Asus A7J", ALC882_ASUS_A7J),
+ SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_ASUS_A7M),
SND_PCI_QUIRK(0x1043, 0x1873, "Asus M90V", ALC888_ASUS_M90V),
+ SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_W2JC),
+ SND_PCI_QUIRK(0x1043, 0x817f, "Asus P5LD2", ALC882_6ST_DIG),
+ SND_PCI_QUIRK(0x1043, 0x81d8, "Asus P5WD", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1043, 0x8249, "Asus M2A-VM HDMI", ALC883_3ST_6ch_DIG),
SND_PCI_QUIRK(0x1043, 0x8284, "Asus Z37E", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1043, 0x82fe, "Asus P5Q-EM HDMI", ALC1200_ASUS_P5Q),
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_ASUS_EEE1601),
+
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC883_SONY_VAIO_TT),
SND_PCI_QUIRK(0x105b, 0x0ce8, "Foxconn P35AX-S", ALC883_6ST_DIG),
- SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC883_6ST_DIG),
+ SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1071, 0x8227, "Mitac 82801H", ALC883_MITAC),
SND_PCI_QUIRK(0x1071, 0x8253, "Mitac 8252d", ALC883_MITAC),
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD),
SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL),
SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch),
- SND_PCI_QUIRK(0x1458, 0xa002, "MSI", ALC883_6ST_DIG),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
+
SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x0579, "MSI", ALC883_TARGA_2ch_DIG),
+ SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
SND_PCI_QUIRK(0x1462, 0x2fb3, "MSI", ALC883_TARGA_2ch_DIG),
+ SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x3729, "MSI S420", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x3783, "NEC S970", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x3b7f, "MSI", ALC883_TARGA_2ch_DIG),
@@ -9139,6 +8821,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
SND_PCI_QUIRK(0x1462, 0x3fc3, "MSI", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x3fcc, "MSI", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x3fdf, "MSI", ALC883_TARGA_DIG),
+ SND_PCI_QUIRK(0x1462, 0x42cd, "MSI", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x4314, "MSI", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x4319, "MSI", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x4324, "MSI", ALC883_TARGA_DIG),
@@ -9152,11 +8835,15 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG),
+ SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
+
SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
+ SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC883_LAPTOP_EAPD),
SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch),
+ /* SND_PCI_QUIRK(0x161f, 0x2054, "Arima W820", ALC882_ARIMA), */
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION),
SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1100, "FSC AMILO Xi/Pi25xx",
ALC883_FUJITSU_PI2515),
@@ -9171,24 +8858,186 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG),
SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
+
SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL),
SND_PCI_QUIRK(0x8086, 0x0002, "DG33FBC", ALC883_3ST_6ch_INTEL),
SND_PCI_QUIRK(0x8086, 0x2503, "82801H", ALC883_MITAC),
- SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL),
+ SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
+ SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
+ SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
- SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC883_SONY_VAIO_TT),
- {}
-};
-static hda_nid_t alc883_slave_dig_outs[] = {
- ALC1200_DIGOUT_NID, 0,
+ {}
};
-static hda_nid_t alc1200_slave_dig_outs[] = {
- ALC883_DIGOUT_NID, 0,
+/* codec SSID table for Intel Mac */
+static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_MACPRO),
+ SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
+ SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
+ SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
+ SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
+ SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
+ SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC885_MB5),
+ /* FIXME: HP jack sense does not seem to work on the MBP 5,1, so there is
+ * apparently no perfect solution yet
+ */
+ SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC885_MB5),
+ {} /* terminator */
};
-static struct alc_config_preset alc883_presets[] = {
+static struct alc_config_preset alc882_presets[] = {
+ [ALC882_3ST_DIG] = {
+ .mixers = { alc882_base_mixer },
+ .init_verbs = { alc882_base_init_verbs,
+ alc882_adc1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
+ .channel_mode = alc882_ch_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ },
+ [ALC882_6ST_DIG] = {
+ .mixers = { alc882_base_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs,
+ alc882_adc1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
+ .channel_mode = alc882_sixstack_modes,
+ .input_mux = &alc882_capture_source,
+ },
+ [ALC882_ARIMA] = {
+ .mixers = { alc882_base_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_eapd_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
+ .channel_mode = alc882_sixstack_modes,
+ .input_mux = &alc882_capture_source,
+ },
+ [ALC882_W2JC] = {
+ .mixers = { alc882_w2jc_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_eapd_verbs, alc880_gpio1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
+ .channel_mode = alc880_threestack_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ },
+ [ALC885_MBP3] = {
+ .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc885_mbp3_init_verbs,
+ alc880_gpio1_init_verbs },
+ .num_dacs = 2,
+ .dac_nids = alc882_dac_nids,
+ .hp_nid = 0x04,
+ .channel_mode = alc885_mbp_4ch_modes,
+ .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
+ .input_mux = &alc882_capture_source,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .setup = alc885_mbp3_setup,
+ .init_hook = alc_automute_amp,
+ },
+ [ALC885_MB5] = {
+ .mixers = { alc885_mb5_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc885_mb5_init_verbs,
+ alc880_gpio1_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .channel_mode = alc885_mb5_6ch_modes,
+ .num_channel_mode = ARRAY_SIZE(alc885_mb5_6ch_modes),
+ .input_mux = &mb5_capture_source,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ },
+ [ALC885_MACPRO] = {
+ .mixers = { alc882_macpro_mixer },
+ .init_verbs = { alc882_macpro_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
+ .channel_mode = alc882_ch_modes,
+ .input_mux = &alc882_capture_source,
+ .init_hook = alc885_macpro_init_hook,
+ },
+ [ALC885_IMAC24] = {
+ .mixers = { alc885_imac24_mixer },
+ .init_verbs = { alc885_imac24_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
+ .channel_mode = alc882_ch_modes,
+ .input_mux = &alc882_capture_source,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .setup = alc885_imac24_setup,
+ .init_hook = alc885_imac24_init_hook,
+ },
+ [ALC882_TARGA] = {
+ .mixers = { alc882_targa_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc880_gpio3_init_verbs, alc882_targa_verbs},
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
+ .adc_nids = alc882_adc_nids,
+ .capsrc_nids = alc882_capsrc_nids,
+ .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
+ .channel_mode = alc882_3ST_6ch_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ .unsol_event = alc882_targa_unsol_event,
+ .setup = alc882_targa_setup,
+ .init_hook = alc882_targa_automute,
+ },
+ [ALC882_ASUS_A7J] = {
+ .mixers = { alc882_asus_a7j_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_asus_a7j_verbs},
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
+ .adc_nids = alc882_adc_nids,
+ .capsrc_nids = alc882_capsrc_nids,
+ .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
+ .channel_mode = alc882_3ST_6ch_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ },
+ [ALC882_ASUS_A7M] = {
+ .mixers = { alc882_asus_a7m_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
+ alc882_eapd_verbs, alc880_gpio1_init_verbs,
+ alc882_asus_a7m_verbs },
+ .num_dacs = ARRAY_SIZE(alc882_dac_nids),
+ .dac_nids = alc882_dac_nids,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
+ .channel_mode = alc880_threestack_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc882_capture_source,
+ },
[ALC883_3ST_2ch_DIG] = {
.mixers = { alc883_3ST_2ch_mixer },
.init_verbs = { alc883_init_verbs },
@@ -9235,6 +9084,46 @@ static struct alc_config_preset alc883_presets[] = {
.need_dac_fix = 1,
.input_mux = &alc883_3stack_6ch_intel,
},
+ [ALC889A_INTEL] = {
+ .mixers = { alc885_8ch_intel_mixer, alc883_chmode_mixer },
+ .init_verbs = { alc885_init_verbs, alc885_init_input_verbs,
+ alc_hp15_unsol_verbs },
+ .num_dacs = ARRAY_SIZE(alc883_dac_nids),
+ .dac_nids = alc883_dac_nids,
+ .num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
+ .adc_nids = alc889_adc_nids,
+ .dig_out_nid = ALC883_DIGOUT_NID,
+ .dig_in_nid = ALC883_DIGIN_NID,
+ .slave_dig_outs = alc883_slave_dig_outs,
+ .num_channel_mode = ARRAY_SIZE(alc889_8ch_intel_modes),
+ .channel_mode = alc889_8ch_intel_modes,
+ .capsrc_nids = alc889_capsrc_nids,
+ .input_mux = &alc889_capture_source,
+ .setup = alc889_automute_setup,
+ .init_hook = alc_automute_amp,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .need_dac_fix = 1,
+ },
+ [ALC889_INTEL] = {
+ .mixers = { alc885_8ch_intel_mixer, alc883_chmode_mixer },
+ .init_verbs = { alc885_init_verbs, alc889_init_input_verbs,
+ alc889_eapd_verbs, alc_hp15_unsol_verbs},
+ .num_dacs = ARRAY_SIZE(alc883_dac_nids),
+ .dac_nids = alc883_dac_nids,
+ .num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
+ .adc_nids = alc889_adc_nids,
+ .dig_out_nid = ALC883_DIGOUT_NID,
+ .dig_in_nid = ALC883_DIGIN_NID,
+ .slave_dig_outs = alc883_slave_dig_outs,
+ .num_channel_mode = ARRAY_SIZE(alc889_8ch_intel_modes),
+ .channel_mode = alc889_8ch_intel_modes,
+ .capsrc_nids = alc889_capsrc_nids,
+ .input_mux = &alc889_capture_source,
+ .setup = alc889_automute_setup,
+ .init_hook = alc889_intel_init_hook,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .need_dac_fix = 1,
+ },
[ALC883_6ST_DIG] = {
.mixers = { alc883_base_mixer, alc883_chmode_mixer },
.init_verbs = { alc883_init_verbs },
@@ -9258,7 +9147,8 @@ static struct alc_config_preset alc883_presets[] = {
.need_dac_fix = 1,
.input_mux = &alc883_capture_source,
.unsol_event = alc883_targa_unsol_event,
- .init_hook = alc883_targa_init_hook,
+ .setup = alc882_targa_setup,
+ .init_hook = alc882_targa_automute,
},
[ALC883_TARGA_2ch_DIG] = {
.mixers = { alc883_targa_2ch_mixer},
@@ -9273,7 +9163,8 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
.unsol_event = alc883_targa_unsol_event,
- .init_hook = alc883_targa_init_hook,
+ .setup = alc882_targa_setup,
+ .init_hook = alc882_targa_automute,
},
[ALC883_TARGA_8ch_DIG] = {
.mixers = { alc883_base_mixer, alc883_chmode_mixer },
@@ -9291,7 +9182,8 @@ static struct alc_config_preset alc883_presets[] = {
.need_dac_fix = 1,
.input_mux = &alc883_capture_source,
.unsol_event = alc883_targa_unsol_event,
- .init_hook = alc883_targa_init_hook,
+ .setup = alc882_targa_setup,
+ .init_hook = alc882_targa_automute,
},
[ALC883_ACER] = {
.mixers = { alc883_base_mixer },
@@ -9317,7 +9209,8 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc883_acer_aspire_init_hook,
+ .setup = alc883_acer_aspire_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_ACER_ASPIRE_4930G] = {
.mixers = { alc888_base_mixer,
@@ -9337,7 +9230,8 @@ static struct alc_config_preset alc883_presets[] = {
ARRAY_SIZE(alc888_2_capture_sources),
.input_mux = alc888_2_capture_sources,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc888_acer_aspire_4930g_init_hook,
+ .setup = alc888_acer_aspire_4930g_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_ACER_ASPIRE_6530G] = {
.mixers = { alc888_acer_aspire_6530_mixer },
@@ -9355,7 +9249,8 @@ static struct alc_config_preset alc883_presets[] = {
ARRAY_SIZE(alc888_2_capture_sources),
.input_mux = alc888_acer_aspire_6530_sources,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc888_acer_aspire_6530g_init_hook,
+ .setup = alc888_acer_aspire_6530g_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_ACER_ASPIRE_8930G] = {
.mixers = { alc888_base_mixer,
@@ -9376,7 +9271,28 @@ static struct alc_config_preset alc883_presets[] = {
ARRAY_SIZE(alc889_capture_sources),
.input_mux = alc889_capture_sources,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc889_acer_aspire_8930g_init_hook,
+ .setup = alc889_acer_aspire_8930g_setup,
+ .init_hook = alc_automute_amp,
+ },
+ [ALC888_ACER_ASPIRE_7730G] = {
+ .mixers = { alc883_3ST_6ch_mixer,
+ alc883_chmode_mixer },
+ .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
+ alc888_acer_aspire_7730G_verbs },
+ .num_dacs = ARRAY_SIZE(alc883_dac_nids),
+ .dac_nids = alc883_dac_nids,
+ .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
+ .adc_nids = alc883_adc_nids_rev,
+ .capsrc_nids = alc883_capsrc_nids_rev,
+ .dig_out_nid = ALC883_DIGOUT_NID,
+ .num_channel_mode = ARRAY_SIZE(alc883_3ST_6ch_modes),
+ .channel_mode = alc883_3ST_6ch_modes,
+ .need_dac_fix = 1,
+ .const_channel_count = 6,
+ .input_mux = &alc883_capture_source,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .setup = alc888_acer_aspire_6530g_setup,
+ .init_hook = alc_automute_amp,
},
[ALC883_MEDION] = {
.mixers = { alc883_fivestack_mixer,
@@ -9401,7 +9317,8 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc883_medion_md2_init_hook,
+ .setup = alc883_medion_md2_setup,
+ .init_hook = alc_automute_amp,
},
[ALC883_LAPTOP_EAPD] = {
.mixers = { alc883_base_mixer },
@@ -9412,6 +9329,21 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
},
+ [ALC883_CLEVO_M540R] = {
+ .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
+ .init_verbs = { alc883_init_verbs, alc883_clevo_m540r_verbs },
+ .num_dacs = ARRAY_SIZE(alc883_dac_nids),
+ .dac_nids = alc883_dac_nids,
+ .dig_out_nid = ALC883_DIGOUT_NID,
+ .dig_in_nid = ALC883_DIGIN_NID,
+ .num_channel_mode = ARRAY_SIZE(alc883_3ST_6ch_clevo_modes),
+ .channel_mode = alc883_3ST_6ch_clevo_modes,
+ .need_dac_fix = 1,
+ .input_mux = &alc883_capture_source,
+ /* This machine has hardware HP auto-muting, so no software
+ * mute via an unsol event is needed
+ */
+ },
[ALC883_CLEVO_M720] = {
.mixers = { alc883_clevo_m720_mixer },
.init_verbs = { alc883_init_verbs, alc883_clevo_m720_verbs },
@@ -9422,6 +9354,7 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
.unsol_event = alc883_clevo_m720_unsol_event,
+ .setup = alc883_clevo_m720_setup,
.init_hook = alc883_clevo_m720_init_hook,
},
[ALC883_LENOVO_101E_2ch] = {
@@ -9447,7 +9380,8 @@ static struct alc_config_preset alc883_presets[] = {
.need_dac_fix = 1,
.input_mux = &alc883_lenovo_nb0763_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc883_medion_md2_init_hook,
+ .setup = alc883_medion_md2_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_LENOVO_MS7195_DIG] = {
.mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9472,7 +9406,8 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc883_haier_w66_init_hook,
+ .setup = alc883_haier_w66_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_3ST_HP] = {
.mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9484,7 +9419,8 @@ static struct alc_config_preset alc883_presets[] = {
.need_dac_fix = 1,
.input_mux = &alc883_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc888_3st_hp_init_hook,
+ .setup = alc888_3st_hp_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_6ST_DELL] = {
.mixers = { alc883_base_mixer, alc883_chmode_mixer },
@@ -9497,7 +9433,8 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_sixstack_modes,
.input_mux = &alc883_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc888_6st_dell_init_hook,
+ .setup = alc888_6st_dell_setup,
+ .init_hook = alc_automute_amp,
},
[ALC883_MITAC] = {
.mixers = { alc883_mitac_mixer },
@@ -9508,7 +9445,8 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc883_mitac_init_hook,
+ .setup = alc883_mitac_setup,
+ .init_hook = alc_automute_amp,
},
[ALC883_FUJITSU_PI2515] = {
.mixers = { alc883_2ch_fujitsu_pi2515_mixer },
@@ -9521,7 +9459,8 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_fujitsu_pi2515_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc883_2ch_fujitsu_pi2515_init_hook,
+ .setup = alc883_2ch_fujitsu_pi2515_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_FUJITSU_XA3530] = {
.mixers = { alc888_base_mixer, alc883_chmode_mixer },
@@ -9539,7 +9478,8 @@ static struct alc_config_preset alc883_presets[] = {
ARRAY_SIZE(alc888_2_capture_sources),
.input_mux = alc888_2_capture_sources,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc888_fujitsu_xa3530_init_hook,
+ .setup = alc888_fujitsu_xa3530_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_LENOVO_SKY] = {
.mixers = { alc888_lenovo_sky_mixer, alc883_chmode_mixer },
@@ -9552,7 +9492,8 @@ static struct alc_config_preset alc883_presets[] = {
.need_dac_fix = 1,
.input_mux = &alc883_lenovo_sky_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc888_lenovo_sky_init_hook,
+ .setup = alc888_lenovo_sky_setup,
+ .init_hook = alc_automute_amp,
},
[ALC888_ASUS_M90V] = {
.mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9565,8 +9506,9 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_6ch_modes,
.need_dac_fix = 1,
.input_mux = &alc883_fujitsu_pi2515_capture_source,
- .unsol_event = alc883_mode2_unsol_event,
- .init_hook = alc883_mode2_inithook,
+ .unsol_event = alc_sku_unsol_event,
+ .setup = alc883_mode2_setup,
+ .init_hook = alc_inithook,
},
[ALC888_ASUS_EEE1601] = {
.mixers = { alc883_asus_eee1601_mixer },
@@ -9619,15 +9561,45 @@ static struct alc_config_preset alc883_presets[] = {
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc883_vaiott_init_hook,
+ .setup = alc883_vaiott_setup,
+ .init_hook = alc_automute_amp,
},
};
/*
+ * Pin config fixes
+ */
+enum {
+ PINFIX_ABIT_AW9D_MAX
+};
+
+static struct alc_pincfg alc882_abit_aw9d_pinfix[] = {
+ { 0x15, 0x01080104 }, /* side */
+ { 0x16, 0x01011012 }, /* rear */
+ { 0x17, 0x01016011 }, /* clfe */
+ { }
+};
+
+static const struct alc_pincfg *alc882_pin_fixes[] = {
+ [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix,
+};
+
+static struct snd_pci_quirk alc882_pinfix_tbl[] = {
+ SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
+ {}
+};
+
+/*
* BIOS auto configuration
*/
-static void alc883_auto_set_output_and_unmute(struct hda_codec *codec,
+static int alc882_auto_create_input_ctls(struct hda_codec *codec,
+ const struct auto_pin_cfg *cfg)
+{
+ return alc_auto_create_input_ctls(codec, cfg, 0x0b, 0x23, 0x22);
+}
+
+static void alc882_auto_set_output_and_unmute(struct hda_codec *codec,
hda_nid_t nid, int pin_type,
int dac_idx)
{
@@ -9644,7 +9616,7 @@ static void alc883_auto_set_output_and_unmute(struct hda_codec *codec,
}
-static void alc883_auto_init_multi_out(struct hda_codec *codec)
+static void alc882_auto_init_multi_out(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
int i;
@@ -9653,12 +9625,12 @@ static void alc883_auto_init_multi_out(struct hda_codec *codec)
hda_nid_t nid = spec->autocfg.line_out_pins[i];
int pin_type = get_pin_type(spec->autocfg.line_out_type);
if (nid)
- alc883_auto_set_output_and_unmute(codec, nid, pin_type,
+ alc882_auto_set_output_and_unmute(codec, nid, pin_type,
i);
}
}
-static void alc883_auto_init_hp_out(struct hda_codec *codec)
+static void alc882_auto_init_hp_out(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
hda_nid_t pin;
@@ -9666,91 +9638,191 @@ static void alc883_auto_init_hp_out(struct hda_codec *codec)
pin = spec->autocfg.hp_pins[0];
if (pin) /* connect to front */
/* use dac 0 */
- alc883_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
+ alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
pin = spec->autocfg.speaker_pins[0];
if (pin)
- alc883_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
+ alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
}
-#define alc883_is_input_pin(nid) alc880_is_input_pin(nid)
-#define ALC883_PIN_CD_NID ALC880_PIN_CD_NID
-
-static void alc883_auto_init_analog_input(struct hda_codec *codec)
+static void alc882_auto_init_analog_input(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
int i;
for (i = 0; i < AUTO_PIN_LAST; i++) {
hda_nid_t nid = spec->autocfg.input_pins[i];
- if (alc883_is_input_pin(nid)) {
- alc_set_input_pin(codec, nid, i);
- if (nid != ALC883_PIN_CD_NID &&
- (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
+ if (!nid)
+ continue;
+ alc_set_input_pin(codec, nid, i);
+ if (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE,
+ AMP_OUT_MUTE);
+ }
+}
+
+static void alc882_auto_init_input_src(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ int c;
+
+ for (c = 0; c < spec->num_adc_nids; c++) {
+ hda_nid_t conn_list[HDA_MAX_NUM_INPUTS];
+ hda_nid_t nid = spec->capsrc_nids[c];
+ unsigned int mux_idx;
+ const struct hda_input_mux *imux;
+ int conns, mute, idx, item;
+
+ conns = snd_hda_get_connections(codec, nid, conn_list,
+ ARRAY_SIZE(conn_list));
+ if (conns < 0)
+ continue;
+ mux_idx = c >= spec->num_mux_defs ? 0 : c;
+ imux = &spec->input_mux[mux_idx];
+ for (idx = 0; idx < conns; idx++) {
+ /* if the current connection is the selected one,
+ * unmute it by default; otherwise mute it
+ */
+ mute = AMP_IN_MUTE(idx);
+ for (item = 0; item < imux->num_items; item++) {
+ if (imux->items[item].index == idx) {
+ if (spec->cur_mux[c] == item)
+ mute = AMP_IN_UNMUTE(idx);
+ break;
+ }
+ }
+ /* check whether we have a selector or a mixer;
+ * we could check the widget type instead, but just
+ * checking for Amp-In presence is enough (a mixer
+ * without an amp-in means something is wrong: either
+ * this function shouldn't be used or the capsrc nid is wrong)
+ */
+ if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
snd_hda_codec_write(codec, nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_OUT_MUTE);
+ mute);
+ else if (mute != AMP_IN_MUTE(idx))
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ idx);
}
}
}
-#define alc883_auto_init_input_src alc882_auto_init_input_src
+/* add mic boosts if needed */
+static int alc_auto_add_mic_boost(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ int err;
+ hda_nid_t nid;
+
+ nid = spec->autocfg.input_pins[AUTO_PIN_MIC];
+ if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
+ err = add_control(spec, ALC_CTL_WIDGET_VOL,
+ "Mic Boost",
+ HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
+ if (err < 0)
+ return err;
+ }
+ nid = spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC];
+ if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
+ err = add_control(spec, ALC_CTL_WIDGET_VOL,
+ "Front Mic Boost",
+ HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
/* almost identical with ALC880 parser... */
-static int alc883_parse_auto_config(struct hda_codec *codec)
+static int alc882_parse_auto_config(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- int err = alc880_parse_auto_config(codec);
- struct auto_pin_cfg *cfg = &spec->autocfg;
- int i;
+ static hda_nid_t alc882_ignore[] = { 0x1d, 0 };
+ int i, err;
+ err = snd_hda_parse_pin_def_config(codec, &spec->autocfg,
+ alc882_ignore);
if (err < 0)
return err;
- else if (!err)
- return 0; /* no config found */
+ if (!spec->autocfg.line_outs)
+ return 0; /* can't find valid BIOS pin config */
- err = alc_auto_add_mic_boost(codec);
+ err = alc880_auto_fill_dac_nids(spec, &spec->autocfg);
+ if (err < 0)
+ return err;
+ err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
+ if (err < 0)
+ return err;
+ err = alc880_auto_create_extra_out(spec,
+ spec->autocfg.speaker_pins[0],
+ "Speaker");
+ if (err < 0)
+ return err;
+ err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
+ "Headphone");
+ if (err < 0)
+ return err;
+ err = alc882_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
- /* hack - override the init verbs */
- spec->init_verbs[0] = alc883_auto_init_verbs;
+ spec->multiout.max_channels = spec->multiout.num_dacs * 2;
- /* setup input_mux for ALC889 */
- if (codec->vendor_id == 0x10ec0889) {
- /* digital-mic input pin is excluded in alc880_auto_create..()
- * because it's under 0x18
- */
- if (cfg->input_pins[AUTO_PIN_MIC] == 0x12 ||
- cfg->input_pins[AUTO_PIN_FRONT_MIC] == 0x12) {
- struct hda_input_mux *imux = &spec->private_imux[0];
- for (i = 1; i < 3; i++)
- memcpy(&spec->private_imux[i],
- &spec->private_imux[0],
- sizeof(spec->private_imux[0]));
- imux->items[imux->num_items].label = "Int DMic";
- imux->items[imux->num_items].index = 0x0b;
- imux->num_items++;
- spec->num_mux_defs = 3;
- spec->input_mux = spec->private_imux;
+ /* check multiple SPDIF-out (for recent codecs) */
+ for (i = 0; i < spec->autocfg.dig_outs; i++) {
+ hda_nid_t dig_nid;
+ err = snd_hda_get_connections(codec,
+ spec->autocfg.dig_out_pins[i],
+ &dig_nid, 1);
+ if (err < 0)
+ continue;
+ if (!i)
+ spec->multiout.dig_out_nid = dig_nid;
+ else {
+ spec->multiout.slave_dig_outs = spec->slave_dig_outs;
+ spec->slave_dig_outs[i - 1] = dig_nid;
+ if (i == ARRAY_SIZE(spec->slave_dig_outs) - 1)
+ break;
}
}
+ if (spec->autocfg.dig_in_pin)
+ spec->dig_in_nid = ALC880_DIGIN_NID;
+
+ if (spec->kctls.list)
+ add_mixer(spec, spec->kctls.list);
+
+ add_verb(spec, alc883_auto_init_verbs);
+ /* if ADC 0x07 is available, initialize it, too */
+ if (get_wcaps_type(get_wcaps(codec, 0x07)) == AC_WID_AUD_IN)
+ add_verb(spec, alc882_adc1_init_verbs);
+
+ spec->num_mux_defs = 1;
+ spec->input_mux = &spec->private_imux[0];
+
+ alc_ssid_check(codec, 0x15, 0x1b, 0x14);
+
+ err = alc_auto_add_mic_boost(codec);
+ if (err < 0)
+ return err;
return 1; /* config found */
}
/* additional initialization for auto-configuration model */
-static void alc883_auto_init(struct hda_codec *codec)
+static void alc882_auto_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- alc883_auto_init_multi_out(codec);
- alc883_auto_init_hp_out(codec);
- alc883_auto_init_analog_input(codec);
- alc883_auto_init_input_src(codec);
+ alc882_auto_init_multi_out(codec);
+ alc882_auto_init_hp_out(codec);
+ alc882_auto_init_analog_input(codec);
+ alc882_auto_init_input_src(codec);
if (spec->unsol_event)
alc_inithook(codec);
}
-static int patch_alc883(struct hda_codec *codec)
+static int patch_alc882(struct hda_codec *codec)
{
struct alc_spec *spec;
int err, board_config;
@@ -9761,28 +9833,35 @@ static int patch_alc883(struct hda_codec *codec)
codec->spec = spec;
- alc_fix_pll_init(codec, 0x20, 0x0a, 10);
+ switch (codec->vendor_id) {
+ case 0x10ec0882:
+ case 0x10ec0885:
+ break;
+ default:
+ /* ALC883 and variants */
+ alc_fix_pll_init(codec, 0x20, 0x0a, 10);
+ break;
+ }
- board_config = snd_hda_check_board_config(codec, ALC883_MODEL_LAST,
- alc883_models,
- alc883_cfg_tbl);
- if (board_config < 0 || board_config >= ALC883_MODEL_LAST) {
- /* Pick up systems that don't supply PCI SSID */
- switch (codec->subsystem_id) {
- case 0x106b3600: /* Macbook 3.1 */
- board_config = ALC889A_MB31;
- break;
- default:
- printk(KERN_INFO
- "hda_codec: Unknown model for %s, trying "
- "auto-probe from BIOS...\n", codec->chip_name);
- board_config = ALC883_AUTO;
- }
+ board_config = snd_hda_check_board_config(codec, ALC882_MODEL_LAST,
+ alc882_models,
+ alc882_cfg_tbl);
+
+ if (board_config < 0 || board_config >= ALC882_MODEL_LAST)
+ board_config = snd_hda_check_board_codec_sid_config(codec,
+ ALC882_MODEL_LAST, alc882_models, alc882_ssid_cfg_tbl);
+
+ if (board_config < 0 || board_config >= ALC882_MODEL_LAST) {
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
+ board_config = ALC882_AUTO;
}
- if (board_config == ALC883_AUTO) {
+ alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes);
+
+ if (board_config == ALC882_AUTO) {
/* automatic parse from the BIOS config */
- err = alc883_parse_auto_config(codec);
+ err = alc882_parse_auto_config(codec);
if (err < 0) {
alc_free(codec);
return err;
@@ -9790,7 +9869,7 @@ static int patch_alc883(struct hda_codec *codec)
printk(KERN_INFO
"hda_codec: Cannot set up configuration "
"from BIOS. Using base mode...\n");
- board_config = ALC883_3ST_2ch_DIG;
+ board_config = ALC882_3ST_DIG;
}
}
@@ -9800,63 +9879,61 @@ static int patch_alc883(struct hda_codec *codec)
return err;
}
- if (board_config != ALC883_AUTO)
- setup_preset(spec, &alc883_presets[board_config]);
+ if (board_config != ALC882_AUTO)
+ setup_preset(codec, &alc882_presets[board_config]);
- switch (codec->vendor_id) {
- case 0x10ec0888:
- if (!spec->num_adc_nids) {
- spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids);
- spec->adc_nids = alc883_adc_nids;
- }
- if (!spec->capsrc_nids)
- spec->capsrc_nids = alc883_capsrc_nids;
+ spec->stream_analog_playback = &alc882_pcm_analog_playback;
+ spec->stream_analog_capture = &alc882_pcm_analog_capture;
+ /* FIXME: setup DAC5 */
+ /*spec->stream_analog_alt_playback = &alc880_pcm_analog_alt_playback;*/
+ spec->stream_analog_alt_capture = &alc880_pcm_analog_alt_capture;
+
+ spec->stream_digital_playback = &alc882_pcm_digital_playback;
+ spec->stream_digital_capture = &alc882_pcm_digital_capture;
+
+ if (codec->vendor_id == 0x10ec0888)
spec->init_amp = ALC_INIT_DEFAULT; /* always initialize */
- break;
- case 0x10ec0889:
- if (!spec->num_adc_nids) {
- spec->num_adc_nids = ARRAY_SIZE(alc889_adc_nids);
- spec->adc_nids = alc889_adc_nids;
- }
- if (!spec->capsrc_nids)
- spec->capsrc_nids = alc889_capsrc_nids;
- break;
- default:
- if (!spec->num_adc_nids) {
- spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids);
- spec->adc_nids = alc883_adc_nids;
+
+ if (!spec->adc_nids && spec->input_mux) {
+ int i;
+ spec->num_adc_nids = 0;
+ for (i = 0; i < ARRAY_SIZE(alc882_adc_nids); i++) {
+ hda_nid_t cap;
+ hda_nid_t nid = alc882_adc_nids[i];
+ unsigned int wcap = get_wcaps(codec, nid);
+ /* get type */
+ wcap = get_wcaps_type(wcap);
+ if (wcap != AC_WID_AUD_IN)
+ continue;
+ spec->private_adc_nids[spec->num_adc_nids] = nid;
+ err = snd_hda_get_connections(codec, nid, &cap, 1);
+ if (err < 0)
+ continue;
+ spec->private_capsrc_nids[spec->num_adc_nids] = cap;
+ spec->num_adc_nids++;
}
- if (!spec->capsrc_nids)
- spec->capsrc_nids = alc883_capsrc_nids;
- break;
+ spec->adc_nids = spec->private_adc_nids;
+ spec->capsrc_nids = spec->private_capsrc_nids;
}
- spec->stream_analog_playback = &alc883_pcm_analog_playback;
- spec->stream_analog_capture = &alc883_pcm_analog_capture;
- spec->stream_analog_alt_capture = &alc883_pcm_analog_alt_capture;
-
- spec->stream_digital_playback = &alc883_pcm_digital_playback;
- spec->stream_digital_capture = &alc883_pcm_digital_capture;
-
- if (!spec->cap_mixer)
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
spec->vmaster_nid = 0x0c;
codec->patch_ops = alc_patch_ops;
- if (board_config == ALC883_AUTO)
- spec->init_hook = alc883_auto_init;
-
+ if (board_config == ALC882_AUTO)
+ spec->init_hook = alc882_auto_init;
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (!spec->loopback.amplist)
- spec->loopback.amplist = alc883_loopbacks;
+ spec->loopback.amplist = alc882_loopbacks;
#endif
codec->proc_widget_hook = print_realtek_coef;
return 0;
}
+
/*
* ALC262 support
*/
@@ -10032,13 +10109,12 @@ static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = {
};
/* mute/unmute internal speaker according to the hp jack and mute state */
-static void alc262_hp_t5735_init_hook(struct hda_codec *codec)
+static void alc262_hp_t5735_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x0c; /* HACK: not actually a pin */
- alc_automute_amp(codec);
}
static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = {
@@ -10195,22 +10271,20 @@ static void alc262_hippo_unsol_event(struct hda_codec *codec, unsigned int res)
alc262_hippo_automute(codec);
}
-static void alc262_hippo_init_hook(struct hda_codec *codec)
+static void alc262_hippo_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
- alc262_hippo_automute(codec);
}
-static void alc262_hippo1_init_hook(struct hda_codec *codec)
+static void alc262_hippo1_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x1b;
spec->autocfg.speaker_pins[0] = 0x14;
- alc262_hippo_automute(codec);
}
@@ -10267,13 +10341,12 @@ static struct hda_verb alc262_tyan_verbs[] = {
};
/* unsolicited event for HP jack sensing */
-static void alc262_tyan_init_hook(struct hda_codec *codec)
+static void alc262_tyan_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x1b;
spec->autocfg.speaker_pins[0] = 0x15;
- alc_automute_amp(codec);
}
@@ -10365,12 +10438,6 @@ static struct hda_verb alc262_eapd_verbs[] = {
{ }
};
-static struct hda_verb alc262_hippo_unsol_verbs[] = {
- {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {}
-};
-
static struct hda_verb alc262_hippo1_unsol_verbs[] = {
{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0},
{0x1b, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -10391,14 +10458,6 @@ static struct hda_verb alc262_sony_unsol_verbs[] = {
{}
};
-static struct hda_input_mux alc262_dmic_capture_source = {
- .num_items = 2,
- .items = {
- { "Int DMic", 0x9 },
- { "Mic", 0x0 },
- },
-};
-
static struct snd_kcontrol_new alc262_toshiba_s06_mixer[] = {
HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
@@ -10420,35 +10479,17 @@ static struct hda_verb alc262_toshiba_s06_verbs[] = {
{}
};
-static void alc262_dmic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, 0x22, 0,
- AC_VERB_SET_CONNECT_SEL, present ? 0x0 : 0x09);
-}
-
-
-/* unsolicited event for HP jack sensing */
-static void alc262_toshiba_s06_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- if ((res >> 26) == ALC880_MIC_EVENT)
- alc262_dmic_automute(codec);
- else
- alc_sku_unsol_event(codec, res);
-}
-
-static void alc262_toshiba_s06_init_hook(struct hda_codec *codec)
+static void alc262_toshiba_s06_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
- alc_automute_pin(codec);
- alc262_dmic_automute(codec);
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x12;
+ spec->int_mic.mux_idx = 9;
+ spec->auto_mic = 1;
}
/*
@@ -10866,104 +10907,111 @@ static struct snd_kcontrol_new alc262_ultra_capture_mixer[] = {
{ } /* end */
};
+/* We use two mixers depending on the output pin; 0x16 is a mono output
+ * and thus it's bound to a different mixer.
+ * This function returns which mixer amp should be used.
+ */
+static int alc262_check_volbit(hda_nid_t nid)
+{
+ if (!nid)
+ return 0;
+ else if (nid == 0x16)
+ return 2;
+ else
+ return 1;
+}
+
+static int alc262_add_out_vol_ctl(struct alc_spec *spec, hda_nid_t nid,
+ const char *pfx, int *vbits)
+{
+ char name[32];
+ unsigned long val;
+ int vbit;
+
+ vbit = alc262_check_volbit(nid);
+ if (!vbit)
+ return 0;
+ if (*vbits & vbit) /* a volume control for this mixer already there */
+ return 0;
+ *vbits |= vbit;
+ snprintf(name, sizeof(name), "%s Playback Volume", pfx);
+ if (vbit == 2)
+ val = HDA_COMPOSE_AMP_VAL(0x0e, 2, 0, HDA_OUTPUT);
+ else
+ val = HDA_COMPOSE_AMP_VAL(0x0c, 3, 0, HDA_OUTPUT);
+ return add_control(spec, ALC_CTL_WIDGET_VOL, name, val);
+}
+
+static int alc262_add_out_sw_ctl(struct alc_spec *spec, hda_nid_t nid,
+ const char *pfx)
+{
+ char name[32];
+ unsigned long val;
+
+ if (!nid)
+ return 0;
+ snprintf(name, sizeof(name), "%s Playback Switch", pfx);
+ if (nid == 0x16)
+ val = HDA_COMPOSE_AMP_VAL(nid, 2, 0, HDA_OUTPUT);
+ else
+ val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+ return add_control(spec, ALC_CTL_WIDGET_MUTE, name, val);
+}
+
/* add playback controls from the parsed DAC table */
static int alc262_auto_create_multi_out_ctls(struct alc_spec *spec,
const struct auto_pin_cfg *cfg)
{
- hda_nid_t nid;
+ const char *pfx;
+ int vbits;
int err;
spec->multiout.num_dacs = 1; /* only use one dac */
spec->multiout.dac_nids = spec->private_dac_nids;
spec->multiout.dac_nids[0] = 2;
- nid = cfg->line_out_pins[0];
- if (nid) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Front Playback Volume",
- HDA_COMPOSE_AMP_VAL(0x0c, 3, 0, HDA_OUTPUT));
- if (err < 0)
- return err;
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Front Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT));
- if (err < 0)
- return err;
- }
-
- nid = cfg->speaker_pins[0];
- if (nid) {
- if (nid == 0x16) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Speaker Playback Volume",
- HDA_COMPOSE_AMP_VAL(0x0e, 2, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Speaker Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 2, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- } else {
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Speaker Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- }
- }
- nid = cfg->hp_pins[0];
- if (nid) {
- /* spec->multiout.hp_nid = 2; */
- if (nid == 0x16) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Headphone Playback Volume",
- HDA_COMPOSE_AMP_VAL(0x0e, 2, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Headphone Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 2, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- } else {
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Headphone Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- }
- }
- return 0;
-}
-
-static int alc262_auto_create_analog_input_ctls(struct alc_spec *spec,
- const struct auto_pin_cfg *cfg)
-{
- int err;
+ if (!cfg->speaker_pins[0] && !cfg->hp_pins[0])
+ pfx = "Master";
+ else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ pfx = "Speaker";
+ else
+ pfx = "Front";
+ err = alc262_add_out_sw_ctl(spec, cfg->line_out_pins[0], pfx);
+ if (err < 0)
+ return err;
+ err = alc262_add_out_sw_ctl(spec, cfg->speaker_pins[0], "Speaker");
+ if (err < 0)
+ return err;
+ err = alc262_add_out_sw_ctl(spec, cfg->hp_pins[0], "Headphone");
+ if (err < 0)
+ return err;
- err = alc880_auto_create_analog_input_ctls(spec, cfg);
+ vbits = alc262_check_volbit(cfg->line_out_pins[0]) |
+ alc262_check_volbit(cfg->speaker_pins[0]) |
+ alc262_check_volbit(cfg->hp_pins[0]);
+ if (vbits == 1 || vbits == 2)
+ pfx = "Master"; /* only one mixer is used */
+ else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ pfx = "Speaker";
+ else
+ pfx = "Front";
+ vbits = 0;
+ err = alc262_add_out_vol_ctl(spec, cfg->line_out_pins[0], pfx, &vbits);
+ if (err < 0)
+ return err;
+ err = alc262_add_out_vol_ctl(spec, cfg->speaker_pins[0], "Speaker",
+ &vbits);
+ if (err < 0)
+ return err;
+ err = alc262_add_out_vol_ctl(spec, cfg->hp_pins[0], "Headphone",
+ &vbits);
if (err < 0)
return err;
- /* digital-mic input pin is excluded in alc880_auto_create..()
- * because it's under 0x18
- */
- if (cfg->input_pins[AUTO_PIN_MIC] == 0x12 ||
- cfg->input_pins[AUTO_PIN_FRONT_MIC] == 0x12) {
- struct hda_input_mux *imux = &spec->private_imux[0];
- imux->items[imux->num_items].label = "Int Mic";
- imux->items[imux->num_items].index = 0x09;
- imux->num_items++;
- }
return 0;
}
+#define alc262_auto_create_input_ctls \
+ alc880_auto_create_input_ctls
/*
* generic initialization of ADC, input mixers and output mixers
@@ -11281,7 +11329,7 @@ static int alc262_parse_auto_config(struct hda_codec *codec)
err = alc262_auto_create_multi_out_ctls(spec, &spec->autocfg);
if (err < 0)
return err;
- err = alc262_auto_create_analog_input_ctls(spec, &spec->autocfg);
+ err = alc262_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -11412,7 +11460,7 @@ static struct alc_config_preset alc262_presets[] = {
},
[ALC262_HIPPO] = {
.mixers = { alc262_hippo_mixer },
- .init_verbs = { alc262_init_verbs, alc262_hippo_unsol_verbs},
+ .init_verbs = { alc262_init_verbs, alc_hp15_unsol_verbs},
.num_dacs = ARRAY_SIZE(alc262_dac_nids),
.dac_nids = alc262_dac_nids,
.hp_nid = 0x03,
@@ -11421,7 +11469,8 @@ static struct alc_config_preset alc262_presets[] = {
.channel_mode = alc262_modes,
.input_mux = &alc262_capture_source,
.unsol_event = alc262_hippo_unsol_event,
- .init_hook = alc262_hippo_init_hook,
+ .setup = alc262_hippo_setup,
+ .init_hook = alc262_hippo_automute,
},
[ALC262_HIPPO_1] = {
.mixers = { alc262_hippo1_mixer },
@@ -11434,7 +11483,8 @@ static struct alc_config_preset alc262_presets[] = {
.channel_mode = alc262_modes,
.input_mux = &alc262_capture_source,
.unsol_event = alc262_hippo_unsol_event,
- .init_hook = alc262_hippo1_init_hook,
+ .setup = alc262_hippo1_setup,
+ .init_hook = alc262_hippo_automute,
},
[ALC262_FUJITSU] = {
.mixers = { alc262_fujitsu_mixer },
@@ -11497,7 +11547,8 @@ static struct alc_config_preset alc262_presets[] = {
.channel_mode = alc262_modes,
.input_mux = &alc262_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc262_hp_t5735_init_hook,
+ .setup = alc262_hp_t5735_setup,
+ .init_hook = alc_automute_amp,
},
[ALC262_HP_RP5700] = {
.mixers = { alc262_hp_rp5700_mixer },
@@ -11528,11 +11579,13 @@ static struct alc_config_preset alc262_presets[] = {
.channel_mode = alc262_modes,
.input_mux = &alc262_capture_source,
.unsol_event = alc262_hippo_unsol_event,
- .init_hook = alc262_hippo_init_hook,
+ .setup = alc262_hippo_setup,
+ .init_hook = alc262_hippo_automute,
},
[ALC262_BENQ_T31] = {
.mixers = { alc262_benq_t31_mixer },
- .init_verbs = { alc262_init_verbs, alc262_benq_t31_EAPD_verbs, alc262_hippo_unsol_verbs },
+ .init_verbs = { alc262_init_verbs, alc262_benq_t31_EAPD_verbs,
+ alc_hp15_unsol_verbs },
.num_dacs = ARRAY_SIZE(alc262_dac_nids),
.dac_nids = alc262_dac_nids,
.hp_nid = 0x03,
@@ -11540,7 +11593,8 @@ static struct alc_config_preset alc262_presets[] = {
.channel_mode = alc262_modes,
.input_mux = &alc262_capture_source,
.unsol_event = alc262_hippo_unsol_event,
- .init_hook = alc262_hippo_init_hook,
+ .setup = alc262_hippo_setup,
+ .init_hook = alc262_hippo_automute,
},
[ALC262_ULTRA] = {
.mixers = { alc262_ultra_mixer },
@@ -11592,9 +11646,9 @@ static struct alc_config_preset alc262_presets[] = {
.dig_out_nid = ALC262_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc262_modes),
.channel_mode = alc262_modes,
- .input_mux = &alc262_dmic_capture_source,
- .unsol_event = alc262_toshiba_s06_unsol_event,
- .init_hook = alc262_toshiba_s06_init_hook,
+ .unsol_event = alc_sku_unsol_event,
+ .setup = alc262_toshiba_s06_setup,
+ .init_hook = alc_inithook,
},
[ALC262_TOSHIBA_RX1] = {
.mixers = { alc262_toshiba_rx1_mixer },
@@ -11606,7 +11660,8 @@ static struct alc_config_preset alc262_presets[] = {
.channel_mode = alc262_modes,
.input_mux = &alc262_capture_source,
.unsol_event = alc262_hippo_unsol_event,
- .init_hook = alc262_hippo_init_hook,
+ .setup = alc262_hippo_setup,
+ .init_hook = alc262_hippo_automute,
},
[ALC262_TYAN] = {
.mixers = { alc262_tyan_mixer },
@@ -11619,7 +11674,8 @@ static struct alc_config_preset alc262_presets[] = {
.channel_mode = alc262_modes,
.input_mux = &alc262_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc262_tyan_init_hook,
+ .setup = alc262_tyan_setup,
+ .init_hook = alc_automute_amp,
},
};
@@ -11654,8 +11710,8 @@ static int patch_alc262(struct hda_codec *codec)
alc262_cfg_tbl);
if (board_config < 0) {
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n", codec->chip_name);
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
board_config = ALC262_AUTO;
}
@@ -11682,7 +11738,7 @@ static int patch_alc262(struct hda_codec *codec)
}
if (board_config != ALC262_AUTO)
- setup_preset(spec, &alc262_presets[board_config]);
+ setup_preset(codec, &alc262_presets[board_config]);
spec->stream_analog_playback = &alc262_pcm_analog_playback;
spec->stream_analog_capture = &alc262_pcm_analog_capture;
@@ -11708,7 +11764,7 @@ static int patch_alc262(struct hda_codec *codec)
unsigned int wcap = get_wcaps(codec, 0x07);
/* get type */
- wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ wcap = get_wcaps_type(wcap);
if (wcap != AC_WID_AUD_IN) {
spec->adc_nids = alc262_adc_nids_alt;
spec->num_adc_nids =
@@ -11723,7 +11779,7 @@ static int patch_alc262(struct hda_codec *codec)
}
}
if (!spec->cap_mixer && !spec->no_analog)
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
if (!spec->no_analog)
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
@@ -11815,14 +11871,6 @@ static struct hda_verb alc268_toshiba_verbs[] = {
{ } /* end */
};
-static struct hda_input_mux alc268_acer_lc_capture_source = {
- .num_items = 2,
- .items = {
- { "i-Mic", 0x6 },
- { "E-Mic", 0x0 },
- },
-};
-
/* Acer specific */
/* bind volumes of both NID 0x02 and 0x03 */
static struct hda_bind_ctls alc268_acer_bind_master_vol = {
@@ -11941,7 +11989,8 @@ static struct hda_verb alc268_acer_verbs[] = {
/* unsolicited event for HP jack sensing */
#define alc268_toshiba_unsol_event alc262_hippo_unsol_event
-#define alc268_toshiba_init_hook alc262_hippo_init_hook
+#define alc268_toshiba_setup alc262_hippo_setup
+#define alc268_toshiba_automute alc262_hippo_automute
static void alc268_acer_unsol_event(struct hda_codec *codec,
unsigned int res)
@@ -11971,30 +12020,33 @@ static void alc268_aspire_one_speaker_automute(struct hda_codec *codec)
AMP_IN_MUTE(0), bits);
}
-
-static void alc268_acer_mic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_CONNECT_SEL,
- present ? 0x0 : 0x6);
-}
-
static void alc268_acer_lc_unsol_event(struct hda_codec *codec,
unsigned int res)
{
- if ((res >> 26) == ALC880_HP_EVENT)
+ switch (res >> 26) {
+ case ALC880_HP_EVENT:
alc268_aspire_one_speaker_automute(codec);
- if ((res >> 26) == ALC880_MIC_EVENT)
- alc268_acer_mic_automute(codec);
+ break;
+ case ALC880_MIC_EVENT:
+ alc_mic_automute(codec);
+ break;
+ }
+}
+
+static void alc268_acer_lc_setup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x12;
+ spec->int_mic.mux_idx = 6;
+ spec->auto_mic = 1;
}
static void alc268_acer_lc_init_hook(struct hda_codec *codec)
{
alc268_aspire_one_speaker_automute(codec);
- alc268_acer_mic_automute(codec);
+ alc_mic_automute(codec);
}
static struct snd_kcontrol_new alc268_dell_mixer[] = {
@@ -12012,17 +12064,22 @@ static struct hda_verb alc268_dell_verbs[] = {
{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
{0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+ {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT | AC_USRSP_EN},
{ }
};
/* mute/unmute internal speaker according to the hp jack and mute state */
-static void alc268_dell_init_hook(struct hda_codec *codec)
+static void alc268_dell_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
- alc_automute_pin(codec);
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x19;
+ spec->int_mic.mux_idx = 1;
+ spec->auto_mic = 1;
}
static struct snd_kcontrol_new alc267_quanta_il1_mixer[] = {
@@ -12043,38 +12100,16 @@ static struct hda_verb alc267_quanta_il1_verbs[] = {
{ }
};
-static void alc267_quanta_il1_mic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, 0x23, 0,
- AC_VERB_SET_CONNECT_SEL,
- present ? 0x00 : 0x01);
-}
-
-static void alc267_quanta_il1_init_hook(struct hda_codec *codec)
+static void alc267_quanta_il1_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
-
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
- alc_automute_pin(codec);
- alc267_quanta_il1_mic_automute(codec);
-}
-
-static void alc267_quanta_il1_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- switch (res >> 26) {
- case ALC880_MIC_EVENT:
- alc267_quanta_il1_mic_automute(codec);
- break;
- default:
- alc_sku_unsol_event(codec, res);
- break;
- }
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x19;
+ spec->int_mic.mux_idx = 1;
+ spec->auto_mic = 1;
}
/*
@@ -12154,21 +12189,16 @@ static struct hda_verb alc268_volume_init_verbs[] = {
{ }
};
+static struct snd_kcontrol_new alc268_capture_nosrc_mixer[] = {
+ HDA_CODEC_VOLUME("Capture Volume", 0x23, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Capture Switch", 0x23, 0x0, HDA_OUTPUT),
+ { } /* end */
+};
+
static struct snd_kcontrol_new alc268_capture_alt_mixer[] = {
HDA_CODEC_VOLUME("Capture Volume", 0x23, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Capture Switch", 0x23, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer
- * So call somewhat different..
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 1,
- .info = alc_mux_enum_info,
- .get = alc_mux_enum_get,
- .put = alc_mux_enum_put,
- },
+ _DEFINE_CAPSRC(1),
{ } /* end */
};
@@ -12177,18 +12207,7 @@ static struct snd_kcontrol_new alc268_capture_mixer[] = {
HDA_CODEC_MUTE("Capture Switch", 0x23, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x24, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x24, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer
- * So call somewhat different..
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 2,
- .info = alc_mux_enum_info,
- .get = alc_mux_enum_get,
- .put = alc_mux_enum_put,
- },
+ _DEFINE_CAPSRC(2),
{ } /* end */
};
@@ -12275,26 +12294,38 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
const char *ctlname, int idx)
{
char name[32];
+ hda_nid_t dac;
int err;
sprintf(name, "%s Playback Volume", ctlname);
- if (nid == 0x14) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
- HDA_COMPOSE_AMP_VAL(0x02, 3, idx,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- } else if (nid == 0x15) {
+ switch (nid) {
+ case 0x14:
+ case 0x16:
+ dac = 0x02;
+ break;
+ case 0x15:
+ dac = 0x03;
+ break;
+ default:
+ return 0;
+ }
+ if (spec->multiout.dac_nids[0] != dac &&
+ spec->multiout.dac_nids[1] != dac) {
err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
- HDA_COMPOSE_AMP_VAL(0x03, 3, idx,
+ HDA_COMPOSE_AMP_VAL(dac, 3, idx,
HDA_OUTPUT));
if (err < 0)
return err;
- } else
- return -1;
+ spec->multiout.dac_nids[spec->multiout.num_dacs++] = dac;
+ }
+
sprintf(name, "%s Playback Switch", ctlname);
- err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
+ if (nid != 0x16)
+ err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
HDA_COMPOSE_AMP_VAL(nid, 3, idx, HDA_OUTPUT));
+ else /* mono */
+ err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
+ HDA_COMPOSE_AMP_VAL(nid, 2, idx, HDA_OUTPUT));
if (err < 0)
return err;
return 0;
@@ -12307,14 +12338,19 @@ static int alc268_auto_create_multi_out_ctls(struct alc_spec *spec,
hda_nid_t nid;
int err;
- spec->multiout.num_dacs = 2; /* only use one dac */
spec->multiout.dac_nids = spec->private_dac_nids;
- spec->multiout.dac_nids[0] = 2;
- spec->multiout.dac_nids[1] = 3;
nid = cfg->line_out_pins[0];
- if (nid)
- alc268_new_analog_output(spec, nid, "Front", 0);
+ if (nid) {
+ const char *name;
+ if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ name = "Speaker";
+ else
+ name = "Front";
+ err = alc268_new_analog_output(spec, nid, name, 0);
+ if (err < 0)
+ return err;
+ }
nid = cfg->speaker_pins[0];
if (nid == 0x1d) {
@@ -12323,16 +12359,23 @@ static int alc268_auto_create_multi_out_ctls(struct alc_spec *spec,
HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
if (err < 0)
return err;
+ } else {
+ err = alc268_new_analog_output(spec, nid, "Speaker", 0);
+ if (err < 0)
+ return err;
}
nid = cfg->hp_pins[0];
- if (nid)
- alc268_new_analog_output(spec, nid, "Headphone", 0);
+ if (nid) {
+ err = alc268_new_analog_output(spec, nid, "Headphone", 0);
+ if (err < 0)
+ return err;
+ }
nid = cfg->line_out_pins[1] | cfg->line_out_pins[2];
if (nid == 0x16) {
err = add_control(spec, ALC_CTL_WIDGET_MUTE,
"Mono Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 2, 0, HDA_INPUT));
+ HDA_COMPOSE_AMP_VAL(nid, 2, 0, HDA_OUTPUT));
if (err < 0)
return err;
}
@@ -12340,38 +12383,46 @@ static int alc268_auto_create_multi_out_ctls(struct alc_spec *spec,
}
/* create playback/capture controls for input pins */
-static int alc268_auto_create_analog_input_ctls(struct alc_spec *spec,
+static int alc268_auto_create_input_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
- struct hda_input_mux *imux = &spec->private_imux[0];
- int i, idx1;
+ return alc_auto_create_input_ctls(codec, cfg, 0, 0x23, 0x24);
+}
- for (i = 0; i < AUTO_PIN_LAST; i++) {
- switch(cfg->input_pins[i]) {
- case 0x18:
- idx1 = 0; /* Mic 1 */
- break;
- case 0x19:
- idx1 = 1; /* Mic 2 */
- break;
- case 0x1a:
- idx1 = 2; /* Line In */
- break;
- case 0x1c:
- idx1 = 3; /* CD */
- break;
- case 0x12:
- case 0x13:
- idx1 = 6; /* digital mics */
- break;
- default:
- continue;
- }
- imux->items[imux->num_items].label = auto_pin_cfg_labels[i];
- imux->items[imux->num_items].index = idx1;
- imux->num_items++;
+static void alc268_auto_set_output_and_unmute(struct hda_codec *codec,
+ hda_nid_t nid, int pin_type)
+{
+ int idx;
+
+ alc_set_pin_output(codec, nid, pin_type);
+ if (nid == 0x14 || nid == 0x16)
+ idx = 0;
+ else
+ idx = 1;
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx);
+}
+
+static void alc268_auto_init_multi_out(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t nid = spec->autocfg.line_out_pins[0];
+ if (nid) {
+ int pin_type = get_pin_type(spec->autocfg.line_out_type);
+ alc268_auto_set_output_and_unmute(codec, nid, pin_type);
}
- return 0;
+}
+
+static void alc268_auto_init_hp_out(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t pin;
+
+ pin = spec->autocfg.hp_pins[0];
+ if (pin)
+ alc268_auto_set_output_and_unmute(codec, pin, PIN_HP);
+ pin = spec->autocfg.speaker_pins[0];
+ if (pin)
+ alc268_auto_set_output_and_unmute(codec, pin, PIN_OUT);
}
static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec)
@@ -12382,9 +12433,10 @@ static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec)
hda_nid_t line_nid = spec->autocfg.line_out_pins[0];
unsigned int dac_vol1, dac_vol2;
- if (speaker_nid) {
+ if (line_nid == 0x1d || speaker_nid == 0x1d) {
snd_hda_codec_write(codec, speaker_nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ /* mute mixer inputs from 0x1d */
snd_hda_codec_write(codec, 0x0f, 0,
AC_VERB_SET_AMP_GAIN_MUTE,
AMP_IN_UNMUTE(1));
@@ -12392,6 +12444,7 @@ static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec)
AC_VERB_SET_AMP_GAIN_MUTE,
AMP_IN_UNMUTE(1));
} else {
+ /* unmute mixer inputs from 0x1d */
snd_hda_codec_write(codec, 0x0f, 0,
AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1));
snd_hda_codec_write(codec, 0x10, 0,
@@ -12448,7 +12501,7 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
err = alc268_auto_create_multi_out_ctls(spec, &spec->autocfg);
if (err < 0)
return err;
- err = alc268_auto_create_analog_input_ctls(spec, &spec->autocfg);
+ err = alc268_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -12467,7 +12520,7 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
add_mixer(spec, alc268_beep_mixer);
add_verb(spec, alc268_volume_init_verbs);
- spec->num_mux_defs = 1;
+ spec->num_mux_defs = 2;
spec->input_mux = &spec->private_imux[0];
err = alc_auto_add_mic_boost(codec);
@@ -12479,8 +12532,6 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
return 1;
}
-#define alc268_auto_init_multi_out alc882_auto_init_multi_out
-#define alc268_auto_init_hp_out alc882_auto_init_hp_out
#define alc268_auto_init_analog_input alc882_auto_init_analog_input
/* init callback for auto-configuration model -- overriding the default init */
@@ -12523,8 +12574,11 @@ static struct snd_pci_quirk alc268_cfg_tbl[] = {
ALC268_ACER_ASPIRE_ONE),
SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL),
+ /* almost compatible with toshiba but with optional digital outs;
+ * auto-probing seems working fine
+ */
SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP TX25xx series",
- ALC268_TOSHIBA),
+ ALC268_AUTO),
SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO),
SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
@@ -12545,7 +12599,8 @@ static struct snd_pci_quirk alc268_ssid_cfg_tbl[] = {
static struct alc_config_preset alc268_presets[] = {
[ALC267_QUANTA_IL1] = {
- .mixers = { alc267_quanta_il1_mixer, alc268_beep_mixer },
+ .mixers = { alc267_quanta_il1_mixer, alc268_beep_mixer,
+ alc268_capture_nosrc_mixer },
.init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
alc267_quanta_il1_verbs },
.num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -12555,9 +12610,9 @@ static struct alc_config_preset alc268_presets[] = {
.hp_nid = 0x03,
.num_channel_mode = ARRAY_SIZE(alc268_modes),
.channel_mode = alc268_modes,
- .input_mux = &alc268_capture_source,
- .unsol_event = alc267_quanta_il1_unsol_event,
- .init_hook = alc267_quanta_il1_init_hook,
+ .unsol_event = alc_sku_unsol_event,
+ .setup = alc267_quanta_il1_setup,
+ .init_hook = alc_inithook,
},
[ALC268_3ST] = {
.mixers = { alc268_base_mixer, alc268_capture_alt_mixer,
@@ -12589,10 +12644,11 @@ static struct alc_config_preset alc268_presets[] = {
.channel_mode = alc268_modes,
.input_mux = &alc268_capture_source,
.unsol_event = alc268_toshiba_unsol_event,
- .init_hook = alc268_toshiba_init_hook,
+ .setup = alc268_toshiba_setup,
+ .init_hook = alc268_toshiba_automute,
},
[ALC268_ACER] = {
- .mixers = { alc268_acer_mixer, alc268_capture_alt_mixer,
+ .mixers = { alc268_acer_mixer, alc268_capture_nosrc_mixer,
alc268_beep_mixer },
.init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
alc268_acer_verbs },
@@ -12628,7 +12684,7 @@ static struct alc_config_preset alc268_presets[] = {
[ALC268_ACER_ASPIRE_ONE] = {
.mixers = { alc268_acer_aspire_one_mixer,
alc268_beep_mixer,
- alc268_capture_alt_mixer },
+ alc268_capture_nosrc_mixer },
.init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
alc268_acer_aspire_one_verbs },
.num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -12639,22 +12695,26 @@ static struct alc_config_preset alc268_presets[] = {
.hp_nid = 0x03,
.num_channel_mode = ARRAY_SIZE(alc268_modes),
.channel_mode = alc268_modes,
- .input_mux = &alc268_acer_lc_capture_source,
.unsol_event = alc268_acer_lc_unsol_event,
+ .setup = alc268_acer_lc_setup,
.init_hook = alc268_acer_lc_init_hook,
},
[ALC268_DELL] = {
- .mixers = { alc268_dell_mixer, alc268_beep_mixer },
+ .mixers = { alc268_dell_mixer, alc268_beep_mixer,
+ alc268_capture_nosrc_mixer },
.init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
alc268_dell_verbs },
.num_dacs = ARRAY_SIZE(alc268_dac_nids),
.dac_nids = alc268_dac_nids,
+ .num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt),
+ .adc_nids = alc268_adc_nids_alt,
+ .capsrc_nids = alc268_capsrc_nids,
.hp_nid = 0x02,
.num_channel_mode = ARRAY_SIZE(alc268_modes),
.channel_mode = alc268_modes,
.unsol_event = alc_sku_unsol_event,
- .init_hook = alc268_dell_init_hook,
- .input_mux = &alc268_capture_source,
+ .setup = alc268_dell_setup,
+ .init_hook = alc_inithook,
},
[ALC268_ZEPTO] = {
.mixers = { alc268_base_mixer, alc268_capture_alt_mixer,
@@ -12671,8 +12731,8 @@ static struct alc_config_preset alc268_presets[] = {
.num_channel_mode = ARRAY_SIZE(alc268_modes),
.channel_mode = alc268_modes,
.input_mux = &alc268_capture_source,
- .unsol_event = alc268_toshiba_unsol_event,
- .init_hook = alc268_toshiba_init_hook
+ .setup = alc268_toshiba_setup,
+ .init_hook = alc268_toshiba_automute,
},
#ifdef CONFIG_SND_DEBUG
[ALC268_TEST] = {
@@ -12714,8 +12774,8 @@ static int patch_alc268(struct hda_codec *codec)
ALC882_MODEL_LAST, alc268_models, alc268_ssid_cfg_tbl);
if (board_config < 0 || board_config >= ALC268_MODEL_LAST) {
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n", codec->chip_name);
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
board_config = ALC268_AUTO;
}
@@ -12734,7 +12794,7 @@ static int patch_alc268(struct hda_codec *codec)
}
if (board_config != ALC268_AUTO)
- setup_preset(spec, &alc268_presets[board_config]);
+ setup_preset(codec, &alc268_presets[board_config]);
spec->stream_analog_playback = &alc268_pcm_analog_playback;
spec->stream_analog_capture = &alc268_pcm_analog_capture;
@@ -12771,11 +12831,15 @@ static int patch_alc268(struct hda_codec *codec)
int i;
/* get type */
- wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
- if (wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) {
+ wcap = get_wcaps_type(wcap);
+ if (spec->auto_mic ||
+ wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) {
spec->adc_nids = alc268_adc_nids_alt;
spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt);
- add_mixer(spec, alc268_capture_alt_mixer);
+ if (spec->auto_mic || spec->input_mux->num_items == 1)
+ add_mixer(spec, alc268_capture_nosrc_mixer);
+ else
+ add_mixer(spec, alc268_capture_alt_mixer);
} else {
spec->adc_nids = alc268_adc_nids;
spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids);
@@ -12786,6 +12850,8 @@ static int patch_alc268(struct hda_codec *codec)
for (i = 0; i < spec->num_adc_nids; i++)
snd_hda_codec_write_cache(codec, alc268_capsrc_nids[i],
0, AC_VERB_SET_CONNECT_SEL,
+ i < spec->num_mux_defs ?
+ spec->input_mux[i].items[0].index :
spec->input_mux->items[0].index);
}
@@ -12820,22 +12886,6 @@ static hda_nid_t alc269_capsrc_nids[1] = {
* not a mux!
*/
-static struct hda_input_mux alc269_eeepc_dmic_capture_source = {
- .num_items = 2,
- .items = {
- { "i-Mic", 0x5 },
- { "e-Mic", 0x0 },
- },
-};
-
-static struct hda_input_mux alc269_eeepc_amic_capture_source = {
- .num_items = 2,
- .items = {
- { "i-Mic", 0x1 },
- { "e-Mic", 0x0 },
- },
-};
-
#define alc269_modes alc260_modes
#define alc269_capture_source alc880_lg_lw_capture_source
@@ -12997,16 +13047,6 @@ static void alc269_lifebook_speaker_automute(struct hda_codec *codec)
AC_VERB_SET_PROC_COEF, 0x480);
}
-static void alc269_quanta_fl1_mic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, 0x23, 0,
- AC_VERB_SET_CONNECT_SEL, present ? 0x0 : 0x1);
-}
-
static void alc269_lifebook_mic_autoswitch(struct hda_codec *codec)
{
unsigned int present_laptop;
@@ -13033,10 +13073,14 @@ static void alc269_lifebook_mic_autoswitch(struct hda_codec *codec)
static void alc269_quanta_fl1_unsol_event(struct hda_codec *codec,
unsigned int res)
{
- if ((res >> 26) == ALC880_HP_EVENT)
+ switch (res >> 26) {
+ case ALC880_HP_EVENT:
alc269_quanta_fl1_speaker_automute(codec);
- if ((res >> 26) == ALC880_MIC_EVENT)
- alc269_quanta_fl1_mic_automute(codec);
+ break;
+ case ALC880_MIC_EVENT:
+ alc_mic_automute(codec);
+ break;
+ }
}
static void alc269_lifebook_unsol_event(struct hda_codec *codec,
@@ -13048,10 +13092,20 @@ static void alc269_lifebook_unsol_event(struct hda_codec *codec,
alc269_lifebook_mic_autoswitch(codec);
}
+static void alc269_quanta_fl1_setup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x19;
+ spec->int_mic.mux_idx = 1;
+ spec->auto_mic = 1;
+}
+
static void alc269_quanta_fl1_init_hook(struct hda_codec *codec)
{
alc269_quanta_fl1_speaker_automute(codec);
- alc269_quanta_fl1_mic_automute(codec);
+ alc_mic_automute(codec);
}
static void alc269_lifebook_init_hook(struct hda_codec *codec)
@@ -13096,60 +13150,44 @@ static void alc269_speaker_automute(struct hda_codec *codec)
AMP_IN_MUTE(0), bits);
}
-static void alc269_eeepc_dmic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, 0x23, 0,
- AC_VERB_SET_CONNECT_SEL, (present ? 0 : 5));
-}
-
-static void alc269_eeepc_amic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, 0x24, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
- snd_hda_codec_write(codec, 0x24, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
-}
-
/* unsolicited event for HP jack sensing */
-static void alc269_eeepc_dmic_unsol_event(struct hda_codec *codec,
+static void alc269_eeepc_unsol_event(struct hda_codec *codec,
unsigned int res)
{
- if ((res >> 26) == ALC880_HP_EVENT)
+ switch (res >> 26) {
+ case ALC880_HP_EVENT:
alc269_speaker_automute(codec);
-
- if ((res >> 26) == ALC880_MIC_EVENT)
- alc269_eeepc_dmic_automute(codec);
+ break;
+ case ALC880_MIC_EVENT:
+ alc_mic_automute(codec);
+ break;
+ }
}
-static void alc269_eeepc_dmic_inithook(struct hda_codec *codec)
+static void alc269_eeepc_dmic_setup(struct hda_codec *codec)
{
- alc269_speaker_automute(codec);
- alc269_eeepc_dmic_automute(codec);
+ struct alc_spec *spec = codec->spec;
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x12;
+ spec->int_mic.mux_idx = 5;
+ spec->auto_mic = 1;
}
-/* unsolicited event for HP jack sensing */
-static void alc269_eeepc_amic_unsol_event(struct hda_codec *codec,
- unsigned int res)
+static void alc269_eeepc_amic_setup(struct hda_codec *codec)
{
- if ((res >> 26) == ALC880_HP_EVENT)
- alc269_speaker_automute(codec);
-
- if ((res >> 26) == ALC880_MIC_EVENT)
- alc269_eeepc_amic_automute(codec);
+ struct alc_spec *spec = codec->spec;
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x19;
+ spec->int_mic.mux_idx = 1;
+ spec->auto_mic = 1;
}
-static void alc269_eeepc_amic_inithook(struct hda_codec *codec)
+static void alc269_eeepc_inithook(struct hda_codec *codec)
{
alc269_speaker_automute(codec);
- alc269_eeepc_amic_automute(codec);
+ alc_mic_automute(codec);
}
/*
@@ -13222,89 +13260,10 @@ static struct hda_verb alc269_init_verbs[] = {
{ }
};
-/* add playback controls from the parsed DAC table */
-static int alc269_auto_create_multi_out_ctls(struct alc_spec *spec,
- const struct auto_pin_cfg *cfg)
-{
- hda_nid_t nid;
- int err;
-
- spec->multiout.num_dacs = 1; /* only use one dac */
- spec->multiout.dac_nids = spec->private_dac_nids;
- spec->multiout.dac_nids[0] = 2;
-
- nid = cfg->line_out_pins[0];
- if (nid) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Front Playback Volume",
- HDA_COMPOSE_AMP_VAL(0x02, 3, 0, HDA_OUTPUT));
- if (err < 0)
- return err;
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Front Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT));
- if (err < 0)
- return err;
- }
-
- nid = cfg->speaker_pins[0];
- if (nid) {
- if (!cfg->line_out_pins[0]) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Speaker Playback Volume",
- HDA_COMPOSE_AMP_VAL(0x02, 3, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- }
- if (nid == 0x16) {
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Speaker Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 2, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- } else {
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Speaker Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- }
- }
- nid = cfg->hp_pins[0];
- if (nid) {
- /* spec->multiout.hp_nid = 2; */
- if (!cfg->line_out_pins[0] && !cfg->speaker_pins[0]) {
- err = add_control(spec, ALC_CTL_WIDGET_VOL,
- "Headphone Playback Volume",
- HDA_COMPOSE_AMP_VAL(0x02, 3, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- }
- if (nid == 0x16) {
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Headphone Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 2, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- } else {
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Headphone Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0,
- HDA_OUTPUT));
- if (err < 0)
- return err;
- }
- }
- return 0;
-}
-
-#define alc269_auto_create_analog_input_ctls \
- alc262_auto_create_analog_input_ctls
+#define alc269_auto_create_multi_out_ctls \
+ alc268_auto_create_multi_out_ctls
+#define alc269_auto_create_input_ctls \
+ alc268_auto_create_input_ctls
#ifdef CONFIG_SND_HDA_POWER_SAVE
#define alc269_loopbacks alc880_loopbacks
@@ -13354,7 +13313,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
err = alc269_auto_create_multi_out_ctls(spec, &spec->autocfg);
if (err < 0)
return err;
- err = alc269_auto_create_analog_input_ctls(spec, &spec->autocfg);
+ err = alc269_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -13379,15 +13338,15 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
return err;
if (!spec->cap_mixer && !spec->no_analog)
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
alc_ssid_check(codec, 0x15, 0x1b, 0x14);
return 1;
}
-#define alc269_auto_init_multi_out alc882_auto_init_multi_out
-#define alc269_auto_init_hp_out alc882_auto_init_hp_out
+#define alc269_auto_init_multi_out alc268_auto_init_multi_out
+#define alc269_auto_init_hp_out alc268_auto_init_hp_out
#define alc269_auto_init_analog_input alc882_auto_init_analog_input
@@ -13455,6 +13414,7 @@ static struct alc_config_preset alc269_presets[] = {
.channel_mode = alc269_modes,
.input_mux = &alc269_capture_source,
.unsol_event = alc269_quanta_fl1_unsol_event,
+ .setup = alc269_quanta_fl1_setup,
.init_hook = alc269_quanta_fl1_init_hook,
},
[ALC269_ASUS_EEEPC_P703] = {
@@ -13467,9 +13427,9 @@ static struct alc_config_preset alc269_presets[] = {
.hp_nid = 0x03,
.num_channel_mode = ARRAY_SIZE(alc269_modes),
.channel_mode = alc269_modes,
- .input_mux = &alc269_eeepc_amic_capture_source,
- .unsol_event = alc269_eeepc_amic_unsol_event,
- .init_hook = alc269_eeepc_amic_inithook,
+ .unsol_event = alc269_eeepc_unsol_event,
+ .setup = alc269_eeepc_amic_setup,
+ .init_hook = alc269_eeepc_inithook,
},
[ALC269_ASUS_EEEPC_P901] = {
.mixers = { alc269_eeepc_mixer },
@@ -13481,9 +13441,9 @@ static struct alc_config_preset alc269_presets[] = {
.hp_nid = 0x03,
.num_channel_mode = ARRAY_SIZE(alc269_modes),
.channel_mode = alc269_modes,
- .input_mux = &alc269_eeepc_dmic_capture_source,
- .unsol_event = alc269_eeepc_dmic_unsol_event,
- .init_hook = alc269_eeepc_dmic_inithook,
+ .unsol_event = alc269_eeepc_unsol_event,
+ .setup = alc269_eeepc_dmic_setup,
+ .init_hook = alc269_eeepc_inithook,
},
[ALC269_FUJITSU] = {
.mixers = { alc269_fujitsu_mixer },
@@ -13495,9 +13455,9 @@ static struct alc_config_preset alc269_presets[] = {
.hp_nid = 0x03,
.num_channel_mode = ARRAY_SIZE(alc269_modes),
.channel_mode = alc269_modes,
- .input_mux = &alc269_eeepc_dmic_capture_source,
- .unsol_event = alc269_eeepc_dmic_unsol_event,
- .init_hook = alc269_eeepc_dmic_inithook,
+ .unsol_event = alc269_eeepc_unsol_event,
+ .setup = alc269_eeepc_dmic_setup,
+ .init_hook = alc269_eeepc_inithook,
},
[ALC269_LIFEBOOK] = {
.mixers = { alc269_lifebook_mixer },
@@ -13532,8 +13492,8 @@ static int patch_alc269(struct hda_codec *codec)
alc269_cfg_tbl);
if (board_config < 0) {
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n", codec->chip_name);
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
board_config = ALC269_AUTO;
}
@@ -13558,7 +13518,7 @@ static int patch_alc269(struct hda_codec *codec)
}
if (board_config != ALC269_AUTO)
- setup_preset(spec, &alc269_presets[board_config]);
+ setup_preset(codec, &alc269_presets[board_config]);
if (codec->subsystem_id == 0x17aa3bf8) {
/* Due to a hardware problem on Lenovo Ideapad, we need to
@@ -13577,7 +13537,7 @@ static int patch_alc269(struct hda_codec *codec)
spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids);
spec->capsrc_nids = alc269_capsrc_nids;
if (!spec->cap_mixer)
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
spec->vmaster_nid = 0x02;
@@ -14127,23 +14087,23 @@ static struct hda_verb alc861_auto_init_verbs[] = {
{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
{0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb00c},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
+ {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
{0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
- {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)},
+ {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
{0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
{0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)},
+ {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+ {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
{0x08, AC_VERB_SET_CONNECT_SEL, 0x00}, /* set Mic 1 */
@@ -14215,64 +14175,96 @@ static struct hda_input_mux alc861_capture_source = {
},
};
+static hda_nid_t alc861_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t mix, srcs[5];
+ int i, j, num;
+
+ if (snd_hda_get_connections(codec, pin, &mix, 1) != 1)
+ return 0;
+ num = snd_hda_get_connections(codec, mix, srcs, ARRAY_SIZE(srcs));
+ if (num < 0)
+ return 0;
+ for (i = 0; i < num; i++) {
+ unsigned int type;
+ type = get_wcaps_type(get_wcaps(codec, srcs[i]));
+ if (type != AC_WID_AUD_OUT)
+ continue;
+ for (j = 0; j < spec->multiout.num_dacs; j++)
+ if (spec->multiout.dac_nids[j] == srcs[i])
+ break;
+ if (j >= spec->multiout.num_dacs)
+ return srcs[i];
+ }
+ return 0;
+}
+
/* fill in the dac_nids table from the parsed pin configuration */
-static int alc861_auto_fill_dac_nids(struct alc_spec *spec,
+static int alc861_auto_fill_dac_nids(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
+ struct alc_spec *spec = codec->spec;
int i;
- hda_nid_t nid;
+ hda_nid_t nid, dac;
spec->multiout.dac_nids = spec->private_dac_nids;
for (i = 0; i < cfg->line_outs; i++) {
nid = cfg->line_out_pins[i];
- if (nid) {
- if (i >= ARRAY_SIZE(alc861_dac_nids))
- continue;
- spec->multiout.dac_nids[i] = alc861_dac_nids[i];
- }
+ dac = alc861_look_for_dac(codec, nid);
+ if (!dac)
+ continue;
+ spec->multiout.dac_nids[spec->multiout.num_dacs++] = dac;
}
- spec->multiout.num_dacs = cfg->line_outs;
return 0;
}
+static int alc861_create_out_sw(struct hda_codec *codec, const char *pfx,
+ hda_nid_t nid, unsigned int chs)
+{
+ char name[32];
+ snprintf(name, sizeof(name), "%s Playback Switch", pfx);
+ return add_control(codec->spec, ALC_CTL_WIDGET_MUTE, name,
+ HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
+}
+
/* add playback controls from the parsed DAC table */
-static int alc861_auto_create_multi_out_ctls(struct alc_spec *spec,
+static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
- char name[32];
+ struct alc_spec *spec = codec->spec;
static const char *chname[4] = {
"Front", "Surround", NULL /*CLFE*/, "Side"
};
hda_nid_t nid;
- int i, idx, err;
+ int i, err;
+
+ if (cfg->line_outs == 1) {
+ const char *pfx = NULL;
+ if (!cfg->hp_outs)
+ pfx = "Master";
+ else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ pfx = "Speaker";
+ if (pfx) {
+ nid = spec->multiout.dac_nids[0];
+ return alc861_create_out_sw(codec, pfx, nid, 3);
+ }
+ }
for (i = 0; i < cfg->line_outs; i++) {
nid = spec->multiout.dac_nids[i];
if (!nid)
continue;
- if (nid == 0x05) {
+ if (i == 2) {
/* Center/LFE */
- err = add_control(spec, ALC_CTL_BIND_MUTE,
- "Center Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 1, 0,
- HDA_OUTPUT));
+ err = alc861_create_out_sw(codec, "Center", nid, 1);
if (err < 0)
return err;
- err = add_control(spec, ALC_CTL_BIND_MUTE,
- "LFE Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 2, 0,
- HDA_OUTPUT));
+ err = alc861_create_out_sw(codec, "LFE", nid, 2);
if (err < 0)
return err;
} else {
- for (idx = 0; idx < ARRAY_SIZE(alc861_dac_nids) - 1;
- idx++)
- if (nid == alc861_dac_nids[idx])
- break;
- sprintf(name, "%s Playback Switch", chname[idx]);
- err = add_control(spec, ALC_CTL_BIND_MUTE, name,
- HDA_COMPOSE_AMP_VAL(nid, 3, 0,
- HDA_OUTPUT));
+ err = alc861_create_out_sw(codec, chname[i], nid, 3);
if (err < 0)
return err;
}
@@ -14280,8 +14272,9 @@ static int alc861_auto_create_multi_out_ctls(struct alc_spec *spec,
return 0;
}
-static int alc861_auto_create_hp_ctls(struct alc_spec *spec, hda_nid_t pin)
+static int alc861_auto_create_hp_ctls(struct hda_codec *codec, hda_nid_t pin)
{
+ struct alc_spec *spec = codec->spec;
int err;
hda_nid_t nid;
@@ -14289,70 +14282,49 @@ static int alc861_auto_create_hp_ctls(struct alc_spec *spec, hda_nid_t pin)
return 0;
if ((pin >= 0x0b && pin <= 0x10) || pin == 0x1f || pin == 0x20) {
- nid = 0x03;
- err = add_control(spec, ALC_CTL_WIDGET_MUTE,
- "Headphone Playback Switch",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT));
- if (err < 0)
- return err;
- spec->multiout.hp_nid = nid;
+ nid = alc861_look_for_dac(codec, pin);
+ if (nid) {
+ err = alc861_create_out_sw(codec, "Headphone", nid, 3);
+ if (err < 0)
+ return err;
+ spec->multiout.hp_nid = nid;
+ }
}
return 0;
}
/* create playback/capture controls for input pins */
-static int alc861_auto_create_analog_input_ctls(struct alc_spec *spec,
+static int alc861_auto_create_input_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
- struct hda_input_mux *imux = &spec->private_imux[0];
- int i, err, idx, idx1;
-
- for (i = 0; i < AUTO_PIN_LAST; i++) {
- switch (cfg->input_pins[i]) {
- case 0x0c:
- idx1 = 1;
- idx = 2; /* Line In */
- break;
- case 0x0f:
- idx1 = 2;
- idx = 2; /* Line In */
- break;
- case 0x0d:
- idx1 = 0;
- idx = 1; /* Mic In */
- break;
- case 0x10:
- idx1 = 3;
- idx = 1; /* Mic In */
- break;
- case 0x11:
- idx1 = 4;
- idx = 0; /* CD */
- break;
- default:
- continue;
- }
-
- err = new_analog_input(spec, cfg->input_pins[i],
- auto_pin_cfg_labels[i], idx, 0x15);
- if (err < 0)
- return err;
-
- imux->items[imux->num_items].label = auto_pin_cfg_labels[i];
- imux->items[imux->num_items].index = idx1;
- imux->num_items++;
- }
- return 0;
+ return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x08, 0);
}
static void alc861_auto_set_output_and_unmute(struct hda_codec *codec,
hda_nid_t nid,
- int pin_type, int dac_idx)
+ int pin_type, hda_nid_t dac)
{
+ hda_nid_t mix, srcs[5];
+ int i, num;
+
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
pin_type);
- snd_hda_codec_write(codec, dac_idx, 0, AC_VERB_SET_AMP_GAIN_MUTE,
+ snd_hda_codec_write(codec, dac, 0, AC_VERB_SET_AMP_GAIN_MUTE,
AMP_OUT_UNMUTE);
+ if (snd_hda_get_connections(codec, nid, &mix, 1) != 1)
+ return;
+ num = snd_hda_get_connections(codec, mix, srcs, ARRAY_SIZE(srcs));
+ if (num < 0)
+ return;
+ for (i = 0; i < num; i++) {
+ unsigned int mute;
+ if (srcs[i] == dac || srcs[i] == 0x15)
+ mute = AMP_IN_UNMUTE(i);
+ else
+ mute = AMP_IN_MUTE(i);
+ snd_hda_codec_write(codec, mix, 0, AC_VERB_SET_AMP_GAIN_MUTE,
+ mute);
+ }
}
static void alc861_auto_init_multi_out(struct hda_codec *codec)
@@ -14375,12 +14347,13 @@ static void alc861_auto_init_hp_out(struct hda_codec *codec)
hda_nid_t pin;
pin = spec->autocfg.hp_pins[0];
- if (pin) /* connect to front */
+ if (pin)
alc861_auto_set_output_and_unmute(codec, pin, PIN_HP,
- spec->multiout.dac_nids[0]);
+ spec->multiout.hp_nid);
pin = spec->autocfg.speaker_pins[0];
if (pin)
- alc861_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
+ alc861_auto_set_output_and_unmute(codec, pin, PIN_OUT,
+ spec->multiout.dac_nids[0]);
}
static void alc861_auto_init_analog_input(struct hda_codec *codec)
@@ -14412,16 +14385,16 @@ static int alc861_parse_auto_config(struct hda_codec *codec)
if (!spec->autocfg.line_outs)
return 0; /* can't find valid BIOS pin config */
- err = alc861_auto_fill_dac_nids(spec, &spec->autocfg);
+ err = alc861_auto_fill_dac_nids(codec, &spec->autocfg);
if (err < 0)
return err;
- err = alc861_auto_create_multi_out_ctls(spec, &spec->autocfg);
+ err = alc861_auto_create_multi_out_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
- err = alc861_auto_create_hp_ctls(spec, spec->autocfg.hp_pins[0]);
+ err = alc861_auto_create_hp_ctls(codec, spec->autocfg.hp_pins[0]);
if (err < 0)
return err;
- err = alc861_auto_create_analog_input_ctls(spec, &spec->autocfg);
+ err = alc861_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -14440,7 +14413,7 @@ static int alc861_parse_auto_config(struct hda_codec *codec)
spec->adc_nids = alc861_adc_nids;
spec->num_adc_nids = ARRAY_SIZE(alc861_adc_nids);
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
alc_ssid_check(codec, 0x0e, 0x0f, 0x0b);
@@ -14633,8 +14606,8 @@ static int patch_alc861(struct hda_codec *codec)
alc861_cfg_tbl);
if (board_config < 0) {
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n", codec->chip_name);
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
board_config = ALC861_AUTO;
}
@@ -14659,7 +14632,7 @@ static int patch_alc861(struct hda_codec *codec)
}
if (board_config != ALC861_AUTO)
- setup_preset(spec, &alc861_presets[board_config]);
+ setup_preset(codec, &alc861_presets[board_config]);
spec->stream_analog_playback = &alc861_pcm_analog_playback;
spec->stream_analog_capture = &alc861_pcm_analog_capture;
@@ -15062,12 +15035,15 @@ static void alc861vd_lenovo_mic_automute(struct hda_codec *codec)
HDA_AMP_MUTE, bits);
}
-static void alc861vd_lenovo_init_hook(struct hda_codec *codec)
+static void alc861vd_lenovo_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
-
spec->autocfg.hp_pins[0] = 0x1b;
spec->autocfg.speaker_pins[0] = 0x14;
+}
+
+static void alc861vd_lenovo_init_hook(struct hda_codec *codec)
+{
alc_automute_amp(codec);
alc861vd_lenovo_mic_automute(codec);
}
@@ -15131,13 +15107,12 @@ static struct hda_verb alc861vd_dallas_verbs[] = {
};
/* toggle speaker-output according to the hp-jack state */
-static void alc861vd_dallas_init_hook(struct hda_codec *codec)
+static void alc861vd_dallas_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x15;
spec->autocfg.speaker_pins[0] = 0x14;
- alc_automute_amp(codec);
}
#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -15251,6 +15226,7 @@ static struct alc_config_preset alc861vd_presets[] = {
.channel_mode = alc861vd_3stack_2ch_modes,
.input_mux = &alc861vd_capture_source,
.unsol_event = alc861vd_lenovo_unsol_event,
+ .setup = alc861vd_lenovo_setup,
.init_hook = alc861vd_lenovo_init_hook,
},
[ALC861VD_DALLAS] = {
@@ -15262,7 +15238,8 @@ static struct alc_config_preset alc861vd_presets[] = {
.channel_mode = alc861vd_3stack_2ch_modes,
.input_mux = &alc861vd_dallas_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc861vd_dallas_init_hook,
+ .setup = alc861vd_dallas_setup,
+ .init_hook = alc_automute_amp,
},
[ALC861VD_HP] = {
.mixers = { alc861vd_hp_mixer },
@@ -15274,7 +15251,8 @@ static struct alc_config_preset alc861vd_presets[] = {
.channel_mode = alc861vd_3stack_2ch_modes,
.input_mux = &alc861vd_hp_capture_source,
.unsol_event = alc_automute_amp_unsol_event,
- .init_hook = alc861vd_dallas_init_hook,
+ .setup = alc861vd_dallas_setup,
+ .init_hook = alc_automute_amp,
},
[ALC660VD_ASUS_V1S] = {
.mixers = { alc861vd_lenovo_mixer },
@@ -15289,6 +15267,7 @@ static struct alc_config_preset alc861vd_presets[] = {
.channel_mode = alc861vd_3stack_2ch_modes,
.input_mux = &alc861vd_capture_source,
.unsol_event = alc861vd_lenovo_unsol_event,
+ .setup = alc861vd_lenovo_setup,
.init_hook = alc861vd_lenovo_init_hook,
},
};
@@ -15296,6 +15275,13 @@ static struct alc_config_preset alc861vd_presets[] = {
/*
* BIOS auto configuration
*/
+static int alc861vd_auto_create_input_ctls(struct hda_codec *codec,
+ const struct auto_pin_cfg *cfg)
+{
+ return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x09, 0);
+}
+
+
static void alc861vd_auto_set_output_and_unmute(struct hda_codec *codec,
hda_nid_t nid, int pin_type, int dac_idx)
{
@@ -15330,7 +15316,6 @@ static void alc861vd_auto_init_hp_out(struct hda_codec *codec)
alc861vd_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
}
-#define alc861vd_is_input_pin(nid) alc880_is_input_pin(nid)
#define ALC861VD_PIN_CD_NID ALC880_PIN_CD_NID
static void alc861vd_auto_init_analog_input(struct hda_codec *codec)
@@ -15340,7 +15325,7 @@ static void alc861vd_auto_init_analog_input(struct hda_codec *codec)
for (i = 0; i < AUTO_PIN_LAST; i++) {
hda_nid_t nid = spec->autocfg.input_pins[i];
- if (alc861vd_is_input_pin(nid)) {
+ if (alc_is_input_pin(codec, nid)) {
alc_set_input_pin(codec, nid, i);
if (nid != ALC861VD_PIN_CD_NID &&
(get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
@@ -15404,13 +15389,25 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
if (err < 0)
return err;
} else {
- sprintf(name, "%s Playback Volume", chname[i]);
+ const char *pfx;
+ if (cfg->line_outs == 1 &&
+ cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
+ if (!cfg->hp_pins)
+ pfx = "Speaker";
+ else
+ pfx = "PCM";
+ } else
+ pfx = chname[i];
+ sprintf(name, "%s Playback Volume", pfx);
err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
HDA_COMPOSE_AMP_VAL(nid_v, 3, 0,
HDA_OUTPUT));
if (err < 0)
return err;
- sprintf(name, "%s Playback Switch", chname[i]);
+ if (cfg->line_outs == 1 &&
+ cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ pfx = "Speaker";
+ sprintf(name, "%s Playback Switch", pfx);
err = add_control(spec, ALC_CTL_BIND_MUTE, name,
HDA_COMPOSE_AMP_VAL(nid_s, 3, 2,
HDA_INPUT));
@@ -15503,7 +15500,7 @@ static int alc861vd_parse_auto_config(struct hda_codec *codec)
"Headphone");
if (err < 0)
return err;
- err = alc880_auto_create_analog_input_ctls(spec, &spec->autocfg);
+ err = alc861vd_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -15557,8 +15554,8 @@ static int patch_alc861vd(struct hda_codec *codec)
alc861vd_cfg_tbl);
if (board_config < 0 || board_config >= ALC861VD_MODEL_LAST) {
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n", codec->chip_name);
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
board_config = ALC861VD_AUTO;
}
@@ -15583,7 +15580,7 @@ static int patch_alc861vd(struct hda_codec *codec)
}
if (board_config != ALC861VD_AUTO)
- setup_preset(spec, &alc861vd_presets[board_config]);
+ setup_preset(codec, &alc861vd_presets[board_config]);
if (codec->vendor_id == 0x10ec0660) {
/* always turn on EAPD */
@@ -15603,7 +15600,7 @@ static int patch_alc861vd(struct hda_codec *codec)
if (!spec->capsrc_nids)
spec->capsrc_nids = alc861vd_capsrc_nids;
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
spec->vmaster_nid = 0x02;
@@ -15644,9 +15641,9 @@ static hda_nid_t alc272_dac_nids[2] = {
0x02, 0x03
};
-static hda_nid_t alc662_adc_nids[1] = {
+static hda_nid_t alc662_adc_nids[2] = {
/* ADC1-2 */
- 0x09,
+ 0x09, 0x08
};
static hda_nid_t alc272_adc_nids[1] = {
@@ -15654,7 +15651,7 @@ static hda_nid_t alc272_adc_nids[1] = {
0x08,
};
-static hda_nid_t alc662_capsrc_nids[1] = { 0x22 };
+static hda_nid_t alc662_capsrc_nids[2] = { 0x22, 0x23 };
static hda_nid_t alc272_capsrc_nids[1] = { 0x23 };
@@ -15678,14 +15675,6 @@ static struct hda_input_mux alc662_lenovo_101e_capture_source = {
},
};
-static struct hda_input_mux alc662_eeepc_capture_source = {
- .num_items = 2,
- .items = {
- { "i-Mic", 0x1 },
- { "e-Mic", 0x0 },
- },
-};
-
static struct hda_input_mux alc663_capture_source = {
.num_items = 3,
.items = {
@@ -15695,23 +15684,7 @@ static struct hda_input_mux alc663_capture_source = {
},
};
-static struct hda_input_mux alc663_m51va_capture_source = {
- .num_items = 2,
- .items = {
- { "Ext-Mic", 0x0 },
- { "D-Mic", 0x9 },
- },
-};
-
-#if 1 /* set to 0 for testing other input sources below */
-static struct hda_input_mux alc272_nc10_capture_source = {
- .num_items = 2,
- .items = {
- { "Autoselect Mic", 0x0 },
- { "Internal Mic", 0x1 },
- },
-};
-#else
+#if 0 /* set to 1 for testing other input sources below */
static struct hda_input_mux alc272_nc10_capture_source = {
.num_items = 16,
.items = {
@@ -16380,47 +16353,44 @@ static void alc662_lenovo_101e_unsol_event(struct hda_codec *codec,
alc662_lenovo_101e_ispeaker_automute(codec);
}
-static void alc662_eeepc_mic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
- snd_hda_codec_write(codec, 0x22, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
- snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
- snd_hda_codec_write(codec, 0x22, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
- snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
-}
-
/* unsolicited event for HP jack sensing */
static void alc662_eeepc_unsol_event(struct hda_codec *codec,
unsigned int res)
{
if ((res >> 26) == ALC880_MIC_EVENT)
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
else
alc262_hippo_unsol_event(codec, res);
}
+static void alc662_eeepc_setup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ alc262_hippo1_setup(codec);
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x19;
+ spec->int_mic.mux_idx = 1;
+ spec->auto_mic = 1;
+}
+
static void alc662_eeepc_inithook(struct hda_codec *codec)
{
- alc262_hippo1_init_hook(codec);
- alc662_eeepc_mic_automute(codec);
+ alc262_hippo_automute(codec);
+ alc_mic_automute(codec);
}
-static void alc662_eeepc_ep20_inithook(struct hda_codec *codec)
+static void alc662_eeepc_ep20_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
spec->autocfg.hp_pins[0] = 0x14;
spec->autocfg.speaker_pins[0] = 0x1b;
- alc262_hippo_master_update(codec);
}
+#define alc662_eeepc_ep20_inithook alc262_hippo_master_update
+
static void alc663_m51va_speaker_automute(struct hda_codec *codec)
{
unsigned int present;
@@ -16531,23 +16501,6 @@ static void alc663_two_hp_m2_speaker_automute(struct hda_codec *codec)
}
}
-static void alc663_m51va_mic_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_codec_read(codec, 0x18, 0,
- AC_VERB_GET_PIN_SENSE, 0)
- & AC_PINSENSE_PRESENCE;
- snd_hda_codec_write_cache(codec, 0x22, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
- snd_hda_codec_write_cache(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
- snd_hda_codec_write_cache(codec, 0x22, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x09 << 8) | (present ? 0x80 : 0));
- snd_hda_codec_write_cache(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- 0x7000 | (0x09 << 8) | (present ? 0x80 : 0));
-}
-
static void alc663_m51va_unsol_event(struct hda_codec *codec,
unsigned int res)
{
@@ -16556,36 +16509,32 @@ static void alc663_m51va_unsol_event(struct hda_codec *codec,
alc663_m51va_speaker_automute(codec);
break;
case ALC880_MIC_EVENT:
- alc663_m51va_mic_automute(codec);
+ alc_mic_automute(codec);
break;
}
}
+static void alc663_m51va_setup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x12;
+ spec->int_mic.mux_idx = 1;
+ spec->auto_mic = 1;
+}
+
static void alc663_m51va_inithook(struct hda_codec *codec)
{
alc663_m51va_speaker_automute(codec);
- alc663_m51va_mic_automute(codec);
+ alc_mic_automute(codec);
}
/* ***************** Mode1 ******************************/
-static void alc663_mode1_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- switch (res >> 26) {
- case ALC880_HP_EVENT:
- alc663_m51va_speaker_automute(codec);
- break;
- case ALC880_MIC_EVENT:
- alc662_eeepc_mic_automute(codec);
- break;
- }
-}
+#define alc663_mode1_unsol_event alc663_m51va_unsol_event
+#define alc663_mode1_setup alc663_m51va_setup
+#define alc663_mode1_inithook alc663_m51va_inithook
-static void alc663_mode1_inithook(struct hda_codec *codec)
-{
- alc663_m51va_speaker_automute(codec);
- alc662_eeepc_mic_automute(codec);
-}
/* ***************** Mode2 ******************************/
static void alc662_mode2_unsol_event(struct hda_codec *codec,
unsigned int res)
@@ -16595,15 +16544,17 @@ static void alc662_mode2_unsol_event(struct hda_codec *codec,
alc662_f5z_speaker_automute(codec);
break;
case ALC880_MIC_EVENT:
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
break;
}
}
+#define alc662_mode2_setup alc663_m51va_setup
+
static void alc662_mode2_inithook(struct hda_codec *codec)
{
alc662_f5z_speaker_automute(codec);
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
}
/* ***************** Mode3 ******************************/
static void alc663_mode3_unsol_event(struct hda_codec *codec,
@@ -16614,15 +16565,17 @@ static void alc663_mode3_unsol_event(struct hda_codec *codec,
alc663_two_hp_m1_speaker_automute(codec);
break;
case ALC880_MIC_EVENT:
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
break;
}
}
+#define alc663_mode3_setup alc663_m51va_setup
+
static void alc663_mode3_inithook(struct hda_codec *codec)
{
alc663_two_hp_m1_speaker_automute(codec);
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
}
/* ***************** Mode4 ******************************/
static void alc663_mode4_unsol_event(struct hda_codec *codec,
@@ -16633,15 +16586,17 @@ static void alc663_mode4_unsol_event(struct hda_codec *codec,
alc663_21jd_two_speaker_automute(codec);
break;
case ALC880_MIC_EVENT:
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
break;
}
}
+#define alc663_mode4_setup alc663_m51va_setup
+
static void alc663_mode4_inithook(struct hda_codec *codec)
{
alc663_21jd_two_speaker_automute(codec);
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
}
/* ***************** Mode5 ******************************/
static void alc663_mode5_unsol_event(struct hda_codec *codec,
@@ -16652,15 +16607,17 @@ static void alc663_mode5_unsol_event(struct hda_codec *codec,
alc663_15jd_two_speaker_automute(codec);
break;
case ALC880_MIC_EVENT:
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
break;
}
}
+#define alc663_mode5_setup alc663_m51va_setup
+
static void alc663_mode5_inithook(struct hda_codec *codec)
{
alc663_15jd_two_speaker_automute(codec);
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
}
/* ***************** Mode6 ******************************/
static void alc663_mode6_unsol_event(struct hda_codec *codec,
@@ -16671,15 +16628,17 @@ static void alc663_mode6_unsol_event(struct hda_codec *codec,
alc663_two_hp_m2_speaker_automute(codec);
break;
case ALC880_MIC_EVENT:
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
break;
}
}
+#define alc663_mode6_setup alc663_m51va_setup
+
static void alc663_mode6_inithook(struct hda_codec *codec)
{
alc663_two_hp_m2_speaker_automute(codec);
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
}
static void alc663_g71v_hp_automute(struct hda_codec *codec)
@@ -16721,16 +16680,18 @@ static void alc663_g71v_unsol_event(struct hda_codec *codec,
alc663_g71v_front_automute(codec);
break;
case ALC880_MIC_EVENT:
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
break;
}
}
+#define alc663_g71v_setup alc663_m51va_setup
+
static void alc663_g71v_inithook(struct hda_codec *codec)
{
alc663_g71v_front_automute(codec);
alc663_g71v_hp_automute(codec);
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
}
static void alc663_g50v_unsol_event(struct hda_codec *codec,
@@ -16741,15 +16702,17 @@ static void alc663_g50v_unsol_event(struct hda_codec *codec,
alc663_m51va_speaker_automute(codec);
break;
case ALC880_MIC_EVENT:
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
break;
}
}
+#define alc663_g50v_setup alc663_m51va_setup
+
static void alc663_g50v_inithook(struct hda_codec *codec)
{
alc663_m51va_speaker_automute(codec);
- alc662_eeepc_mic_automute(codec);
+ alc_mic_automute(codec);
}
static struct snd_kcontrol_new alc662_ecs_mixer[] = {
@@ -16953,8 +16916,8 @@ static struct alc_config_preset alc662_presets[] = {
.dac_nids = alc662_dac_nids,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc662_eeepc_unsol_event,
+ .setup = alc662_eeepc_setup,
.init_hook = alc662_eeepc_inithook,
},
[ALC662_ASUS_EEEPC_EP20] = {
@@ -16968,6 +16931,7 @@ static struct alc_config_preset alc662_presets[] = {
.channel_mode = alc662_3ST_6ch_modes,
.input_mux = &alc662_lenovo_101e_capture_source,
.unsol_event = alc662_eeepc_unsol_event,
+ .setup = alc662_eeepc_ep20_setup,
.init_hook = alc662_eeepc_ep20_inithook,
},
[ALC662_ECS] = {
@@ -16978,8 +16942,8 @@ static struct alc_config_preset alc662_presets[] = {
.dac_nids = alc662_dac_nids,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc662_eeepc_unsol_event,
+ .setup = alc662_eeepc_setup,
.init_hook = alc662_eeepc_inithook,
},
[ALC663_ASUS_M51VA] = {
@@ -16990,8 +16954,8 @@ static struct alc_config_preset alc662_presets[] = {
.dig_out_nid = ALC662_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc663_m51va_capture_source,
.unsol_event = alc663_m51va_unsol_event,
+ .setup = alc663_m51va_setup,
.init_hook = alc663_m51va_inithook,
},
[ALC663_ASUS_G71V] = {
@@ -17002,8 +16966,8 @@ static struct alc_config_preset alc662_presets[] = {
.dig_out_nid = ALC662_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc663_g71v_unsol_event,
+ .setup = alc663_g71v_setup,
.init_hook = alc663_g71v_inithook,
},
[ALC663_ASUS_H13] = {
@@ -17013,7 +16977,6 @@ static struct alc_config_preset alc662_presets[] = {
.dac_nids = alc662_dac_nids,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc663_m51va_capture_source,
.unsol_event = alc663_m51va_unsol_event,
.init_hook = alc663_m51va_inithook,
},
@@ -17027,6 +16990,7 @@ static struct alc_config_preset alc662_presets[] = {
.channel_mode = alc662_3ST_6ch_modes,
.input_mux = &alc663_capture_source,
.unsol_event = alc663_g50v_unsol_event,
+ .setup = alc663_g50v_setup,
.init_hook = alc663_g50v_inithook,
},
[ALC663_ASUS_MODE1] = {
@@ -17040,8 +17004,8 @@ static struct alc_config_preset alc662_presets[] = {
.dig_out_nid = ALC662_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc663_mode1_unsol_event,
+ .setup = alc663_mode1_setup,
.init_hook = alc663_mode1_inithook,
},
[ALC662_ASUS_MODE2] = {
@@ -17054,8 +17018,8 @@ static struct alc_config_preset alc662_presets[] = {
.dig_out_nid = ALC662_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc662_mode2_unsol_event,
+ .setup = alc662_mode2_setup,
.init_hook = alc662_mode2_inithook,
},
[ALC663_ASUS_MODE3] = {
@@ -17069,8 +17033,8 @@ static struct alc_config_preset alc662_presets[] = {
.dig_out_nid = ALC662_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc663_mode3_unsol_event,
+ .setup = alc663_mode3_setup,
.init_hook = alc663_mode3_inithook,
},
[ALC663_ASUS_MODE4] = {
@@ -17084,8 +17048,8 @@ static struct alc_config_preset alc662_presets[] = {
.dig_out_nid = ALC662_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc663_mode4_unsol_event,
+ .setup = alc663_mode4_setup,
.init_hook = alc663_mode4_inithook,
},
[ALC663_ASUS_MODE5] = {
@@ -17099,8 +17063,8 @@ static struct alc_config_preset alc662_presets[] = {
.dig_out_nid = ALC662_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc663_mode5_unsol_event,
+ .setup = alc663_mode5_setup,
.init_hook = alc663_mode5_inithook,
},
[ALC663_ASUS_MODE6] = {
@@ -17114,8 +17078,8 @@ static struct alc_config_preset alc662_presets[] = {
.dig_out_nid = ALC662_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc662_eeepc_capture_source,
.unsol_event = alc663_mode6_unsol_event,
+ .setup = alc663_mode6_setup,
.init_hook = alc663_mode6_inithook,
},
[ALC272_DELL] = {
@@ -17129,8 +17093,8 @@ static struct alc_config_preset alc662_presets[] = {
.num_adc_nids = ARRAY_SIZE(alc272_adc_nids),
.capsrc_nids = alc272_capsrc_nids,
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc663_m51va_capture_source,
.unsol_event = alc663_m51va_unsol_event,
+ .setup = alc663_m51va_setup,
.init_hook = alc663_m51va_inithook,
},
[ALC272_DELL_ZM1] = {
@@ -17141,11 +17105,11 @@ static struct alc_config_preset alc662_presets[] = {
.dac_nids = alc662_dac_nids,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.adc_nids = alc662_adc_nids,
- .num_adc_nids = ARRAY_SIZE(alc662_adc_nids),
+ .num_adc_nids = 1,
.capsrc_nids = alc662_capsrc_nids,
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc663_m51va_capture_source,
.unsol_event = alc663_m51va_unsol_event,
+ .setup = alc663_m51va_setup,
.init_hook = alc663_m51va_inithook,
},
[ALC272_SAMSUNG_NC10] = {
@@ -17156,8 +17120,9 @@ static struct alc_config_preset alc662_presets[] = {
.dac_nids = alc272_dac_nids,
.num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
.channel_mode = alc662_3ST_2ch_modes,
- .input_mux = &alc272_nc10_capture_source,
+ /*.input_mux = &alc272_nc10_capture_source,*/
.unsol_event = alc663_mode4_unsol_event,
+ .setup = alc663_mode4_setup,
.init_hook = alc663_mode4_inithook,
},
};
@@ -17209,13 +17174,25 @@ static int alc662_auto_create_multi_out_ctls(struct alc_spec *spec,
if (err < 0)
return err;
} else {
- sprintf(name, "%s Playback Volume", chname[i]);
+ const char *pfx;
+ if (cfg->line_outs == 1 &&
+ cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
+ if (!cfg->hp_pins)
+ pfx = "Speaker";
+ else
+ pfx = "PCM";
+ } else
+ pfx = chname[i];
+ sprintf(name, "%s Playback Volume", pfx);
err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
HDA_COMPOSE_AMP_VAL(nid, 3, 0,
HDA_OUTPUT));
if (err < 0)
return err;
- sprintf(name, "%s Playback Switch", chname[i]);
+ if (cfg->line_outs == 1 &&
+ cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ pfx = "Speaker";
+ sprintf(name, "%s Playback Switch", pfx);
err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
HDA_COMPOSE_AMP_VAL(alc880_idx_to_mixer(i),
3, 0, HDA_INPUT));
@@ -17277,62 +17254,9 @@ static int alc662_auto_create_extra_out(struct alc_spec *spec, hda_nid_t pin,
return 0;
}
-/* return the index of the src widget from the connection list of the nid.
- * return -1 if not found
- */
-static int alc662_input_pin_idx(struct hda_codec *codec, hda_nid_t nid,
- hda_nid_t src)
-{
- hda_nid_t conn_list[HDA_MAX_CONNECTIONS];
- int i, conns;
-
- conns = snd_hda_get_connections(codec, nid, conn_list,
- ARRAY_SIZE(conn_list));
- if (conns < 0)
- return -1;
- for (i = 0; i < conns; i++)
- if (conn_list[i] == src)
- return i;
- return -1;
-}
-
-static int alc662_is_input_pin(struct hda_codec *codec, hda_nid_t nid)
-{
- unsigned int pincap = snd_hda_query_pin_caps(codec, nid);
- return (pincap & AC_PINCAP_IN) != 0;
-}
-
/* create playback/capture controls for input pins */
-static int alc662_auto_create_analog_input_ctls(struct hda_codec *codec,
- const struct auto_pin_cfg *cfg)
-{
- struct alc_spec *spec = codec->spec;
- struct hda_input_mux *imux = &spec->private_imux[0];
- int i, err, idx;
-
- for (i = 0; i < AUTO_PIN_LAST; i++) {
- if (alc662_is_input_pin(codec, cfg->input_pins[i])) {
- idx = alc662_input_pin_idx(codec, 0x0b,
- cfg->input_pins[i]);
- if (idx >= 0) {
- err = new_analog_input(spec, cfg->input_pins[i],
- auto_pin_cfg_labels[i],
- idx, 0x0b);
- if (err < 0)
- return err;
- }
- idx = alc662_input_pin_idx(codec, 0x22,
- cfg->input_pins[i]);
- if (idx >= 0) {
- imux->items[imux->num_items].label =
- auto_pin_cfg_labels[i];
- imux->items[imux->num_items].index = idx;
- imux->num_items++;
- }
- }
- }
- return 0;
-}
+#define alc662_auto_create_input_ctls \
+ alc880_auto_create_input_ctls
static void alc662_auto_set_output_and_unmute(struct hda_codec *codec,
hda_nid_t nid, int pin_type,
@@ -17386,7 +17310,7 @@ static void alc662_auto_init_analog_input(struct hda_codec *codec)
for (i = 0; i < AUTO_PIN_LAST; i++) {
hda_nid_t nid = spec->autocfg.input_pins[i];
- if (alc662_is_input_pin(codec, nid)) {
+ if (alc_is_input_pin(codec, nid)) {
alc_set_input_pin(codec, nid, i);
if (nid != ALC662_PIN_CD_NID &&
(get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
@@ -17427,7 +17351,7 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
"Headphone");
if (err < 0)
return err;
- err = alc662_auto_create_analog_input_ctls(codec, &spec->autocfg);
+ err = alc662_auto_create_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -17484,8 +17408,8 @@ static int patch_alc662(struct hda_codec *codec)
alc662_models,
alc662_cfg_tbl);
if (board_config < 0) {
- printk(KERN_INFO "hda_codec: Unknown model for %s, "
- "trying auto-probe from BIOS...\n", codec->chip_name);
+ printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
board_config = ALC662_AUTO;
}
@@ -17510,7 +17434,7 @@ static int patch_alc662(struct hda_codec *codec)
}
if (board_config != ALC662_AUTO)
- setup_preset(spec, &alc662_presets[board_config]);
+ setup_preset(codec, &alc662_presets[board_config]);
spec->stream_analog_playback = &alc662_pcm_analog_playback;
spec->stream_analog_capture = &alc662_pcm_analog_capture;
@@ -17526,7 +17450,7 @@ static int patch_alc662(struct hda_codec *codec)
spec->capsrc_nids = alc662_capsrc_nids;
if (!spec->cap_mixer)
- set_capture_mixer(spec);
+ set_capture_mixer(codec);
if (codec->vendor_id == 0x10ec0662)
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
else
@@ -17562,23 +17486,23 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0861, .name = "ALC861", .patch = patch_alc861 },
{ .id = 0x10ec0862, .name = "ALC861-VD", .patch = patch_alc861vd },
{ .id = 0x10ec0662, .rev = 0x100002, .name = "ALC662 rev2",
- .patch = patch_alc883 },
+ .patch = patch_alc882 },
{ .id = 0x10ec0662, .rev = 0x100101, .name = "ALC662 rev1",
.patch = patch_alc662 },
{ .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
{ .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
{ .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
- { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc883 },
+ { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
{ .id = 0x10ec0885, .rev = 0x100101, .name = "ALC889A",
- .patch = patch_alc882 }, /* should be patch_alc883() in future */
+ .patch = patch_alc882 },
{ .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A",
- .patch = patch_alc882 }, /* should be patch_alc883() in future */
+ .patch = patch_alc882 },
{ .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 },
- { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc883 },
+ { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 },
{ .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200",
- .patch = patch_alc883 },
- { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc883 },
- { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc883 },
+ .patch = patch_alc882 },
+ { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc882 },
+ { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 },
{} /* terminator */
};
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6990cfcb6a38..e31e53dc6962 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -40,6 +40,8 @@ enum {
STAC_INSERT_EVENT,
STAC_PWR_EVENT,
STAC_HP_EVENT,
+ STAC_LO_EVENT,
+ STAC_MIC_EVENT,
};
enum {
@@ -81,6 +83,7 @@ enum {
STAC_DELL_M6_DMIC,
STAC_DELL_M6_BOTH,
STAC_DELL_EQ,
+ STAC_ALIENWARE_M17X,
STAC_92HD73XX_MODELS
};
@@ -177,6 +180,12 @@ struct sigmatel_jack {
struct snd_jack *jack;
};
+struct sigmatel_mic_route {
+ hda_nid_t pin;
+ unsigned char mux_idx;
+ unsigned char dmux_idx;
+};
+
struct sigmatel_spec {
struct snd_kcontrol_new *mixers[4];
unsigned int num_mixers;
@@ -188,6 +197,7 @@ struct sigmatel_spec {
unsigned int hp_detect: 1;
unsigned int spdif_mute: 1;
unsigned int check_volume_offset:1;
+ unsigned int auto_mic:1;
/* gpio lines */
unsigned int eapd_mask;
@@ -219,7 +229,6 @@ struct sigmatel_spec {
/* playback */
struct hda_input_mux *mono_mux;
- struct hda_input_mux *amp_mux;
unsigned int cur_mmux;
struct hda_multi_out multiout;
hda_nid_t dac_nids[5];
@@ -239,6 +248,15 @@ struct sigmatel_spec {
unsigned int num_dmuxes;
hda_nid_t *smux_nids;
unsigned int num_smuxes;
+ unsigned int num_analog_muxes;
+
+ unsigned long *capvols; /* amp-volume attr: HDA_COMPOSE_AMP_VAL() */
+ unsigned long *capsws; /* amp-mute attr: HDA_COMPOSE_AMP_VAL() */
+ unsigned int num_caps; /* number of capture volume/switch elements */
+
+ struct sigmatel_mic_route ext_mic;
+ struct sigmatel_mic_route int_mic;
+
const char **spdif_labels;
hda_nid_t dig_in_nid;
@@ -263,7 +281,6 @@ struct sigmatel_spec {
unsigned int cur_smux[2];
unsigned int cur_amux;
hda_nid_t *amp_nids;
- unsigned int num_amps;
unsigned int powerdown_adcs;
/* i/o switches */
@@ -282,7 +299,6 @@ struct sigmatel_spec {
struct hda_input_mux private_dimux;
struct hda_input_mux private_imux;
struct hda_input_mux private_smux;
- struct hda_input_mux private_amp_mux;
struct hda_input_mux private_mono_mux;
};
@@ -311,11 +327,6 @@ static hda_nid_t stac92hd73xx_adc_nids[2] = {
0x1a, 0x1b
};
-#define DELL_M6_AMP 2
-static hda_nid_t stac92hd73xx_amp_nids[3] = {
- 0x0b, 0x0c, 0x0e
-};
-
#define STAC92HD73XX_NUM_DMICS 2
static hda_nid_t stac92hd73xx_dmic_nids[STAC92HD73XX_NUM_DMICS + 1] = {
0x13, 0x14, 0
@@ -323,8 +334,8 @@ static hda_nid_t stac92hd73xx_dmic_nids[STAC92HD73XX_NUM_DMICS + 1] = {
#define STAC92HD73_DAC_COUNT 5
-static hda_nid_t stac92hd73xx_mux_nids[4] = {
- 0x28, 0x29, 0x2a, 0x2b,
+static hda_nid_t stac92hd73xx_mux_nids[2] = {
+ 0x20, 0x21,
};
static hda_nid_t stac92hd73xx_dmux_nids[2] = {
@@ -335,14 +346,16 @@ static hda_nid_t stac92hd73xx_smux_nids[2] = {
0x22, 0x23,
};
-#define STAC92HD83XXX_NUM_DMICS 2
-static hda_nid_t stac92hd83xxx_dmic_nids[STAC92HD83XXX_NUM_DMICS + 1] = {
- 0x11, 0x12, 0
+#define STAC92HD73XX_NUM_CAPS 2
+static unsigned long stac92hd73xx_capvols[] = {
+ HDA_COMPOSE_AMP_VAL(0x20, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
};
+#define stac92hd73xx_capsws stac92hd73xx_capvols
#define STAC92HD83_DAC_COUNT 3
-static hda_nid_t stac92hd83xxx_dmux_nids[2] = {
+static hda_nid_t stac92hd83xxx_mux_nids[2] = {
0x17, 0x18,
};
@@ -362,9 +375,12 @@ static unsigned int stac92hd83xxx_pwr_mapping[4] = {
0x03, 0x0c, 0x20, 0x40,
};
-static hda_nid_t stac92hd83xxx_amp_nids[1] = {
- 0xc,
+#define STAC92HD83XXX_NUM_CAPS 2
+static unsigned long stac92hd83xxx_capvols[] = {
+ HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_OUTPUT),
};
+#define stac92hd83xxx_capsws stac92hd83xxx_capvols
static hda_nid_t stac92hd71bxx_pwr_nids[3] = {
0x0a, 0x0d, 0x0f
@@ -395,6 +411,13 @@ static hda_nid_t stac92hd71bxx_slave_dig_outs[2] = {
0x22, 0
};
+#define STAC92HD71BXX_NUM_CAPS 2
+static unsigned long stac92hd71bxx_capvols[] = {
+ HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT),
+};
+#define stac92hd71bxx_capsws stac92hd71bxx_capvols
+
static hda_nid_t stac925x_adc_nids[1] = {
0x03,
};
@@ -416,6 +439,13 @@ static hda_nid_t stac925x_dmux_nids[1] = {
0x14,
};
+static unsigned long stac925x_capvols[] = {
+ HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_OUTPUT),
+};
+static unsigned long stac925x_capsws[] = {
+ HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
+};
+
static hda_nid_t stac922x_adc_nids[2] = {
0x06, 0x07,
};
@@ -424,6 +454,13 @@ static hda_nid_t stac922x_mux_nids[2] = {
0x12, 0x13,
};
+#define STAC922X_NUM_CAPS 2
+static unsigned long stac922x_capvols[] = {
+ HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_INPUT),
+ HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_INPUT),
+};
+#define stac922x_capsws stac922x_capvols
+
static hda_nid_t stac927x_slave_dig_outs[2] = {
0x1f, 0,
};
@@ -453,6 +490,18 @@ static hda_nid_t stac927x_dmic_nids[STAC927X_NUM_DMICS + 1] = {
0x13, 0x14, 0
};
+#define STAC927X_NUM_CAPS 3
+static unsigned long stac927x_capvols[] = {
+ HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_INPUT),
+ HDA_COMPOSE_AMP_VAL(0x19, 3, 0, HDA_INPUT),
+ HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_INPUT),
+};
+static unsigned long stac927x_capsws[] = {
+ HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT),
+};
+
static const char *stac927x_spdif_labels[5] = {
"Digital Playback", "ADAT", "Analog Mux 1",
"Analog Mux 2", "Analog Mux 3"
@@ -479,6 +528,16 @@ static hda_nid_t stac9205_dmic_nids[STAC9205_NUM_DMICS + 1] = {
0x17, 0x18, 0
};
+#define STAC9205_NUM_CAPS 2
+static unsigned long stac9205_capvols[] = {
+ HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_INPUT),
+ HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_INPUT),
+};
+static unsigned long stac9205_capsws[] = {
+ HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x1e, 3, 0, HDA_OUTPUT),
+};
+
static hda_nid_t stac9200_pin_nids[8] = {
0x08, 0x09, 0x0d, 0x0e,
0x0f, 0x10, 0x11, 0x12,
@@ -529,34 +588,6 @@ static hda_nid_t stac9205_pin_nids[12] = {
0x21, 0x22,
};
-#define stac92xx_amp_volume_info snd_hda_mixer_amp_volume_info
-
-static int stac92xx_amp_volume_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct sigmatel_spec *spec = codec->spec;
- hda_nid_t nid = spec->amp_nids[spec->cur_amux];
-
- kcontrol->private_value ^= get_amp_nid(kcontrol);
- kcontrol->private_value |= nid;
-
- return snd_hda_mixer_amp_volume_get(kcontrol, ucontrol);
-}
-
-static int stac92xx_amp_volume_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct sigmatel_spec *spec = codec->spec;
- hda_nid_t nid = spec->amp_nids[spec->cur_amux];
-
- kcontrol->private_value ^= get_amp_nid(kcontrol);
- kcontrol->private_value |= nid;
-
- return snd_hda_mixer_amp_volume_put(kcontrol, ucontrol);
-}
-
static int stac92xx_dmux_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -693,9 +724,35 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct sigmatel_spec *spec = codec->spec;
unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
-
- return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
- spec->mux_nids[adc_idx], &spec->cur_mux[adc_idx]);
+ const struct hda_input_mux *imux = spec->input_mux;
+ unsigned int idx, prev_idx;
+
+ idx = ucontrol->value.enumerated.item[0];
+ if (idx >= imux->num_items)
+ idx = imux->num_items - 1;
+ prev_idx = spec->cur_mux[adc_idx];
+ if (prev_idx == idx)
+ return 0;
+ if (idx < spec->num_analog_muxes) {
+ snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0,
+ AC_VERB_SET_CONNECT_SEL,
+ imux->items[idx].index);
+ if (prev_idx >= spec->num_analog_muxes) {
+ imux = spec->dinput_mux;
+ /* 0 = analog */
+ snd_hda_codec_write_cache(codec,
+ spec->dmux_nids[adc_idx], 0,
+ AC_VERB_SET_CONNECT_SEL,
+ imux->items[0].index);
+ }
+ } else {
+ imux = spec->dinput_mux;
+ snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0,
+ AC_VERB_SET_CONNECT_SEL,
+ imux->items[idx - 1].index);
+ }
+ spec->cur_mux[adc_idx] = idx;
+ return 1;
}
static int stac92xx_mono_mux_enum_info(struct snd_kcontrol *kcontrol,
@@ -726,41 +783,6 @@ static int stac92xx_mono_mux_enum_put(struct snd_kcontrol *kcontrol,
spec->mono_nid, &spec->cur_mmux);
}
-static int stac92xx_amp_mux_enum_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct sigmatel_spec *spec = codec->spec;
- return snd_hda_input_mux_info(spec->amp_mux, uinfo);
-}
-
-static int stac92xx_amp_mux_enum_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct sigmatel_spec *spec = codec->spec;
-
- ucontrol->value.enumerated.item[0] = spec->cur_amux;
- return 0;
-}
-
-static int stac92xx_amp_mux_enum_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct sigmatel_spec *spec = codec->spec;
- struct snd_kcontrol *ctl =
- snd_hda_find_mixer_ctl(codec, "Amp Capture Volume");
- if (!ctl)
- return -EINVAL;
-
- snd_ctl_notify(codec->bus->card, SNDRV_CTL_EVENT_MASK_VALUE |
- SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
-
- return snd_hda_input_mux_put(codec, spec->amp_mux, ucontrol,
- 0, &spec->cur_amux);
-}
-
#define stac92xx_aloopback_info snd_ctl_boolean_mono_info
static int stac92xx_aloopback_get(struct snd_kcontrol *kcontrol,
@@ -828,84 +850,16 @@ static struct hda_verb stac9200_eapd_init[] = {
{}
};
-static struct hda_verb stac92hd73xx_6ch_core_init[] = {
- /* set master volume and direct control */
- { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
- /* setup adcs to point to mixer */
- { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
- { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
- { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- { 0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- { 0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* setup import muxs */
- { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x00},
- {}
-};
-
static struct hda_verb dell_eq_core_init[] = {
/* set master volume to max value without distortion
* and direct control */
{ 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec},
- /* setup adcs to point to mixer */
- { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
- { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
- /* setup import muxs */
- { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x00},
- {}
-};
-
-static struct hda_verb dell_m6_core_init[] = {
- { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
- /* setup adcs to point to mixer */
- { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
- { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
- /* setup import muxs */
- { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x00},
- {}
-};
-
-static struct hda_verb stac92hd73xx_8ch_core_init[] = {
- /* set master volume and direct control */
- { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
- /* setup adcs to point to mixer */
- { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
- { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
- { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- { 0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- { 0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* setup import muxs */
- { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x03},
{}
};
-static struct hda_verb stac92hd73xx_10ch_core_init[] = {
+static struct hda_verb stac92hd73xx_core_init[] = {
/* set master volume and direct control */
{ 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
- /* dac3 is connected to import3 mux */
- { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, 0xb07f},
- /* setup adcs to point to mixer */
- { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
- { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
- { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- { 0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- { 0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* setup import muxs */
- { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
- { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x03},
{}
};
@@ -925,19 +879,6 @@ static struct hda_verb stac92hd71bxx_core_init[] = {
{}
};
-#define HD_DISABLE_PORTF 1
-static struct hda_verb stac92hd71bxx_analog_core_init[] = {
- /* start of config #1 */
-
- /* connect port 0f to audio mixer */
- { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x2},
- /* start of config #2 */
-
- /* set master volume and direct control */
- { 0x28, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
- {}
-};
-
static struct hda_verb stac92hd71bxx_unmute_core_init[] = {
/* unmute right and left channels for nodes 0x0f, 0xa, 0x0d */
{ 0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
@@ -996,31 +937,6 @@ static struct hda_verb stac9205_core_init[] = {
.put = stac92xx_mono_mux_enum_put, \
}
-#define STAC_AMP_MUX \
- { \
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = "Amp Selector Capture Switch", \
- .count = 1, \
- .info = stac92xx_amp_mux_enum_info, \
- .get = stac92xx_amp_mux_enum_get, \
- .put = stac92xx_amp_mux_enum_put, \
- }
-
-#define STAC_AMP_VOL(xname, nid, chs, idx, dir) \
- { \
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .index = 0, \
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
- SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
- SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \
- .info = stac92xx_amp_volume_info, \
- .get = stac92xx_amp_volume_get, \
- .put = stac92xx_amp_volume_put, \
- .tlv = { .c = snd_hda_mixer_amp_tlv }, \
- .private_value = HDA_COMPOSE_AMP_VAL(nid, chs, idx, dir) \
- }
-
#define STAC_ANALOG_LOOPBACK(verb_read, verb_write, cnt) \
{ \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
@@ -1051,34 +967,6 @@ static struct snd_kcontrol_new stac9200_mixer[] = {
{ } /* end */
};
-#define DELL_M6_MIXER 6
-static struct snd_kcontrol_new stac92hd73xx_6ch_mixer[] = {
- /* start of config #1 */
- HDA_CODEC_VOLUME("Front Mic Mixer Capture Volume", 0x1d, 0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Mixer Capture Switch", 0x1d, 0, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Line In Mixer Capture Volume", 0x1d, 0x2, HDA_INPUT),
- HDA_CODEC_MUTE("Line In Mixer Capture Switch", 0x1d, 0x2, HDA_INPUT),
-
- HDA_CODEC_VOLUME("CD Mixer Capture Volume", 0x1d, 0x4, HDA_INPUT),
- HDA_CODEC_MUTE("CD Mixer Capture Switch", 0x1d, 0x4, HDA_INPUT),
-
- /* start of config #2 */
- HDA_CODEC_VOLUME("Mic Mixer Capture Volume", 0x1d, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Mixer Capture Switch", 0x1d, 0x1, HDA_INPUT),
-
- HDA_CODEC_VOLUME("DAC Mixer Capture Volume", 0x1d, 0x3, HDA_INPUT),
- HDA_CODEC_MUTE("DAC Mixer Capture Switch", 0x1d, 0x3, HDA_INPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x20, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x20, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x21, 0x0, HDA_OUTPUT),
-
- { } /* end */
-};
-
static struct snd_kcontrol_new stac92hd73xx_6ch_loopback[] = {
STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 3),
{}
@@ -1094,134 +982,14 @@ static struct snd_kcontrol_new stac92hd73xx_10ch_loopback[] = {
{}
};
-static struct snd_kcontrol_new stac92hd73xx_8ch_mixer[] = {
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x20, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x20, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x21, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME("Front Mic Mixer Capture Volume", 0x1d, 0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Mixer Capture Switch", 0x1d, 0, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Mic Mixer Capture Volume", 0x1d, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Mixer Capture Switch", 0x1d, 0x1, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Line In Mixer Capture Volume", 0x1d, 0x2, HDA_INPUT),
- HDA_CODEC_MUTE("Line In Mixer Capture Switch", 0x1d, 0x2, HDA_INPUT),
-
- HDA_CODEC_VOLUME("DAC Mixer Capture Volume", 0x1d, 0x3, HDA_INPUT),
- HDA_CODEC_MUTE("DAC Mixer Capture Switch", 0x1d, 0x3, HDA_INPUT),
-
- HDA_CODEC_VOLUME("CD Mixer Capture Volume", 0x1d, 0x4, HDA_INPUT),
- HDA_CODEC_MUTE("CD Mixer Capture Switch", 0x1d, 0x4, HDA_INPUT),
- { } /* end */
-};
-
-static struct snd_kcontrol_new stac92hd73xx_10ch_mixer[] = {
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x20, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x20, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x21, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME("Front Mic Mixer Capture Volume", 0x1d, 0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Mixer Capture Switch", 0x1d, 0, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Mic Mixer Capture Volume", 0x1d, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Mixer Capture Switch", 0x1d, 0x1, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Line In Mixer Capture Volume", 0x1d, 0x2, HDA_INPUT),
- HDA_CODEC_MUTE("Line In Mixer Capture Switch", 0x1d, 0x2, HDA_INPUT),
-
- HDA_CODEC_VOLUME("DAC Mixer Capture Volume", 0x1d, 0x3, HDA_INPUT),
- HDA_CODEC_MUTE("DAC Mixer Capture Switch", 0x1d, 0x3, HDA_INPUT),
-
- HDA_CODEC_VOLUME("CD Mixer Capture Volume", 0x1d, 0x4, HDA_INPUT),
- HDA_CODEC_MUTE("CD Mixer Capture Switch", 0x1d, 0x4, HDA_INPUT),
- { } /* end */
-};
-
-
-static struct snd_kcontrol_new stac92hd83xxx_mixer[] = {
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x17, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x18, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x18, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME("DAC0 Capture Volume", 0x1b, 0x3, HDA_INPUT),
- HDA_CODEC_MUTE("DAC0 Capture Switch", 0x1b, 0x3, HDA_INPUT),
-
- HDA_CODEC_VOLUME("DAC1 Capture Volume", 0x1b, 0x4, HDA_INPUT),
- HDA_CODEC_MUTE("DAC1 Capture Switch", 0x1b, 0x4, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Front Mic Capture Volume", 0x1b, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Capture Switch", 0x1b, 0x0, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Line In Capture Volume", 0x1b, 0x2, HDA_INPUT),
- HDA_CODEC_MUTE("Line In Capture Switch", 0x1b, 0x2, HDA_INPUT),
-
- /*
- HDA_CODEC_VOLUME("Mic Capture Volume", 0x1b, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Capture Switch", 0x1b 0x1, HDA_INPUT),
- */
- { } /* end */
-};
-
-static struct snd_kcontrol_new stac92hd71bxx_analog_mixer[] = {
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x1c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x1c, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x1d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x1d, 0x0, HDA_OUTPUT),
- /* analog pc-beep replaced with digital beep support */
- /*
- HDA_CODEC_VOLUME("PC Beep Volume", 0x17, 0x2, HDA_INPUT),
- HDA_CODEC_MUTE("PC Beep Switch", 0x17, 0x2, HDA_INPUT),
- */
-
- HDA_CODEC_MUTE("Import0 Mux Capture Switch", 0x17, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Import0 Mux Capture Volume", 0x17, 0x0, HDA_INPUT),
-
- HDA_CODEC_MUTE("Import1 Mux Capture Switch", 0x17, 0x1, HDA_INPUT),
- HDA_CODEC_VOLUME("Import1 Mux Capture Volume", 0x17, 0x1, HDA_INPUT),
-
- HDA_CODEC_MUTE("DAC0 Capture Switch", 0x17, 0x3, HDA_INPUT),
- HDA_CODEC_VOLUME("DAC0 Capture Volume", 0x17, 0x3, HDA_INPUT),
-
- HDA_CODEC_MUTE("DAC1 Capture Switch", 0x17, 0x4, HDA_INPUT),
- HDA_CODEC_VOLUME("DAC1 Capture Volume", 0x17, 0x4, HDA_INPUT),
- { } /* end */
-};
static struct snd_kcontrol_new stac92hd71bxx_loopback[] = {
STAC_ANALOG_LOOPBACK(0xFA0, 0x7A0, 2)
};
-static struct snd_kcontrol_new stac92hd71bxx_mixer[] = {
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x1c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x1c, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x1d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x1d, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
static struct snd_kcontrol_new stac925x_mixer[] = {
HDA_CODEC_VOLUME("Master Playback Volume", 0x0e, 0, HDA_OUTPUT),
HDA_CODEC_MUTE("Master Playback Switch", 0x0e, 0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x14, 0, HDA_OUTPUT),
- { } /* end */
-};
-
-static struct snd_kcontrol_new stac9205_mixer[] = {
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x1b, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x1d, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x1c, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x1e, 0x0, HDA_OUTPUT),
{ } /* end */
};
@@ -1230,29 +998,6 @@ static struct snd_kcontrol_new stac9205_loopback[] = {
{}
};
-/* This needs to be generated dynamically based on sequence */
-static struct snd_kcontrol_new stac922x_mixer[] = {
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x17, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x17, 0x0, HDA_INPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x18, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x18, 0x0, HDA_INPUT),
- { } /* end */
-};
-
-
-static struct snd_kcontrol_new stac927x_mixer[] = {
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x18, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x1b, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x19, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x1c, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME_IDX("Capture Volume", 0x2, 0x1A, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 0x2, 0x1d, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
static struct snd_kcontrol_new stac927x_loopback[] = {
STAC_ANALOG_LOOPBACK(0xFEB, 0x7EB, 1),
{}
@@ -1310,16 +1055,19 @@ static int stac92xx_build_controls(struct hda_codec *codec)
int err;
int i;
- err = snd_hda_add_new_ctls(codec, spec->mixer);
- if (err < 0)
- return err;
+ if (spec->mixer) {
+ err = snd_hda_add_new_ctls(codec, spec->mixer);
+ if (err < 0)
+ return err;
+ }
for (i = 0; i < spec->num_mixers; i++) {
err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
if (err < 0)
return err;
}
- if (spec->num_dmuxes > 0) {
+ if (!spec->auto_mic && spec->num_dmuxes > 0 &&
+ snd_hda_get_bool_hint(codec, "separate_dmux") == 1) {
stac_dmux_mixer.count = spec->num_dmuxes;
err = snd_hda_ctl_add(codec,
snd_ctl_new1(&stac_dmux_mixer, codec));
@@ -1766,12 +1514,20 @@ static unsigned int dell_m6_pin_configs[13] = {
0x4f0000f0,
};
+static unsigned int alienware_m17x_pin_configs[13] = {
+ 0x0321101f, 0x0321101f, 0x03a11020, 0x03014020,
+ 0x90170110, 0x4f0000f0, 0x4f0000f0, 0x4f0000f0,
+ 0x4f0000f0, 0x90a60160, 0x4f0000f0, 0x4f0000f0,
+ 0x904601b0,
+};
+
static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
[STAC_92HD73XX_REF] = ref92hd73xx_pin_configs,
[STAC_DELL_M6_AMIC] = dell_m6_pin_configs,
[STAC_DELL_M6_DMIC] = dell_m6_pin_configs,
[STAC_DELL_M6_BOTH] = dell_m6_pin_configs,
[STAC_DELL_EQ] = dell_m6_pin_configs,
+ [STAC_ALIENWARE_M17X] = alienware_m17x_pin_configs,
};
static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
@@ -1783,6 +1539,7 @@ static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
[STAC_DELL_M6_DMIC] = "dell-m6-dmic",
[STAC_DELL_M6_BOTH] = "dell-m6",
[STAC_DELL_EQ] = "dell-eq",
+ [STAC_ALIENWARE_M17X] = "alienware",
};
static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
@@ -1820,6 +1577,12 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
{} /* terminator */
};
+static struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
+ "Alienware M17x", STAC_ALIENWARE_M17X),
+ {} /* terminator */
+};
+
static unsigned int ref92hd83xxx_pin_configs[10] = {
0x02214030, 0x02211010, 0x02a19020, 0x02170130,
0x01014050, 0x01819040, 0x01014020, 0x90a3014e,
@@ -1927,6 +1690,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
"HP mini 1000", STAC_HP_M4),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361b,
"HP HDX", STAC_HP_HDX), /* HDX16 */
+ SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
+ "HP", STAC_HP_DV5),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
"unknown Dell", STAC_DELL_M4_1),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0234,
@@ -2642,8 +2407,7 @@ static int stac92xx_hp_switch_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid,
- unsigned char type);
+static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid);
static int stac92xx_hp_switch_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -2657,7 +2421,7 @@ static int stac92xx_hp_switch_put(struct snd_kcontrol *kcontrol,
/* check to be sure that the ports are up to date with
* switch changes
*/
- stac_issue_unsol_event(codec, nid, STAC_HP_EVENT);
+ stac_issue_unsol_event(codec, nid);
return 1;
}
@@ -2790,7 +2554,7 @@ static int stac92xx_io_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_
* appropriately according to the pin direction
*/
if (spec->hp_detect)
- stac_issue_unsol_event(codec, nid, STAC_HP_EVENT);
+ stac_issue_unsol_event(codec, nid);
return 1;
}
@@ -2859,8 +2623,6 @@ enum {
STAC_CTL_WIDGET_VOL,
STAC_CTL_WIDGET_MUTE,
STAC_CTL_WIDGET_MONO_MUX,
- STAC_CTL_WIDGET_AMP_MUX,
- STAC_CTL_WIDGET_AMP_VOL,
STAC_CTL_WIDGET_HP_SWITCH,
STAC_CTL_WIDGET_IO_SWITCH,
STAC_CTL_WIDGET_CLFE_SWITCH,
@@ -2871,8 +2633,6 @@ static struct snd_kcontrol_new stac92xx_control_templates[] = {
HDA_CODEC_VOLUME(NULL, 0, 0, 0),
HDA_CODEC_MUTE(NULL, 0, 0, 0),
STAC_MONO_MUX,
- STAC_AMP_MUX,
- STAC_AMP_VOL(NULL, 0, 0, 0, 0),
STAC_CODEC_HP_SWITCH(NULL),
STAC_CODEC_IO_SWITCH(NULL, 0),
STAC_CODEC_CLFE_SWITCH(NULL, 0),
@@ -2973,6 +2733,8 @@ static int stac92xx_add_input_source(struct sigmatel_spec *spec)
struct snd_kcontrol_new *knew;
struct hda_input_mux *imux = &spec->private_imux;
+ if (spec->auto_mic)
+ return 0; /* no need for input source */
if (!spec->num_adcs || imux->num_items <= 1)
return 0; /* no need for input source control */
knew = stac_control_new(spec, &stac_input_src_temp,
@@ -3066,7 +2828,7 @@ static hda_nid_t get_unassigned_dac(struct hda_codec *codec, hda_nid_t nid)
HDA_MAX_CONNECTIONS);
for (j = 0; j < conn_len; j++) {
wcaps = get_wcaps(codec, conn[j]);
- wtype = (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ wtype = get_wcaps_type(wcaps);
/* we check only analog outputs */
if (wtype != AC_WID_AUD_OUT || (wcaps & AC_WCAP_DIGITAL))
continue;
@@ -3325,6 +3087,21 @@ static int create_multi_out_ctls(struct hda_codec *codec, int num_outs,
return 0;
}
+static int stac92xx_add_capvol_ctls(struct hda_codec *codec, unsigned long vol,
+ unsigned long sw, int idx)
+{
+ int err;
+ err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_VOL, idx,
+ "Capture Volume", vol);
+ if (err < 0)
+ return err;
+ err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_MUTE, idx,
+ "Capture Switch", sw);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
/* add playback controls from the parsed DAC table */
static int stac92xx_auto_create_multi_out_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
@@ -3398,7 +3175,7 @@ static int stac92xx_auto_create_mono_output_ctls(struct hda_codec *codec)
spec->mono_nid,
con_lst,
HDA_MAX_NUM_INPUTS);
- if (!num_cons || num_cons > ARRAY_SIZE(stac92xx_mono_labels))
+ if (num_cons <= 0 || num_cons > ARRAY_SIZE(stac92xx_mono_labels))
return -EINVAL;
for (i = 0; i < num_cons; i++) {
@@ -3412,37 +3189,6 @@ static int stac92xx_auto_create_mono_output_ctls(struct hda_codec *codec)
"Mono Mux", spec->mono_nid);
}
-/* labels for amp mux outputs */
-static const char *stac92xx_amp_labels[3] = {
- "Front Microphone", "Microphone", "Line In",
-};
-
-/* create amp out controls mux on capable codecs */
-static int stac92xx_auto_create_amp_output_ctls(struct hda_codec *codec)
-{
- struct sigmatel_spec *spec = codec->spec;
- struct hda_input_mux *amp_mux = &spec->private_amp_mux;
- int i, err;
-
- for (i = 0; i < spec->num_amps; i++) {
- amp_mux->items[amp_mux->num_items].label =
- stac92xx_amp_labels[i];
- amp_mux->items[amp_mux->num_items].index = i;
- amp_mux->num_items++;
- }
-
- if (spec->num_amps > 1) {
- err = stac92xx_add_control(spec, STAC_CTL_WIDGET_AMP_MUX,
- "Amp Selector Capture Switch", 0);
- if (err < 0)
- return err;
- }
- return stac92xx_add_control(spec, STAC_CTL_WIDGET_AMP_VOL,
- "Amp Capture Volume",
- HDA_COMPOSE_AMP_VAL(spec->amp_nids[0], 3, 0, HDA_INPUT));
-}
-
-
/* create PC beep volume controls */
static int stac92xx_auto_create_beep_ctls(struct hda_codec *codec,
hda_nid_t nid)
@@ -3511,19 +3257,33 @@ static int stac92xx_beep_switch_ctl(struct hda_codec *codec)
static int stac92xx_auto_create_mux_input_ctls(struct hda_codec *codec)
{
struct sigmatel_spec *spec = codec->spec;
- int wcaps, nid, i, err = 0;
+ int i, j, err = 0;
for (i = 0; i < spec->num_muxes; i++) {
+ hda_nid_t nid;
+ unsigned int wcaps;
+ unsigned long val;
+
nid = spec->mux_nids[i];
wcaps = get_wcaps(codec, nid);
+ if (!(wcaps & AC_WCAP_OUT_AMP))
+ continue;
- if (wcaps & AC_WCAP_OUT_AMP) {
- err = stac92xx_add_control_idx(spec,
- STAC_CTL_WIDGET_VOL, i, "Mux Capture Volume",
- HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT));
- if (err < 0)
- return err;
+ /* check whether the same control was already created as a
+ * normal Capture Volume.
+ */
+ val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+ for (j = 0; j < spec->num_caps; j++) {
+ if (spec->capvols[j] == val)
+ break;
}
+ if (j < spec->num_caps)
+ continue;
+
+ err = stac92xx_add_control_idx(spec, STAC_CTL_WIDGET_VOL, i,
+ "Mux Capture Volume", val);
+ if (err < 0)
+ return err;
}
return 0;
};
@@ -3544,7 +3304,7 @@ static int stac92xx_auto_create_spdif_mux_ctls(struct hda_codec *codec)
spec->smux_nids[0],
con_lst,
HDA_MAX_NUM_INPUTS);
- if (!num_cons)
+ if (num_cons <= 0)
return -EINVAL;
if (!labels)
@@ -3565,101 +3325,231 @@ static const char *stac92xx_dmic_labels[5] = {
"Digital Mic 3", "Digital Mic 4"
};
+static int get_connection_index(struct hda_codec *codec, hda_nid_t mux,
+ hda_nid_t nid)
+{
+ hda_nid_t conn[HDA_MAX_NUM_INPUTS];
+ int i, nums;
+
+ nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn));
+ for (i = 0; i < nums; i++)
+ if (conn[i] == nid)
+ return i;
+ return -1;
+}
+
+/* create a volume control assigned to the given pin (only if supported) */
+/* return 1 if the volume control is created */
+static int create_elem_capture_vol(struct hda_codec *codec, hda_nid_t nid,
+ const char *label, int direction)
+{
+ unsigned int caps, nums;
+ char name[32];
+ int err;
+
+ if (direction == HDA_OUTPUT)
+ caps = AC_WCAP_OUT_AMP;
+ else
+ caps = AC_WCAP_IN_AMP;
+ if (!(get_wcaps(codec, nid) & caps))
+ return 0;
+ caps = query_amp_caps(codec, nid, direction);
+ nums = (caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT;
+ if (!nums)
+ return 0;
+ snprintf(name, sizeof(name), "%s Capture Volume", label);
+ err = stac92xx_add_control(codec->spec, STAC_CTL_WIDGET_VOL, name,
+ HDA_COMPOSE_AMP_VAL(nid, 3, 0, direction));
+ if (err < 0)
+ return err;
+ return 1;
+}
+
/* create playback/capture controls for input pins on dmic capable codecs */
static int stac92xx_auto_create_dmic_input_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
struct sigmatel_spec *spec = codec->spec;
+ struct hda_input_mux *imux = &spec->private_imux;
struct hda_input_mux *dimux = &spec->private_dimux;
- hda_nid_t con_lst[HDA_MAX_NUM_INPUTS];
- int err, i, j;
- char name[32];
+ int err, i, active_mics;
+ unsigned int def_conf;
dimux->items[dimux->num_items].label = stac92xx_dmic_labels[0];
dimux->items[dimux->num_items].index = 0;
dimux->num_items++;
+ active_mics = 0;
+ for (i = 0; i < spec->num_dmics; i++) {
+ /* check the validity: sometimes it's a dead vendor-spec node */
+ if (get_wcaps_type(get_wcaps(codec, spec->dmic_nids[i]))
+ != AC_WID_PIN)
+ continue;
+ def_conf = snd_hda_codec_get_pincfg(codec, spec->dmic_nids[i]);
+ if (get_defcfg_connect(def_conf) != AC_JACK_PORT_NONE)
+ active_mics++;
+ }
+
for (i = 0; i < spec->num_dmics; i++) {
hda_nid_t nid;
int index;
- int num_cons;
- unsigned int wcaps;
- unsigned int def_conf;
+ const char *label;
- def_conf = snd_hda_codec_get_pincfg(codec, spec->dmic_nids[i]);
+ nid = spec->dmic_nids[i];
+ if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
+ continue;
+ def_conf = snd_hda_codec_get_pincfg(codec, nid);
if (get_defcfg_connect(def_conf) == AC_JACK_PORT_NONE)
continue;
- nid = spec->dmic_nids[i];
- num_cons = snd_hda_get_connections(codec,
- spec->dmux_nids[0],
- con_lst,
- HDA_MAX_NUM_INPUTS);
- for (j = 0; j < num_cons; j++)
- if (con_lst[j] == nid) {
- index = j;
- goto found;
- }
- continue;
-found:
- wcaps = get_wcaps(codec, nid) &
- (AC_WCAP_OUT_AMP | AC_WCAP_IN_AMP);
+ index = get_connection_index(codec, spec->dmux_nids[0], nid);
+ if (index < 0)
+ continue;
- if (wcaps) {
- sprintf(name, "%s Capture Volume",
- stac92xx_dmic_labels[dimux->num_items]);
+ if (active_mics == 1)
+ label = "Digital Mic";
+ else
+ label = stac92xx_dmic_labels[dimux->num_items];
- err = stac92xx_add_control(spec,
- STAC_CTL_WIDGET_VOL,
- name,
- HDA_COMPOSE_AMP_VAL(nid, 3, 0,
- (wcaps & AC_WCAP_OUT_AMP) ?
- HDA_OUTPUT : HDA_INPUT));
+ err = create_elem_capture_vol(codec, nid, label, HDA_INPUT);
+ if (err < 0)
+ return err;
+ if (!err) {
+ err = create_elem_capture_vol(codec, nid, label,
+ HDA_OUTPUT);
if (err < 0)
return err;
}
- dimux->items[dimux->num_items].label =
- stac92xx_dmic_labels[dimux->num_items];
+ dimux->items[dimux->num_items].label = label;
dimux->items[dimux->num_items].index = index;
dimux->num_items++;
+ if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) {
+ imux->items[imux->num_items].label = label;
+ imux->items[imux->num_items].index = index;
+ imux->num_items++;
+ }
}
return 0;
}
+static int check_mic_pin(struct hda_codec *codec, hda_nid_t nid,
+ hda_nid_t *fixed, hda_nid_t *ext)
+{
+ unsigned int cfg;
+
+ if (!nid)
+ return 0;
+ cfg = snd_hda_codec_get_pincfg(codec, nid);
+ switch (get_defcfg_connect(cfg)) {
+ case AC_JACK_PORT_FIXED:
+ if (*fixed)
+ return 1; /* already occupied */
+ *fixed = nid;
+ break;
+ case AC_JACK_PORT_COMPLEX:
+ if (*ext)
+ return 1; /* already occupied */
+ *ext = nid;
+ break;
+ }
+ return 0;
+}
+
+static int set_mic_route(struct hda_codec *codec,
+ struct sigmatel_mic_route *mic,
+ hda_nid_t pin)
+{
+ struct sigmatel_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i;
+
+ mic->pin = pin;
+ for (i = AUTO_PIN_MIC; i <= AUTO_PIN_FRONT_MIC; i++)
+ if (pin == cfg->input_pins[i])
+ break;
+ if (i <= AUTO_PIN_FRONT_MIC) {
+ /* analog pin */
+ mic->dmux_idx = 0;
+ i = get_connection_index(codec, spec->mux_nids[0], pin);
+ if (i < 0)
+ return -1;
+ mic->mux_idx = i;
+ } else if (spec->dmux_nids) {
+ /* digital pin */
+ mic->mux_idx = 0;
+ i = get_connection_index(codec, spec->dmux_nids[0], pin);
+ if (i < 0)
+ return -1;
+ mic->dmux_idx = i;
+ }
+ return 0;
+}
+
+/* return non-zero if the device supports automatic mic switching */
+static int stac_check_auto_mic(struct hda_codec *codec)
+{
+ struct sigmatel_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ hda_nid_t fixed, ext;
+ int i;
+
+ for (i = AUTO_PIN_LINE; i < AUTO_PIN_LAST; i++) {
+ if (cfg->input_pins[i])
+ return 0; /* must be exclusively mics */
+ }
+ fixed = ext = 0;
+ for (i = AUTO_PIN_MIC; i <= AUTO_PIN_FRONT_MIC; i++)
+ if (check_mic_pin(codec, cfg->input_pins[i], &fixed, &ext))
+ return 0;
+ for (i = 0; i < spec->num_dmics; i++)
+ if (check_mic_pin(codec, spec->dmic_nids[i], &fixed, &ext))
+ return 0;
+ if (!fixed || !ext)
+ return 0;
+ if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP))
+ return 0; /* no unsol support */
+ if (set_mic_route(codec, &spec->ext_mic, ext) ||
+ set_mic_route(codec, &spec->int_mic, fixed))
+ return 0; /* something is wrong */
+ return 1;
+}
+
/* create playback/capture controls for input pins */
static int stac92xx_auto_create_analog_input_ctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg)
{
struct sigmatel_spec *spec = codec->spec;
struct hda_input_mux *imux = &spec->private_imux;
- hda_nid_t con_lst[HDA_MAX_NUM_INPUTS];
- int i, j, k;
+ int i, j;
for (i = 0; i < AUTO_PIN_LAST; i++) {
- int index;
+ hda_nid_t nid = cfg->input_pins[i];
+ int index, err;
- if (!cfg->input_pins[i])
+ if (!nid)
continue;
index = -1;
for (j = 0; j < spec->num_muxes; j++) {
- int num_cons;
- num_cons = snd_hda_get_connections(codec,
- spec->mux_nids[j],
- con_lst,
- HDA_MAX_NUM_INPUTS);
- for (k = 0; k < num_cons; k++)
- if (con_lst[k] == cfg->input_pins[i]) {
- index = k;
- goto found;
- }
+ index = get_connection_index(codec, spec->mux_nids[j],
+ nid);
+ if (index >= 0)
+ break;
}
- continue;
- found:
+ if (index < 0)
+ continue;
+
+ err = create_elem_capture_vol(codec, nid,
+ auto_pin_cfg_labels[i],
+ HDA_INPUT);
+ if (err < 0)
+ return err;
+
imux->items[imux->num_items].label = auto_pin_cfg_labels[i];
imux->items[imux->num_items].index = index;
imux->num_items++;
}
+ spec->num_analog_muxes = imux->num_items;
if (imux->num_items) {
/*
@@ -3711,7 +3601,7 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
{
struct sigmatel_spec *spec = codec->spec;
int hp_swap = 0;
- int err;
+ int i, err;
if ((err = snd_hda_parse_pin_def_config(codec,
&spec->autocfg,
@@ -3751,11 +3641,10 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
if (snd_hda_get_connections(codec,
spec->autocfg.mono_out_pin, conn_list, 1) &&
snd_hda_get_connections(codec, conn_list[0],
- conn_list, 1)) {
+ conn_list, 1) > 0) {
int wcaps = get_wcaps(codec, conn_list[0]);
- int wid_type = (wcaps & AC_WCAP_TYPE)
- >> AC_WCAP_TYPE_SHIFT;
+ int wid_type = get_wcaps_type(wcaps);
/* LR swap check, some stac925x have a mux that
* changes the DACs output path instead of the
* mono-mux path.
@@ -3846,6 +3735,21 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
spec->autocfg.line_outs = 0;
}
+ if (stac_check_auto_mic(codec)) {
+ spec->auto_mic = 1;
+ /* only one capture for auto-mic */
+ spec->num_adcs = 1;
+ spec->num_caps = 1;
+ spec->num_muxes = 1;
+ }
+
+ for (i = 0; i < spec->num_caps; i++) {
+ err = stac92xx_add_capvol_ctls(codec, spec->capvols[i],
+ spec->capsws[i], i);
+ if (err < 0)
+ return err;
+ }
+
err = stac92xx_auto_create_analog_input_ctls(codec, &spec->autocfg);
if (err < 0)
return err;
@@ -3855,11 +3759,6 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
if (err < 0)
return err;
}
- if (spec->num_amps > 0) {
- err = stac92xx_auto_create_amp_output_ctls(codec);
- if (err < 0)
- return err;
- }
if (spec->num_dmics > 0 && !spec->dinput_mux)
if ((err = stac92xx_auto_create_dmic_input_ctls(codec,
&spec->autocfg)) < 0)
@@ -3896,7 +3795,6 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
spec->dinput_mux = &spec->private_dimux;
spec->sinput_mux = &spec->private_smux;
spec->mono_mux = &spec->private_mono_mux;
- spec->amp_mux = &spec->private_amp_mux;
return 1;
}
@@ -4108,14 +4006,14 @@ static int stac_add_event(struct sigmatel_spec *spec, hda_nid_t nid,
}
static struct sigmatel_event *stac_get_event(struct hda_codec *codec,
- hda_nid_t nid, unsigned char type)
+ hda_nid_t nid)
{
struct sigmatel_spec *spec = codec->spec;
struct sigmatel_event *event = spec->events.list;
int i;
for (i = 0; i < spec->events.used; i++, event++) {
- if (event->nid == nid && event->type == type)
+ if (event->nid == nid)
return event;
}
return NULL;
@@ -4135,24 +4033,32 @@ static struct sigmatel_event *stac_get_event_from_tag(struct hda_codec *codec,
return NULL;
}
-static void enable_pin_detect(struct hda_codec *codec, hda_nid_t nid,
- unsigned int type)
+/* check if the given nid is a valid pin and no other events are assigned
+ * to it.  If OK, assign the event, set the unsol flag, and return 1.
+ * Otherwise, return zero.
+ */
+static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int type)
{
struct sigmatel_event *event;
int tag;
if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP))
- return;
- event = stac_get_event(codec, nid, type);
- if (event)
+ return 0;
+ event = stac_get_event(codec, nid);
+ if (event) {
+ if (event->type != type)
+ return 0;
tag = event->tag;
- else
+ } else {
tag = stac_add_event(codec->spec, nid, type, 0);
- if (tag < 0)
- return;
+ if (tag < 0)
+ return 0;
+ }
snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_UNSOLICITED_ENABLE,
AC_USRSP_EN | tag);
+ return 1;
}
static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
@@ -4245,20 +4151,36 @@ static int stac92xx_init(struct hda_codec *codec)
hda_nid_t nid = cfg->hp_pins[i];
enable_pin_detect(codec, nid, STAC_HP_EVENT);
}
+ if (cfg->line_out_type == AUTO_PIN_LINE_OUT &&
+ cfg->speaker_outs > 0) {
+ /* enable pin-detect for line-outs as well */
+ for (i = 0; i < cfg->line_outs; i++) {
+ hda_nid_t nid = cfg->line_out_pins[i];
+ enable_pin_detect(codec, nid, STAC_LO_EVENT);
+ }
+ }
+
/* force to enable the first line-out; the others are set up
* in unsol_event
*/
stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
AC_PINCTL_OUT_EN);
/* fake event to set up pins */
- stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0],
- STAC_HP_EVENT);
+ stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
} else {
stac92xx_auto_init_multi_out(codec);
stac92xx_auto_init_hp_out(codec);
for (i = 0; i < cfg->hp_outs; i++)
stac_toggle_power_map(codec, cfg->hp_pins[i], 1);
}
+ if (spec->auto_mic) {
+ /* initialize connection to analog input */
+ if (spec->dmux_nids)
+ snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0,
+ AC_VERB_SET_CONNECT_SEL, 0);
+ if (enable_pin_detect(codec, spec->ext_mic.pin, STAC_MIC_EVENT))
+ stac_issue_unsol_event(codec, spec->ext_mic.pin);
+ }
for (i = 0; i < AUTO_PIN_LAST; i++) {
hda_nid_t nid = cfg->input_pins[i];
if (nid) {
@@ -4285,10 +4207,9 @@ static int stac92xx_init(struct hda_codec *codec)
}
conf = snd_hda_codec_get_pincfg(codec, nid);
if (get_defcfg_connect(conf) != AC_JACK_PORT_FIXED) {
- enable_pin_detect(codec, nid,
- STAC_INSERT_EVENT);
- stac_issue_unsol_event(codec, nid,
- STAC_INSERT_EVENT);
+ if (enable_pin_detect(codec, nid,
+ STAC_INSERT_EVENT))
+ stac_issue_unsol_event(codec, nid);
}
}
}
@@ -4333,10 +4254,8 @@ static int stac92xx_init(struct hda_codec *codec)
stac_toggle_power_map(codec, nid, 1);
continue;
}
- if (!stac_get_event(codec, nid, STAC_INSERT_EVENT)) {
- enable_pin_detect(codec, nid, STAC_PWR_EVENT);
- stac_issue_unsol_event(codec, nid, STAC_PWR_EVENT);
- }
+ if (enable_pin_detect(codec, nid, STAC_PWR_EVENT))
+ stac_issue_unsol_event(codec, nid);
}
if (spec->dac_list)
stac92xx_power_down(codec);
@@ -4440,6 +4359,48 @@ static int get_pin_presence(struct hda_codec *codec, hda_nid_t nid)
return 0;
}
+static void stac92xx_line_out_detect(struct hda_codec *codec,
+ int presence)
+{
+ struct sigmatel_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ int i;
+
+ for (i = 0; i < cfg->line_outs; i++) {
+ if (presence)
+ break;
+ presence = get_pin_presence(codec, cfg->line_out_pins[i]);
+ if (presence) {
+ unsigned int pinctl;
+ pinctl = snd_hda_codec_read(codec,
+ cfg->line_out_pins[i], 0,
+ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+ if (pinctl & AC_PINCTL_IN_EN)
+ presence = 0; /* mic- or line-input */
+ }
+ }
+
+ if (presence) {
+ /* disable speakers */
+ for (i = 0; i < cfg->speaker_outs; i++)
+ stac92xx_reset_pinctl(codec, cfg->speaker_pins[i],
+ AC_PINCTL_OUT_EN);
+ if (spec->eapd_mask && spec->eapd_switch)
+ stac_gpio_set(codec, spec->gpio_mask,
+ spec->gpio_dir, spec->gpio_data &
+ ~spec->eapd_mask);
+ } else {
+ /* enable speakers */
+ for (i = 0; i < cfg->speaker_outs; i++)
+ stac92xx_set_pinctl(codec, cfg->speaker_pins[i],
+ AC_PINCTL_OUT_EN);
+ if (spec->eapd_mask && spec->eapd_switch)
+ stac_gpio_set(codec, spec->gpio_mask,
+ spec->gpio_dir, spec->gpio_data |
+ spec->eapd_mask);
+ }
+}
+
/* return non-zero if the hp-pin of the given array index isn't
* a jack-detection target
*/
@@ -4492,13 +4453,6 @@ static void stac92xx_hp_detect(struct hda_codec *codec)
for (i = 0; i < cfg->line_outs; i++)
stac92xx_reset_pinctl(codec, cfg->line_out_pins[i],
AC_PINCTL_OUT_EN);
- for (i = 0; i < cfg->speaker_outs; i++)
- stac92xx_reset_pinctl(codec, cfg->speaker_pins[i],
- AC_PINCTL_OUT_EN);
- if (spec->eapd_mask && spec->eapd_switch)
- stac_gpio_set(codec, spec->gpio_mask,
- spec->gpio_dir, spec->gpio_data &
- ~spec->eapd_mask);
} else {
/* enable lineouts */
if (spec->hp_switch)
@@ -4507,14 +4461,8 @@ static void stac92xx_hp_detect(struct hda_codec *codec)
for (i = 0; i < cfg->line_outs; i++)
stac92xx_set_pinctl(codec, cfg->line_out_pins[i],
AC_PINCTL_OUT_EN);
- for (i = 0; i < cfg->speaker_outs; i++)
- stac92xx_set_pinctl(codec, cfg->speaker_pins[i],
- AC_PINCTL_OUT_EN);
- if (spec->eapd_mask && spec->eapd_switch)
- stac_gpio_set(codec, spec->gpio_mask,
- spec->gpio_dir, spec->gpio_data |
- spec->eapd_mask);
}
+ stac92xx_line_out_detect(codec, presence);
/* toggle hp outs */
for (i = 0; i < cfg->hp_outs; i++) {
unsigned int val = AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN;
@@ -4599,10 +4547,28 @@ static void stac92xx_report_jack(struct hda_codec *codec, hda_nid_t nid)
}
}
-static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid,
- unsigned char type)
+static void stac92xx_mic_detect(struct hda_codec *codec)
{
- struct sigmatel_event *event = stac_get_event(codec, nid, type);
+ struct sigmatel_spec *spec = codec->spec;
+ struct sigmatel_mic_route *mic;
+
+ if (get_pin_presence(codec, spec->ext_mic.pin))
+ mic = &spec->ext_mic;
+ else
+ mic = &spec->int_mic;
+ if (mic->dmux_idx)
+ snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0,
+ AC_VERB_SET_CONNECT_SEL,
+ mic->dmux_idx);
+ else
+ snd_hda_codec_write_cache(codec, spec->mux_nids[0], 0,
+ AC_VERB_SET_CONNECT_SEL,
+ mic->mux_idx);
+}
+
+static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid)
+{
+ struct sigmatel_event *event = stac_get_event(codec, nid);
if (!event)
return;
codec->patch_ops.unsol_event(codec, (unsigned)event->tag << 26);
@@ -4621,8 +4587,18 @@ static void stac92xx_unsol_event(struct hda_codec *codec, unsigned int res)
switch (event->type) {
case STAC_HP_EVENT:
+ case STAC_LO_EVENT:
stac92xx_hp_detect(codec);
- /* fallthru */
+ break;
+ case STAC_MIC_EVENT:
+ stac92xx_mic_detect(codec);
+ break;
+ }
+
+ switch (event->type) {
+ case STAC_HP_EVENT:
+ case STAC_LO_EVENT:
+ case STAC_MIC_EVENT:
case STAC_INSERT_EVENT:
case STAC_PWR_EVENT:
if (spec->num_pwrs > 0)
@@ -4713,8 +4689,7 @@ static int stac92xx_resume(struct hda_codec *codec)
snd_hda_codec_resume_cache(codec);
/* fake event to set up pins again to override cached values */
if (spec->hp_detect)
- stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0],
- STAC_HP_EVENT);
+ stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
return 0;
}
@@ -4754,6 +4729,19 @@ static int stac92xx_hp_check_power_status(struct hda_codec *codec,
static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state)
{
struct sigmatel_spec *spec = codec->spec;
+ int i;
+ hda_nid_t nid;
+
+ /* reset each pin before powering down DAC/ADC to avoid click noise */
+ nid = codec->start_nid;
+ for (i = 0; i < codec->num_nodes; i++, nid++) {
+ unsigned int wcaps = get_wcaps(codec, nid);
+ unsigned int wid_type = get_wcaps_type(wcaps);
+ if (wid_type == AC_WID_PIN)
+ snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ }
+
if (spec->eapd_mask)
stac_gpio_set(codec, spec->gpio_mask,
spec->gpio_dir, spec->gpio_data &
@@ -4790,7 +4778,8 @@ static int patch_stac9200(struct hda_codec *codec)
stac9200_models,
stac9200_cfg_tbl);
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC9200, using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac9200_brd_tbl[spec->board_config]);
@@ -4862,8 +4851,8 @@ static int patch_stac925x(struct hda_codec *codec)
stac925x_cfg_tbl);
again:
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC925x,"
- "using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac925x_brd_tbl[spec->board_config]);
@@ -4893,6 +4882,9 @@ static int patch_stac925x(struct hda_codec *codec)
spec->init = stac925x_core_init;
spec->mixer = stac925x_mixer;
+ spec->num_caps = 1;
+ spec->capvols = stac925x_capvols;
+ spec->capsws = stac925x_capsws;
err = stac92xx_parse_auto_config(codec, 0x8, 0x7);
if (!err) {
@@ -4914,16 +4906,6 @@ static int patch_stac925x(struct hda_codec *codec)
return 0;
}
-static struct hda_input_mux stac92hd73xx_dmux = {
- .num_items = 4,
- .items = {
- { "Analog Inputs", 0x0b },
- { "Digital Mic 1", 0x09 },
- { "Digital Mic 2", 0x0a },
- { "CD", 0x08 },
- }
-};
-
static int patch_stac92hd73xx(struct hda_codec *codec)
{
struct sigmatel_spec *spec;
@@ -4943,10 +4925,16 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
STAC_92HD73XX_MODELS,
stac92hd73xx_models,
stac92hd73xx_cfg_tbl);
+ /* check the codec subsystem id if no board config was found */
+ if (spec->board_config < 0)
+ spec->board_config =
+ snd_hda_check_board_codec_sid_config(codec,
+ STAC_92HD73XX_MODELS, stac92hd73xx_models,
+ stac92hd73xx_codec_id_cfg_tbl);
again:
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for"
- " STAC92HD73XX, using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac92hd73xx_brd_tbl[spec->board_config]);
@@ -4959,20 +4947,15 @@ again:
"number of channels defaulting to DAC count\n");
num_dacs = STAC92HD73_DAC_COUNT;
}
+ spec->init = stac92hd73xx_core_init;
switch (num_dacs) {
case 0x3: /* 6 Channel */
- spec->mixer = stac92hd73xx_6ch_mixer;
- spec->init = stac92hd73xx_6ch_core_init;
spec->aloopback_ctl = stac92hd73xx_6ch_loopback;
break;
case 0x4: /* 8 Channel */
- spec->mixer = stac92hd73xx_8ch_mixer;
- spec->init = stac92hd73xx_8ch_core_init;
spec->aloopback_ctl = stac92hd73xx_8ch_loopback;
break;
case 0x5: /* 10 Channel */
- spec->mixer = stac92hd73xx_10ch_mixer;
- spec->init = stac92hd73xx_10ch_core_init;
spec->aloopback_ctl = stac92hd73xx_10ch_loopback;
break;
}
@@ -4987,14 +4970,14 @@ again:
spec->dmic_nids = stac92hd73xx_dmic_nids;
spec->dmux_nids = stac92hd73xx_dmux_nids;
spec->smux_nids = stac92hd73xx_smux_nids;
- spec->amp_nids = stac92hd73xx_amp_nids;
- spec->num_amps = ARRAY_SIZE(stac92hd73xx_amp_nids);
spec->num_muxes = ARRAY_SIZE(stac92hd73xx_mux_nids);
spec->num_adcs = ARRAY_SIZE(stac92hd73xx_adc_nids);
spec->num_dmuxes = ARRAY_SIZE(stac92hd73xx_dmux_nids);
- memcpy(&spec->private_dimux, &stac92hd73xx_dmux,
- sizeof(stac92hd73xx_dmux));
+
+ spec->num_caps = STAC92HD73XX_NUM_CAPS;
+ spec->capvols = stac92hd73xx_capvols;
+ spec->capsws = stac92hd73xx_capsws;
switch (spec->board_config) {
case STAC_DELL_EQ:
@@ -5004,43 +4987,40 @@ again:
case STAC_DELL_M6_DMIC:
case STAC_DELL_M6_BOTH:
spec->num_smuxes = 0;
- spec->mixer = &stac92hd73xx_6ch_mixer[DELL_M6_MIXER];
- spec->amp_nids = &stac92hd73xx_amp_nids[DELL_M6_AMP];
spec->eapd_switch = 0;
- spec->num_amps = 1;
- if (spec->board_config != STAC_DELL_EQ)
- spec->init = dell_m6_core_init;
switch (spec->board_config) {
case STAC_DELL_M6_AMIC: /* Analog Mics */
snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170);
spec->num_dmics = 0;
- spec->private_dimux.num_items = 1;
break;
case STAC_DELL_M6_DMIC: /* Digital Mics */
snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160);
spec->num_dmics = 1;
- spec->private_dimux.num_items = 2;
break;
case STAC_DELL_M6_BOTH: /* Both */
snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170);
snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160);
spec->num_dmics = 1;
- spec->private_dimux.num_items = 2;
break;
}
break;
+ case STAC_ALIENWARE_M17X:
+ spec->num_dmics = STAC92HD73XX_NUM_DMICS;
+ spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids);
+ spec->eapd_switch = 0;
+ break;
default:
spec->num_dmics = STAC92HD73XX_NUM_DMICS;
spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids);
spec->eapd_switch = 1;
+ break;
}
if (spec->board_config > STAC_92HD73XX_REF) {
/* GPIO0 High = Enable EAPD */
spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x1;
spec->gpio_data = 0x01;
}
- spec->dinput_mux = &spec->private_dimux;
spec->num_pwrs = ARRAY_SIZE(stac92hd73xx_pwr_nids);
spec->pwr_nids = stac92hd73xx_pwr_nids;
@@ -5072,15 +5052,6 @@ again:
return 0;
}
-static struct hda_input_mux stac92hd83xxx_dmux = {
- .num_items = 3,
- .items = {
- { "Analog Inputs", 0x03 },
- { "Digital Mic 1", 0x04 },
- { "Digital Mic 2", 0x05 },
- }
-};
-
static int patch_stac92hd83xxx(struct hda_codec *codec)
{
struct sigmatel_spec *spec;
@@ -5097,32 +5068,30 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
spec->mono_nid = 0x19;
spec->digbeep_nid = 0x21;
- spec->dmic_nids = stac92hd83xxx_dmic_nids;
- spec->dmux_nids = stac92hd83xxx_dmux_nids;
+ spec->mux_nids = stac92hd83xxx_mux_nids;
+ spec->num_muxes = ARRAY_SIZE(stac92hd83xxx_mux_nids);
spec->adc_nids = stac92hd83xxx_adc_nids;
+ spec->num_adcs = ARRAY_SIZE(stac92hd83xxx_adc_nids);
spec->pwr_nids = stac92hd83xxx_pwr_nids;
- spec->amp_nids = stac92hd83xxx_amp_nids;
spec->pwr_mapping = stac92hd83xxx_pwr_mapping;
spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids);
spec->multiout.dac_nids = spec->dac_nids;
spec->init = stac92hd83xxx_core_init;
- spec->mixer = stac92hd83xxx_mixer;
spec->num_pins = ARRAY_SIZE(stac92hd83xxx_pin_nids);
- spec->num_dmuxes = ARRAY_SIZE(stac92hd83xxx_dmux_nids);
- spec->num_adcs = ARRAY_SIZE(stac92hd83xxx_adc_nids);
- spec->num_amps = ARRAY_SIZE(stac92hd83xxx_amp_nids);
- spec->num_dmics = STAC92HD83XXX_NUM_DMICS;
- spec->dinput_mux = &stac92hd83xxx_dmux;
spec->pin_nids = stac92hd83xxx_pin_nids;
+ spec->num_caps = STAC92HD83XXX_NUM_CAPS;
+ spec->capvols = stac92hd83xxx_capvols;
+ spec->capsws = stac92hd83xxx_capsws;
+
spec->board_config = snd_hda_check_board_config(codec,
STAC_92HD83XXX_MODELS,
stac92hd83xxx_models,
stac92hd83xxx_cfg_tbl);
again:
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for"
- " STAC92HD83XXX, using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac92hd83xxx_brd_tbl[spec->board_config]);
@@ -5164,6 +5133,8 @@ again:
num_dacs = snd_hda_get_connections(codec, nid,
conn, STAC92HD83_DAC_COUNT + 1) - 1;
+ if (num_dacs < 0)
+ num_dacs = STAC92HD83_DAC_COUNT;
/* set port X to select the last DAC
*/
@@ -5177,25 +5148,6 @@ again:
return 0;
}
-static struct hda_input_mux stac92hd71bxx_dmux_nomixer = {
- .num_items = 3,
- .items = {
- { "Analog Inputs", 0x00 },
- { "Digital Mic 1", 0x02 },
- { "Digital Mic 2", 0x03 },
- }
-};
-
-static struct hda_input_mux stac92hd71bxx_dmux_amixer = {
- .num_items = 4,
- .items = {
- { "Analog Inputs", 0x00 },
- { "Mixer", 0x01 },
- { "Digital Mic 1", 0x02 },
- { "Digital Mic 2", 0x03 },
- }
-};
-
/* get the pin connection (fixed, none, etc) */
static unsigned int stac_get_defcfg_connect(struct hda_codec *codec, int idx)
{
@@ -5256,7 +5208,6 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
struct sigmatel_spec *spec;
struct hda_verb *unmute_init = stac92hd71bxx_unmute_core_init;
int err = 0;
- unsigned int ndmic_nids = 0;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (spec == NULL)
@@ -5285,8 +5236,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
stac92hd71bxx_cfg_tbl);
again:
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for"
- " STAC92HD71BXX, using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac92hd71bxx_brd_tbl[spec->board_config]);
@@ -5301,6 +5252,10 @@ again:
spec->dmic_nids = stac92hd71bxx_dmic_nids;
spec->dmux_nids = stac92hd71bxx_dmux_nids;
+ spec->num_caps = STAC92HD71BXX_NUM_CAPS;
+ spec->capvols = stac92hd71bxx_capvols;
+ spec->capsws = stac92hd71bxx_capsws;
+
switch (codec->vendor_id) {
case 0x111d76b6: /* 4 Port without Analog Mixer */
case 0x111d76b7:
@@ -5308,24 +5263,13 @@ again:
/* fallthru */
case 0x111d76b4: /* 6 Port without Analog Mixer */
case 0x111d76b5:
- memcpy(&spec->private_dimux, &stac92hd71bxx_dmux_nomixer,
- sizeof(stac92hd71bxx_dmux_nomixer));
- spec->mixer = stac92hd71bxx_mixer;
spec->init = stac92hd71bxx_core_init;
codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
spec->num_dmics = stac92hd71bxx_connected_ports(codec,
stac92hd71bxx_dmic_nids,
STAC92HD71BXX_NUM_DMICS);
- if (spec->num_dmics) {
- spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids);
- spec->dinput_mux = &spec->private_dimux;
- ndmic_nids = ARRAY_SIZE(stac92hd71bxx_dmic_nids) - 1;
- }
break;
case 0x111d7608: /* 5 Port with Analog Mixer */
- memcpy(&spec->private_dimux, &stac92hd71bxx_dmux_amixer,
- sizeof(stac92hd71bxx_dmux_amixer));
- spec->private_dimux.num_items--;
switch (spec->board_config) {
case STAC_HP_M4:
/* Enable VREF power saving on GPIO1 detect */
@@ -5347,11 +5291,8 @@ again:
/* no output amps */
spec->num_pwrs = 0;
- spec->mixer = stac92hd71bxx_analog_mixer;
- spec->dinput_mux = &spec->private_dimux;
-
/* disable VSW */
- spec->init = &stac92hd71bxx_analog_core_init[HD_DISABLE_PORTF];
+ spec->init = stac92hd71bxx_core_init;
unmute_init++;
snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0);
snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3);
@@ -5359,8 +5300,6 @@ again:
spec->num_dmics = stac92hd71bxx_connected_ports(codec,
stac92hd71bxx_dmic_nids,
STAC92HD71BXX_NUM_DMICS - 1);
- spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids);
- ndmic_nids = ARRAY_SIZE(stac92hd71bxx_dmic_nids) - 2;
break;
case 0x111d7603: /* 6 Port with Analog Mixer */
if ((codec->revision_id & 0xf) == 1)
@@ -5370,17 +5309,12 @@ again:
spec->num_pwrs = 0;
/* fallthru */
default:
- memcpy(&spec->private_dimux, &stac92hd71bxx_dmux_amixer,
- sizeof(stac92hd71bxx_dmux_amixer));
- spec->dinput_mux = &spec->private_dimux;
- spec->mixer = stac92hd71bxx_analog_mixer;
- spec->init = stac92hd71bxx_analog_core_init;
+ spec->init = stac92hd71bxx_core_init;
codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
spec->num_dmics = stac92hd71bxx_connected_ports(codec,
stac92hd71bxx_dmic_nids,
STAC92HD71BXX_NUM_DMICS);
- spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids);
- ndmic_nids = ARRAY_SIZE(stac92hd71bxx_dmic_nids) - 1;
+ break;
}
if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP)
@@ -5408,6 +5342,7 @@ again:
spec->num_muxes = ARRAY_SIZE(stac92hd71bxx_mux_nids);
spec->num_adcs = ARRAY_SIZE(stac92hd71bxx_adc_nids);
+ spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids);
spec->num_smuxes = stac92hd71bxx_connected_smuxes(codec, 0x1e);
switch (spec->board_config) {
@@ -5462,8 +5397,6 @@ again:
#endif
spec->multiout.dac_nids = spec->dac_nids;
- if (spec->dinput_mux)
- spec->private_dimux.num_items += spec->num_dmics - ndmic_nids;
err = stac92xx_parse_auto_config(codec, 0x21, 0);
if (!err) {
@@ -5541,8 +5474,8 @@ static int patch_stac922x(struct hda_codec *codec)
again:
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC922x, "
- "using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac922x_brd_tbl[spec->board_config]);
@@ -5555,7 +5488,10 @@ static int patch_stac922x(struct hda_codec *codec)
spec->num_pwrs = 0;
spec->init = stac922x_core_init;
- spec->mixer = stac922x_mixer;
+
+ spec->num_caps = STAC922X_NUM_CAPS;
+ spec->capvols = stac922x_capvols;
+ spec->capsws = stac922x_capsws;
spec->multiout.dac_nids = spec->dac_nids;
@@ -5604,8 +5540,8 @@ static int patch_stac927x(struct hda_codec *codec)
stac927x_cfg_tbl);
again:
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for"
- "STAC927x, using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac927x_brd_tbl[spec->board_config]);
@@ -5630,7 +5566,6 @@ static int patch_stac927x(struct hda_codec *codec)
spec->num_dmics = 0;
spec->init = d965_core_init;
- spec->mixer = stac927x_mixer;
break;
case STAC_DELL_BIOS:
switch (codec->subsystem_id) {
@@ -5662,7 +5597,6 @@ static int patch_stac927x(struct hda_codec *codec)
spec->num_dmics = STAC927X_NUM_DMICS;
spec->init = d965_core_init;
- spec->mixer = stac927x_mixer;
spec->dmux_nids = stac927x_dmux_nids;
spec->num_dmuxes = ARRAY_SIZE(stac927x_dmux_nids);
break;
@@ -5675,9 +5609,12 @@ static int patch_stac927x(struct hda_codec *codec)
spec->num_dmics = 0;
spec->init = stac927x_core_init;
- spec->mixer = stac927x_mixer;
}
+ spec->num_caps = STAC927X_NUM_CAPS;
+ spec->capvols = stac927x_capvols;
+ spec->capsws = stac927x_capsws;
+
spec->num_pwrs = 0;
spec->aloopback_ctl = stac927x_loopback;
spec->aloopback_mask = 0x40;
@@ -5739,7 +5676,8 @@ static int patch_stac9205(struct hda_codec *codec)
stac9205_cfg_tbl);
again:
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC9205, using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac9205_brd_tbl[spec->board_config]);
@@ -5758,9 +5696,12 @@ static int patch_stac9205(struct hda_codec *codec)
spec->num_pwrs = 0;
spec->init = stac9205_core_init;
- spec->mixer = stac9205_mixer;
spec->aloopback_ctl = stac9205_loopback;
+ spec->num_caps = STAC9205_NUM_CAPS;
+ spec->capvols = stac9205_capvols;
+ spec->capsws = stac9205_capsws;
+
spec->aloopback_mask = 0x40;
spec->aloopback_shift = 0;
/* Turn on/off EAPD per HP plugging */
@@ -5835,12 +5776,6 @@ static struct hda_verb stac9872_core_init[] = {
{}
};
-static struct snd_kcontrol_new stac9872_mixer[] = {
- HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_INPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x09, 0, HDA_INPUT),
- { } /* end */
-};
-
static hda_nid_t stac9872_pin_nids[] = {
0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x11, 0x13, 0x14,
@@ -5854,6 +5789,11 @@ static hda_nid_t stac9872_mux_nids[] = {
0x15
};
+static unsigned long stac9872_capvols[] = {
+ HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
+};
+#define stac9872_capsws stac9872_capvols
+
static unsigned int stac9872_vaio_pin_configs[9] = {
0x03211020, 0x411111f0, 0x411111f0, 0x03a15030,
0x411111f0, 0x90170110, 0x411111f0, 0x411111f0,
@@ -5891,8 +5831,8 @@ static int patch_stac9872(struct hda_codec *codec)
stac9872_models,
stac9872_cfg_tbl);
if (spec->board_config < 0)
- snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC9872, "
- "using BIOS defaults\n");
+ snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
+ codec->chip_name);
else
stac92xx_set_config_regs(codec,
stac9872_brd_tbl[spec->board_config]);
@@ -5902,8 +5842,10 @@ static int patch_stac9872(struct hda_codec *codec)
spec->adc_nids = stac9872_adc_nids;
spec->num_muxes = ARRAY_SIZE(stac9872_mux_nids);
spec->mux_nids = stac9872_mux_nids;
- spec->mixer = stac9872_mixer;
spec->init = stac9872_core_init;
+ spec->num_caps = 1;
+ spec->capvols = stac9872_capvols;
+ spec->capsws = stac9872_capsws;
err = stac92xx_parse_auto_config(codec, 0x10, 0x12);
if (err < 0) {
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index e8f10b10cceb..ee89db90c9b6 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1339,8 +1339,7 @@ static int get_mux_nids(struct hda_codec *codec)
for (i = 0; i < spec->num_adc_nids; i++) {
nid = spec->adc_nids[i];
while (nid) {
- type = (get_wcaps(codec, nid) & AC_WCAP_TYPE)
- >> AC_WCAP_TYPE_SHIFT;
+ type = get_wcaps_type(get_wcaps(codec, nid));
if (type == AC_WID_PIN)
break;
n = snd_hda_get_connections(codec, nid, conn,
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index adc909ec125c..9da2dae64c5b 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -379,6 +379,15 @@ struct snd_ice1712 {
unsigned char (*set_mclk)(struct snd_ice1712 *ice, unsigned int rate);
void (*set_spdif_clock)(struct snd_ice1712 *ice);
+#ifdef CONFIG_PM
+ int (*pm_suspend)(struct snd_ice1712 *);
+ int (*pm_resume)(struct snd_ice1712 *);
+ int pm_suspend_enabled:1;
+ int pm_saved_is_spdif_master:1;
+ unsigned int pm_saved_spdif_ctrl;
+ unsigned char pm_saved_spdif_cfg;
+ unsigned int pm_saved_route;
+#endif
};
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index cc84a831eb21..af6e00148621 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -560,6 +560,7 @@ static int snd_vt1724_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
spin_lock(&ice->reg_lock);
old = inb(ICEMT1724(ice, DMA_CONTROL));
if (cmd == SNDRV_PCM_TRIGGER_START)
@@ -570,6 +571,10 @@ static int snd_vt1724_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
spin_unlock(&ice->reg_lock);
break;
+ case SNDRV_PCM_TRIGGER_RESUME:
+ /* apps will have to restart stream */
+ break;
+
default:
return -EINVAL;
}
@@ -2262,7 +2267,7 @@ static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
-static void __devinit snd_vt1724_chip_reset(struct snd_ice1712 *ice)
+static void snd_vt1724_chip_reset(struct snd_ice1712 *ice)
{
outb(VT1724_RESET , ICEREG1724(ice, CONTROL));
inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */
@@ -2272,7 +2277,7 @@ static void __devinit snd_vt1724_chip_reset(struct snd_ice1712 *ice)
msleep(10);
}
-static int __devinit snd_vt1724_chip_init(struct snd_ice1712 *ice)
+static int snd_vt1724_chip_init(struct snd_ice1712 *ice)
{
outb(ice->eeprom.data[ICE_EEP2_SYSCONF], ICEREG1724(ice, SYS_CFG));
outb(ice->eeprom.data[ICE_EEP2_ACLINK], ICEREG1724(ice, AC97_CFG));
@@ -2287,6 +2292,14 @@ static int __devinit snd_vt1724_chip_init(struct snd_ice1712 *ice)
outb(0, ICEREG1724(ice, POWERDOWN));
+ /* MPU_RX and TX irq masks are cleared later dynamically */
+ outb(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX , ICEREG1724(ice, IRQMASK));
+
+ /* don't handle FIFO overrun/underruns (just yet),
+ * since they cause machine lockups
+ */
+ outb(VT1724_MULTI_FIFO_ERR, ICEMT1724(ice, DMA_INT_MASK));
+
return 0;
}
@@ -2431,6 +2444,8 @@ static int __devinit snd_vt1724_create(struct snd_card *card,
snd_vt1724_proc_init(ice);
synchronize_irq(pci->irq);
+ card->private_data = ice;
+
err = pci_request_regions(pci, "ICE1724");
if (err < 0) {
kfree(ice);
@@ -2459,14 +2474,6 @@ static int __devinit snd_vt1724_create(struct snd_card *card,
return -EIO;
}
- /* MPU_RX and TX irq masks are cleared later dynamically */
- outb(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX , ICEREG1724(ice, IRQMASK));
-
- /* don't handle FIFO overrun/underruns (just yet),
- * since they cause machine lockups
- */
- outb(VT1724_MULTI_FIFO_ERR, ICEMT1724(ice, DMA_INT_MASK));
-
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ice, &ops);
if (err < 0) {
snd_vt1724_free(ice);
@@ -2650,11 +2657,96 @@ static void __devexit snd_vt1724_remove(struct pci_dev *pci)
pci_set_drvdata(pci, NULL);
}
+#ifdef CONFIG_PM
+static int snd_vt1724_suspend(struct pci_dev *pci, pm_message_t state)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_ice1712 *ice = card->private_data;
+
+ if (!ice->pm_suspend_enabled)
+ return 0;
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+
+ snd_pcm_suspend_all(ice->pcm);
+ snd_pcm_suspend_all(ice->pcm_pro);
+ snd_pcm_suspend_all(ice->pcm_ds);
+ snd_ac97_suspend(ice->ac97);
+
+ spin_lock_irq(&ice->reg_lock);
+ ice->pm_saved_is_spdif_master = ice->is_spdif_master(ice);
+ ice->pm_saved_spdif_ctrl = inw(ICEMT1724(ice, SPDIF_CTRL));
+ ice->pm_saved_spdif_cfg = inb(ICEREG1724(ice, SPDIF_CFG));
+ ice->pm_saved_route = inl(ICEMT1724(ice, ROUTE_PLAYBACK));
+ spin_unlock_irq(&ice->reg_lock);
+
+ if (ice->pm_suspend)
+ ice->pm_suspend(ice);
+
+ pci_disable_device(pci);
+ pci_save_state(pci);
+ pci_set_power_state(pci, pci_choose_state(pci, state));
+ return 0;
+}
+
+static int snd_vt1724_resume(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_ice1712 *ice = card->private_data;
+
+ if (!ice->pm_suspend_enabled)
+ return 0;
+
+ pci_set_power_state(pci, PCI_D0);
+ pci_restore_state(pci);
+
+ if (pci_enable_device(pci) < 0) {
+ snd_card_disconnect(card);
+ return -EIO;
+ }
+
+ pci_set_master(pci);
+
+ snd_vt1724_chip_reset(ice);
+
+ if (snd_vt1724_chip_init(ice) < 0) {
+ snd_card_disconnect(card);
+ return -EIO;
+ }
+
+ if (ice->pm_resume)
+ ice->pm_resume(ice);
+
+ if (ice->pm_saved_is_spdif_master) {
+ /* switching to external clock via SPDIF */
+ ice->set_spdif_clock(ice);
+ } else {
+ /* internal on-card clock */
+ snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1);
+ }
+
+ update_spdif_bits(ice, ice->pm_saved_spdif_ctrl);
+
+ outb(ice->pm_saved_spdif_cfg, ICEREG1724(ice, SPDIF_CFG));
+ outl(ice->pm_saved_route, ICEMT1724(ice, ROUTE_PLAYBACK));
+
+ if (ice->ac97)
+ snd_ac97_resume(ice->ac97);
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ return 0;
+}
+#endif
+
static struct pci_driver driver = {
.name = "ICE1724",
.id_table = snd_vt1724_ids,
.probe = snd_vt1724_probe,
.remove = __devexit_p(snd_vt1724_remove),
+#ifdef CONFIG_PM
+ .suspend = snd_vt1724_suspend,
+ .resume = snd_vt1724_resume,
+#endif
};
static int __init alsa_card_ice1724_init(void)
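A usage note on the PM hooks introduced above: a card sub-driver opts into the new ICE1724 suspend/resume path by setting pm_suspend_enabled and, if it needs card-specific work on resume, the pm_suspend/pm_resume callbacks from its init routine. A minimal sketch against the fields added to struct snd_ice1712 in this patch (mycard_resume/mycard_init are hypothetical names, not part of the patch):

/* Sketch only: opt-in to the new ICE1724 suspend/resume support.
 * Assumes ice1712.h from this patch; mycard_* are placeholder names. */
static int mycard_resume(struct snd_ice1712 *ice)
{
	/* reprogram card-specific codecs/GPIOs after power loss */
	return 0;
}

static int __devinit mycard_init(struct snd_ice1712 *ice)
{
#ifdef CONFIG_PM
	ice->pm_resume = mycard_resume;
	ice->pm_suspend_enabled = 1;
#endif
	return 0;
}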
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 043a93879bd5..c75515f5be6f 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -1077,7 +1077,7 @@ static int __devinit prodigy_hifi_init(struct snd_ice1712 *ice)
/*
* initialize the chip
*/
-static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
+static void ak4396_init(struct snd_ice1712 *ice)
{
static unsigned short ak4396_inits[] = {
AK4396_CTRL1, 0x87, /* I2S Normal Mode, 24 bit */
@@ -1087,9 +1087,37 @@ static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
AK4396_RCH_ATT, 0x00,
};
- struct prodigy_hifi_spec *spec;
unsigned int i;
+ /* initialize ak4396 codec */
+ /* reset codec */
+ ak4396_write(ice, AK4396_CTRL1, 0x86);
+ msleep(100);
+ ak4396_write(ice, AK4396_CTRL1, 0x87);
+
+ for (i = 0; i < ARRAY_SIZE(ak4396_inits); i += 2)
+ ak4396_write(ice, ak4396_inits[i], ak4396_inits[i+1]);
+}
+
+#ifdef CONFIG_PM
+static int __devinit prodigy_hd2_resume(struct snd_ice1712 *ice)
+{
+ /* initialize ak4396 codec and restore previous mixer volumes */
+ struct prodigy_hifi_spec *spec = ice->spec;
+ int i;
+ mutex_lock(&ice->gpio_mutex);
+ ak4396_init(ice);
+ for (i = 0; i < 2; i++)
+ ak4396_write(ice, AK4396_LCH_ATT + i, spec->vol[i] & 0xff);
+ mutex_unlock(&ice->gpio_mutex);
+ return 0;
+}
+#endif
+
+static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
+{
+ struct prodigy_hifi_spec *spec;
+
ice->vt1720 = 0;
ice->vt1724 = 1;
@@ -1112,14 +1140,12 @@ static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
return -ENOMEM;
ice->spec = spec;
- /* initialize ak4396 codec */
- /* reset codec */
- ak4396_write(ice, AK4396_CTRL1, 0x86);
- msleep(100);
- ak4396_write(ice, AK4396_CTRL1, 0x87);
-
- for (i = 0; i < ARRAY_SIZE(ak4396_inits); i += 2)
- ak4396_write(ice, ak4396_inits[i], ak4396_inits[i+1]);
+#ifdef CONFIG_PM
+ ice->pm_resume = &prodigy_hd2_resume;
+ ice->pm_suspend_enabled = 1;
+#endif
+
+ ak4396_init(ice);
return 0;
}
diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c
index c1eb923f2ac9..09b2b2a36df5 100644
--- a/sound/pci/oxygen/oxygen_io.c
+++ b/sound/pci/oxygen/oxygen_io.c
@@ -215,17 +215,8 @@ EXPORT_SYMBOL(oxygen_write_spi);
void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data)
{
- unsigned long timeout;
-
/* should not need more than about 300 us */
- timeout = jiffies + msecs_to_jiffies(1);
- do {
- if (!(oxygen_read16(chip, OXYGEN_2WIRE_BUS_STATUS)
- & OXYGEN_2WIRE_BUSY))
- break;
- udelay(1);
- cond_resched();
- } while (time_after_eq(timeout, jiffies));
+ msleep(1);
oxygen_write8(chip, OXYGEN_2WIRE_MAP, map);
oxygen_write8(chip, OXYGEN_2WIRE_DATA, data);
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 3da5c029f93b..7bb827c7d806 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -3294,15 +3294,33 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
char *clock_source;
int x;
- if (hdsp_check_for_iobox (hdsp)) {
- snd_iprintf(buffer, "No I/O box connected.\nPlease connect one and upload firmware.\n");
+ status = hdsp_read(hdsp, HDSP_statusRegister);
+ status2 = hdsp_read(hdsp, HDSP_status2Register);
+
+ snd_iprintf(buffer, "%s (Card #%d)\n", hdsp->card_name,
+ hdsp->card->number + 1);
+ snd_iprintf(buffer, "Buffers: capture %p playback %p\n",
+ hdsp->capture_buffer, hdsp->playback_buffer);
+ snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
+ hdsp->irq, hdsp->port, (unsigned long)hdsp->iobase);
+ snd_iprintf(buffer, "Control register: 0x%x\n", hdsp->control_register);
+ snd_iprintf(buffer, "Control2 register: 0x%x\n",
+ hdsp->control2_register);
+ snd_iprintf(buffer, "Status register: 0x%x\n", status);
+ snd_iprintf(buffer, "Status2 register: 0x%x\n", status2);
+
+ if (hdsp_check_for_iobox(hdsp)) {
+ snd_iprintf(buffer, "No I/O box connected.\n"
+ "Please connect one and upload firmware.\n");
return;
- }
+ }
if (hdsp_check_for_firmware(hdsp, 0)) {
if (hdsp->state & HDSP_FirmwareCached) {
if (snd_hdsp_load_firmware_from_cache(hdsp) != 0) {
- snd_iprintf(buffer, "Firmware loading from cache failed, please upload manually.\n");
+ snd_iprintf(buffer, "Firmware loading from "
+ "cache failed, "
+ "please upload manually.\n");
return;
}
} else {
@@ -3319,18 +3337,6 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
}
}
- status = hdsp_read(hdsp, HDSP_statusRegister);
- status2 = hdsp_read(hdsp, HDSP_status2Register);
-
- snd_iprintf(buffer, "%s (Card #%d)\n", hdsp->card_name, hdsp->card->number + 1);
- snd_iprintf(buffer, "Buffers: capture %p playback %p\n",
- hdsp->capture_buffer, hdsp->playback_buffer);
- snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
- hdsp->irq, hdsp->port, (unsigned long)hdsp->iobase);
- snd_iprintf(buffer, "Control register: 0x%x\n", hdsp->control_register);
- snd_iprintf(buffer, "Control2 register: 0x%x\n", hdsp->control2_register);
- snd_iprintf(buffer, "Status register: 0x%x\n", status);
- snd_iprintf(buffer, "Status2 register: 0x%x\n", status2);
snd_iprintf(buffer, "FIFO status: %d\n", hdsp_read(hdsp, HDSP_fifoStatus) & 0xff);
snd_iprintf(buffer, "MIDI1 Output status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusOut0));
snd_iprintf(buffer, "MIDI1 Input status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusIn0));
@@ -3351,7 +3357,6 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
snd_iprintf(buffer, "\n");
-
switch (hdsp_clock_source(hdsp)) {
case HDSP_CLOCK_SOURCE_AUTOSYNC:
clock_source = "AutoSync";
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 2f0925236a1b..5518371db13f 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -834,7 +834,7 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
status = snd_ymfpci_readw(chip, YDSXGR_INTFLAG);
if (status & 1) {
if (chip->timer)
- snd_timer_interrupt(chip->timer, chip->timer->sticks);
+ snd_timer_interrupt(chip->timer, chip->timer_ticks);
}
snd_ymfpci_writew(chip, YDSXGR_INTFLAG, status);
@@ -1885,8 +1885,18 @@ static int snd_ymfpci_timer_start(struct snd_timer *timer)
unsigned int count;
chip = snd_timer_chip(timer);
- count = (timer->sticks << 1) - 1;
spin_lock_irqsave(&chip->reg_lock, flags);
+ if (timer->sticks > 1) {
+ chip->timer_ticks = timer->sticks;
+ count = timer->sticks - 1;
+ } else {
+ /*
+ * Divisor 1 is not allowed; fake it by using divisor 2 and
+ * counting two ticks for each interrupt.
+ */
+ chip->timer_ticks = 2;
+ count = 2 - 1;
+ }
snd_ymfpci_writew(chip, YDSXGR_TIMERCOUNT, count);
snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x03);
spin_unlock_irqrestore(&chip->reg_lock, flags);
@@ -1909,14 +1919,14 @@ static int snd_ymfpci_timer_precise_resolution(struct snd_timer *timer,
unsigned long *num, unsigned long *den)
{
*num = 1;
- *den = 48000;
+ *den = 96000;
return 0;
}
static struct snd_timer_hardware snd_ymfpci_timer_hw = {
.flags = SNDRV_TIMER_HW_AUTO,
- .resolution = 20833, /* 1/fs = 20.8333...us */
- .ticks = 0x8000,
+ .resolution = 10417, /* 1 / 96 kHz = 10.41666...us */
+ .ticks = 0x10000,
.start = snd_ymfpci_timer_start,
.stop = snd_ymfpci_timer_stop,
.precise_resolution = snd_ymfpci_timer_precise_resolution,
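For reference, the timer above now runs on the chip's native 96 kHz tick, and the hardware's divisor-1 restriction is worked around by programming divisor 2 and crediting two ticks per interrupt. A small sketch of that mapping (the helper name is hypothetical; the register semantics match the code above):

/* Sketch of the YDSXGR_TIMERCOUNT programming: the register counts
 * count+1 ticks of the 96 kHz clock and divisor 1 is not allowed.
 * ymf_timer_count() is a hypothetical helper, not part of the patch. */
static void ymf_timer_count(unsigned int sticks,
			    unsigned int *count, unsigned int *timer_ticks)
{
	if (sticks > 1) {
		*timer_ticks = sticks;	/* report the requested ticks */
		*count = sticks - 1;	/* hardware fires after count+1 ticks */
	} else {
		*timer_ticks = 2;	/* fake divisor 1 with divisor 2... */
		*count = 2 - 1;		/* ...and credit two ticks per IRQ */
	}
}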
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index d3e786a9a0a7..b1749bc67979 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -29,6 +29,7 @@ source "sound/soc/au1x/Kconfig"
source "sound/soc/blackfin/Kconfig"
source "sound/soc/davinci/Kconfig"
source "sound/soc/fsl/Kconfig"
+source "sound/soc/imx/Kconfig"
source "sound/soc/omap/Kconfig"
source "sound/soc/pxa/Kconfig"
source "sound/soc/s3c24xx/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 6f1e28de23cf..0c5eac01bf2e 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,4 +1,4 @@
-snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o
+snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o
obj-$(CONFIG_SND_SOC) += snd-soc-core.o
obj-$(CONFIG_SND_SOC) += codecs/
@@ -7,6 +7,7 @@ obj-$(CONFIG_SND_SOC) += au1x/
obj-$(CONFIG_SND_SOC) += blackfin/
obj-$(CONFIG_SND_SOC) += davinci/
obj-$(CONFIG_SND_SOC) += fsl/
+obj-$(CONFIG_SND_SOC) += imx/
obj-$(CONFIG_SND_SOC) += omap/
obj-$(CONFIG_SND_SOC) += pxa/
obj-$(CONFIG_SND_SOC) += s3c24xx/
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 173a239a541c..130b12118d4f 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -56,30 +56,14 @@
#define MCLK_RATE 12000000
-static struct clk *mclk;
-
-static int at91sam9g20ek_startup(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
- struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
- int ret;
-
- ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK,
- MCLK_RATE, SND_SOC_CLOCK_IN);
- if (ret < 0) {
- clk_disable(mclk);
- return ret;
- }
-
- return 0;
-}
-
-static void at91sam9g20ek_shutdown(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+/*
+ * As shipped the board does not have inputs. However, it is relatively
+ * straightforward to modify the board to hook them up so support is left
+ * in the driver.
+ */
+#undef ENABLE_MIC_INPUT
- dev_dbg(rtd->socdev->dev, "shutdown");
-}
+static struct clk *mclk;
static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
@@ -87,102 +71,17 @@ static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
- struct atmel_ssc_info *ssc_p = cpu_dai->private_data;
- struct ssc_device *ssc = ssc_p->ssc;
int ret;
- unsigned int rate;
- int cmr_div, period;
-
- if (ssc == NULL) {
- printk(KERN_INFO "at91sam9g20ek_hw_params: ssc is NULL!\n");
- return -EINVAL;
- }
-
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+ SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
- if (ret < 0)
- return ret;
-
- /*
- * The SSC clock dividers depend on the sample rate. The CMR.DIV
- * field divides the system master clock MCK to drive the SSC TK
- * signal which provides the codec BCLK. The TCMR.PERIOD and
- * RCMR.PERIOD fields further divide the BCLK signal to drive
- * the SSC TF and RF signals which provide the codec DACLRC and
- * ADCLRC clocks.
- *
- * The dividers were determined through trial and error, where a
- * CMR.DIV value is chosen such that the resulting BCLK value is
- * divisible, or almost divisible, by (2 * sample rate), and then
- * the TCMR.PERIOD or RCMR.PERIOD is BCLK / (2 * sample rate) - 1.
- */
- rate = params_rate(params);
-
- switch (rate) {
- case 8000:
- cmr_div = 55; /* BCLK = 133MHz/(2*55) = 1.209MHz */
- period = 74; /* LRC = BCLK/(2*(74+1)) ~= 8060,6Hz */
- break;
- case 11025:
- cmr_div = 67; /* BCLK = 133MHz/(2*60) = 1.108MHz */
- period = 45; /* LRC = BCLK/(2*(49+1)) = 11083,3Hz */
- break;
- case 16000:
- cmr_div = 63; /* BCLK = 133MHz/(2*63) = 1.055MHz */
- period = 32; /* LRC = BCLK/(2*(32+1)) = 15993,2Hz */
- break;
- case 22050:
- cmr_div = 52; /* BCLK = 133MHz/(2*52) = 1.278MHz */
- period = 28; /* LRC = BCLK/(2*(28+1)) = 22049Hz */
- break;
- case 32000:
- cmr_div = 66; /* BCLK = 133MHz/(2*66) = 1.007MHz */
- period = 15; /* LRC = BCLK/(2*(15+1)) = 31486,742Hz */
- break;
- case 44100:
- cmr_div = 29; /* BCLK = 133MHz/(2*29) = 2.293MHz */
- period = 25; /* LRC = BCLK/(2*(25+1)) = 44098Hz */
- break;
- case 48000:
- cmr_div = 33; /* BCLK = 133MHz/(2*33) = 2.015MHz */
- period = 20; /* LRC = BCLK/(2*(20+1)) = 47979,79Hz */
- break;
- case 88200:
- cmr_div = 29; /* BCLK = 133MHz/(2*29) = 2.293MHz */
- period = 12; /* LRC = BCLK/(2*(12+1)) = 88196Hz */
- break;
- case 96000:
- cmr_div = 23; /* BCLK = 133MHz/(2*23) = 2.891MHz */
- period = 14; /* LRC = BCLK/(2*(14+1)) = 96376Hz */
- break;
- default:
- printk(KERN_WARNING "unsupported rate %d"
- " on at91sam9g20ek board\n", rate);
- return -EINVAL;
- }
-
- /* set the MCK divider for BCLK */
- ret = snd_soc_dai_set_clkdiv(cpu_dai, ATMEL_SSC_CMR_DIV, cmr_div);
- if (ret < 0)
- return ret;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- /* set the BCLK divider for DACLRC */
- ret = snd_soc_dai_set_clkdiv(cpu_dai,
- ATMEL_SSC_TCMR_PERIOD, period);
- } else {
- /* set the BCLK divider for ADCLRC */
- ret = snd_soc_dai_set_clkdiv(cpu_dai,
- ATMEL_SSC_RCMR_PERIOD, period);
- }
+ SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
@@ -190,9 +89,7 @@ static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream,
}
static struct snd_soc_ops at91sam9g20ek_ops = {
- .startup = at91sam9g20ek_startup,
.hw_params = at91sam9g20ek_hw_params,
- .shutdown = at91sam9g20ek_shutdown,
};
static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
@@ -241,10 +138,20 @@ static const struct snd_soc_dapm_route intercon[] = {
*/
static int at91sam9g20ek_wm8731_init(struct snd_soc_codec *codec)
{
+ struct snd_soc_dai *codec_dai = &codec->dai[0];
+ int ret;
+
printk(KERN_DEBUG
"at91sam9g20ek_wm8731 "
": at91sam9g20ek_wm8731_init() called\n");
+ ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK,
+ MCLK_RATE, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret);
+ return ret;
+ }
+
/* Add specific widgets */
snd_soc_dapm_new_controls(codec, at91sam9g20ek_dapm_widgets,
ARRAY_SIZE(at91sam9g20ek_dapm_widgets));
@@ -255,8 +162,13 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_codec *codec)
snd_soc_dapm_nc_pin(codec, "RLINEIN");
snd_soc_dapm_nc_pin(codec, "LLINEIN");
- /* always connected */
+#ifdef ENABLE_MIC_INPUT
snd_soc_dapm_enable_pin(codec, "Int Mic");
+#else
+ snd_soc_dapm_nc_pin(codec, "Int Mic");
+#endif
+
+ /* always connected */
snd_soc_dapm_enable_pin(codec, "Ext Spk");
snd_soc_dapm_sync(codec);
diff --git a/sound/soc/au1x/psc-ac97.c b/sound/soc/au1x/psc-ac97.c
index 479d7bdf1865..a521aa90ddee 100644
--- a/sound/soc/au1x/psc-ac97.c
+++ b/sound/soc/au1x/psc-ac97.c
@@ -1,8 +1,8 @@
/*
* Au12x0/Au1550 PSC ALSA ASoC audio support.
*
- * (c) 2007-2008 MSC Vertriebsges.m.b.H.,
- * Manuel Lauss <mano@roarinelk.homelinux.net>
+ * (c) 2007-2009 MSC Vertriebsges.m.b.H.,
+ * Manuel Lauss <manuel.lauss@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/mutex.h>
#include <linux/suspend.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -29,6 +30,9 @@
#include "psc.h"
+/* how often to retry failed codec register reads/writes */
+#define AC97_RW_RETRIES 5
+
#define AC97_DIR \
(SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE)
@@ -45,6 +49,9 @@
#define AC97PCR_CLRFIFO(stype) \
((stype) == PCM_TX ? PSC_AC97PCR_TC : PSC_AC97PCR_RC)
+#define AC97STAT_BUSY(stype) \
+ ((stype) == PCM_TX ? PSC_AC97STAT_TB : PSC_AC97STAT_RB)
+
/* instance data. There can be only one, MacLeod!!!! */
static struct au1xpsc_audio_data *au1xpsc_ac97_workdata;
@@ -54,24 +61,33 @@ static unsigned short au1xpsc_ac97_read(struct snd_ac97 *ac97,
{
/* FIXME */
struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata;
- unsigned short data, tmo;
+ unsigned short data, retry, tmo;
- au_writel(PSC_AC97CDC_RD | PSC_AC97CDC_INDX(reg), AC97_CDC(pscdata));
+ au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
au_sync();
- tmo = 1000;
- while ((!(au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD)) && --tmo)
- udelay(2);
+ retry = AC97_RW_RETRIES;
+ do {
+ mutex_lock(&pscdata->lock);
+
+ au_writel(PSC_AC97CDC_RD | PSC_AC97CDC_INDX(reg),
+ AC97_CDC(pscdata));
+ au_sync();
+
+ tmo = 2000;
+ while ((!(au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD))
+ && --tmo)
+ udelay(2);
- if (!tmo)
- data = 0xffff;
- else
data = au_readl(AC97_CDC(pscdata)) & 0xffff;
- au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
- au_sync();
+ au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
+ au_sync();
+
+ mutex_unlock(&pscdata->lock);
+ } while (--retry && !tmo);
- return data;
+ return retry ? data : 0xffff;
}
/* AC97 controller writes to codec register */
@@ -80,16 +96,29 @@ static void au1xpsc_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
{
/* FIXME */
struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata;
- unsigned int tmo;
+ unsigned int tmo, retry;
- au_writel(PSC_AC97CDC_INDX(reg) | (val & 0xffff), AC97_CDC(pscdata));
+ au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
au_sync();
- tmo = 1000;
- while ((!(au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD)) && --tmo)
+
+ retry = AC97_RW_RETRIES;
+ do {
+ mutex_lock(&pscdata->lock);
+
+ au_writel(PSC_AC97CDC_INDX(reg) | (val & 0xffff),
+ AC97_CDC(pscdata));
au_sync();
- au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
- au_sync();
+ tmo = 2000;
+ while ((!(au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD))
+ && --tmo)
+ udelay(2);
+
+ au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
+ au_sync();
+
+ mutex_unlock(&pscdata->lock);
+ } while (--retry && !tmo);
}
/* AC97 controller asserts a warm reset */
@@ -129,9 +158,9 @@ static void au1xpsc_ac97_cold_reset(struct snd_ac97 *ac97)
au_sync();
/* wait for PSC to indicate it's ready */
- i = 100000;
+ i = 1000;
while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_SR)) && (--i))
- au_sync();
+ msleep(1);
if (i == 0) {
printk(KERN_ERR "au1xpsc-ac97: PSC not ready!\n");
@@ -143,9 +172,9 @@ static void au1xpsc_ac97_cold_reset(struct snd_ac97 *ac97)
au_sync();
/* wait for AC97 core to become ready */
- i = 100000;
+ i = 1000;
while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)) && (--i))
- au_sync();
+ msleep(1);
if (i == 0)
printk(KERN_ERR "au1xpsc-ac97: AC97 ctrl not ready\n");
}
@@ -165,12 +194,12 @@ static int au1xpsc_ac97_hw_params(struct snd_pcm_substream *substream,
{
/* FIXME */
struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata;
- unsigned long r, stat;
+ unsigned long r, ro, stat;
int chans, stype = SUBSTREAM_TYPE(substream);
chans = params_channels(params);
- r = au_readl(AC97_CFG(pscdata));
+ r = ro = au_readl(AC97_CFG(pscdata));
stat = au_readl(AC97_STAT(pscdata));
/* already active? */
@@ -180,9 +209,6 @@ static int au1xpsc_ac97_hw_params(struct snd_pcm_substream *substream,
(pscdata->rate != params_rate(params)))
return -EINVAL;
} else {
- /* disable AC97 device controller first */
- au_writel(r & ~PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
- au_sync();
/* set sample bitdepth: REG[24:21]=(BITS-2)/2 */
r &= ~PSC_AC97CFG_LEN_MASK;
@@ -199,14 +225,40 @@ static int au1xpsc_ac97_hw_params(struct snd_pcm_substream *substream,
r |= PSC_AC97CFG_RXSLOT_ENA(4);
}
- /* finally enable the AC97 controller again */
+ /* do we need to poke the hardware? */
+ if (!(r ^ ro))
+ goto out;
+
+ /* ac97 engine is about to be disabled */
+ mutex_lock(&pscdata->lock);
+
+ /* disable AC97 device controller first... */
+ au_writel(r & ~PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
+ au_sync();
+
+ /* ...wait for it... */
+ while (au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)
+ asm volatile ("nop");
+
+ /* ...write config... */
+ au_writel(r, AC97_CFG(pscdata));
+ au_sync();
+
+ /* ...enable the AC97 controller again... */
au_writel(r | PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
au_sync();
+ /* ...and wait for ready bit */
+ while (!(au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR))
+ asm volatile ("nop");
+
+ mutex_unlock(&pscdata->lock);
+
pscdata->cfg = r;
pscdata->rate = params_rate(params);
}
+out:
return 0;
}
@@ -222,6 +274,8 @@ static int au1xpsc_ac97_trigger(struct snd_pcm_substream *substream,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
+ au_writel(AC97PCR_CLRFIFO(stype), AC97_PCR(pscdata));
+ au_sync();
au_writel(AC97PCR_START(stype), AC97_PCR(pscdata));
au_sync();
break;
@@ -229,6 +283,13 @@ static int au1xpsc_ac97_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_SUSPEND:
au_writel(AC97PCR_STOP(stype), AC97_PCR(pscdata));
au_sync();
+
+ while (au_readl(AC97_STAT(pscdata)) & AC97STAT_BUSY(stype))
+ asm volatile ("nop");
+
+ au_writel(AC97PCR_CLRFIFO(stype), AC97_PCR(pscdata));
+ au_sync();
+
break;
default:
ret = -EINVAL;
@@ -251,6 +312,8 @@ static int au1xpsc_ac97_probe(struct platform_device *pdev,
if (!au1xpsc_ac97_workdata)
return -ENOMEM;
+ mutex_init(&au1xpsc_ac97_workdata->lock);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
ret = -ENODEV;
@@ -269,9 +332,9 @@ static int au1xpsc_ac97_probe(struct platform_device *pdev,
goto out1;
/* configuration: max dma trigger threshold, enable ac97 */
- au1xpsc_ac97_workdata->cfg = PSC_AC97CFG_RT_FIFO8 |
- PSC_AC97CFG_TT_FIFO8 |
- PSC_AC97CFG_DE_ENABLE;
+ au1xpsc_ac97_workdata->cfg = PSC_AC97CFG_RT_FIFO8 |
+ PSC_AC97CFG_TT_FIFO8 |
+ PSC_AC97CFG_DE_ENABLE;
/* preserve PSC clock source set up by platform (dev.platform_data
* is already occupied by soc layer)
@@ -386,4 +449,4 @@ module_exit(au1xpsc_ac97_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Au12x0/Au1550 PSC AC97 ALSA ASoC audio driver");
-MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
+MODULE_AUTHOR("Manuel Lauss <manuel.lauss@gmail.com>");
diff --git a/sound/soc/au1x/psc.h b/sound/soc/au1x/psc.h
index 8fdb1a04a07b..3f474e8ed4f6 100644
--- a/sound/soc/au1x/psc.h
+++ b/sound/soc/au1x/psc.h
@@ -29,6 +29,7 @@ struct au1xpsc_audio_data {
unsigned long pm[2];
struct resource *ioarea;
+ struct mutex lock;
};
#define PCM_TX 0
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 811596f4c092..ac927ffdc961 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -7,6 +7,15 @@ config SND_BF5XX_I2S
mode (supports single stereo In/Out).
You will also need to select the audio interfaces to support below.
+config SND_BF5XX_TDM
+ tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
+ depends on (BLACKFIN && SND_SOC)
+ help
+ Say Y or M if you want to add support for codecs attached to
+ the Blackfin SPORT (synchronous serial ports) interface in TDM
+ mode.
+ You will also need to select the audio interfaces to support below.
+
config SND_BF5XX_SOC_SSM2602
tristate "SoC SSM2602 Audio support for BF52x ezkit"
depends on SND_BF5XX_I2S
@@ -69,12 +78,24 @@ config SND_BF5XX_SOC_I2S
tristate
select SND_BF5XX_SOC_SPORT
+config SND_BF5XX_SOC_TDM
+ tristate
+ select SND_BF5XX_SOC_SPORT
+
config SND_BF5XX_SOC_AC97
tristate
select AC97_BUS
select SND_SOC_AC97_BUS
select SND_BF5XX_SOC_SPORT
+config SND_BF5XX_SOC_AD1836
+ tristate "SoC AD1836 Audio support for BF5xx"
+ depends on SND_BF5XX_TDM
+ select SND_BF5XX_SOC_TDM
+ select SND_SOC_AD1836
+ help
+ Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
+
config SND_BF5XX_SOC_AD1980
tristate "SoC AD1980/1 Audio support for BF5xx"
depends on SND_BF5XX_AC97
@@ -83,9 +104,17 @@ config SND_BF5XX_SOC_AD1980
help
Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
+config SND_BF5XX_SOC_AD1938
+ tristate "SoC AD1938 Audio support for Blackfin"
+ depends on SND_BF5XX_TDM
+ select SND_BF5XX_SOC_TDM
+ select SND_SOC_AD1938
+ help
+ Say Y if you want to add support for AD1938 codec on Blackfin.
+
config SND_BF5XX_SPORT_NUM
int "Set a SPORT for Sound chip"
- depends on (SND_BF5XX_I2S || SND_BF5XX_AC97)
+ depends on (SND_BF5XX_I2S || SND_BF5XX_AC97 || SND_BF5XX_TDM)
range 0 3 if BF54x
range 0 1 if !BF54x
default 0
diff --git a/sound/soc/blackfin/Makefile b/sound/soc/blackfin/Makefile
index 97bb37a6359c..87e30423912f 100644
--- a/sound/soc/blackfin/Makefile
+++ b/sound/soc/blackfin/Makefile
@@ -1,21 +1,29 @@
# Blackfin Platform Support
snd-bf5xx-ac97-objs := bf5xx-ac97-pcm.o
snd-bf5xx-i2s-objs := bf5xx-i2s-pcm.o
+snd-bf5xx-tdm-objs := bf5xx-tdm-pcm.o
snd-soc-bf5xx-sport-objs := bf5xx-sport.o
snd-soc-bf5xx-ac97-objs := bf5xx-ac97.o
snd-soc-bf5xx-i2s-objs := bf5xx-i2s.o
+snd-soc-bf5xx-tdm-objs := bf5xx-tdm.o
obj-$(CONFIG_SND_BF5XX_AC97) += snd-bf5xx-ac97.o
obj-$(CONFIG_SND_BF5XX_I2S) += snd-bf5xx-i2s.o
+obj-$(CONFIG_SND_BF5XX_TDM) += snd-bf5xx-tdm.o
obj-$(CONFIG_SND_BF5XX_SOC_SPORT) += snd-soc-bf5xx-sport.o
obj-$(CONFIG_SND_BF5XX_SOC_AC97) += snd-soc-bf5xx-ac97.o
obj-$(CONFIG_SND_BF5XX_SOC_I2S) += snd-soc-bf5xx-i2s.o
+obj-$(CONFIG_SND_BF5XX_SOC_TDM) += snd-soc-bf5xx-tdm.o
# Blackfin Machine Support
+snd-ad1836-objs := bf5xx-ad1836.o
snd-ad1980-objs := bf5xx-ad1980.o
snd-ssm2602-objs := bf5xx-ssm2602.o
snd-ad73311-objs := bf5xx-ad73311.o
+snd-ad1938-objs := bf5xx-ad1938.o
+obj-$(CONFIG_SND_BF5XX_SOC_AD1836) += snd-ad1836.o
obj-$(CONFIG_SND_BF5XX_SOC_AD1980) += snd-ad1980.o
obj-$(CONFIG_SND_BF5XX_SOC_SSM2602) += snd-ssm2602.o
obj-$(CONFIG_SND_BF5XX_SOC_AD73311) += snd-ad73311.o
+obj-$(CONFIG_SND_BF5XX_SOC_AD1938) += snd-ad1938.o
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index b1ed423fabd5..2758b9017a7f 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -277,28 +277,24 @@ static int bf5xx_ac97_resume(struct snd_soc_dai *dai)
if (!dai->active)
return 0;
- ret = sport_set_multichannel(sport_handle, 16, 0x1F, 1);
+ ret = sport_set_multichannel(sport, 16, 0x1F, 1);
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
- ret = sport_config_rx(sport_handle, IRFS, 0xF, 0, (16*16-1));
+ ret = sport_config_rx(sport, IRFS, 0xF, 0, (16*16-1));
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
- ret = sport_config_tx(sport_handle, ITFS, 0xF, 0, (16*16-1));
+ ret = sport_config_tx(sport, ITFS, 0xF, 0, (16*16-1));
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
- if (dai->capture.active)
- sport_rx_start(sport);
- if (dai->playback.active)
- sport_tx_start(sport);
return 0;
}
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
new file mode 100644
index 000000000000..cd361e304b0f
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -0,0 +1,128 @@
+/*
+ * File: sound/soc/blackfin/bf5xx-ad1836.c
+ * Author: Barry Song <Barry.Song@analog.com>
+ *
+ * Created: Aug 4 2009
+ * Description: Board driver for ad1836 sound chip
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm_params.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#include "../codecs/ad1836.h"
+#include "bf5xx-sport.h"
+
+#include "bf5xx-tdm-pcm.h"
+#include "bf5xx-tdm.h"
+
+static struct snd_soc_card bf5xx_ad1836;
+
+static int bf5xx_ad1836_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+
+ cpu_dai->private_data = sport_handle;
+ return 0;
+}
+
+static int bf5xx_ad1836_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ int ret = 0;
+ /* set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ /* set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct snd_soc_ops bf5xx_ad1836_ops = {
+ .startup = bf5xx_ad1836_startup,
+ .hw_params = bf5xx_ad1836_hw_params,
+};
+
+static struct snd_soc_dai_link bf5xx_ad1836_dai = {
+ .name = "ad1836",
+ .stream_name = "AD1836",
+ .cpu_dai = &bf5xx_tdm_dai,
+ .codec_dai = &ad1836_dai,
+ .ops = &bf5xx_ad1836_ops,
+};
+
+static struct snd_soc_card bf5xx_ad1836 = {
+ .name = "bf5xx_ad1836",
+ .platform = &bf5xx_tdm_soc_platform,
+ .dai_link = &bf5xx_ad1836_dai,
+ .num_links = 1,
+};
+
+static struct snd_soc_device bf5xx_ad1836_snd_devdata = {
+ .card = &bf5xx_ad1836,
+ .codec_dev = &soc_codec_dev_ad1836,
+};
+
+static struct platform_device *bfxx_ad1836_snd_device;
+
+static int __init bf5xx_ad1836_init(void)
+{
+ int ret;
+
+ bfxx_ad1836_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!bfxx_ad1836_snd_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(bfxx_ad1836_snd_device, &bf5xx_ad1836_snd_devdata);
+ bf5xx_ad1836_snd_devdata.dev = &bfxx_ad1836_snd_device->dev;
+ ret = platform_device_add(bfxx_ad1836_snd_device);
+
+ if (ret)
+ platform_device_put(bfxx_ad1836_snd_device);
+
+ return ret;
+}
+
+static void __exit bf5xx_ad1836_exit(void)
+{
+ platform_device_unregister(bfxx_ad1836_snd_device);
+}
+
+module_init(bf5xx_ad1836_init);
+module_exit(bf5xx_ad1836_exit);
+
+/* Module information */
+MODULE_AUTHOR("Barry Song");
+MODULE_DESCRIPTION("ALSA SoC AD1836 board driver");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/blackfin/bf5xx-ad1938.c b/sound/soc/blackfin/bf5xx-ad1938.c
new file mode 100644
index 000000000000..08269e91810c
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-ad1938.c
@@ -0,0 +1,142 @@
+/*
+ * File: sound/soc/blackfin/bf5xx-ad1938.c
+ * Author: Barry Song <Barry.Song@analog.com>
+ *
+ * Created: Thur June 4 2009
+ * Description: Board driver for ad1938 sound chip
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm_params.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#include "../codecs/ad1938.h"
+#include "bf5xx-sport.h"
+
+#include "bf5xx-tdm-pcm.h"
+#include "bf5xx-tdm.h"
+
+static struct snd_soc_card bf5xx_ad1938;
+
+static int bf5xx_ad1938_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+
+ cpu_dai->private_data = sport_handle;
+ return 0;
+}
+
+static int bf5xx_ad1938_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ int ret = 0;
+ /* set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ /* set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ /* set codec DAI slots, 8 channels, all channels are enabled */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xFF, 8);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct snd_soc_ops bf5xx_ad1938_ops = {
+ .startup = bf5xx_ad1938_startup,
+ .hw_params = bf5xx_ad1938_hw_params,
+};
+
+static struct snd_soc_dai_link bf5xx_ad1938_dai = {
+ .name = "ad1938",
+ .stream_name = "AD1938",
+ .cpu_dai = &bf5xx_tdm_dai,
+ .codec_dai = &ad1938_dai,
+ .ops = &bf5xx_ad1938_ops,
+};
+
+static struct snd_soc_card bf5xx_ad1938 = {
+ .name = "bf5xx_ad1938",
+ .platform = &bf5xx_tdm_soc_platform,
+ .dai_link = &bf5xx_ad1938_dai,
+ .num_links = 1,
+};
+
+static struct snd_soc_device bf5xx_ad1938_snd_devdata = {
+ .card = &bf5xx_ad1938,
+ .codec_dev = &soc_codec_dev_ad1938,
+};
+
+static struct platform_device *bfxx_ad1938_snd_device;
+
+static int __init bf5xx_ad1938_init(void)
+{
+ int ret;
+
+ bfxx_ad1938_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!bfxx_ad1938_snd_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(bfxx_ad1938_snd_device, &bf5xx_ad1938_snd_devdata);
+ bf5xx_ad1938_snd_devdata.dev = &bfxx_ad1938_snd_device->dev;
+ ret = platform_device_add(bfxx_ad1938_snd_device);
+
+ if (ret)
+ platform_device_put(bfxx_ad1938_snd_device);
+
+ return ret;
+}
+
+static void __exit bf5xx_ad1938_exit(void)
+{
+ platform_device_unregister(bfxx_ad1938_snd_device);
+}
+
+module_init(bf5xx_ad1938_init);
+module_exit(bf5xx_ad1938_exit);
+
+/* Module information */
+MODULE_AUTHOR("Barry Song");
+MODULE_DESCRIPTION("ALSA SoC AD1938 board driver");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c
index edfbdc024e66..9825b71d0e28 100644
--- a/sound/soc/blackfin/bf5xx-ad73311.c
+++ b/sound/soc/blackfin/bf5xx-ad73311.c
@@ -203,23 +203,23 @@ static struct snd_soc_device bf5xx_ad73311_snd_devdata = {
.codec_dev = &soc_codec_dev_ad73311,
};
-static struct platform_device *bf52x_ad73311_snd_device;
+static struct platform_device *bf5xx_ad73311_snd_device;
static int __init bf5xx_ad73311_init(void)
{
int ret;
pr_debug("%s enter\n", __func__);
- bf52x_ad73311_snd_device = platform_device_alloc("soc-audio", -1);
- if (!bf52x_ad73311_snd_device)
+ bf5xx_ad73311_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!bf5xx_ad73311_snd_device)
return -ENOMEM;
- platform_set_drvdata(bf52x_ad73311_snd_device, &bf5xx_ad73311_snd_devdata);
- bf5xx_ad73311_snd_devdata.dev = &bf52x_ad73311_snd_device->dev;
- ret = platform_device_add(bf52x_ad73311_snd_device);
+ platform_set_drvdata(bf5xx_ad73311_snd_device, &bf5xx_ad73311_snd_devdata);
+ bf5xx_ad73311_snd_devdata.dev = &bf5xx_ad73311_snd_device->dev;
+ ret = platform_device_add(bf5xx_ad73311_snd_device);
if (ret)
- platform_device_put(bf52x_ad73311_snd_device);
+ platform_device_put(bf5xx_ad73311_snd_device);
return ret;
}
@@ -227,7 +227,7 @@ static int __init bf5xx_ad73311_init(void)
static void __exit bf5xx_ad73311_exit(void)
{
pr_debug("%s enter\n", __func__);
- platform_device_unregister(bf52x_ad73311_snd_device);
+ platform_device_unregister(bf5xx_ad73311_snd_device);
}
module_init(bf5xx_ad73311_init);
diff --git a/sound/soc/blackfin/bf5xx-i2s.c b/sound/soc/blackfin/bf5xx-i2s.c
index af06904bab0f..876abade27e1 100644
--- a/sound/soc/blackfin/bf5xx-i2s.c
+++ b/sound/soc/blackfin/bf5xx-i2s.c
@@ -259,22 +259,18 @@ static int bf5xx_i2s_resume(struct snd_soc_dai *dai)
if (!dai->active)
return 0;
- ret = sport_config_rx(sport_handle, RFSR | RCKFE, RSFSE|0x1f, 0, 0);
+ ret = sport_config_rx(sport, RFSR | RCKFE, RSFSE|0x1f, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
- ret = sport_config_tx(sport_handle, TFSR | TCKFE, TSFSE|0x1f, 0, 0);
+ ret = sport_config_tx(sport, TFSR | TCKFE, TSFSE|0x1f, 0, 0);
if (ret) {
pr_err("SPORT is busy!\n");
return -EBUSY;
}
- if (dai->capture.active)
- sport_rx_start(sport);
- if (dai->playback.active)
- sport_tx_start(sport);
return 0;
}
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index bc0cdded7116..3a00fa4dbe6d 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -148,24 +148,24 @@ static struct snd_soc_device bf5xx_ssm2602_snd_devdata = {
.codec_data = &bf5xx_ssm2602_setup,
};
-static struct platform_device *bf52x_ssm2602_snd_device;
+static struct platform_device *bf5xx_ssm2602_snd_device;
static int __init bf5xx_ssm2602_init(void)
{
int ret;
pr_debug("%s enter\n", __func__);
- bf52x_ssm2602_snd_device = platform_device_alloc("soc-audio", -1);
- if (!bf52x_ssm2602_snd_device)
+ bf5xx_ssm2602_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!bf5xx_ssm2602_snd_device)
return -ENOMEM;
- platform_set_drvdata(bf52x_ssm2602_snd_device,
+ platform_set_drvdata(bf5xx_ssm2602_snd_device,
&bf5xx_ssm2602_snd_devdata);
- bf5xx_ssm2602_snd_devdata.dev = &bf52x_ssm2602_snd_device->dev;
- ret = platform_device_add(bf52x_ssm2602_snd_device);
+ bf5xx_ssm2602_snd_devdata.dev = &bf5xx_ssm2602_snd_device->dev;
+ ret = platform_device_add(bf5xx_ssm2602_snd_device);
if (ret)
- platform_device_put(bf52x_ssm2602_snd_device);
+ platform_device_put(bf5xx_ssm2602_snd_device);
return ret;
}
@@ -173,7 +173,7 @@ static int __init bf5xx_ssm2602_init(void)
static void __exit bf5xx_ssm2602_exit(void)
{
pr_debug("%s enter\n", __func__);
- platform_device_unregister(bf52x_ssm2602_snd_device);
+ platform_device_unregister(bf5xx_ssm2602_snd_device);
}
module_init(bf5xx_ssm2602_init);
diff --git a/sound/soc/blackfin/bf5xx-tdm-pcm.c b/sound/soc/blackfin/bf5xx-tdm-pcm.c
new file mode 100644
index 000000000000..ccb5e823bd18
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-tdm-pcm.c
@@ -0,0 +1,330 @@
+/*
+ * File: sound/soc/blackfin/bf5xx-tdm-pcm.c
+ * Author: Barry Song <Barry.Song@analog.com>
+ *
+ * Created: Tue June 06 2009
+ * Description: DMA driver for tdm codec
+ *
+ * Modified:
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <asm/dma.h>
+
+#include "bf5xx-tdm-pcm.h"
+#include "bf5xx-tdm.h"
+#include "bf5xx-sport.h"
+
+#define PCM_BUFFER_MAX 0x10000
+#define FRAGMENT_SIZE_MIN (4*1024)
+#define FRAGMENTS_MIN 2
+#define FRAGMENTS_MAX 32
+
+static void bf5xx_dma_irq(void *data)
+{
+ struct snd_pcm_substream *pcm = data;
+ snd_pcm_period_elapsed(pcm);
+}
+
+static const struct snd_pcm_hardware bf5xx_pcm_hardware = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_RESUME),
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .channels_min = 2,
+ .channels_max = 8,
+ .buffer_bytes_max = PCM_BUFFER_MAX,
+ .period_bytes_min = FRAGMENT_SIZE_MIN,
+ .period_bytes_max = PCM_BUFFER_MAX/2,
+ .periods_min = FRAGMENTS_MIN,
+ .periods_max = FRAGMENTS_MAX,
+};
+
+static int bf5xx_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+ snd_pcm_lib_malloc_pages(substream, size * 4);
+
+ return 0;
+}
+
+static int bf5xx_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_lib_free_pages(substream);
+
+ return 0;
+}
+
+static int bf5xx_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sport_device *sport = runtime->private_data;
+ int fragsize_bytes = frames_to_bytes(runtime, runtime->period_size);
+
+ fragsize_bytes /= runtime->channels;
+ /* inflate the fragsize to match the dma width of SPORT */
+ fragsize_bytes *= 8;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ sport_set_tx_callback(sport, bf5xx_dma_irq, substream);
+ sport_config_tx_dma(sport, runtime->dma_area,
+ runtime->periods, fragsize_bytes);
+ } else {
+ sport_set_rx_callback(sport, bf5xx_dma_irq, substream);
+ sport_config_rx_dma(sport, runtime->dma_area,
+ runtime->periods, fragsize_bytes);
+ }
+
+ return 0;
+}
+
+static int bf5xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sport_device *sport = runtime->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sport_tx_start(sport);
+ else
+ sport_rx_start(sport);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sport_tx_stop(sport);
+ else
+ sport_rx_stop(sport);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct sport_device *sport = runtime->private_data;
+ unsigned int diff;
+ snd_pcm_uframes_t frames;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ diff = sport_curr_offset_tx(sport);
+ frames = diff / (8*4); /* 32 bytes per frame */
+ } else {
+ diff = sport_curr_offset_rx(sport);
+ frames = diff / (8*4);
+ }
+ return frames;
+}
+
+static int bf5xx_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret = 0;
+
+ snd_soc_set_runtime_hwparams(substream, &bf5xx_pcm_hardware);
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ goto out;
+
+ if (sport_handle != NULL)
+ runtime->private_data = sport_handle;
+ else {
+ pr_err("sport_handle is NULL\n");
+ ret = -ENODEV;
+ }
+out:
+ return ret;
+}
+
+static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
+ snd_pcm_uframes_t pos, void *buf, snd_pcm_uframes_t count)
+{
+ unsigned int *src;
+ unsigned int *dst;
+ int i;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ src = buf;
+ dst = (unsigned int *)substream->runtime->dma_area;
+
+ dst += pos * 8;
+ while (count--) {
+ for (i = 0; i < substream->runtime->channels; i++)
+ *(dst + i) = *src++;
+ dst += 8;
+ }
+ } else {
+ src = (unsigned int *)substream->runtime->dma_area;
+ dst = buf;
+
+ src += pos * 8;
+ while (count--) {
+ for (i = 0; i < substream->runtime->channels; i++)
+ *dst++ = *(src+i);
+ src += 8;
+ }
+ }
+
+ return 0;
+}
+
+static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
+ int channel, snd_pcm_uframes_t pos, snd_pcm_uframes_t count)
+{
+ unsigned char *buf = substream->runtime->dma_area;
+ buf += pos * 8 * 4;
+ memset(buf, '\0', count * 8 * 4);
+
+ return 0;
+}
+
+
+struct snd_pcm_ops bf5xx_pcm_tdm_ops = {
+ .open = bf5xx_pcm_open,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = bf5xx_pcm_hw_params,
+ .hw_free = bf5xx_pcm_hw_free,
+ .prepare = bf5xx_pcm_prepare,
+ .trigger = bf5xx_pcm_trigger,
+ .pointer = bf5xx_pcm_pointer,
+ .copy = bf5xx_pcm_copy,
+ .silence = bf5xx_pcm_silence,
+};
+
+static int bf5xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = pcm->card->dev;
+ buf->private_data = NULL;
+ buf->area = dma_alloc_coherent(pcm->card->dev, size * 4,
+ &buf->addr, GFP_KERNEL);
+ if (!buf->area) {
+ pr_err("Failed to allocate dma memory \
+ Please increase uncached DMA memory region\n");
+ return -ENOMEM;
+ }
+ buf->bytes = size;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sport_handle->tx_buf = buf->area;
+ else
+ sport_handle->rx_buf = buf->area;
+
+ return 0;
+}
+
+static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *buf;
+ int stream;
+
+ for (stream = 0; stream < 2; stream++) {
+ substream = pcm->streams[stream].substream;
+ if (!substream)
+ continue;
+
+ buf = &substream->dma_buffer;
+ if (!buf->area)
+ continue;
+ dma_free_coherent(NULL, buf->bytes, buf->area, 0);
+ buf->area = NULL;
+ }
+ if (sport_handle)
+ sport_done(sport_handle);
+}
+
+static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
+
+static int bf5xx_pcm_tdm_new(struct snd_card *card, struct snd_soc_dai *dai,
+ struct snd_pcm *pcm)
+{
+ int ret = 0;
+
+ if (!card->dev->dma_mask)
+ card->dev->dma_mask = &bf5xx_pcm_dmamask;
+ if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ if (dai->playback.channels_min) {
+ ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret)
+ goto out;
+ }
+
+ if (dai->capture.channels_min) {
+ ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+struct snd_soc_platform bf5xx_tdm_soc_platform = {
+ .name = "bf5xx-audio",
+ .pcm_ops = &bf5xx_pcm_tdm_ops,
+ .pcm_new = bf5xx_pcm_tdm_new,
+ .pcm_free = bf5xx_pcm_free_dma_buffers,
+};
+EXPORT_SYMBOL_GPL(bf5xx_tdm_soc_platform);
+
+static int __init bfin_pcm_tdm_init(void)
+{
+ return snd_soc_register_platform(&bf5xx_tdm_soc_platform);
+}
+module_init(bfin_pcm_tdm_init);
+
+static void __exit bfin_pcm_tdm_exit(void)
+{
+ snd_soc_unregister_platform(&bf5xx_tdm_soc_platform);
+}
+module_exit(bfin_pcm_tdm_exit);
+
+MODULE_AUTHOR("Barry Song");
+MODULE_DESCRIPTION("ADI Blackfin TDM PCM DMA module");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/blackfin/bf5xx-tdm-pcm.h b/sound/soc/blackfin/bf5xx-tdm-pcm.h
new file mode 100644
index 000000000000..ddc5047df88c
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-tdm-pcm.h
@@ -0,0 +1,21 @@
+/*
+ * sound/soc/blackfin/bf5xx-tdm-pcm.h -- ALSA PCM interface for the Blackfin
+ *
+ * Copyright 2009 Analog Device Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _BF5XX_TDM_PCM_H
+#define _BF5XX_TDM_PCM_H
+
+struct bf5xx_pcm_dma_params {
+ char *name; /* stream identifier */
+};
+
+/* platform data */
+extern struct snd_soc_platform bf5xx_tdm_soc_platform;
+
+#endif
diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
new file mode 100644
index 000000000000..3096badf09a5
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-tdm.c
@@ -0,0 +1,343 @@
+/*
+ * File: sound/soc/blackfin/bf5xx-tdm.c
+ * Author: Barry Song <Barry.Song@analog.com>
+ *
+ * Created: Thurs June 04 2009
+ * Description: Blackfin I2S(TDM) CPU DAI driver
+ * Even though TDM mode could be handled as part of the I2S DAI,
+ * the configuration and data flow differ so much that it is
+ * cleaner to keep I2S and TDM in separate modules
+ *
+ * Modified:
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include <asm/irq.h>
+#include <asm/portmux.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+
+#include "bf5xx-sport.h"
+#include "bf5xx-tdm.h"
+
+struct bf5xx_tdm_port {
+ u16 tcr1;
+ u16 rcr1;
+ u16 tcr2;
+ u16 rcr2;
+ int configured;
+};
+
+static struct bf5xx_tdm_port bf5xx_tdm;
+static int sport_num = CONFIG_SND_BF5XX_SPORT_NUM;
+
+static struct sport_param sport_params[2] = {
+ {
+ .dma_rx_chan = CH_SPORT0_RX,
+ .dma_tx_chan = CH_SPORT0_TX,
+ .err_irq = IRQ_SPORT0_ERROR,
+ .regs = (struct sport_register *)SPORT0_TCR1,
+ },
+ {
+ .dma_rx_chan = CH_SPORT1_RX,
+ .dma_tx_chan = CH_SPORT1_TX,
+ .err_irq = IRQ_SPORT1_ERROR,
+ .regs = (struct sport_register *)SPORT1_TCR1,
+ }
+};
+
+/*
+ * Set the TFS pin selector for SPORT 0 based on whether the selected
+ * port is F or G. If the port is F, no conflict should exist for the
+ * TFS. When Port G is selected together with the EMAC, there is a
+ * conflict between the PHY interrupt line and TFS. The current
+ * settings prevent the conflict by ignoring the TFS pin when Port G
+ * is selected, which allows the ssm2602 on Port G and the EMAC to be
+ * used concurrently.
+ */
+#ifdef CONFIG_BF527_SPORT0_PORTF
+#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
+#else
+#define LOCAL_SPORT0_TFS (0)
+#endif
+
+static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
+ P_SPORT0_DRPRI, P_SPORT0_RSCLK, LOCAL_SPORT0_TFS, 0},
+ {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI,
+ P_SPORT1_RSCLK, P_SPORT1_TFS, 0} };
+
+static int bf5xx_tdm_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+{
+ int ret = 0;
+
+ /* interface format: support TDM, slave mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ break;
+ default:
+ printk(KERN_ERR "%s: Unknown DAI format type\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBS_CFM:
+ ret = -EINVAL;
+ break;
+ default:
+ printk(KERN_ERR "%s: Unknown DAI master type\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int bf5xx_tdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int ret = 0;
+
+ bf5xx_tdm.tcr2 &= ~0x1f;
+ bf5xx_tdm.rcr2 &= ~0x1f;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ bf5xx_tdm.tcr2 |= 31;
+ bf5xx_tdm.rcr2 |= 31;
+ sport_handle->wdsize = 4;
+ break;
+ /* at present, we only support 32-bit transfers */
+ default:
+ pr_err("unsupported PCM format\n");
+ return -EINVAL;
+ break;
+ }
+
+ if (!bf5xx_tdm.configured) {
+ /*
+ * TX and RX are not independent; they are enabled at the
+ * same time, even if only one side is running. So we
+ * need to configure both of them when the first
+ * stream is opened.
+ *
+ * CPU DAI: slave mode.
+ */
+ ret = sport_config_rx(sport_handle, bf5xx_tdm.rcr1,
+ bf5xx_tdm.rcr2, 0, 0);
+ if (ret) {
+ pr_err("SPORT is busy!\n");
+ return -EBUSY;
+ }
+
+ ret = sport_config_tx(sport_handle, bf5xx_tdm.tcr1,
+ bf5xx_tdm.tcr2, 0, 0);
+ if (ret) {
+ pr_err("SPORT is busy!\n");
+ return -EBUSY;
+ }
+
+ bf5xx_tdm.configured = 1;
+ }
+
+ return 0;
+}
+
+static void bf5xx_tdm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ /* No active stream, SPORT is allowed to be configured again. */
+ if (!dai->active)
+ bf5xx_tdm.configured = 0;
+}
+
+#ifdef CONFIG_PM
+static int bf5xx_tdm_suspend(struct snd_soc_dai *dai)
+{
+ struct sport_device *sport =
+ (struct sport_device *)dai->private_data;
+
+ if (!dai->active)
+ return 0;
+ if (dai->capture.active)
+ sport_rx_stop(sport);
+ if (dai->playback.active)
+ sport_tx_stop(sport);
+ return 0;
+}
+
+static int bf5xx_tdm_resume(struct snd_soc_dai *dai)
+{
+ int ret;
+ struct sport_device *sport =
+ (struct sport_device *)dai->private_data;
+
+ if (!dai->active)
+ return 0;
+
+ ret = sport_set_multichannel(sport, 8, 0xFF, 1);
+ if (ret) {
+ pr_err("SPORT is busy!\n");
+ ret = -EBUSY;
+ }
+
+ ret = sport_config_rx(sport, IRFS, 0x1F, 0, 0);
+ if (ret) {
+ pr_err("SPORT is busy!\n");
+ ret = -EBUSY;
+ }
+
+ ret = sport_config_tx(sport, ITFS, 0x1F, 0, 0);
+ if (ret) {
+ pr_err("SPORT is busy!\n");
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+#else
+#define bf5xx_tdm_suspend NULL
+#define bf5xx_tdm_resume NULL
+#endif
+
+static struct snd_soc_dai_ops bf5xx_tdm_dai_ops = {
+ .hw_params = bf5xx_tdm_hw_params,
+ .set_fmt = bf5xx_tdm_set_dai_fmt,
+ .shutdown = bf5xx_tdm_shutdown,
+};
+
+struct snd_soc_dai bf5xx_tdm_dai = {
+ .name = "bf5xx-tdm",
+ .id = 0,
+ .suspend = bf5xx_tdm_suspend,
+ .resume = bf5xx_tdm_resume,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,},
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,},
+ .ops = &bf5xx_tdm_dai_ops,
+};
+EXPORT_SYMBOL_GPL(bf5xx_tdm_dai);
+
+static int __devinit bfin_tdm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (peripheral_request_list(&sport_req[sport_num][0], "soc-audio")) {
+ pr_err("Requesting Peripherals failed\n");
+ return -EFAULT;
+ }
+
+ /* request DMA for SPORT */
+ sport_handle = sport_init(&sport_params[sport_num], 4,
+ 8 * sizeof(u32), NULL);
+ if (!sport_handle) {
+ peripheral_free_list(&sport_req[sport_num][0]);
+ return -ENODEV;
+ }
+
+ /* SPORT works in TDM mode */
+ ret = sport_set_multichannel(sport_handle, 8, 0xFF, 1);
+ if (ret) {
+ pr_err("SPORT is busy!\n");
+ ret = -EBUSY;
+ goto sport_config_err;
+ }
+
+ ret = sport_config_rx(sport_handle, IRFS, 0x1F, 0, 0);
+ if (ret) {
+ pr_err("SPORT is busy!\n");
+ ret = -EBUSY;
+ goto sport_config_err;
+ }
+
+ ret = sport_config_tx(sport_handle, ITFS, 0x1F, 0, 0);
+ if (ret) {
+ pr_err("SPORT is busy!\n");
+ ret = -EBUSY;
+ goto sport_config_err;
+ }
+
+ ret = snd_soc_register_dai(&bf5xx_tdm_dai);
+ if (ret) {
+ pr_err("Failed to register DAI: %d\n", ret);
+ goto sport_config_err;
+ }
+ return 0;
+
+sport_config_err:
+ peripheral_free_list(&sport_req[sport_num][0]);
+ return ret;
+}
+
+static int __devexit bfin_tdm_remove(struct platform_device *pdev)
+{
+ peripheral_free_list(&sport_req[sport_num][0]);
+ snd_soc_unregister_dai(&bf5xx_tdm_dai);
+
+ return 0;
+}
+
+static struct platform_driver bfin_tdm_driver = {
+ .probe = bfin_tdm_probe,
+ .remove = __devexit_p(bfin_tdm_remove),
+ .driver = {
+ .name = "bfin-tdm",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bfin_tdm_init(void)
+{
+ return platform_driver_register(&bfin_tdm_driver);
+}
+module_init(bfin_tdm_init);
+
+static void __exit bfin_tdm_exit(void)
+{
+ platform_driver_unregister(&bfin_tdm_driver);
+}
+module_exit(bfin_tdm_exit);
+
+/* Module information */
+MODULE_AUTHOR("Barry Song");
+MODULE_DESCRIPTION("TDM driver for ADI Blackfin");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/blackfin/bf5xx-tdm.h b/sound/soc/blackfin/bf5xx-tdm.h
new file mode 100644
index 000000000000..618ec3d90cd4
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-tdm.h
@@ -0,0 +1,14 @@
+/*
+ * sound/soc/blackfin/bf5xx-tdm.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _BF5XX_TDM_H
+#define _BF5XX_TDM_H
+
+extern struct snd_soc_dai bf5xx_tdm_dai;
+
+#endif
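
For orientation, here is a minimal sketch of how a machine (board) driver might
wire this CPU DAI to a codec DAI. It is only an illustration against the ASoC
API of this kernel; the function, ops and link names are hypothetical, and the
format flags simply mirror what bf5xx_tdm_set_dai_fmt accepts (DSP_A with the
codec as bit-clock and frame master).

#include <sound/core.h>
#include <sound/soc.h>
#include "../codecs/ad1938.h"
#include "bf5xx-tdm.h"

/* Hypothetical machine-driver fragment, not part of this patch. */
static int bf5xx_board_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	unsigned int fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_IF |
			   SND_SOC_DAIFMT_CBM_CFM;
	int ret;

	/* bf5xx-tdm only accepts DSP_A with the codec as master */
	ret = snd_soc_dai_set_fmt(rtd->dai->cpu_dai, fmt);
	if (ret < 0)
		return ret;

	/* the AD1938 also accepts DSP_A with inverted bclk/frame */
	return snd_soc_dai_set_fmt(rtd->dai->codec_dai, fmt);
}

static struct snd_soc_ops bf5xx_board_ops = {
	.hw_params = bf5xx_board_hw_params,
};

static struct snd_soc_dai_link bf5xx_board_dai_link = {
	.name		= "ad1938",
	.stream_name	= "AD1938",
	.cpu_dai	= &bf5xx_tdm_dai,
	.codec_dai	= &ad1938_dai,
	.ops		= &bf5xx_board_ops,
};
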
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index bbc97fd76648..0edca93af3b0 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -12,11 +12,15 @@ config SND_SOC_ALL_CODECS
tristate "Build all ASoC CODEC drivers"
select SND_SOC_L3
select SND_SOC_AC97_CODEC if SND_SOC_AC97_BUS
+ select SND_SOC_AD1836 if SPI_MASTER
+ select SND_SOC_AD1938 if SPI_MASTER
select SND_SOC_AD1980 if SND_SOC_AC97_BUS
select SND_SOC_AD73311 if I2C
select SND_SOC_AK4104 if SPI_MASTER
select SND_SOC_AK4535 if I2C
+ select SND_SOC_AK4642 if I2C
select SND_SOC_CS4270 if I2C
+ select SND_SOC_MAX9877 if I2C
select SND_SOC_PCM3008
select SND_SOC_SPDIF
select SND_SOC_SSM2602 if I2C
@@ -30,18 +34,23 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8350 if MFD_WM8350
select SND_SOC_WM8400 if MFD_WM8400
select SND_SOC_WM8510 if SND_SOC_I2C_AND_SPI
+ select SND_SOC_WM8523 if I2C
select SND_SOC_WM8580 if I2C
select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8750 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI
+ select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8900 if I2C
select SND_SOC_WM8903 if I2C
select SND_SOC_WM8940 if I2C
select SND_SOC_WM8960 if I2C
+ select SND_SOC_WM8961 if I2C
select SND_SOC_WM8971 if I2C
+ select SND_SOC_WM8974 if I2C
select SND_SOC_WM8988 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8990 if I2C
+ select SND_SOC_WM8993 if I2C
select SND_SOC_WM9081 if I2C
select SND_SOC_WM9705 if SND_SOC_AC97_BUS
select SND_SOC_WM9712 if SND_SOC_AC97_BUS
@@ -57,11 +66,21 @@ config SND_SOC_ALL_CODECS
If unsure select "N".
+config SND_SOC_WM_HUBS
+ tristate
+ default y if SND_SOC_WM8993=y
+ default m if SND_SOC_WM8993=m
config SND_SOC_AC97_CODEC
tristate
select SND_AC97_CODEC
+config SND_SOC_AD1836
+ tristate
+
+config SND_SOC_AD1938
+ tristate
+
config SND_SOC_AD1980
tristate
@@ -74,6 +93,9 @@ config SND_SOC_AK4104
config SND_SOC_AK4535
tristate
+config SND_SOC_AK4642
+ tristate
+
# Cirrus Logic CS4270 Codec
config SND_SOC_CS4270
tristate
@@ -86,6 +108,9 @@ config SND_SOC_CS4270_VD33_ERRATA
bool
depends on SND_SOC_CS4270
+config SND_SOC_CX20442
+ tristate
+
config SND_SOC_L3
tristate
@@ -129,6 +154,9 @@ config SND_SOC_WM8400
config SND_SOC_WM8510
tristate
+config SND_SOC_WM8523
+ tristate
+
config SND_SOC_WM8580
tristate
@@ -144,6 +172,9 @@ config SND_SOC_WM8750
config SND_SOC_WM8753
tristate
+config SND_SOC_WM8776
+ tristate
+
config SND_SOC_WM8900
tristate
@@ -156,15 +187,24 @@ config SND_SOC_WM8940
config SND_SOC_WM8960
tristate
+config SND_SOC_WM8961
+ tristate
+
config SND_SOC_WM8971
tristate
+config SND_SOC_WM8974
+ tristate
+
config SND_SOC_WM8988
tristate
config SND_SOC_WM8990
tristate
+config SND_SOC_WM8993
+ tristate
+
config SND_SOC_WM9081
tristate
@@ -176,3 +216,7 @@ config SND_SOC_WM9712
config SND_SOC_WM9713
tristate
+
+# Amp
+config SND_SOC_MAX9877
+ tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 8b7530546f4d..fb4af28486ba 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -1,9 +1,13 @@
snd-soc-ac97-objs := ac97.o
+snd-soc-ad1836-objs := ad1836.o
+snd-soc-ad1938-objs := ad1938.o
snd-soc-ad1980-objs := ad1980.o
snd-soc-ad73311-objs := ad73311.o
snd-soc-ak4104-objs := ak4104.o
snd-soc-ak4535-objs := ak4535.o
+snd-soc-ak4642-objs := ak4642.o
snd-soc-cs4270-objs := cs4270.o
+snd-soc-cx20442-objs := cx20442.o
snd-soc-l3-objs := l3.o
snd-soc-pcm3008-objs := pcm3008.o
snd-soc-spdif-objs := spdif_transciever.o
@@ -18,29 +22,42 @@ snd-soc-uda1380-objs := uda1380.o
snd-soc-wm8350-objs := wm8350.o
snd-soc-wm8400-objs := wm8400.o
snd-soc-wm8510-objs := wm8510.o
+snd-soc-wm8523-objs := wm8523.o
snd-soc-wm8580-objs := wm8580.o
snd-soc-wm8728-objs := wm8728.o
snd-soc-wm8731-objs := wm8731.o
snd-soc-wm8750-objs := wm8750.o
snd-soc-wm8753-objs := wm8753.o
+snd-soc-wm8776-objs := wm8776.o
snd-soc-wm8900-objs := wm8900.o
snd-soc-wm8903-objs := wm8903.o
snd-soc-wm8940-objs := wm8940.o
snd-soc-wm8960-objs := wm8960.o
+snd-soc-wm8961-objs := wm8961.o
snd-soc-wm8971-objs := wm8971.o
+snd-soc-wm8974-objs := wm8974.o
snd-soc-wm8988-objs := wm8988.o
snd-soc-wm8990-objs := wm8990.o
+snd-soc-wm8993-objs := wm8993.o
snd-soc-wm9081-objs := wm9081.o
snd-soc-wm9705-objs := wm9705.o
snd-soc-wm9712-objs := wm9712.o
snd-soc-wm9713-objs := wm9713.o
+snd-soc-wm-hubs-objs := wm_hubs.o
+
+# Amp
+snd-soc-max9877-objs := max9877.o
obj-$(CONFIG_SND_SOC_AC97_CODEC) += snd-soc-ac97.o
+obj-$(CONFIG_SND_SOC_AD1836) += snd-soc-ad1836.o
+obj-$(CONFIG_SND_SOC_AD1938) += snd-soc-ad1938.o
obj-$(CONFIG_SND_SOC_AD1980) += snd-soc-ad1980.o
obj-$(CONFIG_SND_SOC_AD73311) += snd-soc-ad73311.o
obj-$(CONFIG_SND_SOC_AK4104) += snd-soc-ak4104.o
obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o
+obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o
obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
+obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o
@@ -55,19 +72,28 @@ obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
obj-$(CONFIG_SND_SOC_WM8350) += snd-soc-wm8350.o
obj-$(CONFIG_SND_SOC_WM8400) += snd-soc-wm8400.o
obj-$(CONFIG_SND_SOC_WM8510) += snd-soc-wm8510.o
+obj-$(CONFIG_SND_SOC_WM8523) += snd-soc-wm8523.o
obj-$(CONFIG_SND_SOC_WM8580) += snd-soc-wm8580.o
obj-$(CONFIG_SND_SOC_WM8728) += snd-soc-wm8728.o
obj-$(CONFIG_SND_SOC_WM8731) += snd-soc-wm8731.o
obj-$(CONFIG_SND_SOC_WM8750) += snd-soc-wm8750.o
obj-$(CONFIG_SND_SOC_WM8753) += snd-soc-wm8753.o
+obj-$(CONFIG_SND_SOC_WM8776) += snd-soc-wm8776.o
obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o
obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o
obj-$(CONFIG_SND_SOC_WM8971) += snd-soc-wm8971.o
+obj-$(CONFIG_SND_SOC_WM8974) += snd-soc-wm8974.o
obj-$(CONFIG_SND_SOC_WM8940) += snd-soc-wm8940.o
obj-$(CONFIG_SND_SOC_WM8960) += snd-soc-wm8960.o
+obj-$(CONFIG_SND_SOC_WM8961) += snd-soc-wm8961.o
obj-$(CONFIG_SND_SOC_WM8988) += snd-soc-wm8988.o
obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o
+obj-$(CONFIG_SND_SOC_WM8993) += snd-soc-wm8993.o
obj-$(CONFIG_SND_SOC_WM9081) += snd-soc-wm9081.o
obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o
obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o
obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o
+obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
+
+# Amp
+obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
new file mode 100644
index 000000000000..3612bb92df90
--- /dev/null
+++ b/sound/soc/codecs/ad1836.c
@@ -0,0 +1,446 @@
+/*
+ * File: sound/soc/codecs/ad1836.c
+ * Author: Barry Song <Barry.Song@analog.com>
+ *
+ * Created: Aug 04 2009
+ * Description: Driver for AD1836 sound chip
+ *
+ * Modified:
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <sound/soc-dapm.h>
+#include <linux/spi/spi.h>
+#include "ad1836.h"
+
+/* codec private data */
+struct ad1836_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[AD1836_NUM_REGS];
+};
+
+static struct snd_soc_codec *ad1836_codec;
+struct snd_soc_codec_device soc_codec_dev_ad1836;
+static int ad1836_register(struct ad1836_priv *ad1836);
+static void ad1836_unregister(struct ad1836_priv *ad1836);
+
+/*
+ * AD1836 volume/mute/de-emphasis etc. controls
+ */
+static const char *ad1836_deemp[] = {"None", "44.1kHz", "32kHz", "48kHz"};
+
+static const struct soc_enum ad1836_deemp_enum =
+ SOC_ENUM_SINGLE(AD1836_DAC_CTRL1, 8, 4, ad1836_deemp);
+
+static const struct snd_kcontrol_new ad1836_snd_controls[] = {
+ /* DAC volume control */
+ SOC_DOUBLE_R("DAC1 Volume", AD1836_DAC_L1_VOL,
+ AD1836_DAC_R1_VOL, 0, 0x3FF, 0),
+ SOC_DOUBLE_R("DAC2 Volume", AD1836_DAC_L2_VOL,
+ AD1836_DAC_R2_VOL, 0, 0x3FF, 0),
+ SOC_DOUBLE_R("DAC3 Volume", AD1836_DAC_L3_VOL,
+ AD1836_DAC_R3_VOL, 0, 0x3FF, 0),
+
+ /* ADC switch control */
+ SOC_DOUBLE("ADC1 Switch", AD1836_ADC_CTRL2, AD1836_ADCL1_MUTE,
+ AD1836_ADCR1_MUTE, 1, 1),
+ SOC_DOUBLE("ADC2 Switch", AD1836_ADC_CTRL2, AD1836_ADCL2_MUTE,
+ AD1836_ADCR2_MUTE, 1, 1),
+
+ /* DAC switch control */
+ SOC_DOUBLE("DAC1 Switch", AD1836_DAC_CTRL2, AD1836_DACL1_MUTE,
+ AD1836_DACR1_MUTE, 1, 1),
+ SOC_DOUBLE("DAC2 Switch", AD1836_DAC_CTRL2, AD1836_DACL2_MUTE,
+ AD1836_DACR2_MUTE, 1, 1),
+ SOC_DOUBLE("DAC3 Switch", AD1836_DAC_CTRL2, AD1836_DACL3_MUTE,
+ AD1836_DACR3_MUTE, 1, 1),
+
+ /* ADC high-pass filter */
+ SOC_SINGLE("ADC High Pass Filter Switch", AD1836_ADC_CTRL1,
+ AD1836_ADC_HIGHPASS_FILTER, 1, 0),
+
+ /* DAC de-emphasis */
+ SOC_ENUM("Playback Deemphasis", ad1836_deemp_enum),
+};
+
+static const struct snd_soc_dapm_widget ad1836_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("DAC", "Playback", AD1836_DAC_CTRL1,
+ AD1836_DAC_POWERDOWN, 1),
+ SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_SUPPLY("ADC_PWR", AD1836_ADC_CTRL1,
+ AD1836_ADC_POWERDOWN, 1, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("DAC1OUT"),
+ SND_SOC_DAPM_OUTPUT("DAC2OUT"),
+ SND_SOC_DAPM_OUTPUT("DAC3OUT"),
+ SND_SOC_DAPM_INPUT("ADC1IN"),
+ SND_SOC_DAPM_INPUT("ADC2IN"),
+};
+
+static const struct snd_soc_dapm_route audio_paths[] = {
+ { "DAC", NULL, "ADC_PWR" },
+ { "ADC", NULL, "ADC_PWR" },
+ { "DAC1OUT", "DAC1 Switch", "DAC" },
+ { "DAC2OUT", "DAC2 Switch", "DAC" },
+ { "DAC3OUT", "DAC3 Switch", "DAC" },
+ { "ADC", "ADC1 Switch", "ADC1IN" },
+ { "ADC", "ADC2 Switch", "ADC2IN" },
+};
+
+/*
+ * DAI ops entries
+ */
+
+static int ad1836_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ /* at present, we only support ADC aux mode to interface with
+ * the Blackfin SPORT in TDM mode
+ */
+ case SND_SOC_DAIFMT_DSP_A:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_IF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ /* ALCLK,ABCLK are both output, AD1836 can only be master */
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ad1836_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int word_len = 0;
+
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ /* bit size */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ word_len = 3;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ word_len = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S32_LE:
+ word_len = 0;
+ break;
+ }
+
+ snd_soc_update_bits(codec, AD1836_DAC_CTRL1,
+ AD1836_DAC_WORD_LEN_MASK, word_len);
+
+ snd_soc_update_bits(codec, AD1836_ADC_CTRL2,
+ AD1836_ADC_WORD_LEN_MASK, word_len);
+
+ return 0;
+}
+
+
+/*
+ * interface to read/write ad1836 register
+ */
+#define AD1836_SPI_REG_SHFT 12
+#define AD1836_SPI_READ (1 << 11)
+#define AD1836_SPI_VAL_MSK 0x3FF
+
+/*
+ * write to the ad1836 register space
+ */
+
+static int ad1836_write_reg(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u16 *reg_cache = codec->reg_cache;
+ int ret = 0;
+
+ if (value != reg_cache[reg]) {
+ unsigned short buf;
+ struct spi_transfer t = {
+ .tx_buf = &buf,
+ .len = 2,
+ };
+ struct spi_message m;
+
+ buf = (reg << AD1836_SPI_REG_SHFT) |
+ (value & AD1836_SPI_VAL_MSK);
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(codec->control_data, &m);
+ if (ret == 0)
+ reg_cache[reg] = value;
+ }
+
+ return ret;
+}
+
+/*
+ * read from the ad1836 register space cache
+ */
+static unsigned int ad1836_read_reg_cache(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u16 *reg_cache = codec->reg_cache;
+
+ if (reg >= codec->reg_cache_size)
+ return -EINVAL;
+
+ return reg_cache[reg];
+}
+
+static int __devinit ad1836_spi_probe(struct spi_device *spi)
+{
+ struct snd_soc_codec *codec;
+ struct ad1836_priv *ad1836;
+
+ ad1836 = kzalloc(sizeof(struct ad1836_priv), GFP_KERNEL);
+ if (ad1836 == NULL)
+ return -ENOMEM;
+
+ codec = &ad1836->codec;
+ codec->control_data = spi;
+ codec->dev = &spi->dev;
+
+ dev_set_drvdata(&spi->dev, ad1836);
+
+ return ad1836_register(ad1836);
+}
+
+static int __devexit ad1836_spi_remove(struct spi_device *spi)
+{
+ struct ad1836_priv *ad1836 = dev_get_drvdata(&spi->dev);
+
+ ad1836_unregister(ad1836);
+ return 0;
+}
+
+static struct spi_driver ad1836_spi_driver = {
+ .driver = {
+ .name = "ad1836-spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad1836_spi_probe,
+ .remove = __devexit_p(ad1836_spi_remove),
+};
+
+static struct snd_soc_dai_ops ad1836_dai_ops = {
+ .hw_params = ad1836_hw_params,
+ .set_fmt = ad1836_set_dai_fmt,
+};
+
+/* codec DAI instance */
+struct snd_soc_dai ad1836_dai = {
+ .name = "AD1836",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 6,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 4,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &ad1836_dai_ops,
+};
+EXPORT_SYMBOL_GPL(ad1836_dai);
+
+static int ad1836_register(struct ad1836_priv *ad1836)
+{
+ int ret;
+ struct snd_soc_codec *codec = &ad1836->codec;
+
+ if (ad1836_codec) {
+ dev_err(codec->dev, "Another ad1836 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+ codec->private_data = ad1836;
+ codec->reg_cache = ad1836->reg_cache;
+ codec->reg_cache_size = AD1836_NUM_REGS;
+ codec->name = "AD1836";
+ codec->owner = THIS_MODULE;
+ codec->dai = &ad1836_dai;
+ codec->num_dai = 1;
+ codec->write = ad1836_write_reg;
+ codec->read = ad1836_read_reg_cache;
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ ad1836_dai.dev = codec->dev;
+ ad1836_codec = codec;
+
+ /* default setting for ad1836 */
+ /* de-emphasis: 48kHz, power-on dac */
+ codec->write(codec, AD1836_DAC_CTRL1, 0x300);
+ /* unmute dac channels */
+ codec->write(codec, AD1836_DAC_CTRL2, 0x0);
+ /* high-pass filter enable, power-on adc */
+ codec->write(codec, AD1836_ADC_CTRL1, 0x100);
+ /* unmute adc channels, adc aux mode */
+ codec->write(codec, AD1836_ADC_CTRL2, 0x180);
+ /* left/right diff:PGA/MUX */
+ codec->write(codec, AD1836_ADC_CTRL3, 0x3A);
+ /* volume */
+ codec->write(codec, AD1836_DAC_L1_VOL, 0x3FF);
+ codec->write(codec, AD1836_DAC_R1_VOL, 0x3FF);
+ codec->write(codec, AD1836_DAC_L2_VOL, 0x3FF);
+ codec->write(codec, AD1836_DAC_R2_VOL, 0x3FF);
+ codec->write(codec, AD1836_DAC_L3_VOL, 0x3FF);
+ codec->write(codec, AD1836_DAC_R3_VOL, 0x3FF);
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ kfree(ad1836);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&ad1836_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ kfree(ad1836);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ad1836_unregister(struct ad1836_priv *ad1836)
+{
+ snd_soc_unregister_dai(&ad1836_dai);
+ snd_soc_unregister_codec(&ad1836->codec);
+ kfree(ad1836);
+ ad1836_codec = NULL;
+}
+
+static int ad1836_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (ad1836_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = ad1836_codec;
+ codec = ad1836_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, ad1836_snd_controls,
+ ARRAY_SIZE(ad1836_snd_controls));
+ snd_soc_dapm_new_controls(codec, ad1836_dapm_widgets,
+ ARRAY_SIZE(ad1836_dapm_widgets));
+ snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+ snd_soc_dapm_new_widgets(codec);
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+/* power down chip */
+static int ad1836_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_ad1836 = {
+ .probe = ad1836_probe,
+ .remove = ad1836_remove,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_ad1836);
+
+static int __init ad1836_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&ad1836_spi_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register ad1836 SPI driver: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+module_init(ad1836_init);
+
+static void __exit ad1836_exit(void)
+{
+ spi_unregister_driver(&ad1836_spi_driver);
+}
+module_exit(ad1836_exit);
+
+MODULE_DESCRIPTION("ASoC ad1836 driver");
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
new file mode 100644
index 000000000000..7660ee6973c0
--- /dev/null
+++ b/sound/soc/codecs/ad1836.h
@@ -0,0 +1,64 @@
+/*
+ * File: sound/soc/codecs/ad1836.h
+ * Based on:
+ * Author: Barry Song <Barry.Song@analog.com>
+ *
+ * Created: Aug 04, 2009
+ * Description: definitions for AD1836 registers
+ *
+ * Modified:
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __AD1836_H__
+#define __AD1836_H__
+
+#define AD1836_DAC_CTRL1 0
+#define AD1836_DAC_POWERDOWN 2
+#define AD1836_DAC_SERFMT_MASK 0xE0
+#define AD1836_DAC_SERFMT_PCK256 (0x4 << 5)
+#define AD1836_DAC_SERFMT_PCK128 (0x5 << 5)
+#define AD1836_DAC_WORD_LEN_MASK 0x18
+
+#define AD1836_DAC_CTRL2 1
+#define AD1836_DACL1_MUTE 0
+#define AD1836_DACR1_MUTE 1
+#define AD1836_DACL2_MUTE 2
+#define AD1836_DACR2_MUTE 3
+#define AD1836_DACL3_MUTE 4
+#define AD1836_DACR3_MUTE 5
+
+#define AD1836_DAC_L1_VOL 2
+#define AD1836_DAC_R1_VOL 3
+#define AD1836_DAC_L2_VOL 4
+#define AD1836_DAC_R2_VOL 5
+#define AD1836_DAC_L3_VOL 6
+#define AD1836_DAC_R3_VOL 7
+
+#define AD1836_ADC_CTRL1 12
+#define AD1836_ADC_POWERDOWN 7
+#define AD1836_ADC_HIGHPASS_FILTER 8
+
+#define AD1836_ADC_CTRL2 13
+#define AD1836_ADCL1_MUTE 0
+#define AD1836_ADCR1_MUTE 1
+#define AD1836_ADCL2_MUTE 2
+#define AD1836_ADCR2_MUTE 3
+#define AD1836_ADC_WORD_LEN_MASK 0x30
+#define AD1836_ADC_SERFMT_MASK (7 << 6)
+#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
+#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
+
+#define AD1836_ADC_CTRL3 14
+
+#define AD1836_NUM_REGS 16
+
+extern struct snd_soc_dai ad1836_dai;
+extern struct snd_soc_codec_device soc_codec_dev_ad1836;
+#endif
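
As a worked example of the SPI framing used by ad1836_write_reg() in ad1836.c
above: with AD1836_SPI_REG_SHFT = 12 and AD1836_SPI_VAL_MSK = 0x3FF, writing
the full-scale volume 0x3FF to AD1836_DAC_L1_VOL (register 2) produces the
16-bit word (2 << 12) | (0x3FF & 0x3FF) = 0x23FF, which is what spi_sync()
shifts out.
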
diff --git a/sound/soc/codecs/ad1938.c b/sound/soc/codecs/ad1938.c
new file mode 100644
index 000000000000..e62b27701a49
--- /dev/null
+++ b/sound/soc/codecs/ad1938.c
@@ -0,0 +1,682 @@
+/*
+ * File: sound/soc/codecs/ad1938.c
+ * Author: Barry Song <Barry.Song@analog.com>
+ *
+ * Created: June 04 2009
+ * Description: Driver for AD1938 sound chip
+ *
+ * Modified:
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <sound/soc-dapm.h>
+#include <linux/spi/spi.h>
+#include "ad1938.h"
+
+/* codec private data */
+struct ad1938_priv {
+ struct snd_soc_codec codec;
+ u8 reg_cache[AD1938_NUM_REGS];
+};
+
+static struct snd_soc_codec *ad1938_codec;
+struct snd_soc_codec_device soc_codec_dev_ad1938;
+static int ad1938_register(struct ad1938_priv *ad1938);
+static void ad1938_unregister(struct ad1938_priv *ad1938);
+
+/*
+ * AD1938 volume/mute/de-emphasis etc. controls
+ */
+static const char *ad1938_deemp[] = {"None", "48kHz", "44.1kHz", "32kHz"};
+
+static const struct soc_enum ad1938_deemp_enum =
+ SOC_ENUM_SINGLE(AD1938_DAC_CTRL2, 1, 4, ad1938_deemp);
+
+static const struct snd_kcontrol_new ad1938_snd_controls[] = {
+ /* DAC volume control */
+ SOC_DOUBLE_R("DAC1 Volume", AD1938_DAC_L1_VOL,
+ AD1938_DAC_R1_VOL, 0, 0xFF, 1),
+ SOC_DOUBLE_R("DAC2 Volume", AD1938_DAC_L2_VOL,
+ AD1938_DAC_R2_VOL, 0, 0xFF, 1),
+ SOC_DOUBLE_R("DAC3 Volume", AD1938_DAC_L3_VOL,
+ AD1938_DAC_R3_VOL, 0, 0xFF, 1),
+ SOC_DOUBLE_R("DAC4 Volume", AD1938_DAC_L4_VOL,
+ AD1938_DAC_R4_VOL, 0, 0xFF, 1),
+
+ /* ADC switch control */
+ SOC_DOUBLE("ADC1 Switch", AD1938_ADC_CTRL0, AD1938_ADCL1_MUTE,
+ AD1938_ADCR1_MUTE, 1, 1),
+ SOC_DOUBLE("ADC2 Switch", AD1938_ADC_CTRL0, AD1938_ADCL2_MUTE,
+ AD1938_ADCR2_MUTE, 1, 1),
+
+ /* DAC switch control */
+ SOC_DOUBLE("DAC1 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL1_MUTE,
+ AD1938_DACR1_MUTE, 1, 1),
+ SOC_DOUBLE("DAC2 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL2_MUTE,
+ AD1938_DACR2_MUTE, 1, 1),
+ SOC_DOUBLE("DAC3 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL3_MUTE,
+ AD1938_DACR3_MUTE, 1, 1),
+ SOC_DOUBLE("DAC4 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL4_MUTE,
+ AD1938_DACR4_MUTE, 1, 1),
+
+ /* ADC high-pass filter */
+ SOC_SINGLE("ADC High Pass Filter Switch", AD1938_ADC_CTRL0,
+ AD1938_ADC_HIGHPASS_FILTER, 1, 0),
+
+ /* DAC de-emphasis */
+ SOC_ENUM("Playback Deemphasis", ad1938_deemp_enum),
+};
+
+static const struct snd_soc_dapm_widget ad1938_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("DAC", "Playback", AD1938_DAC_CTRL0, 0, 1),
+ SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_SUPPLY("ADC_PWR", AD1938_ADC_CTRL0, 0, 1, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("DAC1OUT"),
+ SND_SOC_DAPM_OUTPUT("DAC2OUT"),
+ SND_SOC_DAPM_OUTPUT("DAC3OUT"),
+ SND_SOC_DAPM_OUTPUT("DAC4OUT"),
+ SND_SOC_DAPM_INPUT("ADC1IN"),
+ SND_SOC_DAPM_INPUT("ADC2IN"),
+};
+
+static const struct snd_soc_dapm_route audio_paths[] = {
+ { "DAC", NULL, "ADC_PWR" },
+ { "ADC", NULL, "ADC_PWR" },
+ { "DAC1OUT", "DAC1 Switch", "DAC" },
+ { "DAC2OUT", "DAC2 Switch", "DAC" },
+ { "DAC3OUT", "DAC3 Switch", "DAC" },
+ { "DAC4OUT", "DAC4 Switch", "DAC" },
+ { "ADC", "ADC1 Switch", "ADC1IN" },
+ { "ADC", "ADC2 Switch", "ADC2IN" },
+};
+
+/*
+ * DAI ops entries
+ */
+
+static int ad1938_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int reg;
+
+ reg = codec->read(codec, AD1938_DAC_CTRL2);
+ reg = (mute > 0) ? reg | AD1938_DAC_MASTER_MUTE : reg &
+ (~AD1938_DAC_MASTER_MUTE);
+ codec->write(codec, AD1938_DAC_CTRL2, reg);
+
+ return 0;
+}
+
+static inline int ad1938_pll_powerctrl(struct snd_soc_codec *codec, int cmd)
+{
+ int reg = codec->read(codec, AD1938_PLL_CLK_CTRL0);
+ reg = (cmd > 0) ? reg & (~AD1938_PLL_POWERDOWN) : reg |
+ AD1938_PLL_POWERDOWN;
+ codec->write(codec, AD1938_PLL_CLK_CTRL0, reg);
+
+ return 0;
+}
+
+static int ad1938_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int mask, int slots, int width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int dac_reg = codec->read(codec, AD1938_DAC_CTRL1);
+ int adc_reg = codec->read(codec, AD1938_ADC_CTRL2);
+
+ dac_reg &= ~AD1938_DAC_CHAN_MASK;
+ adc_reg &= ~AD1938_ADC_CHAN_MASK;
+
+ switch (slots) {
+ case 2:
+ dac_reg |= AD1938_DAC_2_CHANNELS << AD1938_DAC_CHAN_SHFT;
+ adc_reg |= AD1938_ADC_2_CHANNELS << AD1938_ADC_CHAN_SHFT;
+ break;
+ case 4:
+ dac_reg |= AD1938_DAC_4_CHANNELS << AD1938_DAC_CHAN_SHFT;
+ adc_reg |= AD1938_ADC_4_CHANNELS << AD1938_ADC_CHAN_SHFT;
+ break;
+ case 8:
+ dac_reg |= AD1938_DAC_8_CHANNELS << AD1938_DAC_CHAN_SHFT;
+ adc_reg |= AD1938_ADC_8_CHANNELS << AD1938_ADC_CHAN_SHFT;
+ break;
+ case 16:
+ dac_reg |= AD1938_DAC_16_CHANNELS << AD1938_DAC_CHAN_SHFT;
+ adc_reg |= AD1938_ADC_16_CHANNELS << AD1938_ADC_CHAN_SHFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ codec->write(codec, AD1938_DAC_CTRL1, dac_reg);
+ codec->write(codec, AD1938_ADC_CTRL2, adc_reg);
+
+ return 0;
+}
+
+static int ad1938_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int adc_reg, dac_reg;
+
+ adc_reg = codec->read(codec, AD1938_ADC_CTRL2);
+ dac_reg = codec->read(codec, AD1938_DAC_CTRL1);
+
+ /* At present, the driver only supports AUX ADC mode (SND_SOC_DAIFMT_I2S
+ * with TDM) and ADC & DAC TDM mode (SND_SOC_DAIFMT_DSP_A)
+ */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ adc_reg &= ~AD1938_ADC_SERFMT_MASK;
+ adc_reg |= AD1938_ADC_SERFMT_TDM;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ adc_reg &= ~AD1938_ADC_SERFMT_MASK;
+ adc_reg |= AD1938_ADC_SERFMT_AUX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF: /* normal bit clock + frame */
+ adc_reg &= ~AD1938_ADC_LEFT_HIGH;
+ adc_reg &= ~AD1938_ADC_BCLK_INV;
+ dac_reg &= ~AD1938_DAC_LEFT_HIGH;
+ dac_reg &= ~AD1938_DAC_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF: /* normal bclk + invert frm */
+ adc_reg |= AD1938_ADC_LEFT_HIGH;
+ adc_reg &= ~AD1938_ADC_BCLK_INV;
+ dac_reg |= AD1938_DAC_LEFT_HIGH;
+ dac_reg &= ~AD1938_DAC_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF: /* invert bclk + normal frm */
+ adc_reg &= ~AD1938_ADC_LEFT_HIGH;
+ adc_reg |= AD1938_ADC_BCLK_INV;
+ dac_reg &= ~AD1938_DAC_LEFT_HIGH;
+ dac_reg |= AD1938_DAC_BCLK_INV;
+ break;
+
+ case SND_SOC_DAIFMT_IB_IF: /* invert bclk + frm */
+ adc_reg |= AD1938_ADC_LEFT_HIGH;
+ adc_reg |= AD1938_ADC_BCLK_INV;
+ dac_reg |= AD1938_DAC_LEFT_HIGH;
+ dac_reg |= AD1938_DAC_BCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & frm master */
+ adc_reg |= AD1938_ADC_LCR_MASTER;
+ adc_reg |= AD1938_ADC_BCLK_MASTER;
+ dac_reg |= AD1938_DAC_LCR_MASTER;
+ dac_reg |= AD1938_DAC_BCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM: /* codec clk slave & frm master */
+ adc_reg |= AD1938_ADC_LCR_MASTER;
+ adc_reg &= ~AD1938_ADC_BCLK_MASTER;
+ dac_reg |= AD1938_DAC_LCR_MASTER;
+ dac_reg &= ~AD1938_DAC_BCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS: /* codec clk master & frame slave */
+ adc_reg &= ~AD1938_ADC_LCR_MASTER;
+ adc_reg |= AD1938_ADC_BCLK_MASTER;
+ dac_reg &= ~AD1938_DAC_LCR_MASTER;
+ dac_reg |= AD1938_DAC_BCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS: /* codec clk & frm slave */
+ adc_reg &= ~AD1938_ADC_LCR_MASTER;
+ adc_reg &= ~AD1938_ADC_BCLK_MASTER;
+ dac_reg &= ~AD1938_DAC_LCR_MASTER;
+ dac_reg &= ~AD1938_DAC_BCLK_MASTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ codec->write(codec, AD1938_ADC_CTRL2, adc_reg);
+ codec->write(codec, AD1938_DAC_CTRL1, dac_reg);
+
+ return 0;
+}
+
+static int ad1938_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ int word_len = 0, reg = 0;
+
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ /* bit size */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ word_len = 3;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ word_len = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S32_LE:
+ word_len = 0;
+ break;
+ }
+
+ reg = codec->read(codec, AD1938_DAC_CTRL2);
+ reg = (reg & (~AD1938_DAC_WORD_LEN_MASK)) | word_len;
+ codec->write(codec, AD1938_DAC_CTRL2, reg);
+
+ reg = codec->read(codec, AD1938_ADC_CTRL1);
+ reg = (reg & (~AD1938_ADC_WORD_LEN_MASK)) | word_len;
+ codec->write(codec, AD1938_ADC_CTRL1, reg);
+
+ return 0;
+}
+
+static int ad1938_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ ad1938_pll_powerctrl(codec, 1);
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ case SND_SOC_BIAS_OFF:
+ ad1938_pll_powerctrl(codec, 0);
+ break;
+ }
+ codec->bias_level = level;
+ return 0;
+}
+
+/*
+ * interface to read/write ad1938 register
+ */
+
+#define AD1938_SPI_ADDR 0x4
+#define AD1938_SPI_READ 0x1
+#define AD1938_SPI_BUFLEN 3
+
+/*
+ * write to the ad1938 register space
+ */
+
+static int ad1938_write_reg(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u8 *reg_cache = codec->reg_cache;
+ int ret = 0;
+
+ if (value != reg_cache[reg]) {
+ uint8_t buf[AD1938_SPI_BUFLEN];
+ struct spi_transfer t = {
+ .tx_buf = buf,
+ .len = AD1938_SPI_BUFLEN,
+ };
+ struct spi_message m;
+
+ buf[0] = AD1938_SPI_ADDR << 1;
+ buf[1] = reg;
+ buf[2] = value;
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(codec->control_data, &m);
+ if (ret == 0)
+ reg_cache[reg] = value;
+ }
+
+ return ret;
+}
+
+/*
+ * read from the ad1938 register space cache
+ */
+
+static unsigned int ad1938_read_reg_cache(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u8 *reg_cache = codec->reg_cache;
+
+ if (reg >= codec->reg_cache_size)
+ return -EINVAL;
+
+ return reg_cache[reg];
+}
+
+/*
+ * read from the ad1938 register space
+ */
+
+static unsigned int ad1938_read_reg(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ char w_buf[AD1938_SPI_BUFLEN];
+ char r_buf[AD1938_SPI_BUFLEN];
+ int ret;
+
+ struct spi_transfer t = {
+ .tx_buf = w_buf,
+ .rx_buf = r_buf,
+ .len = AD1938_SPI_BUFLEN,
+ };
+ struct spi_message m;
+
+ w_buf[0] = (AD1938_SPI_ADDR << 1) | AD1938_SPI_READ;
+ w_buf[1] = reg;
+ w_buf[2] = 0;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(codec->control_data, &m);
+ if (ret == 0)
+ return r_buf[2];
+ else
+ return -EIO;
+}
+
+static int ad1938_fill_cache(struct snd_soc_codec *codec)
+{
+ int i;
+ u8 *reg_cache = codec->reg_cache;
+ struct spi_device *spi = codec->control_data;
+
+ for (i = 0; i < codec->reg_cache_size; i++) {
+ int ret = ad1938_read_reg(codec, i);
+ if (ret == -EIO) {
+ dev_err(&spi->dev, "AD1938 SPI read failure\n");
+ return ret;
+ }
+ reg_cache[i] = ret;
+ }
+
+ return 0;
+}
+
+static int __devinit ad1938_spi_probe(struct spi_device *spi)
+{
+ struct snd_soc_codec *codec;
+ struct ad1938_priv *ad1938;
+
+ ad1938 = kzalloc(sizeof(struct ad1938_priv), GFP_KERNEL);
+ if (ad1938 == NULL)
+ return -ENOMEM;
+
+ codec = &ad1938->codec;
+ codec->control_data = spi;
+ codec->dev = &spi->dev;
+
+ dev_set_drvdata(&spi->dev, ad1938);
+
+ return ad1938_register(ad1938);
+}
+
+static int __devexit ad1938_spi_remove(struct spi_device *spi)
+{
+ struct ad1938_priv *ad1938 = dev_get_drvdata(&spi->dev);
+
+ ad1938_unregister(ad1938);
+ return 0;
+}
+
+static struct spi_driver ad1938_spi_driver = {
+ .driver = {
+ .name = "ad1938",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad1938_spi_probe,
+ .remove = __devexit_p(ad1938_spi_remove),
+};
+
+static struct snd_soc_dai_ops ad1938_dai_ops = {
+ .hw_params = ad1938_hw_params,
+ .digital_mute = ad1938_mute,
+ .set_tdm_slot = ad1938_set_tdm_slot,
+ .set_fmt = ad1938_set_dai_fmt,
+};
+
+/* codec DAI instance */
+struct snd_soc_dai ad1938_dai = {
+ .name = "AD1938",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 4,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &ad1938_dai_ops,
+};
+EXPORT_SYMBOL_GPL(ad1938_dai);
+
+static int ad1938_register(struct ad1938_priv *ad1938)
+{
+ int ret;
+ struct snd_soc_codec *codec = &ad1938->codec;
+
+ if (ad1938_codec) {
+ dev_err(codec->dev, "Another ad1938 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+ codec->private_data = ad1938;
+ codec->reg_cache = ad1938->reg_cache;
+ codec->reg_cache_size = AD1938_NUM_REGS;
+ codec->name = "AD1938";
+ codec->owner = THIS_MODULE;
+ codec->dai = &ad1938_dai;
+ codec->num_dai = 1;
+ codec->write = ad1938_write_reg;
+ codec->read = ad1938_read_reg_cache;
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ ad1938_dai.dev = codec->dev;
+ ad1938_codec = codec;
+
+ /* default setting for ad1938 */
+
+ /* unmute dac channels */
+ codec->write(codec, AD1938_DAC_CHNL_MUTE, 0x0);
+ /* de-emphasis: 48kHz, powerdown dac */
+ codec->write(codec, AD1938_DAC_CTRL2, 0x1A);
+ /* powerdown dac, dac in tdm mode */
+ codec->write(codec, AD1938_DAC_CTRL0, 0x41);
+ /* high-pass filter enable */
+ codec->write(codec, AD1938_ADC_CTRL0, 0x3);
+ /* sdata delay=1, adc aux mode */
+ codec->write(codec, AD1938_ADC_CTRL1, 0x43);
+ /* pll input: mclki/xi */
+ codec->write(codec, AD1938_PLL_CLK_CTRL0, 0x9D);
+ codec->write(codec, AD1938_PLL_CLK_CTRL1, 0x04);
+
+ ad1938_fill_cache(codec);
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ kfree(ad1938);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&ad1938_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ kfree(ad1938);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ad1938_unregister(struct ad1938_priv *ad1938)
+{
+ ad1938_set_bias_level(&ad1938->codec, SND_SOC_BIAS_OFF);
+ snd_soc_unregister_dai(&ad1938_dai);
+ snd_soc_unregister_codec(&ad1938->codec);
+ kfree(ad1938);
+ ad1938_codec = NULL;
+}
+
+static int ad1938_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (ad1938_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = ad1938_codec;
+ codec = ad1938_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, ad1938_snd_controls,
+ ARRAY_SIZE(ad1938_snd_controls));
+ snd_soc_dapm_new_controls(codec, ad1938_dapm_widgets,
+ ARRAY_SIZE(ad1938_dapm_widgets));
+ snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+ snd_soc_dapm_new_widgets(codec);
+
+ ad1938_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+/* power down chip */
+static int ad1938_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ad1938_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ ad1938_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static int ad1938_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ if (codec->suspend_bias_level == SND_SOC_BIAS_ON)
+ ad1938_set_bias_level(codec, SND_SOC_BIAS_ON);
+
+ return 0;
+}
+#else
+#define ad1938_suspend NULL
+#define ad1938_resume NULL
+#endif
+
+struct snd_soc_codec_device soc_codec_dev_ad1938 = {
+ .probe = ad1938_probe,
+ .remove = ad1938_remove,
+ .suspend = ad1938_suspend,
+ .resume = ad1938_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_ad1938);
+
+static int __init ad1938_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&ad1938_spi_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register ad1938 SPI driver: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+module_init(ad1938_init);
+
+static void __exit ad1938_exit(void)
+{
+ spi_unregister_driver(&ad1938_spi_driver);
+}
+module_exit(ad1938_exit);
+
+MODULE_DESCRIPTION("ASoC ad1938 driver");
+MODULE_AUTHOR("Barry Song ");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ad1938.h b/sound/soc/codecs/ad1938.h
new file mode 100644
index 000000000000..fe3c48cd2d5b
--- /dev/null
+++ b/sound/soc/codecs/ad1938.h
@@ -0,0 +1,100 @@
+/*
+ * File: sound/soc/codecs/ad1938.h
+ * Based on:
+ * Author: Barry Song <Barry.Song@analog.com>
+ *
+ * Created: May 25, 2009
+ * Description: definitions for AD1938 registers
+ *
+ * Modified:
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __AD1938_H__
+#define __AD1938_H__
+
+#define AD1938_PLL_CLK_CTRL0 0
+#define AD1938_PLL_POWERDOWN 0x01
+#define AD1938_PLL_CLK_CTRL1 1
+#define AD1938_DAC_CTRL0 2
+#define AD1938_DAC_POWERDOWN 0x01
+#define AD1938_DAC_SERFMT_MASK 0xC0
+#define AD1938_DAC_SERFMT_STEREO (0 << 6)
+#define AD1938_DAC_SERFMT_TDM (1 << 6)
+#define AD1938_DAC_CTRL1 3
+#define AD1938_DAC_2_CHANNELS 0
+#define AD1938_DAC_4_CHANNELS 1
+#define AD1938_DAC_8_CHANNELS 2
+#define AD1938_DAC_16_CHANNELS 3
+#define AD1938_DAC_CHAN_SHFT 1
+#define AD1938_DAC_CHAN_MASK (3 << AD1938_DAC_CHAN_SHFT)
+#define AD1938_DAC_LCR_MASTER (1 << 4)
+#define AD1938_DAC_BCLK_MASTER (1 << 5)
+#define AD1938_DAC_LEFT_HIGH (1 << 3)
+#define AD1938_DAC_BCLK_INV (1 << 7)
+#define AD1938_DAC_CTRL2 4
+#define AD1938_DAC_WORD_LEN_MASK 0xC
+#define AD1938_DAC_MASTER_MUTE 1
+#define AD1938_DAC_CHNL_MUTE 5
+#define AD1938_DACL1_MUTE 0
+#define AD1938_DACR1_MUTE 1
+#define AD1938_DACL2_MUTE 2
+#define AD1938_DACR2_MUTE 3
+#define AD1938_DACL3_MUTE 4
+#define AD1938_DACR3_MUTE 5
+#define AD1938_DACL4_MUTE 6
+#define AD1938_DACR4_MUTE 7
+#define AD1938_DAC_L1_VOL 6
+#define AD1938_DAC_R1_VOL 7
+#define AD1938_DAC_L2_VOL 8
+#define AD1938_DAC_R2_VOL 9
+#define AD1938_DAC_L3_VOL 10
+#define AD1938_DAC_R3_VOL 11
+#define AD1938_DAC_L4_VOL 12
+#define AD1938_DAC_R4_VOL 13
+#define AD1938_ADC_CTRL0 14
+#define AD1938_ADC_POWERDOWN 0x01
+#define AD1938_ADC_HIGHPASS_FILTER 1
+#define AD1938_ADCL1_MUTE 2
+#define AD1938_ADCR1_MUTE 3
+#define AD1938_ADCL2_MUTE 4
+#define AD1938_ADCR2_MUTE 5
+#define AD1938_ADC_CTRL1 15
+#define AD1938_ADC_SERFMT_MASK 0x60
+#define AD1938_ADC_SERFMT_STEREO (0 << 5)
+#define AD1938_ADC_SERFMT_TDM (1 << 2)
+#define AD1938_ADC_SERFMT_AUX (2 << 5)
+#define AD1938_ADC_WORD_LEN_MASK 0x3
+#define AD1938_ADC_CTRL2 16
+#define AD1938_ADC_2_CHANNELS 0
+#define AD1938_ADC_4_CHANNELS 1
+#define AD1938_ADC_8_CHANNELS 2
+#define AD1938_ADC_16_CHANNELS 3
+#define AD1938_ADC_CHAN_SHFT 4
+#define AD1938_ADC_CHAN_MASK (3 << AD1938_ADC_CHAN_SHFT)
+#define AD1938_ADC_LCR_MASTER (1 << 3)
+#define AD1938_ADC_BCLK_MASTER (1 << 6)
+#define AD1938_ADC_LEFT_HIGH (1 << 2)
+#define AD1938_ADC_BCLK_INV (1 << 1)
+
+#define AD1938_NUM_REGS 17
+
+extern struct snd_soc_dai ad1938_dai;
+extern struct snd_soc_codec_device soc_codec_dev_ad1938;
+#endif
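
The DAC/ADC channel-count fields above are driven through the driver's
set_tdm_slot callback. As a hedged sketch (call site and variable names
hypothetical), a machine driver would typically pick the slot layout at stream
setup, e.g. 8 slots of 32 bits to match the Blackfin TDM DAI added earlier in
this diff:

	/* hypothetical fragment from a machine driver's hw_params */
	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xFF, 0xFF, 8, 32);
	if (ret < 0)
		return ret;

ad1938_set_tdm_slot ignores the masks and the slot width and only switches on
the slot count (2, 4, 8 or 16), so the masks here are purely illustrative.
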
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index dd3380202766..0abec0d29a96 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -59,21 +59,6 @@ static inline unsigned int ak4535_read_reg_cache(struct snd_soc_codec *codec,
return cache[reg];
}
-static inline unsigned int ak4535_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u8 data;
- data = reg;
-
- if (codec->hw_write(codec->control_data, &data, 1) != 1)
- return -EIO;
-
- if (codec->hw_read(codec->control_data, &data, 1) != 1)
- return -EIO;
-
- return data;
-};
-
/*
* write ak4535 register cache
*/
@@ -635,7 +620,6 @@ static int ak4535_probe(struct platform_device *pdev)
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
if (setup->i2c_address) {
codec->hw_write = (hw_write_t)i2c_master_send;
- codec->hw_read = (hw_read_t)i2c_master_recv;
ret = ak4535_add_i2c_device(pdev, setup);
}
#endif
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
new file mode 100644
index 000000000000..e057c7b578df
--- /dev/null
+++ b/sound/soc/codecs/ak4642.c
@@ -0,0 +1,502 @@
+/*
+ * ak4642.c -- AK4642/AK4643 ALSA Soc Audio driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on wm8731.c by Richard Purdie
+ * Based on ak4535.c by Richard Purdie
+ * Based on wm8753.c by Liam Girdwood
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ** CAUTION **
+ *
+ * This is a very simple driver.
+ * It supports headphone output / stereo input only.
+ *
+ * The AK4642 is untested.
+ * The AK4643 has been tested.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+
+#include "ak4642.h"
+
+#define AK4642_VERSION "0.0.1"
+
+#define PW_MGMT1 0x00
+#define PW_MGMT2 0x01
+#define SG_SL1 0x02
+#define SG_SL2 0x03
+#define MD_CTL1 0x04
+#define MD_CTL2 0x05
+#define TIMER 0x06
+#define ALC_CTL1 0x07
+#define ALC_CTL2 0x08
+#define L_IVC 0x09
+#define L_DVC 0x0a
+#define ALC_CTL3 0x0b
+#define R_IVC 0x0c
+#define R_DVC 0x0d
+#define MD_CTL3 0x0e
+#define MD_CTL4 0x0f
+#define PW_MGMT3 0x10
+#define DF_S 0x11
+#define FIL3_0 0x12
+#define FIL3_1 0x13
+#define FIL3_2 0x14
+#define FIL3_3 0x15
+#define EQ_0 0x16
+#define EQ_1 0x17
+#define EQ_2 0x18
+#define EQ_3 0x19
+#define EQ_4 0x1a
+#define EQ_5 0x1b
+#define FIL1_0 0x1c
+#define FIL1_1 0x1d
+#define FIL1_2 0x1e
+#define FIL1_3 0x1f
+#define PW_MGMT4 0x20
+#define MD_CTL5 0x21
+#define LO_MS 0x22
+#define HP_MS 0x23
+#define SPK_MS 0x24
+
+#define AK4642_CACHEREGNUM 0x25
+
+struct snd_soc_codec_device soc_codec_dev_ak4642;
+
+/* codec private data */
+struct ak4642_priv {
+ struct snd_soc_codec codec;
+ unsigned int sysclk;
+};
+
+static struct snd_soc_codec *ak4642_codec;
+
+/*
+ * ak4642 register cache
+ */
+static const u16 ak4642_reg[AK4642_CACHEREGNUM] = {
+ 0x0000, 0x0000, 0x0001, 0x0000,
+ 0x0002, 0x0000, 0x0000, 0x0000,
+ 0x00e1, 0x00e1, 0x0018, 0x0000,
+ 0x00e1, 0x0018, 0x0011, 0x0008,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000,
+};
+
+/*
+ * read ak4642 register cache
+ */
+static inline unsigned int ak4642_read_reg_cache(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u16 *cache = codec->reg_cache;
+ if (reg >= AK4642_CACHEREGNUM)
+ return -1;
+ return cache[reg];
+}
+
+/*
+ * write ak4642 register cache
+ */
+static inline void ak4642_write_reg_cache(struct snd_soc_codec *codec,
+ u16 reg, unsigned int value)
+{
+ u16 *cache = codec->reg_cache;
+ if (reg >= AK4642_CACHEREGNUM)
+ return;
+
+ cache[reg] = value;
+}
+
+/*
+ * write to the AK4642 register space
+ */
+static int ak4642_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u8 data[2];
+
+ /* data is
+ * D15..D8 AK4642 register offset
+ * D7...D0 register data
+ */
+ data[0] = reg & 0xff;
+ data[1] = value & 0xff;
+
+ if (codec->hw_write(codec->control_data, data, 2) == 2) {
+ ak4642_write_reg_cache(codec, reg, value);
+ return 0;
+ } else
+ return -EIO;
+}
+
+static int ak4642_sync(struct snd_soc_codec *codec)
+{
+ u16 *cache = codec->reg_cache;
+ int i, r = 0;
+
+ for (i = 0; i < AK4642_CACHEREGNUM; i++)
+ r |= ak4642_write(codec, i, cache[i]);
+
+ return r;
+};
+
+static int ak4642_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct snd_soc_codec *codec = dai->codec;
+
+ if (is_play) {
+ /*
+ * start headphone output
+ *
+ * PLL, Master Mode
+ * Audio I/F Format :MSB justified (ADC & DAC)
+ * Sampling Frequency: 44.1kHz
+ * Digital Volume: −8dB
+ * Bass Boost Level : Middle
+ *
+ * This operation came from example code of
+ * "ASAHI KASEI AK4642" (japanese) manual p97.
+ *
+ * Example code use 0x39, 0x79 value for 0x01 address,
+ * But we need MCKO (0x02) bit now
+ */
+ ak4642_write(codec, 0x05, 0x27);
+ ak4642_write(codec, 0x0f, 0x09);
+ ak4642_write(codec, 0x0e, 0x19);
+ ak4642_write(codec, 0x09, 0x91);
+ ak4642_write(codec, 0x0c, 0x91);
+ ak4642_write(codec, 0x0a, 0x28);
+ ak4642_write(codec, 0x0d, 0x28);
+ ak4642_write(codec, 0x00, 0x64);
+ ak4642_write(codec, 0x01, 0x3b); /* + MCKO bit */
+ ak4642_write(codec, 0x01, 0x7b); /* + MCKO bit */
+ } else {
+ /*
+ * start stereo input
+ *
+ * PLL Master Mode
+ * Audio I/F Format:MSB justified (ADC & DAC)
+ * Sampling Frequency:44.1kHz
+ * Pre MIC AMP:+20dB
+ * MIC Power On
+ * ALC setting:Refer to Table 35
+ * ALC bit=“1”
+ *
+ * This operation came from example code of
+ * "ASAHI KASEI AK4642" (japanese) manual p94.
+ */
+ ak4642_write(codec, 0x05, 0x27);
+ ak4642_write(codec, 0x02, 0x05);
+ ak4642_write(codec, 0x06, 0x3c);
+ ak4642_write(codec, 0x08, 0xe1);
+ ak4642_write(codec, 0x0b, 0x00);
+ ak4642_write(codec, 0x07, 0x21);
+ ak4642_write(codec, 0x00, 0x41);
+ ak4642_write(codec, 0x10, 0x01);
+ }
+
+ return 0;
+}
+
+static void ak4642_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct snd_soc_codec *codec = dai->codec;
+
+ if (is_play) {
+ /* stop headphone output */
+ ak4642_write(codec, 0x01, 0x3b);
+ ak4642_write(codec, 0x01, 0x0b);
+ ak4642_write(codec, 0x00, 0x40);
+ ak4642_write(codec, 0x0e, 0x11);
+ ak4642_write(codec, 0x0f, 0x08);
+ } else {
+ /* stop stereo input */
+ ak4642_write(codec, 0x00, 0x40);
+ ak4642_write(codec, 0x10, 0x00);
+ ak4642_write(codec, 0x07, 0x01);
+ }
+}
+
+static int ak4642_dai_set_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct ak4642_priv *ak4642 = codec->private_data;
+
+ ak4642->sysclk = freq;
+ return 0;
+}
+
+static struct snd_soc_dai_ops ak4642_dai_ops = {
+ .startup = ak4642_dai_startup,
+ .shutdown = ak4642_dai_shutdown,
+ .set_sysclk = ak4642_dai_set_sysclk,
+};
+
+struct snd_soc_dai ak4642_dai = {
+ .name = "AK4642",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE },
+ .ops = &ak4642_dai_ops,
+};
+EXPORT_SYMBOL_GPL(ak4642_dai);
+
+static int ak4642_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ ak4642_sync(codec);
+ return 0;
+}
+
+/*
+ * initialise the AK4642 driver
+ * register the mixer and dsp interfaces with the kernel
+ */
+static int ak4642_init(struct ak4642_priv *ak4642)
+{
+ struct snd_soc_codec *codec = &ak4642->codec;
+ int ret = 0;
+
+ if (ak4642_codec) {
+ dev_err(codec->dev, "Another ak4642 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = ak4642;
+ codec->name = "AK4642";
+ codec->owner = THIS_MODULE;
+ codec->read = ak4642_read_reg_cache;
+ codec->write = ak4642_write;
+ codec->dai = &ak4642_dai;
+ codec->num_dai = 1;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+ codec->reg_cache_size = ARRAY_SIZE(ak4642_reg);
+ codec->reg_cache = kmemdup(ak4642_reg,
+ sizeof(ak4642_reg), GFP_KERNEL);
+
+ if (!codec->reg_cache)
+ return -ENOMEM;
+
+ ak4642_dai.dev = codec->dev;
+ ak4642_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ goto reg_cache_err;
+ }
+
+ ret = snd_soc_register_dai(&ak4642_dai);
+ if (ret) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ goto reg_cache_err;
+ }
+
+ /*
+ * clock setting
+ *
+ * Audio I/F Format: MSB justified (ADC & DAC)
+ * BICK frequency at Master Mode: 64fs
+ * Input Master Clock Select at PLL Mode: 11.2896MHz
+ * MCKO: Enable
+ * Sampling Frequency: 44.1kHz
+ *
+ * This operation came from example code of
+ * "ASAHI KASEI AK4642" (japanese) manual p89.
+ *
+ * please fix-me
+ */
+ ak4642_write(codec, 0x01, 0x08);
+ ak4642_write(codec, 0x04, 0x4a);
+ ak4642_write(codec, 0x05, 0x27);
+ ak4642_write(codec, 0x00, 0x40);
+ ak4642_write(codec, 0x01, 0x0b);
+
+ return ret;
+
+reg_cache_err:
+ kfree(codec->reg_cache);
+ codec->reg_cache = NULL;
+
+ return ret;
+}
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static int ak4642_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct ak4642_priv *ak4642;
+ struct snd_soc_codec *codec;
+ int ret;
+
+ ak4642 = kzalloc(sizeof(struct ak4642_priv), GFP_KERNEL);
+ if (!ak4642)
+ return -ENOMEM;
+
+ codec = &ak4642->codec;
+ codec->dev = &i2c->dev;
+
+ i2c_set_clientdata(i2c, ak4642);
+ codec->control_data = i2c;
+
+ ret = ak4642_init(ak4642);
+ if (ret < 0)
+ printk(KERN_ERR "failed to initialise AK4642\n");
+
+ return ret;
+}
+
+static int ak4642_i2c_remove(struct i2c_client *client)
+{
+ struct ak4642_priv *ak4642 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_dai(&ak4642_dai);
+ snd_soc_unregister_codec(&ak4642->codec);
+ kfree(ak4642->codec.reg_cache);
+ kfree(ak4642);
+ ak4642_codec = NULL;
+
+ return 0;
+}
+
+static const struct i2c_device_id ak4642_i2c_id[] = {
+ { "ak4642", 0 },
+ { "ak4643", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ak4642_i2c_id);
+
+static struct i2c_driver ak4642_i2c_driver = {
+ .driver = {
+ .name = "AK4642 I2C Codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = ak4642_i2c_probe,
+ .remove = ak4642_i2c_remove,
+ .id_table = ak4642_i2c_id,
+};
+
+#endif
+
+static int ak4642_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ int ret;
+
+ if (!ak4642_codec) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = ak4642_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ printk(KERN_ERR "ak4642: failed to create pcms\n");
+ goto pcm_err;
+ }
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ printk(KERN_ERR "ak4642: failed to register card\n");
+ goto card_err;
+ }
+
+ dev_info(&pdev->dev, "AK4642 Audio Codec %s\n", AK4642_VERSION);
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+
+}
+
+/* power down chip */
+static int ak4642_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_ak4642 = {
+ .probe = ak4642_probe,
+ .remove = ak4642_remove,
+ .resume = ak4642_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_ak4642);
+
+static int __init ak4642_modinit(void)
+{
+ int ret = 0;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&ak4642_i2c_driver);
+#endif
+ return ret;
+}
+module_init(ak4642_modinit);
+
+static void __exit ak4642_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&ak4642_i2c_driver);
+#endif
+
+}
+module_exit(ak4642_exit);
+
+MODULE_DESCRIPTION("Soc AK4642 driver");
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ak4642.h b/sound/soc/codecs/ak4642.h
new file mode 100644
index 000000000000..e476833d314e
--- /dev/null
+++ b/sound/soc/codecs/ak4642.h
@@ -0,0 +1,20 @@
+/*
+ * ak4642.h -- AK4642 Soc Audio driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on ak4535.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _AK4642_H
+#define _AK4642_H
+
+extern struct snd_soc_dai ak4642_dai;
+extern struct snd_soc_codec_device soc_codec_dev_ak4642;
+
+#endif
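
For context: the two symbols exported by this header are what a machine driver wires into its ASoC (v1) card definition. The fragment below is only a hypothetical sketch -- example_cpu_dai, example_soc_platform and the board names are placeholders, not part of this patch.

#include <sound/soc.h>
#include "../codecs/ak4642.h"

/* Placeholders standing in for the platform's CPU DAI and DMA platform */
extern struct snd_soc_dai example_cpu_dai;
extern struct snd_soc_platform example_soc_platform;

static struct snd_soc_dai_link example_dai_link = {
	.name		= "AK4642",
	.stream_name	= "AK4642 HiFi",
	.cpu_dai	= &example_cpu_dai,	/* platform-specific CPU DAI */
	.codec_dai	= &ak4642_dai,		/* exported by ak4642.c */
};

static struct snd_soc_card example_card = {
	.name		= "example-ak4642-board",
	.platform	= &example_soc_platform,
	.dai_link	= &example_dai_link,
	.num_links	= 1,
};

static struct snd_soc_device example_snd_devdata = {
	.card		= &example_card,
	.codec_dev	= &soc_codec_dev_ak4642,	/* exported by ak4642.c */
};
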
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index a32b8226c8a4..ca1e24a8f12a 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -806,15 +806,30 @@ static int cs4270_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
{
struct cs4270_private *cs4270 = i2c_get_clientdata(client);
struct snd_soc_codec *codec = &cs4270->codec;
- int reg = snd_soc_read(codec, CS4270_PWRCTL) | CS4270_PWRCTL_PDN_ALL;
- return snd_soc_write(codec, CS4270_PWRCTL, reg);
+ return snd_soc_suspend_device(codec->dev);
}
static int cs4270_i2c_resume(struct i2c_client *client)
{
struct cs4270_private *cs4270 = i2c_get_clientdata(client);
struct snd_soc_codec *codec = &cs4270->codec;
+
+ return snd_soc_resume_device(codec->dev);
+}
+
+static int cs4270_soc_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct snd_soc_codec *codec = cs4270_codec;
+ int reg = snd_soc_read(codec, CS4270_PWRCTL) | CS4270_PWRCTL_PDN_ALL;
+
+ return snd_soc_write(codec, CS4270_PWRCTL, reg);
+}
+
+static int cs4270_soc_resume(struct platform_device *pdev)
+{
+ struct snd_soc_codec *codec = cs4270_codec;
+ struct i2c_client *i2c_client = codec->control_data;
int reg;
/* In case the device was put to hard reset during sleep, we need to
@@ -825,7 +840,7 @@ static int cs4270_i2c_resume(struct i2c_client *client)
for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
u8 val = snd_soc_read(codec, reg);
- if (i2c_smbus_write_byte_data(client, reg, val)) {
+ if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
dev_err(codec->dev, "i2c write failed\n");
return -EIO;
}
@@ -840,6 +855,8 @@ static int cs4270_i2c_resume(struct i2c_client *client)
#else
#define cs4270_i2c_suspend NULL
#define cs4270_i2c_resume NULL
+#define cs4270_soc_suspend NULL
+#define cs4270_soc_resume NULL
#endif /* CONFIG_PM */
/*
@@ -868,7 +885,9 @@ static struct i2c_driver cs4270_i2c_driver = {
*/
struct snd_soc_codec_device soc_codec_device_cs4270 = {
.probe = cs4270_probe,
- .remove = cs4270_remove
+ .remove = cs4270_remove,
+ .suspend = cs4270_soc_suspend,
+ .resume = cs4270_soc_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_device_cs4270);
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
new file mode 100644
index 000000000000..38eac9c866e1
--- /dev/null
+++ b/sound/soc/codecs/cx20442.c
@@ -0,0 +1,501 @@
+/*
+ * cx20442.c -- CX20442 ALSA Soc Audio driver
+ *
+ * Copyright 2009 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * Initially based on sound/soc/codecs/wm8400.c
+ * Copyright 2008, 2009 Wolfson Microelectronics PLC.
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/tty.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/soc-dapm.h>
+
+#include "cx20442.h"
+
+
+struct cx20442_priv {
+ struct snd_soc_codec codec;
+ u8 reg_cache[1];
+};
+
+#define CX20442_PM 0x0
+
+#define CX20442_TELIN 0
+#define CX20442_TELOUT 1
+#define CX20442_MIC 2
+#define CX20442_SPKOUT 3
+#define CX20442_AGC 4
+
+static const struct snd_soc_dapm_widget cx20442_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("TELOUT"),
+ SND_SOC_DAPM_OUTPUT("SPKOUT"),
+ SND_SOC_DAPM_OUTPUT("AGCOUT"),
+
+ SND_SOC_DAPM_MIXER("SPKOUT Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("TELOUT Amp", CX20442_PM, CX20442_TELOUT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("SPKOUT Amp", CX20442_PM, CX20442_SPKOUT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("SPKOUT AGC", CX20442_PM, CX20442_AGC, 0, NULL, 0),
+
+ SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_MIXER("Input Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MICBIAS("TELIN Bias", CX20442_PM, CX20442_TELIN, 0),
+ SND_SOC_DAPM_MICBIAS("MIC Bias", CX20442_PM, CX20442_MIC, 0),
+
+ SND_SOC_DAPM_PGA("MIC AGC", CX20442_PM, CX20442_AGC, 0, NULL, 0),
+
+ SND_SOC_DAPM_INPUT("TELIN"),
+ SND_SOC_DAPM_INPUT("MIC"),
+ SND_SOC_DAPM_INPUT("AGCIN"),
+};
+
+static const struct snd_soc_dapm_route cx20442_audio_map[] = {
+ {"TELOUT", NULL, "TELOUT Amp"},
+
+ {"SPKOUT", NULL, "SPKOUT Mixer"},
+ {"SPKOUT Mixer", NULL, "SPKOUT Amp"},
+
+ {"TELOUT Amp", NULL, "DAC"},
+ {"SPKOUT Amp", NULL, "DAC"},
+
+ {"SPKOUT Mixer", NULL, "SPKOUT AGC"},
+ {"SPKOUT AGC", NULL, "AGCIN"},
+
+ {"AGCOUT", NULL, "MIC AGC"},
+ {"MIC AGC", NULL, "MIC"},
+
+ {"MIC Bias", NULL, "MIC"},
+ {"Input Mixer", NULL, "MIC Bias"},
+
+ {"TELIN Bias", NULL, "TELIN"},
+ {"Input Mixer", NULL, "TELIN Bias"},
+
+ {"ADC", NULL, "Input Mixer"},
+};
+
+static int cx20442_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(codec, cx20442_dapm_widgets,
+ ARRAY_SIZE(cx20442_dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, cx20442_audio_map,
+ ARRAY_SIZE(cx20442_audio_map));
+
+ snd_soc_dapm_new_widgets(codec);
+ return 0;
+}
+
+static unsigned int cx20442_read_reg_cache(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u8 *reg_cache = codec->reg_cache;
+
+ if (reg >= codec->reg_cache_size)
+ return -EINVAL;
+
+ return reg_cache[reg];
+}
+
+enum v253_vls {
+ V253_VLS_NONE = 0,
+ V253_VLS_T,
+ V253_VLS_L,
+ V253_VLS_LT,
+ V253_VLS_S,
+ V253_VLS_ST,
+ V253_VLS_M,
+ V253_VLS_MST,
+ V253_VLS_S1,
+ V253_VLS_S1T,
+ V253_VLS_MS1T,
+ V253_VLS_M1,
+ V253_VLS_M1ST,
+ V253_VLS_M1S1T,
+ V253_VLS_H,
+ V253_VLS_HT,
+ V253_VLS_MS,
+ V253_VLS_MS1,
+ V253_VLS_M1S,
+ V253_VLS_M1S1,
+ V253_VLS_TEST,
+};
+
+static int cx20442_pm_to_v253_vls(u8 value)
+{
+ switch (value & ~(1 << CX20442_AGC)) {
+ case 0:
+ return V253_VLS_T;
+ case (1 << CX20442_SPKOUT):
+ case (1 << CX20442_MIC):
+ case (1 << CX20442_SPKOUT) | (1 << CX20442_MIC):
+ return V253_VLS_M1S1;
+ case (1 << CX20442_TELOUT):
+ case (1 << CX20442_TELIN):
+ case (1 << CX20442_TELOUT) | (1 << CX20442_TELIN):
+ return V253_VLS_L;
+ case (1 << CX20442_TELOUT) | (1 << CX20442_MIC):
+ return V253_VLS_NONE;
+ }
+ return -EINVAL;
+}
+static int cx20442_pm_to_v253_vsp(u8 value)
+{
+ switch (value & ~(1 << CX20442_AGC)) {
+ case (1 << CX20442_SPKOUT):
+ case (1 << CX20442_MIC):
+ case (1 << CX20442_SPKOUT) | (1 << CX20442_MIC):
+ return (bool)(value & (1 << CX20442_AGC));
+ }
+ return (value & (1 << CX20442_AGC)) ? -EINVAL : 0;
+}
+
+static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u8 *reg_cache = codec->reg_cache;
+ int vls, vsp, old, len;
+ char buf[18];
+
+ if (reg >= codec->reg_cache_size)
+ return -EINVAL;
+
+ /* hw_write and control_data pointers required for talking to the modem
+ * are expected to be set by the line discipline initialization code */
+ if (!codec->hw_write || !codec->control_data)
+ return -EIO;
+
+ old = reg_cache[reg];
+ reg_cache[reg] = value;
+
+ vls = cx20442_pm_to_v253_vls(value);
+ if (vls < 0)
+ return vls;
+
+ vsp = cx20442_pm_to_v253_vsp(value);
+ if (vsp < 0)
+ return vsp;
+
+ if ((vls == V253_VLS_T) ||
+ (vls == cx20442_pm_to_v253_vls(old))) {
+ if (vsp == cx20442_pm_to_v253_vsp(old))
+ return 0;
+ len = snprintf(buf, ARRAY_SIZE(buf), "at+vsp=%d\r", vsp);
+ } else if (vsp == cx20442_pm_to_v253_vsp(old))
+ len = snprintf(buf, ARRAY_SIZE(buf), "at+vls=%d\r", vls);
+ else
+ len = snprintf(buf, ARRAY_SIZE(buf),
+ "at+vls=%d;+vsp=%d\r", vls, vsp);
+
+ if (unlikely(len > (ARRAY_SIZE(buf) - 1)))
+ return -ENOMEM;
+
+ dev_dbg(codec->dev, "%s: %s\n", __func__, buf);
+ if (codec->hw_write(codec->control_data, buf, len) != len)
+ return -EIO;
+
+ return 0;
+}
+
+
+/* Moved up here as the line discipline refers to it during initialization */
+static struct snd_soc_codec *cx20442_codec;
+
+
+/*
+ * Line discipline related code
+ *
+ * Any of the callback functions below can be used in two ways:
+ * 1) registered by a machine driver as one of the line discipline operations,
+ * 2) called from a line discipline callback provided by the machine driver,
+ *    when extra machine-specific code must run as well.
+ */
+
+/* Modem init: echo off, digital speaker off, quiet off, voice mode */
+static const char *v253_init = "ate0m0q0+fclass=8\r";
+
+/* Line discipline .open() */
+static int v253_open(struct tty_struct *tty)
+{
+ struct snd_soc_codec *codec = cx20442_codec;
+ int ret, len = strlen(v253_init);
+
+ /* Doesn't make sense without write callback */
+ if (!tty->ops->write)
+ return -EINVAL;
+
+ /* Pass the codec structure address for use by other ldisc callbacks */
+ tty->disc_data = codec;
+
+ if (tty->ops->write(tty, v253_init, len) != len) {
+ ret = -EIO;
+ goto err;
+ }
+ /* Actual setup will be performed after the modem responds. */
+ return 0;
+err:
+ tty->disc_data = NULL;
+ return ret;
+}
+
+/* Line discipline .close() */
+static void v253_close(struct tty_struct *tty)
+{
+ struct snd_soc_codec *codec = tty->disc_data;
+
+ tty->disc_data = NULL;
+
+ if (!codec)
+ return;
+
+ /* Prevent the codec driver from further accessing the modem */
+ codec->hw_write = NULL;
+ codec->control_data = NULL;
+ codec->pop_time = 0;
+}
+
+/* Line discipline .hangup() */
+static int v253_hangup(struct tty_struct *tty)
+{
+ v253_close(tty);
+ return 0;
+}
+
+/* Line discipline .receive_buf() */
+static void v253_receive(struct tty_struct *tty,
+ const unsigned char *cp, char *fp, int count)
+{
+ struct snd_soc_codec *codec = tty->disc_data;
+
+ if (!codec)
+ return;
+
+ if (!codec->control_data) {
+ /* First modem response, complete setup procedure */
+
+ /* Set up codec driver access to modem controls */
+ codec->control_data = tty;
+ codec->hw_write = (hw_write_t)tty->ops->write;
+ codec->pop_time = 1;
+ }
+}
+
+/* Line discipline .write_wakeup() */
+static void v253_wakeup(struct tty_struct *tty)
+{
+}
+
+struct tty_ldisc_ops v253_ops = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "cx20442",
+ .owner = THIS_MODULE,
+ .open = v253_open,
+ .close = v253_close,
+ .hangup = v253_hangup,
+ .receive_buf = v253_receive,
+ .write_wakeup = v253_wakeup,
+};
+EXPORT_SYMBOL_GPL(v253_ops);
+
+
+/*
+ * Codec DAI
+ */
+
+struct snd_soc_dai cx20442_dai = {
+ .name = "CX20442",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+EXPORT_SYMBOL_GPL(cx20442_dai);
+
+static int cx20442_codec_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret;
+
+ if (!cx20442_codec) {
+ dev_err(&pdev->dev, "cx20442 not yet discovered\n");
+ return -ENODEV;
+ }
+ codec = cx20442_codec;
+
+ socdev->card->codec = codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create pcms\n");
+ goto pcm_err;
+ }
+
+ cx20442_add_widgets(codec);
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register card\n");
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+/* power down chip */
+static int cx20442_codec_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device cx20442_codec_dev = {
+ .probe = cx20442_codec_probe,
+ .remove = cx20442_codec_remove,
+};
+EXPORT_SYMBOL_GPL(cx20442_codec_dev);
+
+static int cx20442_register(struct cx20442_priv *cx20442)
+{
+ struct snd_soc_codec *codec = &cx20442->codec;
+ int ret;
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->name = "CX20442";
+ codec->owner = THIS_MODULE;
+ codec->private_data = cx20442;
+
+ codec->dai = &cx20442_dai;
+ codec->num_dai = 1;
+
+ codec->reg_cache = &cx20442->reg_cache;
+ codec->reg_cache_size = ARRAY_SIZE(cx20442->reg_cache);
+ codec->read = cx20442_read_reg_cache;
+ codec->write = cx20442_write;
+
+ codec->bias_level = SND_SOC_BIAS_OFF;
+
+ cx20442_dai.dev = codec->dev;
+
+ cx20442_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ goto err;
+ }
+
+ ret = snd_soc_register_dai(&cx20442_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ goto err_codec;
+ }
+
+ return 0;
+
+err_codec:
+ snd_soc_unregister_codec(codec);
+err:
+ cx20442_codec = NULL;
+ kfree(cx20442);
+ return ret;
+}
+
+static void cx20442_unregister(struct cx20442_priv *cx20442)
+{
+ snd_soc_unregister_dai(&cx20442_dai);
+ snd_soc_unregister_codec(&cx20442->codec);
+
+ cx20442_codec = NULL;
+ kfree(cx20442);
+}
+
+static int cx20442_platform_probe(struct platform_device *pdev)
+{
+ struct cx20442_priv *cx20442;
+ struct snd_soc_codec *codec;
+
+ cx20442 = kzalloc(sizeof(struct cx20442_priv), GFP_KERNEL);
+ if (cx20442 == NULL)
+ return -ENOMEM;
+
+ codec = &cx20442->codec;
+
+ codec->control_data = NULL;
+ codec->hw_write = NULL;
+ codec->pop_time = 0;
+
+ codec->dev = &pdev->dev;
+ platform_set_drvdata(pdev, cx20442);
+
+ return cx20442_register(cx20442);
+}
+
+static int __exit cx20442_platform_remove(struct platform_device *pdev)
+{
+ struct cx20442_priv *cx20442 = platform_get_drvdata(pdev);
+
+ cx20442_unregister(cx20442);
+ return 0;
+}
+
+static struct platform_driver cx20442_platform_driver = {
+ .driver = {
+ .name = "cx20442",
+ .owner = THIS_MODULE,
+ },
+ .probe = cx20442_platform_probe,
+ .remove = __exit_p(cx20442_platform_remove),
+};
+
+static int __init cx20442_init(void)
+{
+ return platform_driver_register(&cx20442_platform_driver);
+}
+module_init(cx20442_init);
+
+static void __exit cx20442_exit(void)
+{
+ platform_driver_unregister(&cx20442_platform_driver);
+}
+module_exit(cx20442_exit);
+
+MODULE_DESCRIPTION("ASoC CX20442-11 voice modem codec driver");
+MODULE_AUTHOR("Janusz Krzysztofik");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cx20442");
diff --git a/sound/soc/codecs/cx20442.h b/sound/soc/codecs/cx20442.h
new file mode 100644
index 000000000000..688a5eb62e17
--- /dev/null
+++ b/sound/soc/codecs/cx20442.h
@@ -0,0 +1,20 @@
+/*
+ * cx20442.h -- audio driver for CX20442
+ *
+ * Copyright 2009 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _CX20442_CODEC_H
+#define _CX20442_CODEC_H
+
+extern struct snd_soc_dai cx20442_dai;
+extern struct snd_soc_codec_device cx20442_codec_dev;
+extern struct tty_ldisc_ops v253_ops;
+
+#endif
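
The v253_ops exported above is the line discipline through which the codec driver talks to the voice modem; it is meant to be registered by the machine driver once a modem tty is available. The fragment below is only a hypothetical sketch (assuming the generic N_V253 line-discipline number; function names are placeholders).

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include "../codecs/cx20442.h"

/* Hypothetical machine-driver fragment: register the codec's V.253 line
 * discipline with the tty layer so it can be attached to the modem tty. */
static int __init example_machine_init(void)
{
	int ret;

	ret = tty_register_ldisc(N_V253, &v253_ops);
	if (ret)
		pr_err("example: failed to register V.253 ldisc: %d\n", ret);
	return ret;
}

static void __exit example_machine_exit(void)
{
	tty_unregister_ldisc(N_V253);
}
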
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
new file mode 100644
index 000000000000..9e7e964a5fa3
--- /dev/null
+++ b/sound/soc/codecs/max9877.c
@@ -0,0 +1,308 @@
+/*
+ * max9877.c -- amp driver for max9877
+ *
+ * Copyright (C) 2009 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "max9877.h"
+
+static struct i2c_client *i2c;
+
+static u8 max9877_regs[5] = { 0x40, 0x00, 0x00, 0x00, 0x49 };
+
+static void max9877_write_regs(void)
+{
+ unsigned int i;
+ u8 data[6];
+
+ data[0] = MAX9877_INPUT_MODE;
+ for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
+ data[i + 1] = max9877_regs[i];
+
+ if (i2c_master_send(i2c, data, 6) != 6)
+ dev_err(&i2c->dev, "i2c write failed\n");
+}
+
+static int max9877_get_reg(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int shift = mc->shift;
+ unsigned int mask = mc->max;
+ unsigned int invert = mc->invert;
+
+ ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
+
+ if (invert)
+ ucontrol->value.integer.value[0] =
+ mask - ucontrol->value.integer.value[0];
+
+ return 0;
+}
+
+static int max9877_set_reg(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int shift = mc->shift;
+ unsigned int mask = mc->max;
+ unsigned int invert = mc->invert;
+ unsigned int val = (ucontrol->value.integer.value[0] & mask);
+
+ if (invert)
+ val = mask - val;
+
+ if (((max9877_regs[reg] >> shift) & mask) == val)
+ return 0;
+
+ max9877_regs[reg] &= ~(mask << shift);
+ max9877_regs[reg] |= val << shift;
+ max9877_write_regs();
+
+ return 1;
+}
+
+static int max9877_get_2reg(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int reg2 = mc->rreg;
+ unsigned int shift = mc->shift;
+ unsigned int mask = mc->max;
+
+ ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
+ ucontrol->value.integer.value[1] = (max9877_regs[reg2] >> shift) & mask;
+
+ return 0;
+}
+
+static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ unsigned int reg = mc->reg;
+ unsigned int reg2 = mc->rreg;
+ unsigned int shift = mc->shift;
+ unsigned int mask = mc->max;
+ unsigned int val = (ucontrol->value.integer.value[0] & mask);
+ unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
+ unsigned int change = 0;
+
+ /* report a change (and rewrite the registers) if either channel differs */
+ if (((max9877_regs[reg] >> shift) & mask) != val ||
+ ((max9877_regs[reg2] >> shift) & mask) != val2)
+ change = 1;
+
+ if (change) {
+ max9877_regs[reg] &= ~(mask << shift);
+ max9877_regs[reg] |= val << shift;
+ max9877_regs[reg2] &= ~(mask << shift);
+ max9877_regs[reg2] |= val2 << shift;
+ max9877_write_regs();
+ }
+
+ return change;
+}
+
+static int max9877_get_out_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 value = max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK;
+
+ if (value)
+ value -= 1;
+
+ ucontrol->value.integer.value[0] = value;
+ return 0;
+}
+
+static int max9877_set_out_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 value = ucontrol->value.integer.value[0];
+
+ value += 1;
+
+ if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK) == value)
+ return 0;
+
+ max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OUTMODE_MASK;
+ max9877_regs[MAX9877_OUTPUT_MODE] |= value;
+ max9877_write_regs();
+ return 1;
+}
+
+static int max9877_get_osc_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 value = (max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK);
+
+ value = value >> MAX9877_OSC_OFFSET;
+
+ ucontrol->value.integer.value[0] = value;
+ return 0;
+}
+
+static int max9877_set_osc_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 value = ucontrol->value.integer.value[0];
+
+ value = value << MAX9877_OSC_OFFSET;
+ if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK) == value)
+ return 0;
+
+ max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OSC_MASK;
+ max9877_regs[MAX9877_OUTPUT_MODE] |= value;
+ max9877_write_regs();
+ return 1;
+}
+
+static const unsigned int max9877_pgain_tlv[] = {
+ TLV_DB_RANGE_HEAD(2),
+ 0, 1, TLV_DB_SCALE_ITEM(0, 900, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0),
+};
+
+static const unsigned int max9877_output_tlv[] = {
+ TLV_DB_RANGE_HEAD(4),
+ 0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
+ 8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
+ 16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
+ 24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
+};
+
+static const char *max9877_out_mode[] = {
+ "INA -> SPK",
+ "INA -> HP",
+ "INA -> SPK and HP",
+ "INB -> SPK",
+ "INB -> HP",
+ "INB -> SPK and HP",
+ "INA + INB -> SPK",
+ "INA + INB -> HP",
+ "INA + INB -> SPK and HP",
+};
+
+static const char *max9877_osc_mode[] = {
+ "1176KHz",
+ "1100KHz",
+ "700KHz",
+};
+
+static const struct soc_enum max9877_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_out_mode), max9877_out_mode),
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_osc_mode), max9877_osc_mode),
+};
+
+static const struct snd_kcontrol_new max9877_controls[] = {
+ SOC_SINGLE_EXT_TLV("MAX9877 PGAINA Playback Volume",
+ MAX9877_INPUT_MODE, 0, 2, 0,
+ max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
+ SOC_SINGLE_EXT_TLV("MAX9877 PGAINB Playback Volume",
+ MAX9877_INPUT_MODE, 2, 2, 0,
+ max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
+ SOC_SINGLE_EXT_TLV("MAX9877 Amp Speaker Playback Volume",
+ MAX9877_SPK_VOLUME, 0, 31, 0,
+ max9877_get_reg, max9877_set_reg, max9877_output_tlv),
+ SOC_DOUBLE_R_EXT_TLV("MAX9877 Amp HP Playback Volume",
+ MAX9877_HPL_VOLUME, MAX9877_HPR_VOLUME, 0, 31, 0,
+ max9877_get_2reg, max9877_set_2reg, max9877_output_tlv),
+ SOC_SINGLE_EXT("MAX9877 INB Stereo Switch",
+ MAX9877_INPUT_MODE, 4, 1, 1,
+ max9877_get_reg, max9877_set_reg),
+ SOC_SINGLE_EXT("MAX9877 INA Stereo Switch",
+ MAX9877_INPUT_MODE, 5, 1, 1,
+ max9877_get_reg, max9877_set_reg),
+ SOC_SINGLE_EXT("MAX9877 Zero-crossing detection Switch",
+ MAX9877_INPUT_MODE, 6, 1, 0,
+ max9877_get_reg, max9877_set_reg),
+ SOC_SINGLE_EXT("MAX9877 Bypass Mode Switch",
+ MAX9877_OUTPUT_MODE, 6, 1, 0,
+ max9877_get_reg, max9877_set_reg),
+ SOC_SINGLE_EXT("MAX9877 Shutdown Mode Switch",
+ MAX9877_OUTPUT_MODE, 7, 1, 1,
+ max9877_get_reg, max9877_set_reg),
+ SOC_ENUM_EXT("MAX9877 Output Mode", max9877_enum[0],
+ max9877_get_out_mode, max9877_set_out_mode),
+ SOC_ENUM_EXT("MAX9877 Oscillator Mode", max9877_enum[1],
+ max9877_get_osc_mode, max9877_set_osc_mode),
+};
+
+/* This function is called from an ASoC machine driver */
+int max9877_add_controls(struct snd_soc_codec *codec)
+{
+ return snd_soc_add_controls(codec, max9877_controls,
+ ARRAY_SIZE(max9877_controls));
+}
+EXPORT_SYMBOL_GPL(max9877_add_controls);
+
+static int __devinit max9877_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ i2c = client;
+
+ max9877_write_regs();
+
+ return 0;
+}
+
+static __devexit int max9877_i2c_remove(struct i2c_client *client)
+{
+ i2c = NULL;
+
+ return 0;
+}
+
+static const struct i2c_device_id max9877_i2c_id[] = {
+ { "max9877", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max9877_i2c_id);
+
+static struct i2c_driver max9877_i2c_driver = {
+ .driver = {
+ .name = "max9877",
+ .owner = THIS_MODULE,
+ },
+ .probe = max9877_i2c_probe,
+ .remove = __devexit_p(max9877_i2c_remove),
+ .id_table = max9877_i2c_id,
+};
+
+static int __init max9877_init(void)
+{
+ return i2c_add_driver(&max9877_i2c_driver);
+}
+module_init(max9877_init);
+
+static void __exit max9877_exit(void)
+{
+ i2c_del_driver(&max9877_i2c_driver);
+}
+module_exit(max9877_exit);
+
+MODULE_DESCRIPTION("ASoC MAX9877 amp driver");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max9877.h b/sound/soc/codecs/max9877.h
new file mode 100644
index 000000000000..6da72290ac58
--- /dev/null
+++ b/sound/soc/codecs/max9877.h
@@ -0,0 +1,37 @@
+/*
+ * max9877.h -- amp driver for max9877
+ *
+ * Copyright (C) 2009 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _MAX9877_H
+#define _MAX9877_H
+
+#define MAX9877_INPUT_MODE 0x00
+#define MAX9877_SPK_VOLUME 0x01
+#define MAX9877_HPL_VOLUME 0x02
+#define MAX9877_HPR_VOLUME 0x03
+#define MAX9877_OUTPUT_MODE 0x04
+
+/* MAX9877_INPUT_MODE */
+#define MAX9877_INB (1 << 4)
+#define MAX9877_INA (1 << 5)
+#define MAX9877_ZCD (1 << 6)
+
+/* MAX9877_OUTPUT_MODE */
+#define MAX9877_OUTMODE_MASK (15 << 0)
+#define MAX9877_OSC_MASK (3 << 4)
+#define MAX9877_OSC_OFFSET 4
+#define MAX9877_BYPASS (1 << 6)
+#define MAX9877_SHDN (1 << 7)
+
+extern int max9877_add_controls(struct snd_soc_codec *codec);
+
+#endif
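
max9877_add_controls() above is intended to be called from an ASoC machine driver, typically from a dai_link init callback once the card's codec has been probed. A hedged sketch (the machine function name is a placeholder):

#include <sound/soc.h>
#include "../codecs/max9877.h"

/* Hypothetical dai_link .init callback: expose the MAX9877 amplifier
 * controls ("MAX9877 ...") on the card once the codec is ready. */
static int example_machine_dai_init(struct snd_soc_codec *codec)
{
	return max9877_add_controls(codec);
}
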
diff --git a/sound/soc/codecs/spdif_transciever.c b/sound/soc/codecs/spdif_transciever.c
index 218b33adad90..a63191141052 100644
--- a/sound/soc/codecs/spdif_transciever.c
+++ b/sound/soc/codecs/spdif_transciever.c
@@ -21,6 +21,8 @@
#include "spdif_transciever.h"
+MODULE_LICENSE("GPL");
+
#define STUB_RATES SNDRV_PCM_RATE_8000_96000
#define STUB_FORMATS SNDRV_PCM_FMTBIT_S16_LE
@@ -34,6 +36,7 @@ struct snd_soc_dai dit_stub_dai = {
.formats = STUB_FORMATS,
},
};
+EXPORT_SYMBOL_GPL(dit_stub_dai);
static int spdif_dit_probe(struct platform_device *pdev)
{
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 8ad4b7b3e3ba..befc6488c39a 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -149,7 +149,7 @@ static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
return 0;
}
- if (reg / 2 > ARRAY_SIZE(stac9766_reg))
+ if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
return -EIO;
soc_ac97_ops.write(codec->ac97, reg, val);
@@ -168,7 +168,7 @@ static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
return val;
}
- if (reg / 2 > ARRAY_SIZE(stac9766_reg))
+ if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
return -EIO;
if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index cb0d1bf34b57..3395cf945d56 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -53,6 +53,7 @@
/* codec private data */
struct aic3x_priv {
+ struct snd_soc_codec codec;
unsigned int sysclk;
int master;
};
@@ -145,8 +146,8 @@ static int aic3x_read(struct snd_soc_codec *codec, unsigned int reg,
u8 *value)
{
*value = reg & 0xff;
- if (codec->hw_read(codec->control_data, value, 1) != 1)
- return -EIO;
+
+ value[0] = i2c_smbus_read_byte_data(codec->control_data, value[0]);
aic3x_write_reg_cache(codec, reg, *value);
return 0;
@@ -1156,11 +1157,13 @@ static int aic3x_resume(struct platform_device *pdev)
* initialise the AIC3X driver
* register the mixer and dsp interfaces with the kernel
*/
-static int aic3x_init(struct snd_soc_device *socdev)
+static int aic3x_init(struct snd_soc_codec *codec)
{
- struct snd_soc_codec *codec = socdev->card->codec;
- struct aic3x_setup_data *setup = socdev->codec_data;
- int reg, ret = 0;
+ int reg;
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
codec->name = "tlv320aic3x";
codec->owner = THIS_MODULE;
@@ -1177,13 +1180,6 @@ static int aic3x_init(struct snd_soc_device *socdev)
aic3x_write(codec, AIC3X_PAGE_SELECT, PAGE0_SELECT);
aic3x_write(codec, AIC3X_RESET, SOFT_RESET);
- /* register pcms */
- ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
- if (ret < 0) {
- printk(KERN_ERR "aic3x: failed to create pcms\n");
- goto pcm_err;
- }
-
/* DAC default volume and mute */
aic3x_write(codec, LDAC_VOL, DEFAULT_VOL | MUTE_ON);
aic3x_write(codec, RDAC_VOL, DEFAULT_VOL | MUTE_ON);
@@ -1250,30 +1246,51 @@ static int aic3x_init(struct snd_soc_device *socdev)
/* off, with power on */
aic3x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- /* setup GPIO functions */
- aic3x_write(codec, AIC3X_GPIO1_REG, (setup->gpio_func[0] & 0xf) << 4);
- aic3x_write(codec, AIC3X_GPIO2_REG, (setup->gpio_func[1] & 0xf) << 4);
+ return 0;
+}
- snd_soc_add_controls(codec, aic3x_snd_controls,
- ARRAY_SIZE(aic3x_snd_controls));
- aic3x_add_widgets(codec);
- ret = snd_soc_init_card(socdev);
+static struct snd_soc_codec *aic3x_codec;
+
+static int aic3x_register(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = aic3x_init(codec);
if (ret < 0) {
- printk(KERN_ERR "aic3x: failed to register card\n");
- goto card_err;
+ dev_err(codec->dev, "Failed to initialise device\n");
+ return ret;
}
- return ret;
+ aic3x_codec = codec;
-card_err:
- snd_soc_free_pcms(socdev);
- snd_soc_dapm_free(socdev);
-pcm_err:
- kfree(codec->reg_cache);
- return ret;
+ ret = snd_soc_register_codec(codec);
+ if (ret) {
+ dev_err(codec->dev, "Failed to register codec\n");
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&aic3x_dai);
+ if (ret) {
+ dev_err(codec->dev, "Failed to register dai\n");
+ snd_soc_unregister_codec(codec);
+ return ret;
+ }
+
+ return 0;
}
-static struct snd_soc_device *aic3x_socdev;
+static int aic3x_unregister(struct aic3x_priv *aic3x)
+{
+ aic3x_set_bias_level(&aic3x->codec, SND_SOC_BIAS_OFF);
+
+ snd_soc_unregister_dai(&aic3x_dai);
+ snd_soc_unregister_codec(&aic3x->codec);
+
+ kfree(aic3x);
+ aic3x_codec = NULL;
+
+ return 0;
+}
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
@@ -1288,28 +1305,36 @@ static struct snd_soc_device *aic3x_socdev;
static int aic3x_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct snd_soc_device *socdev = aic3x_socdev;
- struct snd_soc_codec *codec = socdev->card->codec;
- int ret;
+ struct snd_soc_codec *codec;
+ struct aic3x_priv *aic3x;
- i2c_set_clientdata(i2c, codec);
+ aic3x = kzalloc(sizeof(struct aic3x_priv), GFP_KERNEL);
+ if (aic3x == NULL) {
+ dev_err(&i2c->dev, "failed to create private data\n");
+ return -ENOMEM;
+ }
+
+ codec = &aic3x->codec;
+ codec->dev = &i2c->dev;
+ codec->private_data = aic3x;
codec->control_data = i2c;
+ codec->hw_write = (hw_write_t) i2c_master_send;
- ret = aic3x_init(socdev);
- if (ret < 0)
- printk(KERN_ERR "aic3x: failed to initialise AIC3X\n");
- return ret;
+ i2c_set_clientdata(i2c, aic3x);
+
+ return aic3x_register(codec);
}
static int aic3x_i2c_remove(struct i2c_client *client)
{
- struct snd_soc_codec *codec = i2c_get_clientdata(client);
- kfree(codec->reg_cache);
- return 0;
+ struct aic3x_priv *aic3x = i2c_get_clientdata(client);
+
+ return aic3x_unregister(aic3x);
}
static const struct i2c_device_id aic3x_i2c_id[] = {
{ "tlv320aic3x", 0 },
+ { "tlv320aic33", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
@@ -1320,56 +1345,28 @@ static struct i2c_driver aic3x_i2c_driver = {
.name = "aic3x I2C Codec",
.owner = THIS_MODULE,
},
- .probe = aic3x_i2c_probe,
+ .probe = aic3x_i2c_probe,
.remove = aic3x_i2c_remove,
.id_table = aic3x_i2c_id,
};
-static int aic3x_i2c_read(struct i2c_client *client, u8 *value, int len)
+static inline void aic3x_i2c_init(void)
{
- value[0] = i2c_smbus_read_byte_data(client, value[0]);
- return (len == 1);
-}
-
-static int aic3x_add_i2c_device(struct platform_device *pdev,
- const struct aic3x_setup_data *setup)
-{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
int ret;
ret = i2c_add_driver(&aic3x_i2c_driver);
- if (ret != 0) {
- dev_err(&pdev->dev, "can't add i2c driver\n");
- return ret;
- }
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = setup->i2c_address;
- strlcpy(info.type, "tlv320aic3x", I2C_NAME_SIZE);
-
- adapter = i2c_get_adapter(setup->i2c_bus);
- if (!adapter) {
- dev_err(&pdev->dev, "can't get i2c adapter %d\n",
- setup->i2c_bus);
- goto err_driver;
- }
-
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- dev_err(&pdev->dev, "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- goto err_driver;
- }
-
- return 0;
+ if (ret)
+ printk(KERN_ERR "%s: error registering i2c driver, %d\n",
+ __func__, ret);
+}
-err_driver:
+static inline void aic3x_i2c_exit(void)
+{
i2c_del_driver(&aic3x_i2c_driver);
- return -ENODEV;
}
+#else
+static inline void aic3x_i2c_init(void) { }
+static inline void aic3x_i2c_exit(void) { }
#endif
static int aic3x_probe(struct platform_device *pdev)
@@ -1377,43 +1374,51 @@ static int aic3x_probe(struct platform_device *pdev)
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct aic3x_setup_data *setup;
struct snd_soc_codec *codec;
- struct aic3x_priv *aic3x;
int ret = 0;
- printk(KERN_INFO "AIC3X Audio Codec %s\n", AIC3X_VERSION);
+ codec = aic3x_codec;
+ if (!codec) {
+ dev_err(&pdev->dev, "Codec not registered\n");
+ return -ENODEV;
+ }
+ socdev->card->codec = codec;
setup = socdev->codec_data;
- codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
- if (codec == NULL)
- return -ENOMEM;
- aic3x = kzalloc(sizeof(struct aic3x_priv), GFP_KERNEL);
- if (aic3x == NULL) {
- kfree(codec);
- return -ENOMEM;
+ if (setup) {
+ /* setup GPIO functions */
+ aic3x_write(codec, AIC3X_GPIO1_REG,
+ (setup->gpio_func[0] & 0xf) << 4);
+ aic3x_write(codec, AIC3X_GPIO2_REG,
+ (setup->gpio_func[1] & 0xf) << 4);
}
- codec->private_data = aic3x;
- socdev->card->codec = codec;
- mutex_init(&codec->mutex);
- INIT_LIST_HEAD(&codec->dapm_widgets);
- INIT_LIST_HEAD(&codec->dapm_paths);
-
- aic3x_socdev = socdev;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- if (setup->i2c_address) {
- codec->hw_write = (hw_write_t) i2c_master_send;
- codec->hw_read = (hw_read_t) aic3x_i2c_read;
- ret = aic3x_add_i2c_device(pdev, setup);
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ printk(KERN_ERR "aic3x: failed to create pcms\n");
+ goto pcm_err;
}
-#else
- /* Add other interfaces here */
-#endif
- if (ret != 0) {
- kfree(codec->private_data);
- kfree(codec);
+ snd_soc_add_controls(codec, aic3x_snd_controls,
+ ARRAY_SIZE(aic3x_snd_controls));
+
+ aic3x_add_widgets(codec);
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ printk(KERN_ERR "aic3x: failed to register card\n");
+ goto card_err;
}
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+pcm_err:
+ kfree(codec->reg_cache);
return ret;
}
@@ -1428,12 +1433,8 @@ static int aic3x_remove(struct platform_device *pdev)
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- i2c_unregister_device(codec->control_data);
- i2c_del_driver(&aic3x_i2c_driver);
-#endif
- kfree(codec->private_data);
- kfree(codec);
+
+ kfree(codec->reg_cache);
return 0;
}
@@ -1448,13 +1449,15 @@ EXPORT_SYMBOL_GPL(soc_codec_dev_aic3x);
static int __init aic3x_modinit(void)
{
- return snd_soc_register_dai(&aic3x_dai);
+ aic3x_i2c_init();
+
+ return 0;
}
module_init(aic3x_modinit);
static void __exit aic3x_exit(void)
{
- snd_soc_unregister_dai(&aic3x_dai);
+ aic3x_i2c_exit();
}
module_exit(aic3x_exit);
diff --git a/sound/soc/codecs/tlv320aic3x.h b/sound/soc/codecs/tlv320aic3x.h
index ac827e578c4d..9af1c886213c 100644
--- a/sound/soc/codecs/tlv320aic3x.h
+++ b/sound/soc/codecs/tlv320aic3x.h
@@ -282,8 +282,6 @@ int aic3x_headset_detected(struct snd_soc_codec *codec);
int aic3x_button_pressed(struct snd_soc_codec *codec);
struct aic3x_setup_data {
- int i2c_bus;
- unsigned short i2c_address;
unsigned int gpio_func[2];
};
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 4dbb853eef5a..4df7c6c61c76 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -225,55 +225,11 @@ static void twl4030_codec_mute(struct snd_soc_codec *codec, int mute)
return;
if (mute) {
- /* Bypass the reg_cache and mute the volumes
- * Headset mute is done in it's own event handler
- * Things to mute: Earpiece, PreDrivL/R, CarkitL/R
- */
- reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_EAR_CTL);
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- reg_val & (~TWL4030_EAR_GAIN),
- TWL4030_REG_EAR_CTL);
-
- reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PREDL_CTL);
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- reg_val & (~TWL4030_PREDL_GAIN),
- TWL4030_REG_PREDL_CTL);
- reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PREDR_CTL);
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- reg_val & (~TWL4030_PREDR_GAIN),
- TWL4030_REG_PREDL_CTL);
-
- reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PRECKL_CTL);
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- reg_val & (~TWL4030_PRECKL_GAIN),
- TWL4030_REG_PRECKL_CTL);
- reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PRECKR_CTL);
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- reg_val & (~TWL4030_PRECKR_GAIN),
- TWL4030_REG_PRECKR_CTL);
-
/* Disable PLL */
reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_APLL_CTL);
reg_val &= ~TWL4030_APLL_EN;
twl4030_write(codec, TWL4030_REG_APLL_CTL, reg_val);
} else {
- /* Restore the volumes
- * Headset mute is done in it's own event handler
- * Things to restore: Earpiece, PreDrivL/R, CarkitL/R
- */
- twl4030_write(codec, TWL4030_REG_EAR_CTL,
- twl4030_read_reg_cache(codec, TWL4030_REG_EAR_CTL));
-
- twl4030_write(codec, TWL4030_REG_PREDL_CTL,
- twl4030_read_reg_cache(codec, TWL4030_REG_PREDL_CTL));
- twl4030_write(codec, TWL4030_REG_PREDR_CTL,
- twl4030_read_reg_cache(codec, TWL4030_REG_PREDR_CTL));
-
- twl4030_write(codec, TWL4030_REG_PRECKL_CTL,
- twl4030_read_reg_cache(codec, TWL4030_REG_PRECKL_CTL));
- twl4030_write(codec, TWL4030_REG_PRECKR_CTL,
- twl4030_read_reg_cache(codec, TWL4030_REG_PRECKR_CTL));
-
/* Enable PLL */
reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_APLL_CTL);
reg_val |= TWL4030_APLL_EN;
@@ -443,16 +399,20 @@ SOC_DAPM_ENUM("Route", twl4030_vibrapath_enum);
/* Left analog microphone selection */
static const struct snd_kcontrol_new twl4030_dapm_analoglmic_controls[] = {
- SOC_DAPM_SINGLE("Main mic", TWL4030_REG_ANAMICL, 0, 1, 0),
- SOC_DAPM_SINGLE("Headset mic", TWL4030_REG_ANAMICL, 1, 1, 0),
- SOC_DAPM_SINGLE("AUXL", TWL4030_REG_ANAMICL, 2, 1, 0),
- SOC_DAPM_SINGLE("Carkit mic", TWL4030_REG_ANAMICL, 3, 1, 0),
+ SOC_DAPM_SINGLE("Main Mic Capture Switch",
+ TWL4030_REG_ANAMICL, 0, 1, 0),
+ SOC_DAPM_SINGLE("Headset Mic Capture Switch",
+ TWL4030_REG_ANAMICL, 1, 1, 0),
+ SOC_DAPM_SINGLE("AUXL Capture Switch",
+ TWL4030_REG_ANAMICL, 2, 1, 0),
+ SOC_DAPM_SINGLE("Carkit Mic Capture Switch",
+ TWL4030_REG_ANAMICL, 3, 1, 0),
};
/* Right analog microphone selection */
static const struct snd_kcontrol_new twl4030_dapm_analogrmic_controls[] = {
- SOC_DAPM_SINGLE("Sub mic", TWL4030_REG_ANAMICR, 0, 1, 0),
- SOC_DAPM_SINGLE("AUXR", TWL4030_REG_ANAMICR, 2, 1, 0),
+ SOC_DAPM_SINGLE("Sub Mic Capture Switch", TWL4030_REG_ANAMICR, 0, 1, 0),
+ SOC_DAPM_SINGLE("AUXR Capture Switch", TWL4030_REG_ANAMICR, 2, 1, 0),
};
/* TX1 L/R Analog/Digital microphone selection */
@@ -560,6 +520,41 @@ static int micpath_event(struct snd_soc_dapm_widget *w,
return 0;
}
+/*
+ * Output PGA builder:
+ * Handles muting and unmuting of the given output (turning off the
+ * amplifier associated with the output pin).
+ * On mute: bypass the reg_cache and mute the volume.
+ * On unmute: restore the register content from the reg_cache.
+ * Outputs handled this way: Earpiece, PreDrivL/R, CarkitL/R.
+ */
+#define TWL4030_OUTPUT_PGA(pin_name, reg, mask) \
+static int pin_name##pga_event(struct snd_soc_dapm_widget *w, \
+ struct snd_kcontrol *kcontrol, int event) \
+{ \
+ u8 reg_val; \
+ \
+ switch (event) { \
+ case SND_SOC_DAPM_POST_PMU: \
+ twl4030_write(w->codec, reg, \
+ twl4030_read_reg_cache(w->codec, reg)); \
+ break; \
+ case SND_SOC_DAPM_POST_PMD: \
+ reg_val = twl4030_read_reg_cache(w->codec, reg); \
+ twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \
+ reg_val & (~mask), \
+ reg); \
+ break; \
+ } \
+ return 0; \
+}
+
+TWL4030_OUTPUT_PGA(earpiece, TWL4030_REG_EAR_CTL, TWL4030_EAR_GAIN);
+TWL4030_OUTPUT_PGA(predrivel, TWL4030_REG_PREDL_CTL, TWL4030_PREDL_GAIN);
+TWL4030_OUTPUT_PGA(predriver, TWL4030_REG_PREDR_CTL, TWL4030_PREDR_GAIN);
+TWL4030_OUTPUT_PGA(carkitl, TWL4030_REG_PRECKL_CTL, TWL4030_PRECKL_GAIN);
+TWL4030_OUTPUT_PGA(carkitr, TWL4030_REG_PRECKR_CTL, TWL4030_PRECKR_GAIN);
+
static void handsfree_ramp(struct snd_soc_codec *codec, int reg, int ramp)
{
unsigned char hs_ctl;
@@ -620,6 +615,9 @@ static int handsfreerpga_event(struct snd_soc_dapm_widget *w,
static void headset_ramp(struct snd_soc_codec *codec, int ramp)
{
+ struct snd_soc_device *socdev = codec->socdev;
+ struct twl4030_setup_data *setup = socdev->codec_data;
+
unsigned char hs_gain, hs_pop;
struct twl4030_priv *twl4030 = codec->private_data;
/* Base values for ramp delay calculation: 2^19 - 2^26 */
@@ -629,6 +627,17 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
hs_gain = twl4030_read_reg_cache(codec, TWL4030_REG_HS_GAIN_SET);
hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET);
+ /* Enable external mute control; this dramatically reduces
+ * the pop noise */
+ if (setup && setup->hs_extmute) {
+ if (setup->set_hs_extmute) {
+ setup->set_hs_extmute(1);
+ } else {
+ hs_pop |= TWL4030_EXTMUTE;
+ twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
+ }
+ }
+
if (ramp) {
/* Headset ramp-up according to the TRM */
hs_pop |= TWL4030_VMID_EN;
@@ -636,6 +645,9 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
twl4030_write(codec, TWL4030_REG_HS_GAIN_SET, hs_gain);
hs_pop |= TWL4030_RAMP_EN;
twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
+ /* Wait ramp delay time + 1, so the VMID can settle */
+ mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
+ twl4030->sysclk) + 1);
} else {
/* Headset ramp-down _not_ according to
* the TRM, but in a way that it is working */
@@ -652,6 +664,16 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
hs_pop &= ~TWL4030_VMID_EN;
twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
}
+
+ /* Disable external mute */
+ if (setup && setup->hs_extmute) {
+ if (setup->set_hs_extmute) {
+ setup->set_hs_extmute(0);
+ } else {
+ hs_pop &= ~TWL4030_EXTMUTE;
+ twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
+ }
+ }
}
static int headsetlpga_event(struct snd_soc_dapm_widget *w,
@@ -712,7 +734,19 @@ static int bypass_event(struct snd_soc_dapm_widget *w,
reg = twl4030_read_reg_cache(w->codec, m->reg);
- if (m->reg <= TWL4030_REG_ARXR2_APGA_CTL) {
+ /*
+ * bypass_state[0:3] - analog HiFi bypass
+ * bypass_state[4] - analog voice bypass
+ * bypass_state[5] - digital voice bypass
+ * bypass_state[6:7] - digital HiFi bypass
+ */
+ if (m->reg == TWL4030_REG_VSTPGA) {
+ /* Voice digital bypass */
+ if (reg)
+ twl4030->bypass_state |= (1 << 5);
+ else
+ twl4030->bypass_state &= ~(1 << 5);
+ } else if (m->reg <= TWL4030_REG_ARXR2_APGA_CTL) {
/* Analog bypass */
if (reg & (1 << m->shift))
twl4030->bypass_state |=
@@ -726,12 +760,6 @@ static int bypass_event(struct snd_soc_dapm_widget *w,
twl4030->bypass_state |= (1 << 4);
else
twl4030->bypass_state &= ~(1 << 4);
- } else if (m->reg == TWL4030_REG_VSTPGA) {
- /* Voice digital bypass */
- if (reg)
- twl4030->bypass_state |= (1 << 5);
- else
- twl4030->bypass_state &= ~(1 << 5);
} else {
/* Digital bypass */
if (reg & (0x7 << m->shift))
@@ -924,7 +952,7 @@ static const struct soc_enum twl4030_op_modes_enum =
ARRAY_SIZE(twl4030_op_modes_texts),
twl4030_op_modes_texts);
-int snd_soc_put_twl4030_opmode_enum_double(struct snd_kcontrol *kcontrol,
+static int snd_soc_put_twl4030_opmode_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
@@ -1005,6 +1033,16 @@ static DECLARE_TLV_DB_SCALE(digital_capture_tlv, 0, 100, 0);
*/
static DECLARE_TLV_DB_SCALE(input_gain_tlv, 0, 600, 0);
+/* AVADC clock priority */
+static const char *twl4030_avadc_clk_priority_texts[] = {
+ "Voice high priority", "HiFi high priority"
+};
+
+static const struct soc_enum twl4030_avadc_clk_priority_enum =
+ SOC_ENUM_SINGLE(TWL4030_REG_AVADC_CTL, 2,
+ ARRAY_SIZE(twl4030_avadc_clk_priority_texts),
+ twl4030_avadc_clk_priority_texts);
+
static const char *twl4030_rampdelay_texts[] = {
"27/20/14 ms", "55/40/27 ms", "109/81/55 ms", "218/161/109 ms",
"437/323/218 ms", "874/645/437 ms", "1748/1291/874 ms",
@@ -1106,6 +1144,8 @@ static const struct snd_kcontrol_new twl4030_snd_controls[] = {
SOC_DOUBLE_TLV("Analog Capture Volume", TWL4030_REG_ANAMIC_GAIN,
0, 3, 5, 0, input_gain_tlv),
+ SOC_ENUM("AVADC Clock Priority", twl4030_avadc_clk_priority_enum),
+
SOC_ENUM("HS ramp delay", twl4030_rampdelay_enum),
SOC_ENUM("Vibra H-bridge mode", twl4030_vibradirmode_enum),
@@ -1208,13 +1248,22 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Earpiece Mixer", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_earpiece_controls[0],
ARRAY_SIZE(twl4030_dapm_earpiece_controls)),
+ SND_SOC_DAPM_PGA_E("Earpiece PGA", SND_SOC_NOPM,
+ 0, 0, NULL, 0, earpiecepga_event,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
/* PreDrivL/R */
SND_SOC_DAPM_MIXER("PredriveL Mixer", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_predrivel_controls[0],
ARRAY_SIZE(twl4030_dapm_predrivel_controls)),
+ SND_SOC_DAPM_PGA_E("PredriveL PGA", SND_SOC_NOPM,
+ 0, 0, NULL, 0, predrivelpga_event,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MIXER("PredriveR Mixer", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_predriver_controls[0],
ARRAY_SIZE(twl4030_dapm_predriver_controls)),
+ SND_SOC_DAPM_PGA_E("PredriveR PGA", SND_SOC_NOPM,
+ 0, 0, NULL, 0, predriverpga_event,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
/* HeadsetL/R */
SND_SOC_DAPM_MIXER("HeadsetL Mixer", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_hsol_controls[0],
@@ -1232,22 +1281,28 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("CarkitL Mixer", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_carkitl_controls[0],
ARRAY_SIZE(twl4030_dapm_carkitl_controls)),
+ SND_SOC_DAPM_PGA_E("CarkitL PGA", SND_SOC_NOPM,
+ 0, 0, NULL, 0, carkitlpga_event,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MIXER("CarkitR Mixer", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_carkitr_controls[0],
ARRAY_SIZE(twl4030_dapm_carkitr_controls)),
+ SND_SOC_DAPM_PGA_E("CarkitR PGA", SND_SOC_NOPM,
+ 0, 0, NULL, 0, carkitrpga_event,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
/* Output MUX controls */
/* HandsfreeL/R */
SND_SOC_DAPM_MUX("HandsfreeL Mux", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_handsfreel_control),
- SND_SOC_DAPM_SWITCH("HandsfreeL Switch", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_SWITCH("HandsfreeL", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_handsfreelmute_control),
SND_SOC_DAPM_PGA_E("HandsfreeL PGA", SND_SOC_NOPM,
0, 0, NULL, 0, handsfreelpga_event,
SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MUX("HandsfreeR Mux", SND_SOC_NOPM, 5, 0,
&twl4030_dapm_handsfreer_control),
- SND_SOC_DAPM_SWITCH("HandsfreeR Switch", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_SWITCH("HandsfreeR", SND_SOC_NOPM, 0, 0,
&twl4030_dapm_handsfreermute_control),
SND_SOC_DAPM_PGA_E("HandsfreeR PGA", SND_SOC_NOPM,
0, 0, NULL, 0, handsfreerpga_event,
@@ -1282,11 +1337,11 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
SND_SOC_DAPM_POST_REG),
/* Analog input mixers for the capture amplifiers */
- SND_SOC_DAPM_MIXER("Analog Left Capture Route",
+ SND_SOC_DAPM_MIXER("Analog Left",
TWL4030_REG_ANAMICL, 4, 0,
&twl4030_dapm_analoglmic_controls[0],
ARRAY_SIZE(twl4030_dapm_analoglmic_controls)),
- SND_SOC_DAPM_MIXER("Analog Right Capture Route",
+ SND_SOC_DAPM_MIXER("Analog Right",
TWL4030_REG_ANAMICR, 4, 0,
&twl4030_dapm_analogrmic_controls[0],
ARRAY_SIZE(twl4030_dapm_analogrmic_controls)),
@@ -1326,16 +1381,19 @@ static const struct snd_soc_dapm_route intercon[] = {
{"Earpiece Mixer", "AudioL1", "Analog L1 Playback Mixer"},
{"Earpiece Mixer", "AudioL2", "Analog L2 Playback Mixer"},
{"Earpiece Mixer", "AudioR1", "Analog R1 Playback Mixer"},
+ {"Earpiece PGA", NULL, "Earpiece Mixer"},
/* PreDrivL */
{"PredriveL Mixer", "Voice", "Analog Voice Playback Mixer"},
{"PredriveL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
{"PredriveL Mixer", "AudioL2", "Analog L2 Playback Mixer"},
{"PredriveL Mixer", "AudioR2", "Analog R2 Playback Mixer"},
+ {"PredriveL PGA", NULL, "PredriveL Mixer"},
/* PreDrivR */
{"PredriveR Mixer", "Voice", "Analog Voice Playback Mixer"},
{"PredriveR Mixer", "AudioR1", "Analog R1 Playback Mixer"},
{"PredriveR Mixer", "AudioR2", "Analog R2 Playback Mixer"},
{"PredriveR Mixer", "AudioL2", "Analog L2 Playback Mixer"},
+ {"PredriveR PGA", NULL, "PredriveR Mixer"},
/* HeadsetL */
{"HeadsetL Mixer", "Voice", "Analog Voice Playback Mixer"},
{"HeadsetL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
@@ -1350,24 +1408,26 @@ static const struct snd_soc_dapm_route intercon[] = {
{"CarkitL Mixer", "Voice", "Analog Voice Playback Mixer"},
{"CarkitL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
{"CarkitL Mixer", "AudioL2", "Analog L2 Playback Mixer"},
+ {"CarkitL PGA", NULL, "CarkitL Mixer"},
/* CarkitR */
{"CarkitR Mixer", "Voice", "Analog Voice Playback Mixer"},
{"CarkitR Mixer", "AudioR1", "Analog R1 Playback Mixer"},
{"CarkitR Mixer", "AudioR2", "Analog R2 Playback Mixer"},
+ {"CarkitR PGA", NULL, "CarkitR Mixer"},
/* HandsfreeL */
{"HandsfreeL Mux", "Voice", "Analog Voice Playback Mixer"},
{"HandsfreeL Mux", "AudioL1", "Analog L1 Playback Mixer"},
{"HandsfreeL Mux", "AudioL2", "Analog L2 Playback Mixer"},
{"HandsfreeL Mux", "AudioR2", "Analog R2 Playback Mixer"},
- {"HandsfreeL Switch", "Switch", "HandsfreeL Mux"},
- {"HandsfreeL PGA", NULL, "HandsfreeL Switch"},
+ {"HandsfreeL", "Switch", "HandsfreeL Mux"},
+ {"HandsfreeL PGA", NULL, "HandsfreeL"},
/* HandsfreeR */
{"HandsfreeR Mux", "Voice", "Analog Voice Playback Mixer"},
{"HandsfreeR Mux", "AudioR1", "Analog R1 Playback Mixer"},
{"HandsfreeR Mux", "AudioR2", "Analog R2 Playback Mixer"},
{"HandsfreeR Mux", "AudioL2", "Analog L2 Playback Mixer"},
- {"HandsfreeR Switch", "Switch", "HandsfreeR Mux"},
- {"HandsfreeR PGA", NULL, "HandsfreeR Switch"},
+ {"HandsfreeR", "Switch", "HandsfreeR Mux"},
+ {"HandsfreeR PGA", NULL, "HandsfreeR"},
/* Vibra */
{"Vibra Mux", "AudioL1", "DAC Left1"},
{"Vibra Mux", "AudioR1", "DAC Right1"},
@@ -1377,29 +1437,29 @@ static const struct snd_soc_dapm_route intercon[] = {
/* outputs */
{"OUTL", NULL, "Analog L2 Playback Mixer"},
{"OUTR", NULL, "Analog R2 Playback Mixer"},
- {"EARPIECE", NULL, "Earpiece Mixer"},
- {"PREDRIVEL", NULL, "PredriveL Mixer"},
- {"PREDRIVER", NULL, "PredriveR Mixer"},
+ {"EARPIECE", NULL, "Earpiece PGA"},
+ {"PREDRIVEL", NULL, "PredriveL PGA"},
+ {"PREDRIVER", NULL, "PredriveR PGA"},
{"HSOL", NULL, "HeadsetL PGA"},
{"HSOR", NULL, "HeadsetR PGA"},
- {"CARKITL", NULL, "CarkitL Mixer"},
- {"CARKITR", NULL, "CarkitR Mixer"},
+ {"CARKITL", NULL, "CarkitL PGA"},
+ {"CARKITR", NULL, "CarkitR PGA"},
{"HFL", NULL, "HandsfreeL PGA"},
{"HFR", NULL, "HandsfreeR PGA"},
{"Vibra Route", "Audio", "Vibra Mux"},
{"VIBRA", NULL, "Vibra Route"},
/* Capture path */
- {"Analog Left Capture Route", "Main mic", "MAINMIC"},
- {"Analog Left Capture Route", "Headset mic", "HSMIC"},
- {"Analog Left Capture Route", "AUXL", "AUXL"},
- {"Analog Left Capture Route", "Carkit mic", "CARKITMIC"},
+ {"Analog Left", "Main Mic Capture Switch", "MAINMIC"},
+ {"Analog Left", "Headset Mic Capture Switch", "HSMIC"},
+ {"Analog Left", "AUXL Capture Switch", "AUXL"},
+ {"Analog Left", "Carkit Mic Capture Switch", "CARKITMIC"},
- {"Analog Right Capture Route", "Sub mic", "SUBMIC"},
- {"Analog Right Capture Route", "AUXR", "AUXR"},
+ {"Analog Right", "Sub Mic Capture Switch", "SUBMIC"},
+ {"Analog Right", "AUXR Capture Switch", "AUXR"},
- {"ADC Physical Left", NULL, "Analog Left Capture Route"},
- {"ADC Physical Right", NULL, "Analog Right Capture Route"},
+ {"ADC Physical Left", NULL, "Analog Left"},
+ {"ADC Physical Right", NULL, "Analog Right"},
{"Digimic0 Enable", NULL, "DIGIMIC0"},
{"Digimic1 Enable", NULL, "DIGIMIC1"},
@@ -1423,11 +1483,11 @@ static const struct snd_soc_dapm_route intercon[] = {
{"ADC Virtual Right2", NULL, "TX2 Capture Route"},
/* Analog bypass routes */
- {"Right1 Analog Loopback", "Switch", "Analog Right Capture Route"},
- {"Left1 Analog Loopback", "Switch", "Analog Left Capture Route"},
- {"Right2 Analog Loopback", "Switch", "Analog Right Capture Route"},
- {"Left2 Analog Loopback", "Switch", "Analog Left Capture Route"},
- {"Voice Analog Loopback", "Switch", "Analog Left Capture Route"},
+ {"Right1 Analog Loopback", "Switch", "Analog Right"},
+ {"Left1 Analog Loopback", "Switch", "Analog Left"},
+ {"Right2 Analog Loopback", "Switch", "Analog Right"},
+ {"Left2 Analog Loopback", "Switch", "Analog Left"},
+ {"Voice Analog Loopback", "Switch", "Analog Left"},
{"Analog R1 Playback Mixer", NULL, "Right1 Analog Loopback"},
{"Analog L1 Playback Mixer", NULL, "Left1 Analog Loopback"},
@@ -1609,8 +1669,6 @@ static int twl4030_hw_params(struct snd_pcm_substream *substream,
/* If the substream has 4 channel, do the necessary setup */
if (params_channels(params) == 4) {
- u8 format, mode;
-
format = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF);
mode = twl4030_read_reg_cache(codec, TWL4030_REG_CODEC_MODE);
@@ -1806,6 +1864,19 @@ static int twl4030_set_dai_fmt(struct snd_soc_dai *codec_dai,
return 0;
}
+static int twl4030_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 reg = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF);
+
+ if (tristate)
+ reg |= TWL4030_AIF_TRI_EN;
+ else
+ reg &= ~TWL4030_AIF_TRI_EN;
+
+ return twl4030_write(codec, TWL4030_REG_AUDIO_IF, reg);
+}
+
/* In case of voice mode, the RX1 L(VRX) for downlink and the TX2 L/R
 * (VTXL, VTXR) for uplink have to be enabled/disabled. */
static void twl4030_voice_enable(struct snd_soc_codec *codec, int direction,
@@ -1948,7 +2019,7 @@ static int twl4030_voice_set_dai_fmt(struct snd_soc_dai *codec_dai,
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFM:
+ case SND_SOC_DAIFMT_CBM_CFM:
format &= ~(TWL4030_VIF_SLAVE_EN);
break;
case SND_SOC_DAIFMT_CBS_CFS:
@@ -1980,6 +2051,19 @@ static int twl4030_voice_set_dai_fmt(struct snd_soc_dai *codec_dai,
return 0;
}
+static int twl4030_voice_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 reg = twl4030_read_reg_cache(codec, TWL4030_REG_VOICE_IF);
+
+ if (tristate)
+ reg |= TWL4030_VIF_TRI_EN;
+ else
+ reg &= ~TWL4030_VIF_TRI_EN;
+
+ return twl4030_write(codec, TWL4030_REG_VOICE_IF, reg);
+}
+
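/*
 * Both tristate callbacks above are plain read-modify-write updates of a
 * cached interface register.  A minimal, illustrative usage sketch, assuming
 * "codec_dai" is this codec's struct snd_soc_dai (e.g. rtd->dai->codec_dai
 * in a machine driver):
 */
snd_soc_dai_set_tristate(codec_dai, 1);		/* release (tristate) the DAI pins */
/* ... another controller can now drive the shared bus ... */
snd_soc_dai_set_tristate(codec_dai, 0);		/* drive the pins again */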
#define TWL4030_RATES (SNDRV_PCM_RATE_8000_48000)
#define TWL4030_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
@@ -1989,6 +2073,7 @@ static struct snd_soc_dai_ops twl4030_dai_ops = {
.hw_params = twl4030_hw_params,
.set_sysclk = twl4030_set_dai_sysclk,
.set_fmt = twl4030_set_dai_fmt,
+ .set_tristate = twl4030_set_tristate,
};
static struct snd_soc_dai_ops twl4030_dai_voice_ops = {
@@ -1997,6 +2082,7 @@ static struct snd_soc_dai_ops twl4030_dai_voice_ops = {
.hw_params = twl4030_voice_hw_params,
.set_sysclk = twl4030_voice_set_dai_sysclk,
.set_fmt = twl4030_voice_set_dai_fmt,
+ .set_tristate = twl4030_voice_set_tristate,
};
struct snd_soc_dai twl4030_dai[] = {
diff --git a/sound/soc/codecs/twl4030.h b/sound/soc/codecs/twl4030.h
index fe5f395d9e4f..2b4bfa23f985 100644
--- a/sound/soc/codecs/twl4030.h
+++ b/sound/soc/codecs/twl4030.h
@@ -274,6 +274,8 @@ extern struct snd_soc_codec_device soc_codec_dev_twl4030;
struct twl4030_setup_data {
unsigned int ramp_delay_value;
unsigned int sysclk;
+ unsigned int hs_extmute:1;
+ void (*set_hs_extmute)(int mute);
};
#endif /* End of __TWL4030_AUDIO_H__ */
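A minimal sketch of how board code might use the new external headset mute
hook; only the field names come from the structure above, while the callback
body and the GPIO are hypothetical:

static void board_hs_extmute(int mute)
{
	gpio_set_value(BOARD_HS_MUTE_GPIO, mute);	/* hypothetical GPIO */
}

static struct twl4030_setup_data board_twl4030_setup = {
	.ramp_delay_value	= 2,
	.sysclk			= 26000,
	.hs_extmute		= 1,
	.set_hs_extmute		= board_hs_extmute,
};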
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 269b108e1de6..c33b92edbded 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -163,7 +163,7 @@ static int uda134x_mute(struct snd_soc_dai *dai, int mute)
else
mute_reg &= ~(1<<2);
- uda134x_write(codec, UDA134X_DATA010, mute_reg & ~(1<<2));
+ uda134x_write(codec, UDA134X_DATA010, mute_reg);
return 0;
}
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 5b21594e0e58..92ec03442154 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -5,9 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com>
- * Improved support for DAPM and audio routing/mixing capabilities,
- * added TLV support.
+ * Copyright (c) 2007-2009 Philipp Zabel <philipp.zabel@gmail.com>
*
* Modified by Richard Purdie <richard@openedhand.com> to fit into SoC
* codec model.
@@ -19,26 +17,32 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/ioctl.h>
+#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/initval.h>
-#include <sound/info.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
+#include <sound/uda1380.h>
#include "uda1380.h"
-static struct work_struct uda1380_work;
static struct snd_soc_codec *uda1380_codec;
+/* codec private data */
+struct uda1380_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[UDA1380_CACHEREGNUM];
+ unsigned int dac_clk;
+ struct work_struct work;
+};
+
/*
* uda1380 register cache
*/
@@ -473,6 +477,7 @@ static int uda1380_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
+ struct uda1380_priv *uda1380 = codec->private_data;
int mixer = uda1380_read_reg_cache(codec, UDA1380_MIXER);
switch (cmd) {
@@ -480,13 +485,13 @@ static int uda1380_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
uda1380_write_reg_cache(codec, UDA1380_MIXER,
mixer & ~R14_SILENCE);
- schedule_work(&uda1380_work);
+ schedule_work(&uda1380->work);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
uda1380_write_reg_cache(codec, UDA1380_MIXER,
mixer | R14_SILENCE);
- schedule_work(&uda1380_work);
+ schedule_work(&uda1380->work);
break;
}
return 0;
@@ -670,44 +675,33 @@ static int uda1380_resume(struct platform_device *pdev)
return 0;
}
-/*
- * initialise the UDA1380 driver
- * register mixer and dsp interfaces with the kernel
- */
-static int uda1380_init(struct snd_soc_device *socdev, int dac_clk)
+static int uda1380_probe(struct platform_device *pdev)
{
- struct snd_soc_codec *codec = socdev->card->codec;
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ struct uda1380_platform_data *pdata;
int ret = 0;
- codec->name = "UDA1380";
- codec->owner = THIS_MODULE;
- codec->read = uda1380_read_reg_cache;
- codec->write = uda1380_write;
- codec->set_bias_level = uda1380_set_bias_level;
- codec->dai = uda1380_dai;
- codec->num_dai = ARRAY_SIZE(uda1380_dai);
- codec->reg_cache = kmemdup(uda1380_reg, sizeof(uda1380_reg),
- GFP_KERNEL);
- if (codec->reg_cache == NULL)
- return -ENOMEM;
- codec->reg_cache_size = ARRAY_SIZE(uda1380_reg);
- codec->reg_cache_step = 1;
- uda1380_reset(codec);
+ if (uda1380_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
- uda1380_codec = codec;
- INIT_WORK(&uda1380_work, uda1380_flush_work);
+ socdev->card->codec = uda1380_codec;
+ codec = uda1380_codec;
+ pdata = codec->dev->platform_data;
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
- pr_err("uda1380: failed to create pcms\n");
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
goto pcm_err;
}
/* power on device */
uda1380_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* set clock input */
- switch (dac_clk) {
+ switch (pdata->dac_clk) {
case UDA1380_DAC_CLK_SYSCLK:
uda1380_write(codec, UDA1380_CLK, 0);
break;
@@ -716,13 +710,12 @@ static int uda1380_init(struct snd_soc_device *socdev, int dac_clk)
break;
}
- /* uda1380 init */
snd_soc_add_controls(codec, uda1380_snd_controls,
ARRAY_SIZE(uda1380_snd_controls));
uda1380_add_widgets(codec);
ret = snd_soc_init_card(socdev);
if (ret < 0) {
- pr_err("uda1380: failed to register card\n");
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
goto card_err;
}
@@ -732,165 +725,201 @@ card_err:
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
pcm_err:
- kfree(codec->reg_cache);
return ret;
}
-static struct snd_soc_device *uda1380_socdev;
-
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-
-static int uda1380_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+/* power down chip */
+static int uda1380_remove(struct platform_device *pdev)
{
- struct snd_soc_device *socdev = uda1380_socdev;
- struct uda1380_setup_data *setup = socdev->codec_data;
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- int ret;
-
- i2c_set_clientdata(i2c, codec);
- codec->control_data = i2c;
- ret = uda1380_init(socdev, setup->dac_clk);
- if (ret < 0)
- pr_err("uda1380: failed to initialise UDA1380\n");
+ if (codec->control_data)
+ uda1380_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return ret;
-}
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
-static int uda1380_i2c_remove(struct i2c_client *client)
-{
- struct snd_soc_codec *codec = i2c_get_clientdata(client);
- kfree(codec->reg_cache);
return 0;
}
-static const struct i2c_device_id uda1380_i2c_id[] = {
- { "uda1380", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, uda1380_i2c_id);
-
-static struct i2c_driver uda1380_i2c_driver = {
- .driver = {
- .name = "UDA1380 I2C Codec",
- .owner = THIS_MODULE,
- },
- .probe = uda1380_i2c_probe,
- .remove = uda1380_i2c_remove,
- .id_table = uda1380_i2c_id,
+struct snd_soc_codec_device soc_codec_dev_uda1380 = {
+ .probe = uda1380_probe,
+ .remove = uda1380_remove,
+ .suspend = uda1380_suspend,
+ .resume = uda1380_resume,
};
+EXPORT_SYMBOL_GPL(soc_codec_dev_uda1380);
-static int uda1380_add_i2c_device(struct platform_device *pdev,
- const struct uda1380_setup_data *setup)
+static int uda1380_register(struct uda1380_priv *uda1380)
{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
- int ret;
+ int ret, i;
+ struct snd_soc_codec *codec = &uda1380->codec;
+ struct uda1380_platform_data *pdata = codec->dev->platform_data;
- ret = i2c_add_driver(&uda1380_i2c_driver);
- if (ret != 0) {
- dev_err(&pdev->dev, "can't add i2c driver\n");
- return ret;
+ if (uda1380_codec) {
+ dev_err(codec->dev, "Another UDA1380 is registered\n");
+ return -EINVAL;
+ }
+
+ if (!pdata || !pdata->gpio_power || !pdata->gpio_reset)
+ return -EINVAL;
+
+ ret = gpio_request(pdata->gpio_power, "uda1380 power");
+ if (ret)
+ goto err_out;
+ ret = gpio_request(pdata->gpio_reset, "uda1380 reset");
+ if (ret)
+ goto err_gpio;
+
+ gpio_direction_output(pdata->gpio_power, 1);
+
+ /* we may need to have the clock running here - pH5 */
+ gpio_direction_output(pdata->gpio_reset, 1);
+ udelay(5);
+ gpio_set_value(pdata->gpio_reset, 0);
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = uda1380;
+ codec->name = "UDA1380";
+ codec->owner = THIS_MODULE;
+ codec->read = uda1380_read_reg_cache;
+ codec->write = uda1380_write;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = uda1380_set_bias_level;
+ codec->dai = uda1380_dai;
+ codec->num_dai = ARRAY_SIZE(uda1380_dai);
+ codec->reg_cache_size = ARRAY_SIZE(uda1380_reg);
+ codec->reg_cache = &uda1380->reg_cache;
+ codec->reg_cache_step = 1;
+
+ memcpy(codec->reg_cache, uda1380_reg, sizeof(uda1380_reg));
+
+ ret = uda1380_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ goto err_reset;
}
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = setup->i2c_address;
- strlcpy(info.type, "uda1380", I2C_NAME_SIZE);
+ INIT_WORK(&uda1380->work, uda1380_flush_work);
+
+ for (i = 0; i < ARRAY_SIZE(uda1380_dai); i++)
+ uda1380_dai[i].dev = codec->dev;
- adapter = i2c_get_adapter(setup->i2c_bus);
- if (!adapter) {
- dev_err(&pdev->dev, "can't get i2c adapter %d\n",
- setup->i2c_bus);
- goto err_driver;
+ uda1380_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ goto err_reset;
}
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- dev_err(&pdev->dev, "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- goto err_driver;
+ ret = snd_soc_register_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAIs: %d\n", ret);
+ goto err_dai;
}
return 0;
-err_driver:
- i2c_del_driver(&uda1380_i2c_driver);
- return -ENODEV;
+err_dai:
+ snd_soc_unregister_codec(codec);
+err_reset:
+ gpio_set_value(pdata->gpio_power, 0);
+ gpio_free(pdata->gpio_reset);
+err_gpio:
+ gpio_free(pdata->gpio_power);
+err_out:
+ return ret;
}
-#endif
-static int uda1380_probe(struct platform_device *pdev)
+static void uda1380_unregister(struct uda1380_priv *uda1380)
{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct uda1380_setup_data *setup;
+ struct snd_soc_codec *codec = &uda1380->codec;
+ struct uda1380_platform_data *pdata = codec->dev->platform_data;
+
+ snd_soc_unregister_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
+ snd_soc_unregister_codec(&uda1380->codec);
+
+ gpio_set_value(pdata->gpio_power, 0);
+ gpio_free(pdata->gpio_reset);
+ gpio_free(pdata->gpio_power);
+
+ kfree(uda1380);
+ uda1380_codec = NULL;
+}
+
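/*
 * Sketch of the board-side data this rework expects; the field names match
 * the pdata accesses above (gpio_power, gpio_reset, dac_clk), the GPIO
 * numbers and I2C address are placeholders, and UDA1380_DAC_CLK_SYSCLK is
 * assumed to be provided by <sound/uda1380.h>:
 */
static struct uda1380_platform_data board_uda1380_pdata = {
	.gpio_power	= 91,				/* placeholder */
	.gpio_reset	= 92,				/* placeholder */
	.dac_clk	= UDA1380_DAC_CLK_SYSCLK,
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("uda1380", 0x18),	/* usual UDA1380 address */
		.platform_data = &board_uda1380_pdata,
	},
};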
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int uda1380_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct uda1380_priv *uda1380;
struct snd_soc_codec *codec;
int ret;
- setup = socdev->codec_data;
- codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
- if (codec == NULL)
+ uda1380 = kzalloc(sizeof(struct uda1380_priv), GFP_KERNEL);
+ if (uda1380 == NULL)
return -ENOMEM;
- socdev->card->codec = codec;
- mutex_init(&codec->mutex);
- INIT_LIST_HEAD(&codec->dapm_widgets);
- INIT_LIST_HEAD(&codec->dapm_paths);
+ codec = &uda1380->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
- uda1380_socdev = socdev;
- ret = -ENODEV;
+ i2c_set_clientdata(i2c, uda1380);
+ codec->control_data = i2c;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- if (setup->i2c_address) {
- codec->hw_write = (hw_write_t)i2c_master_send;
- ret = uda1380_add_i2c_device(pdev, setup);
- }
-#endif
+ codec->dev = &i2c->dev;
+ ret = uda1380_register(uda1380);
if (ret != 0)
- kfree(codec);
+ kfree(uda1380);
+
return ret;
}
-/* power down chip */
-static int uda1380_remove(struct platform_device *pdev)
+static int __devexit uda1380_i2c_remove(struct i2c_client *i2c)
{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_codec *codec = socdev->card->codec;
-
- if (codec->control_data)
- uda1380_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- snd_soc_free_pcms(socdev);
- snd_soc_dapm_free(socdev);
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
- i2c_unregister_device(codec->control_data);
- i2c_del_driver(&uda1380_i2c_driver);
-#endif
- kfree(codec);
-
+ struct uda1380_priv *uda1380 = i2c_get_clientdata(i2c);
+ uda1380_unregister(uda1380);
return 0;
}
-struct snd_soc_codec_device soc_codec_dev_uda1380 = {
- .probe = uda1380_probe,
- .remove = uda1380_remove,
- .suspend = uda1380_suspend,
- .resume = uda1380_resume,
+static const struct i2c_device_id uda1380_i2c_id[] = {
+ { "uda1380", 0 },
+ { }
};
-EXPORT_SYMBOL_GPL(soc_codec_dev_uda1380);
+MODULE_DEVICE_TABLE(i2c, uda1380_i2c_id);
+
+static struct i2c_driver uda1380_i2c_driver = {
+ .driver = {
+ .name = "UDA1380 I2C Codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = uda1380_i2c_probe,
+ .remove = __devexit_p(uda1380_i2c_remove),
+ .id_table = uda1380_i2c_id,
+};
+#endif
static int __init uda1380_modinit(void)
{
- return snd_soc_register_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
+ int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&uda1380_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
+#endif
+ return 0;
}
module_init(uda1380_modinit);
static void __exit uda1380_exit(void)
{
- snd_soc_unregister_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&uda1380_i2c_driver);
+#endif
}
module_exit(uda1380_exit);
diff --git a/sound/soc/codecs/uda1380.h b/sound/soc/codecs/uda1380.h
index c55c17a52a12..9cefa8a54770 100644
--- a/sound/soc/codecs/uda1380.h
+++ b/sound/soc/codecs/uda1380.h
@@ -72,14 +72,6 @@
#define R22_SKIP_DCFIL 0x0002
#define R23_AGC_EN 0x0001
-struct uda1380_setup_data {
- int i2c_bus;
- unsigned short i2c_address;
- int dac_clk;
-#define UDA1380_DAC_CLK_SYSCLK 0
-#define UDA1380_DAC_CLK_WSPLL 1
-};
-
#define UDA1380_DAI_DUPLEX 0 /* playback and capture on single DAI */
#define UDA1380_DAI_PLAYBACK 1 /* playback DAI */
#define UDA1380_DAI_CAPTURE 2 /* capture DAI */
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index e7348d341b76..3ff0373dff89 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -63,6 +63,8 @@ struct wm8350_data {
struct wm8350_jack_data hpl;
struct wm8350_jack_data hpr;
struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
+ int fll_freq_out;
+ int fll_freq_in;
};
static unsigned int wm8350_codec_cache_read(struct snd_soc_codec *codec,
@@ -406,7 +408,6 @@ static const char *wm8350_deemp[] = { "None", "32kHz", "44.1kHz", "48kHz" };
static const char *wm8350_pol[] = { "Normal", "Inv R", "Inv L", "Inv L & R" };
static const char *wm8350_dacmutem[] = { "Normal", "Soft" };
static const char *wm8350_dacmutes[] = { "Fast", "Slow" };
-static const char *wm8350_dacfilter[] = { "Normal", "Sloping" };
static const char *wm8350_adcfilter[] = { "None", "High Pass" };
static const char *wm8350_adchp[] = { "44.1kHz", "8kHz", "16kHz", "32kHz" };
static const char *wm8350_lr[] = { "Left", "Right" };
@@ -416,7 +417,6 @@ static const struct soc_enum wm8350_enum[] = {
SOC_ENUM_SINGLE(WM8350_DAC_CONTROL, 0, 4, wm8350_pol),
SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 14, 2, wm8350_dacmutem),
SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 13, 2, wm8350_dacmutes),
- SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 12, 2, wm8350_dacfilter),
SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 15, 2, wm8350_adcfilter),
SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 8, 4, wm8350_adchp),
SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 0, 4, wm8350_pol),
@@ -444,10 +444,9 @@ static const struct snd_kcontrol_new wm8350_snd_controls[] = {
0, 255, 0, dac_pcm_tlv),
SOC_ENUM("Playback PCM Mute Function", wm8350_enum[2]),
SOC_ENUM("Playback PCM Mute Speed", wm8350_enum[3]),
- SOC_ENUM("Playback PCM Filter", wm8350_enum[4]),
- SOC_ENUM("Capture PCM Filter", wm8350_enum[5]),
- SOC_ENUM("Capture PCM HP Filter", wm8350_enum[6]),
- SOC_ENUM("Capture ADC Inversion", wm8350_enum[7]),
+ SOC_ENUM("Capture PCM Filter", wm8350_enum[4]),
+ SOC_ENUM("Capture PCM HP Filter", wm8350_enum[5]),
+ SOC_ENUM("Capture ADC Inversion", wm8350_enum[6]),
SOC_WM8350_DOUBLE_R_TLV("Capture PCM Volume",
WM8350_ADC_DIGITAL_VOLUME_L,
WM8350_ADC_DIGITAL_VOLUME_R,
@@ -613,7 +612,7 @@ SOC_DAPM_SINGLE("Switch", WM8350_BEEP_VOLUME, 15, 1, 1);
/* Out4 Capture Mux */
static const struct snd_kcontrol_new wm8350_out4_capture_controls =
-SOC_DAPM_ENUM("Route", wm8350_enum[8]);
+SOC_DAPM_ENUM("Route", wm8350_enum[7]);
static const struct snd_soc_dapm_widget wm8350_dapm_widgets[] = {
@@ -993,6 +992,7 @@ static int wm8350_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai)
{
struct snd_soc_codec *codec = codec_dai->codec;
+ struct wm8350 *wm8350 = codec->control_data;
u16 iface = wm8350_codec_read(codec, WM8350_AI_FORMATING) &
~WM8350_AIF_WL_MASK;
@@ -1012,6 +1012,19 @@ static int wm8350_pcm_hw_params(struct snd_pcm_substream *substream,
}
wm8350_codec_write(codec, WM8350_AI_FORMATING, iface);
+
+ /* The sloping stopband filter is recommended for use with
+ * lower sample rates to improve performance.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (params_rate(params) < 24000)
+ wm8350_set_bits(wm8350, WM8350_DAC_MUTE_VOLUME,
+ WM8350_DAC_SB_FILT);
+ else
+ wm8350_clear_bits(wm8350, WM8350_DAC_MUTE_VOLUME,
+ WM8350_DAC_SB_FILT);
+ }
+
return 0;
}
@@ -1093,10 +1106,14 @@ static int wm8350_set_fll(struct snd_soc_dai *codec_dai,
{
struct snd_soc_codec *codec = codec_dai->codec;
struct wm8350 *wm8350 = codec->control_data;
+ struct wm8350_data *priv = codec->private_data;
struct _fll_div fll_div;
int ret = 0;
u16 fll_1, fll_4;
+ if (freq_in == priv->fll_freq_in && freq_out == priv->fll_freq_out)
+ return 0;
+
/* power down FLL - we need to do this for reconfiguration */
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4,
WM8350_FLL_ENA | WM8350_FLL_OSC_ENA);
@@ -1131,6 +1148,9 @@ static int wm8350_set_fll(struct snd_soc_dai *codec_dai,
wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_OSC_ENA);
wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_ENA);
+ priv->fll_freq_out = freq_out;
+ priv->fll_freq_in = freq_in;
+
return 0;
}
@@ -1660,6 +1680,21 @@ static int __devexit wm8350_codec_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8350_codec_suspend(struct platform_device *pdev, pm_message_t m)
+{
+ return snd_soc_suspend_device(&pdev->dev);
+}
+
+static int wm8350_codec_resume(struct platform_device *pdev)
+{
+ return snd_soc_resume_device(&pdev->dev);
+}
+#else
+#define wm8350_codec_suspend NULL
+#define wm8350_codec_resume NULL
+#endif
+
static struct platform_driver wm8350_codec_driver = {
.driver = {
.name = "wm8350-codec",
@@ -1667,6 +1702,8 @@ static struct platform_driver wm8350_codec_driver = {
},
.probe = wm8350_codec_probe,
.remove = __devexit_p(wm8350_codec_remove),
+ .suspend = wm8350_codec_suspend,
+ .resume = wm8350_codec_resume,
};
static __init int wm8350_init(void)
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 502eefac1ecd..b9ef4d915221 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1022,10 +1022,15 @@ static int wm8400_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
if (freq_in == wm8400->fll_in && freq_out == wm8400->fll_out)
return 0;
- if (freq_out != 0) {
+ if (freq_out) {
ret = fll_factors(wm8400, &factors, freq_in, freq_out);
if (ret != 0)
return ret;
+ } else {
+ /* Bodge GCC 4.4.0 uninitialised variable warning - it
+ * doesn't seem capable of working out that we exit if
+ * freq_out is 0 before any of the uses. */
+ memset(&factors, 0, sizeof(factors));
}
wm8400->fll_out = freq_out;
@@ -1040,7 +1045,7 @@ static int wm8400_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
reg &= ~WM8400_FLL_OSC_ENA;
wm8400_write(codec, WM8400_FLL_CONTROL_1, reg);
- if (freq_out == 0)
+ if (!freq_out)
return 0;
reg &= ~(WM8400_FLL_REF_FREQ | WM8400_FLL_FRATIO_MASK);
@@ -1553,6 +1558,21 @@ static int __exit wm8400_codec_remove(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8400_pdev_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&pdev->dev);
+}
+
+static int wm8400_pdev_resume(struct platform_device *pdev)
+{
+ return snd_soc_resume_device(&pdev->dev);
+}
+#else
+#define wm8400_pdev_suspend NULL
+#define wm8400_pdev_resume NULL
+#endif
+
static struct platform_driver wm8400_codec_driver = {
.driver = {
.name = "wm8400-codec",
@@ -1560,6 +1580,8 @@ static struct platform_driver wm8400_codec_driver = {
},
.probe = wm8400_codec_probe,
.remove = __exit_p(wm8400_codec_remove),
+ .suspend = wm8400_pdev_suspend,
+ .resume = wm8400_pdev_resume,
};
static int __init wm8400_codec_init(void)
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index c8b8dba85890..060d5d06ba95 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -58,55 +58,7 @@ static const u16 wm8510_reg[WM8510_CACHEREGNUM] = {
#define WM8510_POWER1_BIASEN 0x08
#define WM8510_POWER1_BUFIOEN 0x10
-/*
- * read wm8510 register cache
- */
-static inline unsigned int wm8510_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- if (reg == WM8510_RESET)
- return 0;
- if (reg >= WM8510_CACHEREGNUM)
- return -1;
- return cache[reg];
-}
-
-/*
- * write wm8510 register cache
- */
-static inline void wm8510_write_reg_cache(struct snd_soc_codec *codec,
- u16 reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
- if (reg >= WM8510_CACHEREGNUM)
- return;
- cache[reg] = value;
-}
-
-/*
- * write to the WM8510 register space
- */
-static int wm8510_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[2];
-
- /* data is
- * D15..D9 WM8510 register offset
- * D8...D0 register data
- */
- data[0] = (reg << 1) | ((value >> 8) & 0x0001);
- data[1] = value & 0x00ff;
-
- wm8510_write_reg_cache(codec, reg, value);
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
-
-#define wm8510_reset(c) wm8510_write(c, WM8510_RESET, 0)
+#define wm8510_reset(c) snd_soc_write(c, WM8510_RESET, 0)
static const char *wm8510_companding[] = { "Off", "NC", "u-law", "A-law" };
static const char *wm8510_deemp[] = { "None", "32kHz", "44.1kHz", "48kHz" };
@@ -327,27 +279,27 @@ static int wm8510_set_dai_pll(struct snd_soc_dai *codec_dai,
if (freq_in == 0 || freq_out == 0) {
/* Clock CODEC directly from MCLK */
- reg = wm8510_read_reg_cache(codec, WM8510_CLOCK);
- wm8510_write(codec, WM8510_CLOCK, reg & 0x0ff);
+ reg = snd_soc_read(codec, WM8510_CLOCK);
+ snd_soc_write(codec, WM8510_CLOCK, reg & 0x0ff);
/* Turn off PLL */
- reg = wm8510_read_reg_cache(codec, WM8510_POWER1);
- wm8510_write(codec, WM8510_POWER1, reg & 0x1df);
+ reg = snd_soc_read(codec, WM8510_POWER1);
+ snd_soc_write(codec, WM8510_POWER1, reg & 0x1df);
return 0;
}
pll_factors(freq_out*4, freq_in);
- wm8510_write(codec, WM8510_PLLN, (pll_div.pre_div << 4) | pll_div.n);
- wm8510_write(codec, WM8510_PLLK1, pll_div.k >> 18);
- wm8510_write(codec, WM8510_PLLK2, (pll_div.k >> 9) & 0x1ff);
- wm8510_write(codec, WM8510_PLLK3, pll_div.k & 0x1ff);
- reg = wm8510_read_reg_cache(codec, WM8510_POWER1);
- wm8510_write(codec, WM8510_POWER1, reg | 0x020);
+ snd_soc_write(codec, WM8510_PLLN, (pll_div.pre_div << 4) | pll_div.n);
+ snd_soc_write(codec, WM8510_PLLK1, pll_div.k >> 18);
+ snd_soc_write(codec, WM8510_PLLK2, (pll_div.k >> 9) & 0x1ff);
+ snd_soc_write(codec, WM8510_PLLK3, pll_div.k & 0x1ff);
+ reg = snd_soc_read(codec, WM8510_POWER1);
+ snd_soc_write(codec, WM8510_POWER1, reg | 0x020);
/* Run CODEC from PLL instead of MCLK */
- reg = wm8510_read_reg_cache(codec, WM8510_CLOCK);
- wm8510_write(codec, WM8510_CLOCK, reg | 0x100);
+ reg = snd_soc_read(codec, WM8510_CLOCK);
+ snd_soc_write(codec, WM8510_CLOCK, reg | 0x100);
return 0;
}
@@ -363,24 +315,24 @@ static int wm8510_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
switch (div_id) {
case WM8510_OPCLKDIV:
- reg = wm8510_read_reg_cache(codec, WM8510_GPIO) & 0x1cf;
- wm8510_write(codec, WM8510_GPIO, reg | div);
+ reg = snd_soc_read(codec, WM8510_GPIO) & 0x1cf;
+ snd_soc_write(codec, WM8510_GPIO, reg | div);
break;
case WM8510_MCLKDIV:
- reg = wm8510_read_reg_cache(codec, WM8510_CLOCK) & 0x11f;
- wm8510_write(codec, WM8510_CLOCK, reg | div);
+ reg = snd_soc_read(codec, WM8510_CLOCK) & 0x11f;
+ snd_soc_write(codec, WM8510_CLOCK, reg | div);
break;
case WM8510_ADCCLK:
- reg = wm8510_read_reg_cache(codec, WM8510_ADC) & 0x1f7;
- wm8510_write(codec, WM8510_ADC, reg | div);
+ reg = snd_soc_read(codec, WM8510_ADC) & 0x1f7;
+ snd_soc_write(codec, WM8510_ADC, reg | div);
break;
case WM8510_DACCLK:
- reg = wm8510_read_reg_cache(codec, WM8510_DAC) & 0x1f7;
- wm8510_write(codec, WM8510_DAC, reg | div);
+ reg = snd_soc_read(codec, WM8510_DAC) & 0x1f7;
+ snd_soc_write(codec, WM8510_DAC, reg | div);
break;
case WM8510_BCLKDIV:
- reg = wm8510_read_reg_cache(codec, WM8510_CLOCK) & 0x1e3;
- wm8510_write(codec, WM8510_CLOCK, reg | div);
+ reg = snd_soc_read(codec, WM8510_CLOCK) & 0x1e3;
+ snd_soc_write(codec, WM8510_CLOCK, reg | div);
break;
default:
return -EINVAL;
@@ -394,7 +346,7 @@ static int wm8510_set_dai_fmt(struct snd_soc_dai *codec_dai,
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 iface = 0;
- u16 clk = wm8510_read_reg_cache(codec, WM8510_CLOCK) & 0x1fe;
+ u16 clk = snd_soc_read(codec, WM8510_CLOCK) & 0x1fe;
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -441,8 +393,8 @@ static int wm8510_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8510_write(codec, WM8510_IFACE, iface);
- wm8510_write(codec, WM8510_CLOCK, clk);
+ snd_soc_write(codec, WM8510_IFACE, iface);
+ snd_soc_write(codec, WM8510_CLOCK, clk);
return 0;
}
@@ -453,8 +405,8 @@ static int wm8510_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- u16 iface = wm8510_read_reg_cache(codec, WM8510_IFACE) & 0x19f;
- u16 adn = wm8510_read_reg_cache(codec, WM8510_ADD) & 0x1f1;
+ u16 iface = snd_soc_read(codec, WM8510_IFACE) & 0x19f;
+ u16 adn = snd_soc_read(codec, WM8510_ADD) & 0x1f1;
/* bit size */
switch (params_format(params)) {
@@ -493,20 +445,20 @@ static int wm8510_pcm_hw_params(struct snd_pcm_substream *substream,
break;
}
- wm8510_write(codec, WM8510_IFACE, iface);
- wm8510_write(codec, WM8510_ADD, adn);
+ snd_soc_write(codec, WM8510_IFACE, iface);
+ snd_soc_write(codec, WM8510_ADD, adn);
return 0;
}
static int wm8510_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u16 mute_reg = wm8510_read_reg_cache(codec, WM8510_DAC) & 0xffbf;
+ u16 mute_reg = snd_soc_read(codec, WM8510_DAC) & 0xffbf;
if (mute)
- wm8510_write(codec, WM8510_DAC, mute_reg | 0x40);
+ snd_soc_write(codec, WM8510_DAC, mute_reg | 0x40);
else
- wm8510_write(codec, WM8510_DAC, mute_reg);
+ snd_soc_write(codec, WM8510_DAC, mute_reg);
return 0;
}
@@ -514,13 +466,13 @@ static int wm8510_mute(struct snd_soc_dai *dai, int mute)
static int wm8510_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- u16 power1 = wm8510_read_reg_cache(codec, WM8510_POWER1) & ~0x3;
+ u16 power1 = snd_soc_read(codec, WM8510_POWER1) & ~0x3;
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
power1 |= 0x1; /* VMID 50k */
- wm8510_write(codec, WM8510_POWER1, power1);
+ snd_soc_write(codec, WM8510_POWER1, power1);
break;
case SND_SOC_BIAS_STANDBY:
@@ -528,18 +480,18 @@ static int wm8510_set_bias_level(struct snd_soc_codec *codec,
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* Initial cap charge at VMID 5k */
- wm8510_write(codec, WM8510_POWER1, power1 | 0x3);
+ snd_soc_write(codec, WM8510_POWER1, power1 | 0x3);
mdelay(100);
}
power1 |= 0x2; /* VMID 500k */
- wm8510_write(codec, WM8510_POWER1, power1);
+ snd_soc_write(codec, WM8510_POWER1, power1);
break;
case SND_SOC_BIAS_OFF:
- wm8510_write(codec, WM8510_POWER1, 0);
- wm8510_write(codec, WM8510_POWER2, 0);
- wm8510_write(codec, WM8510_POWER3, 0);
+ snd_soc_write(codec, WM8510_POWER1, 0);
+ snd_soc_write(codec, WM8510_POWER2, 0);
+ snd_soc_write(codec, WM8510_POWER3, 0);
break;
}
@@ -577,6 +529,7 @@ struct snd_soc_dai wm8510_dai = {
.rates = WM8510_RATES,
.formats = WM8510_FORMATS,},
.ops = &wm8510_dai_ops,
+ .symmetric_rates = 1,
};
EXPORT_SYMBOL_GPL(wm8510_dai);
@@ -612,15 +565,14 @@ static int wm8510_resume(struct platform_device *pdev)
* initialise the WM8510 driver
* register the mixer and dsp interfaces with the kernel
*/
-static int wm8510_init(struct snd_soc_device *socdev)
+static int wm8510_init(struct snd_soc_device *socdev,
+ enum snd_soc_control_type control)
{
struct snd_soc_codec *codec = socdev->card->codec;
int ret = 0;
codec->name = "WM8510";
codec->owner = THIS_MODULE;
- codec->read = wm8510_read_reg_cache;
- codec->write = wm8510_write;
codec->set_bias_level = wm8510_set_bias_level;
codec->dai = &wm8510_dai;
codec->num_dai = 1;
@@ -630,13 +582,20 @@ static int wm8510_init(struct snd_soc_device *socdev)
if (codec->reg_cache == NULL)
return -ENOMEM;
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ printk(KERN_ERR "wm8510: failed to set cache I/O: %d\n",
+ ret);
+ goto err;
+ }
+
wm8510_reset(codec);
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
printk(KERN_ERR "wm8510: failed to create pcms\n");
- goto pcm_err;
+ goto err;
}
/* power on device */
@@ -655,7 +614,7 @@ static int wm8510_init(struct snd_soc_device *socdev)
card_err:
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
-pcm_err:
+err:
kfree(codec->reg_cache);
return ret;
}
@@ -678,7 +637,7 @@ static int wm8510_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, codec);
codec->control_data = i2c;
- ret = wm8510_init(socdev);
+ ret = wm8510_init(socdev, SND_SOC_I2C);
if (ret < 0)
pr_err("failed to initialise WM8510\n");
@@ -758,7 +717,7 @@ static int __devinit wm8510_spi_probe(struct spi_device *spi)
codec->control_data = spi;
- ret = wm8510_init(socdev);
+ ret = wm8510_init(socdev, SND_SOC_SPI);
if (ret < 0)
dev_err(&spi->dev, "failed to initialise WM8510\n");
@@ -779,30 +738,6 @@ static struct spi_driver wm8510_spi_driver = {
.probe = wm8510_spi_probe,
.remove = __devexit_p(wm8510_spi_remove),
};
-
-static int wm8510_spi_write(struct spi_device *spi, const char *data, int len)
-{
- struct spi_transfer t;
- struct spi_message m;
- u8 msg[2];
-
- if (len <= 0)
- return 0;
-
- msg[0] = data[0];
- msg[1] = data[1];
-
- spi_message_init(&m);
- memset(&t, 0, (sizeof t));
-
- t.tx_buf = &msg[0];
- t.len = len;
-
- spi_message_add_tail(&t, &m);
- spi_sync(spi, &m);
-
- return len;
-}
#endif /* CONFIG_SPI_MASTER */
static int wm8510_probe(struct platform_device *pdev)
@@ -827,13 +762,11 @@ static int wm8510_probe(struct platform_device *pdev)
wm8510_socdev = socdev;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
if (setup->i2c_address) {
- codec->hw_write = (hw_write_t)i2c_master_send;
ret = wm8510_add_i2c_device(pdev, setup);
}
#endif
#if defined(CONFIG_SPI_MASTER)
if (setup->spi) {
- codec->hw_write = (hw_write_t)wm8510_spi_write;
ret = spi_register_driver(&wm8510_spi_driver);
if (ret != 0)
printk(KERN_ERR "can't add spi driver");
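The wm8510 changes above drop the driver's private register I/O helpers in
favour of the shared ASoC cache-I/O layer; a minimal sketch of the resulting
pattern, using the same 7-bit register / 9-bit value widths passed to
snd_soc_codec_set_cache_io() above:

	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
	if (ret < 0)
		return ret;

	reg = snd_soc_read(codec, WM8510_CLOCK);		/* served from the register cache */
	snd_soc_write(codec, WM8510_CLOCK, reg | 0x100);	/* updates cache and hardware */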
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
new file mode 100644
index 000000000000..25870a4652fb
--- /dev/null
+++ b/sound/soc/codecs/wm8523.c
@@ -0,0 +1,699 @@
+/*
+ * wm8523.c -- WM8523 ALSA SoC Audio driver
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8523.h"
+
+static struct snd_soc_codec *wm8523_codec;
+struct snd_soc_codec_device soc_codec_dev_wm8523;
+
+#define WM8523_NUM_SUPPLIES 2
+static const char *wm8523_supply_names[WM8523_NUM_SUPPLIES] = {
+ "AVDD",
+ "LINEVDD",
+};
+
+#define WM8523_NUM_RATES 7
+
+/* codec private data */
+struct wm8523_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[WM8523_REGISTER_COUNT];
+ struct regulator_bulk_data supplies[WM8523_NUM_SUPPLIES];
+ unsigned int sysclk;
+ unsigned int rate_constraint_list[WM8523_NUM_RATES];
+ struct snd_pcm_hw_constraint_list rate_constraint;
+};
+
+static const u16 wm8523_reg[WM8523_REGISTER_COUNT] = {
+ 0x8523, /* R0 - DEVICE_ID */
+ 0x0001, /* R1 - REVISION */
+ 0x0000, /* R2 - PSCTRL1 */
+ 0x1812, /* R3 - AIF_CTRL1 */
+ 0x0000, /* R4 - AIF_CTRL2 */
+ 0x0001, /* R5 - DAC_CTRL3 */
+ 0x0190, /* R6 - DAC_GAINL */
+ 0x0190, /* R7 - DAC_GAINR */
+ 0x0000, /* R8 - ZERO_DETECT */
+};
+
+static int wm8523_volatile_register(unsigned int reg)
+{
+ switch (reg) {
+ case WM8523_DEVICE_ID:
+ case WM8523_REVISION:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
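/*
 * Rough sketch of how a cache-aware read can honour the hook above;
 * illustrative only, not the actual ASoC core implementation, and
 * hw_read() stands in for the bus-specific read:
 */
if (codec->volatile_register && codec->volatile_register(reg))
	val = hw_read(codec, reg);		/* volatile: always read the device */
else
	val = ((u16 *)codec->reg_cache)[reg];	/* otherwise serve from the cache */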
+static int wm8523_reset(struct snd_soc_codec *codec)
+{
+ return snd_soc_write(codec, WM8523_DEVICE_ID, 0);
+}
+
+static const DECLARE_TLV_DB_SCALE(dac_tlv, -10000, 25, 0);
+
+static const char *wm8523_zd_count_text[] = {
+ "1024",
+ "2048",
+};
+
+static const struct soc_enum wm8523_zc_count =
+ SOC_ENUM_SINGLE(WM8523_ZERO_DETECT, 0, 2, wm8523_zd_count_text);
+
+static const struct snd_kcontrol_new wm8523_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Playback Volume", WM8523_DAC_GAINL, WM8523_DAC_GAINR,
+ 0, 448, 0, dac_tlv),
+SOC_SINGLE("ZC Switch", WM8523_DAC_CTRL3, 4, 1, 0),
+SOC_SINGLE("Playback Deemphasis Switch", WM8523_AIF_CTRL1, 8, 1, 0),
+SOC_DOUBLE("Playback Switch", WM8523_DAC_CTRL3, 2, 3, 1, 1),
+SOC_SINGLE("Volume Ramp Up Switch", WM8523_DAC_CTRL3, 1, 1, 0),
+SOC_SINGLE("Volume Ramp Down Switch", WM8523_DAC_CTRL3, 0, 1, 0),
+SOC_ENUM("Zero Detect Count", wm8523_zc_count),
+};
+
+static const struct snd_soc_dapm_widget wm8523_dapm_widgets[] = {
+SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_OUTPUT("LINEVOUTL"),
+SND_SOC_DAPM_OUTPUT("LINEVOUTR"),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+ { "LINEVOUTL", NULL, "DAC" },
+ { "LINEVOUTR", NULL, "DAC" },
+};
+
+static int wm8523_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(codec, wm8523_dapm_widgets,
+ ARRAY_SIZE(wm8523_dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+
+ snd_soc_dapm_new_widgets(codec);
+ return 0;
+}
+
+static struct {
+ int value;
+ int ratio;
+} lrclk_ratios[WM8523_NUM_RATES] = {
+ { 1, 128 },
+ { 2, 192 },
+ { 3, 256 },
+ { 4, 384 },
+ { 5, 512 },
+ { 6, 768 },
+ { 7, 1152 },
+};
+
+static int wm8523_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8523_priv *wm8523 = codec->private_data;
+
+ /* The set of sample rates that can be supported depends on the
+ * MCLK supplied to the CODEC - enforce this.
+ */
+ if (!wm8523->sysclk) {
+ dev_err(codec->dev,
+ "No MCLK configured, call set_sysclk() on init\n");
+ return -EINVAL;
+ }
+
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &wm8523->rate_constraint);
+
+ return 0;
+}
+
+static int wm8523_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct wm8523_priv *wm8523 = codec->private_data;
+ int i;
+ u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1);
+ u16 aifctrl2 = snd_soc_read(codec, WM8523_AIF_CTRL2);
+
+ /* Find a supported LRCLK ratio */
+ for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) {
+ if (wm8523->sysclk / params_rate(params) ==
+ lrclk_ratios[i].ratio)
+ break;
+ }
+
+ /* Should never happen, should be handled by constraints */
+ if (i == ARRAY_SIZE(lrclk_ratios)) {
+ dev_err(codec->dev, "MCLK/fs ratio %d unsupported\n",
+ wm8523->sysclk / params_rate(params));
+ return -EINVAL;
+ }
+
+ aifctrl2 &= ~WM8523_SR_MASK;
+ aifctrl2 |= lrclk_ratios[i].value;
+
+ aifctrl1 &= ~WM8523_WL_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ aifctrl1 |= 0x8;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ aifctrl1 |= 0x10;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ aifctrl1 |= 0x18;
+ break;
+ }
+
+ snd_soc_write(codec, WM8523_AIF_CTRL1, aifctrl1);
+ snd_soc_write(codec, WM8523_AIF_CTRL2, aifctrl2);
+
+ return 0;
+}
+
+static int wm8523_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct wm8523_priv *wm8523 = codec->private_data;
+ unsigned int val;
+ int i;
+
+ wm8523->sysclk = freq;
+
+ wm8523->rate_constraint.count = 0;
+ for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) {
+ val = freq / lrclk_ratios[i].ratio;
+ /* Check that it's a standard rate since core can't
+ * cope with others and having the odd rates confuses
+ * constraint matching.
+ */
+ switch (val) {
+ case 8000:
+ case 11025:
+ case 16000:
+ case 22050:
+ case 32000:
+ case 44100:
+ case 48000:
+ case 64000:
+ case 88200:
+ case 96000:
+ case 176400:
+ case 192000:
+ dev_dbg(codec->dev, "Supported sample rate: %dHz\n",
+ val);
+ wm8523->rate_constraint_list[i] = val;
+ wm8523->rate_constraint.count++;
+ break;
+ default:
+ dev_dbg(codec->dev, "Skipping sample rate: %dHz\n",
+ val);
+ }
+ }
+
+ /* Need at least one supported rate... */
+ if (wm8523->rate_constraint.count == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+
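/*
 * Sketch of the machine-driver side of the constraint scheme above: the
 * codec must learn its MCLK before a stream is opened (startup() rejects a
 * zero sysclk), typically from the DAI link init callback.  The 12.288MHz
 * value and clk_id 0 are placeholder assumptions:
 */
static int board_wm8523_init(struct snd_soc_codec *codec)
{
	/* 12.288MHz covers e.g. 96kHz (128fs), 48kHz (256fs) and 32kHz (384fs) */
	return snd_soc_dai_set_sysclk(&wm8523_dai, 0, 12288000,
				      SND_SOC_CLOCK_IN);
}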
+static int wm8523_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1);
+
+ aifctrl1 &= ~(WM8523_BCLK_INV_MASK | WM8523_LRCLK_INV_MASK |
+ WM8523_FMT_MASK | WM8523_AIF_MSTR_MASK);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aifctrl1 |= WM8523_AIF_MSTR;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ aifctrl1 |= 0x0002;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ aifctrl1 |= 0x0001;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ aifctrl1 |= 0x0003;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ aifctrl1 |= 0x0023;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ aifctrl1 |= WM8523_BCLK_INV | WM8523_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aifctrl1 |= WM8523_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ aifctrl1 |= WM8523_LRCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_write(codec, WM8523_AIF_CTRL1, aifctrl1);
+
+ return 0;
+}
+
+static int wm8523_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct wm8523_priv *wm8523 = codec->private_data;
+ int ret, i;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ /* Full power on */
+ snd_soc_update_bits(codec, WM8523_PSCTRL1,
+ WM8523_SYS_ENA_MASK, 3);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
+ wm8523->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Initial power up */
+ snd_soc_update_bits(codec, WM8523_PSCTRL1,
+ WM8523_SYS_ENA_MASK, 1);
+
+ /* Sync back default/cached values */
+ for (i = WM8523_AIF_CTRL1;
+ i < WM8523_MAX_REGISTER; i++)
+ snd_soc_write(codec, i, wm8523->reg_cache[i]);
+
+
+ msleep(100);
+ }
+
+ /* Power up to mute */
+ snd_soc_update_bits(codec, WM8523_PSCTRL1,
+ WM8523_SYS_ENA_MASK, 2);
+
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ /* The chip runs through the power down sequence for us. */
+ snd_soc_update_bits(codec, WM8523_PSCTRL1,
+ WM8523_SYS_ENA_MASK, 0);
+ msleep(100);
+
+ regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies),
+ wm8523->supplies);
+ break;
+ }
+ codec->bias_level = level;
+ return 0;
+}
+
+#define WM8523_RATES SNDRV_PCM_RATE_8000_192000
+
+#define WM8523_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8523_dai_ops = {
+ .startup = wm8523_startup,
+ .hw_params = wm8523_hw_params,
+ .set_sysclk = wm8523_set_dai_sysclk,
+ .set_fmt = wm8523_set_dai_fmt,
+};
+
+struct snd_soc_dai wm8523_dai = {
+ .name = "WM8523",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2, /* Mono modes not yet supported */
+ .channels_max = 2,
+ .rates = WM8523_RATES,
+ .formats = WM8523_FORMATS,
+ },
+ .ops = &wm8523_dai_ops,
+};
+EXPORT_SYMBOL_GPL(wm8523_dai);
+
+#ifdef CONFIG_PM
+static int wm8523_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static int wm8523_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define wm8523_suspend NULL
+#define wm8523_resume NULL
+#endif
+
+static int wm8523_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm8523_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm8523_codec;
+ codec = wm8523_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, wm8523_snd_controls,
+ ARRAY_SIZE(wm8523_snd_controls));
+ wm8523_add_widgets(codec);
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+static int wm8523_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_wm8523 = {
+ .probe = wm8523_probe,
+ .remove = wm8523_remove,
+ .suspend = wm8523_suspend,
+ .resume = wm8523_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8523);
+
+static int wm8523_register(struct wm8523_priv *wm8523,
+ enum snd_soc_control_type control)
+{
+ int ret;
+ struct snd_soc_codec *codec = &wm8523->codec;
+ int i;
+
+ if (wm8523_codec) {
+ dev_err(codec->dev, "Another WM8523 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = wm8523;
+ codec->name = "WM8523";
+ codec->owner = THIS_MODULE;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8523_set_bias_level;
+ codec->dai = &wm8523_dai;
+ codec->num_dai = 1;
+ codec->reg_cache_size = WM8523_REGISTER_COUNT;
+ codec->reg_cache = &wm8523->reg_cache;
+ codec->volatile_register = wm8523_volatile_register;
+
+ wm8523->rate_constraint.list = &wm8523->rate_constraint_list[0];
+ wm8523->rate_constraint.count =
+ ARRAY_SIZE(wm8523->rate_constraint_list);
+
+ memcpy(codec->reg_cache, wm8523_reg, sizeof(wm8523_reg));
+
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wm8523->supplies); i++)
+ wm8523->supplies[i].supply = wm8523_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8523->supplies),
+ wm8523->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ goto err;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
+ wm8523->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_get;
+ }
+
+ ret = snd_soc_read(codec, WM8523_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to read ID register\n");
+ goto err_enable;
+ }
+ if (ret != wm8523_reg[WM8523_DEVICE_ID]) {
+ dev_err(codec->dev, "Device is not a WM8523, ID is %x\n", ret);
+ ret = -EINVAL;
+ goto err_enable;
+ }
+
+ ret = snd_soc_read(codec, WM8523_REVISION);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to read revision register\n");
+ goto err_enable;
+ }
+ dev_info(codec->dev, "revision %c\n",
+ (ret & WM8523_CHIP_REV_MASK) + 'A');
+
+ ret = wm8523_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ goto err_enable;
+ }
+
+ wm8523_dai.dev = codec->dev;
+
+ /* Change some default settings - latch VU and enable ZC */
+ wm8523->reg_cache[WM8523_DAC_GAINR] |= WM8523_DACR_VU;
+ wm8523->reg_cache[WM8523_DAC_CTRL3] |= WM8523_ZC;
+
+ wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ /* Bias level configuration will have done an extra enable */
+ regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
+
+ wm8523_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&wm8523_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ return ret;
+ }
+
+ return 0;
+
+err_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
+err_get:
+ regulator_bulk_free(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
+err:
+ kfree(wm8523);
+ return ret;
+}
+
+static void wm8523_unregister(struct wm8523_priv *wm8523)
+{
+ wm8523_set_bias_level(&wm8523->codec, SND_SOC_BIAS_OFF);
+ regulator_bulk_free(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
+ snd_soc_unregister_dai(&wm8523_dai);
+ snd_soc_unregister_codec(&wm8523->codec);
+ kfree(wm8523);
+ wm8523_codec = NULL;
+}
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8523_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8523_priv *wm8523;
+ struct snd_soc_codec *codec;
+
+ wm8523 = kzalloc(sizeof(struct wm8523_priv), GFP_KERNEL);
+ if (wm8523 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8523->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+
+ i2c_set_clientdata(i2c, wm8523);
+ codec->control_data = i2c;
+
+ codec->dev = &i2c->dev;
+
+ return wm8523_register(wm8523, SND_SOC_I2C);
+}
+
+static __devexit int wm8523_i2c_remove(struct i2c_client *client)
+{
+ struct wm8523_priv *wm8523 = i2c_get_clientdata(client);
+ wm8523_unregister(wm8523);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8523_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&i2c->dev);
+}
+
+static int wm8523_i2c_resume(struct i2c_client *i2c)
+{
+ return snd_soc_resume_device(&i2c->dev);
+}
+#else
+#define wm8523_i2c_suspend NULL
+#define wm8523_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id wm8523_i2c_id[] = {
+ { "wm8523", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8523_i2c_id);
+
+static struct i2c_driver wm8523_i2c_driver = {
+ .driver = {
+ .name = "WM8523",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8523_i2c_probe,
+ .remove = __devexit_p(wm8523_i2c_remove),
+ .suspend = wm8523_i2c_suspend,
+ .resume = wm8523_i2c_resume,
+ .id_table = wm8523_i2c_id,
+};
+#endif
+
+static int __init wm8523_modinit(void)
+{
+ int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&wm8523_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8523 I2C driver: %d\n",
+ ret);
+ }
+#endif
+ return 0;
+}
+module_init(wm8523_modinit);
+
+static void __exit wm8523_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&wm8523_i2c_driver);
+#endif
+}
+module_exit(wm8523_exit);
+
+MODULE_DESCRIPTION("ASoC WM8523 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8523.h b/sound/soc/codecs/wm8523.h
new file mode 100644
index 000000000000..1aa9ce3e1357
--- /dev/null
+++ b/sound/soc/codecs/wm8523.h
@@ -0,0 +1,160 @@
+/*
+ * wm8523.h -- WM8523 ASoC driver
+ *
+ * Copyright 2009 Wolfson Microelectronics, plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * Based on wm8753.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8523_H
+#define _WM8523_H
+
+/*
+ * Register values.
+ */
+#define WM8523_DEVICE_ID 0x00
+#define WM8523_REVISION 0x01
+#define WM8523_PSCTRL1 0x02
+#define WM8523_AIF_CTRL1 0x03
+#define WM8523_AIF_CTRL2 0x04
+#define WM8523_DAC_CTRL3 0x05
+#define WM8523_DAC_GAINL 0x06
+#define WM8523_DAC_GAINR 0x07
+#define WM8523_ZERO_DETECT 0x08
+
+#define WM8523_REGISTER_COUNT 9
+#define WM8523_MAX_REGISTER 0x08
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - DEVICE_ID
+ */
+#define WM8523_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */
+#define WM8523_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */
+#define WM8523_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */
+
+/*
+ * R1 (0x01) - REVISION
+ */
+#define WM8523_CHIP_REV_MASK 0x0007 /* CHIP_REV - [2:0] */
+#define WM8523_CHIP_REV_SHIFT 0 /* CHIP_REV - [2:0] */
+#define WM8523_CHIP_REV_WIDTH 3 /* CHIP_REV - [2:0] */
+
+/*
+ * R2 (0x02) - PSCTRL1
+ */
+#define WM8523_SYS_ENA_MASK 0x0003 /* SYS_ENA - [1:0] */
+#define WM8523_SYS_ENA_SHIFT 0 /* SYS_ENA - [1:0] */
+#define WM8523_SYS_ENA_WIDTH 2 /* SYS_ENA - [1:0] */
+
+/*
+ * R3 (0x03) - AIF_CTRL1
+ */
+#define WM8523_TDM_MODE_MASK 0x1800 /* TDM_MODE - [12:11] */
+#define WM8523_TDM_MODE_SHIFT 11 /* TDM_MODE - [12:11] */
+#define WM8523_TDM_MODE_WIDTH 2 /* TDM_MODE - [12:11] */
+#define WM8523_TDM_SLOT_MASK 0x0600 /* TDM_SLOT - [10:9] */
+#define WM8523_TDM_SLOT_SHIFT 9 /* TDM_SLOT - [10:9] */
+#define WM8523_TDM_SLOT_WIDTH 2 /* TDM_SLOT - [10:9] */
+#define WM8523_DEEMPH 0x0100 /* DEEMPH */
+#define WM8523_DEEMPH_MASK 0x0100 /* DEEMPH */
+#define WM8523_DEEMPH_SHIFT 8 /* DEEMPH */
+#define WM8523_DEEMPH_WIDTH 1 /* DEEMPH */
+#define WM8523_AIF_MSTR 0x0080 /* AIF_MSTR */
+#define WM8523_AIF_MSTR_MASK 0x0080 /* AIF_MSTR */
+#define WM8523_AIF_MSTR_SHIFT 7 /* AIF_MSTR */
+#define WM8523_AIF_MSTR_WIDTH 1 /* AIF_MSTR */
+#define WM8523_LRCLK_INV 0x0040 /* LRCLK_INV */
+#define WM8523_LRCLK_INV_MASK 0x0040 /* LRCLK_INV */
+#define WM8523_LRCLK_INV_SHIFT 6 /* LRCLK_INV */
+#define WM8523_LRCLK_INV_WIDTH 1 /* LRCLK_INV */
+#define WM8523_BCLK_INV 0x0020 /* BCLK_INV */
+#define WM8523_BCLK_INV_MASK 0x0020 /* BCLK_INV */
+#define WM8523_BCLK_INV_SHIFT 5 /* BCLK_INV */
+#define WM8523_BCLK_INV_WIDTH 1 /* BCLK_INV */
+#define WM8523_WL_MASK 0x0018 /* WL - [4:3] */
+#define WM8523_WL_SHIFT 3 /* WL - [4:3] */
+#define WM8523_WL_WIDTH 2 /* WL - [4:3] */
+#define WM8523_FMT_MASK 0x0007 /* FMT - [2:0] */
+#define WM8523_FMT_SHIFT 0 /* FMT - [2:0] */
+#define WM8523_FMT_WIDTH 3 /* FMT - [2:0] */
+
+/*
+ * R4 (0x04) - AIF_CTRL2
+ */
+#define WM8523_DAC_OP_MUX_MASK 0x00C0 /* DAC_OP_MUX - [7:6] */
+#define WM8523_DAC_OP_MUX_SHIFT 6 /* DAC_OP_MUX - [7:6] */
+#define WM8523_DAC_OP_MUX_WIDTH 2 /* DAC_OP_MUX - [7:6] */
+#define WM8523_BCLKDIV_MASK 0x0038 /* BCLKDIV - [5:3] */
+#define WM8523_BCLKDIV_SHIFT 3 /* BCLKDIV - [5:3] */
+#define WM8523_BCLKDIV_WIDTH 3 /* BCLKDIV - [5:3] */
+#define WM8523_SR_MASK 0x0007 /* SR - [2:0] */
+#define WM8523_SR_SHIFT 0 /* SR - [2:0] */
+#define WM8523_SR_WIDTH 3 /* SR - [2:0] */
+
+/*
+ * R5 (0x05) - DAC_CTRL3
+ */
+#define WM8523_ZC 0x0010 /* ZC */
+#define WM8523_ZC_MASK 0x0010 /* ZC */
+#define WM8523_ZC_SHIFT 4 /* ZC */
+#define WM8523_ZC_WIDTH 1 /* ZC */
+#define WM8523_DACR 0x0008 /* DACR */
+#define WM8523_DACR_MASK 0x0008 /* DACR */
+#define WM8523_DACR_SHIFT 3 /* DACR */
+#define WM8523_DACR_WIDTH 1 /* DACR */
+#define WM8523_DACL 0x0004 /* DACL */
+#define WM8523_DACL_MASK 0x0004 /* DACL */
+#define WM8523_DACL_SHIFT 2 /* DACL */
+#define WM8523_DACL_WIDTH 1 /* DACL */
+#define WM8523_VOL_UP_RAMP 0x0002 /* VOL_UP_RAMP */
+#define WM8523_VOL_UP_RAMP_MASK 0x0002 /* VOL_UP_RAMP */
+#define WM8523_VOL_UP_RAMP_SHIFT 1 /* VOL_UP_RAMP */
+#define WM8523_VOL_UP_RAMP_WIDTH 1 /* VOL_UP_RAMP */
+#define WM8523_VOL_DOWN_RAMP 0x0001 /* VOL_DOWN_RAMP */
+#define WM8523_VOL_DOWN_RAMP_MASK 0x0001 /* VOL_DOWN_RAMP */
+#define WM8523_VOL_DOWN_RAMP_SHIFT 0 /* VOL_DOWN_RAMP */
+#define WM8523_VOL_DOWN_RAMP_WIDTH 1 /* VOL_DOWN_RAMP */
+
+/*
+ * R6 (0x06) - DAC_GAINL
+ */
+#define WM8523_DACL_VU 0x0200 /* DACL_VU */
+#define WM8523_DACL_VU_MASK 0x0200 /* DACL_VU */
+#define WM8523_DACL_VU_SHIFT 9 /* DACL_VU */
+#define WM8523_DACL_VU_WIDTH 1 /* DACL_VU */
+#define WM8523_DACL_VOL_MASK 0x01FF /* DACL_VOL - [8:0] */
+#define WM8523_DACL_VOL_SHIFT 0 /* DACL_VOL - [8:0] */
+#define WM8523_DACL_VOL_WIDTH 9 /* DACL_VOL - [8:0] */
+
+/*
+ * R7 (0x07) - DAC_GAINR
+ */
+#define WM8523_DACR_VU 0x0200 /* DACR_VU */
+#define WM8523_DACR_VU_MASK 0x0200 /* DACR_VU */
+#define WM8523_DACR_VU_SHIFT 9 /* DACR_VU */
+#define WM8523_DACR_VU_WIDTH 1 /* DACR_VU */
+#define WM8523_DACR_VOL_MASK 0x01FF /* DACR_VOL - [8:0] */
+#define WM8523_DACR_VOL_SHIFT 0 /* DACR_VOL - [8:0] */
+#define WM8523_DACR_VOL_WIDTH 9 /* DACR_VOL - [8:0] */
+
+/*
+ * R8 (0x08) - ZERO_DETECT
+ */
+#define WM8523_ZD_COUNT_MASK 0x0003 /* ZD_COUNT - [1:0] */
+#define WM8523_ZD_COUNT_SHIFT 0 /* ZD_COUNT - [1:0] */
+#define WM8523_ZD_COUNT_WIDTH 2 /* ZD_COUNT - [1:0] */
+
+extern struct snd_soc_dai wm8523_dai;
+extern struct snd_soc_codec_device soc_codec_dev_wm8523;
+
+#endif
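
The _MASK/_SHIFT/_WIDTH triplets above follow the usual Wolfson header convention: the mask selects a field within the 16-bit register, the shift gives its bit position and the width its size in bits. As a minimal sketch of how such a field is normally programmed through the ASoC cache helpers -- the WM8523_AIF_CTRL2 register constant and the wm8523_set_sr() helper are assumed for illustration and are not part of this patch:

	/* Sketch: write a sample-rate code into the SR field of AIFCTRL2 (R4).
	 * snd_soc_update_bits() read-modify-writes the cached register.
	 */
	static int wm8523_set_sr(struct snd_soc_codec *codec, unsigned int sr)
	{
		if (sr > (WM8523_SR_MASK >> WM8523_SR_SHIFT))
			return -EINVAL;
		return snd_soc_update_bits(codec, WM8523_AIF_CTRL2,
					   WM8523_SR_MASK,
					   sr << WM8523_SR_SHIFT);
	}
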
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 86c4b24db817..6bded8c78150 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -24,6 +24,8 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -187,82 +189,22 @@ struct pll_state {
unsigned int out;
};
+#define WM8580_NUM_SUPPLIES 3
+static const char *wm8580_supply_names[WM8580_NUM_SUPPLIES] = {
+ "AVDD",
+ "DVDD",
+ "PVDD",
+};
+
/* codec private data */
struct wm8580_priv {
struct snd_soc_codec codec;
+ struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
u16 reg_cache[WM8580_MAX_REGISTER + 1];
struct pll_state a;
struct pll_state b;
};
-
-/*
- * read wm8580 register cache
- */
-static inline unsigned int wm8580_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- BUG_ON(reg >= ARRAY_SIZE(wm8580_reg));
- return cache[reg];
-}
-
-/*
- * write wm8580 register cache
- */
-static inline void wm8580_write_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
-
- cache[reg] = value;
-}
-
-/*
- * write to the WM8580 register space
- */
-static int wm8580_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[2];
-
- BUG_ON(reg >= ARRAY_SIZE(wm8580_reg));
-
- /* Registers are 9 bits wide */
- value &= 0x1ff;
-
- switch (reg) {
- case WM8580_RESET:
- /* Uncached */
- break;
- default:
- if (value == wm8580_read_reg_cache(codec, reg))
- return 0;
- }
-
- /* data is
- * D15..D9 WM8580 register offset
- * D8...D0 register data
- */
- data[0] = (reg << 1) | ((value >> 8) & 0x0001);
- data[1] = value & 0x00ff;
-
- wm8580_write_reg_cache(codec, reg, value);
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
-
-static inline unsigned int wm8580_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- switch (reg) {
- default:
- return wm8580_read_reg_cache(codec, reg);
- }
-}
-
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
static int wm8580_out_vu(struct snd_kcontrol *kcontrol,
@@ -271,25 +213,22 @@ static int wm8580_out_vu(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ u16 *reg_cache = codec->reg_cache;
unsigned int reg = mc->reg;
unsigned int reg2 = mc->rreg;
int ret;
- u16 val;
/* Clear the register cache so we write without VU set */
- wm8580_write_reg_cache(codec, reg, 0);
- wm8580_write_reg_cache(codec, reg2, 0);
+ reg_cache[reg] = 0;
+ reg_cache[reg2] = 0;
ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
if (ret < 0)
return ret;
/* Now write again with the volume update bit set */
- val = wm8580_read_reg_cache(codec, reg);
- wm8580_write(codec, reg, val | 0x0100);
-
- val = wm8580_read_reg_cache(codec, reg2);
- wm8580_write(codec, reg2, val | 0x0100);
+ snd_soc_update_bits(codec, reg, 0x100, 0x100);
+ snd_soc_update_bits(codec, reg2, 0x100, 0x100);
return 0;
}
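
The rewritten wm8580_out_vu() above relies on snd_soc_update_bits() to set the volume-update (VU) bit once the cache-only writes are done. Roughly, that helper performs a read-modify-write through the register cache and skips the bus transaction when nothing changes; a simplified sketch of the behaviour, not the soc-core implementation itself:

	static int update_bits_sketch(struct snd_soc_codec *codec,
				      unsigned short reg, unsigned int mask,
				      unsigned int value)
	{
		unsigned int old, new;
		int ret;

		old = snd_soc_read(codec, reg);
		new = (old & ~mask) | (value & mask);
		if (old == new)
			return 0;		/* no change, no bus I/O */

		ret = snd_soc_write(codec, reg, new);
		if (ret < 0)
			return ret;
		return 1;			/* register was updated */
	}
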
@@ -512,27 +451,27 @@ static int wm8580_set_dai_pll(struct snd_soc_dai *codec_dai,
/* Always disable the PLL - it is not safe to leave it running
* while reprogramming it.
*/
- reg = wm8580_read(codec, WM8580_PWRDN2);
- wm8580_write(codec, WM8580_PWRDN2, reg | pwr_mask);
+ reg = snd_soc_read(codec, WM8580_PWRDN2);
+ snd_soc_write(codec, WM8580_PWRDN2, reg | pwr_mask);
if (!freq_in || !freq_out)
return 0;
- wm8580_write(codec, WM8580_PLLA1 + offset, pll_div.k & 0x1ff);
- wm8580_write(codec, WM8580_PLLA2 + offset, (pll_div.k >> 9) & 0xff);
- wm8580_write(codec, WM8580_PLLA3 + offset,
+ snd_soc_write(codec, WM8580_PLLA1 + offset, pll_div.k & 0x1ff);
+ snd_soc_write(codec, WM8580_PLLA2 + offset, (pll_div.k >> 9) & 0x1ff);
+ snd_soc_write(codec, WM8580_PLLA3 + offset,
(pll_div.k >> 18 & 0xf) | (pll_div.n << 4));
- reg = wm8580_read(codec, WM8580_PLLA4 + offset);
- reg &= ~0x3f;
+ reg = snd_soc_read(codec, WM8580_PLLA4 + offset);
+ reg &= ~0x1b;
reg |= pll_div.prescale | pll_div.postscale << 1 |
pll_div.freqmode << 3;
- wm8580_write(codec, WM8580_PLLA4 + offset, reg);
+ snd_soc_write(codec, WM8580_PLLA4 + offset, reg);
/* All done, turn it on */
- reg = wm8580_read(codec, WM8580_PWRDN2);
- wm8580_write(codec, WM8580_PWRDN2, reg & ~pwr_mask);
+ reg = snd_soc_read(codec, WM8580_PWRDN2);
+ snd_soc_write(codec, WM8580_PWRDN2, reg & ~pwr_mask);
return 0;
}
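
For reference, the writes above scatter the PLL coefficients across the 9-bit PLL registers: PLLA1 takes K[8:0], PLLA2 takes K[17:9], and PLLA3 takes K[21:18] together with N in its upper bits, while PLLA4 carries the prescale/postscale/freqmode bits. A small worked example with hypothetical coefficient values, packed the same way as in wm8580_set_dai_pll():

	static void wm8580_pack_pll_example(u16 out[3])
	{
		unsigned int k = 0x2b8f5c & 0x3fffff;	/* 22-bit fractional K */
		unsigned int n = 7;			/* integer divider N */

		out[0] = k & 0x1ff;			/* PLLA1: K[8:0]       */
		out[1] = (k >> 9) & 0x1ff;		/* PLLA2: K[17:9]      */
		out[2] = ((k >> 18) & 0xf) | (n << 4);	/* PLLA3: K[21:18] | N */
	}
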
@@ -547,7 +486,7 @@ static int wm8580_paif_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- u16 paifb = wm8580_read(codec, WM8580_PAIF3 + dai->id);
+ u16 paifb = snd_soc_read(codec, WM8580_PAIF3 + dai->id);
paifb &= ~WM8580_AIF_LENGTH_MASK;
/* bit size */
@@ -567,7 +506,7 @@ static int wm8580_paif_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- wm8580_write(codec, WM8580_PAIF3 + dai->id, paifb);
+ snd_soc_write(codec, WM8580_PAIF3 + dai->id, paifb);
return 0;
}
@@ -579,8 +518,8 @@ static int wm8580_set_paif_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int aifb;
int can_invert_lrclk;
- aifa = wm8580_read(codec, WM8580_PAIF1 + codec_dai->id);
- aifb = wm8580_read(codec, WM8580_PAIF3 + codec_dai->id);
+ aifa = snd_soc_read(codec, WM8580_PAIF1 + codec_dai->id);
+ aifb = snd_soc_read(codec, WM8580_PAIF3 + codec_dai->id);
aifb &= ~(WM8580_AIF_FMT_MASK | WM8580_AIF_LRP | WM8580_AIF_BCP);
@@ -646,8 +585,8 @@ static int wm8580_set_paif_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8580_write(codec, WM8580_PAIF1 + codec_dai->id, aifa);
- wm8580_write(codec, WM8580_PAIF3 + codec_dai->id, aifb);
+ snd_soc_write(codec, WM8580_PAIF1 + codec_dai->id, aifa);
+ snd_soc_write(codec, WM8580_PAIF3 + codec_dai->id, aifb);
return 0;
}
@@ -660,7 +599,7 @@ static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
switch (div_id) {
case WM8580_MCLK:
- reg = wm8580_read(codec, WM8580_PLLB4);
+ reg = snd_soc_read(codec, WM8580_PLLB4);
reg &= ~WM8580_PLLB4_MCLKOUTSRC_MASK;
switch (div) {
@@ -682,11 +621,11 @@ static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
default:
return -EINVAL;
}
- wm8580_write(codec, WM8580_PLLB4, reg);
+ snd_soc_write(codec, WM8580_PLLB4, reg);
break;
case WM8580_DAC_CLKSEL:
- reg = wm8580_read(codec, WM8580_CLKSEL);
+ reg = snd_soc_read(codec, WM8580_CLKSEL);
reg &= ~WM8580_CLKSEL_DAC_CLKSEL_MASK;
switch (div) {
@@ -704,11 +643,11 @@ static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
default:
return -EINVAL;
}
- wm8580_write(codec, WM8580_CLKSEL, reg);
+ snd_soc_write(codec, WM8580_CLKSEL, reg);
break;
case WM8580_CLKOUTSRC:
- reg = wm8580_read(codec, WM8580_PLLB4);
+ reg = snd_soc_read(codec, WM8580_PLLB4);
reg &= ~WM8580_PLLB4_CLKOUTSRC_MASK;
switch (div) {
@@ -730,7 +669,7 @@ static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
default:
return -EINVAL;
}
- wm8580_write(codec, WM8580_PLLB4, reg);
+ snd_soc_write(codec, WM8580_PLLB4, reg);
break;
default:
@@ -745,14 +684,14 @@ static int wm8580_digital_mute(struct snd_soc_dai *codec_dai, int mute)
struct snd_soc_codec *codec = codec_dai->codec;
unsigned int reg;
- reg = wm8580_read(codec, WM8580_DAC_CONTROL5);
+ reg = snd_soc_read(codec, WM8580_DAC_CONTROL5);
if (mute)
reg |= WM8580_DAC_CONTROL5_MUTEALL;
else
reg &= ~WM8580_DAC_CONTROL5_MUTEALL;
- wm8580_write(codec, WM8580_DAC_CONTROL5, reg);
+ snd_soc_write(codec, WM8580_DAC_CONTROL5, reg);
return 0;
}
@@ -769,20 +708,20 @@ static int wm8580_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_STANDBY:
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* Power up and get individual control of the DACs */
- reg = wm8580_read(codec, WM8580_PWRDN1);
+ reg = snd_soc_read(codec, WM8580_PWRDN1);
reg &= ~(WM8580_PWRDN1_PWDN | WM8580_PWRDN1_ALLDACPD);
- wm8580_write(codec, WM8580_PWRDN1, reg);
+ snd_soc_write(codec, WM8580_PWRDN1, reg);
/* Make VMID high impedance */
- reg = wm8580_read(codec, WM8580_ADC_CONTROL1);
+ reg = snd_soc_read(codec, WM8580_ADC_CONTROL1);
reg &= ~0x100;
- wm8580_write(codec, WM8580_ADC_CONTROL1, reg);
+ snd_soc_write(codec, WM8580_ADC_CONTROL1, reg);
}
break;
case SND_SOC_BIAS_OFF:
- reg = wm8580_read(codec, WM8580_PWRDN1);
- wm8580_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN);
+ reg = snd_soc_read(codec, WM8580_PWRDN1);
+ snd_soc_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN);
break;
}
codec->bias_level = level;
@@ -893,7 +832,8 @@ struct snd_soc_codec_device soc_codec_dev_wm8580 = {
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8580);
-static int wm8580_register(struct wm8580_priv *wm8580)
+static int wm8580_register(struct wm8580_priv *wm8580,
+ enum snd_soc_control_type control)
{
int ret, i;
struct snd_soc_codec *codec = &wm8580->codec;
@@ -911,8 +851,6 @@ static int wm8580_register(struct wm8580_priv *wm8580)
codec->private_data = wm8580;
codec->name = "WM8580";
codec->owner = THIS_MODULE;
- codec->read = wm8580_read_reg_cache;
- codec->write = wm8580_write;
codec->bias_level = SND_SOC_BIAS_OFF;
codec->set_bias_level = wm8580_set_bias_level;
codec->dai = wm8580_dai;
@@ -922,11 +860,34 @@ static int wm8580_register(struct wm8580_priv *wm8580)
memcpy(codec->reg_cache, wm8580_reg, sizeof(wm8580_reg));
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wm8580->supplies); i++)
+ wm8580->supplies[i].supply = wm8580_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8580->supplies),
+ wm8580->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ goto err;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8580->supplies),
+ wm8580->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_regulator_get;
+ }
+
/* Get the codec into a known state */
- ret = wm8580_write(codec, WM8580_RESET, 0);
+ ret = snd_soc_write(codec, WM8580_RESET, 0);
if (ret != 0) {
dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
- goto err;
+ goto err_regulator_enable;
}
for (i = 0; i < ARRAY_SIZE(wm8580_dai); i++)
@@ -939,7 +900,7 @@ static int wm8580_register(struct wm8580_priv *wm8580)
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(codec->dev, "Failed to register codec: %d\n", ret);
- goto err;
+ goto err_regulator_enable;
}
ret = snd_soc_register_dais(wm8580_dai, ARRAY_SIZE(wm8580_dai));
@@ -952,6 +913,10 @@ static int wm8580_register(struct wm8580_priv *wm8580)
err_codec:
snd_soc_unregister_codec(codec);
+err_regulator_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
+err_regulator_get:
+ regulator_bulk_free(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
err:
kfree(wm8580);
return ret;
@@ -962,6 +927,8 @@ static void wm8580_unregister(struct wm8580_priv *wm8580)
wm8580_set_bias_level(&wm8580->codec, SND_SOC_BIAS_OFF);
snd_soc_unregister_dais(wm8580_dai, ARRAY_SIZE(wm8580_dai));
snd_soc_unregister_codec(&wm8580->codec);
+ regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
+ regulator_bulk_free(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
kfree(wm8580);
wm8580_codec = NULL;
}
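
The supply handling added above follows the standard regulator consumer pattern: request all supplies with one bulk call, enable them before touching the chip, and tear them down in reverse order on unregister or on any error path. A condensed sketch assuming <linux/regulator/consumer.h>; the device pointer is a placeholder, and each supplies[i].supply must already name a regulator, as wm8580_register() arranges:

	static int example_supplies_on(struct device *dev,
				       struct regulator_bulk_data *supplies,
				       int num)
	{
		int ret;

		ret = regulator_bulk_get(dev, num, supplies);
		if (ret != 0)
			return ret;

		ret = regulator_bulk_enable(num, supplies);
		if (ret != 0)
			regulator_bulk_free(num, supplies);

		return ret;
	}

	static void example_supplies_off(struct regulator_bulk_data *supplies,
					 int num)
	{
		regulator_bulk_disable(num, supplies);
		regulator_bulk_free(num, supplies);
	}
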
@@ -978,14 +945,13 @@ static int wm8580_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
codec = &wm8580->codec;
- codec->hw_write = (hw_write_t)i2c_master_send;
i2c_set_clientdata(i2c, wm8580);
codec->control_data = i2c;
codec->dev = &i2c->dev;
- return wm8580_register(wm8580);
+ return wm8580_register(wm8580, SND_SOC_I2C);
}
static int wm8580_i2c_remove(struct i2c_client *client)
@@ -995,6 +961,21 @@ static int wm8580_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8580_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8580_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8580_i2c_suspend NULL
+#define wm8580_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8580_i2c_id[] = {
{ "wm8580", 0 },
{ }
@@ -1008,6 +989,8 @@ static struct i2c_driver wm8580_i2c_driver = {
},
.probe = wm8580_i2c_probe,
.remove = wm8580_i2c_remove,
+ .suspend = wm8580_i2c_suspend,
+ .resume = wm8580_i2c_resume,
.id_table = wm8580_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index e7ff2121ede9..16e969a762c3 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -43,45 +43,6 @@ static const u16 wm8728_reg_defaults[] = {
0x100,
};
-static inline unsigned int wm8728_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- BUG_ON(reg >= ARRAY_SIZE(wm8728_reg_defaults));
- return cache[reg];
-}
-
-static inline void wm8728_write_reg_cache(struct snd_soc_codec *codec,
- u16 reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
- BUG_ON(reg >= ARRAY_SIZE(wm8728_reg_defaults));
- cache[reg] = value;
-}
-
-/*
- * write to the WM8728 register space
- */
-static int wm8728_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[2];
-
- /* data is
- * D15..D9 WM8728 register offset
- * D8...D0 register data
- */
- data[0] = (reg << 1) | ((value >> 8) & 0x0001);
- data[1] = value & 0x00ff;
-
- wm8728_write_reg_cache(codec, reg, value);
-
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
-
static const DECLARE_TLV_DB_SCALE(wm8728_tlv, -12750, 50, 1);
static const struct snd_kcontrol_new wm8728_snd_controls[] = {
@@ -121,12 +82,12 @@ static int wm8728_add_widgets(struct snd_soc_codec *codec)
static int wm8728_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u16 mute_reg = wm8728_read_reg_cache(codec, WM8728_DACCTL);
+ u16 mute_reg = snd_soc_read(codec, WM8728_DACCTL);
if (mute)
- wm8728_write(codec, WM8728_DACCTL, mute_reg | 1);
+ snd_soc_write(codec, WM8728_DACCTL, mute_reg | 1);
else
- wm8728_write(codec, WM8728_DACCTL, mute_reg & ~1);
+ snd_soc_write(codec, WM8728_DACCTL, mute_reg & ~1);
return 0;
}
@@ -138,7 +99,7 @@ static int wm8728_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- u16 dac = wm8728_read_reg_cache(codec, WM8728_DACCTL);
+ u16 dac = snd_soc_read(codec, WM8728_DACCTL);
dac &= ~0x18;
@@ -155,7 +116,7 @@ static int wm8728_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- wm8728_write(codec, WM8728_DACCTL, dac);
+ snd_soc_write(codec, WM8728_DACCTL, dac);
return 0;
}
@@ -164,7 +125,7 @@ static int wm8728_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- u16 iface = wm8728_read_reg_cache(codec, WM8728_IFCTL);
+ u16 iface = snd_soc_read(codec, WM8728_IFCTL);
/* Currently only I2S is supported by the driver, though the
* hardware is more flexible.
@@ -204,7 +165,7 @@ static int wm8728_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8728_write(codec, WM8728_IFCTL, iface);
+ snd_soc_write(codec, WM8728_IFCTL, iface);
return 0;
}
@@ -220,19 +181,19 @@ static int wm8728_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_STANDBY:
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* Power everything up... */
- reg = wm8728_read_reg_cache(codec, WM8728_DACCTL);
- wm8728_write(codec, WM8728_DACCTL, reg & ~0x4);
+ reg = snd_soc_read(codec, WM8728_DACCTL);
+ snd_soc_write(codec, WM8728_DACCTL, reg & ~0x4);
/* ..then sync in the register cache. */
for (i = 0; i < ARRAY_SIZE(wm8728_reg_defaults); i++)
- wm8728_write(codec, i,
- wm8728_read_reg_cache(codec, i));
+ snd_soc_write(codec, i,
+ snd_soc_read(codec, i));
}
break;
case SND_SOC_BIAS_OFF:
- reg = wm8728_read_reg_cache(codec, WM8728_DACCTL);
- wm8728_write(codec, WM8728_DACCTL, reg | 0x4);
+ reg = snd_soc_read(codec, WM8728_DACCTL);
+ snd_soc_write(codec, WM8728_DACCTL, reg | 0x4);
break;
}
codec->bias_level = level;
@@ -287,15 +248,14 @@ static int wm8728_resume(struct platform_device *pdev)
* initialise the WM8728 driver
* register the mixer and dsp interfaces with the kernel
*/
-static int wm8728_init(struct snd_soc_device *socdev)
+static int wm8728_init(struct snd_soc_device *socdev,
+ enum snd_soc_control_type control)
{
struct snd_soc_codec *codec = socdev->card->codec;
int ret = 0;
codec->name = "WM8728";
codec->owner = THIS_MODULE;
- codec->read = wm8728_read_reg_cache;
- codec->write = wm8728_write;
codec->set_bias_level = wm8728_set_bias_level;
codec->dai = &wm8728_dai;
codec->num_dai = 1;
@@ -307,11 +267,18 @@ static int wm8728_init(struct snd_soc_device *socdev)
if (codec->reg_cache == NULL)
return -ENOMEM;
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ printk(KERN_ERR "wm8728: failed to configure cache I/O: %d\n",
+ ret);
+ goto err;
+ }
+
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
printk(KERN_ERR "wm8728: failed to create pcms\n");
- goto pcm_err;
+ goto err;
}
/* power on device */
@@ -331,7 +298,7 @@ static int wm8728_init(struct snd_soc_device *socdev)
card_err:
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
-pcm_err:
+err:
kfree(codec->reg_cache);
return ret;
}
@@ -357,7 +324,7 @@ static int wm8728_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, codec);
codec->control_data = i2c;
- ret = wm8728_init(socdev);
+ ret = wm8728_init(socdev, SND_SOC_I2C);
if (ret < 0)
pr_err("failed to initialise WM8728\n");
@@ -437,7 +404,7 @@ static int __devinit wm8728_spi_probe(struct spi_device *spi)
codec->control_data = spi;
- ret = wm8728_init(socdev);
+ ret = wm8728_init(socdev, SND_SOC_SPI);
if (ret < 0)
dev_err(&spi->dev, "failed to initialise WM8728\n");
@@ -458,30 +425,6 @@ static struct spi_driver wm8728_spi_driver = {
.probe = wm8728_spi_probe,
.remove = __devexit_p(wm8728_spi_remove),
};
-
-static int wm8728_spi_write(struct spi_device *spi, const char *data, int len)
-{
- struct spi_transfer t;
- struct spi_message m;
- u8 msg[2];
-
- if (len <= 0)
- return 0;
-
- msg[0] = data[0];
- msg[1] = data[1];
-
- spi_message_init(&m);
- memset(&t, 0, (sizeof t));
-
- t.tx_buf = &msg[0];
- t.len = len;
-
- spi_message_add_tail(&t, &m);
- spi_sync(spi, &m);
-
- return len;
-}
#endif /* CONFIG_SPI_MASTER */
static int wm8728_probe(struct platform_device *pdev)
@@ -506,13 +449,11 @@ static int wm8728_probe(struct platform_device *pdev)
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
if (setup->i2c_address) {
- codec->hw_write = (hw_write_t)i2c_master_send;
ret = wm8728_add_i2c_device(pdev, setup);
}
#endif
#if defined(CONFIG_SPI_MASTER)
if (setup->spi) {
- codec->hw_write = (hw_write_t)wm8728_spi_write;
ret = spi_register_driver(&wm8728_spi_driver);
if (ret != 0)
printk(KERN_ERR "can't add spi driver");
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 7a205876ef4f..d3fd4f28d96e 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -26,6 +26,7 @@
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
+#include <sound/tlv.h>
#include "wm8731.h"
@@ -39,9 +40,6 @@ struct wm8731_priv {
unsigned int sysclk;
};
-#ifdef CONFIG_SPI_MASTER
-static int wm8731_spi_write(struct spi_device *spi, const char *data, int len);
-#endif
/*
* wm8731 register cache
@@ -50,60 +48,12 @@ static int wm8731_spi_write(struct spi_device *spi, const char *data, int len);
* There is no point in caching the reset register
*/
static const u16 wm8731_reg[WM8731_CACHEREGNUM] = {
- 0x0097, 0x0097, 0x0079, 0x0079,
- 0x000a, 0x0008, 0x009f, 0x000a,
- 0x0000, 0x0000
+ 0x0097, 0x0097, 0x0079, 0x0079,
+ 0x000a, 0x0008, 0x009f, 0x000a,
+ 0x0000, 0x0000
};
-/*
- * read wm8731 register cache
- */
-static inline unsigned int wm8731_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- if (reg == WM8731_RESET)
- return 0;
- if (reg >= WM8731_CACHEREGNUM)
- return -1;
- return cache[reg];
-}
-
-/*
- * write wm8731 register cache
- */
-static inline void wm8731_write_reg_cache(struct snd_soc_codec *codec,
- u16 reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
- if (reg >= WM8731_CACHEREGNUM)
- return;
- cache[reg] = value;
-}
-
-/*
- * write to the WM8731 register space
- */
-static int wm8731_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[2];
-
- /* data is
- * D15..D9 WM8731 register offset
- * D8...D0 register data
- */
- data[0] = (reg << 1) | ((value >> 8) & 0x0001);
- data[1] = value & 0x00ff;
-
- wm8731_write_reg_cache(codec, reg, value);
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
-
-#define wm8731_reset(c) wm8731_write(c, WM8731_RESET, 0)
+#define wm8731_reset(c) snd_soc_write(c, WM8731_RESET, 0)
static const char *wm8731_input_select[] = {"Line In", "Mic"};
static const char *wm8731_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"};
@@ -113,20 +63,26 @@ static const struct soc_enum wm8731_enum[] = {
SOC_ENUM_SINGLE(WM8731_APDIGI, 1, 4, wm8731_deemph),
};
+static const DECLARE_TLV_DB_SCALE(in_tlv, -3450, 150, 0);
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -1500, 300, 0);
+static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
+
static const struct snd_kcontrol_new wm8731_snd_controls[] = {
-SOC_DOUBLE_R("Master Playback Volume", WM8731_LOUT1V, WM8731_ROUT1V,
- 0, 127, 0),
+SOC_DOUBLE_R_TLV("Master Playback Volume", WM8731_LOUT1V, WM8731_ROUT1V,
+ 0, 127, 0, out_tlv),
SOC_DOUBLE_R("Master Playback ZC Switch", WM8731_LOUT1V, WM8731_ROUT1V,
7, 1, 0),
-SOC_DOUBLE_R("Capture Volume", WM8731_LINVOL, WM8731_RINVOL, 0, 31, 0),
+SOC_DOUBLE_R_TLV("Capture Volume", WM8731_LINVOL, WM8731_RINVOL, 0, 31, 0,
+ in_tlv),
SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1),
SOC_SINGLE("Mic Boost (+20dB)", WM8731_APANA, 0, 1, 0),
-SOC_SINGLE("Capture Mic Switch", WM8731_APANA, 1, 1, 1),
+SOC_SINGLE("Mic Capture Switch", WM8731_APANA, 1, 1, 1),
-SOC_SINGLE("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1),
+SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1,
+ sidetone_tlv),
SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1),
SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0),
@@ -260,12 +216,12 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
struct wm8731_priv *wm8731 = codec->private_data;
- u16 iface = wm8731_read_reg_cache(codec, WM8731_IFACE) & 0xfff3;
+ u16 iface = snd_soc_read(codec, WM8731_IFACE) & 0xfff3;
int i = get_coeff(wm8731->sysclk, params_rate(params));
u16 srate = (coeff_div[i].sr << 2) |
(coeff_div[i].bosr << 1) | coeff_div[i].usb;
- wm8731_write(codec, WM8731_SRATE, srate);
+ snd_soc_write(codec, WM8731_SRATE, srate);
/* bit size */
switch (params_format(params)) {
@@ -279,7 +235,7 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream,
break;
}
- wm8731_write(codec, WM8731_IFACE, iface);
+ snd_soc_write(codec, WM8731_IFACE, iface);
return 0;
}
@@ -291,7 +247,7 @@ static int wm8731_pcm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_codec *codec = socdev->card->codec;
/* set active */
- wm8731_write(codec, WM8731_ACTIVE, 0x0001);
+ snd_soc_write(codec, WM8731_ACTIVE, 0x0001);
return 0;
}
@@ -306,19 +262,19 @@ static void wm8731_shutdown(struct snd_pcm_substream *substream,
/* deactivate */
if (!codec->active) {
udelay(50);
- wm8731_write(codec, WM8731_ACTIVE, 0x0);
+ snd_soc_write(codec, WM8731_ACTIVE, 0x0);
}
}
static int wm8731_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u16 mute_reg = wm8731_read_reg_cache(codec, WM8731_APDIGI) & 0xfff7;
+ u16 mute_reg = snd_soc_read(codec, WM8731_APDIGI) & 0xfff7;
if (mute)
- wm8731_write(codec, WM8731_APDIGI, mute_reg | 0x8);
+ snd_soc_write(codec, WM8731_APDIGI, mute_reg | 0x8);
else
- wm8731_write(codec, WM8731_APDIGI, mute_reg);
+ snd_soc_write(codec, WM8731_APDIGI, mute_reg);
return 0;
}
@@ -396,7 +352,7 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
}
/* set iface */
- wm8731_write(codec, WM8731_IFACE, iface);
+ snd_soc_write(codec, WM8731_IFACE, iface);
return 0;
}
@@ -412,12 +368,12 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_STANDBY:
/* Clear PWROFF, gate CLKOUT, everything else as-is */
- reg = wm8731_read_reg_cache(codec, WM8731_PWR) & 0xff7f;
- wm8731_write(codec, WM8731_PWR, reg | 0x0040);
+ reg = snd_soc_read(codec, WM8731_PWR) & 0xff7f;
+ snd_soc_write(codec, WM8731_PWR, reg | 0x0040);
break;
case SND_SOC_BIAS_OFF:
- wm8731_write(codec, WM8731_ACTIVE, 0x0);
- wm8731_write(codec, WM8731_PWR, 0xffff);
+ snd_soc_write(codec, WM8731_ACTIVE, 0x0);
+ snd_soc_write(codec, WM8731_PWR, 0xffff);
break;
}
codec->bias_level = level;
@@ -457,15 +413,17 @@ struct snd_soc_dai wm8731_dai = {
.rates = WM8731_RATES,
.formats = WM8731_FORMATS,},
.ops = &wm8731_dai_ops,
+ .symmetric_rates = 1,
};
EXPORT_SYMBOL_GPL(wm8731_dai);
+#ifdef CONFIG_PM
static int wm8731_suspend(struct platform_device *pdev, pm_message_t state)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- wm8731_write(codec, WM8731_ACTIVE, 0x0);
+ snd_soc_write(codec, WM8731_ACTIVE, 0x0);
wm8731_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -488,6 +446,10 @@ static int wm8731_resume(struct platform_device *pdev)
wm8731_set_bias_level(codec, codec->suspend_bias_level);
return 0;
}
+#else
+#define wm8731_suspend NULL
+#define wm8731_resume NULL
+#endif
static int wm8731_probe(struct platform_device *pdev)
{
@@ -547,15 +509,16 @@ struct snd_soc_codec_device soc_codec_dev_wm8731 = {
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8731);
-static int wm8731_register(struct wm8731_priv *wm8731)
+static int wm8731_register(struct wm8731_priv *wm8731,
+ enum snd_soc_control_type control)
{
int ret;
struct snd_soc_codec *codec = &wm8731->codec;
- u16 reg;
if (wm8731_codec) {
dev_err(codec->dev, "Another WM8731 is registered\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
mutex_init(&codec->mutex);
@@ -565,8 +528,6 @@ static int wm8731_register(struct wm8731_priv *wm8731)
codec->private_data = wm8731;
codec->name = "WM8731";
codec->owner = THIS_MODULE;
- codec->read = wm8731_read_reg_cache;
- codec->write = wm8731_write;
codec->bias_level = SND_SOC_BIAS_OFF;
codec->set_bias_level = wm8731_set_bias_level;
codec->dai = &wm8731_dai;
@@ -576,10 +537,16 @@ static int wm8731_register(struct wm8731_priv *wm8731)
memcpy(codec->reg_cache, wm8731_reg, sizeof(wm8731_reg));
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
ret = wm8731_reset(codec);
if (ret < 0) {
- dev_err(codec->dev, "Failed to issue reset\n");
- return ret;
+ dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+ goto err;
}
wm8731_dai.dev = codec->dev;
@@ -587,35 +554,36 @@ static int wm8731_register(struct wm8731_priv *wm8731)
wm8731_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Latch the update bits */
- reg = wm8731_read_reg_cache(codec, WM8731_LOUT1V);
- wm8731_write(codec, WM8731_LOUT1V, reg & ~0x0100);
- reg = wm8731_read_reg_cache(codec, WM8731_ROUT1V);
- wm8731_write(codec, WM8731_ROUT1V, reg & ~0x0100);
- reg = wm8731_read_reg_cache(codec, WM8731_LINVOL);
- wm8731_write(codec, WM8731_LINVOL, reg & ~0x0100);
- reg = wm8731_read_reg_cache(codec, WM8731_RINVOL);
- wm8731_write(codec, WM8731_RINVOL, reg & ~0x0100);
+ snd_soc_update_bits(codec, WM8731_LOUT1V, 0x100, 0);
+ snd_soc_update_bits(codec, WM8731_ROUT1V, 0x100, 0);
+ snd_soc_update_bits(codec, WM8731_LINVOL, 0x100, 0);
+ snd_soc_update_bits(codec, WM8731_RINVOL, 0x100, 0);
/* Disable bypass path by default */
- reg = wm8731_read_reg_cache(codec, WM8731_APANA);
- wm8731_write(codec, WM8731_APANA, reg & ~0x4);
+ snd_soc_update_bits(codec, WM8731_APANA, 0x4, 0);
wm8731_codec = codec;
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(codec->dev, "Failed to register codec: %d\n", ret);
- return ret;
+ goto err;
}
ret = snd_soc_register_dai(&wm8731_dai);
if (ret != 0) {
dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
snd_soc_unregister_codec(codec);
- return ret;
+ goto err_codec;
}
return 0;
+
+err_codec:
+ snd_soc_unregister_codec(codec);
+err:
+ kfree(wm8731);
+ return ret;
}
static void wm8731_unregister(struct wm8731_priv *wm8731)
@@ -628,30 +596,6 @@ static void wm8731_unregister(struct wm8731_priv *wm8731)
}
#if defined(CONFIG_SPI_MASTER)
-static int wm8731_spi_write(struct spi_device *spi, const char *data, int len)
-{
- struct spi_transfer t;
- struct spi_message m;
- u8 msg[2];
-
- if (len <= 0)
- return 0;
-
- msg[0] = data[0];
- msg[1] = data[1];
-
- spi_message_init(&m);
- memset(&t, 0, (sizeof t));
-
- t.tx_buf = &msg[0];
- t.len = len;
-
- spi_message_add_tail(&t, &m);
- spi_sync(spi, &m);
-
- return len;
-}
-
static int __devinit wm8731_spi_probe(struct spi_device *spi)
{
struct snd_soc_codec *codec;
@@ -663,12 +607,11 @@ static int __devinit wm8731_spi_probe(struct spi_device *spi)
codec = &wm8731->codec;
codec->control_data = spi;
- codec->hw_write = (hw_write_t)wm8731_spi_write;
codec->dev = &spi->dev;
dev_set_drvdata(&spi->dev, wm8731);
- return wm8731_register(wm8731);
+ return wm8731_register(wm8731, SND_SOC_SPI);
}
static int __devexit wm8731_spi_remove(struct spi_device *spi)
@@ -680,6 +623,21 @@ static int __devexit wm8731_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8731_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&spi->dev);
+}
+
+static int wm8731_spi_resume(struct spi_device *spi)
+{
+ return snd_soc_resume_device(&spi->dev);
+}
+#else
+#define wm8731_spi_suspend NULL
+#define wm8731_spi_resume NULL
+#endif
+
static struct spi_driver wm8731_spi_driver = {
.driver = {
.name = "wm8731",
@@ -687,6 +645,8 @@ static struct spi_driver wm8731_spi_driver = {
.owner = THIS_MODULE,
},
.probe = wm8731_spi_probe,
+ .suspend = wm8731_spi_suspend,
+ .resume = wm8731_spi_resume,
.remove = __devexit_p(wm8731_spi_remove),
};
#endif /* CONFIG_SPI_MASTER */
@@ -703,14 +663,13 @@ static __devinit int wm8731_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
codec = &wm8731->codec;
- codec->hw_write = (hw_write_t)i2c_master_send;
i2c_set_clientdata(i2c, wm8731);
codec->control_data = i2c;
codec->dev = &i2c->dev;
- return wm8731_register(wm8731);
+ return wm8731_register(wm8731, SND_SOC_I2C);
}
static __devexit int wm8731_i2c_remove(struct i2c_client *client)
@@ -720,6 +679,21 @@ static __devexit int wm8731_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8731_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&i2c->dev);
+}
+
+static int wm8731_i2c_resume(struct i2c_client *i2c)
+{
+ return snd_soc_resume_device(&i2c->dev);
+}
+#else
+#define wm8731_i2c_suspend NULL
+#define wm8731_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8731_i2c_id[] = {
{ "wm8731", 0 },
{ }
@@ -733,6 +707,8 @@ static struct i2c_driver wm8731_i2c_driver = {
},
.probe = wm8731_i2c_probe,
.remove = __devexit_p(wm8731_i2c_remove),
+ .suspend = wm8731_i2c_suspend,
+ .resume = wm8731_i2c_resume,
.id_table = wm8731_i2c_id,
};
#endif
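
The snd_soc_codec_set_cache_io(codec, 7, 9, control) calls introduced in this series select the common Wolfson 7-bit address / 9-bit data register format, which is exactly the packing the removed hw_write paths used to do by hand. A sketch of that two-byte layout, matching the deleted wm8731_write()/wm8728_write() helpers:

	static void wm87xx_pack(u8 buf[2], unsigned int reg, unsigned int val)
	{
		/* 7-bit register address in the top bits of byte 0, 9-bit
		 * value split across the low bit of byte 0 and all of byte 1.
		 */
		buf[0] = (reg << 1) | ((val >> 8) & 0x01);
		buf[1] = val & 0xff;
	}
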
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index b64509b01a49..4ba1e7e93fb4 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -55,50 +55,7 @@ static const u16 wm8750_reg[] = {
0x0079, 0x0079, 0x0079, /* 40 */
};
-/*
- * read wm8750 register cache
- */
-static inline unsigned int wm8750_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- if (reg > WM8750_CACHE_REGNUM)
- return -1;
- return cache[reg];
-}
-
-/*
- * write wm8750 register cache
- */
-static inline void wm8750_write_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
- if (reg > WM8750_CACHE_REGNUM)
- return;
- cache[reg] = value;
-}
-
-static int wm8750_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[2];
-
- /* data is
- * D15..D9 WM8753 register offset
- * D8...D0 register data
- */
- data[0] = (reg << 1) | ((value >> 8) & 0x0001);
- data[1] = value & 0x00ff;
-
- wm8750_write_reg_cache(codec, reg, value);
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
-
-#define wm8750_reset(c) wm8750_write(c, WM8750_RESET, 0)
+#define wm8750_reset(c) snd_soc_write(c, WM8750_RESET, 0)
/*
* WM8750 Controls
@@ -594,7 +551,7 @@ static int wm8750_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8750_write(codec, WM8750_IFACE, iface);
+ snd_soc_write(codec, WM8750_IFACE, iface);
return 0;
}
@@ -606,8 +563,8 @@ static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
struct wm8750_priv *wm8750 = codec->private_data;
- u16 iface = wm8750_read_reg_cache(codec, WM8750_IFACE) & 0x1f3;
- u16 srate = wm8750_read_reg_cache(codec, WM8750_SRATE) & 0x1c0;
+ u16 iface = snd_soc_read(codec, WM8750_IFACE) & 0x1f3;
+ u16 srate = snd_soc_read(codec, WM8750_SRATE) & 0x1c0;
int coeff = get_coeff(wm8750->sysclk, params_rate(params));
/* bit size */
@@ -626,9 +583,9 @@ static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream,
}
/* set iface & srate */
- wm8750_write(codec, WM8750_IFACE, iface);
+ snd_soc_write(codec, WM8750_IFACE, iface);
if (coeff >= 0)
- wm8750_write(codec, WM8750_SRATE, srate |
+ snd_soc_write(codec, WM8750_SRATE, srate |
(coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);
return 0;
@@ -637,35 +594,35 @@ static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream,
static int wm8750_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u16 mute_reg = wm8750_read_reg_cache(codec, WM8750_ADCDAC) & 0xfff7;
+ u16 mute_reg = snd_soc_read(codec, WM8750_ADCDAC) & 0xfff7;
if (mute)
- wm8750_write(codec, WM8750_ADCDAC, mute_reg | 0x8);
+ snd_soc_write(codec, WM8750_ADCDAC, mute_reg | 0x8);
else
- wm8750_write(codec, WM8750_ADCDAC, mute_reg);
+ snd_soc_write(codec, WM8750_ADCDAC, mute_reg);
return 0;
}
static int wm8750_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- u16 pwr_reg = wm8750_read_reg_cache(codec, WM8750_PWR1) & 0xfe3e;
+ u16 pwr_reg = snd_soc_read(codec, WM8750_PWR1) & 0xfe3e;
switch (level) {
case SND_SOC_BIAS_ON:
/* set vmid to 50k and unmute dac */
- wm8750_write(codec, WM8750_PWR1, pwr_reg | 0x00c0);
+ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x00c0);
break;
case SND_SOC_BIAS_PREPARE:
/* set vmid to 5k for quick power up */
- wm8750_write(codec, WM8750_PWR1, pwr_reg | 0x01c1);
+ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1);
break;
case SND_SOC_BIAS_STANDBY:
/* mute dac and set vmid to 500k, enable VREF */
- wm8750_write(codec, WM8750_PWR1, pwr_reg | 0x0141);
+ snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x0141);
break;
case SND_SOC_BIAS_OFF:
- wm8750_write(codec, WM8750_PWR1, 0x0001);
+ snd_soc_write(codec, WM8750_PWR1, 0x0001);
break;
}
codec->bias_level = level;
@@ -754,15 +711,14 @@ static int wm8750_resume(struct platform_device *pdev)
* initialise the WM8750 driver
* register the mixer and dsp interfaces with the kernel
*/
-static int wm8750_init(struct snd_soc_device *socdev)
+static int wm8750_init(struct snd_soc_device *socdev,
+ enum snd_soc_control_type control)
{
struct snd_soc_codec *codec = socdev->card->codec;
int reg, ret = 0;
codec->name = "WM8750";
codec->owner = THIS_MODULE;
- codec->read = wm8750_read_reg_cache;
- codec->write = wm8750_write;
codec->set_bias_level = wm8750_set_bias_level;
codec->dai = &wm8750_dai;
codec->num_dai = 1;
@@ -771,13 +727,23 @@ static int wm8750_init(struct snd_soc_device *socdev)
if (codec->reg_cache == NULL)
return -ENOMEM;
- wm8750_reset(codec);
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ printk(KERN_ERR "wm8750: failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ ret = wm8750_reset(codec);
+ if (ret < 0) {
+ printk(KERN_ERR "wm8750: failed to reset: %d\n", ret);
+ goto err;
+ }
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
printk(KERN_ERR "wm8750: failed to create pcms\n");
- goto pcm_err;
+ goto err;
}
/* charge output caps */
@@ -786,22 +752,22 @@ static int wm8750_init(struct snd_soc_device *socdev)
schedule_delayed_work(&codec->delayed_work, msecs_to_jiffies(1000));
/* set the update bits */
- reg = wm8750_read_reg_cache(codec, WM8750_LDAC);
- wm8750_write(codec, WM8750_LDAC, reg | 0x0100);
- reg = wm8750_read_reg_cache(codec, WM8750_RDAC);
- wm8750_write(codec, WM8750_RDAC, reg | 0x0100);
- reg = wm8750_read_reg_cache(codec, WM8750_LOUT1V);
- wm8750_write(codec, WM8750_LOUT1V, reg | 0x0100);
- reg = wm8750_read_reg_cache(codec, WM8750_ROUT1V);
- wm8750_write(codec, WM8750_ROUT1V, reg | 0x0100);
- reg = wm8750_read_reg_cache(codec, WM8750_LOUT2V);
- wm8750_write(codec, WM8750_LOUT2V, reg | 0x0100);
- reg = wm8750_read_reg_cache(codec, WM8750_ROUT2V);
- wm8750_write(codec, WM8750_ROUT2V, reg | 0x0100);
- reg = wm8750_read_reg_cache(codec, WM8750_LINVOL);
- wm8750_write(codec, WM8750_LINVOL, reg | 0x0100);
- reg = wm8750_read_reg_cache(codec, WM8750_RINVOL);
- wm8750_write(codec, WM8750_RINVOL, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8750_LDAC);
+ snd_soc_write(codec, WM8750_LDAC, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8750_RDAC);
+ snd_soc_write(codec, WM8750_RDAC, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8750_LOUT1V);
+ snd_soc_write(codec, WM8750_LOUT1V, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8750_ROUT1V);
+ snd_soc_write(codec, WM8750_ROUT1V, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8750_LOUT2V);
+ snd_soc_write(codec, WM8750_LOUT2V, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8750_ROUT2V);
+ snd_soc_write(codec, WM8750_ROUT2V, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8750_LINVOL);
+ snd_soc_write(codec, WM8750_LINVOL, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8750_RINVOL);
+ snd_soc_write(codec, WM8750_RINVOL, reg | 0x0100);
snd_soc_add_controls(codec, wm8750_snd_controls,
ARRAY_SIZE(wm8750_snd_controls));
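
The block above keeps the old read/write pairs when setting bit 8, the volume-update latch, on each volume register; an equivalent and more compact form using the bitmask helper would be the following sketch, which is not part of the patch:

	static void wm8750_latch_volumes(struct snd_soc_codec *codec)
	{
		static const unsigned short regs[] = {
			WM8750_LDAC, WM8750_RDAC,
			WM8750_LOUT1V, WM8750_ROUT1V,
			WM8750_LOUT2V, WM8750_ROUT2V,
			WM8750_LINVOL, WM8750_RINVOL,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(regs); i++)
			snd_soc_update_bits(codec, regs[i], 0x0100, 0x0100);
	}
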
@@ -816,7 +782,7 @@ static int wm8750_init(struct snd_soc_device *socdev)
card_err:
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
-pcm_err:
+err:
kfree(codec->reg_cache);
return ret;
}
@@ -844,7 +810,7 @@ static int wm8750_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, codec);
codec->control_data = i2c;
- ret = wm8750_init(socdev);
+ ret = wm8750_init(socdev, SND_SOC_I2C);
if (ret < 0)
pr_err("failed to initialise WM8750\n");
@@ -924,7 +890,7 @@ static int __devinit wm8750_spi_probe(struct spi_device *spi)
codec->control_data = spi;
- ret = wm8750_init(socdev);
+ ret = wm8750_init(socdev, SND_SOC_SPI);
if (ret < 0)
dev_err(&spi->dev, "failed to initialise WM8750\n");
@@ -945,30 +911,6 @@ static struct spi_driver wm8750_spi_driver = {
.probe = wm8750_spi_probe,
.remove = __devexit_p(wm8750_spi_remove),
};
-
-static int wm8750_spi_write(struct spi_device *spi, const char *data, int len)
-{
- struct spi_transfer t;
- struct spi_message m;
- u8 msg[2];
-
- if (len <= 0)
- return 0;
-
- msg[0] = data[0];
- msg[1] = data[1];
-
- spi_message_init(&m);
- memset(&t, 0, (sizeof t));
-
- t.tx_buf = &msg[0];
- t.len = len;
-
- spi_message_add_tail(&t, &m);
- spi_sync(spi, &m);
-
- return len;
-}
#endif
static int wm8750_probe(struct platform_device *pdev)
@@ -1002,13 +944,11 @@ static int wm8750_probe(struct platform_device *pdev)
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
if (setup->i2c_address) {
- codec->hw_write = (hw_write_t)i2c_master_send;
ret = wm8750_add_i2c_device(pdev, setup);
}
#endif
#if defined(CONFIG_SPI_MASTER)
if (setup->spi) {
- codec->hw_write = (hw_write_t)wm8750_spi_write;
ret = spi_register_driver(&wm8750_spi_driver);
if (ret != 0)
printk(KERN_ERR "can't add spi driver");
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 49c4b2898aff..d80d414cfbbd 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1766,6 +1766,21 @@ static int wm8753_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8753_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8753_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8753_i2c_suspend NULL
+#define wm8753_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8753_i2c_id[] = {
{ "wm8753", 0 },
{ }
@@ -1779,6 +1794,8 @@ static struct i2c_driver wm8753_i2c_driver = {
},
.probe = wm8753_i2c_probe,
.remove = wm8753_i2c_remove,
+ .suspend = wm8753_i2c_suspend,
+ .resume = wm8753_i2c_resume,
.id_table = wm8753_i2c_id,
};
#endif
@@ -1834,6 +1851,22 @@ static int __devexit wm8753_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8753_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&spi->dev);
+}
+
+static int wm8753_spi_resume(struct spi_device *spi)
+{
+ return snd_soc_resume_device(&spi->dev);
+}
+
+#else
+#define wm8753_spi_suspend NULL
+#define wm8753_spi_resume NULL
+#endif
+
static struct spi_driver wm8753_spi_driver = {
.driver = {
.name = "wm8753",
@@ -1842,6 +1875,8 @@ static struct spi_driver wm8753_spi_driver = {
},
.probe = wm8753_spi_probe,
.remove = __devexit_p(wm8753_spi_remove),
+ .suspend = wm8753_spi_suspend,
+ .resume = wm8753_spi_resume,
};
#endif
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
new file mode 100644
index 000000000000..a9829aa26e53
--- /dev/null
+++ b/sound/soc/codecs/wm8776.c
@@ -0,0 +1,744 @@
+/*
+ * wm8776.c -- WM8776 ALSA SoC Audio driver
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO: Input ALC/limiter support
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8776.h"
+
+static struct snd_soc_codec *wm8776_codec;
+struct snd_soc_codec_device soc_codec_dev_wm8776;
+
+/* codec private data */
+struct wm8776_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[WM8776_CACHEREGNUM];
+ int sysclk[2];
+};
+
+#ifdef CONFIG_SPI_MASTER
+static int wm8776_spi_write(struct spi_device *spi, const char *data, int len);
+#endif
+
+static const u16 wm8776_reg[WM8776_CACHEREGNUM] = {
+ 0x79, 0x79, 0x79, 0xff, 0xff, /* 4 */
+ 0xff, 0x00, 0x90, 0x00, 0x00, /* 9 */
+ 0x22, 0x22, 0x22, 0x08, 0xcf, /* 14 */
+ 0xcf, 0x7b, 0x00, 0x32, 0x00, /* 19 */
+ 0xa6, 0x01, 0x01
+};
+
+static int wm8776_reset(struct snd_soc_codec *codec)
+{
+ return snd_soc_write(codec, WM8776_RESET, 0);
+}
+
+static const DECLARE_TLV_DB_SCALE(hp_tlv, -12100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -10350, 50, 1);
+
+static const struct snd_kcontrol_new wm8776_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Headphone Playback Volume", WM8776_HPLVOL, WM8776_HPRVOL,
+ 0, 127, 0, hp_tlv),
+SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8776_DACLVOL, WM8776_DACRVOL,
+ 0, 255, 0, dac_tlv),
+SOC_SINGLE("Digital Playback ZC Switch", WM8776_DACCTRL1, 0, 1, 0),
+
+SOC_SINGLE("Deemphasis Switch", WM8776_DACCTRL2, 0, 1, 0),
+
+SOC_DOUBLE_R_TLV("Capture Volume", WM8776_ADCLVOL, WM8776_ADCRVOL,
+ 0, 255, 0, adc_tlv),
+SOC_DOUBLE("Capture Switch", WM8776_ADCMUX, 7, 6, 1, 1),
+SOC_DOUBLE_R("Capture ZC Switch", WM8776_ADCLVOL, WM8776_ADCRVOL, 8, 1, 0),
+SOC_SINGLE("Capture HPF Switch", WM8776_ADCIFCTRL, 8, 1, 1),
+};
+
+static const struct snd_kcontrol_new inmix_controls[] = {
+SOC_DAPM_SINGLE("AIN1 Switch", WM8776_ADCMUX, 0, 1, 0),
+SOC_DAPM_SINGLE("AIN2 Switch", WM8776_ADCMUX, 1, 1, 0),
+SOC_DAPM_SINGLE("AIN3 Switch", WM8776_ADCMUX, 2, 1, 0),
+SOC_DAPM_SINGLE("AIN4 Switch", WM8776_ADCMUX, 3, 1, 0),
+SOC_DAPM_SINGLE("AIN5 Switch", WM8776_ADCMUX, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new outmix_controls[] = {
+SOC_DAPM_SINGLE("DAC Switch", WM8776_OUTMUX, 0, 1, 0),
+SOC_DAPM_SINGLE("AUX Switch", WM8776_OUTMUX, 1, 1, 0),
+SOC_DAPM_SINGLE("Bypass Switch", WM8776_OUTMUX, 2, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8776_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AUX"),
+SND_SOC_DAPM_INPUT("AUX"),
+
+SND_SOC_DAPM_INPUT("AIN1"),
+SND_SOC_DAPM_INPUT("AIN2"),
+SND_SOC_DAPM_INPUT("AIN3"),
+SND_SOC_DAPM_INPUT("AIN4"),
+SND_SOC_DAPM_INPUT("AIN5"),
+
+SND_SOC_DAPM_MIXER("Input Mixer", WM8776_PWRDOWN, 6, 1,
+ inmix_controls, ARRAY_SIZE(inmix_controls)),
+
+SND_SOC_DAPM_ADC("ADC", "Capture", WM8776_PWRDOWN, 1, 1),
+SND_SOC_DAPM_DAC("DAC", "Playback", WM8776_PWRDOWN, 2, 1),
+
+SND_SOC_DAPM_MIXER("Output Mixer", SND_SOC_NOPM, 0, 0,
+ outmix_controls, ARRAY_SIZE(outmix_controls)),
+
+SND_SOC_DAPM_PGA("Headphone PGA", WM8776_PWRDOWN, 3, 1, NULL, 0),
+
+SND_SOC_DAPM_OUTPUT("VOUT"),
+
+SND_SOC_DAPM_OUTPUT("HPOUTL"),
+SND_SOC_DAPM_OUTPUT("HPOUTR"),
+};
+
+static const struct snd_soc_dapm_route routes[] = {
+ { "Input Mixer", "AIN1 Switch", "AIN1" },
+ { "Input Mixer", "AIN2 Switch", "AIN2" },
+ { "Input Mixer", "AIN3 Switch", "AIN3" },
+ { "Input Mixer", "AIN4 Switch", "AIN4" },
+ { "Input Mixer", "AIN5 Switch", "AIN5" },
+
+ { "ADC", NULL, "Input Mixer" },
+
+ { "Output Mixer", "DAC Switch", "DAC" },
+ { "Output Mixer", "AUX Switch", "AUX" },
+ { "Output Mixer", "Bypass Switch", "Input Mixer" },
+
+ { "VOUT", NULL, "Output Mixer" },
+
+ { "Headphone PGA", NULL, "Output Mixer" },
+
+ { "HPOUTL", NULL, "Headphone PGA" },
+ { "HPOUTR", NULL, "Headphone PGA" },
+};
+
+static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int reg, iface, master;
+
+ switch (dai->id) {
+ case WM8776_DAI_DAC:
+ reg = WM8776_DACIFCTRL;
+ master = 0x80;
+ break;
+ case WM8776_DAI_ADC:
+ reg = WM8776_ADCIFCTRL;
+ master = 0x100;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ iface = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ master = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ iface |= 0x0002;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ iface |= 0x0001;
+ break;
+ /* FIXME: CHECK A/B */
+ case SND_SOC_DAIFMT_DSP_A:
+ iface |= 0x0003;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ iface |= 0x0007;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ iface |= 0x00c;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ iface |= 0x008;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ iface |= 0x004;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Finally, write out the values */
+ snd_soc_update_bits(codec, reg, 0xf, iface);
+ snd_soc_update_bits(codec, WM8776_MSTRCTRL, 0x180, master);
+
+ return 0;
+}
+
+static int mclk_ratios[] = {
+ 128,
+ 192,
+ 256,
+ 384,
+ 512,
+ 768,
+};
+
+static int wm8776_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8776_priv *wm8776 = codec->private_data;
+ int iface_reg, iface;
+ int ratio_shift, master;
+ int i;
+
+ iface = 0;
+
+ switch (dai->id) {
+ case WM8776_DAI_DAC:
+ iface_reg = WM8776_DACIFCTRL;
+ master = 0x80;
+ ratio_shift = 4;
+ break;
+ case WM8776_DAI_ADC:
+ iface_reg = WM8776_ADCIFCTRL;
+ master = 0x100;
+ ratio_shift = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ /* Set word length */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ iface |= 0x10;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ iface |= 0x20;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ iface |= 0x30;
+ break;
+ }
+
+ /* Only need to set MCLK/LRCLK ratio if we're master */
+ if (snd_soc_read(codec, WM8776_MSTRCTRL) & master) {
+ for (i = 0; i < ARRAY_SIZE(mclk_ratios); i++) {
+ if (wm8776->sysclk[dai->id] / params_rate(params)
+ == mclk_ratios[i])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(mclk_ratios)) {
+ dev_err(codec->dev,
+ "Unable to configure MCLK ratio %d/%d\n",
+ wm8776->sysclk[dai->id], params_rate(params));
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "MCLK is %dfs\n", mclk_ratios[i]);
+
+ snd_soc_update_bits(codec, WM8776_MSTRCTRL,
+ 0x7 << ratio_shift, i << ratio_shift);
+ } else {
+ dev_dbg(codec->dev, "DAI in slave mode\n");
+ }
+
+ snd_soc_update_bits(codec, iface_reg, 0x30, iface);
+
+ return 0;
+}
+
+static int wm8776_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+
+ return snd_soc_write(codec, WM8776_DACMUTE, !!mute);
+}
+
+static int wm8776_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8776_priv *wm8776 = codec->private_data;
+
+ BUG_ON(dai->id >= ARRAY_SIZE(wm8776->sysclk));
+
+ wm8776->sysclk[dai->id] = freq;
+
+ return 0;
+}
+
+static int wm8776_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ /* Disable the global powerdown; DAPM does the rest */
+ snd_soc_update_bits(codec, WM8776_PWRDOWN, 1, 0);
+ }
+
+ break;
+ case SND_SOC_BIAS_OFF:
+ snd_soc_update_bits(codec, WM8776_PWRDOWN, 1, 1);
+ break;
+ }
+
+ codec->bias_level = level;
+ return 0;
+}
+
+#define WM8776_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000)
+
+
+#define WM8776_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8776_dac_ops = {
+ .digital_mute = wm8776_mute,
+ .hw_params = wm8776_hw_params,
+ .set_fmt = wm8776_set_fmt,
+ .set_sysclk = wm8776_set_sysclk,
+};
+
+static struct snd_soc_dai_ops wm8776_adc_ops = {
+ .hw_params = wm8776_hw_params,
+ .set_fmt = wm8776_set_fmt,
+ .set_sysclk = wm8776_set_sysclk,
+};
+
+struct snd_soc_dai wm8776_dai[] = {
+ {
+ .name = "WM8776 Playback",
+ .id = WM8776_DAI_DAC,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = WM8776_RATES,
+ .formats = WM8776_FORMATS,
+ },
+ .ops = &wm8776_dac_ops,
+ },
+ {
+ .name = "WM8776 Capture",
+ .id = WM8776_DAI_ADC,
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = WM8776_RATES,
+ .formats = WM8776_FORMATS,
+ },
+ .ops = &wm8776_adc_ops,
+ },
+};
+EXPORT_SYMBOL_GPL(wm8776_dai);
+
+#ifdef CONFIG_PM
+static int wm8776_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8776_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int wm8776_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+ int i;
+ u8 data[2];
+ u16 *cache = codec->reg_cache;
+
+ /* Sync reg_cache with the hardware */
+ for (i = 0; i < ARRAY_SIZE(wm8776_reg); i++) {
+ data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
+ data[1] = cache[i] & 0x00ff;
+ codec->hw_write(codec->control_data, data, 2);
+ }
+
+ wm8776_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define wm8776_suspend NULL
+#define wm8776_resume NULL
+#endif
+
+static int wm8776_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm8776_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm8776_codec;
+ codec = wm8776_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, wm8776_snd_controls,
+ ARRAY_SIZE(wm8776_snd_controls));
+ snd_soc_dapm_new_controls(codec, wm8776_dapm_widgets,
+ ARRAY_SIZE(wm8776_dapm_widgets));
+ snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+/* power down chip */
+static int wm8776_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_wm8776 = {
+ .probe = wm8776_probe,
+ .remove = wm8776_remove,
+ .suspend = wm8776_suspend,
+ .resume = wm8776_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8776);
+
+static int wm8776_register(struct wm8776_priv *wm8776,
+ enum snd_soc_control_type control)
+{
+ int ret, i;
+ struct snd_soc_codec *codec = &wm8776->codec;
+
+ if (wm8776_codec) {
+ dev_err(codec->dev, "Another WM8776 is registered\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = wm8776;
+ codec->name = "WM8776";
+ codec->owner = THIS_MODULE;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8776_set_bias_level;
+ codec->dai = wm8776_dai;
+ codec->num_dai = ARRAY_SIZE(wm8776_dai);
+ codec->reg_cache_size = WM8776_CACHEREGNUM;
+ codec->reg_cache = &wm8776->reg_cache;
+
+ memcpy(codec->reg_cache, wm8776_reg, sizeof(wm8776_reg));
+
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wm8776_dai); i++)
+ wm8776_dai[i].dev = codec->dev;
+
+ ret = wm8776_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+ goto err;
+ }
+
+ wm8776_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ /* Latch the update bits; right channel only since we always
+ * update both. */
+ snd_soc_update_bits(codec, WM8776_HPRVOL, 0x100, 0x100);
+ snd_soc_update_bits(codec, WM8776_DACRVOL, 0x100, 0x100);
+
+ wm8776_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ goto err;
+ }
+
+ ret = snd_soc_register_dais(wm8776_dai, ARRAY_SIZE(wm8776_dai));
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAIs: %d\n", ret);
+ goto err_codec;
+ }
+
+ return 0;
+
+err_codec:
+ snd_soc_unregister_codec(codec);
+err:
+ kfree(wm8776);
+ return ret;
+}
+
+static void wm8776_unregister(struct wm8776_priv *wm8776)
+{
+ wm8776_set_bias_level(&wm8776->codec, SND_SOC_BIAS_OFF);
+ snd_soc_unregister_dais(wm8776_dai, ARRAY_SIZE(wm8776_dai));
+ snd_soc_unregister_codec(&wm8776->codec);
+ kfree(wm8776);
+ wm8776_codec = NULL;
+}
+
+#if defined(CONFIG_SPI_MASTER)
+static int wm8776_spi_write(struct spi_device *spi, const char *data, int len)
+{
+ struct spi_transfer t;
+ struct spi_message m;
+ u8 msg[2];
+
+ if (len <= 0)
+ return 0;
+
+ msg[0] = data[0];
+ msg[1] = data[1];
+
+ spi_message_init(&m);
+ memset(&t, 0, (sizeof t));
+
+ t.tx_buf = &msg[0];
+ t.len = len;
+
+ spi_message_add_tail(&t, &m);
+ spi_sync(spi, &m);
+
+ return len;
+}
+
+static int __devinit wm8776_spi_probe(struct spi_device *spi)
+{
+ struct snd_soc_codec *codec;
+ struct wm8776_priv *wm8776;
+
+ wm8776 = kzalloc(sizeof(struct wm8776_priv), GFP_KERNEL);
+ if (wm8776 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8776->codec;
+ codec->control_data = spi;
+ codec->hw_write = (hw_write_t)wm8776_spi_write;
+ codec->dev = &spi->dev;
+
+ dev_set_drvdata(&spi->dev, wm8776);
+
+ return wm8776_register(wm8776, SND_SOC_SPI);
+}
+
+static int __devexit wm8776_spi_remove(struct spi_device *spi)
+{
+ struct wm8776_priv *wm8776 = dev_get_drvdata(&spi->dev);
+
+ wm8776_unregister(wm8776);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8776_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&spi->dev);
+}
+
+static int wm8776_spi_resume(struct spi_device *spi)
+{
+ return snd_soc_resume_device(&spi->dev);
+}
+#else
+#define wm8776_spi_suspend NULL
+#define wm8776_spi_resume NULL
+#endif
+
+static struct spi_driver wm8776_spi_driver = {
+ .driver = {
+ .name = "wm8776",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8776_spi_probe,
+ .suspend = wm8776_spi_suspend,
+ .resume = wm8776_spi_resume,
+ .remove = __devexit_p(wm8776_spi_remove),
+};
+#endif /* CONFIG_SPI_MASTER */
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8776_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8776_priv *wm8776;
+ struct snd_soc_codec *codec;
+
+ wm8776 = kzalloc(sizeof(struct wm8776_priv), GFP_KERNEL);
+ if (wm8776 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8776->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+
+ i2c_set_clientdata(i2c, wm8776);
+ codec->control_data = i2c;
+
+ codec->dev = &i2c->dev;
+
+ return wm8776_register(wm8776, SND_SOC_I2C);
+}
+
+static __devexit int wm8776_i2c_remove(struct i2c_client *client)
+{
+ struct wm8776_priv *wm8776 = i2c_get_clientdata(client);
+ wm8776_unregister(wm8776);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8776_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&i2c->dev);
+}
+
+static int wm8776_i2c_resume(struct i2c_client *i2c)
+{
+ return snd_soc_resume_device(&i2c->dev);
+}
+#else
+#define wm8776_i2c_suspend NULL
+#define wm8776_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id wm8776_i2c_id[] = {
+ { "wm8776", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8776_i2c_id);
+
+static struct i2c_driver wm8776_i2c_driver = {
+ .driver = {
+ .name = "wm8776",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8776_i2c_probe,
+ .remove = __devexit_p(wm8776_i2c_remove),
+ .suspend = wm8776_i2c_suspend,
+ .resume = wm8776_i2c_resume,
+ .id_table = wm8776_i2c_id,
+};
+#endif
+
+static int __init wm8776_modinit(void)
+{
+ int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&wm8776_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8776 I2C driver: %d\n",
+ ret);
+ }
+#endif
+#if defined(CONFIG_SPI_MASTER)
+ ret = spi_register_driver(&wm8776_spi_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8776 SPI driver: %d\n",
+ ret);
+ }
+#endif
+ return 0;
+}
+module_init(wm8776_modinit);
+
+static void __exit wm8776_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&wm8776_i2c_driver);
+#endif
+#if defined(CONFIG_SPI_MASTER)
+ spi_unregister_driver(&wm8776_spi_driver);
+#endif
+}
+module_exit(wm8776_exit);
+
+MODULE_DESCRIPTION("ASoC WM8776 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8776.h b/sound/soc/codecs/wm8776.h
new file mode 100644
index 000000000000..6606d25d2d83
--- /dev/null
+++ b/sound/soc/codecs/wm8776.h
@@ -0,0 +1,51 @@
+/*
+ * wm8776.h -- WM8776 ASoC driver
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8776_H
+#define _WM8776_H
+
+/* Registers */
+
+#define WM8776_HPLVOL 0x00
+#define WM8776_HPRVOL 0x01
+#define WM8776_HPMASTER 0x02
+#define WM8776_DACLVOL 0x03
+#define WM8776_DACRVOL 0x04
+#define WM8776_DACMASTER 0x05
+#define WM8776_PHASESWAP 0x06
+#define WM8776_DACCTRL1 0x07
+#define WM8776_DACMUTE 0x08
+#define WM8776_DACCTRL2 0x09
+#define WM8776_DACIFCTRL 0x0a
+#define WM8776_ADCIFCTRL 0x0b
+#define WM8776_MSTRCTRL 0x0c
+#define WM8776_PWRDOWN 0x0d
+#define WM8776_ADCLVOL 0x0e
+#define WM8776_ADCRVOL 0x0f
+#define WM8776_ALCCTRL1 0x10
+#define WM8776_ALCCTRL2 0x11
+#define WM8776_ALCCTRL3 0x12
+#define WM8776_NOISEGATE 0x13
+#define WM8776_LIMITER 0x14
+#define WM8776_ADCMUX 0x15
+#define WM8776_OUTMUX 0x16
+#define WM8776_RESET 0x17
+
+#define WM8776_CACHEREGNUM 0x17
+
+#define WM8776_DAI_DAC 0
+#define WM8776_DAI_ADC 1
+
+extern struct snd_soc_dai wm8776_dai[];
+extern struct snd_soc_codec_device soc_codec_dev_wm8776;
+
+#endif
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 3c78945244b8..5e9c855c0036 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -116,6 +116,7 @@
#define WM8900_REG_CLOCKING2_DAC_CLKDIV 0x1c
#define WM8900_REG_DACCTRL_MUTE 0x004
+#define WM8900_REG_DACCTRL_DAC_SB_FILT 0x100
#define WM8900_REG_DACCTRL_AIF_LRCLKRATE 0x400
#define WM8900_REG_AUDIO3_ADCLRC_DIR 0x0800
@@ -182,111 +183,20 @@ static const u16 wm8900_reg_defaults[WM8900_MAXREG] = {
/* Remaining registers all zero */
};
-/*
- * read wm8900 register cache
- */
-static inline unsigned int wm8900_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
-
- BUG_ON(reg >= WM8900_MAXREG);
-
- if (reg == WM8900_REG_ID)
- return 0;
-
- return cache[reg];
-}
-
-/*
- * write wm8900 register cache
- */
-static inline void wm8900_write_reg_cache(struct snd_soc_codec *codec,
- u16 reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
-
- BUG_ON(reg >= WM8900_MAXREG);
-
- cache[reg] = value;
-}
-
-/*
- * write to the WM8900 register space
- */
-static int wm8900_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[3];
-
- if (value == wm8900_read_reg_cache(codec, reg))
- return 0;
-
- /* data is
- * D15..D9 WM8900 register offset
- * D8...D0 register data
- */
- data[0] = reg;
- data[1] = value >> 8;
- data[2] = value & 0x00ff;
-
- wm8900_write_reg_cache(codec, reg, value);
- if (codec->hw_write(codec->control_data, data, 3) == 3)
- return 0;
- else
- return -EIO;
-}
-
-/*
- * Read from the wm8900.
- */
-static unsigned int wm8900_chip_read(struct snd_soc_codec *codec, u8 reg)
-{
- struct i2c_msg xfer[2];
- u16 data;
- int ret;
- struct i2c_client *client = codec->control_data;
-
- BUG_ON(reg != WM8900_REG_ID && reg != WM8900_REG_POWER1);
-
- /* Write register */
- xfer[0].addr = client->addr;
- xfer[0].flags = 0;
- xfer[0].len = 1;
- xfer[0].buf = &reg;
-
- /* Read data */
- xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = 2;
- xfer[1].buf = (u8 *)&data;
-
- ret = i2c_transfer(client->adapter, xfer, 2);
- if (ret != 2) {
- printk(KERN_CRIT "i2c_transfer returned %d\n", ret);
- return 0;
- }
-
- return (data >> 8) | ((data & 0xff) << 8);
-}
-
-/*
- * Read from the WM8900 register space. Most registers can't be read
- * and are therefore supplied from cache.
- */
-static unsigned int wm8900_read(struct snd_soc_codec *codec, unsigned int reg)
+static int wm8900_volatile_register(unsigned int reg)
{
switch (reg) {
case WM8900_REG_ID:
- return wm8900_chip_read(codec, reg);
+ case WM8900_REG_POWER1:
+ return 1;
default:
- return wm8900_read_reg_cache(codec, reg);
+ return 0;
}
}
static void wm8900_reset(struct snd_soc_codec *codec)
{
- wm8900_write(codec, WM8900_REG_RESET, 0);
+ snd_soc_write(codec, WM8900_REG_RESET, 0);
memcpy(codec->reg_cache, wm8900_reg_defaults,
sizeof(codec->reg_cache));
@@ -296,14 +206,14 @@ static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- u16 hpctl1 = wm8900_read(codec, WM8900_REG_HPCTL1);
+ u16 hpctl1 = snd_soc_read(codec, WM8900_REG_HPCTL1);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* Clamp headphone outputs */
hpctl1 = WM8900_REG_HPCTL1_HP_CLAMP_IP |
WM8900_REG_HPCTL1_HP_CLAMP_OP;
- wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
break;
case SND_SOC_DAPM_POST_PMU:
@@ -312,41 +222,41 @@ static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT |
WM8900_REG_HPCTL1_HP_SHORT2 |
WM8900_REG_HPCTL1_HP_IPSTAGE_ENA;
- wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
msleep(400);
/* Enable the output stage */
hpctl1 &= ~WM8900_REG_HPCTL1_HP_CLAMP_OP;
hpctl1 |= WM8900_REG_HPCTL1_HP_OPSTAGE_ENA;
- wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
/* Remove the shorts */
hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT2;
- wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT;
- wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
break;
case SND_SOC_DAPM_PRE_PMD:
/* Short the output */
hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT;
- wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
/* Disable the output stage */
hpctl1 &= ~WM8900_REG_HPCTL1_HP_OPSTAGE_ENA;
- wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
/* Clamp the outputs and power down input */
hpctl1 |= WM8900_REG_HPCTL1_HP_CLAMP_IP |
WM8900_REG_HPCTL1_HP_CLAMP_OP;
hpctl1 &= ~WM8900_REG_HPCTL1_HP_IPSTAGE_ENA;
- wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
break;
case SND_SOC_DAPM_POST_PMD:
/* Disable everything */
- wm8900_write(codec, WM8900_REG_HPCTL1, 0);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, 0);
break;
default:
@@ -439,7 +349,6 @@ SOC_SINGLE("DAC Soft Mute Switch", WM8900_REG_DACCTRL, 6, 1, 1),
SOC_ENUM("DAC Mute Rate", dac_mute_rate),
SOC_SINGLE("DAC Mono Switch", WM8900_REG_DACCTRL, 9, 1, 0),
SOC_ENUM("DAC Deemphasis", dac_deemphasis),
-SOC_SINGLE("DAC Sloping Stopband Filter Switch", WM8900_REG_DACCTRL, 8, 1, 0),
SOC_SINGLE("DAC Sigma-Delta Modulator Clock Switch", WM8900_REG_DACCTRL,
12, 1, 0),
@@ -723,7 +632,7 @@ static int wm8900_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_codec *codec = socdev->card->codec;
u16 reg;
- reg = wm8900_read(codec, WM8900_REG_AUDIO1) & ~0x60;
+ reg = snd_soc_read(codec, WM8900_REG_AUDIO1) & ~0x60;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
@@ -741,7 +650,18 @@ static int wm8900_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- wm8900_write(codec, WM8900_REG_AUDIO1, reg);
+ snd_soc_write(codec, WM8900_REG_AUDIO1, reg);
+
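+ /* On playback, enable the DAC sloping stopband filter for rates of 24kHz and below */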
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
+
+ if (params_rate(params) <= 24000)
+ reg |= WM8900_REG_DACCTRL_DAC_SB_FILT;
+ else
+ reg &= ~WM8900_REG_DACCTRL_DAC_SB_FILT;
+
+ snd_soc_write(codec, WM8900_REG_DACCTRL, reg);
+ }
return 0;
}
@@ -834,18 +754,18 @@ static int wm8900_set_fll(struct snd_soc_codec *codec,
return 0;
/* The digital side should be disabled during any change. */
- reg = wm8900_read(codec, WM8900_REG_POWER1);
- wm8900_write(codec, WM8900_REG_POWER1,
+ reg = snd_soc_read(codec, WM8900_REG_POWER1);
+ snd_soc_write(codec, WM8900_REG_POWER1,
reg & (~WM8900_REG_POWER1_FLL_ENA));
/* Disable the FLL? */
if (!freq_in || !freq_out) {
- reg = wm8900_read(codec, WM8900_REG_CLOCKING1);
- wm8900_write(codec, WM8900_REG_CLOCKING1,
+ reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
+ snd_soc_write(codec, WM8900_REG_CLOCKING1,
reg & (~WM8900_REG_CLOCKING1_MCLK_SRC));
- reg = wm8900_read(codec, WM8900_REG_FLLCTL1);
- wm8900_write(codec, WM8900_REG_FLLCTL1,
+ reg = snd_soc_read(codec, WM8900_REG_FLLCTL1);
+ snd_soc_write(codec, WM8900_REG_FLLCTL1,
reg & (~WM8900_REG_FLLCTL1_OSC_ENA));
wm8900->fll_in = freq_in;
@@ -862,33 +782,33 @@ static int wm8900_set_fll(struct snd_soc_codec *codec,
/* The oscillator *MUST* be enabled before we enable the
* digital circuit. */
- wm8900_write(codec, WM8900_REG_FLLCTL1,
+ snd_soc_write(codec, WM8900_REG_FLLCTL1,
fll_div.fll_ratio | WM8900_REG_FLLCTL1_OSC_ENA);
- wm8900_write(codec, WM8900_REG_FLLCTL4, fll_div.n >> 5);
- wm8900_write(codec, WM8900_REG_FLLCTL5,
+ snd_soc_write(codec, WM8900_REG_FLLCTL4, fll_div.n >> 5);
+ snd_soc_write(codec, WM8900_REG_FLLCTL5,
(fll_div.fllclk_div << 6) | (fll_div.n & 0x1f));
if (fll_div.k) {
- wm8900_write(codec, WM8900_REG_FLLCTL2,
+ snd_soc_write(codec, WM8900_REG_FLLCTL2,
(fll_div.k >> 8) | 0x100);
- wm8900_write(codec, WM8900_REG_FLLCTL3, fll_div.k & 0xff);
+ snd_soc_write(codec, WM8900_REG_FLLCTL3, fll_div.k & 0xff);
} else
- wm8900_write(codec, WM8900_REG_FLLCTL2, 0);
+ snd_soc_write(codec, WM8900_REG_FLLCTL2, 0);
if (fll_div.fll_slow_lock_ref)
- wm8900_write(codec, WM8900_REG_FLLCTL6,
+ snd_soc_write(codec, WM8900_REG_FLLCTL6,
WM8900_REG_FLLCTL6_FLL_SLOW_LOCK_REF);
else
- wm8900_write(codec, WM8900_REG_FLLCTL6, 0);
+ snd_soc_write(codec, WM8900_REG_FLLCTL6, 0);
- reg = wm8900_read(codec, WM8900_REG_POWER1);
- wm8900_write(codec, WM8900_REG_POWER1,
+ reg = snd_soc_read(codec, WM8900_REG_POWER1);
+ snd_soc_write(codec, WM8900_REG_POWER1,
reg | WM8900_REG_POWER1_FLL_ENA);
reenable:
- reg = wm8900_read(codec, WM8900_REG_CLOCKING1);
- wm8900_write(codec, WM8900_REG_CLOCKING1,
+ reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
+ snd_soc_write(codec, WM8900_REG_CLOCKING1,
reg | WM8900_REG_CLOCKING1_MCLK_SRC);
return 0;
@@ -908,38 +828,38 @@ static int wm8900_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
switch (div_id) {
case WM8900_BCLK_DIV:
- reg = wm8900_read(codec, WM8900_REG_CLOCKING1);
- wm8900_write(codec, WM8900_REG_CLOCKING1,
+ reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
+ snd_soc_write(codec, WM8900_REG_CLOCKING1,
div | (reg & WM8900_REG_CLOCKING1_BCLK_MASK));
break;
case WM8900_OPCLK_DIV:
- reg = wm8900_read(codec, WM8900_REG_CLOCKING1);
- wm8900_write(codec, WM8900_REG_CLOCKING1,
+ reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
+ snd_soc_write(codec, WM8900_REG_CLOCKING1,
div | (reg & WM8900_REG_CLOCKING1_OPCLK_MASK));
break;
case WM8900_DAC_LRCLK:
- reg = wm8900_read(codec, WM8900_REG_AUDIO4);
- wm8900_write(codec, WM8900_REG_AUDIO4,
+ reg = snd_soc_read(codec, WM8900_REG_AUDIO4);
+ snd_soc_write(codec, WM8900_REG_AUDIO4,
div | (reg & WM8900_LRC_MASK));
break;
case WM8900_ADC_LRCLK:
- reg = wm8900_read(codec, WM8900_REG_AUDIO3);
- wm8900_write(codec, WM8900_REG_AUDIO3,
+ reg = snd_soc_read(codec, WM8900_REG_AUDIO3);
+ snd_soc_write(codec, WM8900_REG_AUDIO3,
div | (reg & WM8900_LRC_MASK));
break;
case WM8900_DAC_CLKDIV:
- reg = wm8900_read(codec, WM8900_REG_CLOCKING2);
- wm8900_write(codec, WM8900_REG_CLOCKING2,
+ reg = snd_soc_read(codec, WM8900_REG_CLOCKING2);
+ snd_soc_write(codec, WM8900_REG_CLOCKING2,
div | (reg & WM8900_REG_CLOCKING2_DAC_CLKDIV));
break;
case WM8900_ADC_CLKDIV:
- reg = wm8900_read(codec, WM8900_REG_CLOCKING2);
- wm8900_write(codec, WM8900_REG_CLOCKING2,
+ reg = snd_soc_read(codec, WM8900_REG_CLOCKING2);
+ snd_soc_write(codec, WM8900_REG_CLOCKING2,
div | (reg & WM8900_REG_CLOCKING2_ADC_CLKDIV));
break;
case WM8900_LRCLK_MODE:
- reg = wm8900_read(codec, WM8900_REG_DACCTRL);
- wm8900_write(codec, WM8900_REG_DACCTRL,
+ reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
+ snd_soc_write(codec, WM8900_REG_DACCTRL,
div | (reg & WM8900_REG_DACCTRL_AIF_LRCLKRATE));
break;
default:
@@ -956,10 +876,10 @@ static int wm8900_set_dai_fmt(struct snd_soc_dai *codec_dai,
struct snd_soc_codec *codec = codec_dai->codec;
unsigned int clocking1, aif1, aif3, aif4;
- clocking1 = wm8900_read(codec, WM8900_REG_CLOCKING1);
- aif1 = wm8900_read(codec, WM8900_REG_AUDIO1);
- aif3 = wm8900_read(codec, WM8900_REG_AUDIO3);
- aif4 = wm8900_read(codec, WM8900_REG_AUDIO4);
+ clocking1 = snd_soc_read(codec, WM8900_REG_CLOCKING1);
+ aif1 = snd_soc_read(codec, WM8900_REG_AUDIO1);
+ aif3 = snd_soc_read(codec, WM8900_REG_AUDIO3);
+ aif4 = snd_soc_read(codec, WM8900_REG_AUDIO4);
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -1055,10 +975,10 @@ static int wm8900_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8900_write(codec, WM8900_REG_CLOCKING1, clocking1);
- wm8900_write(codec, WM8900_REG_AUDIO1, aif1);
- wm8900_write(codec, WM8900_REG_AUDIO3, aif3);
- wm8900_write(codec, WM8900_REG_AUDIO4, aif4);
+ snd_soc_write(codec, WM8900_REG_CLOCKING1, clocking1);
+ snd_soc_write(codec, WM8900_REG_AUDIO1, aif1);
+ snd_soc_write(codec, WM8900_REG_AUDIO3, aif3);
+ snd_soc_write(codec, WM8900_REG_AUDIO4, aif4);
return 0;
}
@@ -1068,14 +988,14 @@ static int wm8900_digital_mute(struct snd_soc_dai *codec_dai, int mute)
struct snd_soc_codec *codec = codec_dai->codec;
u16 reg;
- reg = wm8900_read(codec, WM8900_REG_DACCTRL);
+ reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
if (mute)
reg |= WM8900_REG_DACCTRL_MUTE;
else
reg &= ~WM8900_REG_DACCTRL_MUTE;
- wm8900_write(codec, WM8900_REG_DACCTRL, reg);
+ snd_soc_write(codec, WM8900_REG_DACCTRL, reg);
return 0;
}
@@ -1124,11 +1044,11 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
/* Enable thermal shutdown */
- reg = wm8900_read(codec, WM8900_REG_GPIO);
- wm8900_write(codec, WM8900_REG_GPIO,
+ reg = snd_soc_read(codec, WM8900_REG_GPIO);
+ snd_soc_write(codec, WM8900_REG_GPIO,
reg | WM8900_REG_GPIO_TEMP_ENA);
- reg = wm8900_read(codec, WM8900_REG_ADDCTL);
- wm8900_write(codec, WM8900_REG_ADDCTL,
+ reg = snd_soc_read(codec, WM8900_REG_ADDCTL);
+ snd_soc_write(codec, WM8900_REG_ADDCTL,
reg | WM8900_REG_ADDCTL_TEMP_SD);
break;
@@ -1139,69 +1059,69 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
/* Charge capacitors if initial power up */
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* STARTUP_BIAS_ENA on */
- wm8900_write(codec, WM8900_REG_POWER1,
+ snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_STARTUP_BIAS_ENA);
/* Startup bias mode */
- wm8900_write(codec, WM8900_REG_ADDCTL,
+ snd_soc_write(codec, WM8900_REG_ADDCTL,
WM8900_REG_ADDCTL_BIAS_SRC |
WM8900_REG_ADDCTL_VMID_SOFTST);
/* VMID 2x50k */
- wm8900_write(codec, WM8900_REG_POWER1,
+ snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_STARTUP_BIAS_ENA | 0x1);
/* Allow capacitors to charge */
schedule_timeout_interruptible(msecs_to_jiffies(400));
/* Enable bias */
- wm8900_write(codec, WM8900_REG_POWER1,
+ snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_STARTUP_BIAS_ENA |
WM8900_REG_POWER1_BIAS_ENA | 0x1);
- wm8900_write(codec, WM8900_REG_ADDCTL, 0);
+ snd_soc_write(codec, WM8900_REG_ADDCTL, 0);
- wm8900_write(codec, WM8900_REG_POWER1,
+ snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_BIAS_ENA | 0x1);
}
- reg = wm8900_read(codec, WM8900_REG_POWER1);
- wm8900_write(codec, WM8900_REG_POWER1,
+ reg = snd_soc_read(codec, WM8900_REG_POWER1);
+ snd_soc_write(codec, WM8900_REG_POWER1,
(reg & WM8900_REG_POWER1_FLL_ENA) |
WM8900_REG_POWER1_BIAS_ENA | 0x1);
- wm8900_write(codec, WM8900_REG_POWER2,
+ snd_soc_write(codec, WM8900_REG_POWER2,
WM8900_REG_POWER2_SYSCLK_ENA);
- wm8900_write(codec, WM8900_REG_POWER3, 0);
+ snd_soc_write(codec, WM8900_REG_POWER3, 0);
break;
case SND_SOC_BIAS_OFF:
/* Startup bias enable */
- reg = wm8900_read(codec, WM8900_REG_POWER1);
- wm8900_write(codec, WM8900_REG_POWER1,
+ reg = snd_soc_read(codec, WM8900_REG_POWER1);
+ snd_soc_write(codec, WM8900_REG_POWER1,
reg & WM8900_REG_POWER1_STARTUP_BIAS_ENA);
- wm8900_write(codec, WM8900_REG_ADDCTL,
+ snd_soc_write(codec, WM8900_REG_ADDCTL,
WM8900_REG_ADDCTL_BIAS_SRC |
WM8900_REG_ADDCTL_VMID_SOFTST);
/* Discharge caps */
- wm8900_write(codec, WM8900_REG_POWER1,
+ snd_soc_write(codec, WM8900_REG_POWER1,
WM8900_REG_POWER1_STARTUP_BIAS_ENA);
schedule_timeout_interruptible(msecs_to_jiffies(500));
/* Remove clamp */
- wm8900_write(codec, WM8900_REG_HPCTL1, 0);
+ snd_soc_write(codec, WM8900_REG_HPCTL1, 0);
/* Power down */
- wm8900_write(codec, WM8900_REG_ADDCTL, 0);
- wm8900_write(codec, WM8900_REG_POWER1, 0);
- wm8900_write(codec, WM8900_REG_POWER2, 0);
- wm8900_write(codec, WM8900_REG_POWER3, 0);
+ snd_soc_write(codec, WM8900_REG_ADDCTL, 0);
+ snd_soc_write(codec, WM8900_REG_POWER1, 0);
+ snd_soc_write(codec, WM8900_REG_POWER2, 0);
+ snd_soc_write(codec, WM8900_REG_POWER3, 0);
/* Need to let things settle before stopping the clock
* to ensure that restart works, see "Stopping the
* master clock" in the datasheet. */
schedule_timeout_interruptible(msecs_to_jiffies(1));
- wm8900_write(codec, WM8900_REG_POWER2,
+ snd_soc_write(codec, WM8900_REG_POWER2,
WM8900_REG_POWER2_SYSCLK_ENA);
break;
}
@@ -1264,7 +1184,7 @@ static int wm8900_resume(struct platform_device *pdev)
if (cache) {
for (i = 0; i < WM8900_MAXREG; i++)
- wm8900_write(codec, i, cache[i]);
+ snd_soc_write(codec, i, cache[i]);
kfree(cache);
} else
dev_err(&pdev->dev, "Unable to allocate register cache\n");
@@ -1297,16 +1217,20 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
codec->name = "WM8900";
codec->owner = THIS_MODULE;
- codec->read = wm8900_read;
- codec->write = wm8900_write;
codec->dai = &wm8900_dai;
codec->num_dai = 1;
- codec->hw_write = (hw_write_t)i2c_master_send;
codec->control_data = i2c;
codec->set_bias_level = wm8900_set_bias_level;
+ codec->volatile_register = wm8900_volatile_register;
codec->dev = &i2c->dev;
- reg = wm8900_read(codec, WM8900_REG_ID);
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ reg = snd_soc_read(codec, WM8900_REG_ID);
if (reg != 0x8900) {
dev_err(&i2c->dev, "Device is not a WM8900 - ID %x\n", reg);
ret = -ENODEV;
@@ -1314,7 +1238,7 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
}
/* Read back from the chip */
- reg = wm8900_chip_read(codec, WM8900_REG_POWER1);
+ reg = snd_soc_read(codec, WM8900_REG_POWER1);
reg = (reg >> 12) & 0xf;
dev_info(&i2c->dev, "WM8900 revision %d\n", reg);
@@ -1324,29 +1248,29 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
wm8900_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Latch the volume update bits */
- wm8900_write(codec, WM8900_REG_LINVOL,
- wm8900_read(codec, WM8900_REG_LINVOL) | 0x100);
- wm8900_write(codec, WM8900_REG_RINVOL,
- wm8900_read(codec, WM8900_REG_RINVOL) | 0x100);
- wm8900_write(codec, WM8900_REG_LOUT1CTL,
- wm8900_read(codec, WM8900_REG_LOUT1CTL) | 0x100);
- wm8900_write(codec, WM8900_REG_ROUT1CTL,
- wm8900_read(codec, WM8900_REG_ROUT1CTL) | 0x100);
- wm8900_write(codec, WM8900_REG_LOUT2CTL,
- wm8900_read(codec, WM8900_REG_LOUT2CTL) | 0x100);
- wm8900_write(codec, WM8900_REG_ROUT2CTL,
- wm8900_read(codec, WM8900_REG_ROUT2CTL) | 0x100);
- wm8900_write(codec, WM8900_REG_LDAC_DV,
- wm8900_read(codec, WM8900_REG_LDAC_DV) | 0x100);
- wm8900_write(codec, WM8900_REG_RDAC_DV,
- wm8900_read(codec, WM8900_REG_RDAC_DV) | 0x100);
- wm8900_write(codec, WM8900_REG_LADC_DV,
- wm8900_read(codec, WM8900_REG_LADC_DV) | 0x100);
- wm8900_write(codec, WM8900_REG_RADC_DV,
- wm8900_read(codec, WM8900_REG_RADC_DV) | 0x100);
+ snd_soc_write(codec, WM8900_REG_LINVOL,
+ snd_soc_read(codec, WM8900_REG_LINVOL) | 0x100);
+ snd_soc_write(codec, WM8900_REG_RINVOL,
+ snd_soc_read(codec, WM8900_REG_RINVOL) | 0x100);
+ snd_soc_write(codec, WM8900_REG_LOUT1CTL,
+ snd_soc_read(codec, WM8900_REG_LOUT1CTL) | 0x100);
+ snd_soc_write(codec, WM8900_REG_ROUT1CTL,
+ snd_soc_read(codec, WM8900_REG_ROUT1CTL) | 0x100);
+ snd_soc_write(codec, WM8900_REG_LOUT2CTL,
+ snd_soc_read(codec, WM8900_REG_LOUT2CTL) | 0x100);
+ snd_soc_write(codec, WM8900_REG_ROUT2CTL,
+ snd_soc_read(codec, WM8900_REG_ROUT2CTL) | 0x100);
+ snd_soc_write(codec, WM8900_REG_LDAC_DV,
+ snd_soc_read(codec, WM8900_REG_LDAC_DV) | 0x100);
+ snd_soc_write(codec, WM8900_REG_RDAC_DV,
+ snd_soc_read(codec, WM8900_REG_RDAC_DV) | 0x100);
+ snd_soc_write(codec, WM8900_REG_LADC_DV,
+ snd_soc_read(codec, WM8900_REG_LADC_DV) | 0x100);
+ snd_soc_write(codec, WM8900_REG_RADC_DV,
+ snd_soc_read(codec, WM8900_REG_RADC_DV) | 0x100);
/* Set the DAC and mixer output bias */
- wm8900_write(codec, WM8900_REG_OUTBIASCTL, 0x81);
+ snd_soc_write(codec, WM8900_REG_OUTBIASCTL, 0x81);
wm8900_dai.dev = &i2c->dev;
@@ -1388,6 +1312,21 @@ static __devexit int wm8900_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8900_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8900_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8900_i2c_suspend NULL
+#define wm8900_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8900_i2c_id[] = {
{ "wm8900", 0 },
{ }
@@ -1401,6 +1340,8 @@ static struct i2c_driver wm8900_i2c_driver = {
},
.probe = wm8900_i2c_probe,
.remove = __devexit_p(wm8900_i2c_remove),
+ .suspend = wm8900_i2c_suspend,
+ .resume = wm8900_i2c_resume,
.id_table = wm8900_i2c_id,
};
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index e8d2e3e14c45..fe1307b500cf 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -225,94 +225,18 @@ struct wm8903_priv {
struct snd_pcm_substream *slave_substream;
};
-
-static unsigned int wm8903_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
-
- BUG_ON(reg >= ARRAY_SIZE(wm8903_reg_defaults));
-
- return cache[reg];
-}
-
-static unsigned int wm8903_hw_read(struct snd_soc_codec *codec, u8 reg)
-{
- struct i2c_msg xfer[2];
- u16 data;
- int ret;
- struct i2c_client *client = codec->control_data;
-
- /* Write register */
- xfer[0].addr = client->addr;
- xfer[0].flags = 0;
- xfer[0].len = 1;
- xfer[0].buf = &reg;
-
- /* Read data */
- xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = 2;
- xfer[1].buf = (u8 *)&data;
-
- ret = i2c_transfer(client->adapter, xfer, 2);
- if (ret != 2) {
- pr_err("i2c_transfer returned %d\n", ret);
- return 0;
- }
-
- return (data >> 8) | ((data & 0xff) << 8);
-}
-
-static unsigned int wm8903_read(struct snd_soc_codec *codec,
- unsigned int reg)
+static int wm8903_volatile_register(unsigned int reg)
{
switch (reg) {
case WM8903_SW_RESET_AND_ID:
case WM8903_REVISION_NUMBER:
case WM8903_INTERRUPT_STATUS_1:
case WM8903_WRITE_SEQUENCER_4:
- return wm8903_hw_read(codec, reg);
+ return 1;
default:
- return wm8903_read_reg_cache(codec, reg);
- }
-}
-
-static void wm8903_write_reg_cache(struct snd_soc_codec *codec,
- u16 reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
-
- BUG_ON(reg >= ARRAY_SIZE(wm8903_reg_defaults));
-
- switch (reg) {
- case WM8903_SW_RESET_AND_ID:
- case WM8903_REVISION_NUMBER:
- break;
-
- default:
- cache[reg] = value;
- break;
- }
-}
-
-static int wm8903_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[3];
-
- wm8903_write_reg_cache(codec, reg, value);
-
- /* Data format is 1 byte of address followed by 2 bytes of data */
- data[0] = reg;
- data[1] = (value >> 8) & 0xff;
- data[2] = value & 0xff;
-
- if (codec->hw_write(codec->control_data, data, 3) == 2)
return 0;
- else
- return -EIO;
+ }
}
static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
@@ -323,13 +247,13 @@ static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
BUG_ON(start > 48);
/* Enable the sequencer */
- reg[0] = wm8903_read(codec, WM8903_WRITE_SEQUENCER_0);
+ reg[0] = snd_soc_read(codec, WM8903_WRITE_SEQUENCER_0);
reg[0] |= WM8903_WSEQ_ENA;
- wm8903_write(codec, WM8903_WRITE_SEQUENCER_0, reg[0]);
+ snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0, reg[0]);
dev_dbg(&i2c->dev, "Starting sequence at %d\n", start);
- wm8903_write(codec, WM8903_WRITE_SEQUENCER_3,
+ snd_soc_write(codec, WM8903_WRITE_SEQUENCER_3,
start | WM8903_WSEQ_START);
/* Wait for it to complete. If we have the interrupt wired up then
@@ -339,13 +263,13 @@ static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
do {
msleep(10);
- reg[4] = wm8903_read(codec, WM8903_WRITE_SEQUENCER_4);
+ reg[4] = snd_soc_read(codec, WM8903_WRITE_SEQUENCER_4);
} while (reg[4] & WM8903_WSEQ_BUSY);
dev_dbg(&i2c->dev, "Sequence complete\n");
/* Disable the sequencer again */
- wm8903_write(codec, WM8903_WRITE_SEQUENCER_0,
+ snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0,
reg[0] & ~WM8903_WSEQ_ENA);
return 0;
@@ -357,12 +281,12 @@ static void wm8903_sync_reg_cache(struct snd_soc_codec *codec, u16 *cache)
/* There really ought to be something better we can do here :/ */
for (i = 0; i < ARRAY_SIZE(wm8903_reg_defaults); i++)
- cache[i] = wm8903_hw_read(codec, i);
+ cache[i] = codec->hw_read(codec, i);
}
static void wm8903_reset(struct snd_soc_codec *codec)
{
- wm8903_write(codec, WM8903_SW_RESET_AND_ID, 0);
+ snd_soc_write(codec, WM8903_SW_RESET_AND_ID, 0);
memcpy(codec->reg_cache, wm8903_reg_defaults,
sizeof(wm8903_reg_defaults));
}
@@ -423,52 +347,52 @@ static int wm8903_output_event(struct snd_soc_dapm_widget *w,
}
if (event & SND_SOC_DAPM_PRE_PMU) {
- val = wm8903_read(codec, reg);
+ val = snd_soc_read(codec, reg);
/* Short the output */
val &= ~(WM8903_OUTPUT_SHORT << shift);
- wm8903_write(codec, reg, val);
+ snd_soc_write(codec, reg, val);
}
if (event & SND_SOC_DAPM_POST_PMU) {
- val = wm8903_read(codec, reg);
+ val = snd_soc_read(codec, reg);
val |= (WM8903_OUTPUT_IN << shift);
- wm8903_write(codec, reg, val);
+ snd_soc_write(codec, reg, val);
val |= (WM8903_OUTPUT_INT << shift);
- wm8903_write(codec, reg, val);
+ snd_soc_write(codec, reg, val);
/* Turn on the output ENA_OUTP */
val |= (WM8903_OUTPUT_OUT << shift);
- wm8903_write(codec, reg, val);
+ snd_soc_write(codec, reg, val);
/* Enable the DC servo */
- dcs_reg = wm8903_read(codec, WM8903_DC_SERVO_0);
+ dcs_reg = snd_soc_read(codec, WM8903_DC_SERVO_0);
dcs_reg |= dcs_bit;
- wm8903_write(codec, WM8903_DC_SERVO_0, dcs_reg);
+ snd_soc_write(codec, WM8903_DC_SERVO_0, dcs_reg);
/* Remove the short */
val |= (WM8903_OUTPUT_SHORT << shift);
- wm8903_write(codec, reg, val);
+ snd_soc_write(codec, reg, val);
}
if (event & SND_SOC_DAPM_PRE_PMD) {
- val = wm8903_read(codec, reg);
+ val = snd_soc_read(codec, reg);
/* Short the output */
val &= ~(WM8903_OUTPUT_SHORT << shift);
- wm8903_write(codec, reg, val);
+ snd_soc_write(codec, reg, val);
/* Disable the DC servo */
- dcs_reg = wm8903_read(codec, WM8903_DC_SERVO_0);
+ dcs_reg = snd_soc_read(codec, WM8903_DC_SERVO_0);
dcs_reg &= ~dcs_bit;
- wm8903_write(codec, WM8903_DC_SERVO_0, dcs_reg);
+ snd_soc_write(codec, WM8903_DC_SERVO_0, dcs_reg);
/* Then disable the intermediate and output stages */
val &= ~((WM8903_OUTPUT_OUT | WM8903_OUTPUT_INT |
WM8903_OUTPUT_IN) << shift);
- wm8903_write(codec, reg, val);
+ snd_soc_write(codec, reg, val);
}
return 0;
@@ -492,13 +416,13 @@ static int wm8903_class_w_put(struct snd_kcontrol *kcontrol,
u16 reg;
int ret;
- reg = wm8903_read(codec, WM8903_CLASS_W_0);
+ reg = snd_soc_read(codec, WM8903_CLASS_W_0);
/* Turn it off if we're about to enable bypass */
if (ucontrol->value.integer.value[0]) {
if (wm8903->class_w_users == 0) {
dev_dbg(&i2c->dev, "Disabling Class W\n");
- wm8903_write(codec, WM8903_CLASS_W_0, reg &
+ snd_soc_write(codec, WM8903_CLASS_W_0, reg &
~(WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V));
}
wm8903->class_w_users++;
@@ -511,7 +435,7 @@ static int wm8903_class_w_put(struct snd_kcontrol *kcontrol,
if (!ucontrol->value.integer.value[0]) {
if (wm8903->class_w_users == 1) {
dev_dbg(&i2c->dev, "Enabling Class W\n");
- wm8903_write(codec, WM8903_CLASS_W_0, reg |
+ snd_soc_write(codec, WM8903_CLASS_W_0, reg |
WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V);
}
wm8903->class_w_users--;
@@ -715,8 +639,6 @@ SOC_ENUM("DAC Soft Mute Rate", soft_mute),
SOC_ENUM("DAC Mute Mode", mute_mode),
SOC_SINGLE("DAC Mono Switch", WM8903_DAC_DIGITAL_1, 12, 1, 0),
SOC_ENUM("DAC De-emphasis", dac_deemphasis),
-SOC_SINGLE("DAC Sloping Stopband Filter Switch",
- WM8903_DAC_DIGITAL_1, 11, 1, 0),
SOC_ENUM("DAC Companding Mode", dac_companding),
SOC_SINGLE("DAC Companding Switch", WM8903_AUDIO_INTERFACE_0, 1, 1, 0),
@@ -1011,55 +933,55 @@ static int wm8903_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
- reg = wm8903_read(codec, WM8903_VMID_CONTROL_0);
+ reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
reg &= ~(WM8903_VMID_RES_MASK);
reg |= WM8903_VMID_RES_50K;
- wm8903_write(codec, WM8903_VMID_CONTROL_0, reg);
+ snd_soc_write(codec, WM8903_VMID_CONTROL_0, reg);
break;
case SND_SOC_BIAS_STANDBY:
if (codec->bias_level == SND_SOC_BIAS_OFF) {
- wm8903_write(codec, WM8903_CLOCK_RATES_2,
+ snd_soc_write(codec, WM8903_CLOCK_RATES_2,
WM8903_CLK_SYS_ENA);
/* Change DC servo dither level in startup sequence */
- wm8903_write(codec, WM8903_WRITE_SEQUENCER_0, 0x11);
- wm8903_write(codec, WM8903_WRITE_SEQUENCER_1, 0x1257);
- wm8903_write(codec, WM8903_WRITE_SEQUENCER_2, 0x2);
+ snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0, 0x11);
+ snd_soc_write(codec, WM8903_WRITE_SEQUENCER_1, 0x1257);
+ snd_soc_write(codec, WM8903_WRITE_SEQUENCER_2, 0x2);
wm8903_run_sequence(codec, 0);
wm8903_sync_reg_cache(codec, codec->reg_cache);
/* Enable low impedance charge pump output */
- reg = wm8903_read(codec,
+ reg = snd_soc_read(codec,
WM8903_CONTROL_INTERFACE_TEST_1);
- wm8903_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
+ snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
reg | WM8903_TEST_KEY);
- reg2 = wm8903_read(codec, WM8903_CHARGE_PUMP_TEST_1);
- wm8903_write(codec, WM8903_CHARGE_PUMP_TEST_1,
+ reg2 = snd_soc_read(codec, WM8903_CHARGE_PUMP_TEST_1);
+ snd_soc_write(codec, WM8903_CHARGE_PUMP_TEST_1,
reg2 | WM8903_CP_SW_KELVIN_MODE_MASK);
- wm8903_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
+ snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
reg);
/* By default no bypass paths are enabled so
* enable Class W support.
*/
dev_dbg(&i2c->dev, "Enabling Class W\n");
- wm8903_write(codec, WM8903_CLASS_W_0, reg |
+ snd_soc_write(codec, WM8903_CLASS_W_0, reg |
WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V);
}
- reg = wm8903_read(codec, WM8903_VMID_CONTROL_0);
+ reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
reg &= ~(WM8903_VMID_RES_MASK);
reg |= WM8903_VMID_RES_250K;
- wm8903_write(codec, WM8903_VMID_CONTROL_0, reg);
+ snd_soc_write(codec, WM8903_VMID_CONTROL_0, reg);
break;
case SND_SOC_BIAS_OFF:
wm8903_run_sequence(codec, 32);
- reg = wm8903_read(codec, WM8903_CLOCK_RATES_2);
+ reg = snd_soc_read(codec, WM8903_CLOCK_RATES_2);
reg &= ~WM8903_CLK_SYS_ENA;
- wm8903_write(codec, WM8903_CLOCK_RATES_2, reg);
+ snd_soc_write(codec, WM8903_CLOCK_RATES_2, reg);
break;
}
@@ -1083,7 +1005,7 @@ static int wm8903_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- u16 aif1 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_1);
+ u16 aif1 = snd_soc_read(codec, WM8903_AUDIO_INTERFACE_1);
aif1 &= ~(WM8903_LRCLK_DIR | WM8903_BCLK_DIR | WM8903_AIF_FMT_MASK |
WM8903_AIF_LRCLK_INV | WM8903_AIF_BCLK_INV);
@@ -1161,7 +1083,7 @@ static int wm8903_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8903_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
+ snd_soc_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
return 0;
}
@@ -1171,14 +1093,14 @@ static int wm8903_digital_mute(struct snd_soc_dai *codec_dai, int mute)
struct snd_soc_codec *codec = codec_dai->codec;
u16 reg;
- reg = wm8903_read(codec, WM8903_DAC_DIGITAL_1);
+ reg = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
if (mute)
reg |= WM8903_DAC_MUTE;
else
reg &= ~WM8903_DAC_MUTE;
- wm8903_write(codec, WM8903_DAC_DIGITAL_1, reg);
+ snd_soc_write(codec, WM8903_DAC_DIGITAL_1, reg);
return 0;
}
@@ -1368,17 +1290,24 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream,
int cur_val;
int clk_sys;
- u16 aif1 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_1);
- u16 aif2 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_2);
- u16 aif3 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_3);
- u16 clock0 = wm8903_read(codec, WM8903_CLOCK_RATES_0);
- u16 clock1 = wm8903_read(codec, WM8903_CLOCK_RATES_1);
+ u16 aif1 = snd_soc_read(codec, WM8903_AUDIO_INTERFACE_1);
+ u16 aif2 = snd_soc_read(codec, WM8903_AUDIO_INTERFACE_2);
+ u16 aif3 = snd_soc_read(codec, WM8903_AUDIO_INTERFACE_3);
+ u16 clock0 = snd_soc_read(codec, WM8903_CLOCK_RATES_0);
+ u16 clock1 = snd_soc_read(codec, WM8903_CLOCK_RATES_1);
+ u16 dac_digital1 = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
if (substream == wm8903->slave_substream) {
dev_dbg(&i2c->dev, "Ignoring hw_params for slave substream\n");
return 0;
}
+ /* Enable sloping stopband filter for low sample rates */
+ if (fs <= 24000)
+ dac_digital1 |= WM8903_DAC_SB_FILT;
+ else
+ dac_digital1 &= ~WM8903_DAC_SB_FILT;
+
/* Configure sample rate logic for DSP - choose nearest rate */
dsp_config = 0;
best_val = abs(sample_rates[dsp_config].rate - fs);
@@ -1498,11 +1427,12 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream,
aif2 |= bclk_divs[bclk_div].div;
aif3 |= bclk / fs;
- wm8903_write(codec, WM8903_CLOCK_RATES_0, clock0);
- wm8903_write(codec, WM8903_CLOCK_RATES_1, clock1);
- wm8903_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
- wm8903_write(codec, WM8903_AUDIO_INTERFACE_2, aif2);
- wm8903_write(codec, WM8903_AUDIO_INTERFACE_3, aif3);
+ snd_soc_write(codec, WM8903_CLOCK_RATES_0, clock0);
+ snd_soc_write(codec, WM8903_CLOCK_RATES_1, clock1);
+ snd_soc_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
+ snd_soc_write(codec, WM8903_AUDIO_INTERFACE_2, aif2);
+ snd_soc_write(codec, WM8903_AUDIO_INTERFACE_3, aif3);
+ snd_soc_write(codec, WM8903_DAC_DIGITAL_1, dac_digital1);
return 0;
}
@@ -1587,7 +1517,7 @@ static int wm8903_resume(struct platform_device *pdev)
if (tmp_cache) {
for (i = 2; i < ARRAY_SIZE(wm8903_reg_defaults); i++)
if (tmp_cache[i] != reg_cache[i])
- wm8903_write(codec, i, tmp_cache[i]);
+ snd_soc_write(codec, i, tmp_cache[i]);
} else {
dev_err(&i2c->dev, "Failed to allocate temporary cache\n");
}
@@ -1618,9 +1548,6 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
codec->dev = &i2c->dev;
codec->name = "WM8903";
codec->owner = THIS_MODULE;
- codec->read = wm8903_read;
- codec->write = wm8903_write;
- codec->hw_write = (hw_write_t)i2c_master_send;
codec->bias_level = SND_SOC_BIAS_OFF;
codec->set_bias_level = wm8903_set_bias_level;
codec->dai = &wm8903_dai;
@@ -1628,18 +1555,25 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
codec->reg_cache_size = ARRAY_SIZE(wm8903->reg_cache);
codec->reg_cache = &wm8903->reg_cache[0];
codec->private_data = wm8903;
+ codec->volatile_register = wm8903_volatile_register;
i2c_set_clientdata(i2c, codec);
codec->control_data = i2c;
- val = wm8903_hw_read(codec, WM8903_SW_RESET_AND_ID);
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ val = snd_soc_read(codec, WM8903_SW_RESET_AND_ID);
if (val != wm8903_reg_defaults[WM8903_SW_RESET_AND_ID]) {
dev_err(&i2c->dev,
"Device with ID register %x is not a WM8903\n", val);
return -ENODEV;
}
- val = wm8903_read(codec, WM8903_REVISION_NUMBER);
+ val = snd_soc_read(codec, WM8903_REVISION_NUMBER);
dev_info(&i2c->dev, "WM8903 revision %d\n",
val & WM8903_CHIP_REV_MASK);
@@ -1649,35 +1583,35 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Latch volume update bits */
- val = wm8903_read(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT);
+ val = snd_soc_read(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT);
val |= WM8903_ADCVU;
- wm8903_write(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT, val);
- wm8903_write(codec, WM8903_ADC_DIGITAL_VOLUME_RIGHT, val);
+ snd_soc_write(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT, val);
+ snd_soc_write(codec, WM8903_ADC_DIGITAL_VOLUME_RIGHT, val);
- val = wm8903_read(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT);
+ val = snd_soc_read(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT);
val |= WM8903_DACVU;
- wm8903_write(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT, val);
- wm8903_write(codec, WM8903_DAC_DIGITAL_VOLUME_RIGHT, val);
+ snd_soc_write(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT, val);
+ snd_soc_write(codec, WM8903_DAC_DIGITAL_VOLUME_RIGHT, val);
- val = wm8903_read(codec, WM8903_ANALOGUE_OUT1_LEFT);
+ val = snd_soc_read(codec, WM8903_ANALOGUE_OUT1_LEFT);
val |= WM8903_HPOUTVU;
- wm8903_write(codec, WM8903_ANALOGUE_OUT1_LEFT, val);
- wm8903_write(codec, WM8903_ANALOGUE_OUT1_RIGHT, val);
+ snd_soc_write(codec, WM8903_ANALOGUE_OUT1_LEFT, val);
+ snd_soc_write(codec, WM8903_ANALOGUE_OUT1_RIGHT, val);
- val = wm8903_read(codec, WM8903_ANALOGUE_OUT2_LEFT);
+ val = snd_soc_read(codec, WM8903_ANALOGUE_OUT2_LEFT);
val |= WM8903_LINEOUTVU;
- wm8903_write(codec, WM8903_ANALOGUE_OUT2_LEFT, val);
- wm8903_write(codec, WM8903_ANALOGUE_OUT2_RIGHT, val);
+ snd_soc_write(codec, WM8903_ANALOGUE_OUT2_LEFT, val);
+ snd_soc_write(codec, WM8903_ANALOGUE_OUT2_RIGHT, val);
- val = wm8903_read(codec, WM8903_ANALOGUE_OUT3_LEFT);
+ val = snd_soc_read(codec, WM8903_ANALOGUE_OUT3_LEFT);
val |= WM8903_SPKVU;
- wm8903_write(codec, WM8903_ANALOGUE_OUT3_LEFT, val);
- wm8903_write(codec, WM8903_ANALOGUE_OUT3_RIGHT, val);
+ snd_soc_write(codec, WM8903_ANALOGUE_OUT3_LEFT, val);
+ snd_soc_write(codec, WM8903_ANALOGUE_OUT3_RIGHT, val);
/* Enable DAC soft mute by default */
- val = wm8903_read(codec, WM8903_DAC_DIGITAL_1);
+ val = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
val |= WM8903_DAC_MUTEMODE;
- wm8903_write(codec, WM8903_DAC_DIGITAL_1, val);
+ snd_soc_write(codec, WM8903_DAC_DIGITAL_1, val);
wm8903_dai.dev = &i2c->dev;
wm8903_codec = codec;
@@ -1721,6 +1655,21 @@ static __devexit int wm8903_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8903_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8903_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8903_i2c_suspend NULL
+#define wm8903_i2c_resume NULL
+#endif
+
/* i2c codec control layer */
static const struct i2c_device_id wm8903_i2c_id[] = {
{ "wm8903", 0 },
@@ -1735,6 +1684,8 @@ static struct i2c_driver wm8903_i2c_driver = {
},
.probe = wm8903_i2c_probe,
.remove = __devexit_p(wm8903_i2c_remove),
+ .suspend = wm8903_i2c_suspend,
+ .resume = wm8903_i2c_resume,
.id_table = wm8903_i2c_id,
};
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index b8e17d6bc1f7..da97aae475a2 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -106,50 +106,6 @@ static u16 wm8940_reg_defaults[] = {
0x0000, /* Mono Mixer Control */
};
-static inline unsigned int wm8940_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
-
- if (reg >= ARRAY_SIZE(wm8940_reg_defaults))
- return -1;
-
- return cache[reg];
-}
-
-static inline int wm8940_write_reg_cache(struct snd_soc_codec *codec,
- u16 reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
-
- if (reg >= ARRAY_SIZE(wm8940_reg_defaults))
- return -1;
-
- cache[reg] = value;
-
- return 0;
-}
-
-static int wm8940_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- int ret;
- u8 data[3] = { reg,
- (value & 0xff00) >> 8,
- (value & 0x00ff)
- };
-
- wm8940_write_reg_cache(codec, reg, value);
-
- ret = codec->hw_write(codec->control_data, data, 3);
-
- if (ret < 0)
- return ret;
- else if (ret != 3)
- return -EIO;
- return 0;
-}
-
static const char *wm8940_companding[] = { "Off", "NC", "u-law", "A-law" };
static const struct soc_enum wm8940_adc_companding_enum
= SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 1, 4, wm8940_companding);
@@ -348,14 +304,14 @@ error_ret:
return ret;
}
-#define wm8940_reset(c) wm8940_write(c, WM8940_SOFTRESET, 0);
+#define wm8940_reset(c) snd_soc_write(c, WM8940_SOFTRESET, 0);
static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- u16 iface = wm8940_read_reg_cache(codec, WM8940_IFACE) & 0xFE67;
- u16 clk = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0x1fe;
+ u16 iface = snd_soc_read(codec, WM8940_IFACE) & 0xFE67;
+ u16 clk = snd_soc_read(codec, WM8940_CLOCK) & 0x1fe;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
@@ -366,7 +322,7 @@ static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai,
default:
return -EINVAL;
}
- wm8940_write(codec, WM8940_CLOCK, clk);
+ snd_soc_write(codec, WM8940_CLOCK, clk);
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
@@ -399,7 +355,7 @@ static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai,
break;
}
- wm8940_write(codec, WM8940_IFACE, iface);
+ snd_soc_write(codec, WM8940_IFACE, iface);
return 0;
}
@@ -411,9 +367,9 @@ static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- u16 iface = wm8940_read_reg_cache(codec, WM8940_IFACE) & 0xFD9F;
- u16 addcntrl = wm8940_read_reg_cache(codec, WM8940_ADDCNTRL) & 0xFFF1;
- u16 companding = wm8940_read_reg_cache(codec,
+ u16 iface = snd_soc_read(codec, WM8940_IFACE) & 0xFD9F;
+ u16 addcntrl = snd_soc_read(codec, WM8940_ADDCNTRL) & 0xFFF1;
+ u16 companding = snd_soc_read(codec,
WM8940_COMPANDINGCTL) & 0xFFDF;
int ret;
@@ -442,7 +398,7 @@ static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream,
case SNDRV_PCM_RATE_48000:
break;
}
- ret = wm8940_write(codec, WM8940_ADDCNTRL, addcntrl);
+ ret = snd_soc_write(codec, WM8940_ADDCNTRL, addcntrl);
if (ret)
goto error_ret;
@@ -462,10 +418,10 @@ static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream,
iface |= (3 << 5);
break;
}
- ret = wm8940_write(codec, WM8940_COMPANDINGCTL, companding);
+ ret = snd_soc_write(codec, WM8940_COMPANDINGCTL, companding);
if (ret)
goto error_ret;
- ret = wm8940_write(codec, WM8940_IFACE, iface);
+ ret = snd_soc_write(codec, WM8940_IFACE, iface);
error_ret:
return ret;
@@ -474,19 +430,19 @@ error_ret:
static int wm8940_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u16 mute_reg = wm8940_read_reg_cache(codec, WM8940_DAC) & 0xffbf;
+ u16 mute_reg = snd_soc_read(codec, WM8940_DAC) & 0xffbf;
if (mute)
mute_reg |= 0x40;
- return wm8940_write(codec, WM8940_DAC, mute_reg);
+ return snd_soc_write(codec, WM8940_DAC, mute_reg);
}
static int wm8940_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
u16 val;
- u16 pwr_reg = wm8940_read_reg_cache(codec, WM8940_POWER1) & 0x1F0;
+ u16 pwr_reg = snd_soc_read(codec, WM8940_POWER1) & 0x1F0;
int ret = 0;
switch (level) {
@@ -494,26 +450,26 @@ static int wm8940_set_bias_level(struct snd_soc_codec *codec,
/* ensure bufioen and biasen */
pwr_reg |= (1 << 2) | (1 << 3);
/* Enable thermal shutdown */
- val = wm8940_read_reg_cache(codec, WM8940_OUTPUTCTL);
- ret = wm8940_write(codec, WM8940_OUTPUTCTL, val | 0x2);
+ val = snd_soc_read(codec, WM8940_OUTPUTCTL);
+ ret = snd_soc_write(codec, WM8940_OUTPUTCTL, val | 0x2);
if (ret)
break;
/* set vmid to 75k */
- ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x1);
+ ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x1);
break;
case SND_SOC_BIAS_PREPARE:
/* ensure bufioen and biasen */
pwr_reg |= (1 << 2) | (1 << 3);
- ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x1);
+ ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x1);
break;
case SND_SOC_BIAS_STANDBY:
/* ensure bufioen and biasen */
pwr_reg |= (1 << 2) | (1 << 3);
/* set vmid to 300k for standby */
- ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x2);
+ ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x2);
break;
case SND_SOC_BIAS_OFF:
- ret = wm8940_write(codec, WM8940_POWER1, pwr_reg);
+ ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg);
break;
}
@@ -587,36 +543,36 @@ static int wm8940_set_dai_pll(struct snd_soc_dai *codec_dai,
u16 reg;
/* Turn off PLL */
- reg = wm8940_read_reg_cache(codec, WM8940_POWER1);
- wm8940_write(codec, WM8940_POWER1, reg & 0x1df);
+ reg = snd_soc_read(codec, WM8940_POWER1);
+ snd_soc_write(codec, WM8940_POWER1, reg & 0x1df);
if (freq_in == 0 || freq_out == 0) {
/* Clock CODEC directly from MCLK */
- reg = wm8940_read_reg_cache(codec, WM8940_CLOCK);
- wm8940_write(codec, WM8940_CLOCK, reg & 0x0ff);
+ reg = snd_soc_read(codec, WM8940_CLOCK);
+ snd_soc_write(codec, WM8940_CLOCK, reg & 0x0ff);
/* Pll power down */
- wm8940_write(codec, WM8940_PLLN, (1 << 7));
+ snd_soc_write(codec, WM8940_PLLN, (1 << 7));
return 0;
}
/* Pll is followed by a frequency divide by 4 */
pll_factors(freq_out*4, freq_in);
if (pll_div.k)
- wm8940_write(codec, WM8940_PLLN,
+ snd_soc_write(codec, WM8940_PLLN,
(pll_div.pre_scale << 4) | pll_div.n | (1 << 6));
else /* No fractional component */
- wm8940_write(codec, WM8940_PLLN,
+ snd_soc_write(codec, WM8940_PLLN,
(pll_div.pre_scale << 4) | pll_div.n);
- wm8940_write(codec, WM8940_PLLK1, pll_div.k >> 18);
- wm8940_write(codec, WM8940_PLLK2, (pll_div.k >> 9) & 0x1ff);
- wm8940_write(codec, WM8940_PLLK3, pll_div.k & 0x1ff);
+ snd_soc_write(codec, WM8940_PLLK1, pll_div.k >> 18);
+ snd_soc_write(codec, WM8940_PLLK2, (pll_div.k >> 9) & 0x1ff);
+ snd_soc_write(codec, WM8940_PLLK3, pll_div.k & 0x1ff);
/* Enable the PLL */
- reg = wm8940_read_reg_cache(codec, WM8940_POWER1);
- wm8940_write(codec, WM8940_POWER1, reg | 0x020);
+ reg = snd_soc_read(codec, WM8940_POWER1);
+ snd_soc_write(codec, WM8940_POWER1, reg | 0x020);
/* Run CODEC from PLL instead of MCLK */
- reg = wm8940_read_reg_cache(codec, WM8940_CLOCK);
- wm8940_write(codec, WM8940_CLOCK, reg | 0x100);
+ reg = snd_soc_read(codec, WM8940_CLOCK);
+ snd_soc_write(codec, WM8940_CLOCK, reg | 0x100);
return 0;
}
@@ -648,16 +604,16 @@ static int wm8940_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
switch (div_id) {
case WM8940_BCLKDIV:
- reg = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0xFFEF3;
- ret = wm8940_write(codec, WM8940_CLOCK, reg | (div << 2));
+ reg = snd_soc_read(codec, WM8940_CLOCK) & 0xFFEF3;
+ ret = snd_soc_write(codec, WM8940_CLOCK, reg | (div << 2));
break;
case WM8940_MCLKDIV:
- reg = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0xFF1F;
- ret = wm8940_write(codec, WM8940_CLOCK, reg | (div << 5));
+ reg = snd_soc_read(codec, WM8940_CLOCK) & 0xFF1F;
+ ret = snd_soc_write(codec, WM8940_CLOCK, reg | (div << 5));
break;
case WM8940_OPCLKDIV:
- reg = wm8940_read_reg_cache(codec, WM8940_ADDCNTRL) & 0xFFCF;
- ret = wm8940_write(codec, WM8940_ADDCNTRL, reg | (div << 4));
+ reg = snd_soc_read(codec, WM8940_ADDCNTRL) & 0xFFCF;
+ ret = snd_soc_write(codec, WM8940_ADDCNTRL, reg | (div << 4));
break;
}
return ret;
@@ -808,7 +764,8 @@ struct snd_soc_codec_device soc_codec_dev_wm8940 = {
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8940);
-static int wm8940_register(struct wm8940_priv *wm8940)
+static int wm8940_register(struct wm8940_priv *wm8940,
+ enum snd_soc_control_type control)
{
struct wm8940_setup_data *pdata = wm8940->codec.dev->platform_data;
struct snd_soc_codec *codec = &wm8940->codec;
@@ -825,8 +782,6 @@ static int wm8940_register(struct wm8940_priv *wm8940)
codec->private_data = wm8940;
codec->name = "WM8940";
codec->owner = THIS_MODULE;
- codec->read = wm8940_read_reg_cache;
- codec->write = wm8940_write;
codec->bias_level = SND_SOC_BIAS_OFF;
codec->set_bias_level = wm8940_set_bias_level;
codec->dai = &wm8940_dai;
@@ -834,6 +789,12 @@ static int wm8940_register(struct wm8940_priv *wm8940)
codec->reg_cache_size = ARRAY_SIZE(wm8940_reg_defaults);
codec->reg_cache = &wm8940->reg_cache;
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+
memcpy(codec->reg_cache, wm8940_reg_defaults,
sizeof(wm8940_reg_defaults));
@@ -847,15 +808,15 @@ static int wm8940_register(struct wm8940_priv *wm8940)
wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- ret = wm8940_write(codec, WM8940_POWER1, 0x180);
+ ret = snd_soc_write(codec, WM8940_POWER1, 0x180);
if (ret < 0)
return ret;
if (!pdata)
dev_warn(codec->dev, "No platform data supplied\n");
else {
- reg = wm8940_read_reg_cache(codec, WM8940_OUTPUTCTL);
- ret = wm8940_write(codec, WM8940_OUTPUTCTL, reg | pdata->vroi);
+ reg = snd_soc_read(codec, WM8940_OUTPUTCTL);
+ ret = snd_soc_write(codec, WM8940_OUTPUTCTL, reg | pdata->vroi);
if (ret < 0)
return ret;
}
@@ -904,7 +865,7 @@ static int wm8940_i2c_probe(struct i2c_client *i2c,
codec->control_data = i2c;
codec->dev = &i2c->dev;
- return wm8940_register(wm8940);
+ return wm8940_register(wm8940, SND_SOC_I2C);
}
static int __devexit wm8940_i2c_remove(struct i2c_client *client)
@@ -916,6 +877,21 @@ static int __devexit wm8940_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8940_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8940_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8940_i2c_suspend NULL
+#define wm8940_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8940_i2c_id[] = {
{ "wm8940", 0 },
{ }
@@ -929,6 +905,8 @@ static struct i2c_driver wm8940_i2c_driver = {
},
.probe = wm8940_i2c_probe,
.remove = __devexit_p(wm8940_i2c_remove),
+ .suspend = wm8940_i2c_suspend,
+ .resume = wm8940_i2c_resume,
.id_table = wm8940_i2c_id,
};
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index e224d8add170..f59703be61c8 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -69,61 +69,7 @@ struct wm8960_priv {
struct snd_soc_codec codec;
};
-/*
- * read wm8960 register cache
- */
-static inline unsigned int wm8960_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- if (reg == WM8960_RESET)
- return 0;
- if (reg >= WM8960_CACHEREGNUM)
- return -1;
- return cache[reg];
-}
-
-/*
- * write wm8960 register cache
- */
-static inline void wm8960_write_reg_cache(struct snd_soc_codec *codec,
- u16 reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
- if (reg >= WM8960_CACHEREGNUM)
- return;
- cache[reg] = value;
-}
-
-static inline unsigned int wm8960_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- return wm8960_read_reg_cache(codec, reg);
-}
-
-/*
- * write to the WM8960 register space
- */
-static int wm8960_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[2];
-
- /* data is
- * D15..D9 WM8960 register offset
- * D8...D0 register data
- */
- data[0] = (reg << 1) | ((value >> 8) & 0x0001);
- data[1] = value & 0x00ff;
-
- wm8960_write_reg_cache(codec, reg, value);
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
-
-#define wm8960_reset(c) wm8960_write(c, WM8960_RESET, 0)
+#define wm8960_reset(c) snd_soc_write(c, WM8960_RESET, 0)
/* enumerated controls */
static const char *wm8960_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"};
@@ -420,7 +366,7 @@ static int wm8960_set_dai_fmt(struct snd_soc_dai *codec_dai,
}
/* set iface */
- wm8960_write(codec, WM8960_IFACE1, iface);
+ snd_soc_write(codec, WM8960_IFACE1, iface);
return 0;
}
@@ -431,7 +377,7 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- u16 iface = wm8960_read(codec, WM8960_IFACE1) & 0xfff3;
+ u16 iface = snd_soc_read(codec, WM8960_IFACE1) & 0xfff3;
/* bit size */
switch (params_format(params)) {
@@ -446,19 +392,19 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream,
}
/* set iface */
- wm8960_write(codec, WM8960_IFACE1, iface);
+ snd_soc_write(codec, WM8960_IFACE1, iface);
return 0;
}
static int wm8960_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u16 mute_reg = wm8960_read(codec, WM8960_DACCTL1) & 0xfff7;
+ u16 mute_reg = snd_soc_read(codec, WM8960_DACCTL1) & 0xfff7;
if (mute)
- wm8960_write(codec, WM8960_DACCTL1, mute_reg | 0x8);
+ snd_soc_write(codec, WM8960_DACCTL1, mute_reg | 0x8);
else
- wm8960_write(codec, WM8960_DACCTL1, mute_reg);
+ snd_soc_write(codec, WM8960_DACCTL1, mute_reg);
return 0;
}
@@ -474,16 +420,16 @@ static int wm8960_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
/* Set VMID to 2x50k */
- reg = wm8960_read(codec, WM8960_POWER1);
+ reg = snd_soc_read(codec, WM8960_POWER1);
reg &= ~0x180;
reg |= 0x80;
- wm8960_write(codec, WM8960_POWER1, reg);
+ snd_soc_write(codec, WM8960_POWER1, reg);
break;
case SND_SOC_BIAS_STANDBY:
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* Enable anti-pop features */
- wm8960_write(codec, WM8960_APOP1,
+ snd_soc_write(codec, WM8960_APOP1,
WM8960_POBCTRL | WM8960_SOFT_ST |
WM8960_BUFDCOPEN | WM8960_BUFIOEN);
@@ -491,43 +437,43 @@ static int wm8960_set_bias_level(struct snd_soc_codec *codec,
reg = WM8960_DISOP;
if (pdata)
reg |= pdata->dres << 4;
- wm8960_write(codec, WM8960_APOP2, reg);
+ snd_soc_write(codec, WM8960_APOP2, reg);
msleep(400);
- wm8960_write(codec, WM8960_APOP2, 0);
+ snd_soc_write(codec, WM8960_APOP2, 0);
/* Enable & ramp VMID at 2x50k */
- reg = wm8960_read(codec, WM8960_POWER1);
+ reg = snd_soc_read(codec, WM8960_POWER1);
reg |= 0x80;
- wm8960_write(codec, WM8960_POWER1, reg);
+ snd_soc_write(codec, WM8960_POWER1, reg);
msleep(100);
/* Enable VREF */
- wm8960_write(codec, WM8960_POWER1, reg | WM8960_VREF);
+ snd_soc_write(codec, WM8960_POWER1, reg | WM8960_VREF);
/* Disable anti-pop features */
- wm8960_write(codec, WM8960_APOP1, WM8960_BUFIOEN);
+ snd_soc_write(codec, WM8960_APOP1, WM8960_BUFIOEN);
}
/* Set VMID to 2x250k */
- reg = wm8960_read(codec, WM8960_POWER1);
+ reg = snd_soc_read(codec, WM8960_POWER1);
reg &= ~0x180;
reg |= 0x100;
- wm8960_write(codec, WM8960_POWER1, reg);
+ snd_soc_write(codec, WM8960_POWER1, reg);
break;
case SND_SOC_BIAS_OFF:
/* Enable anti-pop features */
- wm8960_write(codec, WM8960_APOP1,
+ snd_soc_write(codec, WM8960_APOP1,
WM8960_POBCTRL | WM8960_SOFT_ST |
WM8960_BUFDCOPEN | WM8960_BUFIOEN);
/* Disable VMID and VREF, let them discharge */
- wm8960_write(codec, WM8960_POWER1, 0);
+ snd_soc_write(codec, WM8960_POWER1, 0);
msleep(600);
- wm8960_write(codec, WM8960_APOP1, 0);
+ snd_soc_write(codec, WM8960_APOP1, 0);
break;
}
@@ -610,33 +556,33 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai,
/* Disable the PLL: even if we are changing the frequency the
* PLL needs to be disabled while we do so. */
- wm8960_write(codec, WM8960_CLOCK1,
- wm8960_read(codec, WM8960_CLOCK1) & ~1);
- wm8960_write(codec, WM8960_POWER2,
- wm8960_read(codec, WM8960_POWER2) & ~1);
+ snd_soc_write(codec, WM8960_CLOCK1,
+ snd_soc_read(codec, WM8960_CLOCK1) & ~1);
+ snd_soc_write(codec, WM8960_POWER2,
+ snd_soc_read(codec, WM8960_POWER2) & ~1);
if (!freq_in || !freq_out)
return 0;
- reg = wm8960_read(codec, WM8960_PLL1) & ~0x3f;
+ reg = snd_soc_read(codec, WM8960_PLL1) & ~0x3f;
reg |= pll_div.pre_div << 4;
reg |= pll_div.n;
if (pll_div.k) {
reg |= 0x20;
- wm8960_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
- wm8960_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
- wm8960_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
+ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
+ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
+ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
}
- wm8960_write(codec, WM8960_PLL1, reg);
+ snd_soc_write(codec, WM8960_PLL1, reg);
/* Turn it on */
- wm8960_write(codec, WM8960_POWER2,
- wm8960_read(codec, WM8960_POWER2) | 1);
+ snd_soc_write(codec, WM8960_POWER2,
+ snd_soc_read(codec, WM8960_POWER2) | 1);
msleep(250);
- wm8960_write(codec, WM8960_CLOCK1,
- wm8960_read(codec, WM8960_CLOCK1) | 1);
+ snd_soc_write(codec, WM8960_CLOCK1,
+ snd_soc_read(codec, WM8960_CLOCK1) | 1);
return 0;
}
@@ -649,28 +595,28 @@ static int wm8960_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
switch (div_id) {
case WM8960_SYSCLKSEL:
- reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1fe;
- wm8960_write(codec, WM8960_CLOCK1, reg | div);
+ reg = snd_soc_read(codec, WM8960_CLOCK1) & 0x1fe;
+ snd_soc_write(codec, WM8960_CLOCK1, reg | div);
break;
case WM8960_SYSCLKDIV:
- reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1f9;
- wm8960_write(codec, WM8960_CLOCK1, reg | div);
+ reg = snd_soc_read(codec, WM8960_CLOCK1) & 0x1f9;
+ snd_soc_write(codec, WM8960_CLOCK1, reg | div);
break;
case WM8960_DACDIV:
- reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1c7;
- wm8960_write(codec, WM8960_CLOCK1, reg | div);
+ reg = snd_soc_read(codec, WM8960_CLOCK1) & 0x1c7;
+ snd_soc_write(codec, WM8960_CLOCK1, reg | div);
break;
case WM8960_OPCLKDIV:
- reg = wm8960_read(codec, WM8960_PLL1) & 0x03f;
- wm8960_write(codec, WM8960_PLL1, reg | div);
+ reg = snd_soc_read(codec, WM8960_PLL1) & 0x03f;
+ snd_soc_write(codec, WM8960_PLL1, reg | div);
break;
case WM8960_DCLKDIV:
- reg = wm8960_read(codec, WM8960_CLOCK2) & 0x03f;
- wm8960_write(codec, WM8960_CLOCK2, reg | div);
+ reg = snd_soc_read(codec, WM8960_CLOCK2) & 0x03f;
+ snd_soc_write(codec, WM8960_CLOCK2, reg | div);
break;
case WM8960_TOCLKSEL:
- reg = wm8960_read(codec, WM8960_ADDCTL1) & 0x1fd;
- wm8960_write(codec, WM8960_ADDCTL1, reg | div);
+ reg = snd_soc_read(codec, WM8960_ADDCTL1) & 0x1fd;
+ snd_soc_write(codec, WM8960_ADDCTL1, reg | div);
break;
default:
return -EINVAL;
@@ -801,7 +747,8 @@ struct snd_soc_codec_device soc_codec_dev_wm8960 = {
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8960);
-static int wm8960_register(struct wm8960_priv *wm8960)
+static int wm8960_register(struct wm8960_priv *wm8960,
+ enum snd_soc_control_type control)
{
struct wm8960_data *pdata = wm8960->codec.dev->platform_data;
struct snd_soc_codec *codec = &wm8960->codec;
@@ -810,7 +757,8 @@ static int wm8960_register(struct wm8960_priv *wm8960)
if (wm8960_codec) {
dev_err(codec->dev, "Another WM8960 is registered\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
if (!pdata) {
@@ -829,8 +777,6 @@ static int wm8960_register(struct wm8960_priv *wm8960)
codec->private_data = wm8960;
codec->name = "WM8960";
codec->owner = THIS_MODULE;
- codec->read = wm8960_read_reg_cache;
- codec->write = wm8960_write;
codec->bias_level = SND_SOC_BIAS_OFF;
codec->set_bias_level = wm8960_set_bias_level;
codec->dai = &wm8960_dai;
@@ -840,10 +786,16 @@ static int wm8960_register(struct wm8960_priv *wm8960)
memcpy(codec->reg_cache, wm8960_reg, sizeof(wm8960_reg));
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
ret = wm8960_reset(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset\n");
- return ret;
+ goto err;
}
wm8960_dai.dev = codec->dev;
@@ -851,43 +803,48 @@ static int wm8960_register(struct wm8960_priv *wm8960)
wm8960_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Latch the update bits */
- reg = wm8960_read(codec, WM8960_LINVOL);
- wm8960_write(codec, WM8960_LINVOL, reg | 0x100);
- reg = wm8960_read(codec, WM8960_RINVOL);
- wm8960_write(codec, WM8960_RINVOL, reg | 0x100);
- reg = wm8960_read(codec, WM8960_LADC);
- wm8960_write(codec, WM8960_LADC, reg | 0x100);
- reg = wm8960_read(codec, WM8960_RADC);
- wm8960_write(codec, WM8960_RADC, reg | 0x100);
- reg = wm8960_read(codec, WM8960_LDAC);
- wm8960_write(codec, WM8960_LDAC, reg | 0x100);
- reg = wm8960_read(codec, WM8960_RDAC);
- wm8960_write(codec, WM8960_RDAC, reg | 0x100);
- reg = wm8960_read(codec, WM8960_LOUT1);
- wm8960_write(codec, WM8960_LOUT1, reg | 0x100);
- reg = wm8960_read(codec, WM8960_ROUT1);
- wm8960_write(codec, WM8960_ROUT1, reg | 0x100);
- reg = wm8960_read(codec, WM8960_LOUT2);
- wm8960_write(codec, WM8960_LOUT2, reg | 0x100);
- reg = wm8960_read(codec, WM8960_ROUT2);
- wm8960_write(codec, WM8960_ROUT2, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_LINVOL);
+ snd_soc_write(codec, WM8960_LINVOL, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_RINVOL);
+ snd_soc_write(codec, WM8960_RINVOL, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_LADC);
+ snd_soc_write(codec, WM8960_LADC, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_RADC);
+ snd_soc_write(codec, WM8960_RADC, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_LDAC);
+ snd_soc_write(codec, WM8960_LDAC, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_RDAC);
+ snd_soc_write(codec, WM8960_RDAC, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_LOUT1);
+ snd_soc_write(codec, WM8960_LOUT1, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_ROUT1);
+ snd_soc_write(codec, WM8960_ROUT1, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_LOUT2);
+ snd_soc_write(codec, WM8960_LOUT2, reg | 0x100);
+ reg = snd_soc_read(codec, WM8960_ROUT2);
+ snd_soc_write(codec, WM8960_ROUT2, reg | 0x100);
wm8960_codec = codec;
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(codec->dev, "Failed to register codec: %d\n", ret);
- return ret;
+ goto err;
}
ret = snd_soc_register_dai(&wm8960_dai);
if (ret != 0) {
dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
- snd_soc_unregister_codec(codec);
- return ret;
+ goto err_codec;
}
return 0;
+
+err_codec:
+ snd_soc_unregister_codec(codec);
+err:
+ kfree(wm8960);
+ return ret;
}
static void wm8960_unregister(struct wm8960_priv *wm8960)
@@ -910,14 +867,13 @@ static __devinit int wm8960_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
codec = &wm8960->codec;
- codec->hw_write = (hw_write_t)i2c_master_send;
i2c_set_clientdata(i2c, wm8960);
codec->control_data = i2c;
codec->dev = &i2c->dev;
- return wm8960_register(wm8960);
+ return wm8960_register(wm8960, SND_SOC_I2C);
}
static __devexit int wm8960_i2c_remove(struct i2c_client *client)
@@ -927,6 +883,21 @@ static __devexit int wm8960_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8960_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8960_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8960_i2c_suspend NULL
+#define wm8960_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8960_i2c_id[] = {
{ "wm8960", 0 },
{ }
@@ -940,6 +911,8 @@ static struct i2c_driver wm8960_i2c_driver = {
},
.probe = wm8960_i2c_probe,
.remove = __devexit_p(wm8960_i2c_remove),
+ .suspend = wm8960_i2c_suspend,
+ .resume = wm8960_i2c_resume,
.id_table = wm8960_i2c_id,
};
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
new file mode 100644
index 000000000000..503032085899
--- /dev/null
+++ b/sound/soc/codecs/wm8961.c
@@ -0,0 +1,1265 @@
+/*
+ * wm8961.c -- WM8961 ALSA SoC Audio driver
+ *
+ * Author: Mark Brown
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Currently unimplemented features:
+ * - ALC
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8961.h"
+
+#define WM8961_MAX_REGISTER 0xFC
+
+static u16 wm8961_reg_defaults[] = {
+ 0x009F, /* R0 - Left Input volume */
+ 0x009F, /* R1 - Right Input volume */
+ 0x0000, /* R2 - LOUT1 volume */
+ 0x0000, /* R3 - ROUT1 volume */
+ 0x0020, /* R4 - Clocking1 */
+ 0x0008, /* R5 - ADC & DAC Control 1 */
+ 0x0000, /* R6 - ADC & DAC Control 2 */
+ 0x000A, /* R7 - Audio Interface 0 */
+ 0x01F4, /* R8 - Clocking2 */
+ 0x0000, /* R9 - Audio Interface 1 */
+ 0x00FF, /* R10 - Left DAC volume */
+ 0x00FF, /* R11 - Right DAC volume */
+ 0x0000, /* R12 */
+ 0x0000, /* R13 */
+ 0x0040, /* R14 - Audio Interface 2 */
+ 0x0000, /* R15 - Software Reset */
+ 0x0000, /* R16 */
+ 0x007B, /* R17 - ALC1 */
+ 0x0000, /* R18 - ALC2 */
+ 0x0032, /* R19 - ALC3 */
+ 0x0000, /* R20 - Noise Gate */
+ 0x00C0, /* R21 - Left ADC volume */
+ 0x00C0, /* R22 - Right ADC volume */
+ 0x0120, /* R23 - Additional control(1) */
+ 0x0000, /* R24 - Additional control(2) */
+ 0x0000, /* R25 - Pwr Mgmt (1) */
+ 0x0000, /* R26 - Pwr Mgmt (2) */
+ 0x0000, /* R27 - Additional Control (3) */
+ 0x0000, /* R28 - Anti-pop */
+ 0x0000, /* R29 */
+ 0x005F, /* R30 - Clocking 3 */
+ 0x0000, /* R31 */
+ 0x0000, /* R32 - ADCL signal path */
+ 0x0000, /* R33 - ADCR signal path */
+ 0x0000, /* R34 */
+ 0x0000, /* R35 */
+ 0x0000, /* R36 */
+ 0x0000, /* R37 */
+ 0x0000, /* R38 */
+ 0x0000, /* R39 */
+ 0x0000, /* R40 - LOUT2 volume */
+ 0x0000, /* R41 - ROUT2 volume */
+ 0x0000, /* R42 */
+ 0x0000, /* R43 */
+ 0x0000, /* R44 */
+ 0x0000, /* R45 */
+ 0x0000, /* R46 */
+ 0x0000, /* R47 - Pwr Mgmt (3) */
+ 0x0023, /* R48 - Additional Control (4) */
+ 0x0000, /* R49 - Class D Control 1 */
+ 0x0000, /* R50 */
+ 0x0003, /* R51 - Class D Control 2 */
+ 0x0000, /* R52 */
+ 0x0000, /* R53 */
+ 0x0000, /* R54 */
+ 0x0000, /* R55 */
+ 0x0106, /* R56 - Clocking 4 */
+ 0x0000, /* R57 - DSP Sidetone 0 */
+ 0x0000, /* R58 - DSP Sidetone 1 */
+ 0x0000, /* R59 */
+ 0x0000, /* R60 - DC Servo 0 */
+ 0x0000, /* R61 - DC Servo 1 */
+ 0x0000, /* R62 */
+ 0x015E, /* R63 - DC Servo 3 */
+ 0x0010, /* R64 */
+ 0x0010, /* R65 - DC Servo 5 */
+ 0x0000, /* R66 */
+ 0x0001, /* R67 */
+ 0x0003, /* R68 - Analogue PGA Bias */
+ 0x0000, /* R69 - Analogue HP 0 */
+ 0x0060, /* R70 */
+ 0x01FB, /* R71 - Analogue HP 2 */
+ 0x0000, /* R72 - Charge Pump 1 */
+ 0x0065, /* R73 */
+ 0x005F, /* R74 */
+ 0x0059, /* R75 */
+ 0x006B, /* R76 */
+ 0x0038, /* R77 */
+ 0x000C, /* R78 */
+ 0x000A, /* R79 */
+ 0x006B, /* R80 */
+ 0x0000, /* R81 */
+ 0x0000, /* R82 - Charge Pump B */
+ 0x0087, /* R83 */
+ 0x0000, /* R84 */
+ 0x005C, /* R85 */
+ 0x0000, /* R86 */
+ 0x0000, /* R87 - Write Sequencer 1 */
+ 0x0000, /* R88 - Write Sequencer 2 */
+ 0x0000, /* R89 - Write Sequencer 3 */
+ 0x0000, /* R90 - Write Sequencer 4 */
+ 0x0000, /* R91 - Write Sequencer 5 */
+ 0x0000, /* R92 - Write Sequencer 6 */
+ 0x0000, /* R93 - Write Sequencer 7 */
+ 0x0000, /* R94 */
+ 0x0000, /* R95 */
+ 0x0000, /* R96 */
+ 0x0000, /* R97 */
+ 0x0000, /* R98 */
+ 0x0000, /* R99 */
+ 0x0000, /* R100 */
+ 0x0000, /* R101 */
+ 0x0000, /* R102 */
+ 0x0000, /* R103 */
+ 0x0000, /* R104 */
+ 0x0000, /* R105 */
+ 0x0000, /* R106 */
+ 0x0000, /* R107 */
+ 0x0000, /* R108 */
+ 0x0000, /* R109 */
+ 0x0000, /* R110 */
+ 0x0000, /* R111 */
+ 0x0000, /* R112 */
+ 0x0000, /* R113 */
+ 0x0000, /* R114 */
+ 0x0000, /* R115 */
+ 0x0000, /* R116 */
+ 0x0000, /* R117 */
+ 0x0000, /* R118 */
+ 0x0000, /* R119 */
+ 0x0000, /* R120 */
+ 0x0000, /* R121 */
+ 0x0000, /* R122 */
+ 0x0000, /* R123 */
+ 0x0000, /* R124 */
+ 0x0000, /* R125 */
+ 0x0000, /* R126 */
+ 0x0000, /* R127 */
+ 0x0000, /* R128 */
+ 0x0000, /* R129 */
+ 0x0000, /* R130 */
+ 0x0000, /* R131 */
+ 0x0000, /* R132 */
+ 0x0000, /* R133 */
+ 0x0000, /* R134 */
+ 0x0000, /* R135 */
+ 0x0000, /* R136 */
+ 0x0000, /* R137 */
+ 0x0000, /* R138 */
+ 0x0000, /* R139 */
+ 0x0000, /* R140 */
+ 0x0000, /* R141 */
+ 0x0000, /* R142 */
+ 0x0000, /* R143 */
+ 0x0000, /* R144 */
+ 0x0000, /* R145 */
+ 0x0000, /* R146 */
+ 0x0000, /* R147 */
+ 0x0000, /* R148 */
+ 0x0000, /* R149 */
+ 0x0000, /* R150 */
+ 0x0000, /* R151 */
+ 0x0000, /* R152 */
+ 0x0000, /* R153 */
+ 0x0000, /* R154 */
+ 0x0000, /* R155 */
+ 0x0000, /* R156 */
+ 0x0000, /* R157 */
+ 0x0000, /* R158 */
+ 0x0000, /* R159 */
+ 0x0000, /* R160 */
+ 0x0000, /* R161 */
+ 0x0000, /* R162 */
+ 0x0000, /* R163 */
+ 0x0000, /* R164 */
+ 0x0000, /* R165 */
+ 0x0000, /* R166 */
+ 0x0000, /* R167 */
+ 0x0000, /* R168 */
+ 0x0000, /* R169 */
+ 0x0000, /* R170 */
+ 0x0000, /* R171 */
+ 0x0000, /* R172 */
+ 0x0000, /* R173 */
+ 0x0000, /* R174 */
+ 0x0000, /* R175 */
+ 0x0000, /* R176 */
+ 0x0000, /* R177 */
+ 0x0000, /* R178 */
+ 0x0000, /* R179 */
+ 0x0000, /* R180 */
+ 0x0000, /* R181 */
+ 0x0000, /* R182 */
+ 0x0000, /* R183 */
+ 0x0000, /* R184 */
+ 0x0000, /* R185 */
+ 0x0000, /* R186 */
+ 0x0000, /* R187 */
+ 0x0000, /* R188 */
+ 0x0000, /* R189 */
+ 0x0000, /* R190 */
+ 0x0000, /* R191 */
+ 0x0000, /* R192 */
+ 0x0000, /* R193 */
+ 0x0000, /* R194 */
+ 0x0000, /* R195 */
+ 0x0030, /* R196 */
+ 0x0006, /* R197 */
+ 0x0000, /* R198 */
+ 0x0060, /* R199 */
+ 0x0000, /* R200 */
+ 0x003F, /* R201 */
+ 0x0000, /* R202 */
+ 0x0000, /* R203 */
+ 0x0000, /* R204 */
+ 0x0001, /* R205 */
+ 0x0000, /* R206 */
+ 0x0181, /* R207 */
+ 0x0005, /* R208 */
+ 0x0008, /* R209 */
+ 0x0008, /* R210 */
+ 0x0000, /* R211 */
+ 0x013B, /* R212 */
+ 0x0000, /* R213 */
+ 0x0000, /* R214 */
+ 0x0000, /* R215 */
+ 0x0000, /* R216 */
+ 0x0070, /* R217 */
+ 0x0000, /* R218 */
+ 0x0000, /* R219 */
+ 0x0000, /* R220 */
+ 0x0000, /* R221 */
+ 0x0000, /* R222 */
+ 0x0003, /* R223 */
+ 0x0000, /* R224 */
+ 0x0000, /* R225 */
+ 0x0001, /* R226 */
+ 0x0008, /* R227 */
+ 0x0000, /* R228 */
+ 0x0000, /* R229 */
+ 0x0000, /* R230 */
+ 0x0000, /* R231 */
+ 0x0004, /* R232 */
+ 0x0000, /* R233 */
+ 0x0000, /* R234 */
+ 0x0000, /* R235 */
+ 0x0000, /* R236 */
+ 0x0000, /* R237 */
+ 0x0080, /* R238 */
+ 0x0000, /* R239 */
+ 0x0000, /* R240 */
+ 0x0000, /* R241 */
+ 0x0000, /* R242 */
+ 0x0000, /* R243 */
+ 0x0000, /* R244 */
+ 0x0052, /* R245 */
+ 0x0110, /* R246 */
+ 0x0040, /* R247 */
+ 0x0000, /* R248 */
+ 0x0030, /* R249 */
+ 0x0000, /* R250 */
+ 0x0000, /* R251 */
+ 0x0001, /* R252 - General test 1 */
+};
+
+struct wm8961_priv {
+ struct snd_soc_codec codec;
+ int sysclk;
+	u16 reg_cache[WM8961_MAX_REGISTER + 1];	/* defaults cover R0..R252 */
+};
+
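+/*
+ * Registers whose contents are updated by the device itself and so must
+ * bypass the register cache: the software reset register reads back the
+ * device ID, the DC servo flags are cleared by hardware when servoing
+ * completes, and the write sequencer status register is assumed to
+ * behave likewise.
+ */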
+static int wm8961_volatile_register(unsigned int reg)
+{
+ switch (reg) {
+ case WM8961_SOFTWARE_RESET:
+ case WM8961_WRITE_SEQUENCER_7:
+ case WM8961_DC_SERVO_1:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static int wm8961_reset(struct snd_soc_codec *codec)
+{
+ return snd_soc_write(codec, WM8961_SOFTWARE_RESET, 0);
+}
+
+/*
+ * The headphone output supports special anti-pop sequences giving
+ * silent power up and power down.
+ */
+static int wm8961_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ u16 hp_reg = snd_soc_read(codec, WM8961_ANALOGUE_HP_0);
+ u16 cp_reg = snd_soc_read(codec, WM8961_CHARGE_PUMP_1);
+ u16 pwr_reg = snd_soc_read(codec, WM8961_PWR_MGMT_2);
+ u16 dcs_reg = snd_soc_read(codec, WM8961_DC_SERVO_1);
+ int timeout = 500;
+
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ /* Make sure the output is shorted */
+ hp_reg &= ~(WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT);
+ snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Enable the charge pump */
+ cp_reg |= WM8961_CP_ENA;
+ snd_soc_write(codec, WM8961_CHARGE_PUMP_1, cp_reg);
+ mdelay(5);
+
+ /* Enable the PGA */
+ pwr_reg |= WM8961_LOUT1_PGA | WM8961_ROUT1_PGA;
+ snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
+
+ /* Enable the amplifier */
+ hp_reg |= WM8961_HPR_ENA | WM8961_HPL_ENA;
+ snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Second stage enable */
+ hp_reg |= WM8961_HPR_ENA_DLY | WM8961_HPL_ENA_DLY;
+ snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Enable the DC servo & trigger startup */
+ dcs_reg |=
+ WM8961_DCS_ENA_CHAN_HPR | WM8961_DCS_TRIG_STARTUP_HPR |
+ WM8961_DCS_ENA_CHAN_HPL | WM8961_DCS_TRIG_STARTUP_HPL;
+ dev_dbg(codec->dev, "Enabling DC servo\n");
+
+ snd_soc_write(codec, WM8961_DC_SERVO_1, dcs_reg);
+ do {
+ msleep(1);
+ dcs_reg = snd_soc_read(codec, WM8961_DC_SERVO_1);
+ } while (--timeout &&
+ dcs_reg & (WM8961_DCS_TRIG_STARTUP_HPR |
+ WM8961_DCS_TRIG_STARTUP_HPL));
+ if (dcs_reg & (WM8961_DCS_TRIG_STARTUP_HPR |
+ WM8961_DCS_TRIG_STARTUP_HPL))
+ dev_err(codec->dev, "DC servo timed out\n");
+ else
+ dev_dbg(codec->dev, "DC servo startup complete\n");
+
+ /* Enable the output stage */
+ hp_reg |= WM8961_HPR_ENA_OUTP | WM8961_HPL_ENA_OUTP;
+ snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Remove the short on the output stage */
+ hp_reg |= WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT;
+ snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+ }
+
+ if (event & SND_SOC_DAPM_PRE_PMD) {
+ /* Short the output */
+ hp_reg &= ~(WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT);
+ snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Disable the output stage */
+ hp_reg &= ~(WM8961_HPR_ENA_OUTP | WM8961_HPL_ENA_OUTP);
+ snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Disable DC offset cancellation */
+ dcs_reg &= ~(WM8961_DCS_ENA_CHAN_HPR |
+ WM8961_DCS_ENA_CHAN_HPL);
+ snd_soc_write(codec, WM8961_DC_SERVO_1, dcs_reg);
+
+ /* Finish up */
+ hp_reg &= ~(WM8961_HPR_ENA_DLY | WM8961_HPR_ENA |
+ WM8961_HPL_ENA_DLY | WM8961_HPL_ENA);
+ snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
+
+ /* Disable the PGA */
+ pwr_reg &= ~(WM8961_LOUT1_PGA | WM8961_ROUT1_PGA);
+ snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
+
+ /* Disable the charge pump */
+ dev_dbg(codec->dev, "Disabling charge pump\n");
+ snd_soc_write(codec, WM8961_CHARGE_PUMP_1,
+ cp_reg & ~WM8961_CP_ENA);
+ }
+
+ return 0;
+}
+
+static int wm8961_spk_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ u16 pwr_reg = snd_soc_read(codec, WM8961_PWR_MGMT_2);
+ u16 spk_reg = snd_soc_read(codec, WM8961_CLASS_D_CONTROL_1);
+
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ /* Enable the PGA */
+ pwr_reg |= WM8961_SPKL_PGA | WM8961_SPKR_PGA;
+ snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
+
+ /* Enable the amplifier */
+ spk_reg |= WM8961_SPKL_ENA | WM8961_SPKR_ENA;
+ snd_soc_write(codec, WM8961_CLASS_D_CONTROL_1, spk_reg);
+ }
+
+ if (event & SND_SOC_DAPM_PRE_PMD) {
+		/* Disable the amplifier */
+ spk_reg &= ~(WM8961_SPKL_ENA | WM8961_SPKR_ENA);
+ snd_soc_write(codec, WM8961_CLASS_D_CONTROL_1, spk_reg);
+
+		/* Disable the PGA */
+ pwr_reg &= ~(WM8961_SPKL_PGA | WM8961_SPKR_PGA);
+ snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
+ }
+
+ return 0;
+}
+
+static const char *adc_hpf_text[] = {
+ "Hi-fi", "Voice 1", "Voice 2", "Voice 3",
+};
+
+static const struct soc_enum adc_hpf =
+ SOC_ENUM_SINGLE(WM8961_ADC_DAC_CONTROL_2, 7, 4, adc_hpf_text);
+
+static const char *dac_deemph_text[] = {
+ "None", "32kHz", "44.1kHz", "48kHz",
+};
+
+static const struct soc_enum dac_deemph =
+ SOC_ENUM_SINGLE(WM8961_ADC_DAC_CONTROL_1, 1, 4, dac_deemph_text);
+
+static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(hp_sec_tlv, -700, 100, 0);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
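+/*
+ * The capture boost stage only has a few non-uniform gain settings, so
+ * describe it with a range TLV rather than a linear dB scale.
+ */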
+static unsigned int boost_tlv[] = {
+ TLV_DB_RANGE_HEAD(4),
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
+ 3, 3, TLV_DB_SCALE_ITEM(29, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(pga_tlv, -2325, 75, 0);
+
+static const struct snd_kcontrol_new wm8961_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Headphone Volume", WM8961_LOUT1_VOLUME, WM8961_ROUT1_VOLUME,
+ 0, 127, 0, out_tlv),
+SOC_DOUBLE_TLV("Headphone Secondary Volume", WM8961_ANALOGUE_HP_2,
+ 6, 3, 7, 0, hp_sec_tlv),
+SOC_DOUBLE_R("Headphone ZC Switch", WM8961_LOUT1_VOLUME, WM8961_ROUT1_VOLUME,
+ 7, 1, 0),
+
+SOC_DOUBLE_R_TLV("Speaker Volume", WM8961_LOUT2_VOLUME, WM8961_ROUT2_VOLUME,
+ 0, 127, 0, out_tlv),
+SOC_DOUBLE_R("Speaker ZC Switch", WM8961_LOUT2_VOLUME, WM8961_ROUT2_VOLUME,
+ 7, 1, 0),
+SOC_SINGLE("Speaker AC Gain", WM8961_CLASS_D_CONTROL_2, 0, 7, 0),
+
+SOC_SINGLE("DAC x128 OSR Switch", WM8961_ADC_DAC_CONTROL_2, 0, 1, 0),
+SOC_ENUM("DAC Deemphasis", dac_deemph),
+SOC_SINGLE("DAC Soft Mute Switch", WM8961_ADC_DAC_CONTROL_2, 3, 1, 0),
+
+SOC_DOUBLE_R_TLV("Sidetone Volume", WM8961_DSP_SIDETONE_0,
+ WM8961_DSP_SIDETONE_1, 4, 12, 0, sidetone_tlv),
+
+SOC_SINGLE("ADC High Pass Filter Switch", WM8961_ADC_DAC_CONTROL_1, 0, 1, 0),
+SOC_ENUM("ADC High Pass Filter Mode", adc_hpf),
+
+SOC_DOUBLE_R_TLV("Capture Volume",
+ WM8961_LEFT_ADC_VOLUME, WM8961_RIGHT_ADC_VOLUME,
+ 1, 119, 0, adc_tlv),
+SOC_DOUBLE_R_TLV("Capture Boost Volume",
+ WM8961_ADCL_SIGNAL_PATH, WM8961_ADCR_SIGNAL_PATH,
+ 4, 3, 0, boost_tlv),
+SOC_DOUBLE_R_TLV("Capture PGA Volume",
+ WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
+ 0, 62, 0, pga_tlv),
+SOC_DOUBLE_R("Capture PGA ZC Switch",
+ WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
+ 6, 1, 1),
+SOC_DOUBLE_R("Capture PGA Switch",
+ WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
+ 7, 1, 1),
+};
+
+static const char *sidetone_text[] = {
+ "None", "Left", "Right"
+};
+
+static const struct soc_enum dacl_sidetone =
+ SOC_ENUM_SINGLE(WM8961_DSP_SIDETONE_0, 2, 3, sidetone_text);
+
+static const struct soc_enum dacr_sidetone =
+ SOC_ENUM_SINGLE(WM8961_DSP_SIDETONE_1, 2, 3, sidetone_text);
+
+static const struct snd_kcontrol_new dacl_mux =
+ SOC_DAPM_ENUM("DACL Sidetone", dacl_sidetone);
+
+static const struct snd_kcontrol_new dacr_mux =
+ SOC_DAPM_ENUM("DACR Sidetone", dacr_sidetone);
+
+static const struct snd_soc_dapm_widget wm8961_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("LINPUT"),
+SND_SOC_DAPM_INPUT("RINPUT"),
+
+SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8961_CLOCKING2, 4, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Left Input", WM8961_PWR_MGMT_1, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right Input", WM8961_PWR_MGMT_1, 4, 0, NULL, 0),
+
+SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", WM8961_PWR_MGMT_1, 3, 0),
+SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", WM8961_PWR_MGMT_1, 2, 0),
+
+SND_SOC_DAPM_MICBIAS("MICBIAS", WM8961_PWR_MGMT_1, 1, 0),
+
+SND_SOC_DAPM_MUX("DACL Sidetone", SND_SOC_NOPM, 0, 0, &dacl_mux),
+SND_SOC_DAPM_MUX("DACR Sidetone", SND_SOC_NOPM, 0, 0, &dacr_mux),
+
+SND_SOC_DAPM_DAC("DACL", "HiFi Playback", WM8961_PWR_MGMT_2, 8, 0),
+SND_SOC_DAPM_DAC("DACR", "HiFi Playback", WM8961_PWR_MGMT_2, 7, 0),
+
+/* Handle as a mono path for DCS */
+SND_SOC_DAPM_PGA_E("Headphone Output", SND_SOC_NOPM,
+ 4, 0, NULL, 0, wm8961_hp_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_PGA_E("Speaker Output", SND_SOC_NOPM,
+ 4, 0, NULL, 0, wm8961_spk_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+SND_SOC_DAPM_OUTPUT("HP_L"),
+SND_SOC_DAPM_OUTPUT("HP_R"),
+SND_SOC_DAPM_OUTPUT("SPK_LN"),
+SND_SOC_DAPM_OUTPUT("SPK_LP"),
+SND_SOC_DAPM_OUTPUT("SPK_RN"),
+SND_SOC_DAPM_OUTPUT("SPK_RP"),
+};
+
+
+static const struct snd_soc_dapm_route audio_paths[] = {
+ { "DACL", NULL, "CLK_DSP" },
+ { "DACL", NULL, "DACL Sidetone" },
+ { "DACR", NULL, "CLK_DSP" },
+ { "DACR", NULL, "DACR Sidetone" },
+
+ { "DACL Sidetone", "Left", "ADCL" },
+ { "DACL Sidetone", "Right", "ADCR" },
+
+ { "DACR Sidetone", "Left", "ADCL" },
+ { "DACR Sidetone", "Right", "ADCR" },
+
+ { "HP_L", NULL, "Headphone Output" },
+ { "HP_R", NULL, "Headphone Output" },
+ { "Headphone Output", NULL, "DACL" },
+ { "Headphone Output", NULL, "DACR" },
+
+ { "SPK_LN", NULL, "Speaker Output" },
+ { "SPK_LP", NULL, "Speaker Output" },
+ { "SPK_RN", NULL, "Speaker Output" },
+ { "SPK_RP", NULL, "Speaker Output" },
+
+ { "Speaker Output", NULL, "DACL" },
+ { "Speaker Output", NULL, "DACR" },
+
+ { "ADCL", NULL, "Left Input" },
+ { "ADCL", NULL, "CLK_DSP" },
+ { "ADCR", NULL, "Right Input" },
+ { "ADCR", NULL, "CLK_DSP" },
+
+ { "Left Input", NULL, "LINPUT" },
+ { "Right Input", NULL, "RINPUT" },
+
+};
+
+/* Values for CLK_SYS_RATE */
+static struct {
+ int ratio;
+ u16 val;
+} wm8961_clk_sys_ratio[] = {
+ { 64, 0 },
+ { 128, 1 },
+ { 192, 2 },
+ { 256, 3 },
+ { 384, 4 },
+ { 512, 5 },
+ { 768, 6 },
+ { 1024, 7 },
+ { 1408, 8 },
+ { 1536, 9 },
+};
+
+/* Values for SAMPLE_RATE */
+static struct {
+ int rate;
+ u16 val;
+} wm8961_srate[] = {
+ { 48000, 0 },
+ { 44100, 0 },
+ { 32000, 1 },
+ { 22050, 2 },
+ { 24000, 2 },
+ { 16000, 3 },
+	{ 11025,  4 },
+ { 12000, 4 },
+ { 8000, 5 },
+};
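+/*
+ * hw_params() below picks the SRATE entry closest to the requested rate
+ * and then the smallest CLK_SYS/fs ratio that meets the requirement.
+ */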
+
+static int wm8961_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8961_priv *wm8961 = codec->private_data;
+ int i, best, target, fs;
+ u16 reg;
+
+ fs = params_rate(params);
+
+ if (!wm8961->sysclk) {
+ dev_err(codec->dev, "MCLK has not been specified\n");
+ return -EINVAL;
+ }
+
+ /* Find the closest sample rate for the filters */
+ best = 0;
+ for (i = 0; i < ARRAY_SIZE(wm8961_srate); i++) {
+ if (abs(wm8961_srate[i].rate - fs) <
+ abs(wm8961_srate[best].rate - fs))
+ best = i;
+ }
+ reg = snd_soc_read(codec, WM8961_ADDITIONAL_CONTROL_3);
+ reg &= ~WM8961_SAMPLE_RATE_MASK;
+ reg |= wm8961_srate[best].val;
+ snd_soc_write(codec, WM8961_ADDITIONAL_CONTROL_3, reg);
+ dev_dbg(codec->dev, "Selected SRATE %dHz for %dHz\n",
+ wm8961_srate[best].rate, fs);
+
+ /* Select a CLK_SYS/fs ratio equal to or higher than required */
+ target = wm8961->sysclk / fs;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && target < 64) {
+ dev_err(codec->dev,
+ "SYSCLK must be at least 64*fs for DAC\n");
+ return -EINVAL;
+ }
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE && target < 256) {
+ dev_err(codec->dev,
+ "SYSCLK must be at least 256*fs for ADC\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wm8961_clk_sys_ratio); i++) {
+ if (wm8961_clk_sys_ratio[i].ratio >= target)
+ break;
+ }
+ if (i == ARRAY_SIZE(wm8961_clk_sys_ratio)) {
+ dev_err(codec->dev, "Unable to generate CLK_SYS_RATE\n");
+ return -EINVAL;
+ }
+ dev_dbg(codec->dev, "Selected CLK_SYS_RATE of %d for %d/%d=%d\n",
+ wm8961_clk_sys_ratio[i].ratio, wm8961->sysclk, fs,
+ wm8961->sysclk / fs);
+
+ reg = snd_soc_read(codec, WM8961_CLOCKING_4);
+ reg &= ~WM8961_CLK_SYS_RATE_MASK;
+ reg |= wm8961_clk_sys_ratio[i].val << WM8961_CLK_SYS_RATE_SHIFT;
+ snd_soc_write(codec, WM8961_CLOCKING_4, reg);
+
+ reg = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_0);
+ reg &= ~WM8961_WL_MASK;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ reg |= 1 << WM8961_WL_SHIFT;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ reg |= 2 << WM8961_WL_SHIFT;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ reg |= 3 << WM8961_WL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ snd_soc_write(codec, WM8961_AUDIO_INTERFACE_0, reg);
+
+ /* Sloping stop-band filter is recommended for <= 24kHz */
+ reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_2);
+ if (fs <= 24000)
+ reg |= WM8961_DACSLOPE;
+ else
+		reg &= ~WM8961_DACSLOPE;
+ snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
+
+ return 0;
+}
+
+static int wm8961_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq,
+ int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8961_priv *wm8961 = codec->private_data;
+ u16 reg = snd_soc_read(codec, WM8961_CLOCKING1);
+
+ if (freq > 33000000) {
+ dev_err(codec->dev, "MCLK must be <33MHz\n");
+ return -EINVAL;
+ }
+
+ if (freq > 16500000) {
+ dev_dbg(codec->dev, "Using MCLK/2 for %dHz MCLK\n", freq);
+ reg |= WM8961_MCLKDIV;
+ freq /= 2;
+ } else {
+ dev_dbg(codec->dev, "Using MCLK/1 for %dHz MCLK\n", freq);
+		reg &= ~WM8961_MCLKDIV;
+ }
+
+ snd_soc_write(codec, WM8961_CLOCKING1, reg);
+
+ wm8961->sysclk = freq;
+
+ return 0;
+}
+
+static int wm8961_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 aif = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_0);
+
+ aif &= ~(WM8961_BCLKINV | WM8961_LRP |
+ WM8961_MS | WM8961_FORMAT_MASK);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aif |= WM8961_MS;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+ aif |= 1;
+ break;
+
+ case SND_SOC_DAIFMT_I2S:
+ aif |= 2;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B:
+ aif |= WM8961_LRP;
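+		/* fall through */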
+ case SND_SOC_DAIFMT_DSP_A:
+ aif |= 3;
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ case SND_SOC_DAIFMT_IB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ aif |= WM8961_LRP;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif |= WM8961_BCLKINV;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ aif |= WM8961_BCLKINV | WM8961_LRP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snd_soc_write(codec, WM8961_AUDIO_INTERFACE_0, aif);
+}
+
+static int wm8961_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 reg = snd_soc_read(codec, WM8961_ADDITIONAL_CONTROL_2);
+
+ if (tristate)
+ reg |= WM8961_TRIS;
+ else
+ reg &= ~WM8961_TRIS;
+
+ return snd_soc_write(codec, WM8961_ADDITIONAL_CONTROL_2, reg);
+}
+
+static int wm8961_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_1);
+
+ if (mute)
+ reg |= WM8961_DACMU;
+ else
+ reg &= ~WM8961_DACMU;
+
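+	/*
+	 * Give any previous soft-mute ramp time to complete before the
+	 * mute bit is updated (assumption; the reason for this delay is
+	 * not documented here).
+	 */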
+ msleep(17);
+
+ return snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_1, reg);
+}
+
+static int wm8961_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 reg;
+
+ switch (div_id) {
+ case WM8961_BCLK:
+ reg = snd_soc_read(codec, WM8961_CLOCKING2);
+ reg &= ~WM8961_BCLKDIV_MASK;
+ reg |= div;
+ snd_soc_write(codec, WM8961_CLOCKING2, reg);
+ break;
+
+ case WM8961_LRCLK:
+ reg = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_2);
+ reg &= ~WM8961_LRCLK_RATE_MASK;
+ reg |= div;
+ snd_soc_write(codec, WM8961_AUDIO_INTERFACE_2, reg);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wm8961_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ u16 reg;
+
+ /* This is all slightly unusual since we have no bypass paths
+ * and the output amplifier structure means we can just slam
+ * the biases straight up rather than having to ramp them
+ * slowly.
+ */
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
+ /* Enable bias generation */
+ reg = snd_soc_read(codec, WM8961_ANTI_POP);
+ reg |= WM8961_BUFIOEN | WM8961_BUFDCOPEN;
+ snd_soc_write(codec, WM8961_ANTI_POP, reg);
+
+ /* VMID=2*50k, VREF */
+ reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
+ reg &= ~WM8961_VMIDSEL_MASK;
+ reg |= (1 << WM8961_VMIDSEL_SHIFT) | WM8961_VREF;
+ snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
+ }
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_PREPARE) {
+ /* VREF off */
+ reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
+ reg &= ~WM8961_VREF;
+ snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
+
+ /* Bias generation off */
+ reg = snd_soc_read(codec, WM8961_ANTI_POP);
+ reg &= ~(WM8961_BUFIOEN | WM8961_BUFDCOPEN);
+ snd_soc_write(codec, WM8961_ANTI_POP, reg);
+
+ /* VMID off */
+ reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
+ reg &= ~WM8961_VMIDSEL_MASK;
+ snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
+ }
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ break;
+ }
+
+ codec->bias_level = level;
+
+ return 0;
+}
+
+
+#define WM8961_RATES SNDRV_PCM_RATE_8000_48000
+
+#define WM8961_FORMATS \
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops wm8961_dai_ops = {
+ .hw_params = wm8961_hw_params,
+ .set_sysclk = wm8961_set_sysclk,
+ .set_fmt = wm8961_set_fmt,
+ .digital_mute = wm8961_digital_mute,
+ .set_tristate = wm8961_set_tristate,
+ .set_clkdiv = wm8961_set_clkdiv,
+};
+
+struct snd_soc_dai wm8961_dai = {
+ .name = "WM8961",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8961_RATES,
+ .formats = WM8961_FORMATS,},
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8961_RATES,
+ .formats = WM8961_FORMATS,},
+ .ops = &wm8961_dai_ops,
+};
+EXPORT_SYMBOL_GPL(wm8961_dai);
+
+
+static struct snd_soc_codec *wm8961_codec;
+
+static int wm8961_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm8961_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm8961_codec;
+ codec = wm8961_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, wm8961_snd_controls,
+ ARRAY_SIZE(wm8961_snd_controls));
+ snd_soc_dapm_new_controls(codec, wm8961_dapm_widgets,
+ ARRAY_SIZE(wm8961_dapm_widgets));
+ snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+ snd_soc_dapm_new_widgets(codec);
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+static int wm8961_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8961_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8961_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int wm8961_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+ u16 *reg_cache = codec->reg_cache;
+ int i;
+
+ for (i = 0; i < codec->reg_cache_size; i++) {
+ if (i == WM8961_SOFTWARE_RESET)
+ continue;
+
+ snd_soc_write(codec, i, reg_cache[i]);
+ }
+
+ wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define wm8961_suspend NULL
+#define wm8961_resume NULL
+#endif
+
+struct snd_soc_codec_device soc_codec_dev_wm8961 = {
+ .probe = wm8961_probe,
+ .remove = wm8961_remove,
+ .suspend = wm8961_suspend,
+ .resume = wm8961_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8961);
+
+static int wm8961_register(struct wm8961_priv *wm8961)
+{
+ struct snd_soc_codec *codec = &wm8961->codec;
+ int ret;
+ u16 reg;
+
+ if (wm8961_codec) {
+ dev_err(codec->dev, "Another WM8961 is registered\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = wm8961;
+ codec->name = "WM8961";
+ codec->owner = THIS_MODULE;
+ codec->dai = &wm8961_dai;
+ codec->num_dai = 1;
+ codec->reg_cache_size = ARRAY_SIZE(wm8961->reg_cache);
+ codec->reg_cache = &wm8961->reg_cache;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8961_set_bias_level;
+ codec->volatile_register = wm8961_volatile_register;
+
+ memcpy(codec->reg_cache, wm8961_reg_defaults,
+ sizeof(wm8961_reg_defaults));
+
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ reg = snd_soc_read(codec, WM8961_SOFTWARE_RESET);
+ if (reg != 0x1801) {
+ dev_err(codec->dev, "Device is not a WM8961: ID=0x%x\n", reg);
+ ret = -EINVAL;
+ goto err;
+ }
+
+	/* Not marked volatile since readback doesn't correspond to what
+	 * was written; read the hardware directly to get the ID fields. */
+ reg = codec->hw_read(codec, WM8961_RIGHT_INPUT_VOLUME);
+ dev_info(codec->dev, "WM8961 family %d revision %c\n",
+ (reg & WM8961_DEVICE_ID_MASK) >> WM8961_DEVICE_ID_SHIFT,
+ ((reg & WM8961_CHIP_REV_MASK) >> WM8961_CHIP_REV_SHIFT)
+ + 'A');
+
+ ret = wm8961_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ return ret;
+ }
+
+ /* Enable class W */
+ reg = snd_soc_read(codec, WM8961_CHARGE_PUMP_B);
+ reg |= WM8961_CP_DYN_PWR_MASK;
+ snd_soc_write(codec, WM8961_CHARGE_PUMP_B, reg);
+
+ /* Latch volume update bits (right channel only, we always
+ * write both out) and default ZC on. */
+ reg = snd_soc_read(codec, WM8961_ROUT1_VOLUME);
+ snd_soc_write(codec, WM8961_ROUT1_VOLUME,
+ reg | WM8961_LO1ZC | WM8961_OUT1VU);
+ snd_soc_write(codec, WM8961_LOUT1_VOLUME, reg | WM8961_LO1ZC);
+ reg = snd_soc_read(codec, WM8961_ROUT2_VOLUME);
+ snd_soc_write(codec, WM8961_ROUT2_VOLUME,
+ reg | WM8961_SPKRZC | WM8961_SPKVU);
+ snd_soc_write(codec, WM8961_LOUT2_VOLUME, reg | WM8961_SPKLZC);
+
+ reg = snd_soc_read(codec, WM8961_RIGHT_ADC_VOLUME);
+ snd_soc_write(codec, WM8961_RIGHT_ADC_VOLUME, reg | WM8961_ADCVU);
+ reg = snd_soc_read(codec, WM8961_RIGHT_INPUT_VOLUME);
+ snd_soc_write(codec, WM8961_RIGHT_INPUT_VOLUME, reg | WM8961_IPVU);
+
+ /* Use soft mute by default */
+ reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_2);
+ reg |= WM8961_DACSMM;
+ snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
+
+ /* Use automatic clocking mode by default; for now this is all
+ * we support.
+ */
+ reg = snd_soc_read(codec, WM8961_CLOCKING_3);
+ reg &= ~WM8961_MANUAL_MODE;
+ snd_soc_write(codec, WM8961_CLOCKING_3, reg);
+
+ wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ wm8961_dai.dev = codec->dev;
+
+ wm8961_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&wm8961_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ return ret;
+ }
+
+ return 0;
+
+err:
+ kfree(wm8961);
+ return ret;
+}
+
+static void wm8961_unregister(struct wm8961_priv *wm8961)
+{
+ wm8961_set_bias_level(&wm8961->codec, SND_SOC_BIAS_OFF);
+ snd_soc_unregister_dai(&wm8961_dai);
+ snd_soc_unregister_codec(&wm8961->codec);
+ kfree(wm8961);
+ wm8961_codec = NULL;
+}
+
+static __devinit int wm8961_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8961_priv *wm8961;
+ struct snd_soc_codec *codec;
+
+ wm8961 = kzalloc(sizeof(struct wm8961_priv), GFP_KERNEL);
+ if (wm8961 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8961->codec;
+
+ i2c_set_clientdata(i2c, wm8961);
+ codec->control_data = i2c;
+
+ codec->dev = &i2c->dev;
+
+ return wm8961_register(wm8961);
+}
+
+static __devexit int wm8961_i2c_remove(struct i2c_client *client)
+{
+ struct wm8961_priv *wm8961 = i2c_get_clientdata(client);
+ wm8961_unregister(wm8961);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8961_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8961_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8961_i2c_suspend NULL
+#define wm8961_i2c_resume NULL
+#endif
+
+static const struct i2c_device_id wm8961_i2c_id[] = {
+ { "wm8961", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8961_i2c_id);
+
+static struct i2c_driver wm8961_i2c_driver = {
+ .driver = {
+ .name = "wm8961",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8961_i2c_probe,
+ .remove = __devexit_p(wm8961_i2c_remove),
+ .suspend = wm8961_i2c_suspend,
+ .resume = wm8961_i2c_resume,
+ .id_table = wm8961_i2c_id,
+};
+
+static int __init wm8961_modinit(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&wm8961_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8961 I2C driver: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+module_init(wm8961_modinit);
+
+static void __exit wm8961_exit(void)
+{
+ i2c_del_driver(&wm8961_i2c_driver);
+}
+module_exit(wm8961_exit);
+
+
+MODULE_DESCRIPTION("ASoC WM8961 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8961.h b/sound/soc/codecs/wm8961.h
new file mode 100644
index 000000000000..5513bfd720d6
--- /dev/null
+++ b/sound/soc/codecs/wm8961.h
@@ -0,0 +1,866 @@
+/*
+ * wm8961.h -- WM8961 SoC Audio driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8961_H
+#define _WM8961_H
+
+#include <sound/soc.h>
+
+extern struct snd_soc_codec_device soc_codec_dev_wm8961;
+extern struct snd_soc_dai wm8961_dai;
+
+#define WM8961_BCLK 1
+#define WM8961_LRCLK 2
+
+#define WM8961_BCLK_DIV_1 0
+#define WM8961_BCLK_DIV_1_5 1
+#define WM8961_BCLK_DIV_2 2
+#define WM8961_BCLK_DIV_3 3
+#define WM8961_BCLK_DIV_4 4
+#define WM8961_BCLK_DIV_5_5 5
+#define WM8961_BCLK_DIV_6 6
+#define WM8961_BCLK_DIV_8 7
+#define WM8961_BCLK_DIV_11 8
+#define WM8961_BCLK_DIV_12 9
+#define WM8961_BCLK_DIV_16 10
+#define WM8961_BCLK_DIV_24 11
+#define WM8961_BCLK_DIV_32 13
+
+
+/*
+ * Register values.
+ */
+#define WM8961_LEFT_INPUT_VOLUME 0x00
+#define WM8961_RIGHT_INPUT_VOLUME 0x01
+#define WM8961_LOUT1_VOLUME 0x02
+#define WM8961_ROUT1_VOLUME 0x03
+#define WM8961_CLOCKING1 0x04
+#define WM8961_ADC_DAC_CONTROL_1 0x05
+#define WM8961_ADC_DAC_CONTROL_2 0x06
+#define WM8961_AUDIO_INTERFACE_0 0x07
+#define WM8961_CLOCKING2 0x08
+#define WM8961_AUDIO_INTERFACE_1 0x09
+#define WM8961_LEFT_DAC_VOLUME 0x0A
+#define WM8961_RIGHT_DAC_VOLUME 0x0B
+#define WM8961_AUDIO_INTERFACE_2 0x0E
+#define WM8961_SOFTWARE_RESET 0x0F
+#define WM8961_ALC1 0x11
+#define WM8961_ALC2 0x12
+#define WM8961_ALC3 0x13
+#define WM8961_NOISE_GATE 0x14
+#define WM8961_LEFT_ADC_VOLUME 0x15
+#define WM8961_RIGHT_ADC_VOLUME 0x16
+#define WM8961_ADDITIONAL_CONTROL_1 0x17
+#define WM8961_ADDITIONAL_CONTROL_2 0x18
+#define WM8961_PWR_MGMT_1 0x19
+#define WM8961_PWR_MGMT_2 0x1A
+#define WM8961_ADDITIONAL_CONTROL_3 0x1B
+#define WM8961_ANTI_POP 0x1C
+#define WM8961_CLOCKING_3 0x1E
+#define WM8961_ADCL_SIGNAL_PATH 0x20
+#define WM8961_ADCR_SIGNAL_PATH 0x21
+#define WM8961_LOUT2_VOLUME 0x28
+#define WM8961_ROUT2_VOLUME 0x29
+#define WM8961_PWR_MGMT_3 0x2F
+#define WM8961_ADDITIONAL_CONTROL_4 0x30
+#define WM8961_CLASS_D_CONTROL_1 0x31
+#define WM8961_CLASS_D_CONTROL_2 0x33
+#define WM8961_CLOCKING_4 0x38
+#define WM8961_DSP_SIDETONE_0 0x39
+#define WM8961_DSP_SIDETONE_1 0x3A
+#define WM8961_DC_SERVO_0 0x3C
+#define WM8961_DC_SERVO_1 0x3D
+#define WM8961_DC_SERVO_3 0x3F
+#define WM8961_DC_SERVO_5 0x41
+#define WM8961_ANALOGUE_PGA_BIAS 0x44
+#define WM8961_ANALOGUE_HP_0 0x45
+#define WM8961_ANALOGUE_HP_2 0x47
+#define WM8961_CHARGE_PUMP_1 0x48
+#define WM8961_CHARGE_PUMP_B 0x52
+#define WM8961_WRITE_SEQUENCER_1 0x57
+#define WM8961_WRITE_SEQUENCER_2 0x58
+#define WM8961_WRITE_SEQUENCER_3 0x59
+#define WM8961_WRITE_SEQUENCER_4 0x5A
+#define WM8961_WRITE_SEQUENCER_5 0x5B
+#define WM8961_WRITE_SEQUENCER_6 0x5C
+#define WM8961_WRITE_SEQUENCER_7 0x5D
+#define WM8961_GENERAL_TEST_1 0xFC
+
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Left Input volume
+ */
+#define WM8961_IPVU 0x0100 /* IPVU */
+#define WM8961_IPVU_MASK 0x0100 /* IPVU */
+#define WM8961_IPVU_SHIFT 8 /* IPVU */
+#define WM8961_IPVU_WIDTH 1 /* IPVU */
+#define WM8961_LINMUTE 0x0080 /* LINMUTE */
+#define WM8961_LINMUTE_MASK 0x0080 /* LINMUTE */
+#define WM8961_LINMUTE_SHIFT 7 /* LINMUTE */
+#define WM8961_LINMUTE_WIDTH 1 /* LINMUTE */
+#define WM8961_LIZC 0x0040 /* LIZC */
+#define WM8961_LIZC_MASK 0x0040 /* LIZC */
+#define WM8961_LIZC_SHIFT 6 /* LIZC */
+#define WM8961_LIZC_WIDTH 1 /* LIZC */
+#define WM8961_LINVOL_MASK 0x003F /* LINVOL - [5:0] */
+#define WM8961_LINVOL_SHIFT 0 /* LINVOL - [5:0] */
+#define WM8961_LINVOL_WIDTH 6 /* LINVOL - [5:0] */
+
+/*
+ * R1 (0x01) - Right Input volume
+ */
+#define WM8961_DEVICE_ID_MASK 0xF000 /* DEVICE_ID - [15:12] */
+#define WM8961_DEVICE_ID_SHIFT 12 /* DEVICE_ID - [15:12] */
+#define WM8961_DEVICE_ID_WIDTH 4 /* DEVICE_ID - [15:12] */
+#define WM8961_CHIP_REV_MASK 0x0E00 /* CHIP_REV - [11:9] */
+#define WM8961_CHIP_REV_SHIFT 9 /* CHIP_REV - [11:9] */
+#define WM8961_CHIP_REV_WIDTH 3 /* CHIP_REV - [11:9] */
+#define WM8961_IPVU 0x0100 /* IPVU */
+#define WM8961_IPVU_MASK 0x0100 /* IPVU */
+#define WM8961_IPVU_SHIFT 8 /* IPVU */
+#define WM8961_IPVU_WIDTH 1 /* IPVU */
+#define WM8961_RINMUTE 0x0080 /* RINMUTE */
+#define WM8961_RINMUTE_MASK 0x0080 /* RINMUTE */
+#define WM8961_RINMUTE_SHIFT 7 /* RINMUTE */
+#define WM8961_RINMUTE_WIDTH 1 /* RINMUTE */
+#define WM8961_RIZC 0x0040 /* RIZC */
+#define WM8961_RIZC_MASK 0x0040 /* RIZC */
+#define WM8961_RIZC_SHIFT 6 /* RIZC */
+#define WM8961_RIZC_WIDTH 1 /* RIZC */
+#define WM8961_RINVOL_MASK 0x003F /* RINVOL - [5:0] */
+#define WM8961_RINVOL_SHIFT 0 /* RINVOL - [5:0] */
+#define WM8961_RINVOL_WIDTH 6 /* RINVOL - [5:0] */
+
+/*
+ * R2 (0x02) - LOUT1 volume
+ */
+#define WM8961_OUT1VU 0x0100 /* OUT1VU */
+#define WM8961_OUT1VU_MASK 0x0100 /* OUT1VU */
+#define WM8961_OUT1VU_SHIFT 8 /* OUT1VU */
+#define WM8961_OUT1VU_WIDTH 1 /* OUT1VU */
+#define WM8961_LO1ZC 0x0080 /* LO1ZC */
+#define WM8961_LO1ZC_MASK 0x0080 /* LO1ZC */
+#define WM8961_LO1ZC_SHIFT 7 /* LO1ZC */
+#define WM8961_LO1ZC_WIDTH 1 /* LO1ZC */
+#define WM8961_LOUT1VOL_MASK 0x007F /* LOUT1VOL - [6:0] */
+#define WM8961_LOUT1VOL_SHIFT 0 /* LOUT1VOL - [6:0] */
+#define WM8961_LOUT1VOL_WIDTH 7 /* LOUT1VOL - [6:0] */
+
+/*
+ * R3 (0x03) - ROUT1 volume
+ */
+#define WM8961_OUT1VU 0x0100 /* OUT1VU */
+#define WM8961_OUT1VU_MASK 0x0100 /* OUT1VU */
+#define WM8961_OUT1VU_SHIFT 8 /* OUT1VU */
+#define WM8961_OUT1VU_WIDTH 1 /* OUT1VU */
+#define WM8961_RO1ZC 0x0080 /* RO1ZC */
+#define WM8961_RO1ZC_MASK 0x0080 /* RO1ZC */
+#define WM8961_RO1ZC_SHIFT 7 /* RO1ZC */
+#define WM8961_RO1ZC_WIDTH 1 /* RO1ZC */
+#define WM8961_ROUT1VOL_MASK 0x007F /* ROUT1VOL - [6:0] */
+#define WM8961_ROUT1VOL_SHIFT 0 /* ROUT1VOL - [6:0] */
+#define WM8961_ROUT1VOL_WIDTH 7 /* ROUT1VOL - [6:0] */
+
+/*
+ * R4 (0x04) - Clocking1
+ */
+#define WM8961_ADCDIV_MASK 0x01C0 /* ADCDIV - [8:6] */
+#define WM8961_ADCDIV_SHIFT 6 /* ADCDIV - [8:6] */
+#define WM8961_ADCDIV_WIDTH 3 /* ADCDIV - [8:6] */
+#define WM8961_DACDIV_MASK 0x0038 /* DACDIV - [5:3] */
+#define WM8961_DACDIV_SHIFT 3 /* DACDIV - [5:3] */
+#define WM8961_DACDIV_WIDTH 3 /* DACDIV - [5:3] */
+#define WM8961_MCLKDIV 0x0004 /* MCLKDIV */
+#define WM8961_MCLKDIV_MASK 0x0004 /* MCLKDIV */
+#define WM8961_MCLKDIV_SHIFT 2 /* MCLKDIV */
+#define WM8961_MCLKDIV_WIDTH 1 /* MCLKDIV */
+
+/*
+ * R5 (0x05) - ADC & DAC Control 1
+ */
+#define WM8961_ADCPOL_MASK 0x0060 /* ADCPOL - [6:5] */
+#define WM8961_ADCPOL_SHIFT 5 /* ADCPOL - [6:5] */
+#define WM8961_ADCPOL_WIDTH 2 /* ADCPOL - [6:5] */
+#define WM8961_DACMU 0x0008 /* DACMU */
+#define WM8961_DACMU_MASK 0x0008 /* DACMU */
+#define WM8961_DACMU_SHIFT 3 /* DACMU */
+#define WM8961_DACMU_WIDTH 1 /* DACMU */
+#define WM8961_DEEMPH_MASK 0x0006 /* DEEMPH - [2:1] */
+#define WM8961_DEEMPH_SHIFT 1 /* DEEMPH - [2:1] */
+#define WM8961_DEEMPH_WIDTH 2 /* DEEMPH - [2:1] */
+#define WM8961_ADCHPD 0x0001 /* ADCHPD */
+#define WM8961_ADCHPD_MASK 0x0001 /* ADCHPD */
+#define WM8961_ADCHPD_SHIFT 0 /* ADCHPD */
+#define WM8961_ADCHPD_WIDTH 1 /* ADCHPD */
+
+/*
+ * R6 (0x06) - ADC & DAC Control 2
+ */
+#define WM8961_ADC_HPF_CUT_MASK 0x0180 /* ADC_HPF_CUT - [8:7] */
+#define WM8961_ADC_HPF_CUT_SHIFT 7 /* ADC_HPF_CUT - [8:7] */
+#define WM8961_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [8:7] */
+#define WM8961_DACPOL_MASK 0x0060 /* DACPOL - [6:5] */
+#define WM8961_DACPOL_SHIFT 5 /* DACPOL - [6:5] */
+#define WM8961_DACPOL_WIDTH 2 /* DACPOL - [6:5] */
+#define WM8961_DACSMM 0x0008 /* DACSMM */
+#define WM8961_DACSMM_MASK 0x0008 /* DACSMM */
+#define WM8961_DACSMM_SHIFT 3 /* DACSMM */
+#define WM8961_DACSMM_WIDTH 1 /* DACSMM */
+#define WM8961_DACMR 0x0004 /* DACMR */
+#define WM8961_DACMR_MASK 0x0004 /* DACMR */
+#define WM8961_DACMR_SHIFT 2 /* DACMR */
+#define WM8961_DACMR_WIDTH 1 /* DACMR */
+#define WM8961_DACSLOPE 0x0002 /* DACSLOPE */
+#define WM8961_DACSLOPE_MASK 0x0002 /* DACSLOPE */
+#define WM8961_DACSLOPE_SHIFT 1 /* DACSLOPE */
+#define WM8961_DACSLOPE_WIDTH 1 /* DACSLOPE */
+#define WM8961_DAC_OSR128 0x0001 /* DAC_OSR128 */
+#define WM8961_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */
+#define WM8961_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */
+#define WM8961_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */
+
+/*
+ * R7 (0x07) - Audio Interface 0
+ */
+#define WM8961_ALRSWAP 0x0100 /* ALRSWAP */
+#define WM8961_ALRSWAP_MASK 0x0100 /* ALRSWAP */
+#define WM8961_ALRSWAP_SHIFT 8 /* ALRSWAP */
+#define WM8961_ALRSWAP_WIDTH 1 /* ALRSWAP */
+#define WM8961_BCLKINV 0x0080 /* BCLKINV */
+#define WM8961_BCLKINV_MASK 0x0080 /* BCLKINV */
+#define WM8961_BCLKINV_SHIFT 7 /* BCLKINV */
+#define WM8961_BCLKINV_WIDTH 1 /* BCLKINV */
+#define WM8961_MS 0x0040 /* MS */
+#define WM8961_MS_MASK 0x0040 /* MS */
+#define WM8961_MS_SHIFT 6 /* MS */
+#define WM8961_MS_WIDTH 1 /* MS */
+#define WM8961_DLRSWAP 0x0020 /* DLRSWAP */
+#define WM8961_DLRSWAP_MASK 0x0020 /* DLRSWAP */
+#define WM8961_DLRSWAP_SHIFT 5 /* DLRSWAP */
+#define WM8961_DLRSWAP_WIDTH 1 /* DLRSWAP */
+#define WM8961_LRP 0x0010 /* LRP */
+#define WM8961_LRP_MASK 0x0010 /* LRP */
+#define WM8961_LRP_SHIFT 4 /* LRP */
+#define WM8961_LRP_WIDTH 1 /* LRP */
+#define WM8961_WL_MASK 0x000C /* WL - [3:2] */
+#define WM8961_WL_SHIFT 2 /* WL - [3:2] */
+#define WM8961_WL_WIDTH 2 /* WL - [3:2] */
+#define WM8961_FORMAT_MASK 0x0003 /* FORMAT - [1:0] */
+#define WM8961_FORMAT_SHIFT 0 /* FORMAT - [1:0] */
+#define WM8961_FORMAT_WIDTH 2 /* FORMAT - [1:0] */
+
+/*
+ * R8 (0x08) - Clocking2
+ */
+#define WM8961_DCLKDIV_MASK 0x01C0 /* DCLKDIV - [8:6] */
+#define WM8961_DCLKDIV_SHIFT 6 /* DCLKDIV - [8:6] */
+#define WM8961_DCLKDIV_WIDTH 3 /* DCLKDIV - [8:6] */
+#define WM8961_CLK_SYS_ENA 0x0020 /* CLK_SYS_ENA */
+#define WM8961_CLK_SYS_ENA_MASK 0x0020 /* CLK_SYS_ENA */
+#define WM8961_CLK_SYS_ENA_SHIFT 5 /* CLK_SYS_ENA */
+#define WM8961_CLK_SYS_ENA_WIDTH 1 /* CLK_SYS_ENA */
+#define WM8961_CLK_DSP_ENA 0x0010 /* CLK_DSP_ENA */
+#define WM8961_CLK_DSP_ENA_MASK 0x0010 /* CLK_DSP_ENA */
+#define WM8961_CLK_DSP_ENA_SHIFT 4 /* CLK_DSP_ENA */
+#define WM8961_CLK_DSP_ENA_WIDTH 1 /* CLK_DSP_ENA */
+#define WM8961_BCLKDIV_MASK 0x000F /* BCLKDIV - [3:0] */
+#define WM8961_BCLKDIV_SHIFT 0 /* BCLKDIV - [3:0] */
+#define WM8961_BCLKDIV_WIDTH 4 /* BCLKDIV - [3:0] */
+
+/*
+ * R9 (0x09) - Audio Interface 1
+ */
+#define WM8961_DACCOMP_MASK 0x0018 /* DACCOMP - [4:3] */
+#define WM8961_DACCOMP_SHIFT 3 /* DACCOMP - [4:3] */
+#define WM8961_DACCOMP_WIDTH 2 /* DACCOMP - [4:3] */
+#define WM8961_ADCCOMP_MASK 0x0006 /* ADCCOMP - [2:1] */
+#define WM8961_ADCCOMP_SHIFT 1 /* ADCCOMP - [2:1] */
+#define WM8961_ADCCOMP_WIDTH 2 /* ADCCOMP - [2:1] */
+#define WM8961_LOOPBACK 0x0001 /* LOOPBACK */
+#define WM8961_LOOPBACK_MASK 0x0001 /* LOOPBACK */
+#define WM8961_LOOPBACK_SHIFT 0 /* LOOPBACK */
+#define WM8961_LOOPBACK_WIDTH 1 /* LOOPBACK */
+
+/*
+ * R10 (0x0A) - Left DAC volume
+ */
+#define WM8961_DACVU 0x0100 /* DACVU */
+#define WM8961_DACVU_MASK 0x0100 /* DACVU */
+#define WM8961_DACVU_SHIFT 8 /* DACVU */
+#define WM8961_DACVU_WIDTH 1 /* DACVU */
+#define WM8961_LDACVOL_MASK 0x00FF /* LDACVOL - [7:0] */
+#define WM8961_LDACVOL_SHIFT 0 /* LDACVOL - [7:0] */
+#define WM8961_LDACVOL_WIDTH 8 /* LDACVOL - [7:0] */
+
+/*
+ * R11 (0x0B) - Right DAC volume
+ */
+#define WM8961_DACVU 0x0100 /* DACVU */
+#define WM8961_DACVU_MASK 0x0100 /* DACVU */
+#define WM8961_DACVU_SHIFT 8 /* DACVU */
+#define WM8961_DACVU_WIDTH 1 /* DACVU */
+#define WM8961_RDACVOL_MASK 0x00FF /* RDACVOL - [7:0] */
+#define WM8961_RDACVOL_SHIFT 0 /* RDACVOL - [7:0] */
+#define WM8961_RDACVOL_WIDTH 8 /* RDACVOL - [7:0] */
+
+/*
+ * R14 (0x0E) - Audio Interface 2
+ */
+#define WM8961_LRCLK_RATE_MASK 0x01FF /* LRCLK_RATE - [8:0] */
+#define WM8961_LRCLK_RATE_SHIFT 0 /* LRCLK_RATE - [8:0] */
+#define WM8961_LRCLK_RATE_WIDTH 9 /* LRCLK_RATE - [8:0] */
+
+/*
+ * R15 (0x0F) - Software Reset
+ */
+#define WM8961_SW_RST_DEV_ID1_MASK 0xFFFF /* SW_RST_DEV_ID1 - [15:0] */
+#define WM8961_SW_RST_DEV_ID1_SHIFT 0 /* SW_RST_DEV_ID1 - [15:0] */
+#define WM8961_SW_RST_DEV_ID1_WIDTH 16 /* SW_RST_DEV_ID1 - [15:0] */
+
+/*
+ * R17 (0x11) - ALC1
+ */
+#define WM8961_ALCSEL_MASK 0x0180 /* ALCSEL - [8:7] */
+#define WM8961_ALCSEL_SHIFT 7 /* ALCSEL - [8:7] */
+#define WM8961_ALCSEL_WIDTH 2 /* ALCSEL - [8:7] */
+#define WM8961_MAXGAIN_MASK 0x0070 /* MAXGAIN - [6:4] */
+#define WM8961_MAXGAIN_SHIFT 4 /* MAXGAIN - [6:4] */
+#define WM8961_MAXGAIN_WIDTH 3 /* MAXGAIN - [6:4] */
+#define WM8961_ALCL_MASK 0x000F /* ALCL - [3:0] */
+#define WM8961_ALCL_SHIFT 0 /* ALCL - [3:0] */
+#define WM8961_ALCL_WIDTH 4 /* ALCL - [3:0] */
+
+/*
+ * R18 (0x12) - ALC2
+ */
+#define WM8961_ALCZC 0x0080 /* ALCZC */
+#define WM8961_ALCZC_MASK 0x0080 /* ALCZC */
+#define WM8961_ALCZC_SHIFT 7 /* ALCZC */
+#define WM8961_ALCZC_WIDTH 1 /* ALCZC */
+#define WM8961_MINGAIN_MASK 0x0070 /* MINGAIN - [6:4] */
+#define WM8961_MINGAIN_SHIFT 4 /* MINGAIN - [6:4] */
+#define WM8961_MINGAIN_WIDTH 3 /* MINGAIN - [6:4] */
+#define WM8961_HLD_MASK 0x000F /* HLD - [3:0] */
+#define WM8961_HLD_SHIFT 0 /* HLD - [3:0] */
+#define WM8961_HLD_WIDTH 4 /* HLD - [3:0] */
+
+/*
+ * R19 (0x13) - ALC3
+ */
+#define WM8961_ALCMODE 0x0100 /* ALCMODE */
+#define WM8961_ALCMODE_MASK 0x0100 /* ALCMODE */
+#define WM8961_ALCMODE_SHIFT 8 /* ALCMODE */
+#define WM8961_ALCMODE_WIDTH 1 /* ALCMODE */
+#define WM8961_DCY_MASK 0x00F0 /* DCY - [7:4] */
+#define WM8961_DCY_SHIFT 4 /* DCY - [7:4] */
+#define WM8961_DCY_WIDTH 4 /* DCY - [7:4] */
+#define WM8961_ATK_MASK 0x000F /* ATK - [3:0] */
+#define WM8961_ATK_SHIFT 0 /* ATK - [3:0] */
+#define WM8961_ATK_WIDTH 4 /* ATK - [3:0] */
+
+/*
+ * R20 (0x14) - Noise Gate
+ */
+#define WM8961_NGTH_MASK 0x00F8 /* NGTH - [7:3] */
+#define WM8961_NGTH_SHIFT 3 /* NGTH - [7:3] */
+#define WM8961_NGTH_WIDTH 5 /* NGTH - [7:3] */
+#define WM8961_NGG 0x0002 /* NGG */
+#define WM8961_NGG_MASK 0x0002 /* NGG */
+#define WM8961_NGG_SHIFT 1 /* NGG */
+#define WM8961_NGG_WIDTH 1 /* NGG */
+#define WM8961_NGAT 0x0001 /* NGAT */
+#define WM8961_NGAT_MASK 0x0001 /* NGAT */
+#define WM8961_NGAT_SHIFT 0 /* NGAT */
+#define WM8961_NGAT_WIDTH 1 /* NGAT */
+
+/*
+ * R21 (0x15) - Left ADC volume
+ */
+#define WM8961_ADCVU 0x0100 /* ADCVU */
+#define WM8961_ADCVU_MASK 0x0100 /* ADCVU */
+#define WM8961_ADCVU_SHIFT 8 /* ADCVU */
+#define WM8961_ADCVU_WIDTH 1 /* ADCVU */
+#define WM8961_LADCVOL_MASK 0x00FF /* LADCVOL - [7:0] */
+#define WM8961_LADCVOL_SHIFT 0 /* LADCVOL - [7:0] */
+#define WM8961_LADCVOL_WIDTH 8 /* LADCVOL - [7:0] */
+
+/*
+ * R22 (0x16) - Right ADC volume
+ */
+#define WM8961_ADCVU 0x0100 /* ADCVU */
+#define WM8961_ADCVU_MASK 0x0100 /* ADCVU */
+#define WM8961_ADCVU_SHIFT 8 /* ADCVU */
+#define WM8961_ADCVU_WIDTH 1 /* ADCVU */
+#define WM8961_RADCVOL_MASK 0x00FF /* RADCVOL - [7:0] */
+#define WM8961_RADCVOL_SHIFT 0 /* RADCVOL - [7:0] */
+#define WM8961_RADCVOL_WIDTH 8 /* RADCVOL - [7:0] */
+
+/*
+ * R23 (0x17) - Additional control(1)
+ */
+#define WM8961_TSDEN 0x0100 /* TSDEN */
+#define WM8961_TSDEN_MASK 0x0100 /* TSDEN */
+#define WM8961_TSDEN_SHIFT 8 /* TSDEN */
+#define WM8961_TSDEN_WIDTH 1 /* TSDEN */
+#define WM8961_DMONOMIX 0x0010 /* DMONOMIX */
+#define WM8961_DMONOMIX_MASK 0x0010 /* DMONOMIX */
+#define WM8961_DMONOMIX_SHIFT 4 /* DMONOMIX */
+#define WM8961_DMONOMIX_WIDTH 1 /* DMONOMIX */
+#define WM8961_TOEN 0x0001 /* TOEN */
+#define WM8961_TOEN_MASK 0x0001 /* TOEN */
+#define WM8961_TOEN_SHIFT 0 /* TOEN */
+#define WM8961_TOEN_WIDTH 1 /* TOEN */
+
+/*
+ * R24 (0x18) - Additional control(2)
+ */
+#define WM8961_TRIS 0x0008 /* TRIS */
+#define WM8961_TRIS_MASK 0x0008 /* TRIS */
+#define WM8961_TRIS_SHIFT 3 /* TRIS */
+#define WM8961_TRIS_WIDTH 1 /* TRIS */
+
+/*
+ * R25 (0x19) - Pwr Mgmt (1)
+ */
+#define WM8961_VMIDSEL_MASK 0x0180 /* VMIDSEL - [8:7] */
+#define WM8961_VMIDSEL_SHIFT 7 /* VMIDSEL - [8:7] */
+#define WM8961_VMIDSEL_WIDTH 2 /* VMIDSEL - [8:7] */
+#define WM8961_VREF 0x0040 /* VREF */
+#define WM8961_VREF_MASK 0x0040 /* VREF */
+#define WM8961_VREF_SHIFT 6 /* VREF */
+#define WM8961_VREF_WIDTH 1 /* VREF */
+#define WM8961_AINL 0x0020 /* AINL */
+#define WM8961_AINL_MASK 0x0020 /* AINL */
+#define WM8961_AINL_SHIFT 5 /* AINL */
+#define WM8961_AINL_WIDTH 1 /* AINL */
+#define WM8961_AINR 0x0010 /* AINR */
+#define WM8961_AINR_MASK 0x0010 /* AINR */
+#define WM8961_AINR_SHIFT 4 /* AINR */
+#define WM8961_AINR_WIDTH 1 /* AINR */
+#define WM8961_ADCL 0x0008 /* ADCL */
+#define WM8961_ADCL_MASK 0x0008 /* ADCL */
+#define WM8961_ADCL_SHIFT 3 /* ADCL */
+#define WM8961_ADCL_WIDTH 1 /* ADCL */
+#define WM8961_ADCR 0x0004 /* ADCR */
+#define WM8961_ADCR_MASK 0x0004 /* ADCR */
+#define WM8961_ADCR_SHIFT 2 /* ADCR */
+#define WM8961_ADCR_WIDTH 1 /* ADCR */
+#define WM8961_MICB 0x0002 /* MICB */
+#define WM8961_MICB_MASK 0x0002 /* MICB */
+#define WM8961_MICB_SHIFT 1 /* MICB */
+#define WM8961_MICB_WIDTH 1 /* MICB */
+
+/*
+ * R26 (0x1A) - Pwr Mgmt (2)
+ */
+#define WM8961_DACL 0x0100 /* DACL */
+#define WM8961_DACL_MASK 0x0100 /* DACL */
+#define WM8961_DACL_SHIFT 8 /* DACL */
+#define WM8961_DACL_WIDTH 1 /* DACL */
+#define WM8961_DACR 0x0080 /* DACR */
+#define WM8961_DACR_MASK 0x0080 /* DACR */
+#define WM8961_DACR_SHIFT 7 /* DACR */
+#define WM8961_DACR_WIDTH 1 /* DACR */
+#define WM8961_LOUT1_PGA 0x0040 /* LOUT1_PGA */
+#define WM8961_LOUT1_PGA_MASK 0x0040 /* LOUT1_PGA */
+#define WM8961_LOUT1_PGA_SHIFT 6 /* LOUT1_PGA */
+#define WM8961_LOUT1_PGA_WIDTH 1 /* LOUT1_PGA */
+#define WM8961_ROUT1_PGA 0x0020 /* ROUT1_PGA */
+#define WM8961_ROUT1_PGA_MASK 0x0020 /* ROUT1_PGA */
+#define WM8961_ROUT1_PGA_SHIFT 5 /* ROUT1_PGA */
+#define WM8961_ROUT1_PGA_WIDTH 1 /* ROUT1_PGA */
+#define WM8961_SPKL_PGA 0x0010 /* SPKL_PGA */
+#define WM8961_SPKL_PGA_MASK 0x0010 /* SPKL_PGA */
+#define WM8961_SPKL_PGA_SHIFT 4 /* SPKL_PGA */
+#define WM8961_SPKL_PGA_WIDTH 1 /* SPKL_PGA */
+#define WM8961_SPKR_PGA 0x0008 /* SPKR_PGA */
+#define WM8961_SPKR_PGA_MASK 0x0008 /* SPKR_PGA */
+#define WM8961_SPKR_PGA_SHIFT 3 /* SPKR_PGA */
+#define WM8961_SPKR_PGA_WIDTH 1 /* SPKR_PGA */
+
+/*
+ * R27 (0x1B) - Additional Control (3)
+ */
+#define WM8961_SAMPLE_RATE_MASK 0x0007 /* SAMPLE_RATE - [2:0] */
+#define WM8961_SAMPLE_RATE_SHIFT 0 /* SAMPLE_RATE - [2:0] */
+#define WM8961_SAMPLE_RATE_WIDTH 3 /* SAMPLE_RATE - [2:0] */
+
+/*
+ * R28 (0x1C) - Anti-pop
+ */
+#define WM8961_BUFDCOPEN 0x0010 /* BUFDCOPEN */
+#define WM8961_BUFDCOPEN_MASK 0x0010 /* BUFDCOPEN */
+#define WM8961_BUFDCOPEN_SHIFT 4 /* BUFDCOPEN */
+#define WM8961_BUFDCOPEN_WIDTH 1 /* BUFDCOPEN */
+#define WM8961_BUFIOEN 0x0008 /* BUFIOEN */
+#define WM8961_BUFIOEN_MASK 0x0008 /* BUFIOEN */
+#define WM8961_BUFIOEN_SHIFT 3 /* BUFIOEN */
+#define WM8961_BUFIOEN_WIDTH 1 /* BUFIOEN */
+#define WM8961_SOFT_ST 0x0004 /* SOFT_ST */
+#define WM8961_SOFT_ST_MASK 0x0004 /* SOFT_ST */
+#define WM8961_SOFT_ST_SHIFT 2 /* SOFT_ST */
+#define WM8961_SOFT_ST_WIDTH 1 /* SOFT_ST */
+
+/*
+ * R30 (0x1E) - Clocking 3
+ */
+#define WM8961_CLK_TO_DIV_MASK 0x0180 /* CLK_TO_DIV - [8:7] */
+#define WM8961_CLK_TO_DIV_SHIFT 7 /* CLK_TO_DIV - [8:7] */
+#define WM8961_CLK_TO_DIV_WIDTH 2 /* CLK_TO_DIV - [8:7] */
+#define WM8961_CLK_256K_DIV_MASK 0x007E /* CLK_256K_DIV - [6:1] */
+#define WM8961_CLK_256K_DIV_SHIFT 1 /* CLK_256K_DIV - [6:1] */
+#define WM8961_CLK_256K_DIV_WIDTH 6 /* CLK_256K_DIV - [6:1] */
+#define WM8961_MANUAL_MODE 0x0001 /* MANUAL_MODE */
+#define WM8961_MANUAL_MODE_MASK 0x0001 /* MANUAL_MODE */
+#define WM8961_MANUAL_MODE_SHIFT 0 /* MANUAL_MODE */
+#define WM8961_MANUAL_MODE_WIDTH 1 /* MANUAL_MODE */
+
+/*
+ * R32 (0x20) - ADCL signal path
+ */
+#define WM8961_LMICBOOST_MASK 0x0030 /* LMICBOOST - [5:4] */
+#define WM8961_LMICBOOST_SHIFT 4 /* LMICBOOST - [5:4] */
+#define WM8961_LMICBOOST_WIDTH 2 /* LMICBOOST - [5:4] */
+
+/*
+ * R33 (0x21) - ADCR signal path
+ */
+#define WM8961_RMICBOOST_MASK 0x0030 /* RMICBOOST - [5:4] */
+#define WM8961_RMICBOOST_SHIFT 4 /* RMICBOOST - [5:4] */
+#define WM8961_RMICBOOST_WIDTH 2 /* RMICBOOST - [5:4] */
+
+/*
+ * R40 (0x28) - LOUT2 volume
+ */
+#define WM8961_SPKVU 0x0100 /* SPKVU */
+#define WM8961_SPKVU_MASK 0x0100 /* SPKVU */
+#define WM8961_SPKVU_SHIFT 8 /* SPKVU */
+#define WM8961_SPKVU_WIDTH 1 /* SPKVU */
+#define WM8961_SPKLZC 0x0080 /* SPKLZC */
+#define WM8961_SPKLZC_MASK 0x0080 /* SPKLZC */
+#define WM8961_SPKLZC_SHIFT 7 /* SPKLZC */
+#define WM8961_SPKLZC_WIDTH 1 /* SPKLZC */
+#define WM8961_SPKLVOL_MASK 0x007F /* SPKLVOL - [6:0] */
+#define WM8961_SPKLVOL_SHIFT 0 /* SPKLVOL - [6:0] */
+#define WM8961_SPKLVOL_WIDTH 7 /* SPKLVOL - [6:0] */
+
+/*
+ * R41 (0x29) - ROUT2 volume
+ */
+#define WM8961_SPKVU 0x0100 /* SPKVU */
+#define WM8961_SPKVU_MASK 0x0100 /* SPKVU */
+#define WM8961_SPKVU_SHIFT 8 /* SPKVU */
+#define WM8961_SPKVU_WIDTH 1 /* SPKVU */
+#define WM8961_SPKRZC 0x0080 /* SPKRZC */
+#define WM8961_SPKRZC_MASK 0x0080 /* SPKRZC */
+#define WM8961_SPKRZC_SHIFT 7 /* SPKRZC */
+#define WM8961_SPKRZC_WIDTH 1 /* SPKRZC */
+#define WM8961_SPKRVOL_MASK 0x007F /* SPKRVOL - [6:0] */
+#define WM8961_SPKRVOL_SHIFT 0 /* SPKRVOL - [6:0] */
+#define WM8961_SPKRVOL_WIDTH 7 /* SPKRVOL - [6:0] */
+
+/*
+ * R47 (0x2F) - Pwr Mgmt (3)
+ */
+#define WM8961_TEMP_SHUT 0x0002 /* TEMP_SHUT */
+#define WM8961_TEMP_SHUT_MASK 0x0002 /* TEMP_SHUT */
+#define WM8961_TEMP_SHUT_SHIFT 1 /* TEMP_SHUT */
+#define WM8961_TEMP_SHUT_WIDTH 1 /* TEMP_SHUT */
+#define WM8961_TEMP_WARN 0x0001 /* TEMP_WARN */
+#define WM8961_TEMP_WARN_MASK 0x0001 /* TEMP_WARN */
+#define WM8961_TEMP_WARN_SHIFT 0 /* TEMP_WARN */
+#define WM8961_TEMP_WARN_WIDTH 1 /* TEMP_WARN */
+
+/*
+ * R48 (0x30) - Additional Control (4)
+ */
+#define WM8961_TSENSEN 0x0002 /* TSENSEN */
+#define WM8961_TSENSEN_MASK 0x0002 /* TSENSEN */
+#define WM8961_TSENSEN_SHIFT 1 /* TSENSEN */
+#define WM8961_TSENSEN_WIDTH 1 /* TSENSEN */
+#define WM8961_MBSEL 0x0001 /* MBSEL */
+#define WM8961_MBSEL_MASK 0x0001 /* MBSEL */
+#define WM8961_MBSEL_SHIFT 0 /* MBSEL */
+#define WM8961_MBSEL_WIDTH 1 /* MBSEL */
+
+/*
+ * R49 (0x31) - Class D Control 1
+ */
+#define WM8961_SPKR_ENA 0x0080 /* SPKR_ENA */
+#define WM8961_SPKR_ENA_MASK 0x0080 /* SPKR_ENA */
+#define WM8961_SPKR_ENA_SHIFT 7 /* SPKR_ENA */
+#define WM8961_SPKR_ENA_WIDTH 1 /* SPKR_ENA */
+#define WM8961_SPKL_ENA 0x0040 /* SPKL_ENA */
+#define WM8961_SPKL_ENA_MASK 0x0040 /* SPKL_ENA */
+#define WM8961_SPKL_ENA_SHIFT 6 /* SPKL_ENA */
+#define WM8961_SPKL_ENA_WIDTH 1 /* SPKL_ENA */
+
+/*
+ * R51 (0x33) - Class D Control 2
+ */
+#define WM8961_CLASSD_ACGAIN_MASK 0x0007 /* CLASSD_ACGAIN - [2:0] */
+#define WM8961_CLASSD_ACGAIN_SHIFT 0 /* CLASSD_ACGAIN - [2:0] */
+#define WM8961_CLASSD_ACGAIN_WIDTH 3 /* CLASSD_ACGAIN - [2:0] */
+
+/*
+ * R56 (0x38) - Clocking 4
+ */
+#define WM8961_CLK_DCS_DIV_MASK 0x01E0 /* CLK_DCS_DIV - [8:5] */
+#define WM8961_CLK_DCS_DIV_SHIFT 5 /* CLK_DCS_DIV - [8:5] */
+#define WM8961_CLK_DCS_DIV_WIDTH 4 /* CLK_DCS_DIV - [8:5] */
+#define WM8961_CLK_SYS_RATE_MASK 0x001E /* CLK_SYS_RATE - [4:1] */
+#define WM8961_CLK_SYS_RATE_SHIFT 1 /* CLK_SYS_RATE - [4:1] */
+#define WM8961_CLK_SYS_RATE_WIDTH 4 /* CLK_SYS_RATE - [4:1] */
+
+/*
+ * R57 (0x39) - DSP Sidetone 0
+ */
+#define WM8961_ADCR_DAC_SVOL_MASK 0x00F0 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8961_ADCR_DAC_SVOL_SHIFT 4 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8961_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8961_ADC_TO_DACR_MASK 0x000C /* ADC_TO_DACR - [3:2] */
+#define WM8961_ADC_TO_DACR_SHIFT 2 /* ADC_TO_DACR - [3:2] */
+#define WM8961_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [3:2] */
+
+/*
+ * R58 (0x3A) - DSP Sidetone 1
+ */
+#define WM8961_ADCL_DAC_SVOL_MASK 0x00F0 /* ADCL_DAC_SVOL - [7:4] */
+#define WM8961_ADCL_DAC_SVOL_SHIFT 4 /* ADCL_DAC_SVOL - [7:4] */
+#define WM8961_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [7:4] */
+#define WM8961_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */
+#define WM8961_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */
+#define WM8961_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */
+
+/*
+ * R60 (0x3C) - DC Servo 0
+ */
+#define WM8961_DCS_ENA_CHAN_INL 0x0080 /* DCS_ENA_CHAN_INL */
+#define WM8961_DCS_ENA_CHAN_INL_MASK 0x0080 /* DCS_ENA_CHAN_INL */
+#define WM8961_DCS_ENA_CHAN_INL_SHIFT 7 /* DCS_ENA_CHAN_INL */
+#define WM8961_DCS_ENA_CHAN_INL_WIDTH 1 /* DCS_ENA_CHAN_INL */
+#define WM8961_DCS_TRIG_STARTUP_INL 0x0040 /* DCS_TRIG_STARTUP_INL */
+#define WM8961_DCS_TRIG_STARTUP_INL_MASK 0x0040 /* DCS_TRIG_STARTUP_INL */
+#define WM8961_DCS_TRIG_STARTUP_INL_SHIFT 6 /* DCS_TRIG_STARTUP_INL */
+#define WM8961_DCS_TRIG_STARTUP_INL_WIDTH 1 /* DCS_TRIG_STARTUP_INL */
+#define WM8961_DCS_TRIG_SERIES_INL 0x0010 /* DCS_TRIG_SERIES_INL */
+#define WM8961_DCS_TRIG_SERIES_INL_MASK 0x0010 /* DCS_TRIG_SERIES_INL */
+#define WM8961_DCS_TRIG_SERIES_INL_SHIFT 4 /* DCS_TRIG_SERIES_INL */
+#define WM8961_DCS_TRIG_SERIES_INL_WIDTH 1 /* DCS_TRIG_SERIES_INL */
+#define WM8961_DCS_ENA_CHAN_INR 0x0008 /* DCS_ENA_CHAN_INR */
+#define WM8961_DCS_ENA_CHAN_INR_MASK 0x0008 /* DCS_ENA_CHAN_INR */
+#define WM8961_DCS_ENA_CHAN_INR_SHIFT 3 /* DCS_ENA_CHAN_INR */
+#define WM8961_DCS_ENA_CHAN_INR_WIDTH 1 /* DCS_ENA_CHAN_INR */
+#define WM8961_DCS_TRIG_STARTUP_INR 0x0004 /* DCS_TRIG_STARTUP_INR */
+#define WM8961_DCS_TRIG_STARTUP_INR_MASK 0x0004 /* DCS_TRIG_STARTUP_INR */
+#define WM8961_DCS_TRIG_STARTUP_INR_SHIFT 2 /* DCS_TRIG_STARTUP_INR */
+#define WM8961_DCS_TRIG_STARTUP_INR_WIDTH 1 /* DCS_TRIG_STARTUP_INR */
+#define WM8961_DCS_TRIG_SERIES_INR 0x0001 /* DCS_TRIG_SERIES_INR */
+#define WM8961_DCS_TRIG_SERIES_INR_MASK 0x0001 /* DCS_TRIG_SERIES_INR */
+#define WM8961_DCS_TRIG_SERIES_INR_SHIFT 0 /* DCS_TRIG_SERIES_INR */
+#define WM8961_DCS_TRIG_SERIES_INR_WIDTH 1 /* DCS_TRIG_SERIES_INR */
+
+/*
+ * R61 (0x3D) - DC Servo 1
+ */
+#define WM8961_DCS_ENA_CHAN_HPL 0x0080 /* DCS_ENA_CHAN_HPL */
+#define WM8961_DCS_ENA_CHAN_HPL_MASK 0x0080 /* DCS_ENA_CHAN_HPL */
+#define WM8961_DCS_ENA_CHAN_HPL_SHIFT 7 /* DCS_ENA_CHAN_HPL */
+#define WM8961_DCS_ENA_CHAN_HPL_WIDTH 1 /* DCS_ENA_CHAN_HPL */
+#define WM8961_DCS_TRIG_STARTUP_HPL 0x0040 /* DCS_TRIG_STARTUP_HPL */
+#define WM8961_DCS_TRIG_STARTUP_HPL_MASK 0x0040 /* DCS_TRIG_STARTUP_HPL */
+#define WM8961_DCS_TRIG_STARTUP_HPL_SHIFT 6 /* DCS_TRIG_STARTUP_HPL */
+#define WM8961_DCS_TRIG_STARTUP_HPL_WIDTH 1 /* DCS_TRIG_STARTUP_HPL */
+#define WM8961_DCS_TRIG_SERIES_HPL 0x0010 /* DCS_TRIG_SERIES_HPL */
+#define WM8961_DCS_TRIG_SERIES_HPL_MASK 0x0010 /* DCS_TRIG_SERIES_HPL */
+#define WM8961_DCS_TRIG_SERIES_HPL_SHIFT 4 /* DCS_TRIG_SERIES_HPL */
+#define WM8961_DCS_TRIG_SERIES_HPL_WIDTH 1 /* DCS_TRIG_SERIES_HPL */
+#define WM8961_DCS_ENA_CHAN_HPR 0x0008 /* DCS_ENA_CHAN_HPR */
+#define WM8961_DCS_ENA_CHAN_HPR_MASK 0x0008 /* DCS_ENA_CHAN_HPR */
+#define WM8961_DCS_ENA_CHAN_HPR_SHIFT 3 /* DCS_ENA_CHAN_HPR */
+#define WM8961_DCS_ENA_CHAN_HPR_WIDTH 1 /* DCS_ENA_CHAN_HPR */
+#define WM8961_DCS_TRIG_STARTUP_HPR 0x0004 /* DCS_TRIG_STARTUP_HPR */
+#define WM8961_DCS_TRIG_STARTUP_HPR_MASK 0x0004 /* DCS_TRIG_STARTUP_HPR */
+#define WM8961_DCS_TRIG_STARTUP_HPR_SHIFT 2 /* DCS_TRIG_STARTUP_HPR */
+#define WM8961_DCS_TRIG_STARTUP_HPR_WIDTH 1 /* DCS_TRIG_STARTUP_HPR */
+#define WM8961_DCS_TRIG_SERIES_HPR 0x0001 /* DCS_TRIG_SERIES_HPR */
+#define WM8961_DCS_TRIG_SERIES_HPR_MASK 0x0001 /* DCS_TRIG_SERIES_HPR */
+#define WM8961_DCS_TRIG_SERIES_HPR_SHIFT 0 /* DCS_TRIG_SERIES_HPR */
+#define WM8961_DCS_TRIG_SERIES_HPR_WIDTH 1 /* DCS_TRIG_SERIES_HPR */
+
+/*
+ * R63 (0x3F) - DC Servo 3
+ */
+#define WM8961_DCS_FILT_BW_SERIES_MASK 0x0030 /* DCS_FILT_BW_SERIES - [5:4] */
+#define WM8961_DCS_FILT_BW_SERIES_SHIFT 4 /* DCS_FILT_BW_SERIES - [5:4] */
+#define WM8961_DCS_FILT_BW_SERIES_WIDTH 2 /* DCS_FILT_BW_SERIES - [5:4] */
+
+/*
+ * R65 (0x41) - DC Servo 5
+ */
+#define WM8961_DCS_SERIES_NO_HP_MASK 0x007F /* DCS_SERIES_NO_HP - [6:0] */
+#define WM8961_DCS_SERIES_NO_HP_SHIFT 0 /* DCS_SERIES_NO_HP - [6:0] */
+#define WM8961_DCS_SERIES_NO_HP_WIDTH 7 /* DCS_SERIES_NO_HP - [6:0] */
+
+/*
+ * R68 (0x44) - Analogue PGA Bias
+ */
+#define WM8961_HP_PGAS_BIAS_MASK 0x0007 /* HP_PGAS_BIAS - [2:0] */
+#define WM8961_HP_PGAS_BIAS_SHIFT 0 /* HP_PGAS_BIAS - [2:0] */
+#define WM8961_HP_PGAS_BIAS_WIDTH 3 /* HP_PGAS_BIAS - [2:0] */
+
+/*
+ * R69 (0x45) - Analogue HP 0
+ */
+#define WM8961_HPL_RMV_SHORT 0x0080 /* HPL_RMV_SHORT */
+#define WM8961_HPL_RMV_SHORT_MASK 0x0080 /* HPL_RMV_SHORT */
+#define WM8961_HPL_RMV_SHORT_SHIFT 7 /* HPL_RMV_SHORT */
+#define WM8961_HPL_RMV_SHORT_WIDTH 1 /* HPL_RMV_SHORT */
+#define WM8961_HPL_ENA_OUTP 0x0040 /* HPL_ENA_OUTP */
+#define WM8961_HPL_ENA_OUTP_MASK 0x0040 /* HPL_ENA_OUTP */
+#define WM8961_HPL_ENA_OUTP_SHIFT 6 /* HPL_ENA_OUTP */
+#define WM8961_HPL_ENA_OUTP_WIDTH 1 /* HPL_ENA_OUTP */
+#define WM8961_HPL_ENA_DLY 0x0020 /* HPL_ENA_DLY */
+#define WM8961_HPL_ENA_DLY_MASK 0x0020 /* HPL_ENA_DLY */
+#define WM8961_HPL_ENA_DLY_SHIFT 5 /* HPL_ENA_DLY */
+#define WM8961_HPL_ENA_DLY_WIDTH 1 /* HPL_ENA_DLY */
+#define WM8961_HPL_ENA 0x0010 /* HPL_ENA */
+#define WM8961_HPL_ENA_MASK 0x0010 /* HPL_ENA */
+#define WM8961_HPL_ENA_SHIFT 4 /* HPL_ENA */
+#define WM8961_HPL_ENA_WIDTH 1 /* HPL_ENA */
+#define WM8961_HPR_RMV_SHORT 0x0008 /* HPR_RMV_SHORT */
+#define WM8961_HPR_RMV_SHORT_MASK 0x0008 /* HPR_RMV_SHORT */
+#define WM8961_HPR_RMV_SHORT_SHIFT 3 /* HPR_RMV_SHORT */
+#define WM8961_HPR_RMV_SHORT_WIDTH 1 /* HPR_RMV_SHORT */
+#define WM8961_HPR_ENA_OUTP 0x0004 /* HPR_ENA_OUTP */
+#define WM8961_HPR_ENA_OUTP_MASK 0x0004 /* HPR_ENA_OUTP */
+#define WM8961_HPR_ENA_OUTP_SHIFT 2 /* HPR_ENA_OUTP */
+#define WM8961_HPR_ENA_OUTP_WIDTH 1 /* HPR_ENA_OUTP */
+#define WM8961_HPR_ENA_DLY 0x0002 /* HPR_ENA_DLY */
+#define WM8961_HPR_ENA_DLY_MASK 0x0002 /* HPR_ENA_DLY */
+#define WM8961_HPR_ENA_DLY_SHIFT 1 /* HPR_ENA_DLY */
+#define WM8961_HPR_ENA_DLY_WIDTH 1 /* HPR_ENA_DLY */
+#define WM8961_HPR_ENA 0x0001 /* HPR_ENA */
+#define WM8961_HPR_ENA_MASK 0x0001 /* HPR_ENA */
+#define WM8961_HPR_ENA_SHIFT 0 /* HPR_ENA */
+#define WM8961_HPR_ENA_WIDTH 1 /* HPR_ENA */
+
+/*
+ * R71 (0x47) - Analogue HP 2
+ */
+#define WM8961_HPL_VOL_MASK 0x01C0 /* HPL_VOL - [8:6] */
+#define WM8961_HPL_VOL_SHIFT 6 /* HPL_VOL - [8:6] */
+#define WM8961_HPL_VOL_WIDTH 3 /* HPL_VOL - [8:6] */
+#define WM8961_HPR_VOL_MASK 0x0038 /* HPR_VOL - [5:3] */
+#define WM8961_HPR_VOL_SHIFT 3 /* HPR_VOL - [5:3] */
+#define WM8961_HPR_VOL_WIDTH 3 /* HPR_VOL - [5:3] */
+#define WM8961_HP_BIAS_BOOST_MASK 0x0007 /* HP_BIAS_BOOST - [2:0] */
+#define WM8961_HP_BIAS_BOOST_SHIFT 0 /* HP_BIAS_BOOST - [2:0] */
+#define WM8961_HP_BIAS_BOOST_WIDTH 3 /* HP_BIAS_BOOST - [2:0] */
+
+/*
+ * R72 (0x48) - Charge Pump 1
+ */
+#define WM8961_CP_ENA 0x0001 /* CP_ENA */
+#define WM8961_CP_ENA_MASK 0x0001 /* CP_ENA */
+#define WM8961_CP_ENA_SHIFT 0 /* CP_ENA */
+#define WM8961_CP_ENA_WIDTH 1 /* CP_ENA */
+
+/*
+ * R82 (0x52) - Charge Pump B
+ */
+#define WM8961_CP_DYN_PWR_MASK 0x0003 /* CP_DYN_PWR - [1:0] */
+#define WM8961_CP_DYN_PWR_SHIFT 0 /* CP_DYN_PWR - [1:0] */
+#define WM8961_CP_DYN_PWR_WIDTH 2 /* CP_DYN_PWR - [1:0] */
+
+/*
+ * R87 (0x57) - Write Sequencer 1
+ */
+#define WM8961_WSEQ_ENA 0x0020 /* WSEQ_ENA */
+#define WM8961_WSEQ_ENA_MASK 0x0020 /* WSEQ_ENA */
+#define WM8961_WSEQ_ENA_SHIFT 5 /* WSEQ_ENA */
+#define WM8961_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define WM8961_WSEQ_WRITE_INDEX_MASK 0x001F /* WSEQ_WRITE_INDEX - [4:0] */
+#define WM8961_WSEQ_WRITE_INDEX_SHIFT 0 /* WSEQ_WRITE_INDEX - [4:0] */
+#define WM8961_WSEQ_WRITE_INDEX_WIDTH 5 /* WSEQ_WRITE_INDEX - [4:0] */
+
+/*
+ * R88 (0x58) - Write Sequencer 2
+ */
+#define WM8961_WSEQ_EOS 0x0100 /* WSEQ_EOS */
+#define WM8961_WSEQ_EOS_MASK 0x0100 /* WSEQ_EOS */
+#define WM8961_WSEQ_EOS_SHIFT 8 /* WSEQ_EOS */
+#define WM8961_WSEQ_EOS_WIDTH 1 /* WSEQ_EOS */
+#define WM8961_WSEQ_ADDR_MASK 0x00FF /* WSEQ_ADDR - [7:0] */
+#define WM8961_WSEQ_ADDR_SHIFT 0 /* WSEQ_ADDR - [7:0] */
+#define WM8961_WSEQ_ADDR_WIDTH 8 /* WSEQ_ADDR - [7:0] */
+
+/*
+ * R89 (0x59) - Write Sequencer 3
+ */
+#define WM8961_WSEQ_DATA_MASK 0x00FF /* WSEQ_DATA - [7:0] */
+#define WM8961_WSEQ_DATA_SHIFT 0 /* WSEQ_DATA - [7:0] */
+#define WM8961_WSEQ_DATA_WIDTH 8 /* WSEQ_DATA - [7:0] */
+
+/*
+ * R90 (0x5A) - Write Sequencer 4
+ */
+#define WM8961_WSEQ_ABORT 0x0100 /* WSEQ_ABORT */
+#define WM8961_WSEQ_ABORT_MASK 0x0100 /* WSEQ_ABORT */
+#define WM8961_WSEQ_ABORT_SHIFT 8 /* WSEQ_ABORT */
+#define WM8961_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define WM8961_WSEQ_START 0x0080 /* WSEQ_START */
+#define WM8961_WSEQ_START_MASK 0x0080 /* WSEQ_START */
+#define WM8961_WSEQ_START_SHIFT 7 /* WSEQ_START */
+#define WM8961_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define WM8961_WSEQ_START_INDEX_MASK 0x003F /* WSEQ_START_INDEX - [5:0] */
+#define WM8961_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [5:0] */
+#define WM8961_WSEQ_START_INDEX_WIDTH 6 /* WSEQ_START_INDEX - [5:0] */
+
+/*
+ * R91 (0x5B) - Write Sequencer 5
+ */
+#define WM8961_WSEQ_DATA_WIDTH_MASK 0x0070 /* WSEQ_DATA_WIDTH - [6:4] */
+#define WM8961_WSEQ_DATA_WIDTH_SHIFT 4 /* WSEQ_DATA_WIDTH - [6:4] */
+#define WM8961_WSEQ_DATA_WIDTH_WIDTH 3 /* WSEQ_DATA_WIDTH - [6:4] */
+#define WM8961_WSEQ_DATA_START_MASK 0x000F /* WSEQ_DATA_START - [3:0] */
+#define WM8961_WSEQ_DATA_START_SHIFT 0 /* WSEQ_DATA_START - [3:0] */
+#define WM8961_WSEQ_DATA_START_WIDTH 4 /* WSEQ_DATA_START - [3:0] */
+
+/*
+ * R92 (0x5C) - Write Sequencer 6
+ */
+#define WM8961_WSEQ_DELAY_MASK 0x000F /* WSEQ_DELAY - [3:0] */
+#define WM8961_WSEQ_DELAY_SHIFT 0 /* WSEQ_DELAY - [3:0] */
+#define WM8961_WSEQ_DELAY_WIDTH 4 /* WSEQ_DELAY - [3:0] */
+
+/*
+ * R93 (0x5D) - Write Sequencer 7
+ */
+#define WM8961_WSEQ_BUSY 0x0001 /* WSEQ_BUSY */
+#define WM8961_WSEQ_BUSY_MASK 0x0001 /* WSEQ_BUSY */
+#define WM8961_WSEQ_BUSY_SHIFT 0 /* WSEQ_BUSY */
+#define WM8961_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+
+/*
+ * R252 (0xFC) - General test 1
+ */
+#define WM8961_ARA_ENA 0x0002 /* ARA_ENA */
+#define WM8961_ARA_ENA_MASK 0x0002 /* ARA_ENA */
+#define WM8961_ARA_ENA_SHIFT 1 /* ARA_ENA */
+#define WM8961_ARA_ENA_WIDTH 1 /* ARA_ENA */
+#define WM8961_AUTO_INC 0x0001 /* AUTO_INC */
+#define WM8961_AUTO_INC_MASK 0x0001 /* AUTO_INC */
+#define WM8961_AUTO_INC_SHIFT 0 /* AUTO_INC */
+#define WM8961_AUTO_INC_WIDTH 1 /* AUTO_INC */
+
+#endif
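
The _MASK/_SHIFT/_WIDTH triples above follow the usual Wolfson header convention: every register field gets a mask, a shift and a width so callers can do read-modify-write updates without hard-coding bit positions. A minimal, self-contained sketch of that pattern (plain C, hypothetical helper name, using the BCLKDIV field purely as an illustration):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: replace one bit-field in a cached register value
 * using a <FIELD>_MASK / <FIELD>_SHIFT pair such as the ones above. */
static uint16_t set_field(uint16_t reg, uint16_t mask, unsigned int shift,
			  uint16_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint16_t r8 = 0x01ab;	/* example cached value of the register holding BCLKDIV */

	/* WM8961_BCLKDIV_MASK = 0x000f, WM8961_BCLKDIV_SHIFT = 0 */
	r8 = set_field(r8, 0x000f, 0, 0x4);
	printf("reg = 0x%04x\n", r8);	/* prints "reg = 0x01a4" */
	return 0;
}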
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 032dca22dbd3..d66efb0546ea 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -59,44 +59,7 @@ static const u16 wm8971_reg[] = {
0x0079, 0x0079, 0x0079, /* 40 */
};
-static inline unsigned int wm8971_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- if (reg < WM8971_REG_COUNT)
- return cache[reg];
-
- return -1;
-}
-
-static inline void wm8971_write_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
- if (reg < WM8971_REG_COUNT)
- cache[reg] = value;
-}
-
-static int wm8971_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[2];
-
- /* data is
- * D15..D9 WM8753 register offset
- * D8...D0 register data
- */
- data[0] = (reg << 1) | ((value >> 8) & 0x0001);
- data[1] = value & 0x00ff;
-
- wm8971_write_reg_cache (codec, reg, value);
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
-
-#define wm8971_reset(c) wm8971_write(c, WM8971_RESET, 0)
+#define wm8971_reset(c) snd_soc_write(c, WM8971_RESET, 0)
/* WM8971 Controls */
static const char *wm8971_bass[] = { "Linear Control", "Adaptive Boost" };
@@ -521,7 +484,7 @@ static int wm8971_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8971_write(codec, WM8971_IFACE, iface);
+ snd_soc_write(codec, WM8971_IFACE, iface);
return 0;
}
@@ -533,8 +496,8 @@ static int wm8971_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
struct wm8971_priv *wm8971 = codec->private_data;
- u16 iface = wm8971_read_reg_cache(codec, WM8971_IFACE) & 0x1f3;
- u16 srate = wm8971_read_reg_cache(codec, WM8971_SRATE) & 0x1c0;
+ u16 iface = snd_soc_read(codec, WM8971_IFACE) & 0x1f3;
+ u16 srate = snd_soc_read(codec, WM8971_SRATE) & 0x1c0;
int coeff = get_coeff(wm8971->sysclk, params_rate(params));
/* bit size */
@@ -553,9 +516,9 @@ static int wm8971_pcm_hw_params(struct snd_pcm_substream *substream,
}
/* set iface & srate */
- wm8971_write(codec, WM8971_IFACE, iface);
+ snd_soc_write(codec, WM8971_IFACE, iface);
if (coeff >= 0)
- wm8971_write(codec, WM8971_SRATE, srate |
+ snd_soc_write(codec, WM8971_SRATE, srate |
(coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);
return 0;
@@ -564,33 +527,33 @@ static int wm8971_pcm_hw_params(struct snd_pcm_substream *substream,
static int wm8971_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u16 mute_reg = wm8971_read_reg_cache(codec, WM8971_ADCDAC) & 0xfff7;
+ u16 mute_reg = snd_soc_read(codec, WM8971_ADCDAC) & 0xfff7;
if (mute)
- wm8971_write(codec, WM8971_ADCDAC, mute_reg | 0x8);
+ snd_soc_write(codec, WM8971_ADCDAC, mute_reg | 0x8);
else
- wm8971_write(codec, WM8971_ADCDAC, mute_reg);
+ snd_soc_write(codec, WM8971_ADCDAC, mute_reg);
return 0;
}
static int wm8971_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- u16 pwr_reg = wm8971_read_reg_cache(codec, WM8971_PWR1) & 0xfe3e;
+ u16 pwr_reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
switch (level) {
case SND_SOC_BIAS_ON:
/* set vmid to 50k and unmute dac */
- wm8971_write(codec, WM8971_PWR1, pwr_reg | 0x00c1);
+ snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x00c1);
break;
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
/* mute dac and set vmid to 500k, enable VREF */
- wm8971_write(codec, WM8971_PWR1, pwr_reg | 0x0140);
+ snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x0140);
break;
case SND_SOC_BIAS_OFF:
- wm8971_write(codec, WM8971_PWR1, 0x0001);
+ snd_soc_write(codec, WM8971_PWR1, 0x0001);
break;
}
codec->bias_level = level;
@@ -667,8 +630,8 @@ static int wm8971_resume(struct platform_device *pdev)
/* charge wm8971 caps */
if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
- reg = wm8971_read_reg_cache(codec, WM8971_PWR1) & 0xfe3e;
- wm8971_write(codec, WM8971_PWR1, reg | 0x01c0);
+ reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
+ snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
codec->bias_level = SND_SOC_BIAS_ON;
queue_delayed_work(wm8971_workq, &codec->delayed_work,
msecs_to_jiffies(1000));
@@ -677,15 +640,14 @@ static int wm8971_resume(struct platform_device *pdev)
return 0;
}
-static int wm8971_init(struct snd_soc_device *socdev)
+static int wm8971_init(struct snd_soc_device *socdev,
+ enum snd_soc_control_type control)
{
struct snd_soc_codec *codec = socdev->card->codec;
int reg, ret = 0;
codec->name = "WM8971";
codec->owner = THIS_MODULE;
- codec->read = wm8971_read_reg_cache;
- codec->write = wm8971_write;
codec->set_bias_level = wm8971_set_bias_level;
codec->dai = &wm8971_dai;
codec->reg_cache_size = ARRAY_SIZE(wm8971_reg);
@@ -695,42 +657,48 @@ static int wm8971_init(struct snd_soc_device *socdev)
if (codec->reg_cache == NULL)
return -ENOMEM;
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ printk(KERN_ERR "wm8971: failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
wm8971_reset(codec);
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
printk(KERN_ERR "wm8971: failed to create pcms\n");
- goto pcm_err;
+ goto err;
}
/* charge output caps - set vmid to 5k for quick power up */
- reg = wm8971_read_reg_cache(codec, WM8971_PWR1) & 0xfe3e;
- wm8971_write(codec, WM8971_PWR1, reg | 0x01c0);
+ reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
+ snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
codec->bias_level = SND_SOC_BIAS_STANDBY;
queue_delayed_work(wm8971_workq, &codec->delayed_work,
msecs_to_jiffies(1000));
/* set the update bits */
- reg = wm8971_read_reg_cache(codec, WM8971_LDAC);
- wm8971_write(codec, WM8971_LDAC, reg | 0x0100);
- reg = wm8971_read_reg_cache(codec, WM8971_RDAC);
- wm8971_write(codec, WM8971_RDAC, reg | 0x0100);
-
- reg = wm8971_read_reg_cache(codec, WM8971_LOUT1V);
- wm8971_write(codec, WM8971_LOUT1V, reg | 0x0100);
- reg = wm8971_read_reg_cache(codec, WM8971_ROUT1V);
- wm8971_write(codec, WM8971_ROUT1V, reg | 0x0100);
-
- reg = wm8971_read_reg_cache(codec, WM8971_LOUT2V);
- wm8971_write(codec, WM8971_LOUT2V, reg | 0x0100);
- reg = wm8971_read_reg_cache(codec, WM8971_ROUT2V);
- wm8971_write(codec, WM8971_ROUT2V, reg | 0x0100);
-
- reg = wm8971_read_reg_cache(codec, WM8971_LINVOL);
- wm8971_write(codec, WM8971_LINVOL, reg | 0x0100);
- reg = wm8971_read_reg_cache(codec, WM8971_RINVOL);
- wm8971_write(codec, WM8971_RINVOL, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8971_LDAC);
+ snd_soc_write(codec, WM8971_LDAC, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8971_RDAC);
+ snd_soc_write(codec, WM8971_RDAC, reg | 0x0100);
+
+ reg = snd_soc_read(codec, WM8971_LOUT1V);
+ snd_soc_write(codec, WM8971_LOUT1V, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8971_ROUT1V);
+ snd_soc_write(codec, WM8971_ROUT1V, reg | 0x0100);
+
+ reg = snd_soc_read(codec, WM8971_LOUT2V);
+ snd_soc_write(codec, WM8971_LOUT2V, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8971_ROUT2V);
+ snd_soc_write(codec, WM8971_ROUT2V, reg | 0x0100);
+
+ reg = snd_soc_read(codec, WM8971_LINVOL);
+ snd_soc_write(codec, WM8971_LINVOL, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8971_RINVOL);
+ snd_soc_write(codec, WM8971_RINVOL, reg | 0x0100);
snd_soc_add_controls(codec, wm8971_snd_controls,
ARRAY_SIZE(wm8971_snd_controls));
@@ -745,7 +713,7 @@ static int wm8971_init(struct snd_soc_device *socdev)
card_err:
snd_soc_free_pcms(socdev);
snd_soc_dapm_free(socdev);
-pcm_err:
+err:
kfree(codec->reg_cache);
return ret;
}
@@ -767,7 +735,7 @@ static int wm8971_i2c_probe(struct i2c_client *i2c,
codec->control_data = i2c;
- ret = wm8971_init(socdev);
+ ret = wm8971_init(socdev, SND_SOC_I2C);
if (ret < 0)
pr_err("failed to initialise WM8971\n");
@@ -877,7 +845,6 @@ static int wm8971_probe(struct platform_device *pdev)
#if defined (CONFIG_I2C) || defined (CONFIG_I2C_MODULE)
if (setup->i2c_address) {
- codec->hw_write = (hw_write_t)i2c_master_send;
ret = wm8971_add_i2c_device(pdev, setup);
}
#endif
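
The wm8971 hunks above are a mechanical conversion: the driver-private cache helpers and wm8971_write() are dropped in favour of the shared ASoC cache I/O selected by snd_soc_codec_set_cache_io(codec, 7, 9, control), i.e. a 7-bit register address and 9 data bits packed into two bytes on the wire. A standalone sketch of that framing, equivalent to what the removed wm8971_write() open-coded (illustrative only; the register address used here is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Pack a 7-bit register address and a 9-bit value into the two-byte frame
 * used by this family of codecs: D15..D9 = address, D8..D0 = data. */
static void pack_7_9(uint8_t reg, uint16_t val, uint8_t buf[2])
{
	buf[0] = (reg << 1) | ((val >> 8) & 0x01);
	buf[1] = val & 0xff;
}

int main(void)
{
	uint8_t buf[2];

	pack_7_9(0x19, 0x01c0, buf);		/* hypothetical register 0x19 */
	printf("%02x %02x\n", buf[0], buf[1]);	/* prints "33 c0" */
	return 0;
}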
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
new file mode 100644
index 000000000000..d8a013ab3177
--- /dev/null
+++ b/sound/soc/codecs/wm8974.c
@@ -0,0 +1,808 @@
+/*
+ * wm8974.c -- WM8974 ALSA SoC Audio driver
+ *
+ * Copyright 2006-2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Liam Girdwood <linux@wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8974.h"
+
+static const u16 wm8974_reg[WM8974_CACHEREGNUM] = {
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0050, 0x0000, 0x0140, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x00ff,
+ 0x0000, 0x0000, 0x0100, 0x00ff,
+ 0x0000, 0x0000, 0x012c, 0x002c,
+ 0x002c, 0x002c, 0x002c, 0x0000,
+ 0x0032, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0038, 0x000b, 0x0032, 0x0000,
+ 0x0008, 0x000c, 0x0093, 0x00e9,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0003, 0x0010, 0x0000, 0x0000,
+ 0x0000, 0x0002, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0039, 0x0000,
+ 0x0000,
+};
+
+#define WM8974_POWER1_BIASEN 0x08
+#define WM8974_POWER1_BUFIOEN 0x10
+
+struct wm8974_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[WM8974_CACHEREGNUM];
+};
+
+static struct snd_soc_codec *wm8974_codec;
+
+#define wm8974_reset(c) snd_soc_write(c, WM8974_RESET, 0)
+
+static const char *wm8974_companding[] = {"Off", "NC", "u-law", "A-law" };
+static const char *wm8974_deemp[] = {"None", "32kHz", "44.1kHz", "48kHz" };
+static const char *wm8974_eqmode[] = {"Capture", "Playback" };
+static const char *wm8974_bw[] = {"Narrow", "Wide" };
+static const char *wm8974_eq1[] = {"80Hz", "105Hz", "135Hz", "175Hz" };
+static const char *wm8974_eq2[] = {"230Hz", "300Hz", "385Hz", "500Hz" };
+static const char *wm8974_eq3[] = {"650Hz", "850Hz", "1.1kHz", "1.4kHz" };
+static const char *wm8974_eq4[] = {"1.8kHz", "2.4kHz", "3.2kHz", "4.1kHz" };
+static const char *wm8974_eq5[] = {"5.3kHz", "6.9kHz", "9kHz", "11.7kHz" };
+static const char *wm8974_alc[] = {"ALC", "Limiter" };
+
+static const struct soc_enum wm8974_enum[] = {
+ SOC_ENUM_SINGLE(WM8974_COMP, 1, 4, wm8974_companding), /* adc */
+ SOC_ENUM_SINGLE(WM8974_COMP, 3, 4, wm8974_companding), /* dac */
+ SOC_ENUM_SINGLE(WM8974_DAC, 4, 4, wm8974_deemp),
+ SOC_ENUM_SINGLE(WM8974_EQ1, 8, 2, wm8974_eqmode),
+
+ SOC_ENUM_SINGLE(WM8974_EQ1, 5, 4, wm8974_eq1),
+ SOC_ENUM_SINGLE(WM8974_EQ2, 8, 2, wm8974_bw),
+ SOC_ENUM_SINGLE(WM8974_EQ2, 5, 4, wm8974_eq2),
+ SOC_ENUM_SINGLE(WM8974_EQ3, 8, 2, wm8974_bw),
+
+ SOC_ENUM_SINGLE(WM8974_EQ3, 5, 4, wm8974_eq3),
+ SOC_ENUM_SINGLE(WM8974_EQ4, 8, 2, wm8974_bw),
+ SOC_ENUM_SINGLE(WM8974_EQ4, 5, 4, wm8974_eq4),
+ SOC_ENUM_SINGLE(WM8974_EQ5, 8, 2, wm8974_bw),
+
+ SOC_ENUM_SINGLE(WM8974_EQ5, 5, 4, wm8974_eq5),
+ SOC_ENUM_SINGLE(WM8974_ALC3, 8, 2, wm8974_alc),
+};
+
+static const char *wm8974_auxmode_text[] = { "Buffer", "Mixer" };
+
+static const struct soc_enum wm8974_auxmode =
+ SOC_ENUM_SINGLE(WM8974_INPUT, 3, 2, wm8974_auxmode_text);
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
+static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
+
+static const struct snd_kcontrol_new wm8974_snd_controls[] = {
+
+SOC_SINGLE("Digital Loopback Switch", WM8974_COMP, 0, 1, 0),
+
+SOC_ENUM("DAC Companding", wm8974_enum[1]),
+SOC_ENUM("ADC Companding", wm8974_enum[0]),
+
+SOC_ENUM("Playback De-emphasis", wm8974_enum[2]),
+SOC_SINGLE("DAC Inversion Switch", WM8974_DAC, 0, 1, 0),
+
+SOC_SINGLE_TLV("PCM Volume", WM8974_DACVOL, 0, 255, 0, digital_tlv),
+
+SOC_SINGLE("High Pass Filter Switch", WM8974_ADC, 8, 1, 0),
+SOC_SINGLE("High Pass Cut Off", WM8974_ADC, 4, 7, 0),
+SOC_SINGLE("ADC Inversion Switch", WM8974_ADC, 0, 1, 0),
+
+SOC_SINGLE_TLV("Capture Volume", WM8974_ADCVOL, 0, 255, 0, digital_tlv),
+
+SOC_ENUM("Equaliser Function", wm8974_enum[3]),
+SOC_ENUM("EQ1 Cut Off", wm8974_enum[4]),
+SOC_SINGLE_TLV("EQ1 Volume", WM8974_EQ1, 0, 24, 1, eq_tlv),
+
+SOC_ENUM("Equaliser EQ2 Bandwith", wm8974_enum[5]),
+SOC_ENUM("EQ2 Cut Off", wm8974_enum[6]),
+SOC_SINGLE_TLV("EQ2 Volume", WM8974_EQ2, 0, 24, 1, eq_tlv),
+
+SOC_ENUM("Equaliser EQ3 Bandwith", wm8974_enum[7]),
+SOC_ENUM("EQ3 Cut Off", wm8974_enum[8]),
+SOC_SINGLE_TLV("EQ3 Volume", WM8974_EQ3, 0, 24, 1, eq_tlv),
+
+SOC_ENUM("Equaliser EQ4 Bandwith", wm8974_enum[9]),
+SOC_ENUM("EQ4 Cut Off", wm8974_enum[10]),
+SOC_SINGLE_TLV("EQ4 Volume", WM8974_EQ4, 0, 24, 1, eq_tlv),
+
+SOC_ENUM("Equaliser EQ5 Bandwith", wm8974_enum[11]),
+SOC_ENUM("EQ5 Cut Off", wm8974_enum[12]),
+SOC_SINGLE_TLV("EQ5 Volume", WM8974_EQ5, 0, 24, 1, eq_tlv),
+
+SOC_SINGLE("DAC Playback Limiter Switch", WM8974_DACLIM1, 8, 1, 0),
+SOC_SINGLE("DAC Playback Limiter Decay", WM8974_DACLIM1, 4, 15, 0),
+SOC_SINGLE("DAC Playback Limiter Attack", WM8974_DACLIM1, 0, 15, 0),
+
+SOC_SINGLE("DAC Playback Limiter Threshold", WM8974_DACLIM2, 4, 7, 0),
+SOC_SINGLE("DAC Playback Limiter Boost", WM8974_DACLIM2, 0, 15, 0),
+
+SOC_SINGLE("ALC Enable Switch", WM8974_ALC1, 8, 1, 0),
+SOC_SINGLE("ALC Capture Max Gain", WM8974_ALC1, 3, 7, 0),
+SOC_SINGLE("ALC Capture Min Gain", WM8974_ALC1, 0, 7, 0),
+
+SOC_SINGLE("ALC Capture ZC Switch", WM8974_ALC2, 8, 1, 0),
+SOC_SINGLE("ALC Capture Hold", WM8974_ALC2, 4, 7, 0),
+SOC_SINGLE("ALC Capture Target", WM8974_ALC2, 0, 15, 0),
+
+SOC_ENUM("ALC Capture Mode", wm8974_enum[13]),
+SOC_SINGLE("ALC Capture Decay", WM8974_ALC3, 4, 15, 0),
+SOC_SINGLE("ALC Capture Attack", WM8974_ALC3, 0, 15, 0),
+
+SOC_SINGLE("ALC Capture Noise Gate Switch", WM8974_NGATE, 3, 1, 0),
+SOC_SINGLE("ALC Capture Noise Gate Threshold", WM8974_NGATE, 0, 7, 0),
+
+SOC_SINGLE("Capture PGA ZC Switch", WM8974_INPPGA, 7, 1, 0),
+SOC_SINGLE_TLV("Capture PGA Volume", WM8974_INPPGA, 0, 63, 0, inpga_tlv),
+
+SOC_SINGLE("Speaker Playback ZC Switch", WM8974_SPKVOL, 7, 1, 0),
+SOC_SINGLE("Speaker Playback Switch", WM8974_SPKVOL, 6, 1, 1),
+SOC_SINGLE_TLV("Speaker Playback Volume", WM8974_SPKVOL, 0, 63, 0, spk_tlv),
+
+SOC_ENUM("Aux Mode", wm8974_auxmode),
+
+SOC_SINGLE("Capture Boost(+20dB)", WM8974_ADCBOOST, 8, 1, 0),
+SOC_SINGLE("Mono Playback Switch", WM8974_MONOMIX, 6, 1, 1),
+};
+
+/* Speaker Output Mixer */
+static const struct snd_kcontrol_new wm8974_speaker_mixer_controls[] = {
+SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_SPKMIX, 1, 1, 0),
+SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_SPKMIX, 5, 1, 0),
+SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_SPKMIX, 0, 1, 1),
+};
+
+/* Mono Output Mixer */
+static const struct snd_kcontrol_new wm8974_mono_mixer_controls[] = {
+SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_MONOMIX, 1, 1, 0),
+SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_MONOMIX, 2, 1, 0),
+SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
+};
+
+/* Boost mixer */
+static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
+SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 0),
+};
+
+/* Input PGA */
+static const struct snd_kcontrol_new wm8974_inpga[] = {
+SOC_DAPM_SINGLE("Aux Switch", WM8974_INPUT, 2, 1, 0),
+SOC_DAPM_SINGLE("MicN Switch", WM8974_INPUT, 1, 1, 0),
+SOC_DAPM_SINGLE("MicP Switch", WM8974_INPUT, 0, 1, 0),
+};
+
+/* AUX Input boost vol */
+static const struct snd_kcontrol_new wm8974_aux_boost_controls =
+SOC_DAPM_SINGLE("Aux Volume", WM8974_ADCBOOST, 0, 7, 0);
+
+/* Mic Input boost vol */
+static const struct snd_kcontrol_new wm8974_mic_boost_controls =
+SOC_DAPM_SINGLE("Mic Volume", WM8974_ADCBOOST, 4, 7, 0);
+
+static const struct snd_soc_dapm_widget wm8974_dapm_widgets[] = {
+SND_SOC_DAPM_MIXER("Speaker Mixer", WM8974_POWER3, 2, 0,
+ &wm8974_speaker_mixer_controls[0],
+ ARRAY_SIZE(wm8974_speaker_mixer_controls)),
+SND_SOC_DAPM_MIXER("Mono Mixer", WM8974_POWER3, 3, 0,
+ &wm8974_mono_mixer_controls[0],
+ ARRAY_SIZE(wm8974_mono_mixer_controls)),
+SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8974_POWER3, 0, 0),
+SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8974_POWER2, 0, 0),
+SND_SOC_DAPM_PGA("Aux Input", WM8974_POWER1, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("SpkN Out", WM8974_POWER3, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("SpkP Out", WM8974_POWER3, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Mono Out", WM8974_POWER3, 7, 0, NULL, 0),
+
+SND_SOC_DAPM_MIXER("Input PGA", WM8974_POWER2, 2, 0, wm8974_inpga,
+ ARRAY_SIZE(wm8974_inpga)),
+SND_SOC_DAPM_MIXER("Boost Mixer", WM8974_POWER2, 4, 0,
+ wm8974_boost_mixer, ARRAY_SIZE(wm8974_boost_mixer)),
+
+SND_SOC_DAPM_MICBIAS("Mic Bias", WM8974_POWER1, 4, 0),
+
+SND_SOC_DAPM_INPUT("MICN"),
+SND_SOC_DAPM_INPUT("MICP"),
+SND_SOC_DAPM_INPUT("AUX"),
+SND_SOC_DAPM_OUTPUT("MONOOUT"),
+SND_SOC_DAPM_OUTPUT("SPKOUTP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTN"),
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+ /* Mono output mixer */
+ {"Mono Mixer", "PCM Playback Switch", "DAC"},
+ {"Mono Mixer", "Aux Playback Switch", "Aux Input"},
+ {"Mono Mixer", "Line Bypass Switch", "Boost Mixer"},
+
+ /* Speaker output mixer */
+ {"Speaker Mixer", "PCM Playback Switch", "DAC"},
+ {"Speaker Mixer", "Aux Playback Switch", "Aux Input"},
+ {"Speaker Mixer", "Line Bypass Switch", "Boost Mixer"},
+
+ /* Outputs */
+ {"Mono Out", NULL, "Mono Mixer"},
+ {"MONOOUT", NULL, "Mono Out"},
+ {"SpkN Out", NULL, "Speaker Mixer"},
+ {"SpkP Out", NULL, "Speaker Mixer"},
+ {"SPKOUTN", NULL, "SpkN Out"},
+ {"SPKOUTP", NULL, "SpkP Out"},
+
+ /* Boost Mixer */
+ {"ADC", NULL, "Boost Mixer"},
+ {"Boost Mixer", "Aux Switch", "Aux Input"},
+ {"Boost Mixer", NULL, "Input PGA"},
+ {"Boost Mixer", NULL, "MICP"},
+
+ /* Input PGA */
+ {"Input PGA", "Aux Switch", "Aux Input"},
+ {"Input PGA", "MicN Switch", "MICN"},
+ {"Input PGA", "MicP Switch", "MICP"},
+
+ /* Inputs */
+ {"Aux Input", NULL, "AUX"},
+};
+
+static int wm8974_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(codec, wm8974_dapm_widgets,
+ ARRAY_SIZE(wm8974_dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+ snd_soc_dapm_new_widgets(codec);
+ return 0;
+}
+
+struct pll_ {
+ unsigned int pre_div:4; /* prescale - 1 */
+ unsigned int n:4;
+ unsigned int k;
+};
+
+static struct pll_ pll_div;
+
+/* Scale factor for the fractional (2^24-step) part of the PLL divider,
+ * multiplied by 10 so the final decimal digit can be rounded later */
+#define FIXED_PLL_SIZE ((1 << 24) * 10)
+
+static void pll_factors(unsigned int target, unsigned int source)
+{
+ unsigned long long Kpart;
+ unsigned int K, Ndiv, Nmod;
+
+ Ndiv = target / source;
+ if (Ndiv < 6) {
+ source >>= 1;
+ pll_div.pre_div = 1;
+ Ndiv = target / source;
+ } else
+ pll_div.pre_div = 0;
+
+ if ((Ndiv < 6) || (Ndiv > 12))
+ printk(KERN_WARNING
+ "WM8974 N value %u outwith recommended range!\n",
+ Ndiv);
+
+ pll_div.n = Ndiv;
+ Nmod = target % source;
+ Kpart = FIXED_PLL_SIZE * (long long)Nmod;
+
+ do_div(Kpart, source);
+
+ K = Kpart & 0xFFFFFFFF;
+
+ /* Check if we need to round */
+ if ((K % 10) >= 5)
+ K += 5;
+
+ /* Move down to proper range now rounding is done */
+ K /= 10;
+
+ pll_div.k = K;
+}
+
+static int wm8974_set_dai_pll(struct snd_soc_dai *codec_dai,
+ int pll_id, unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u16 reg;
+
+ if (freq_in == 0 || freq_out == 0) {
+ /* Clock CODEC directly from MCLK */
+ reg = snd_soc_read(codec, WM8974_CLOCK);
+ snd_soc_write(codec, WM8974_CLOCK, reg & 0x0ff);
+
+ /* Turn off PLL */
+ reg = snd_soc_read(codec, WM8974_POWER1);
+ snd_soc_write(codec, WM8974_POWER1, reg & 0x1df);
+ return 0;
+ }
+
+ pll_factors(freq_out*4, freq_in);
+
+ snd_soc_write(codec, WM8974_PLLN, (pll_div.pre_div << 4) | pll_div.n);
+ snd_soc_write(codec, WM8974_PLLK1, pll_div.k >> 18);
+ snd_soc_write(codec, WM8974_PLLK2, (pll_div.k >> 9) & 0x1ff);
+ snd_soc_write(codec, WM8974_PLLK3, pll_div.k & 0x1ff);
+ reg = snd_soc_read(codec, WM8974_POWER1);
+ snd_soc_write(codec, WM8974_POWER1, reg | 0x020);
+
+ /* Run CODEC from PLL instead of MCLK */
+ reg = snd_soc_read(codec, WM8974_CLOCK);
+ snd_soc_write(codec, WM8974_CLOCK, reg | 0x100);
+
+ return 0;
+}
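/*
 * Worked example (illustrative): with a 12 MHz MCLK and a desired
 * 12.288 MHz SYSCLK, wm8974_set_dai_pll() is called with
 * freq_in = 12000000 and freq_out = 12288000, so pll_factors() sees
 * target = 49152000:
 *
 *   49152000 / 12000000 = 4 (< 6)  -> pre_div = 1, source halved to 6 MHz
 *   49152000 /  6000000 = 8        -> N = 8
 *   fraction 1152000 / 6000000 = 0.192, 0.192 * 2^24 -> K = 3221225 = 0x3126e9
 *
 * which is programmed as PLLN = 0x18 ((pre_div << 4) | N), PLLK1 = 0x0c,
 * PLLK2 = 0x093 and PLLK3 = 0x0e9.
 */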
+
+/*
+ * Configure WM8974 clock dividers.
+ */
+static int wm8974_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
+ int div_id, int div)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u16 reg;
+
+ switch (div_id) {
+ case WM8974_OPCLKDIV:
+ reg = snd_soc_read(codec, WM8974_GPIO) & 0x1cf;
+ snd_soc_write(codec, WM8974_GPIO, reg | div);
+ break;
+ case WM8974_MCLKDIV:
+ reg = snd_soc_read(codec, WM8974_CLOCK) & 0x11f;
+ snd_soc_write(codec, WM8974_CLOCK, reg | div);
+ break;
+ case WM8974_ADCCLK:
+ reg = snd_soc_read(codec, WM8974_ADC) & 0x1f7;
+ snd_soc_write(codec, WM8974_ADC, reg | div);
+ break;
+ case WM8974_DACCLK:
+ reg = snd_soc_read(codec, WM8974_DAC) & 0x1f7;
+ snd_soc_write(codec, WM8974_DAC, reg | div);
+ break;
+ case WM8974_BCLKDIV:
+ reg = snd_soc_read(codec, WM8974_CLOCK) & 0x1e3;
+ snd_soc_write(codec, WM8974_CLOCK, reg | div);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wm8974_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u16 iface = 0;
+ u16 clk = snd_soc_read(codec, WM8974_CLOCK) & 0x1fe;
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ clk |= 0x0001;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ iface |= 0x0010;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ iface |= 0x0008;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ iface |= 0x0018;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ iface |= 0x0180;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ iface |= 0x0100;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ iface |= 0x0080;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_write(codec, WM8974_IFACE, iface);
+ snd_soc_write(codec, WM8974_CLOCK, clk);
+ return 0;
+}
+
+static int wm8974_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 iface = snd_soc_read(codec, WM8974_IFACE) & 0x19f;
+ u16 adn = snd_soc_read(codec, WM8974_ADD) & 0x1f1;
+
+ /* bit size */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ iface |= 0x0020;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ iface |= 0x0040;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ iface |= 0x0060;
+ break;
+ }
+
+ /* filter coefficient: params_rate() returns the rate in Hz, so match
+ * plain numbers rather than the SNDRV_PCM_RATE_* bit-mask constants */
+ switch (params_rate(params)) {
+ case 8000:
+ adn |= 0x5 << 1;
+ break;
+ case 11025:
+ adn |= 0x4 << 1;
+ break;
+ case 16000:
+ adn |= 0x3 << 1;
+ break;
+ case 22050:
+ adn |= 0x2 << 1;
+ break;
+ case 32000:
+ adn |= 0x1 << 1;
+ break;
+ case 44100:
+ case 48000:
+ break;
+ }
+
+ snd_soc_write(codec, WM8974_IFACE, iface);
+ snd_soc_write(codec, WM8974_ADD, adn);
+ return 0;
+}
+
+static int wm8974_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf;
+
+ if (mute)
+ snd_soc_write(codec, WM8974_DAC, mute_reg | 0x40);
+ else
+ snd_soc_write(codec, WM8974_DAC, mute_reg);
+ return 0;
+}
+
+/* TODO (Liam): make this lower power with DAPM */
+static int wm8974_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ u16 power1 = snd_soc_read(codec, WM8974_POWER1) & ~0x3;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ power1 |= 0x1; /* VMID 50k */
+ snd_soc_write(codec, WM8974_POWER1, power1);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN;
+
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ /* Initial cap charge at VMID 5k */
+ snd_soc_write(codec, WM8974_POWER1, power1 | 0x3);
+ mdelay(100);
+ }
+
+ power1 |= 0x2; /* VMID 500k */
+ snd_soc_write(codec, WM8974_POWER1, power1);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_write(codec, WM8974_POWER1, 0);
+ snd_soc_write(codec, WM8974_POWER2, 0);
+ snd_soc_write(codec, WM8974_POWER3, 0);
+ break;
+ }
+
+ codec->bias_level = level;
+ return 0;
+}
+
+#define WM8974_RATES (SNDRV_PCM_RATE_8000_48000)
+
+#define WM8974_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops wm8974_ops = {
+ .hw_params = wm8974_pcm_hw_params,
+ .digital_mute = wm8974_mute,
+ .set_fmt = wm8974_set_dai_fmt,
+ .set_clkdiv = wm8974_set_dai_clkdiv,
+ .set_pll = wm8974_set_dai_pll,
+};
+
+struct snd_soc_dai wm8974_dai = {
+ .name = "WM8974 HiFi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2, /* Only 1 channel of data */
+ .rates = WM8974_RATES,
+ .formats = WM8974_FORMATS,},
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2, /* Only 1 channel of data */
+ .rates = WM8974_RATES,
+ .formats = WM8974_FORMATS,},
+ .ops = &wm8974_ops,
+ .symmetric_rates = 1,
+};
+EXPORT_SYMBOL_GPL(wm8974_dai);
+
+static int wm8974_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8974_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static int wm8974_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+ int i;
+ u8 data[2];
+ u16 *cache = codec->reg_cache;
+
+ /* Sync reg_cache with the hardware */
+ for (i = 0; i < ARRAY_SIZE(wm8974_reg); i++) {
+ data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
+ data[1] = cache[i] & 0x00ff;
+ codec->hw_write(codec->control_data, data, 2);
+ }
+ wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ wm8974_set_bias_level(codec, codec->suspend_bias_level);
+ return 0;
+}
+
+static int wm8974_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm8974_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm8974_codec;
+ codec = wm8974_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ snd_soc_add_controls(codec, wm8974_snd_controls,
+ ARRAY_SIZE(wm8974_snd_controls));
+ wm8974_add_widgets(codec);
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card: %d\n", ret);
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+pcm_err:
+ return ret;
+}
+
+/* power down chip */
+static int wm8974_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_wm8974 = {
+ .probe = wm8974_probe,
+ .remove = wm8974_remove,
+ .suspend = wm8974_suspend,
+ .resume = wm8974_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8974);
+
+static __devinit int wm8974_register(struct wm8974_priv *wm8974)
+{
+ int ret;
+ struct snd_soc_codec *codec = &wm8974->codec;
+
+ if (wm8974_codec) {
+ dev_err(codec->dev, "Another WM8974 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = wm8974;
+ codec->name = "WM8974";
+ codec->owner = THIS_MODULE;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8974_set_bias_level;
+ codec->dai = &wm8974_dai;
+ codec->num_dai = 1;
+ codec->reg_cache_size = WM8974_CACHEREGNUM;
+ codec->reg_cache = &wm8974->reg_cache;
+
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ memcpy(codec->reg_cache, wm8974_reg, sizeof(wm8974_reg));
+
+ ret = wm8974_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ goto err;
+ }
+
+ wm8974_dai.dev = codec->dev;
+
+ wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ wm8974_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ goto err;
+ }
+
+ ret = snd_soc_register_dai(&wm8974_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ goto err_codec;
+ }
+
+ return 0;
+
+err_codec:
+ snd_soc_unregister_codec(codec);
+err:
+ kfree(wm8974);
+ return ret;
+}
+
+static __devexit void wm8974_unregister(struct wm8974_priv *wm8974)
+{
+ wm8974_set_bias_level(&wm8974->codec, SND_SOC_BIAS_OFF);
+ snd_soc_unregister_dai(&wm8974_dai);
+ snd_soc_unregister_codec(&wm8974->codec);
+ kfree(wm8974);
+ wm8974_codec = NULL;
+}
+
+static __devinit int wm8974_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8974_priv *wm8974;
+ struct snd_soc_codec *codec;
+
+ wm8974 = kzalloc(sizeof(struct wm8974_priv), GFP_KERNEL);
+ if (wm8974 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8974->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+
+ i2c_set_clientdata(i2c, wm8974);
+ codec->control_data = i2c;
+
+ codec->dev = &i2c->dev;
+
+ return wm8974_register(wm8974);
+}
+
+static __devexit int wm8974_i2c_remove(struct i2c_client *client)
+{
+ struct wm8974_priv *wm8974 = i2c_get_clientdata(client);
+ wm8974_unregister(wm8974);
+ return 0;
+}
+
+static const struct i2c_device_id wm8974_i2c_id[] = {
+ { "wm8974", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8974_i2c_id);
+
+static struct i2c_driver wm8974_i2c_driver = {
+ .driver = {
+ .name = "WM8974",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8974_i2c_probe,
+ .remove = __devexit_p(wm8974_i2c_remove),
+ .id_table = wm8974_i2c_id,
+};
+
+static int __init wm8974_modinit(void)
+{
+ return i2c_add_driver(&wm8974_i2c_driver);
+}
+module_init(wm8974_modinit);
+
+static void __exit wm8974_exit(void)
+{
+ i2c_del_driver(&wm8974_i2c_driver);
+}
+module_exit(wm8974_exit);
+
+MODULE_DESCRIPTION("ASoC WM8974 driver");
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8974.h b/sound/soc/codecs/wm8974.h
new file mode 100644
index 000000000000..98de9562d4d2
--- /dev/null
+++ b/sound/soc/codecs/wm8974.h
@@ -0,0 +1,99 @@
+/*
+ * wm8974.h -- WM8974 SoC Audio driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8974_H
+#define _WM8974_H
+
+/* WM8974 register space */
+
+#define WM8974_RESET 0x0
+#define WM8974_POWER1 0x1
+#define WM8974_POWER2 0x2
+#define WM8974_POWER3 0x3
+#define WM8974_IFACE 0x4
+#define WM8974_COMP 0x5
+#define WM8974_CLOCK 0x6
+#define WM8974_ADD 0x7
+#define WM8974_GPIO 0x8
+#define WM8974_DAC 0xa
+#define WM8974_DACVOL 0xb
+#define WM8974_ADC 0xe
+#define WM8974_ADCVOL 0xf
+#define WM8974_EQ1 0x12
+#define WM8974_EQ2 0x13
+#define WM8974_EQ3 0x14
+#define WM8974_EQ4 0x15
+#define WM8974_EQ5 0x16
+#define WM8974_DACLIM1 0x18
+#define WM8974_DACLIM2 0x19
+#define WM8974_NOTCH1 0x1b
+#define WM8974_NOTCH2 0x1c
+#define WM8974_NOTCH3 0x1d
+#define WM8974_NOTCH4 0x1e
+#define WM8974_ALC1 0x20
+#define WM8974_ALC2 0x21
+#define WM8974_ALC3 0x22
+#define WM8974_NGATE 0x23
+#define WM8974_PLLN 0x24
+#define WM8974_PLLK1 0x25
+#define WM8974_PLLK2 0x26
+#define WM8974_PLLK3 0x27
+#define WM8974_ATTEN 0x28
+#define WM8974_INPUT 0x2c
+#define WM8974_INPPGA 0x2d
+#define WM8974_ADCBOOST 0x2f
+#define WM8974_OUTPUT 0x31
+#define WM8974_SPKMIX 0x32
+#define WM8974_SPKVOL 0x36
+#define WM8974_MONOMIX 0x38
+
+#define WM8974_CACHEREGNUM 57
+
+/* Clock divider Id's */
+#define WM8974_OPCLKDIV 0
+#define WM8974_MCLKDIV 1
+#define WM8974_ADCCLK 2
+#define WM8974_DACCLK 3
+#define WM8974_BCLKDIV 4
+
+/* DAC clock dividers */
+#define WM8974_DACCLK_F2 (1 << 3)
+#define WM8974_DACCLK_F4 (0 << 3)
+
+/* ADC clock dividers */
+#define WM8974_ADCCLK_F2 (1 << 3)
+#define WM8974_ADCCLK_F4 (0 << 3)
+
+/* PLL Out dividers */
+#define WM8974_OPCLKDIV_1 (0 << 4)
+#define WM8974_OPCLKDIV_2 (1 << 4)
+#define WM8974_OPCLKDIV_3 (2 << 4)
+#define WM8974_OPCLKDIV_4 (3 << 4)
+
+/* BCLK clock dividers */
+#define WM8974_BCLKDIV_1 (0 << 2)
+#define WM8974_BCLKDIV_2 (1 << 2)
+#define WM8974_BCLKDIV_4 (2 << 2)
+#define WM8974_BCLKDIV_8 (3 << 2)
+#define WM8974_BCLKDIV_16 (4 << 2)
+#define WM8974_BCLKDIV_32 (5 << 2)
+
+/* MCLK clock dividers */
+#define WM8974_MCLKDIV_1 (0 << 5)
+#define WM8974_MCLKDIV_1_5 (1 << 5)
+#define WM8974_MCLKDIV_2 (2 << 5)
+#define WM8974_MCLKDIV_3 (3 << 5)
+#define WM8974_MCLKDIV_4 (4 << 5)
+#define WM8974_MCLKDIV_6 (5 << 5)
+#define WM8974_MCLKDIV_8 (6 << 5)
+#define WM8974_MCLKDIV_12 (7 << 5)
+
+extern struct snd_soc_dai wm8974_dai;
+extern struct snd_soc_codec_device soc_codec_dev_wm8974;
+
+#endif
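
The clock-divider and PLL IDs above are what a machine driver passes into the codec DAI ops defined in wm8974.c. A minimal sketch of such a board-level hw_params callback, assuming the ASoC machine-driver API of this kernel generation; the clock rates, divider choice and runtime layout are illustrative rather than taken from this patch:

#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
#include "../codecs/wm8974.h"

static int board_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
	int ret;

	/* I2S, normal clock polarity, codec is bit/frame clock master */
	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0)
		return ret;

	/* derive the audio clock from a 12 MHz MCLK via the on-chip PLL */
	ret = snd_soc_dai_set_pll(codec_dai, 0, 12000000, 12288000);
	if (ret < 0)
		return ret;

	/* then divide it down before it reaches the DAC/ADC */
	return snd_soc_dai_set_clkdiv(codec_dai, WM8974_MCLKDIV,
				      WM8974_MCLKDIV_2);
}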
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index 8c0fdf84aac3..3f530f8a972a 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -57,50 +57,7 @@ struct wm8988_priv {
};
-/*
- * read wm8988 register cache
- */
-static inline unsigned int wm8988_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- if (reg > WM8988_NUM_REG)
- return -1;
- return cache[reg];
-}
-
-/*
- * write wm8988 register cache
- */
-static inline void wm8988_write_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
- if (reg > WM8988_NUM_REG)
- return;
- cache[reg] = value;
-}
-
-static int wm8988_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[2];
-
- /* data is
- * D15..D9 WM8753 register offset
- * D8...D0 register data
- */
- data[0] = (reg << 1) | ((value >> 8) & 0x0001);
- data[1] = value & 0x00ff;
-
- wm8988_write_reg_cache(codec, reg, value);
- if (codec->hw_write(codec->control_data, data, 2) == 2)
- return 0;
- else
- return -EIO;
-}
-
-#define wm8988_reset(c) wm8988_write(c, WM8988_RESET, 0)
+#define wm8988_reset(c) snd_soc_write(c, WM8988_RESET, 0)
/*
* WM8988 Controls
@@ -226,15 +183,15 @@ static int wm8988_lrc_control(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- u16 adctl2 = wm8988_read_reg_cache(codec, WM8988_ADCTL2);
+ u16 adctl2 = snd_soc_read(codec, WM8988_ADCTL2);
/* Use the DAC to gate LRC if active, otherwise use ADC */
- if (wm8988_read_reg_cache(codec, WM8988_PWR2) & 0x180)
+ if (snd_soc_read(codec, WM8988_PWR2) & 0x180)
adctl2 &= ~0x4;
else
adctl2 |= 0x4;
- return wm8988_write(codec, WM8988_ADCTL2, adctl2);
+ return snd_soc_write(codec, WM8988_ADCTL2, adctl2);
}
static const char *wm8988_line_texts[] = {
@@ -619,7 +576,7 @@ static int wm8988_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8988_write(codec, WM8988_IFACE, iface);
+ snd_soc_write(codec, WM8988_IFACE, iface);
return 0;
}
@@ -653,8 +610,8 @@ static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
struct wm8988_priv *wm8988 = codec->private_data;
- u16 iface = wm8988_read_reg_cache(codec, WM8988_IFACE) & 0x1f3;
- u16 srate = wm8988_read_reg_cache(codec, WM8988_SRATE) & 0x180;
+ u16 iface = snd_soc_read(codec, WM8988_IFACE) & 0x1f3;
+ u16 srate = snd_soc_read(codec, WM8988_SRATE) & 0x180;
int coeff;
coeff = get_coeff(wm8988->sysclk, params_rate(params));
@@ -685,9 +642,9 @@ static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
}
/* set iface & srate */
- wm8988_write(codec, WM8988_IFACE, iface);
+ snd_soc_write(codec, WM8988_IFACE, iface);
if (coeff >= 0)
- wm8988_write(codec, WM8988_SRATE, srate |
+ snd_soc_write(codec, WM8988_SRATE, srate |
(coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);
return 0;
@@ -696,19 +653,19 @@ static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
static int wm8988_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u16 mute_reg = wm8988_read_reg_cache(codec, WM8988_ADCDAC) & 0xfff7;
+ u16 mute_reg = snd_soc_read(codec, WM8988_ADCDAC) & 0xfff7;
if (mute)
- wm8988_write(codec, WM8988_ADCDAC, mute_reg | 0x8);
+ snd_soc_write(codec, WM8988_ADCDAC, mute_reg | 0x8);
else
- wm8988_write(codec, WM8988_ADCDAC, mute_reg);
+ snd_soc_write(codec, WM8988_ADCDAC, mute_reg);
return 0;
}
static int wm8988_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- u16 pwr_reg = wm8988_read_reg_cache(codec, WM8988_PWR1) & ~0x1c1;
+ u16 pwr_reg = snd_soc_read(codec, WM8988_PWR1) & ~0x1c1;
switch (level) {
case SND_SOC_BIAS_ON:
@@ -716,24 +673,24 @@ static int wm8988_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
/* VREF, VMID=2x50k, digital enabled */
- wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x00c0);
+ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x00c0);
break;
case SND_SOC_BIAS_STANDBY:
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* VREF, VMID=2x5k */
- wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);
+ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);
/* Charge caps */
msleep(100);
}
/* VREF, VMID=2*500k, digital stopped */
- wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x0141);
+ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x0141);
break;
case SND_SOC_BIAS_OFF:
- wm8988_write(codec, WM8988_PWR1, 0x0000);
+ snd_soc_write(codec, WM8988_PWR1, 0x0000);
break;
}
codec->bias_level = level;
@@ -868,7 +825,8 @@ struct snd_soc_codec_device soc_codec_dev_wm8988 = {
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8988);
-static int wm8988_register(struct wm8988_priv *wm8988)
+static int wm8988_register(struct wm8988_priv *wm8988,
+ enum snd_soc_control_type control)
{
struct snd_soc_codec *codec = &wm8988->codec;
int ret;
@@ -887,8 +845,6 @@ static int wm8988_register(struct wm8988_priv *wm8988)
codec->private_data = wm8988;
codec->name = "WM8988";
codec->owner = THIS_MODULE;
- codec->read = wm8988_read_reg_cache;
- codec->write = wm8988_write;
codec->dai = &wm8988_dai;
codec->num_dai = 1;
codec->reg_cache_size = ARRAY_SIZE(wm8988->reg_cache);
@@ -899,23 +855,29 @@ static int wm8988_register(struct wm8988_priv *wm8988)
memcpy(codec->reg_cache, wm8988_reg,
sizeof(wm8988_reg));
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
ret = wm8988_reset(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset\n");
- return ret;
+ goto err;
}
/* set the update bits (we always update left then right) */
- reg = wm8988_read_reg_cache(codec, WM8988_RADC);
- wm8988_write(codec, WM8988_RADC, reg | 0x100);
- reg = wm8988_read_reg_cache(codec, WM8988_RDAC);
- wm8988_write(codec, WM8988_RDAC, reg | 0x0100);
- reg = wm8988_read_reg_cache(codec, WM8988_ROUT1V);
- wm8988_write(codec, WM8988_ROUT1V, reg | 0x0100);
- reg = wm8988_read_reg_cache(codec, WM8988_ROUT2V);
- wm8988_write(codec, WM8988_ROUT2V, reg | 0x0100);
- reg = wm8988_read_reg_cache(codec, WM8988_RINVOL);
- wm8988_write(codec, WM8988_RINVOL, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8988_RADC);
+ snd_soc_write(codec, WM8988_RADC, reg | 0x100);
+ reg = snd_soc_read(codec, WM8988_RDAC);
+ snd_soc_write(codec, WM8988_RDAC, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8988_ROUT1V);
+ snd_soc_write(codec, WM8988_ROUT1V, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8988_ROUT2V);
+ snd_soc_write(codec, WM8988_ROUT2V, reg | 0x0100);
+ reg = snd_soc_read(codec, WM8988_RINVOL);
+ snd_soc_write(codec, WM8988_RINVOL, reg | 0x0100);
wm8988_set_bias_level(&wm8988->codec, SND_SOC_BIAS_STANDBY);
@@ -926,18 +888,20 @@ static int wm8988_register(struct wm8988_priv *wm8988)
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(codec->dev, "Failed to register codec: %d\n", ret);
- return ret;
+ goto err;
}
ret = snd_soc_register_dai(&wm8988_dai);
if (ret != 0) {
dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
snd_soc_unregister_codec(codec);
- return ret;
+ goto err_codec;
}
return 0;
+err_codec:
+ snd_soc_unregister_codec(codec);
err:
kfree(wm8988);
return ret;
@@ -964,14 +928,13 @@ static int wm8988_i2c_probe(struct i2c_client *i2c,
return -ENOMEM;
codec = &wm8988->codec;
- codec->hw_write = (hw_write_t)i2c_master_send;
i2c_set_clientdata(i2c, wm8988);
codec->control_data = i2c;
codec->dev = &i2c->dev;
- return wm8988_register(wm8988);
+ return wm8988_register(wm8988, SND_SOC_I2C);
}
static int wm8988_i2c_remove(struct i2c_client *client)
@@ -981,6 +944,21 @@ static int wm8988_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8988_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm8988_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm8988_i2c_suspend NULL
+#define wm8988_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm8988_i2c_id[] = {
{ "wm8988", 0 },
{ }
@@ -994,35 +972,13 @@ static struct i2c_driver wm8988_i2c_driver = {
},
.probe = wm8988_i2c_probe,
.remove = wm8988_i2c_remove,
+ .suspend = wm8988_i2c_suspend,
+ .resume = wm8988_i2c_resume,
.id_table = wm8988_i2c_id,
};
#endif
#if defined(CONFIG_SPI_MASTER)
-static int wm8988_spi_write(struct spi_device *spi, const char *data, int len)
-{
- struct spi_transfer t;
- struct spi_message m;
- u8 msg[2];
-
- if (len <= 0)
- return 0;
-
- msg[0] = data[0];
- msg[1] = data[1];
-
- spi_message_init(&m);
- memset(&t, 0, (sizeof t));
-
- t.tx_buf = &msg[0];
- t.len = len;
-
- spi_message_add_tail(&t, &m);
- spi_sync(spi, &m);
-
- return len;
-}
-
static int __devinit wm8988_spi_probe(struct spi_device *spi)
{
struct wm8988_priv *wm8988;
@@ -1033,13 +989,12 @@ static int __devinit wm8988_spi_probe(struct spi_device *spi)
return -ENOMEM;
codec = &wm8988->codec;
- codec->hw_write = (hw_write_t)wm8988_spi_write;
codec->control_data = spi;
codec->dev = &spi->dev;
dev_set_drvdata(&spi->dev, wm8988);
- return wm8988_register(wm8988);
+ return wm8988_register(wm8988, SND_SOC_SPI);
}
static int __devexit wm8988_spi_remove(struct spi_device *spi)
@@ -1051,6 +1006,21 @@ static int __devexit wm8988_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8988_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&spi->dev);
+}
+
+static int wm8988_spi_resume(struct spi_device *spi)
+{
+ return snd_soc_resume_device(&spi->dev);
+}
+#else
+#define wm8988_spi_suspend NULL
+#define wm8988_spi_resume NULL
+#endif
+
static struct spi_driver wm8988_spi_driver = {
.driver = {
.name = "wm8988",
@@ -1059,6 +1029,8 @@ static struct spi_driver wm8988_spi_driver = {
},
.probe = wm8988_spi_probe,
.remove = __devexit_p(wm8988_spi_remove),
+ .suspend = wm8988_spi_suspend,
+ .resume = wm8988_spi_resume,
};
#endif
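
The wm8988 hunks above drop the driver-private cache read/write helpers and the hw_write plumbing in favour of the shared ASoC register I/O: snd_soc_codec_set_cache_io(codec, 7, 9, control) declares 7 register-address bits and 9 value bits, and snd_soc_read()/snd_soc_write() then handle the cache and the bus transfer. Purely as a stand-alone illustration (not part of this patch), the two-byte wire format that the removed wm8988_write() built by hand can be modelled like this:

#include <stdint.h>
#include <stdio.h>

/* Model of the 7-bit register / 9-bit value packing formerly done by
 * wm8988_write(): D15..D9 carry the register, D8..D0 the value. */
static void wm8988_pack(unsigned int reg, unsigned int value, uint8_t buf[2])
{
	buf[0] = (reg << 1) | ((value >> 8) & 0x01);
	buf[1] = value & 0xff;
}

int main(void)
{
	uint8_t buf[2];

	wm8988_pack(0x19, 0x1c1, buf);		/* register and value are examples only */
	printf("%02x %02x\n", buf[0], buf[1]);	/* prints "33 c1" */
	return 0;
}
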
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index d029818350e9..2d702db4131d 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -108,53 +108,7 @@ static const u16 wm8990_reg[] = {
0x0000, /* R63 - Driver internal */
};
-/*
- * read wm8990 register cache
- */
-static inline unsigned int wm8990_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- BUG_ON(reg >= ARRAY_SIZE(wm8990_reg));
- return cache[reg];
-}
-
-/*
- * write wm8990 register cache
- */
-static inline void wm8990_write_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
-{
- u16 *cache = codec->reg_cache;
-
- /* Reset register and reserved registers are uncached */
- if (reg == 0 || reg >= ARRAY_SIZE(wm8990_reg))
- return;
-
- cache[reg] = value;
-}
-
-/*
- * write to the wm8990 register space
- */
-static int wm8990_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u8 data[3];
-
- data[0] = reg & 0xFF;
- data[1] = (value >> 8) & 0xFF;
- data[2] = value & 0xFF;
-
- wm8990_write_reg_cache(codec, reg, value);
-
- if (codec->hw_write(codec->control_data, data, 3) == 2)
- return 0;
- else
- return -EIO;
-}
-
-#define wm8990_reset(c) wm8990_write(c, WM8990_RESET, 0)
+#define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)
static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
@@ -187,8 +141,8 @@ static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
return ret;
/* now hit the volume update bits (always bit 8) */
- val = wm8990_read_reg_cache(codec, reg);
- return wm8990_write(codec, reg, val | 0x0100);
+ val = snd_soc_read(codec, reg);
+ return snd_soc_write(codec, reg, val | 0x0100);
}
#define SOC_WM899X_OUTPGA_SINGLE_R_TLV(xname, reg, shift, max, invert,\
@@ -427,8 +381,8 @@ static int inmixer_event(struct snd_soc_dapm_widget *w,
{
u16 reg, fakepower;
- reg = wm8990_read_reg_cache(w->codec, WM8990_POWER_MANAGEMENT_2);
- fakepower = wm8990_read_reg_cache(w->codec, WM8990_INTDRIVBITS);
+ reg = snd_soc_read(w->codec, WM8990_POWER_MANAGEMENT_2);
+ fakepower = snd_soc_read(w->codec, WM8990_INTDRIVBITS);
if (fakepower & ((1 << WM8990_INMIXL_PWR_BIT) |
(1 << WM8990_AINLMUX_PWR_BIT))) {
@@ -443,7 +397,7 @@ static int inmixer_event(struct snd_soc_dapm_widget *w,
} else {
reg &= ~WM8990_AINL_ENA;
}
- wm8990_write(w->codec, WM8990_POWER_MANAGEMENT_2, reg);
+ snd_soc_write(w->codec, WM8990_POWER_MANAGEMENT_2, reg);
return 0;
}
@@ -457,7 +411,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
switch (reg_shift) {
case WM8990_SPEAKER_MIXER | (WM8990_LDSPK_BIT << 8) :
- reg = wm8990_read_reg_cache(w->codec, WM8990_OUTPUT_MIXER1);
+ reg = snd_soc_read(w->codec, WM8990_OUTPUT_MIXER1);
if (reg & WM8990_LDLO) {
printk(KERN_WARNING
"Cannot set as Output Mixer 1 LDLO Set\n");
@@ -465,7 +419,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
}
break;
case WM8990_SPEAKER_MIXER | (WM8990_RDSPK_BIT << 8):
- reg = wm8990_read_reg_cache(w->codec, WM8990_OUTPUT_MIXER2);
+ reg = snd_soc_read(w->codec, WM8990_OUTPUT_MIXER2);
if (reg & WM8990_RDRO) {
printk(KERN_WARNING
"Cannot set as Output Mixer 2 RDRO Set\n");
@@ -473,7 +427,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
}
break;
case WM8990_OUTPUT_MIXER1 | (WM8990_LDLO_BIT << 8):
- reg = wm8990_read_reg_cache(w->codec, WM8990_SPEAKER_MIXER);
+ reg = snd_soc_read(w->codec, WM8990_SPEAKER_MIXER);
if (reg & WM8990_LDSPK) {
printk(KERN_WARNING
"Cannot set as Speaker Mixer LDSPK Set\n");
@@ -481,7 +435,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
}
break;
case WM8990_OUTPUT_MIXER2 | (WM8990_RDRO_BIT << 8):
- reg = wm8990_read_reg_cache(w->codec, WM8990_SPEAKER_MIXER);
+ reg = snd_soc_read(w->codec, WM8990_SPEAKER_MIXER);
if (reg & WM8990_RDSPK) {
printk(KERN_WARNING
"Cannot set as Speaker Mixer RDSPK Set\n");
@@ -1029,24 +983,24 @@ static int wm8990_set_dai_pll(struct snd_soc_dai *codec_dai,
pll_factors(&pll_div, freq_out * 4, freq_in);
/* Turn on PLL */
- reg = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_2);
+ reg = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_2);
reg |= WM8990_PLL_ENA;
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_2, reg);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_2, reg);
/* sysclk comes from PLL */
- reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_2);
- wm8990_write(codec, WM8990_CLOCKING_2, reg | WM8990_SYSCLK_SRC);
+ reg = snd_soc_read(codec, WM8990_CLOCKING_2);
+ snd_soc_write(codec, WM8990_CLOCKING_2, reg | WM8990_SYSCLK_SRC);
/* set up N, fractional mode and pre-divisor if necessary */
- wm8990_write(codec, WM8990_PLL1, pll_div.n | WM8990_SDM |
+ snd_soc_write(codec, WM8990_PLL1, pll_div.n | WM8990_SDM |
(pll_div.div2?WM8990_PRESCALE:0));
- wm8990_write(codec, WM8990_PLL2, (u8)(pll_div.k>>8));
- wm8990_write(codec, WM8990_PLL3, (u8)(pll_div.k & 0xFF));
+ snd_soc_write(codec, WM8990_PLL2, (u8)(pll_div.k>>8));
+ snd_soc_write(codec, WM8990_PLL3, (u8)(pll_div.k & 0xFF));
} else {
/* Turn on PLL */
- reg = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_2);
+ reg = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_2);
reg &= ~WM8990_PLL_ENA;
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_2, reg);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_2, reg);
}
return 0;
}
@@ -1073,8 +1027,8 @@ static int wm8990_set_dai_fmt(struct snd_soc_dai *codec_dai,
struct snd_soc_codec *codec = codec_dai->codec;
u16 audio1, audio3;
- audio1 = wm8990_read_reg_cache(codec, WM8990_AUDIO_INTERFACE_1);
- audio3 = wm8990_read_reg_cache(codec, WM8990_AUDIO_INTERFACE_3);
+ audio1 = snd_soc_read(codec, WM8990_AUDIO_INTERFACE_1);
+ audio3 = snd_soc_read(codec, WM8990_AUDIO_INTERFACE_3);
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -1115,8 +1069,8 @@ static int wm8990_set_dai_fmt(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
- wm8990_write(codec, WM8990_AUDIO_INTERFACE_1, audio1);
- wm8990_write(codec, WM8990_AUDIO_INTERFACE_3, audio3);
+ snd_soc_write(codec, WM8990_AUDIO_INTERFACE_1, audio1);
+ snd_soc_write(codec, WM8990_AUDIO_INTERFACE_3, audio3);
return 0;
}
@@ -1128,24 +1082,24 @@ static int wm8990_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
switch (div_id) {
case WM8990_MCLK_DIV:
- reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_2) &
+ reg = snd_soc_read(codec, WM8990_CLOCKING_2) &
~WM8990_MCLK_DIV_MASK;
- wm8990_write(codec, WM8990_CLOCKING_2, reg | div);
+ snd_soc_write(codec, WM8990_CLOCKING_2, reg | div);
break;
case WM8990_DACCLK_DIV:
- reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_2) &
+ reg = snd_soc_read(codec, WM8990_CLOCKING_2) &
~WM8990_DAC_CLKDIV_MASK;
- wm8990_write(codec, WM8990_CLOCKING_2, reg | div);
+ snd_soc_write(codec, WM8990_CLOCKING_2, reg | div);
break;
case WM8990_ADCCLK_DIV:
- reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_2) &
+ reg = snd_soc_read(codec, WM8990_CLOCKING_2) &
~WM8990_ADC_CLKDIV_MASK;
- wm8990_write(codec, WM8990_CLOCKING_2, reg | div);
+ snd_soc_write(codec, WM8990_CLOCKING_2, reg | div);
break;
case WM8990_BCLK_DIV:
- reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_1) &
+ reg = snd_soc_read(codec, WM8990_CLOCKING_1) &
~WM8990_BCLK_DIV_MASK;
- wm8990_write(codec, WM8990_CLOCKING_1, reg | div);
+ snd_soc_write(codec, WM8990_CLOCKING_1, reg | div);
break;
default:
return -EINVAL;
@@ -1164,7 +1118,7 @@ static int wm8990_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_codec *codec = socdev->card->codec;
- u16 audio1 = wm8990_read_reg_cache(codec, WM8990_AUDIO_INTERFACE_1);
+ u16 audio1 = snd_soc_read(codec, WM8990_AUDIO_INTERFACE_1);
audio1 &= ~WM8990_AIF_WL_MASK;
/* bit size */
@@ -1182,7 +1136,7 @@ static int wm8990_hw_params(struct snd_pcm_substream *substream,
break;
}
- wm8990_write(codec, WM8990_AUDIO_INTERFACE_1, audio1);
+ snd_soc_write(codec, WM8990_AUDIO_INTERFACE_1, audio1);
return 0;
}
@@ -1191,12 +1145,12 @@ static int wm8990_mute(struct snd_soc_dai *dai, int mute)
struct snd_soc_codec *codec = dai->codec;
u16 val;
- val = wm8990_read_reg_cache(codec, WM8990_DAC_CTRL) & ~WM8990_DAC_MUTE;
+ val = snd_soc_read(codec, WM8990_DAC_CTRL) & ~WM8990_DAC_MUTE;
if (mute)
- wm8990_write(codec, WM8990_DAC_CTRL, val | WM8990_DAC_MUTE);
+ snd_soc_write(codec, WM8990_DAC_CTRL, val | WM8990_DAC_MUTE);
else
- wm8990_write(codec, WM8990_DAC_CTRL, val);
+ snd_soc_write(codec, WM8990_DAC_CTRL, val);
return 0;
}
@@ -1212,21 +1166,21 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
/* VMID=2*50k */
- val = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_1) &
+ val = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_1) &
~WM8990_VMID_MODE_MASK;
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, val | 0x2);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, val | 0x2);
break;
case SND_SOC_BIAS_STANDBY:
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* Enable all output discharge bits */
- wm8990_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
+ snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
WM8990_DIS_RLINE | WM8990_DIS_OUT3 |
WM8990_DIS_OUT4 | WM8990_DIS_LOUT |
WM8990_DIS_ROUT);
/* Enable POBCTRL, SOFT_ST, VMIDTOG and BUFDCOPEN */
- wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
+ snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
WM8990_BUFDCOPEN | WM8990_POBCTRL |
WM8990_VMIDTOG);
@@ -1234,83 +1188,83 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
msleep(msecs_to_jiffies(300));
/* Disable VMIDTOG */
- wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
+ snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
WM8990_BUFDCOPEN | WM8990_POBCTRL);
/* disable all output discharge bits */
- wm8990_write(codec, WM8990_ANTIPOP1, 0);
+ snd_soc_write(codec, WM8990_ANTIPOP1, 0);
/* Enable outputs */
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
msleep(msecs_to_jiffies(50));
/* Enable VMID at 2x50k */
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
msleep(msecs_to_jiffies(100));
/* Enable VREF */
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
msleep(msecs_to_jiffies(600));
/* Enable BUFIOEN */
- wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
+ snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
WM8990_BUFDCOPEN | WM8990_POBCTRL |
WM8990_BUFIOEN);
/* Disable outputs */
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x3);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x3);
/* disable POBCTRL, SOFT_ST and BUFDCOPEN */
- wm8990_write(codec, WM8990_ANTIPOP2, WM8990_BUFIOEN);
+ snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_BUFIOEN);
/* Enable workaround for ADC clocking issue. */
- wm8990_write(codec, WM8990_EXT_ACCESS_ENA, 0x2);
- wm8990_write(codec, WM8990_EXT_CTL1, 0xa003);
- wm8990_write(codec, WM8990_EXT_ACCESS_ENA, 0);
+ snd_soc_write(codec, WM8990_EXT_ACCESS_ENA, 0x2);
+ snd_soc_write(codec, WM8990_EXT_CTL1, 0xa003);
+ snd_soc_write(codec, WM8990_EXT_ACCESS_ENA, 0);
}
/* VMID=2*250k */
- val = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_1) &
+ val = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_1) &
~WM8990_VMID_MODE_MASK;
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, val | 0x4);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, val | 0x4);
break;
case SND_SOC_BIAS_OFF:
/* Enable POBCTRL and SOFT_ST */
- wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
+ snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
WM8990_POBCTRL | WM8990_BUFIOEN);
/* Enable POBCTRL, SOFT_ST and BUFDCOPEN */
- wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
+ snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
WM8990_BUFDCOPEN | WM8990_POBCTRL |
WM8990_BUFIOEN);
/* mute DAC */
- val = wm8990_read_reg_cache(codec, WM8990_DAC_CTRL);
- wm8990_write(codec, WM8990_DAC_CTRL, val | WM8990_DAC_MUTE);
+ val = snd_soc_read(codec, WM8990_DAC_CTRL);
+ snd_soc_write(codec, WM8990_DAC_CTRL, val | WM8990_DAC_MUTE);
/* Enable any disabled outputs */
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
/* Disable VMID */
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
msleep(msecs_to_jiffies(300));
/* Enable all output discharge bits */
- wm8990_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
+ snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
WM8990_DIS_RLINE | WM8990_DIS_OUT3 |
WM8990_DIS_OUT4 | WM8990_DIS_LOUT |
WM8990_DIS_ROUT);
/* Disable VREF */
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x0);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x0);
/* disable POBCTRL, SOFT_ST and BUFDCOPEN */
- wm8990_write(codec, WM8990_ANTIPOP2, 0x0);
+ snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
break;
}
@@ -1411,8 +1365,6 @@ static int wm8990_init(struct snd_soc_device *socdev)
codec->name = "WM8990";
codec->owner = THIS_MODULE;
- codec->read = wm8990_read_reg_cache;
- codec->write = wm8990_write;
codec->set_bias_level = wm8990_set_bias_level;
codec->dai = &wm8990_dai;
codec->num_dai = 2;
@@ -1422,6 +1374,12 @@ static int wm8990_init(struct snd_soc_device *socdev)
if (codec->reg_cache == NULL)
return -ENOMEM;
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
+ if (ret < 0) {
+ printk(KERN_ERR "wm8990: failed to set cache I/O: %d\n", ret);
+ goto pcm_err;
+ }
+
wm8990_reset(codec);
/* register pcms */
@@ -1435,18 +1393,18 @@ static int wm8990_init(struct snd_soc_device *socdev)
codec->bias_level = SND_SOC_BIAS_OFF;
wm8990_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- reg = wm8990_read_reg_cache(codec, WM8990_AUDIO_INTERFACE_4);
- wm8990_write(codec, WM8990_AUDIO_INTERFACE_4, reg | WM8990_ALRCGPIO1);
+ reg = snd_soc_read(codec, WM8990_AUDIO_INTERFACE_4);
+ snd_soc_write(codec, WM8990_AUDIO_INTERFACE_4, reg | WM8990_ALRCGPIO1);
- reg = wm8990_read_reg_cache(codec, WM8990_GPIO1_GPIO2) &
+ reg = snd_soc_read(codec, WM8990_GPIO1_GPIO2) &
~WM8990_GPIO1_SEL_MASK;
- wm8990_write(codec, WM8990_GPIO1_GPIO2, reg | 1);
+ snd_soc_write(codec, WM8990_GPIO1_GPIO2, reg | 1);
- reg = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_2);
- wm8990_write(codec, WM8990_POWER_MANAGEMENT_2, reg | WM8990_OPCLK_ENA);
+ reg = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_2);
+ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_2, reg | WM8990_OPCLK_ENA);
- wm8990_write(codec, WM8990_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8));
- wm8990_write(codec, WM8990_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8));
+ snd_soc_write(codec, WM8990_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8));
+ snd_soc_write(codec, WM8990_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8));
snd_soc_add_controls(codec, wm8990_snd_controls,
ARRAY_SIZE(wm8990_snd_controls));
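
The wm8990 hunks above follow the same pattern with 8 register-address bits and 16 value bits (snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C)). The sequence that recurs throughout them - snd_soc_read(), mask off a field, OR in new bits, snd_soc_write() - is the plain read-modify-write idiom; a stand-alone model of it (a sketch only, helper names made up, not kernel code) looks like this:

#include <stdint.h>
#include <stdio.h>

static uint16_t cache[64];			/* stands in for codec->reg_cache */

static uint16_t reg_read(unsigned int reg)          { return cache[reg]; }
static void reg_write(unsigned int reg, uint16_t v) { cache[reg] = v; }

/* Read-modify-write one register field, the pattern the converted code
 * expresses with snd_soc_read()/snd_soc_write() (and which
 * snd_soc_update_bits() wraps up in a single call). */
static void update_bits(unsigned int reg, uint16_t mask, uint16_t bits)
{
	uint16_t val = reg_read(reg) & ~mask;

	reg_write(reg, val | (bits & mask));
}

int main(void)
{
	cache[2] = 0x6000;			/* arbitrary starting value */
	update_bits(2, 0x0001, 0x0001);		/* set a single enable bit */
	printf("R2 = 0x%04x\n", reg_read(2));	/* prints "R2 = 0x6001" */
	return 0;
}
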
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
new file mode 100644
index 000000000000..d9987999e92c
--- /dev/null
+++ b/sound/soc/codecs/wm8993.c
@@ -0,0 +1,1675 @@
+/*
+ * wm8993.c -- WM8993 ALSA SoC audio driver
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/wm8993.h>
+
+#include "wm8993.h"
+#include "wm_hubs.h"
+
+static u16 wm8993_reg_defaults[WM8993_REGISTER_COUNT] = {
+ 0x8993, /* R0 - Software Reset */
+ 0x0000, /* R1 - Power Management (1) */
+ 0x6000, /* R2 - Power Management (2) */
+ 0x0000, /* R3 - Power Management (3) */
+ 0x4050, /* R4 - Audio Interface (1) */
+ 0x4000, /* R5 - Audio Interface (2) */
+ 0x01C8, /* R6 - Clocking 1 */
+ 0x0000, /* R7 - Clocking 2 */
+ 0x0000, /* R8 - Audio Interface (3) */
+ 0x0040, /* R9 - Audio Interface (4) */
+ 0x0004, /* R10 - DAC CTRL */
+ 0x00C0, /* R11 - Left DAC Digital Volume */
+ 0x00C0, /* R12 - Right DAC Digital Volume */
+ 0x0000, /* R13 - Digital Side Tone */
+ 0x0300, /* R14 - ADC CTRL */
+ 0x00C0, /* R15 - Left ADC Digital Volume */
+ 0x00C0, /* R16 - Right ADC Digital Volume */
+ 0x0000, /* R17 */
+ 0x0000, /* R18 - GPIO CTRL 1 */
+ 0x0010, /* R19 - GPIO1 */
+ 0x0000, /* R20 - IRQ_DEBOUNCE */
+ 0x0000, /* R21 */
+ 0x8000, /* R22 - GPIOCTRL 2 */
+ 0x0800, /* R23 - GPIO_POL */
+ 0x008B, /* R24 - Left Line Input 1&2 Volume */
+ 0x008B, /* R25 - Left Line Input 3&4 Volume */
+ 0x008B, /* R26 - Right Line Input 1&2 Volume */
+ 0x008B, /* R27 - Right Line Input 3&4 Volume */
+ 0x006D, /* R28 - Left Output Volume */
+ 0x006D, /* R29 - Right Output Volume */
+ 0x0066, /* R30 - Line Outputs Volume */
+ 0x0020, /* R31 - HPOUT2 Volume */
+ 0x0079, /* R32 - Left OPGA Volume */
+ 0x0079, /* R33 - Right OPGA Volume */
+ 0x0003, /* R34 - SPKMIXL Attenuation */
+ 0x0003, /* R35 - SPKMIXR Attenuation */
+ 0x0011, /* R36 - SPKOUT Mixers */
+ 0x0100, /* R37 - SPKOUT Boost */
+ 0x0079, /* R38 - Speaker Volume Left */
+ 0x0079, /* R39 - Speaker Volume Right */
+ 0x0000, /* R40 - Input Mixer2 */
+ 0x0000, /* R41 - Input Mixer3 */
+ 0x0000, /* R42 - Input Mixer4 */
+ 0x0000, /* R43 - Input Mixer5 */
+ 0x0000, /* R44 - Input Mixer6 */
+ 0x0000, /* R45 - Output Mixer1 */
+ 0x0000, /* R46 - Output Mixer2 */
+ 0x0000, /* R47 - Output Mixer3 */
+ 0x0000, /* R48 - Output Mixer4 */
+ 0x0000, /* R49 - Output Mixer5 */
+ 0x0000, /* R50 - Output Mixer6 */
+ 0x0000, /* R51 - HPOUT2 Mixer */
+ 0x0000, /* R52 - Line Mixer1 */
+ 0x0000, /* R53 - Line Mixer2 */
+ 0x0000, /* R54 - Speaker Mixer */
+ 0x0000, /* R55 - Additional Control */
+ 0x0000, /* R56 - AntiPOP1 */
+ 0x0000, /* R57 - AntiPOP2 */
+ 0x0000, /* R58 - MICBIAS */
+ 0x0000, /* R59 */
+ 0x0000, /* R60 - FLL Control 1 */
+ 0x0000, /* R61 - FLL Control 2 */
+ 0x0000, /* R62 - FLL Control 3 */
+ 0x2EE0, /* R63 - FLL Control 4 */
+ 0x0002, /* R64 - FLL Control 5 */
+ 0x2287, /* R65 - Clocking 3 */
+ 0x025F, /* R66 - Clocking 4 */
+ 0x0000, /* R67 - MW Slave Control */
+ 0x0000, /* R68 */
+ 0x0002, /* R69 - Bus Control 1 */
+ 0x0000, /* R70 - Write Sequencer 0 */
+ 0x0000, /* R71 - Write Sequencer 1 */
+ 0x0000, /* R72 - Write Sequencer 2 */
+ 0x0000, /* R73 - Write Sequencer 3 */
+ 0x0000, /* R74 - Write Sequencer 4 */
+ 0x0000, /* R75 - Write Sequencer 5 */
+ 0x1F25, /* R76 - Charge Pump 1 */
+ 0x0000, /* R77 */
+ 0x0000, /* R78 */
+ 0x0000, /* R79 */
+ 0x0000, /* R80 */
+ 0x0000, /* R81 - Class W 0 */
+ 0x0000, /* R82 */
+ 0x0000, /* R83 */
+ 0x0000, /* R84 - DC Servo 0 */
+ 0x054A, /* R85 - DC Servo 1 */
+ 0x0000, /* R86 */
+ 0x0000, /* R87 - DC Servo 3 */
+ 0x0000, /* R88 - DC Servo Readback 0 */
+ 0x0000, /* R89 - DC Servo Readback 1 */
+ 0x0000, /* R90 - DC Servo Readback 2 */
+ 0x0000, /* R91 */
+ 0x0000, /* R92 */
+ 0x0000, /* R93 */
+ 0x0000, /* R94 */
+ 0x0000, /* R95 */
+ 0x0100, /* R96 - Analogue HP 0 */
+ 0x0000, /* R97 */
+ 0x0000, /* R98 - EQ1 */
+ 0x000C, /* R99 - EQ2 */
+ 0x000C, /* R100 - EQ3 */
+ 0x000C, /* R101 - EQ4 */
+ 0x000C, /* R102 - EQ5 */
+ 0x000C, /* R103 - EQ6 */
+ 0x0FCA, /* R104 - EQ7 */
+ 0x0400, /* R105 - EQ8 */
+ 0x00D8, /* R106 - EQ9 */
+ 0x1EB5, /* R107 - EQ10 */
+ 0xF145, /* R108 - EQ11 */
+ 0x0B75, /* R109 - EQ12 */
+ 0x01C5, /* R110 - EQ13 */
+ 0x1C58, /* R111 - EQ14 */
+ 0xF373, /* R112 - EQ15 */
+ 0x0A54, /* R113 - EQ16 */
+ 0x0558, /* R114 - EQ17 */
+ 0x168E, /* R115 - EQ18 */
+ 0xF829, /* R116 - EQ19 */
+ 0x07AD, /* R117 - EQ20 */
+ 0x1103, /* R118 - EQ21 */
+ 0x0564, /* R119 - EQ22 */
+ 0x0559, /* R120 - EQ23 */
+ 0x4000, /* R121 - EQ24 */
+ 0x0000, /* R122 - Digital Pulls */
+ 0x0F08, /* R123 - DRC Control 1 */
+ 0x0000, /* R124 - DRC Control 2 */
+ 0x0080, /* R125 - DRC Control 3 */
+ 0x0000, /* R126 - DRC Control 4 */
+};
+
+static struct {
+ int ratio;
+ int clk_sys_rate;
+} clk_sys_rates[] = {
+ { 64, 0 },
+ { 128, 1 },
+ { 192, 2 },
+ { 256, 3 },
+ { 384, 4 },
+ { 512, 5 },
+ { 768, 6 },
+ { 1024, 7 },
+ { 1408, 8 },
+ { 1536, 9 },
+};
+
+static struct {
+ int rate;
+ int sample_rate;
+} sample_rates[] = {
+ { 8000, 0 },
+ { 11025, 1 },
+ { 12000, 1 },
+ { 16000, 2 },
+ { 22050, 3 },
+ { 24000, 3 },
+ { 32000, 4 },
+ { 44100, 5 },
+ { 48000, 5 },
+};
+
+static struct {
+ int div; /* divisor * 10, to allow for 0.5 steps */
+ int bclk_div;
+} bclk_divs[] = {
+ { 10, 0 },
+ { 15, 1 },
+ { 20, 2 },
+ { 30, 3 },
+ { 40, 4 },
+ { 55, 5 },
+ { 60, 6 },
+ { 80, 7 },
+ { 110, 8 },
+ { 120, 9 },
+ { 160, 10 },
+ { 220, 11 },
+ { 240, 12 },
+ { 320, 13 },
+ { 440, 14 },
+ { 480, 15 },
+};
+
+struct wm8993_priv {
+ u16 reg_cache[WM8993_REGISTER_COUNT];
+ struct wm8993_platform_data pdata;
+ struct snd_soc_codec codec;
+ int master;
+ int sysclk_source;
+ int tdm_slots;
+ int tdm_width;
+ unsigned int mclk_rate;
+ unsigned int sysclk_rate;
+ unsigned int fs;
+ unsigned int bclk;
+ int class_w_users;
+ unsigned int fll_fref;
+ unsigned int fll_fout;
+};
+
+static unsigned int wm8993_read_hw(struct snd_soc_codec *codec, u8 reg)
+{
+ struct i2c_msg xfer[2];
+ u16 data;
+ int ret;
+ struct i2c_client *i2c = codec->control_data;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 2;
+ xfer[1].buf = (u8 *)&data;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret != 2) {
+ dev_err(codec->dev, "Failed to read 0x%x: %d\n", reg, ret);
+ return 0;
+ }
+
+ return (data >> 8) | ((data & 0xff) << 8);
+}
+
+static int wm8993_volatile(unsigned int reg)
+{
+ switch (reg) {
+ case WM8993_SOFTWARE_RESET:
+ case WM8993_DC_SERVO_0:
+ case WM8993_DC_SERVO_READBACK_0:
+ case WM8993_DC_SERVO_READBACK_1:
+ case WM8993_DC_SERVO_READBACK_2:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static unsigned int wm8993_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u16 *reg_cache = codec->reg_cache;
+
+ BUG_ON(reg > WM8993_MAX_REGISTER);
+
+ if (wm8993_volatile(reg))
+ return wm8993_read_hw(codec, reg);
+ else
+ return reg_cache[reg];
+}
+
+static int wm8993_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u16 *reg_cache = codec->reg_cache;
+ u8 data[3];
+ int ret;
+
+ BUG_ON(reg > WM8993_MAX_REGISTER);
+
+ /* data is
+ * D15..D9 WM8993 register offset
+ * D8...D0 register data
+ */
+ data[0] = reg;
+ data[1] = value >> 8;
+ data[2] = value & 0x00ff;
+
+ if (!wm8993_volatile(reg))
+ reg_cache[reg] = value;
+
+ ret = codec->hw_write(codec->control_data, data, 3);
+
+ if (ret == 3)
+ return 0;
+ if (ret < 0)
+ return ret;
+ return -EIO;
+}
+
+struct _fll_div {
+ u16 fll_fratio;
+ u16 fll_outdiv;
+ u16 fll_clk_ref_div;
+ u16 n;
+ u16 k;
+};
+
+/* The resolution of the fractional divide (K) is 1<<16, multiplied
+ * by 10 to allow rounding later */
+#define FIXED_FLL_SIZE ((1 << 16) * 10)
+
+static struct {
+ unsigned int min;
+ unsigned int max;
+ u16 fll_fratio;
+ int ratio;
+} fll_fratios[] = {
+ { 0, 64000, 4, 16 },
+ { 64000, 128000, 3, 8 },
+ { 128000, 256000, 2, 4 },
+ { 256000, 1000000, 1, 2 },
+ { 1000000, 13500000, 0, 1 },
+};
+
+static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
+ unsigned int Fout)
+{
+ u64 Kpart;
+ unsigned int K, Ndiv, Nmod, target;
+ unsigned int div;
+ int i;
+
+ /* Fref must be <=13.5MHz */
+ div = 1;
+ fll_div->fll_clk_ref_div = 0;
+ while ((Fref / div) > 13500000) {
+ div *= 2;
+ fll_div->fll_clk_ref_div++;
+
+ if (div > 8) {
+ pr_err("Can't scale %dMHz input down to <=13.5MHz\n",
+ Fref);
+ return -EINVAL;
+ }
+ }
+
+ pr_debug("Fref=%u Fout=%u\n", Fref, Fout);
+
+ /* Apply the division for our remaining calculations */
+ Fref /= div;
+
+ /* Fvco should be 90-100MHz; don't check the upper bound */
+ div = 0;
+ target = Fout * 2;
+ while (target < 90000000) {
+ div++;
+ target *= 2;
+ if (div > 7) {
+ pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
+ Fout);
+ return -EINVAL;
+ }
+ }
+ fll_div->fll_outdiv = div;
+
+ pr_debug("Fvco=%dHz\n", target);
+
+ /* Find an appropriate FLL_FRATIO and factor it out of the target */
+ for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
+ if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
+ fll_div->fll_fratio = fll_fratios[i].fll_fratio;
+ target /= fll_fratios[i].ratio;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(fll_fratios)) {
+ pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
+ return -EINVAL;
+ }
+
+ /* Now, calculate N.K */
+ Ndiv = target / Fref;
+
+ fll_div->n = Ndiv;
+ Nmod = target % Fref;
+ pr_debug("Nmod=%d\n", Nmod);
+
+ /* Calculate fractional part - scale up so we can round. */
+ Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+
+ do_div(Kpart, Fref);
+
+ K = Kpart & 0xFFFFFFFF;
+
+ if ((K % 10) >= 5)
+ K += 5;
+
+ /* Move down to proper range now rounding is done */
+ fll_div->k = K / 10;
+
+ pr_debug("N=%x K=%x FLL_FRATIO=%x FLL_OUTDIV=%x FLL_CLK_REF_DIV=%x\n",
+ fll_div->n, fll_div->k,
+ fll_div->fll_fratio, fll_div->fll_outdiv,
+ fll_div->fll_clk_ref_div);
+
+ return 0;
+}
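
For reference, the arithmetic in fll_factors() above can be followed with concrete numbers. The stand-alone sketch below (illustrative input rates, not taken from this patch) reproduces the N.K calculation for an assumed Fref of 12 MHz and Fout of 12.288 MHz; the FLL_FRATIO step is omitted because a 12 MHz reference falls in the ratio-of-1 band of the table above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int fref = 12000000, fout = 12288000;
	unsigned int outdiv = 0, target = fout * 2;
	unsigned int n, nmod, k;
	uint64_t kpart;

	/* Fvco must reach at least 90 MHz; each step doubles the target */
	while (target < 90000000) {
		outdiv++;
		target *= 2;
	}

	n = target / fref;
	nmod = target % fref;

	/* fractional part, scaled by 2^16 * 10 so it can be rounded */
	kpart = (uint64_t)65536 * 10 * nmod / fref;
	if (kpart % 10 >= 5)
		kpart += 5;
	k = kpart / 10;

	printf("FLL_OUTDIV=%u N=%u K=0x%x (Fvco=%uHz)\n", outdiv, n, k, target);
	/* prints: FLL_OUTDIV=2 N=8 K=0x3127 (Fvco=98304000Hz) */
	return 0;
}
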
+
+static int wm8993_set_fll(struct snd_soc_dai *dai, int fll_id,
+ unsigned int Fref, unsigned int Fout)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8993_priv *wm8993 = codec->private_data;
+ u16 reg1, reg4, reg5;
+ struct _fll_div fll_div;
+ int ret;
+
+ /* Any change? */
+ if (Fref == wm8993->fll_fref && Fout == wm8993->fll_fout)
+ return 0;
+
+ /* Disable the FLL */
+ if (Fout == 0) {
+ dev_dbg(codec->dev, "FLL disabled\n");
+ wm8993->fll_fref = 0;
+ wm8993->fll_fout = 0;
+
+ reg1 = wm8993_read(codec, WM8993_FLL_CONTROL_1);
+ reg1 &= ~WM8993_FLL_ENA;
+ wm8993_write(codec, WM8993_FLL_CONTROL_1, reg1);
+
+ return 0;
+ }
+
+ ret = fll_factors(&fll_div, Fref, Fout);
+ if (ret != 0)
+ return ret;
+
+ reg5 = wm8993_read(codec, WM8993_FLL_CONTROL_5);
+ reg5 &= ~WM8993_FLL_CLK_SRC_MASK;
+
+ switch (fll_id) {
+ case WM8993_FLL_MCLK:
+ break;
+
+ case WM8993_FLL_LRCLK:
+ reg5 |= 1;
+ break;
+
+ case WM8993_FLL_BCLK:
+ reg5 |= 2;
+ break;
+
+ default:
+ dev_err(codec->dev, "Unknown FLL ID %d\n", fll_id);
+ return -EINVAL;
+ }
+
+ /* Any FLL configuration change requires that the FLL be
+ * disabled first. */
+ reg1 = wm8993_read(codec, WM8993_FLL_CONTROL_1);
+ reg1 &= ~WM8993_FLL_ENA;
+ wm8993_write(codec, WM8993_FLL_CONTROL_1, reg1);
+
+ /* Apply the configuration */
+ if (fll_div.k)
+ reg1 |= WM8993_FLL_FRAC_MASK;
+ else
+ reg1 &= ~WM8993_FLL_FRAC_MASK;
+ wm8993_write(codec, WM8993_FLL_CONTROL_1, reg1);
+
+ wm8993_write(codec, WM8993_FLL_CONTROL_2,
+ (fll_div.fll_outdiv << WM8993_FLL_OUTDIV_SHIFT) |
+ (fll_div.fll_fratio << WM8993_FLL_FRATIO_SHIFT));
+ wm8993_write(codec, WM8993_FLL_CONTROL_3, fll_div.k);
+
+ reg4 = wm8993_read(codec, WM8993_FLL_CONTROL_4);
+ reg4 &= ~WM8993_FLL_N_MASK;
+ reg4 |= fll_div.n << WM8993_FLL_N_SHIFT;
+ wm8993_write(codec, WM8993_FLL_CONTROL_4, reg4);
+
+ reg5 &= ~WM8993_FLL_CLK_REF_DIV_MASK;
+ reg5 |= fll_div.fll_clk_ref_div << WM8993_FLL_CLK_REF_DIV_SHIFT;
+ wm8993_write(codec, WM8993_FLL_CONTROL_5, reg5);
+
+ /* Enable the FLL */
+ wm8993_write(codec, WM8993_FLL_CONTROL_1, reg1 | WM8993_FLL_ENA);
+
+ dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);
+
+ wm8993->fll_fref = Fref;
+ wm8993->fll_fout = Fout;
+
+ return 0;
+}
+
+static int configure_clock(struct snd_soc_codec *codec)
+{
+ struct wm8993_priv *wm8993 = codec->private_data;
+ unsigned int reg;
+
+ /* This should be done on init() for bypass paths */
+ switch (wm8993->sysclk_source) {
+ case WM8993_SYSCLK_MCLK:
+ dev_dbg(codec->dev, "Using %dHz MCLK\n", wm8993->mclk_rate);
+
+ reg = wm8993_read(codec, WM8993_CLOCKING_2);
+ reg &= ~(WM8993_MCLK_DIV | WM8993_SYSCLK_SRC);
+ if (wm8993->mclk_rate > 13500000) {
+ reg |= WM8993_MCLK_DIV;
+ wm8993->sysclk_rate = wm8993->mclk_rate / 2;
+ } else {
+ reg &= ~WM8993_MCLK_DIV;
+ wm8993->sysclk_rate = wm8993->mclk_rate;
+ }
+ wm8993_write(codec, WM8993_CLOCKING_2, reg);
+ break;
+
+ case WM8993_SYSCLK_FLL:
+ dev_dbg(codec->dev, "Using %dHz FLL clock\n",
+ wm8993->fll_fout);
+
+ reg = wm8993_read(codec, WM8993_CLOCKING_2);
+ reg |= WM8993_SYSCLK_SRC;
+ if (wm8993->fll_fout > 13500000) {
+ reg |= WM8993_MCLK_DIV;
+ wm8993->sysclk_rate = wm8993->fll_fout / 2;
+ } else {
+ reg &= ~WM8993_MCLK_DIV;
+ wm8993->sysclk_rate = wm8993->fll_fout;
+ }
+ wm8993_write(codec, WM8993_CLOCKING_2, reg);
+ break;
+
+ default:
+ dev_err(codec->dev, "System clock not configured\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "CLK_SYS is %dHz\n", wm8993->sysclk_rate);
+
+ return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
+static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
+static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
+static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
+static const unsigned int drc_max_tlv[] = {
+ TLV_DB_RANGE_HEAD(4),
+ 0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
+ 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
+static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -1800, 300, 0);
+static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
+static const DECLARE_TLV_DB_SCALE(dac_boost_tlv, 0, 600, 0);
+
+static const char *dac_deemph_text[] = {
+ "None",
+ "32kHz",
+ "44.1kHz",
+ "48kHz",
+};
+
+static const struct soc_enum dac_deemph =
+ SOC_ENUM_SINGLE(WM8993_DAC_CTRL, 4, 4, dac_deemph_text);
+
+static const char *adc_hpf_text[] = {
+ "Hi-Fi",
+ "Voice 1",
+ "Voice 2",
+ "Voice 3",
+};
+
+static const struct soc_enum adc_hpf =
+ SOC_ENUM_SINGLE(WM8993_ADC_CTRL, 5, 4, adc_hpf_text);
+
+static const char *drc_path_text[] = {
+ "ADC",
+ "DAC"
+};
+
+static const struct soc_enum drc_path =
+ SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_1, 14, 2, drc_path_text);
+
+static const char *drc_r0_text[] = {
+ "1",
+ "1/2",
+ "1/4",
+ "1/8",
+ "1/16",
+ "0",
+};
+
+static const struct soc_enum drc_r0 =
+ SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_3, 8, 6, drc_r0_text);
+
+static const char *drc_r1_text[] = {
+ "1",
+ "1/2",
+ "1/4",
+ "1/8",
+ "0",
+};
+
+static const struct soc_enum drc_r1 =
+ SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_4, 13, 5, drc_r1_text);
+
+static const char *drc_attack_text[] = {
+ "Reserved",
+ "181us",
+ "363us",
+ "726us",
+ "1.45ms",
+ "2.9ms",
+ "5.8ms",
+ "11.6ms",
+ "23.2ms",
+ "46.4ms",
+ "92.8ms",
+ "185.6ms",
+};
+
+static const struct soc_enum drc_attack =
+ SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_2, 12, 12, drc_attack_text);
+
+static const char *drc_decay_text[] = {
+ "186ms",
+ "372ms",
+ "743ms",
+ "1.49s",
+ "2.97ms",
+ "5.94ms",
+ "11.89ms",
+ "23.78ms",
+ "47.56ms",
+};
+
+static const struct soc_enum drc_decay =
+ SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_2, 8, 9, drc_decay_text);
+
+static const char *drc_ff_text[] = {
+ "5 samples",
+ "9 samples",
+};
+
+static const struct soc_enum drc_ff =
+ SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_3, 7, 2, drc_ff_text);
+
+static const char *drc_qr_rate_text[] = {
+ "0.725ms",
+ "1.45ms",
+ "5.8ms",
+};
+
+static const struct soc_enum drc_qr_rate =
+ SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_3, 0, 3, drc_qr_rate_text);
+
+static const char *drc_smooth_text[] = {
+ "Low",
+ "Medium",
+ "High",
+};
+
+static const struct soc_enum drc_smooth =
+ SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_1, 4, 3, drc_smooth_text);
+
+static const struct snd_kcontrol_new wm8993_snd_controls[] = {
+SOC_DOUBLE_TLV("Digital Sidetone Volume", WM8993_DIGITAL_SIDE_TONE,
+ 5, 9, 12, 0, sidetone_tlv),
+
+SOC_SINGLE("DRC Switch", WM8993_DRC_CONTROL_1, 15, 1, 0),
+SOC_ENUM("DRC Path", drc_path),
+SOC_SINGLE_TLV("DRC Compressor Threashold Volume", WM8993_DRC_CONTROL_2,
+ 2, 60, 1, drc_comp_threash),
+SOC_SINGLE_TLV("DRC Compressor Amplitude Volume", WM8993_DRC_CONTROL_3,
+ 11, 30, 1, drc_comp_amp),
+SOC_ENUM("DRC R0", drc_r0),
+SOC_ENUM("DRC R1", drc_r1),
+SOC_SINGLE_TLV("DRC Minimum Volume", WM8993_DRC_CONTROL_1, 2, 3, 1,
+ drc_min_tlv),
+SOC_SINGLE_TLV("DRC Maximum Volume", WM8993_DRC_CONTROL_1, 0, 3, 0,
+ drc_max_tlv),
+SOC_ENUM("DRC Attack Rate", drc_attack),
+SOC_ENUM("DRC Decay Rate", drc_decay),
+SOC_ENUM("DRC FF Delay", drc_ff),
+SOC_SINGLE("DRC Anti-clip Switch", WM8993_DRC_CONTROL_1, 9, 1, 0),
+SOC_SINGLE("DRC Quick Release Switch", WM8993_DRC_CONTROL_1, 10, 1, 0),
+SOC_SINGLE_TLV("DRC Quick Release Volume", WM8993_DRC_CONTROL_3, 2, 3, 0,
+ drc_qr_tlv),
+SOC_ENUM("DRC Quick Release Rate", drc_qr_rate),
+SOC_SINGLE("DRC Smoothing Switch", WM8993_DRC_CONTROL_1, 11, 1, 0),
+SOC_SINGLE("DRC Smoothing Hysteresis Switch", WM8993_DRC_CONTROL_1, 8, 1, 0),
+SOC_ENUM("DRC Smoothing Hysteresis Threashold", drc_smooth),
+SOC_SINGLE_TLV("DRC Startup Volume", WM8993_DRC_CONTROL_4, 8, 18, 0,
+ drc_startup_tlv),
+
+SOC_SINGLE("EQ Switch", WM8993_EQ1, 0, 1, 0),
+
+SOC_DOUBLE_R_TLV("Capture Volume", WM8993_LEFT_ADC_DIGITAL_VOLUME,
+ WM8993_RIGHT_ADC_DIGITAL_VOLUME, 1, 96, 0, digital_tlv),
+SOC_SINGLE("ADC High Pass Filter Switch", WM8993_ADC_CTRL, 8, 1, 0),
+SOC_ENUM("ADC High Pass Filter Mode", adc_hpf),
+
+SOC_DOUBLE_R_TLV("Playback Volume", WM8993_LEFT_DAC_DIGITAL_VOLUME,
+ WM8993_RIGHT_DAC_DIGITAL_VOLUME, 1, 96, 0, digital_tlv),
+SOC_SINGLE_TLV("Playback Boost Volume", WM8993_AUDIO_INTERFACE_2, 10, 3, 0,
+ dac_boost_tlv),
+SOC_ENUM("DAC Deemphasis", dac_deemph),
+
+SOC_SINGLE_TLV("SPKL DAC Volume", WM8993_SPKMIXL_ATTENUATION,
+ 2, 1, 1, wm_hubs_spkmix_tlv),
+
+SOC_SINGLE_TLV("SPKR DAC Volume", WM8993_SPKMIXR_ATTENUATION,
+ 2, 1, 1, wm_hubs_spkmix_tlv),
+};
+
+static const struct snd_kcontrol_new wm8993_eq_controls[] = {
+SOC_SINGLE_TLV("EQ1 Volume", WM8993_EQ2, 0, 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 Volume", WM8993_EQ3, 0, 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 Volume", WM8993_EQ4, 0, 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 Volume", WM8993_EQ5, 0, 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ5 Volume", WM8993_EQ6, 0, 24, 0, eq_tlv),
+};
+
+static int clk_sys_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ return configure_clock(codec);
+
+ case SND_SOC_DAPM_POST_PMD:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * The WM8993 charge pump supports class W operation only when its
+ * outputs are driven directly from the DACs, which gives very low
+ * power consumption with digital sources. Enable and disable this mode
+ * automatically depending on the mixer configuration.
+ *
+ * Currently the only supported paths are the direct DAC->headphone
+ * paths (which provide minimum power consumption anyway).
+ */
+static int class_w_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_codec *codec = widget->codec;
+ struct wm8993_priv *wm8993 = codec->private_data;
+ int ret;
+
+ /* Turn it off if we're using the main output mixer */
+ if (ucontrol->value.integer.value[0] == 0) {
+ if (wm8993->class_w_users == 0) {
+ dev_dbg(codec->dev, "Disabling Class W\n");
+ snd_soc_update_bits(codec, WM8993_CLASS_W_0,
+ WM8993_CP_DYN_FREQ |
+ WM8993_CP_DYN_V,
+ 0);
+ }
+ wm8993->class_w_users++;
+ }
+
+ /* Implement the change */
+ ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
+
+ /* Enable it if we're using the direct DAC path */
+ if (ucontrol->value.integer.value[0] == 1) {
+ if (wm8993->class_w_users == 1) {
+ dev_dbg(codec->dev, "Enabling Class W\n");
+ snd_soc_update_bits(codec, WM8993_CLASS_W_0,
+ WM8993_CP_DYN_FREQ |
+ WM8993_CP_DYN_V,
+ WM8993_CP_DYN_FREQ |
+ WM8993_CP_DYN_V);
+ }
+ wm8993->class_w_users--;
+ }
+
+ dev_dbg(codec->dev, "Indirect DAC use count now %d\n",
+ wm8993->class_w_users);
+
+ return ret;
+}
+
+#define SOC_DAPM_ENUM_W(xname, xenum) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_enum_double, \
+ .get = snd_soc_dapm_get_enum_double, \
+ .put = class_w_put, \
+ .private_value = (unsigned long)&xenum }
+
+static const char *hp_mux_text[] = {
+ "Mixer",
+ "DAC",
+};
+
+static const struct soc_enum hpl_enum =
+ SOC_ENUM_SINGLE(WM8993_OUTPUT_MIXER1, 8, 2, hp_mux_text);
+
+static const struct snd_kcontrol_new hpl_mux =
+ SOC_DAPM_ENUM_W("Left Headphone Mux", hpl_enum);
+
+static const struct soc_enum hpr_enum =
+ SOC_ENUM_SINGLE(WM8993_OUTPUT_MIXER2, 8, 2, hp_mux_text);
+
+static const struct snd_kcontrol_new hpr_mux =
+ SOC_DAPM_ENUM_W("Right Headphone Mux", hpr_enum);
+
+static const struct snd_kcontrol_new left_speaker_mixer[] = {
+SOC_DAPM_SINGLE("Input Switch", WM8993_SPEAKER_MIXER, 7, 1, 0),
+SOC_DAPM_SINGLE("IN1LP Switch", WM8993_SPEAKER_MIXER, 5, 1, 0),
+SOC_DAPM_SINGLE("Output Switch", WM8993_SPEAKER_MIXER, 3, 1, 0),
+SOC_DAPM_SINGLE("DAC Switch", WM8993_SPEAKER_MIXER, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new right_speaker_mixer[] = {
+SOC_DAPM_SINGLE("Input Switch", WM8993_SPEAKER_MIXER, 6, 1, 0),
+SOC_DAPM_SINGLE("IN1RP Switch", WM8993_SPEAKER_MIXER, 4, 1, 0),
+SOC_DAPM_SINGLE("Output Switch", WM8993_SPEAKER_MIXER, 2, 1, 0),
+SOC_DAPM_SINGLE("DAC Switch", WM8993_SPEAKER_MIXER, 0, 1, 0),
+};
+
+static const char *aif_text[] = {
+ "Left", "Right"
+};
+
+static const struct soc_enum aifoutl_enum =
+ SOC_ENUM_SINGLE(WM8993_AUDIO_INTERFACE_1, 15, 2, aif_text);
+
+static const struct snd_kcontrol_new aifoutl_mux =
+ SOC_DAPM_ENUM("AIFOUTL Mux", aifoutl_enum);
+
+static const struct soc_enum aifoutr_enum =
+ SOC_ENUM_SINGLE(WM8993_AUDIO_INTERFACE_1, 14, 2, aif_text);
+
+static const struct snd_kcontrol_new aifoutr_mux =
+ SOC_DAPM_ENUM("AIFOUTR Mux", aifoutr_enum);
+
+static const struct soc_enum aifinl_enum =
+ SOC_ENUM_SINGLE(WM8993_AUDIO_INTERFACE_2, 15, 2, aif_text);
+
+static const struct snd_kcontrol_new aifinl_mux =
+ SOC_DAPM_ENUM("AIFINL Mux", aifinl_enum);
+
+static const struct soc_enum aifinr_enum =
+ SOC_ENUM_SINGLE(WM8993_AUDIO_INTERFACE_2, 14, 2, aif_text);
+
+static const struct snd_kcontrol_new aifinr_mux =
+ SOC_DAPM_ENUM("AIFINR Mux", aifinr_enum);
+
+static const char *sidetone_text[] = {
+ "None", "Left", "Right"
+};
+
+static const struct soc_enum sidetonel_enum =
+ SOC_ENUM_SINGLE(WM8993_DIGITAL_SIDE_TONE, 2, 3, sidetone_text);
+
+static const struct snd_kcontrol_new sidetonel_mux =
+ SOC_DAPM_ENUM("Left Sidetone", sidetonel_enum);
+
+static const struct soc_enum sidetoner_enum =
+ SOC_ENUM_SINGLE(WM8993_DIGITAL_SIDE_TONE, 0, 3, sidetone_text);
+
+static const struct snd_kcontrol_new sidetoner_mux =
+ SOC_DAPM_ENUM("Right Sidetone", sidetoner_enum);
+
+static const struct snd_soc_dapm_widget wm8993_dapm_widgets[] = {
+SND_SOC_DAPM_SUPPLY("CLK_SYS", WM8993_BUS_CONTROL_1, 1, 0, clk_sys_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("TOCLK", WM8993_CLOCKING_1, 14, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8993_CLOCKING_3, 0, 0, NULL, 0),
+
+SND_SOC_DAPM_ADC("ADCL", NULL, WM8993_POWER_MANAGEMENT_2, 1, 0),
+SND_SOC_DAPM_ADC("ADCR", NULL, WM8993_POWER_MANAGEMENT_2, 0, 0),
+
+SND_SOC_DAPM_MUX("AIFOUTL Mux", SND_SOC_NOPM, 0, 0, &aifoutl_mux),
+SND_SOC_DAPM_MUX("AIFOUTR Mux", SND_SOC_NOPM, 0, 0, &aifoutr_mux),
+
+SND_SOC_DAPM_AIF_OUT("AIFOUTL", "Capture", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIFOUTR", "Capture", 1, SND_SOC_NOPM, 0, 0),
+
+SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
+
+SND_SOC_DAPM_MUX("DACL Mux", SND_SOC_NOPM, 0, 0, &aifinl_mux),
+SND_SOC_DAPM_MUX("DACR Mux", SND_SOC_NOPM, 0, 0, &aifinr_mux),
+
+SND_SOC_DAPM_MUX("DACL Sidetone", SND_SOC_NOPM, 0, 0, &sidetonel_mux),
+SND_SOC_DAPM_MUX("DACR Sidetone", SND_SOC_NOPM, 0, 0, &sidetoner_mux),
+
+SND_SOC_DAPM_DAC("DACL", NULL, WM8993_POWER_MANAGEMENT_3, 1, 0),
+SND_SOC_DAPM_DAC("DACR", NULL, WM8993_POWER_MANAGEMENT_3, 0, 0),
+
+SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
+SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
+
+SND_SOC_DAPM_MIXER("SPKL", WM8993_POWER_MANAGEMENT_3, 8, 0,
+ left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
+SND_SOC_DAPM_MIXER("SPKR", WM8993_POWER_MANAGEMENT_3, 9, 0,
+ right_speaker_mixer, ARRAY_SIZE(right_speaker_mixer)),
+
+};
+
+static const struct snd_soc_dapm_route routes[] = {
+ { "ADCL", NULL, "CLK_SYS" },
+ { "ADCL", NULL, "CLK_DSP" },
+ { "ADCR", NULL, "CLK_SYS" },
+ { "ADCR", NULL, "CLK_DSP" },
+
+ { "AIFOUTL Mux", "Left", "ADCL" },
+ { "AIFOUTL Mux", "Right", "ADCR" },
+ { "AIFOUTR Mux", "Left", "ADCL" },
+ { "AIFOUTR Mux", "Right", "ADCR" },
+
+ { "AIFOUTL", NULL, "AIFOUTL Mux" },
+ { "AIFOUTR", NULL, "AIFOUTR Mux" },
+
+ { "DACL Mux", "Left", "AIFINL" },
+ { "DACL Mux", "Right", "AIFINR" },
+ { "DACR Mux", "Left", "AIFINL" },
+ { "DACR Mux", "Right", "AIFINR" },
+
+ { "DACL Sidetone", "Left", "ADCL" },
+ { "DACL Sidetone", "Right", "ADCR" },
+ { "DACR Sidetone", "Left", "ADCL" },
+ { "DACR Sidetone", "Right", "ADCR" },
+
+ { "DACL", NULL, "CLK_SYS" },
+ { "DACL", NULL, "CLK_DSP" },
+ { "DACL", NULL, "DACL Mux" },
+ { "DACL", NULL, "DACL Sidetone" },
+ { "DACR", NULL, "CLK_SYS" },
+ { "DACR", NULL, "CLK_DSP" },
+ { "DACR", NULL, "DACR Mux" },
+ { "DACR", NULL, "DACR Sidetone" },
+
+ { "Left Output Mixer", "DAC Switch", "DACL" },
+
+ { "Right Output Mixer", "DAC Switch", "DACR" },
+
+ { "Left Output PGA", NULL, "CLK_SYS" },
+
+ { "Right Output PGA", NULL, "CLK_SYS" },
+
+ { "SPKL", "DAC Switch", "DACL" },
+ { "SPKL", NULL, "CLK_SYS" },
+
+ { "SPKR", "DAC Switch", "DACR" },
+ { "SPKR", NULL, "CLK_SYS" },
+
+ { "Left Headphone Mux", "DAC", "DACL" },
+ { "Right Headphone Mux", "DAC", "DACR" },
+};
+
+static int wm8993_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct wm8993_priv *wm8993 = codec->private_data;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ /* VMID=2*40k */
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
+ WM8993_VMID_SEL_MASK, 0x2);
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_2,
+ WM8993_TSHUT_ENA, WM8993_TSHUT_ENA);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ /* Bring up VMID with fast soft start */
+ snd_soc_update_bits(codec, WM8993_ANTIPOP2,
+ WM8993_STARTUP_BIAS_ENA |
+ WM8993_VMID_BUF_ENA |
+ WM8993_VMID_RAMP_MASK |
+ WM8993_BIAS_SRC,
+ WM8993_STARTUP_BIAS_ENA |
+ WM8993_VMID_BUF_ENA |
+ WM8993_VMID_RAMP_MASK |
+ WM8993_BIAS_SRC);
+
+ /* If either line output is single ended we
+ * need the VMID buffer */
+ if (!wm8993->pdata.lineout1_diff ||
+ !wm8993->pdata.lineout2_diff)
+ snd_soc_update_bits(codec, WM8993_ANTIPOP1,
+ WM8993_LINEOUT_VMID_BUF_ENA,
+ WM8993_LINEOUT_VMID_BUF_ENA);
+
+ /* VMID=2*40k */
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
+ WM8993_VMID_SEL_MASK |
+ WM8993_BIAS_ENA,
+ WM8993_BIAS_ENA | 0x2);
+ msleep(32);
+
+ /* Switch to normal bias */
+ snd_soc_update_bits(codec, WM8993_ANTIPOP2,
+ WM8993_BIAS_SRC |
+ WM8993_STARTUP_BIAS_ENA, 0);
+ }
+
+ /* VMID=2*240k */
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
+ WM8993_VMID_SEL_MASK, 0x4);
+
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_2,
+ WM8993_TSHUT_ENA, 0);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_update_bits(codec, WM8993_ANTIPOP1,
+ WM8993_LINEOUT_VMID_BUF_ENA, 0);
+
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
+ WM8993_VMID_SEL_MASK | WM8993_BIAS_ENA,
+ 0);
+ break;
+ }
+
+ codec->bias_level = level;
+
+ return 0;
+}
+
+static int wm8993_set_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct wm8993_priv *wm8993 = codec->private_data;
+
+ switch (clk_id) {
+ case WM8993_SYSCLK_MCLK:
+ wm8993->mclk_rate = freq;
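+ /* fall through - the clock source is recorded for MCLK and FLL alike */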
+ case WM8993_SYSCLK_FLL:
+ wm8993->sysclk_source = clk_id;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wm8993_set_dai_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8993_priv *wm8993 = codec->private_data;
+ unsigned int aif1 = wm8993_read(codec, WM8993_AUDIO_INTERFACE_1);
+ unsigned int aif4 = wm8993_read(codec, WM8993_AUDIO_INTERFACE_4);
+
+ aif1 &= ~(WM8993_BCLK_DIR | WM8993_AIF_BCLK_INV |
+ WM8993_AIF_LRCLK_INV | WM8993_AIF_FMT_MASK);
+ aif4 &= ~WM8993_LRCLK_DIR;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ wm8993->master = 0;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ aif4 |= WM8993_LRCLK_DIR;
+ wm8993->master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ aif1 |= WM8993_BCLK_DIR;
+ wm8993->master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aif1 |= WM8993_BCLK_DIR;
+ aif4 |= WM8993_LRCLK_DIR;
+ wm8993->master = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_B:
+ aif1 |= WM8993_AIF_LRCLK_INV;
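+ /* fall through - DSP mode B shares the DSP mode A format bits below */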
+ case SND_SOC_DAIFMT_DSP_A:
+ aif1 |= 0x18;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ aif1 |= 0x10;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ aif1 |= 0x8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ /* frame inversion not valid for DSP modes */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif1 |= WM8993_AIF_BCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_LEFT_J:
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ aif1 |= WM8993_AIF_BCLK_INV | WM8993_AIF_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif1 |= WM8993_AIF_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ aif1 |= WM8993_AIF_LRCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wm8993_write(codec, WM8993_AUDIO_INTERFACE_1, aif1);
+ wm8993_write(codec, WM8993_AUDIO_INTERFACE_4, aif4);
+
+ return 0;
+}
+
+static int wm8993_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8993_priv *wm8993 = codec->private_data;
+ int ret, i, best, best_val, cur_val;
+ unsigned int clocking1, clocking3, aif1, aif4;
+
+ clocking1 = wm8993_read(codec, WM8993_CLOCKING_1);
+ clocking1 &= ~WM8993_BCLK_DIV_MASK;
+
+ clocking3 = wm8993_read(codec, WM8993_CLOCKING_3);
+ clocking3 &= ~(WM8993_CLK_SYS_RATE_MASK | WM8993_SAMPLE_RATE_MASK);
+
+ aif1 = wm8993_read(codec, WM8993_AUDIO_INTERFACE_1);
+ aif1 &= ~WM8993_AIF_WL_MASK;
+
+ aif4 = wm8993_read(codec, WM8993_AUDIO_INTERFACE_4);
+ aif4 &= ~WM8993_LRCLK_RATE_MASK;
+
+ /* What BCLK do we need? */
+ wm8993->fs = params_rate(params);
+ wm8993->bclk = 2 * wm8993->fs;
+ if (wm8993->tdm_slots) {
+ dev_dbg(codec->dev, "Configuring for %d %d bit TDM slots\n",
+ wm8993->tdm_slots, wm8993->tdm_width);
+ wm8993->bclk *= wm8993->tdm_width * wm8993->tdm_slots;
+ } else {
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ wm8993->bclk *= 16;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ wm8993->bclk *= 20;
+ aif1 |= 0x8;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ wm8993->bclk *= 24;
+ aif1 |= 0x10;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ wm8993->bclk *= 32;
+ aif1 |= 0x18;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(codec->dev, "Target BCLK is %dHz\n", wm8993->bclk);
+
+ ret = configure_clock(codec);
+ if (ret != 0)
+ return ret;
+
+ /* Select nearest CLK_SYS_RATE */
+ best = 0;
+ best_val = abs((wm8993->sysclk_rate / clk_sys_rates[0].ratio)
+ - wm8993->fs);
+ for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
+ cur_val = abs((wm8993->sysclk_rate /
+ clk_sys_rates[i].ratio) - wm8993->fs);
+ if (cur_val < best_val) {
+ best = i;
+ best_val = cur_val;
+ }
+ }
+ dev_dbg(codec->dev, "Selected CLK_SYS_RATIO of %d\n",
+ clk_sys_rates[best].ratio);
+ clocking3 |= (clk_sys_rates[best].clk_sys_rate
+ << WM8993_CLK_SYS_RATE_SHIFT);
+
+ /* SAMPLE_RATE */
+ best = 0;
+ best_val = abs(wm8993->fs - sample_rates[0].rate);
+ for (i = 1; i < ARRAY_SIZE(sample_rates); i++) {
+ /* Closest match */
+ cur_val = abs(wm8993->fs - sample_rates[i].rate);
+ if (cur_val < best_val) {
+ best = i;
+ best_val = cur_val;
+ }
+ }
+ dev_dbg(codec->dev, "Selected SAMPLE_RATE of %dHz\n",
+ sample_rates[best].rate);
+ clocking3 |= (sample_rates[best].sample_rate
+ << WM8993_SAMPLE_RATE_SHIFT);
+
+ /* BCLK_DIV */
+ best = 0;
+ best_val = INT_MAX;
+ for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
+ cur_val = ((wm8993->sysclk_rate * 10) / bclk_divs[i].div)
+ - wm8993->bclk;
+ if (cur_val < 0) /* Table is sorted */
+ break;
+ if (cur_val < best_val) {
+ best = i;
+ best_val = cur_val;
+ }
+ }
+ wm8993->bclk = (wm8993->sysclk_rate * 10) / bclk_divs[best].div;
+ dev_dbg(codec->dev, "Selected BCLK_DIV of %d for %dHz BCLK\n",
+ bclk_divs[best].div, wm8993->bclk);
+ clocking1 |= bclk_divs[best].bclk_div << WM8993_BCLK_DIV_SHIFT;
+
+ /* LRCLK is a simple fraction of BCLK */
+ dev_dbg(codec->dev, "LRCLK_RATE is %d\n", wm8993->bclk / wm8993->fs);
+ aif4 |= wm8993->bclk / wm8993->fs;
+
+ wm8993_write(codec, WM8993_CLOCKING_1, clocking1);
+ wm8993_write(codec, WM8993_CLOCKING_3, clocking3);
+ wm8993_write(codec, WM8993_AUDIO_INTERFACE_1, aif1);
+ wm8993_write(codec, WM8993_AUDIO_INTERFACE_4, aif4);
+
+ /* ReTune Mobile? */
+ if (wm8993->pdata.num_retune_configs) {
+ u16 eq1 = wm8993_read(codec, WM8993_EQ1);
+ struct wm8993_retune_mobile_setting *s;
+
+ best = 0;
+ best_val = abs(wm8993->pdata.retune_configs[0].rate
+ - wm8993->fs);
+ for (i = 0; i < wm8993->pdata.num_retune_configs; i++) {
+ cur_val = abs(wm8993->pdata.retune_configs[i].rate
+ - wm8993->fs);
+ if (cur_val < best_val) {
+ best_val = cur_val;
+ best = i;
+ }
+ }
+ s = &wm8993->pdata.retune_configs[best];
+
+ dev_dbg(codec->dev, "ReTune Mobile %s tuned for %dHz\n",
+ s->name, s->rate);
+
+ /* Disable EQ while we reconfigure */
+ snd_soc_update_bits(codec, WM8993_EQ1, WM8993_EQ_ENA, 0);
+
+ for (i = 1; i < ARRAY_SIZE(s->config); i++)
+ wm8993_write(codec, WM8993_EQ1 + i, s->config[i]);
+
+ snd_soc_update_bits(codec, WM8993_EQ1, WM8993_EQ_ENA, eq1);
+ }
+
+ return 0;
+}
+
+static int wm8993_digital_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ unsigned int reg;
+
+ reg = wm8993_read(codec, WM8993_DAC_CTRL);
+
+ if (mute)
+ reg |= WM8993_DAC_MUTE;
+ else
+ reg &= ~WM8993_DAC_MUTE;
+
+ wm8993_write(codec, WM8993_DAC_CTRL, reg);
+
+ return 0;
+}
+
+static int wm8993_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8993_priv *wm8993 = codec->private_data;
+ int aif1 = 0;
+ int aif2 = 0;
+
+ /* Don't need to validate anything if we're turning off TDM */
+ if (slots == 0) {
+ wm8993->tdm_slots = 0;
+ goto out;
+ }
+
+ /* Note that we allow configurations we can't handle ourselves -
+ * for example, we can generate clocks for slots 2 and up even if
+ * we can't use those slots ourselves.
+ */
+ aif1 |= WM8993_AIFADC_TDM;
+ aif2 |= WM8993_AIFDAC_TDM;
+
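+	/* The ADC and DAC each occupy a pair of adjacent slots: a mask of
+	 * 0x3 selects slots 0/1, 0xc selects slots 2/3 via the *_TDM_CHAN
+	 * bits.
+	 */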
+ switch (rx_mask) {
+ case 3:
+ break;
+ case 0xc:
+ aif1 |= WM8993_AIFADC_TDM_CHAN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (tx_mask) {
+ case 3:
+ break;
+ case 0xc:
+ aif2 |= WM8993_AIFDAC_TDM_CHAN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+out:
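+	/* hw_params() starts its BCLK calculation from 2 * fs, so record the
+	 * slot count in pairs to give fs * slot_width * slots overall.
+	 */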
+ wm8993->tdm_width = slot_width;
+ wm8993->tdm_slots = slots / 2;
+
+ snd_soc_update_bits(codec, WM8993_AUDIO_INTERFACE_1,
+ WM8993_AIFADC_TDM | WM8993_AIFADC_TDM_CHAN, aif1);
+ snd_soc_update_bits(codec, WM8993_AUDIO_INTERFACE_2,
+ WM8993_AIFDAC_TDM | WM8993_AIFDAC_TDM_CHAN, aif2);
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops wm8993_ops = {
+ .set_sysclk = wm8993_set_sysclk,
+ .set_fmt = wm8993_set_dai_fmt,
+ .hw_params = wm8993_hw_params,
+ .digital_mute = wm8993_digital_mute,
+ .set_pll = wm8993_set_fll,
+ .set_tdm_slot = wm8993_set_tdm_slot,
+};
+
+#define WM8993_RATES SNDRV_PCM_RATE_8000_48000
+
+#define WM8993_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
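+/* The WM8993 has a single audio interface; symmetric_rates is set because
+ * playback and capture share its clocking.
+ */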
+struct snd_soc_dai wm8993_dai = {
+ .name = "WM8993",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8993_RATES,
+ .formats = WM8993_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8993_RATES,
+ .formats = WM8993_FORMATS,
+ },
+ .ops = &wm8993_ops,
+ .symmetric_rates = 1,
+};
+EXPORT_SYMBOL_GPL(wm8993_dai);
+
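+/* Only one WM8993 per system is supported; the I2C probe records it here
+ * for the ASoC device probe below.
+ */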
+static struct snd_soc_codec *wm8993_codec;
+
+static int wm8993_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ struct wm8993_priv *wm8993;
+ int ret = 0;
+
+ if (!wm8993_codec) {
+		dev_err(&pdev->dev, "I2C device not yet probed\n");
+		ret = -ENODEV;
+		goto err;
+ }
+
+ socdev->card->codec = wm8993_codec;
+ codec = wm8993_codec;
+ wm8993 = codec->private_data;
+
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms\n");
+ goto err;
+ }
+
+ snd_soc_add_controls(codec, wm8993_snd_controls,
+ ARRAY_SIZE(wm8993_snd_controls));
+ if (wm8993->pdata.num_retune_configs != 0) {
+ dev_dbg(codec->dev, "Using ReTune Mobile\n");
+ } else {
+ dev_dbg(codec->dev, "No ReTune Mobile, using normal EQ\n");
+ snd_soc_add_controls(codec, wm8993_eq_controls,
+ ARRAY_SIZE(wm8993_eq_controls));
+ }
+
+ snd_soc_dapm_new_controls(codec, wm8993_dapm_widgets,
+ ARRAY_SIZE(wm8993_dapm_widgets));
+ wm_hubs_add_analogue_controls(codec);
+
+ snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+ wm_hubs_add_analogue_routes(codec, wm8993->pdata.lineout1_diff,
+ wm8993->pdata.lineout2_diff);
+
+ snd_soc_dapm_new_widgets(codec);
+
+ ret = snd_soc_init_card(socdev);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to register card\n");
+ goto card_err;
+ }
+
+ return ret;
+
+card_err:
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+err:
+ return ret;
+}
+
+static int wm8993_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_wm8993 = {
+ .probe = wm8993_probe,
+ .remove = wm8993_remove,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8993);
+
+static int wm8993_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8993_priv *wm8993;
+ struct snd_soc_codec *codec;
+ unsigned int val;
+ int ret;
+
+ if (wm8993_codec) {
+ dev_err(&i2c->dev, "A WM8993 is already registered\n");
+ return -EINVAL;
+ }
+
+ wm8993 = kzalloc(sizeof(struct wm8993_priv), GFP_KERNEL);
+ if (wm8993 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8993->codec;
+ if (i2c->dev.platform_data)
+ memcpy(&wm8993->pdata, i2c->dev.platform_data,
+ sizeof(wm8993->pdata));
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->name = "WM8993";
+ codec->read = wm8993_read;
+ codec->write = wm8993_write;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+ codec->reg_cache = wm8993->reg_cache;
+ codec->reg_cache_size = ARRAY_SIZE(wm8993->reg_cache);
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8993_set_bias_level;
+ codec->dai = &wm8993_dai;
+ codec->num_dai = 1;
+ codec->private_data = wm8993;
+
+ memcpy(wm8993->reg_cache, wm8993_reg_defaults,
+ sizeof(wm8993->reg_cache));
+
+ i2c_set_clientdata(i2c, wm8993);
+ codec->control_data = i2c;
+ wm8993_codec = codec;
+
+ codec->dev = &i2c->dev;
+
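+	/* The software reset register reads back as the device ID; use it
+	 * to check that we really are talking to a WM8993.
+	 */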
+ val = wm8993_read_hw(codec, WM8993_SOFTWARE_RESET);
+ if (val != wm8993_reg_defaults[WM8993_SOFTWARE_RESET]) {
+ dev_err(codec->dev, "Invalid ID register value %x\n", val);
+ ret = -EINVAL;
+ goto err;
+ }
+
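+	/* Reset the device to its default register state */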
+ ret = wm8993_write(codec, WM8993_SOFTWARE_RESET, 0xffff);
+ if (ret != 0)
+ goto err;
+
+ /* By default we're using the output mixers */
+ wm8993->class_w_users = 2;
+
+ /* Latch volume update bits and default ZC on */
+ snd_soc_update_bits(codec, WM8993_RIGHT_DAC_DIGITAL_VOLUME,
+ WM8993_DAC_VU, WM8993_DAC_VU);
+ snd_soc_update_bits(codec, WM8993_RIGHT_ADC_DIGITAL_VOLUME,
+ WM8993_ADC_VU, WM8993_ADC_VU);
+
+	/* Manually manage the HPOUT sequencing for independent stereo
+ * control. */
+ snd_soc_update_bits(codec, WM8993_ANALOGUE_HP_0,
+ WM8993_HPOUT1_AUTO_PU, 0);
+
+ /* Use automatic clock configuration */
+ snd_soc_update_bits(codec, WM8993_CLOCKING_4, WM8993_SR_MODE, 0);
+
+ if (!wm8993->pdata.lineout1_diff)
+ snd_soc_update_bits(codec, WM8993_LINE_MIXER1,
+ WM8993_LINEOUT1_MODE,
+ WM8993_LINEOUT1_MODE);
+ if (!wm8993->pdata.lineout2_diff)
+ snd_soc_update_bits(codec, WM8993_LINE_MIXER2,
+ WM8993_LINEOUT2_MODE,
+ WM8993_LINEOUT2_MODE);
+
+ if (wm8993->pdata.lineout1fb)
+ snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
+ WM8993_LINEOUT1_FB, WM8993_LINEOUT1_FB);
+
+ if (wm8993->pdata.lineout2fb)
+ snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
+ WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
+
+ /* Apply the microphone bias/detection configuration - the
+ * platform data is directly applicable to the register. */
+ snd_soc_update_bits(codec, WM8993_MICBIAS,
+ WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
+ WM8993_MICB1_LVL | WM8993_MICB2_LVL,
+ wm8993->pdata.jd_scthr << WM8993_JD_SCTHR_SHIFT |
+ wm8993->pdata.jd_thr << WM8993_JD_THR_SHIFT |
+ wm8993->pdata.micbias1_lvl |
+ wm8993->pdata.micbias1_lvl << 1);
+
+ ret = wm8993_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ if (ret != 0)
+ goto err;
+
+ wm8993_dai.dev = codec->dev;
+
+ ret = snd_soc_register_dai(&wm8993_dai);
+ if (ret != 0)
+ goto err_bias;
+
+	ret = snd_soc_register_codec(codec);
+	if (ret != 0)
+		goto err_dai;
+
+	return 0;
+
+err_dai:
+	snd_soc_unregister_dai(&wm8993_dai);
+err_bias:
+ wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF);
+err:
+ wm8993_codec = NULL;
+ kfree(wm8993);
+ return ret;
+}
+
+static int wm8993_i2c_remove(struct i2c_client *client)
+{
+ struct wm8993_priv *wm8993 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_codec(&wm8993->codec);
+ snd_soc_unregister_dai(&wm8993_dai);
+
+ wm8993_set_bias_level(&wm8993->codec, SND_SOC_BIAS_OFF);
+ kfree(wm8993);
+
+ return 0;
+}
+
+static const struct i2c_device_id wm8993_i2c_id[] = {
+ { "wm8993", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8993_i2c_id);
+
+static struct i2c_driver wm8993_i2c_driver = {
+ .driver = {
+ .name = "WM8993",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8993_i2c_probe,
+ .remove = wm8993_i2c_remove,
+ .id_table = wm8993_i2c_id,
+};
+
+static int __init wm8993_modinit(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&wm8993_i2c_driver);
+ if (ret != 0)
+ pr_err("WM8993: Unable to register I2C driver: %d\n", ret);
+
+ return ret;
+}
+module_init(wm8993_modinit);
+
+static void __exit wm8993_exit(void)
+{
+ i2c_del_driver(&wm8993_i2c_driver);
+}
+module_exit(wm8993_exit);
+
+MODULE_DESCRIPTION("ASoC WM8993 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8993.h b/sound/soc/codecs/wm8993.h
new file mode 100644
index 000000000000..30e71ca88dad
--- /dev/null
+++ b/sound/soc/codecs/wm8993.h
@@ -0,0 +1,2132 @@
+#ifndef WM8993_H
+#define WM8993_H
+
+extern struct snd_soc_dai wm8993_dai;
+extern struct snd_soc_codec_device soc_codec_dev_wm8993;
+
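+/* Clock source selectors used with the set_sysclk() and FLL configuration
+ * calls.
+ */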
+#define WM8993_SYSCLK_MCLK 1
+#define WM8993_SYSCLK_FLL 2
+
+#define WM8993_FLL_MCLK 1
+#define WM8993_FLL_BCLK 2
+#define WM8993_FLL_LRCLK 3
+
+/*
+ * Register values.
+ */
+#define WM8993_SOFTWARE_RESET 0x00
+#define WM8993_POWER_MANAGEMENT_1 0x01
+#define WM8993_POWER_MANAGEMENT_2 0x02
+#define WM8993_POWER_MANAGEMENT_3 0x03
+#define WM8993_AUDIO_INTERFACE_1 0x04
+#define WM8993_AUDIO_INTERFACE_2 0x05
+#define WM8993_CLOCKING_1 0x06
+#define WM8993_CLOCKING_2 0x07
+#define WM8993_AUDIO_INTERFACE_3 0x08
+#define WM8993_AUDIO_INTERFACE_4 0x09
+#define WM8993_DAC_CTRL 0x0A
+#define WM8993_LEFT_DAC_DIGITAL_VOLUME 0x0B
+#define WM8993_RIGHT_DAC_DIGITAL_VOLUME 0x0C
+#define WM8993_DIGITAL_SIDE_TONE 0x0D
+#define WM8993_ADC_CTRL 0x0E
+#define WM8993_LEFT_ADC_DIGITAL_VOLUME 0x0F
+#define WM8993_RIGHT_ADC_DIGITAL_VOLUME 0x10
+#define WM8993_GPIO_CTRL_1 0x12
+#define WM8993_GPIO1 0x13
+#define WM8993_IRQ_DEBOUNCE 0x14
+#define WM8993_GPIOCTRL_2 0x16
+#define WM8993_GPIO_POL 0x17
+#define WM8993_LEFT_LINE_INPUT_1_2_VOLUME 0x18
+#define WM8993_LEFT_LINE_INPUT_3_4_VOLUME 0x19
+#define WM8993_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A
+#define WM8993_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B
+#define WM8993_LEFT_OUTPUT_VOLUME 0x1C
+#define WM8993_RIGHT_OUTPUT_VOLUME 0x1D
+#define WM8993_LINE_OUTPUTS_VOLUME 0x1E
+#define WM8993_HPOUT2_VOLUME 0x1F
+#define WM8993_LEFT_OPGA_VOLUME 0x20
+#define WM8993_RIGHT_OPGA_VOLUME 0x21
+#define WM8993_SPKMIXL_ATTENUATION 0x22
+#define WM8993_SPKMIXR_ATTENUATION 0x23
+#define WM8993_SPKOUT_MIXERS 0x24
+#define WM8993_SPKOUT_BOOST 0x25
+#define WM8993_SPEAKER_VOLUME_LEFT 0x26
+#define WM8993_SPEAKER_VOLUME_RIGHT 0x27
+#define WM8993_INPUT_MIXER2 0x28
+#define WM8993_INPUT_MIXER3 0x29
+#define WM8993_INPUT_MIXER4 0x2A
+#define WM8993_INPUT_MIXER5 0x2B
+#define WM8993_INPUT_MIXER6 0x2C
+#define WM8993_OUTPUT_MIXER1 0x2D
+#define WM8993_OUTPUT_MIXER2 0x2E
+#define WM8993_OUTPUT_MIXER3 0x2F
+#define WM8993_OUTPUT_MIXER4 0x30
+#define WM8993_OUTPUT_MIXER5 0x31
+#define WM8993_OUTPUT_MIXER6 0x32
+#define WM8993_HPOUT2_MIXER 0x33
+#define WM8993_LINE_MIXER1 0x34
+#define WM8993_LINE_MIXER2 0x35
+#define WM8993_SPEAKER_MIXER 0x36
+#define WM8993_ADDITIONAL_CONTROL 0x37
+#define WM8993_ANTIPOP1 0x38
+#define WM8993_ANTIPOP2 0x39
+#define WM8993_MICBIAS 0x3A
+#define WM8993_FLL_CONTROL_1 0x3C
+#define WM8993_FLL_CONTROL_2 0x3D
+#define WM8993_FLL_CONTROL_3 0x3E
+#define WM8993_FLL_CONTROL_4 0x3F
+#define WM8993_FLL_CONTROL_5 0x40
+#define WM8993_CLOCKING_3 0x41
+#define WM8993_CLOCKING_4 0x42
+#define WM8993_MW_SLAVE_CONTROL 0x43
+#define WM8993_BUS_CONTROL_1 0x45
+#define WM8993_WRITE_SEQUENCER_0 0x46
+#define WM8993_WRITE_SEQUENCER_1 0x47
+#define WM8993_WRITE_SEQUENCER_2 0x48
+#define WM8993_WRITE_SEQUENCER_3 0x49
+#define WM8993_WRITE_SEQUENCER_4 0x4A
+#define WM8993_WRITE_SEQUENCER_5 0x4B
+#define WM8993_CHARGE_PUMP_1 0x4C
+#define WM8993_CLASS_W_0 0x51
+#define WM8993_DC_SERVO_0 0x54
+#define WM8993_DC_SERVO_1 0x55
+#define WM8993_DC_SERVO_3 0x57
+#define WM8993_DC_SERVO_READBACK_0 0x58
+#define WM8993_DC_SERVO_READBACK_1 0x59
+#define WM8993_DC_SERVO_READBACK_2 0x5A
+#define WM8993_ANALOGUE_HP_0 0x60
+#define WM8993_EQ1 0x62
+#define WM8993_EQ2 0x63
+#define WM8993_EQ3 0x64
+#define WM8993_EQ4 0x65
+#define WM8993_EQ5 0x66
+#define WM8993_EQ6 0x67
+#define WM8993_EQ7 0x68
+#define WM8993_EQ8 0x69
+#define WM8993_EQ9 0x6A
+#define WM8993_EQ10 0x6B
+#define WM8993_EQ11 0x6C
+#define WM8993_EQ12 0x6D
+#define WM8993_EQ13 0x6E
+#define WM8993_EQ14 0x6F
+#define WM8993_EQ15 0x70
+#define WM8993_EQ16 0x71
+#define WM8993_EQ17 0x72
+#define WM8993_EQ18 0x73
+#define WM8993_EQ19 0x74
+#define WM8993_EQ20 0x75
+#define WM8993_EQ21 0x76
+#define WM8993_EQ22 0x77
+#define WM8993_EQ23 0x78
+#define WM8993_EQ24 0x79
+#define WM8993_DIGITAL_PULLS 0x7A
+#define WM8993_DRC_CONTROL_1 0x7B
+#define WM8993_DRC_CONTROL_2 0x7C
+#define WM8993_DRC_CONTROL_3 0x7D
+#define WM8993_DRC_CONTROL_4 0x7E
+
+#define WM8993_REGISTER_COUNT 0x7F
+#define WM8993_MAX_REGISTER 0x7E
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Software Reset
+ */
+#define WM8993_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */
+#define WM8993_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */
+#define WM8993_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */
+
+/*
+ * R1 (0x01) - Power Management (1)
+ */
+#define WM8993_SPKOUTR_ENA 0x2000 /* SPKOUTR_ENA */
+#define WM8993_SPKOUTR_ENA_MASK 0x2000 /* SPKOUTR_ENA */
+#define WM8993_SPKOUTR_ENA_SHIFT 13 /* SPKOUTR_ENA */
+#define WM8993_SPKOUTR_ENA_WIDTH 1 /* SPKOUTR_ENA */
+#define WM8993_SPKOUTL_ENA 0x1000 /* SPKOUTL_ENA */
+#define WM8993_SPKOUTL_ENA_MASK 0x1000 /* SPKOUTL_ENA */
+#define WM8993_SPKOUTL_ENA_SHIFT 12 /* SPKOUTL_ENA */
+#define WM8993_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */
+#define WM8993_HPOUT2_ENA 0x0800 /* HPOUT2_ENA */
+#define WM8993_HPOUT2_ENA_MASK 0x0800 /* HPOUT2_ENA */
+#define WM8993_HPOUT2_ENA_SHIFT 11 /* HPOUT2_ENA */
+#define WM8993_HPOUT2_ENA_WIDTH 1 /* HPOUT2_ENA */
+#define WM8993_HPOUT1L_ENA 0x0200 /* HPOUT1L_ENA */
+#define WM8993_HPOUT1L_ENA_MASK 0x0200 /* HPOUT1L_ENA */
+#define WM8993_HPOUT1L_ENA_SHIFT 9 /* HPOUT1L_ENA */
+#define WM8993_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */
+#define WM8993_HPOUT1R_ENA 0x0100 /* HPOUT1R_ENA */
+#define WM8993_HPOUT1R_ENA_MASK 0x0100 /* HPOUT1R_ENA */
+#define WM8993_HPOUT1R_ENA_SHIFT 8 /* HPOUT1R_ENA */
+#define WM8993_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */
+#define WM8993_MICB2_ENA 0x0020 /* MICB2_ENA */
+#define WM8993_MICB2_ENA_MASK 0x0020 /* MICB2_ENA */
+#define WM8993_MICB2_ENA_SHIFT 5 /* MICB2_ENA */
+#define WM8993_MICB2_ENA_WIDTH 1 /* MICB2_ENA */
+#define WM8993_MICB1_ENA 0x0010 /* MICB1_ENA */
+#define WM8993_MICB1_ENA_MASK 0x0010 /* MICB1_ENA */
+#define WM8993_MICB1_ENA_SHIFT 4 /* MICB1_ENA */
+#define WM8993_MICB1_ENA_WIDTH 1 /* MICB1_ENA */
+#define WM8993_VMID_SEL_MASK 0x0006 /* VMID_SEL - [2:1] */
+#define WM8993_VMID_SEL_SHIFT 1 /* VMID_SEL - [2:1] */
+#define WM8993_VMID_SEL_WIDTH 2 /* VMID_SEL - [2:1] */
+#define WM8993_BIAS_ENA 0x0001 /* BIAS_ENA */
+#define WM8993_BIAS_ENA_MASK 0x0001 /* BIAS_ENA */
+#define WM8993_BIAS_ENA_SHIFT 0 /* BIAS_ENA */
+#define WM8993_BIAS_ENA_WIDTH 1 /* BIAS_ENA */
+
+/*
+ * R2 (0x02) - Power Management (2)
+ */
+#define WM8993_TSHUT_ENA 0x4000 /* TSHUT_ENA */
+#define WM8993_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */
+#define WM8993_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */
+#define WM8993_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */
+#define WM8993_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */
+#define WM8993_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */
+#define WM8993_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */
+#define WM8993_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */
+#define WM8993_OPCLK_ENA 0x0800 /* OPCLK_ENA */
+#define WM8993_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */
+#define WM8993_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */
+#define WM8993_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */
+#define WM8993_MIXINL_ENA 0x0200 /* MIXINL_ENA */
+#define WM8993_MIXINL_ENA_MASK 0x0200 /* MIXINL_ENA */
+#define WM8993_MIXINL_ENA_SHIFT 9 /* MIXINL_ENA */
+#define WM8993_MIXINL_ENA_WIDTH 1 /* MIXINL_ENA */
+#define WM8993_MIXINR_ENA 0x0100 /* MIXINR_ENA */
+#define WM8993_MIXINR_ENA_MASK 0x0100 /* MIXINR_ENA */
+#define WM8993_MIXINR_ENA_SHIFT 8 /* MIXINR_ENA */
+#define WM8993_MIXINR_ENA_WIDTH 1 /* MIXINR_ENA */
+#define WM8993_IN2L_ENA 0x0080 /* IN2L_ENA */
+#define WM8993_IN2L_ENA_MASK 0x0080 /* IN2L_ENA */
+#define WM8993_IN2L_ENA_SHIFT 7 /* IN2L_ENA */
+#define WM8993_IN2L_ENA_WIDTH 1 /* IN2L_ENA */
+#define WM8993_IN1L_ENA 0x0040 /* IN1L_ENA */
+#define WM8993_IN1L_ENA_MASK 0x0040 /* IN1L_ENA */
+#define WM8993_IN1L_ENA_SHIFT 6 /* IN1L_ENA */
+#define WM8993_IN1L_ENA_WIDTH 1 /* IN1L_ENA */
+#define WM8993_IN2R_ENA 0x0020 /* IN2R_ENA */
+#define WM8993_IN2R_ENA_MASK 0x0020 /* IN2R_ENA */
+#define WM8993_IN2R_ENA_SHIFT 5 /* IN2R_ENA */
+#define WM8993_IN2R_ENA_WIDTH 1 /* IN2R_ENA */
+#define WM8993_IN1R_ENA 0x0010 /* IN1R_ENA */
+#define WM8993_IN1R_ENA_MASK 0x0010 /* IN1R_ENA */
+#define WM8993_IN1R_ENA_SHIFT 4 /* IN1R_ENA */
+#define WM8993_IN1R_ENA_WIDTH 1 /* IN1R_ENA */
+#define WM8993_ADCL_ENA 0x0002 /* ADCL_ENA */
+#define WM8993_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */
+#define WM8993_ADCL_ENA_SHIFT 1 /* ADCL_ENA */
+#define WM8993_ADCL_ENA_WIDTH 1 /* ADCL_ENA */
+#define WM8993_ADCR_ENA 0x0001 /* ADCR_ENA */
+#define WM8993_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */
+#define WM8993_ADCR_ENA_SHIFT 0 /* ADCR_ENA */
+#define WM8993_ADCR_ENA_WIDTH 1 /* ADCR_ENA */
+
+/*
+ * R3 (0x03) - Power Management (3)
+ */
+#define WM8993_LINEOUT1N_ENA 0x2000 /* LINEOUT1N_ENA */
+#define WM8993_LINEOUT1N_ENA_MASK 0x2000 /* LINEOUT1N_ENA */
+#define WM8993_LINEOUT1N_ENA_SHIFT 13 /* LINEOUT1N_ENA */
+#define WM8993_LINEOUT1N_ENA_WIDTH 1 /* LINEOUT1N_ENA */
+#define WM8993_LINEOUT1P_ENA 0x1000 /* LINEOUT1P_ENA */
+#define WM8993_LINEOUT1P_ENA_MASK 0x1000 /* LINEOUT1P_ENA */
+#define WM8993_LINEOUT1P_ENA_SHIFT 12 /* LINEOUT1P_ENA */
+#define WM8993_LINEOUT1P_ENA_WIDTH 1 /* LINEOUT1P_ENA */
+#define WM8993_LINEOUT2N_ENA 0x0800 /* LINEOUT2N_ENA */
+#define WM8993_LINEOUT2N_ENA_MASK 0x0800 /* LINEOUT2N_ENA */
+#define WM8993_LINEOUT2N_ENA_SHIFT 11 /* LINEOUT2N_ENA */
+#define WM8993_LINEOUT2N_ENA_WIDTH 1 /* LINEOUT2N_ENA */
+#define WM8993_LINEOUT2P_ENA 0x0400 /* LINEOUT2P_ENA */
+#define WM8993_LINEOUT2P_ENA_MASK 0x0400 /* LINEOUT2P_ENA */
+#define WM8993_LINEOUT2P_ENA_SHIFT 10 /* LINEOUT2P_ENA */
+#define WM8993_LINEOUT2P_ENA_WIDTH 1 /* LINEOUT2P_ENA */
+#define WM8993_SPKRVOL_ENA 0x0200 /* SPKRVOL_ENA */
+#define WM8993_SPKRVOL_ENA_MASK 0x0200 /* SPKRVOL_ENA */
+#define WM8993_SPKRVOL_ENA_SHIFT 9 /* SPKRVOL_ENA */
+#define WM8993_SPKRVOL_ENA_WIDTH 1 /* SPKRVOL_ENA */
+#define WM8993_SPKLVOL_ENA 0x0100 /* SPKLVOL_ENA */
+#define WM8993_SPKLVOL_ENA_MASK 0x0100 /* SPKLVOL_ENA */
+#define WM8993_SPKLVOL_ENA_SHIFT 8 /* SPKLVOL_ENA */
+#define WM8993_SPKLVOL_ENA_WIDTH 1 /* SPKLVOL_ENA */
+#define WM8993_MIXOUTLVOL_ENA 0x0080 /* MIXOUTLVOL_ENA */
+#define WM8993_MIXOUTLVOL_ENA_MASK 0x0080 /* MIXOUTLVOL_ENA */
+#define WM8993_MIXOUTLVOL_ENA_SHIFT 7 /* MIXOUTLVOL_ENA */
+#define WM8993_MIXOUTLVOL_ENA_WIDTH 1 /* MIXOUTLVOL_ENA */
+#define WM8993_MIXOUTRVOL_ENA 0x0040 /* MIXOUTRVOL_ENA */
+#define WM8993_MIXOUTRVOL_ENA_MASK 0x0040 /* MIXOUTRVOL_ENA */
+#define WM8993_MIXOUTRVOL_ENA_SHIFT 6 /* MIXOUTRVOL_ENA */
+#define WM8993_MIXOUTRVOL_ENA_WIDTH 1 /* MIXOUTRVOL_ENA */
+#define WM8993_MIXOUTL_ENA 0x0020 /* MIXOUTL_ENA */
+#define WM8993_MIXOUTL_ENA_MASK 0x0020 /* MIXOUTL_ENA */
+#define WM8993_MIXOUTL_ENA_SHIFT 5 /* MIXOUTL_ENA */
+#define WM8993_MIXOUTL_ENA_WIDTH 1 /* MIXOUTL_ENA */
+#define WM8993_MIXOUTR_ENA 0x0010 /* MIXOUTR_ENA */
+#define WM8993_MIXOUTR_ENA_MASK 0x0010 /* MIXOUTR_ENA */
+#define WM8993_MIXOUTR_ENA_SHIFT 4 /* MIXOUTR_ENA */
+#define WM8993_MIXOUTR_ENA_WIDTH 1 /* MIXOUTR_ENA */
+#define WM8993_DACL_ENA 0x0002 /* DACL_ENA */
+#define WM8993_DACL_ENA_MASK 0x0002 /* DACL_ENA */
+#define WM8993_DACL_ENA_SHIFT 1 /* DACL_ENA */
+#define WM8993_DACL_ENA_WIDTH 1 /* DACL_ENA */
+#define WM8993_DACR_ENA 0x0001 /* DACR_ENA */
+#define WM8993_DACR_ENA_MASK 0x0001 /* DACR_ENA */
+#define WM8993_DACR_ENA_SHIFT 0 /* DACR_ENA */
+#define WM8993_DACR_ENA_WIDTH 1 /* DACR_ENA */
+
+/*
+ * R4 (0x04) - Audio Interface (1)
+ */
+#define WM8993_AIFADCL_SRC 0x8000 /* AIFADCL_SRC */
+#define WM8993_AIFADCL_SRC_MASK 0x8000 /* AIFADCL_SRC */
+#define WM8993_AIFADCL_SRC_SHIFT 15 /* AIFADCL_SRC */
+#define WM8993_AIFADCL_SRC_WIDTH 1 /* AIFADCL_SRC */
+#define WM8993_AIFADCR_SRC 0x4000 /* AIFADCR_SRC */
+#define WM8993_AIFADCR_SRC_MASK 0x4000 /* AIFADCR_SRC */
+#define WM8993_AIFADCR_SRC_SHIFT 14 /* AIFADCR_SRC */
+#define WM8993_AIFADCR_SRC_WIDTH 1 /* AIFADCR_SRC */
+#define WM8993_AIFADC_TDM 0x2000 /* AIFADC_TDM */
+#define WM8993_AIFADC_TDM_MASK 0x2000 /* AIFADC_TDM */
+#define WM8993_AIFADC_TDM_SHIFT 13 /* AIFADC_TDM */
+#define WM8993_AIFADC_TDM_WIDTH 1 /* AIFADC_TDM */
+#define WM8993_AIFADC_TDM_CHAN 0x1000 /* AIFADC_TDM_CHAN */
+#define WM8993_AIFADC_TDM_CHAN_MASK 0x1000 /* AIFADC_TDM_CHAN */
+#define WM8993_AIFADC_TDM_CHAN_SHIFT 12 /* AIFADC_TDM_CHAN */
+#define WM8993_AIFADC_TDM_CHAN_WIDTH 1 /* AIFADC_TDM_CHAN */
+#define WM8993_BCLK_DIR 0x0200 /* BCLK_DIR */
+#define WM8993_BCLK_DIR_MASK 0x0200 /* BCLK_DIR */
+#define WM8993_BCLK_DIR_SHIFT 9 /* BCLK_DIR */
+#define WM8993_BCLK_DIR_WIDTH 1 /* BCLK_DIR */
+#define WM8993_AIF_BCLK_INV 0x0100 /* AIF_BCLK_INV */
+#define WM8993_AIF_BCLK_INV_MASK 0x0100 /* AIF_BCLK_INV */
+#define WM8993_AIF_BCLK_INV_SHIFT 8 /* AIF_BCLK_INV */
+#define WM8993_AIF_BCLK_INV_WIDTH 1 /* AIF_BCLK_INV */
+#define WM8993_AIF_LRCLK_INV 0x0080 /* AIF_LRCLK_INV */
+#define WM8993_AIF_LRCLK_INV_MASK 0x0080 /* AIF_LRCLK_INV */
+#define WM8993_AIF_LRCLK_INV_SHIFT 7 /* AIF_LRCLK_INV */
+#define WM8993_AIF_LRCLK_INV_WIDTH 1 /* AIF_LRCLK_INV */
+#define WM8993_AIF_WL_MASK 0x0060 /* AIF_WL - [6:5] */
+#define WM8993_AIF_WL_SHIFT 5 /* AIF_WL - [6:5] */
+#define WM8993_AIF_WL_WIDTH 2 /* AIF_WL - [6:5] */
+#define WM8993_AIF_FMT_MASK 0x0018 /* AIF_FMT - [4:3] */
+#define WM8993_AIF_FMT_SHIFT 3 /* AIF_FMT - [4:3] */
+#define WM8993_AIF_FMT_WIDTH 2 /* AIF_FMT - [4:3] */
+
+/*
+ * R5 (0x05) - Audio Interface (2)
+ */
+#define WM8993_AIFDACL_SRC 0x8000 /* AIFDACL_SRC */
+#define WM8993_AIFDACL_SRC_MASK 0x8000 /* AIFDACL_SRC */
+#define WM8993_AIFDACL_SRC_SHIFT 15 /* AIFDACL_SRC */
+#define WM8993_AIFDACL_SRC_WIDTH 1 /* AIFDACL_SRC */
+#define WM8993_AIFDACR_SRC 0x4000 /* AIFDACR_SRC */
+#define WM8993_AIFDACR_SRC_MASK 0x4000 /* AIFDACR_SRC */
+#define WM8993_AIFDACR_SRC_SHIFT 14 /* AIFDACR_SRC */
+#define WM8993_AIFDACR_SRC_WIDTH 1 /* AIFDACR_SRC */
+#define WM8993_AIFDAC_TDM 0x2000 /* AIFDAC_TDM */
+#define WM8993_AIFDAC_TDM_MASK 0x2000 /* AIFDAC_TDM */
+#define WM8993_AIFDAC_TDM_SHIFT 13 /* AIFDAC_TDM */
+#define WM8993_AIFDAC_TDM_WIDTH 1 /* AIFDAC_TDM */
+#define WM8993_AIFDAC_TDM_CHAN 0x1000 /* AIFDAC_TDM_CHAN */
+#define WM8993_AIFDAC_TDM_CHAN_MASK 0x1000 /* AIFDAC_TDM_CHAN */
+#define WM8993_AIFDAC_TDM_CHAN_SHIFT 12 /* AIFDAC_TDM_CHAN */
+#define WM8993_AIFDAC_TDM_CHAN_WIDTH 1 /* AIFDAC_TDM_CHAN */
+#define WM8993_DAC_BOOST_MASK 0x0C00 /* DAC_BOOST - [11:10] */
+#define WM8993_DAC_BOOST_SHIFT 10 /* DAC_BOOST - [11:10] */
+#define WM8993_DAC_BOOST_WIDTH 2 /* DAC_BOOST - [11:10] */
+#define WM8993_DAC_COMP 0x0010 /* DAC_COMP */
+#define WM8993_DAC_COMP_MASK 0x0010 /* DAC_COMP */
+#define WM8993_DAC_COMP_SHIFT 4 /* DAC_COMP */
+#define WM8993_DAC_COMP_WIDTH 1 /* DAC_COMP */
+#define WM8993_DAC_COMPMODE 0x0008 /* DAC_COMPMODE */
+#define WM8993_DAC_COMPMODE_MASK 0x0008 /* DAC_COMPMODE */
+#define WM8993_DAC_COMPMODE_SHIFT 3 /* DAC_COMPMODE */
+#define WM8993_DAC_COMPMODE_WIDTH 1 /* DAC_COMPMODE */
+#define WM8993_ADC_COMP 0x0004 /* ADC_COMP */
+#define WM8993_ADC_COMP_MASK 0x0004 /* ADC_COMP */
+#define WM8993_ADC_COMP_SHIFT 2 /* ADC_COMP */
+#define WM8993_ADC_COMP_WIDTH 1 /* ADC_COMP */
+#define WM8993_ADC_COMPMODE 0x0002 /* ADC_COMPMODE */
+#define WM8993_ADC_COMPMODE_MASK 0x0002 /* ADC_COMPMODE */
+#define WM8993_ADC_COMPMODE_SHIFT 1 /* ADC_COMPMODE */
+#define WM8993_ADC_COMPMODE_WIDTH 1 /* ADC_COMPMODE */
+#define WM8993_LOOPBACK 0x0001 /* LOOPBACK */
+#define WM8993_LOOPBACK_MASK 0x0001 /* LOOPBACK */
+#define WM8993_LOOPBACK_SHIFT 0 /* LOOPBACK */
+#define WM8993_LOOPBACK_WIDTH 1 /* LOOPBACK */
+
+/*
+ * R6 (0x06) - Clocking 1
+ */
+#define WM8993_TOCLK_RATE 0x8000 /* TOCLK_RATE */
+#define WM8993_TOCLK_RATE_MASK 0x8000 /* TOCLK_RATE */
+#define WM8993_TOCLK_RATE_SHIFT 15 /* TOCLK_RATE */
+#define WM8993_TOCLK_RATE_WIDTH 1 /* TOCLK_RATE */
+#define WM8993_TOCLK_ENA 0x4000 /* TOCLK_ENA */
+#define WM8993_TOCLK_ENA_MASK 0x4000 /* TOCLK_ENA */
+#define WM8993_TOCLK_ENA_SHIFT 14 /* TOCLK_ENA */
+#define WM8993_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */
+#define WM8993_OPCLK_DIV_MASK 0x1E00 /* OPCLK_DIV - [12:9] */
+#define WM8993_OPCLK_DIV_SHIFT 9 /* OPCLK_DIV - [12:9] */
+#define WM8993_OPCLK_DIV_WIDTH 4 /* OPCLK_DIV - [12:9] */
+#define WM8993_DCLK_DIV_MASK 0x01C0 /* DCLK_DIV - [8:6] */
+#define WM8993_DCLK_DIV_SHIFT 6 /* DCLK_DIV - [8:6] */
+#define WM8993_DCLK_DIV_WIDTH 3 /* DCLK_DIV - [8:6] */
+#define WM8993_BCLK_DIV_MASK 0x001E /* BCLK_DIV - [4:1] */
+#define WM8993_BCLK_DIV_SHIFT 1 /* BCLK_DIV - [4:1] */
+#define WM8993_BCLK_DIV_WIDTH 4 /* BCLK_DIV - [4:1] */
+
+/*
+ * R7 (0x07) - Clocking 2
+ */
+#define WM8993_MCLK_SRC 0x8000 /* MCLK_SRC */
+#define WM8993_MCLK_SRC_MASK 0x8000 /* MCLK_SRC */
+#define WM8993_MCLK_SRC_SHIFT 15 /* MCLK_SRC */
+#define WM8993_MCLK_SRC_WIDTH 1 /* MCLK_SRC */
+#define WM8993_SYSCLK_SRC 0x4000 /* SYSCLK_SRC */
+#define WM8993_SYSCLK_SRC_MASK 0x4000 /* SYSCLK_SRC */
+#define WM8993_SYSCLK_SRC_SHIFT 14 /* SYSCLK_SRC */
+#define WM8993_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */
+#define WM8993_MCLK_DIV 0x1000 /* MCLK_DIV */
+#define WM8993_MCLK_DIV_MASK 0x1000 /* MCLK_DIV */
+#define WM8993_MCLK_DIV_SHIFT 12 /* MCLK_DIV */
+#define WM8993_MCLK_DIV_WIDTH 1 /* MCLK_DIV */
+#define WM8993_MCLK_INV 0x0400 /* MCLK_INV */
+#define WM8993_MCLK_INV_MASK 0x0400 /* MCLK_INV */
+#define WM8993_MCLK_INV_SHIFT 10 /* MCLK_INV */
+#define WM8993_MCLK_INV_WIDTH 1 /* MCLK_INV */
+#define WM8993_ADC_DIV_MASK 0x00E0 /* ADC_DIV - [7:5] */
+#define WM8993_ADC_DIV_SHIFT 5 /* ADC_DIV - [7:5] */
+#define WM8993_ADC_DIV_WIDTH 3 /* ADC_DIV - [7:5] */
+#define WM8993_DAC_DIV_MASK 0x001C /* DAC_DIV - [4:2] */
+#define WM8993_DAC_DIV_SHIFT 2 /* DAC_DIV - [4:2] */
+#define WM8993_DAC_DIV_WIDTH 3 /* DAC_DIV - [4:2] */
+
+/*
+ * R8 (0x08) - Audio Interface (3)
+ */
+#define WM8993_AIF_MSTR1 0x8000 /* AIF_MSTR1 */
+#define WM8993_AIF_MSTR1_MASK 0x8000 /* AIF_MSTR1 */
+#define WM8993_AIF_MSTR1_SHIFT 15 /* AIF_MSTR1 */
+#define WM8993_AIF_MSTR1_WIDTH 1 /* AIF_MSTR1 */
+
+/*
+ * R9 (0x09) - Audio Interface (4)
+ */
+#define WM8993_AIF_TRIS 0x2000 /* AIF_TRIS */
+#define WM8993_AIF_TRIS_MASK 0x2000 /* AIF_TRIS */
+#define WM8993_AIF_TRIS_SHIFT 13 /* AIF_TRIS */
+#define WM8993_AIF_TRIS_WIDTH 1 /* AIF_TRIS */
+#define WM8993_LRCLK_DIR 0x0800 /* LRCLK_DIR */
+#define WM8993_LRCLK_DIR_MASK 0x0800 /* LRCLK_DIR */
+#define WM8993_LRCLK_DIR_SHIFT 11 /* LRCLK_DIR */
+#define WM8993_LRCLK_DIR_WIDTH 1 /* LRCLK_DIR */
+#define WM8993_LRCLK_RATE_MASK 0x07FF /* LRCLK_RATE - [10:0] */
+#define WM8993_LRCLK_RATE_SHIFT 0 /* LRCLK_RATE - [10:0] */
+#define WM8993_LRCLK_RATE_WIDTH 11 /* LRCLK_RATE - [10:0] */
+
+/*
+ * R10 (0x0A) - DAC CTRL
+ */
+#define WM8993_DAC_OSR128 0x2000 /* DAC_OSR128 */
+#define WM8993_DAC_OSR128_MASK 0x2000 /* DAC_OSR128 */
+#define WM8993_DAC_OSR128_SHIFT 13 /* DAC_OSR128 */
+#define WM8993_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */
+#define WM8993_DAC_MONO 0x0200 /* DAC_MONO */
+#define WM8993_DAC_MONO_MASK 0x0200 /* DAC_MONO */
+#define WM8993_DAC_MONO_SHIFT 9 /* DAC_MONO */
+#define WM8993_DAC_MONO_WIDTH 1 /* DAC_MONO */
+#define WM8993_DAC_SB_FILT 0x0100 /* DAC_SB_FILT */
+#define WM8993_DAC_SB_FILT_MASK 0x0100 /* DAC_SB_FILT */
+#define WM8993_DAC_SB_FILT_SHIFT 8 /* DAC_SB_FILT */
+#define WM8993_DAC_SB_FILT_WIDTH 1 /* DAC_SB_FILT */
+#define WM8993_DAC_MUTERATE 0x0080 /* DAC_MUTERATE */
+#define WM8993_DAC_MUTERATE_MASK 0x0080 /* DAC_MUTERATE */
+#define WM8993_DAC_MUTERATE_SHIFT 7 /* DAC_MUTERATE */
+#define WM8993_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */
+#define WM8993_DAC_UNMUTE_RAMP 0x0040 /* DAC_UNMUTE_RAMP */
+#define WM8993_DAC_UNMUTE_RAMP_MASK 0x0040 /* DAC_UNMUTE_RAMP */
+#define WM8993_DAC_UNMUTE_RAMP_SHIFT 6 /* DAC_UNMUTE_RAMP */
+#define WM8993_DAC_UNMUTE_RAMP_WIDTH 1 /* DAC_UNMUTE_RAMP */
+#define WM8993_DEEMPH_MASK 0x0030 /* DEEMPH - [5:4] */
+#define WM8993_DEEMPH_SHIFT 4 /* DEEMPH - [5:4] */
+#define WM8993_DEEMPH_WIDTH 2 /* DEEMPH - [5:4] */
+#define WM8993_DAC_MUTE 0x0004 /* DAC_MUTE */
+#define WM8993_DAC_MUTE_MASK 0x0004 /* DAC_MUTE */
+#define WM8993_DAC_MUTE_SHIFT 2 /* DAC_MUTE */
+#define WM8993_DAC_MUTE_WIDTH 1 /* DAC_MUTE */
+#define WM8993_DACL_DATINV 0x0002 /* DACL_DATINV */
+#define WM8993_DACL_DATINV_MASK 0x0002 /* DACL_DATINV */
+#define WM8993_DACL_DATINV_SHIFT 1 /* DACL_DATINV */
+#define WM8993_DACL_DATINV_WIDTH 1 /* DACL_DATINV */
+#define WM8993_DACR_DATINV 0x0001 /* DACR_DATINV */
+#define WM8993_DACR_DATINV_MASK 0x0001 /* DACR_DATINV */
+#define WM8993_DACR_DATINV_SHIFT 0 /* DACR_DATINV */
+#define WM8993_DACR_DATINV_WIDTH 1 /* DACR_DATINV */
+
+/*
+ * R11 (0x0B) - Left DAC Digital Volume
+ */
+#define WM8993_DAC_VU 0x0100 /* DAC_VU */
+#define WM8993_DAC_VU_MASK 0x0100 /* DAC_VU */
+#define WM8993_DAC_VU_SHIFT 8 /* DAC_VU */
+#define WM8993_DAC_VU_WIDTH 1 /* DAC_VU */
+#define WM8993_DACL_VOL_MASK 0x00FF /* DACL_VOL - [7:0] */
+#define WM8993_DACL_VOL_SHIFT 0 /* DACL_VOL - [7:0] */
+#define WM8993_DACL_VOL_WIDTH 8 /* DACL_VOL - [7:0] */
+
+/*
+ * R12 (0x0C) - Right DAC Digital Volume
+ */
+#define WM8993_DAC_VU 0x0100 /* DAC_VU */
+#define WM8993_DAC_VU_MASK 0x0100 /* DAC_VU */
+#define WM8993_DAC_VU_SHIFT 8 /* DAC_VU */
+#define WM8993_DAC_VU_WIDTH 1 /* DAC_VU */
+#define WM8993_DACR_VOL_MASK 0x00FF /* DACR_VOL - [7:0] */
+#define WM8993_DACR_VOL_SHIFT 0 /* DACR_VOL - [7:0] */
+#define WM8993_DACR_VOL_WIDTH 8 /* DACR_VOL - [7:0] */
+
+/*
+ * R13 (0x0D) - Digital Side Tone
+ */
+#define WM8993_ADCL_DAC_SVOL_MASK 0x1E00 /* ADCL_DAC_SVOL - [12:9] */
+#define WM8993_ADCL_DAC_SVOL_SHIFT 9 /* ADCL_DAC_SVOL - [12:9] */
+#define WM8993_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [12:9] */
+#define WM8993_ADCR_DAC_SVOL_MASK 0x01E0 /* ADCR_DAC_SVOL - [8:5] */
+#define WM8993_ADCR_DAC_SVOL_SHIFT 5 /* ADCR_DAC_SVOL - [8:5] */
+#define WM8993_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [8:5] */
+#define WM8993_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */
+#define WM8993_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */
+#define WM8993_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */
+#define WM8993_ADC_TO_DACR_MASK 0x0003 /* ADC_TO_DACR - [1:0] */
+#define WM8993_ADC_TO_DACR_SHIFT 0 /* ADC_TO_DACR - [1:0] */
+#define WM8993_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [1:0] */
+
+/*
+ * R14 (0x0E) - ADC CTRL
+ */
+#define WM8993_ADC_OSR128 0x0200 /* ADC_OSR128 */
+#define WM8993_ADC_OSR128_MASK 0x0200 /* ADC_OSR128 */
+#define WM8993_ADC_OSR128_SHIFT 9 /* ADC_OSR128 */
+#define WM8993_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */
+#define WM8993_ADC_HPF 0x0100 /* ADC_HPF */
+#define WM8993_ADC_HPF_MASK 0x0100 /* ADC_HPF */
+#define WM8993_ADC_HPF_SHIFT 8 /* ADC_HPF */
+#define WM8993_ADC_HPF_WIDTH 1 /* ADC_HPF */
+#define WM8993_ADC_HPF_CUT_MASK 0x0060 /* ADC_HPF_CUT - [6:5] */
+#define WM8993_ADC_HPF_CUT_SHIFT 5 /* ADC_HPF_CUT - [6:5] */
+#define WM8993_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [6:5] */
+#define WM8993_ADCL_DATINV 0x0002 /* ADCL_DATINV */
+#define WM8993_ADCL_DATINV_MASK 0x0002 /* ADCL_DATINV */
+#define WM8993_ADCL_DATINV_SHIFT 1 /* ADCL_DATINV */
+#define WM8993_ADCL_DATINV_WIDTH 1 /* ADCL_DATINV */
+#define WM8993_ADCR_DATINV 0x0001 /* ADCR_DATINV */
+#define WM8993_ADCR_DATINV_MASK 0x0001 /* ADCR_DATINV */
+#define WM8993_ADCR_DATINV_SHIFT 0 /* ADCR_DATINV */
+#define WM8993_ADCR_DATINV_WIDTH 1 /* ADCR_DATINV */
+
+/*
+ * R15 (0x0F) - Left ADC Digital Volume
+ */
+#define WM8993_ADC_VU 0x0100 /* ADC_VU */
+#define WM8993_ADC_VU_MASK 0x0100 /* ADC_VU */
+#define WM8993_ADC_VU_SHIFT 8 /* ADC_VU */
+#define WM8993_ADC_VU_WIDTH 1 /* ADC_VU */
+#define WM8993_ADCL_VOL_MASK 0x00FF /* ADCL_VOL - [7:0] */
+#define WM8993_ADCL_VOL_SHIFT 0 /* ADCL_VOL - [7:0] */
+#define WM8993_ADCL_VOL_WIDTH 8 /* ADCL_VOL - [7:0] */
+
+/*
+ * R16 (0x10) - Right ADC Digital Volume
+ */
+#define WM8993_ADC_VU 0x0100 /* ADC_VU */
+#define WM8993_ADC_VU_MASK 0x0100 /* ADC_VU */
+#define WM8993_ADC_VU_SHIFT 8 /* ADC_VU */
+#define WM8993_ADC_VU_WIDTH 1 /* ADC_VU */
+#define WM8993_ADCR_VOL_MASK 0x00FF /* ADCR_VOL - [7:0] */
+#define WM8993_ADCR_VOL_SHIFT 0 /* ADCR_VOL - [7:0] */
+#define WM8993_ADCR_VOL_WIDTH 8 /* ADCR_VOL - [7:0] */
+
+/*
+ * R18 (0x12) - GPIO CTRL 1
+ */
+#define WM8993_JD2_SC_EINT 0x8000 /* JD2_SC_EINT */
+#define WM8993_JD2_SC_EINT_MASK 0x8000 /* JD2_SC_EINT */
+#define WM8993_JD2_SC_EINT_SHIFT 15 /* JD2_SC_EINT */
+#define WM8993_JD2_SC_EINT_WIDTH 1 /* JD2_SC_EINT */
+#define WM8993_JD2_EINT 0x4000 /* JD2_EINT */
+#define WM8993_JD2_EINT_MASK 0x4000 /* JD2_EINT */
+#define WM8993_JD2_EINT_SHIFT 14 /* JD2_EINT */
+#define WM8993_JD2_EINT_WIDTH 1 /* JD2_EINT */
+#define WM8993_WSEQ_EINT 0x2000 /* WSEQ_EINT */
+#define WM8993_WSEQ_EINT_MASK 0x2000 /* WSEQ_EINT */
+#define WM8993_WSEQ_EINT_SHIFT 13 /* WSEQ_EINT */
+#define WM8993_WSEQ_EINT_WIDTH 1 /* WSEQ_EINT */
+#define WM8993_IRQ 0x1000 /* IRQ */
+#define WM8993_IRQ_MASK 0x1000 /* IRQ */
+#define WM8993_IRQ_SHIFT 12 /* IRQ */
+#define WM8993_IRQ_WIDTH 1 /* IRQ */
+#define WM8993_TEMPOK_EINT 0x0800 /* TEMPOK_EINT */
+#define WM8993_TEMPOK_EINT_MASK 0x0800 /* TEMPOK_EINT */
+#define WM8993_TEMPOK_EINT_SHIFT 11 /* TEMPOK_EINT */
+#define WM8993_TEMPOK_EINT_WIDTH 1 /* TEMPOK_EINT */
+#define WM8993_JD1_SC_EINT 0x0400 /* JD1_SC_EINT */
+#define WM8993_JD1_SC_EINT_MASK 0x0400 /* JD1_SC_EINT */
+#define WM8993_JD1_SC_EINT_SHIFT 10 /* JD1_SC_EINT */
+#define WM8993_JD1_SC_EINT_WIDTH 1 /* JD1_SC_EINT */
+#define WM8993_JD1_EINT 0x0200 /* JD1_EINT */
+#define WM8993_JD1_EINT_MASK 0x0200 /* JD1_EINT */
+#define WM8993_JD1_EINT_SHIFT 9 /* JD1_EINT */
+#define WM8993_JD1_EINT_WIDTH 1 /* JD1_EINT */
+#define WM8993_FLL_LOCK_EINT 0x0100 /* FLL_LOCK_EINT */
+#define WM8993_FLL_LOCK_EINT_MASK 0x0100 /* FLL_LOCK_EINT */
+#define WM8993_FLL_LOCK_EINT_SHIFT 8 /* FLL_LOCK_EINT */
+#define WM8993_FLL_LOCK_EINT_WIDTH 1 /* FLL_LOCK_EINT */
+#define WM8993_GPI8_EINT 0x0080 /* GPI8_EINT */
+#define WM8993_GPI8_EINT_MASK 0x0080 /* GPI8_EINT */
+#define WM8993_GPI8_EINT_SHIFT 7 /* GPI8_EINT */
+#define WM8993_GPI8_EINT_WIDTH 1 /* GPI8_EINT */
+#define WM8993_GPI7_EINT 0x0040 /* GPI7_EINT */
+#define WM8993_GPI7_EINT_MASK 0x0040 /* GPI7_EINT */
+#define WM8993_GPI7_EINT_SHIFT 6 /* GPI7_EINT */
+#define WM8993_GPI7_EINT_WIDTH 1 /* GPI7_EINT */
+#define WM8993_GPIO1_EINT 0x0001 /* GPIO1_EINT */
+#define WM8993_GPIO1_EINT_MASK 0x0001 /* GPIO1_EINT */
+#define WM8993_GPIO1_EINT_SHIFT 0 /* GPIO1_EINT */
+#define WM8993_GPIO1_EINT_WIDTH 1 /* GPIO1_EINT */
+
+/*
+ * R19 (0x13) - GPIO1
+ */
+#define WM8993_GPIO1_PU 0x0020 /* GPIO1_PU */
+#define WM8993_GPIO1_PU_MASK 0x0020 /* GPIO1_PU */
+#define WM8993_GPIO1_PU_SHIFT 5 /* GPIO1_PU */
+#define WM8993_GPIO1_PU_WIDTH 1 /* GPIO1_PU */
+#define WM8993_GPIO1_PD 0x0010 /* GPIO1_PD */
+#define WM8993_GPIO1_PD_MASK 0x0010 /* GPIO1_PD */
+#define WM8993_GPIO1_PD_SHIFT 4 /* GPIO1_PD */
+#define WM8993_GPIO1_PD_WIDTH 1 /* GPIO1_PD */
+#define WM8993_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */
+#define WM8993_GPIO1_SEL_SHIFT 0 /* GPIO1_SEL - [3:0] */
+#define WM8993_GPIO1_SEL_WIDTH 4 /* GPIO1_SEL - [3:0] */
+
+/*
+ * R20 (0x14) - IRQ_DEBOUNCE
+ */
+#define WM8993_JD2_SC_DB 0x8000 /* JD2_SC_DB */
+#define WM8993_JD2_SC_DB_MASK 0x8000 /* JD2_SC_DB */
+#define WM8993_JD2_SC_DB_SHIFT 15 /* JD2_SC_DB */
+#define WM8993_JD2_SC_DB_WIDTH 1 /* JD2_SC_DB */
+#define WM8993_JD2_DB 0x4000 /* JD2_DB */
+#define WM8993_JD2_DB_MASK 0x4000 /* JD2_DB */
+#define WM8993_JD2_DB_SHIFT 14 /* JD2_DB */
+#define WM8993_JD2_DB_WIDTH 1 /* JD2_DB */
+#define WM8993_WSEQ_DB 0x2000 /* WSEQ_DB */
+#define WM8993_WSEQ_DB_MASK 0x2000 /* WSEQ_DB */
+#define WM8993_WSEQ_DB_SHIFT 13 /* WSEQ_DB */
+#define WM8993_WSEQ_DB_WIDTH 1 /* WSEQ_DB */
+#define WM8993_TEMPOK_DB 0x0800 /* TEMPOK_DB */
+#define WM8993_TEMPOK_DB_MASK 0x0800 /* TEMPOK_DB */
+#define WM8993_TEMPOK_DB_SHIFT 11 /* TEMPOK_DB */
+#define WM8993_TEMPOK_DB_WIDTH 1 /* TEMPOK_DB */
+#define WM8993_JD1_SC_DB 0x0400 /* JD1_SC_DB */
+#define WM8993_JD1_SC_DB_MASK 0x0400 /* JD1_SC_DB */
+#define WM8993_JD1_SC_DB_SHIFT 10 /* JD1_SC_DB */
+#define WM8993_JD1_SC_DB_WIDTH 1 /* JD1_SC_DB */
+#define WM8993_JD1_DB 0x0200 /* JD1_DB */
+#define WM8993_JD1_DB_MASK 0x0200 /* JD1_DB */
+#define WM8993_JD1_DB_SHIFT 9 /* JD1_DB */
+#define WM8993_JD1_DB_WIDTH 1 /* JD1_DB */
+#define WM8993_FLL_LOCK_DB 0x0100 /* FLL_LOCK_DB */
+#define WM8993_FLL_LOCK_DB_MASK 0x0100 /* FLL_LOCK_DB */
+#define WM8993_FLL_LOCK_DB_SHIFT 8 /* FLL_LOCK_DB */
+#define WM8993_FLL_LOCK_DB_WIDTH 1 /* FLL_LOCK_DB */
+#define WM8993_GPI8_DB 0x0080 /* GPI8_DB */
+#define WM8993_GPI8_DB_MASK 0x0080 /* GPI8_DB */
+#define WM8993_GPI8_DB_SHIFT 7 /* GPI8_DB */
+#define WM8993_GPI8_DB_WIDTH 1 /* GPI8_DB */
+#define WM8993_GPI7_DB 0x0008 /* GPI7_DB */
+#define WM8993_GPI7_DB_MASK 0x0008 /* GPI7_DB */
+#define WM8993_GPI7_DB_SHIFT 3 /* GPI7_DB */
+#define WM8993_GPI7_DB_WIDTH 1 /* GPI7_DB */
+#define WM8993_GPIO1_DB 0x0001 /* GPIO1_DB */
+#define WM8993_GPIO1_DB_MASK 0x0001 /* GPIO1_DB */
+#define WM8993_GPIO1_DB_SHIFT 0 /* GPIO1_DB */
+#define WM8993_GPIO1_DB_WIDTH 1 /* GPIO1_DB */
+
+/*
+ * R22 (0x16) - GPIOCTRL 2
+ */
+#define WM8993_IM_JD2_EINT 0x2000 /* IM_JD2_EINT */
+#define WM8993_IM_JD2_EINT_MASK 0x2000 /* IM_JD2_EINT */
+#define WM8993_IM_JD2_EINT_SHIFT 13 /* IM_JD2_EINT */
+#define WM8993_IM_JD2_EINT_WIDTH 1 /* IM_JD2_EINT */
+#define WM8993_IM_JD2_SC_EINT 0x1000 /* IM_JD2_SC_EINT */
+#define WM8993_IM_JD2_SC_EINT_MASK 0x1000 /* IM_JD2_SC_EINT */
+#define WM8993_IM_JD2_SC_EINT_SHIFT 12 /* IM_JD2_SC_EINT */
+#define WM8993_IM_JD2_SC_EINT_WIDTH 1 /* IM_JD2_SC_EINT */
+#define WM8993_IM_TEMPOK_EINT 0x0800 /* IM_TEMPOK_EINT */
+#define WM8993_IM_TEMPOK_EINT_MASK 0x0800 /* IM_TEMPOK_EINT */
+#define WM8993_IM_TEMPOK_EINT_SHIFT 11 /* IM_TEMPOK_EINT */
+#define WM8993_IM_TEMPOK_EINT_WIDTH 1 /* IM_TEMPOK_EINT */
+#define WM8993_IM_JD1_SC_EINT 0x0400 /* IM_JD1_SC_EINT */
+#define WM8993_IM_JD1_SC_EINT_MASK 0x0400 /* IM_JD1_SC_EINT */
+#define WM8993_IM_JD1_SC_EINT_SHIFT 10 /* IM_JD1_SC_EINT */
+#define WM8993_IM_JD1_SC_EINT_WIDTH 1 /* IM_JD1_SC_EINT */
+#define WM8993_IM_JD1_EINT 0x0200 /* IM_JD1_EINT */
+#define WM8993_IM_JD1_EINT_MASK 0x0200 /* IM_JD1_EINT */
+#define WM8993_IM_JD1_EINT_SHIFT 9 /* IM_JD1_EINT */
+#define WM8993_IM_JD1_EINT_WIDTH 1 /* IM_JD1_EINT */
+#define WM8993_IM_FLL_LOCK_EINT 0x0100 /* IM_FLL_LOCK_EINT */
+#define WM8993_IM_FLL_LOCK_EINT_MASK 0x0100 /* IM_FLL_LOCK_EINT */
+#define WM8993_IM_FLL_LOCK_EINT_SHIFT 8 /* IM_FLL_LOCK_EINT */
+#define WM8993_IM_FLL_LOCK_EINT_WIDTH 1 /* IM_FLL_LOCK_EINT */
+#define WM8993_IM_GPI8_EINT 0x0040 /* IM_GPI8_EINT */
+#define WM8993_IM_GPI8_EINT_MASK 0x0040 /* IM_GPI8_EINT */
+#define WM8993_IM_GPI8_EINT_SHIFT 6 /* IM_GPI8_EINT */
+#define WM8993_IM_GPI8_EINT_WIDTH 1 /* IM_GPI8_EINT */
+#define WM8993_IM_GPIO1_EINT 0x0020 /* IM_GPIO1_EINT */
+#define WM8993_IM_GPIO1_EINT_MASK 0x0020 /* IM_GPIO1_EINT */
+#define WM8993_IM_GPIO1_EINT_SHIFT 5 /* IM_GPIO1_EINT */
+#define WM8993_IM_GPIO1_EINT_WIDTH 1 /* IM_GPIO1_EINT */
+#define WM8993_GPI8_ENA 0x0010 /* GPI8_ENA */
+#define WM8993_GPI8_ENA_MASK 0x0010 /* GPI8_ENA */
+#define WM8993_GPI8_ENA_SHIFT 4 /* GPI8_ENA */
+#define WM8993_GPI8_ENA_WIDTH 1 /* GPI8_ENA */
+#define WM8993_IM_GPI7_EINT 0x0004 /* IM_GPI7_EINT */
+#define WM8993_IM_GPI7_EINT_MASK 0x0004 /* IM_GPI7_EINT */
+#define WM8993_IM_GPI7_EINT_SHIFT 2 /* IM_GPI7_EINT */
+#define WM8993_IM_GPI7_EINT_WIDTH 1 /* IM_GPI7_EINT */
+#define WM8993_IM_WSEQ_EINT 0x0002 /* IM_WSEQ_EINT */
+#define WM8993_IM_WSEQ_EINT_MASK 0x0002 /* IM_WSEQ_EINT */
+#define WM8993_IM_WSEQ_EINT_SHIFT 1 /* IM_WSEQ_EINT */
+#define WM8993_IM_WSEQ_EINT_WIDTH 1 /* IM_WSEQ_EINT */
+#define WM8993_GPI7_ENA 0x0001 /* GPI7_ENA */
+#define WM8993_GPI7_ENA_MASK 0x0001 /* GPI7_ENA */
+#define WM8993_GPI7_ENA_SHIFT 0 /* GPI7_ENA */
+#define WM8993_GPI7_ENA_WIDTH 1 /* GPI7_ENA */
+
+/*
+ * R23 (0x17) - GPIO_POL
+ */
+#define WM8993_JD2_SC_POL 0x8000 /* JD2_SC_POL */
+#define WM8993_JD2_SC_POL_MASK 0x8000 /* JD2_SC_POL */
+#define WM8993_JD2_SC_POL_SHIFT 15 /* JD2_SC_POL */
+#define WM8993_JD2_SC_POL_WIDTH 1 /* JD2_SC_POL */
+#define WM8993_JD2_POL 0x4000 /* JD2_POL */
+#define WM8993_JD2_POL_MASK 0x4000 /* JD2_POL */
+#define WM8993_JD2_POL_SHIFT 14 /* JD2_POL */
+#define WM8993_JD2_POL_WIDTH 1 /* JD2_POL */
+#define WM8993_WSEQ_POL 0x2000 /* WSEQ_POL */
+#define WM8993_WSEQ_POL_MASK 0x2000 /* WSEQ_POL */
+#define WM8993_WSEQ_POL_SHIFT 13 /* WSEQ_POL */
+#define WM8993_WSEQ_POL_WIDTH 1 /* WSEQ_POL */
+#define WM8993_IRQ_POL 0x1000 /* IRQ_POL */
+#define WM8993_IRQ_POL_MASK 0x1000 /* IRQ_POL */
+#define WM8993_IRQ_POL_SHIFT 12 /* IRQ_POL */
+#define WM8993_IRQ_POL_WIDTH 1 /* IRQ_POL */
+#define WM8993_TEMPOK_POL 0x0800 /* TEMPOK_POL */
+#define WM8993_TEMPOK_POL_MASK 0x0800 /* TEMPOK_POL */
+#define WM8993_TEMPOK_POL_SHIFT 11 /* TEMPOK_POL */
+#define WM8993_TEMPOK_POL_WIDTH 1 /* TEMPOK_POL */
+#define WM8993_JD1_SC_POL 0x0400 /* JD1_SC_POL */
+#define WM8993_JD1_SC_POL_MASK 0x0400 /* JD1_SC_POL */
+#define WM8993_JD1_SC_POL_SHIFT 10 /* JD1_SC_POL */
+#define WM8993_JD1_SC_POL_WIDTH 1 /* JD1_SC_POL */
+#define WM8993_JD1_POL 0x0200 /* JD1_POL */
+#define WM8993_JD1_POL_MASK 0x0200 /* JD1_POL */
+#define WM8993_JD1_POL_SHIFT 9 /* JD1_POL */
+#define WM8993_JD1_POL_WIDTH 1 /* JD1_POL */
+#define WM8993_FLL_LOCK_POL 0x0100 /* FLL_LOCK_POL */
+#define WM8993_FLL_LOCK_POL_MASK 0x0100 /* FLL_LOCK_POL */
+#define WM8993_FLL_LOCK_POL_SHIFT 8 /* FLL_LOCK_POL */
+#define WM8993_FLL_LOCK_POL_WIDTH 1 /* FLL_LOCK_POL */
+#define WM8993_GPI8_POL 0x0080 /* GPI8_POL */
+#define WM8993_GPI8_POL_MASK 0x0080 /* GPI8_POL */
+#define WM8993_GPI8_POL_SHIFT 7 /* GPI8_POL */
+#define WM8993_GPI8_POL_WIDTH 1 /* GPI8_POL */
+#define WM8993_GPI7_POL 0x0040 /* GPI7_POL */
+#define WM8993_GPI7_POL_MASK 0x0040 /* GPI7_POL */
+#define WM8993_GPI7_POL_SHIFT 6 /* GPI7_POL */
+#define WM8993_GPI7_POL_WIDTH 1 /* GPI7_POL */
+#define WM8993_GPIO1_POL 0x0001 /* GPIO1_POL */
+#define WM8993_GPIO1_POL_MASK 0x0001 /* GPIO1_POL */
+#define WM8993_GPIO1_POL_SHIFT 0 /* GPIO1_POL */
+#define WM8993_GPIO1_POL_WIDTH 1 /* GPIO1_POL */
+
+/*
+ * R24 (0x18) - Left Line Input 1&2 Volume
+ */
+#define WM8993_IN1_VU 0x0100 /* IN1_VU */
+#define WM8993_IN1_VU_MASK 0x0100 /* IN1_VU */
+#define WM8993_IN1_VU_SHIFT 8 /* IN1_VU */
+#define WM8993_IN1_VU_WIDTH 1 /* IN1_VU */
+#define WM8993_IN1L_MUTE 0x0080 /* IN1L_MUTE */
+#define WM8993_IN1L_MUTE_MASK 0x0080 /* IN1L_MUTE */
+#define WM8993_IN1L_MUTE_SHIFT 7 /* IN1L_MUTE */
+#define WM8993_IN1L_MUTE_WIDTH 1 /* IN1L_MUTE */
+#define WM8993_IN1L_ZC 0x0040 /* IN1L_ZC */
+#define WM8993_IN1L_ZC_MASK 0x0040 /* IN1L_ZC */
+#define WM8993_IN1L_ZC_SHIFT 6 /* IN1L_ZC */
+#define WM8993_IN1L_ZC_WIDTH 1 /* IN1L_ZC */
+#define WM8993_IN1L_VOL_MASK 0x001F /* IN1L_VOL - [4:0] */
+#define WM8993_IN1L_VOL_SHIFT 0 /* IN1L_VOL - [4:0] */
+#define WM8993_IN1L_VOL_WIDTH 5 /* IN1L_VOL - [4:0] */
+
+/*
+ * R25 (0x19) - Left Line Input 3&4 Volume
+ */
+#define WM8993_IN2_VU 0x0100 /* IN2_VU */
+#define WM8993_IN2_VU_MASK 0x0100 /* IN2_VU */
+#define WM8993_IN2_VU_SHIFT 8 /* IN2_VU */
+#define WM8993_IN2_VU_WIDTH 1 /* IN2_VU */
+#define WM8993_IN2L_MUTE 0x0080 /* IN2L_MUTE */
+#define WM8993_IN2L_MUTE_MASK 0x0080 /* IN2L_MUTE */
+#define WM8993_IN2L_MUTE_SHIFT 7 /* IN2L_MUTE */
+#define WM8993_IN2L_MUTE_WIDTH 1 /* IN2L_MUTE */
+#define WM8993_IN2L_ZC 0x0040 /* IN2L_ZC */
+#define WM8993_IN2L_ZC_MASK 0x0040 /* IN2L_ZC */
+#define WM8993_IN2L_ZC_SHIFT 6 /* IN2L_ZC */
+#define WM8993_IN2L_ZC_WIDTH 1 /* IN2L_ZC */
+#define WM8993_IN2L_VOL_MASK 0x001F /* IN2L_VOL - [4:0] */
+#define WM8993_IN2L_VOL_SHIFT 0 /* IN2L_VOL - [4:0] */
+#define WM8993_IN2L_VOL_WIDTH 5 /* IN2L_VOL - [4:0] */
+
+/*
+ * R26 (0x1A) - Right Line Input 1&2 Volume
+ */
+#define WM8993_IN1_VU 0x0100 /* IN1_VU */
+#define WM8993_IN1_VU_MASK 0x0100 /* IN1_VU */
+#define WM8993_IN1_VU_SHIFT 8 /* IN1_VU */
+#define WM8993_IN1_VU_WIDTH 1 /* IN1_VU */
+#define WM8993_IN1R_MUTE 0x0080 /* IN1R_MUTE */
+#define WM8993_IN1R_MUTE_MASK 0x0080 /* IN1R_MUTE */
+#define WM8993_IN1R_MUTE_SHIFT 7 /* IN1R_MUTE */
+#define WM8993_IN1R_MUTE_WIDTH 1 /* IN1R_MUTE */
+#define WM8993_IN1R_ZC 0x0040 /* IN1R_ZC */
+#define WM8993_IN1R_ZC_MASK 0x0040 /* IN1R_ZC */
+#define WM8993_IN1R_ZC_SHIFT 6 /* IN1R_ZC */
+#define WM8993_IN1R_ZC_WIDTH 1 /* IN1R_ZC */
+#define WM8993_IN1R_VOL_MASK 0x001F /* IN1R_VOL - [4:0] */
+#define WM8993_IN1R_VOL_SHIFT 0 /* IN1R_VOL - [4:0] */
+#define WM8993_IN1R_VOL_WIDTH 5 /* IN1R_VOL - [4:0] */
+
+/*
+ * R27 (0x1B) - Right Line Input 3&4 Volume
+ */
+#define WM8993_IN2_VU 0x0100 /* IN2_VU */
+#define WM8993_IN2_VU_MASK 0x0100 /* IN2_VU */
+#define WM8993_IN2_VU_SHIFT 8 /* IN2_VU */
+#define WM8993_IN2_VU_WIDTH 1 /* IN2_VU */
+#define WM8993_IN2R_MUTE 0x0080 /* IN2R_MUTE */
+#define WM8993_IN2R_MUTE_MASK 0x0080 /* IN2R_MUTE */
+#define WM8993_IN2R_MUTE_SHIFT 7 /* IN2R_MUTE */
+#define WM8993_IN2R_MUTE_WIDTH 1 /* IN2R_MUTE */
+#define WM8993_IN2R_ZC 0x0040 /* IN2R_ZC */
+#define WM8993_IN2R_ZC_MASK 0x0040 /* IN2R_ZC */
+#define WM8993_IN2R_ZC_SHIFT 6 /* IN2R_ZC */
+#define WM8993_IN2R_ZC_WIDTH 1 /* IN2R_ZC */
+#define WM8993_IN2R_VOL_MASK 0x001F /* IN2R_VOL - [4:0] */
+#define WM8993_IN2R_VOL_SHIFT 0 /* IN2R_VOL - [4:0] */
+#define WM8993_IN2R_VOL_WIDTH 5 /* IN2R_VOL - [4:0] */
+
+/*
+ * R28 (0x1C) - Left Output Volume
+ */
+#define WM8993_HPOUT1_VU 0x0100 /* HPOUT1_VU */
+#define WM8993_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */
+#define WM8993_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */
+#define WM8993_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */
+#define WM8993_HPOUT1L_ZC 0x0080 /* HPOUT1L_ZC */
+#define WM8993_HPOUT1L_ZC_MASK 0x0080 /* HPOUT1L_ZC */
+#define WM8993_HPOUT1L_ZC_SHIFT 7 /* HPOUT1L_ZC */
+#define WM8993_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */
+#define WM8993_HPOUT1L_MUTE_N 0x0040 /* HPOUT1L_MUTE_N */
+#define WM8993_HPOUT1L_MUTE_N_MASK 0x0040 /* HPOUT1L_MUTE_N */
+#define WM8993_HPOUT1L_MUTE_N_SHIFT 6 /* HPOUT1L_MUTE_N */
+#define WM8993_HPOUT1L_MUTE_N_WIDTH 1 /* HPOUT1L_MUTE_N */
+#define WM8993_HPOUT1L_VOL_MASK 0x003F /* HPOUT1L_VOL - [5:0] */
+#define WM8993_HPOUT1L_VOL_SHIFT 0 /* HPOUT1L_VOL - [5:0] */
+#define WM8993_HPOUT1L_VOL_WIDTH 6 /* HPOUT1L_VOL - [5:0] */
+
+/*
+ * R29 (0x1D) - Right Output Volume
+ */
+#define WM8993_HPOUT1_VU 0x0100 /* HPOUT1_VU */
+#define WM8993_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */
+#define WM8993_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */
+#define WM8993_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */
+#define WM8993_HPOUT1R_ZC 0x0080 /* HPOUT1R_ZC */
+#define WM8993_HPOUT1R_ZC_MASK 0x0080 /* HPOUT1R_ZC */
+#define WM8993_HPOUT1R_ZC_SHIFT 7 /* HPOUT1R_ZC */
+#define WM8993_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */
+#define WM8993_HPOUT1R_MUTE_N 0x0040 /* HPOUT1R_MUTE_N */
+#define WM8993_HPOUT1R_MUTE_N_MASK 0x0040 /* HPOUT1R_MUTE_N */
+#define WM8993_HPOUT1R_MUTE_N_SHIFT 6 /* HPOUT1R_MUTE_N */
+#define WM8993_HPOUT1R_MUTE_N_WIDTH 1 /* HPOUT1R_MUTE_N */
+#define WM8993_HPOUT1R_VOL_MASK 0x003F /* HPOUT1R_VOL - [5:0] */
+#define WM8993_HPOUT1R_VOL_SHIFT 0 /* HPOUT1R_VOL - [5:0] */
+#define WM8993_HPOUT1R_VOL_WIDTH 6 /* HPOUT1R_VOL - [5:0] */
+
+/*
+ * R30 (0x1E) - Line Outputs Volume
+ */
+#define WM8993_LINEOUT1N_MUTE 0x0040 /* LINEOUT1N_MUTE */
+#define WM8993_LINEOUT1N_MUTE_MASK 0x0040 /* LINEOUT1N_MUTE */
+#define WM8993_LINEOUT1N_MUTE_SHIFT 6 /* LINEOUT1N_MUTE */
+#define WM8993_LINEOUT1N_MUTE_WIDTH 1 /* LINEOUT1N_MUTE */
+#define WM8993_LINEOUT1P_MUTE 0x0020 /* LINEOUT1P_MUTE */
+#define WM8993_LINEOUT1P_MUTE_MASK 0x0020 /* LINEOUT1P_MUTE */
+#define WM8993_LINEOUT1P_MUTE_SHIFT 5 /* LINEOUT1P_MUTE */
+#define WM8993_LINEOUT1P_MUTE_WIDTH 1 /* LINEOUT1P_MUTE */
+#define WM8993_LINEOUT1_VOL 0x0010 /* LINEOUT1_VOL */
+#define WM8993_LINEOUT1_VOL_MASK 0x0010 /* LINEOUT1_VOL */
+#define WM8993_LINEOUT1_VOL_SHIFT 4 /* LINEOUT1_VOL */
+#define WM8993_LINEOUT1_VOL_WIDTH 1 /* LINEOUT1_VOL */
+#define WM8993_LINEOUT2N_MUTE 0x0004 /* LINEOUT2N_MUTE */
+#define WM8993_LINEOUT2N_MUTE_MASK 0x0004 /* LINEOUT2N_MUTE */
+#define WM8993_LINEOUT2N_MUTE_SHIFT 2 /* LINEOUT2N_MUTE */
+#define WM8993_LINEOUT2N_MUTE_WIDTH 1 /* LINEOUT2N_MUTE */
+#define WM8993_LINEOUT2P_MUTE 0x0002 /* LINEOUT2P_MUTE */
+#define WM8993_LINEOUT2P_MUTE_MASK 0x0002 /* LINEOUT2P_MUTE */
+#define WM8993_LINEOUT2P_MUTE_SHIFT 1 /* LINEOUT2P_MUTE */
+#define WM8993_LINEOUT2P_MUTE_WIDTH 1 /* LINEOUT2P_MUTE */
+#define WM8993_LINEOUT2_VOL 0x0001 /* LINEOUT2_VOL */
+#define WM8993_LINEOUT2_VOL_MASK 0x0001 /* LINEOUT2_VOL */
+#define WM8993_LINEOUT2_VOL_SHIFT 0 /* LINEOUT2_VOL */
+#define WM8993_LINEOUT2_VOL_WIDTH 1 /* LINEOUT2_VOL */
+
+/*
+ * R31 (0x1F) - HPOUT2 Volume
+ */
+#define WM8993_HPOUT2_MUTE 0x0020 /* HPOUT2_MUTE */
+#define WM8993_HPOUT2_MUTE_MASK 0x0020 /* HPOUT2_MUTE */
+#define WM8993_HPOUT2_MUTE_SHIFT 5 /* HPOUT2_MUTE */
+#define WM8993_HPOUT2_MUTE_WIDTH 1 /* HPOUT2_MUTE */
+#define WM8993_HPOUT2_VOL 0x0010 /* HPOUT2_VOL */
+#define WM8993_HPOUT2_VOL_MASK 0x0010 /* HPOUT2_VOL */
+#define WM8993_HPOUT2_VOL_SHIFT 4 /* HPOUT2_VOL */
+#define WM8993_HPOUT2_VOL_WIDTH 1 /* HPOUT2_VOL */
+
+/*
+ * R32 (0x20) - Left OPGA Volume
+ */
+#define WM8993_MIXOUT_VU 0x0100 /* MIXOUT_VU */
+#define WM8993_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */
+#define WM8993_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */
+#define WM8993_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */
+#define WM8993_MIXOUTL_ZC 0x0080 /* MIXOUTL_ZC */
+#define WM8993_MIXOUTL_ZC_MASK 0x0080 /* MIXOUTL_ZC */
+#define WM8993_MIXOUTL_ZC_SHIFT 7 /* MIXOUTL_ZC */
+#define WM8993_MIXOUTL_ZC_WIDTH 1 /* MIXOUTL_ZC */
+#define WM8993_MIXOUTL_MUTE_N 0x0040 /* MIXOUTL_MUTE_N */
+#define WM8993_MIXOUTL_MUTE_N_MASK 0x0040 /* MIXOUTL_MUTE_N */
+#define WM8993_MIXOUTL_MUTE_N_SHIFT 6 /* MIXOUTL_MUTE_N */
+#define WM8993_MIXOUTL_MUTE_N_WIDTH 1 /* MIXOUTL_MUTE_N */
+#define WM8993_MIXOUTL_VOL_MASK 0x003F /* MIXOUTL_VOL - [5:0] */
+#define WM8993_MIXOUTL_VOL_SHIFT 0 /* MIXOUTL_VOL - [5:0] */
+#define WM8993_MIXOUTL_VOL_WIDTH 6 /* MIXOUTL_VOL - [5:0] */
+
+/*
+ * R33 (0x21) - Right OPGA Volume
+ */
+#define WM8993_MIXOUT_VU 0x0100 /* MIXOUT_VU */
+#define WM8993_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */
+#define WM8993_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */
+#define WM8993_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */
+#define WM8993_MIXOUTR_ZC 0x0080 /* MIXOUTR_ZC */
+#define WM8993_MIXOUTR_ZC_MASK 0x0080 /* MIXOUTR_ZC */
+#define WM8993_MIXOUTR_ZC_SHIFT 7 /* MIXOUTR_ZC */
+#define WM8993_MIXOUTR_ZC_WIDTH 1 /* MIXOUTR_ZC */
+#define WM8993_MIXOUTR_MUTE_N 0x0040 /* MIXOUTR_MUTE_N */
+#define WM8993_MIXOUTR_MUTE_N_MASK 0x0040 /* MIXOUTR_MUTE_N */
+#define WM8993_MIXOUTR_MUTE_N_SHIFT 6 /* MIXOUTR_MUTE_N */
+#define WM8993_MIXOUTR_MUTE_N_WIDTH 1 /* MIXOUTR_MUTE_N */
+#define WM8993_MIXOUTR_VOL_MASK 0x003F /* MIXOUTR_VOL - [5:0] */
+#define WM8993_MIXOUTR_VOL_SHIFT 0 /* MIXOUTR_VOL - [5:0] */
+#define WM8993_MIXOUTR_VOL_WIDTH 6 /* MIXOUTR_VOL - [5:0] */
+
+/*
+ * R34 (0x22) - SPKMIXL Attenuation
+ */
+#define WM8993_MIXINL_SPKMIXL_VOL 0x0020 /* MIXINL_SPKMIXL_VOL */
+#define WM8993_MIXINL_SPKMIXL_VOL_MASK 0x0020 /* MIXINL_SPKMIXL_VOL */
+#define WM8993_MIXINL_SPKMIXL_VOL_SHIFT 5 /* MIXINL_SPKMIXL_VOL */
+#define WM8993_MIXINL_SPKMIXL_VOL_WIDTH 1 /* MIXINL_SPKMIXL_VOL */
+#define WM8993_IN1LP_SPKMIXL_VOL 0x0010 /* IN1LP_SPKMIXL_VOL */
+#define WM8993_IN1LP_SPKMIXL_VOL_MASK 0x0010 /* IN1LP_SPKMIXL_VOL */
+#define WM8993_IN1LP_SPKMIXL_VOL_SHIFT 4 /* IN1LP_SPKMIXL_VOL */
+#define WM8993_IN1LP_SPKMIXL_VOL_WIDTH 1 /* IN1LP_SPKMIXL_VOL */
+#define WM8993_MIXOUTL_SPKMIXL_VOL 0x0008 /* MIXOUTL_SPKMIXL_VOL */
+#define WM8993_MIXOUTL_SPKMIXL_VOL_MASK 0x0008 /* MIXOUTL_SPKMIXL_VOL */
+#define WM8993_MIXOUTL_SPKMIXL_VOL_SHIFT 3 /* MIXOUTL_SPKMIXL_VOL */
+#define WM8993_MIXOUTL_SPKMIXL_VOL_WIDTH 1 /* MIXOUTL_SPKMIXL_VOL */
+#define WM8993_DACL_SPKMIXL_VOL 0x0004 /* DACL_SPKMIXL_VOL */
+#define WM8993_DACL_SPKMIXL_VOL_MASK 0x0004 /* DACL_SPKMIXL_VOL */
+#define WM8993_DACL_SPKMIXL_VOL_SHIFT 2 /* DACL_SPKMIXL_VOL */
+#define WM8993_DACL_SPKMIXL_VOL_WIDTH 1 /* DACL_SPKMIXL_VOL */
+#define WM8993_SPKMIXL_VOL_MASK 0x0003 /* SPKMIXL_VOL - [1:0] */
+#define WM8993_SPKMIXL_VOL_SHIFT 0 /* SPKMIXL_VOL - [1:0] */
+#define WM8993_SPKMIXL_VOL_WIDTH 2 /* SPKMIXL_VOL - [1:0] */
+
+/*
+ * R35 (0x23) - SPKMIXR Attenuation
+ */
+#define WM8993_SPKOUT_CLASSAB_MODE 0x0100 /* SPKOUT_CLASSAB_MODE */
+#define WM8993_SPKOUT_CLASSAB_MODE_MASK 0x0100 /* SPKOUT_CLASSAB_MODE */
+#define WM8993_SPKOUT_CLASSAB_MODE_SHIFT 8 /* SPKOUT_CLASSAB_MODE */
+#define WM8993_SPKOUT_CLASSAB_MODE_WIDTH 1 /* SPKOUT_CLASSAB_MODE */
+#define WM8993_MIXINR_SPKMIXR_VOL 0x0020 /* MIXINR_SPKMIXR_VOL */
+#define WM8993_MIXINR_SPKMIXR_VOL_MASK 0x0020 /* MIXINR_SPKMIXR_VOL */
+#define WM8993_MIXINR_SPKMIXR_VOL_SHIFT 5 /* MIXINR_SPKMIXR_VOL */
+#define WM8993_MIXINR_SPKMIXR_VOL_WIDTH 1 /* MIXINR_SPKMIXR_VOL */
+#define WM8993_IN1RP_SPKMIXR_VOL 0x0010 /* IN1RP_SPKMIXR_VOL */
+#define WM8993_IN1RP_SPKMIXR_VOL_MASK 0x0010 /* IN1RP_SPKMIXR_VOL */
+#define WM8993_IN1RP_SPKMIXR_VOL_SHIFT 4 /* IN1RP_SPKMIXR_VOL */
+#define WM8993_IN1RP_SPKMIXR_VOL_WIDTH 1 /* IN1RP_SPKMIXR_VOL */
+#define WM8993_MIXOUTR_SPKMIXR_VOL 0x0008 /* MIXOUTR_SPKMIXR_VOL */
+#define WM8993_MIXOUTR_SPKMIXR_VOL_MASK 0x0008 /* MIXOUTR_SPKMIXR_VOL */
+#define WM8993_MIXOUTR_SPKMIXR_VOL_SHIFT 3 /* MIXOUTR_SPKMIXR_VOL */
+#define WM8993_MIXOUTR_SPKMIXR_VOL_WIDTH 1 /* MIXOUTR_SPKMIXR_VOL */
+#define WM8993_DACR_SPKMIXR_VOL 0x0004 /* DACR_SPKMIXR_VOL */
+#define WM8993_DACR_SPKMIXR_VOL_MASK 0x0004 /* DACR_SPKMIXR_VOL */
+#define WM8993_DACR_SPKMIXR_VOL_SHIFT 2 /* DACR_SPKMIXR_VOL */
+#define WM8993_DACR_SPKMIXR_VOL_WIDTH 1 /* DACR_SPKMIXR_VOL */
+#define WM8993_SPKMIXR_VOL_MASK 0x0003 /* SPKMIXR_VOL - [1:0] */
+#define WM8993_SPKMIXR_VOL_SHIFT 0 /* SPKMIXR_VOL - [1:0] */
+#define WM8993_SPKMIXR_VOL_WIDTH 2 /* SPKMIXR_VOL - [1:0] */
+
+/*
+ * R36 (0x24) - SPKOUT Mixers
+ */
+#define WM8993_VRX_TO_SPKOUTL 0x0020 /* VRX_TO_SPKOUTL */
+#define WM8993_VRX_TO_SPKOUTL_MASK 0x0020 /* VRX_TO_SPKOUTL */
+#define WM8993_VRX_TO_SPKOUTL_SHIFT 5 /* VRX_TO_SPKOUTL */
+#define WM8993_VRX_TO_SPKOUTL_WIDTH 1 /* VRX_TO_SPKOUTL */
+#define WM8993_SPKMIXL_TO_SPKOUTL 0x0010 /* SPKMIXL_TO_SPKOUTL */
+#define WM8993_SPKMIXL_TO_SPKOUTL_MASK 0x0010 /* SPKMIXL_TO_SPKOUTL */
+#define WM8993_SPKMIXL_TO_SPKOUTL_SHIFT 4 /* SPKMIXL_TO_SPKOUTL */
+#define WM8993_SPKMIXL_TO_SPKOUTL_WIDTH 1 /* SPKMIXL_TO_SPKOUTL */
+#define WM8993_SPKMIXR_TO_SPKOUTL 0x0008 /* SPKMIXR_TO_SPKOUTL */
+#define WM8993_SPKMIXR_TO_SPKOUTL_MASK 0x0008 /* SPKMIXR_TO_SPKOUTL */
+#define WM8993_SPKMIXR_TO_SPKOUTL_SHIFT 3 /* SPKMIXR_TO_SPKOUTL */
+#define WM8993_SPKMIXR_TO_SPKOUTL_WIDTH 1 /* SPKMIXR_TO_SPKOUTL */
+#define WM8993_VRX_TO_SPKOUTR 0x0004 /* VRX_TO_SPKOUTR */
+#define WM8993_VRX_TO_SPKOUTR_MASK 0x0004 /* VRX_TO_SPKOUTR */
+#define WM8993_VRX_TO_SPKOUTR_SHIFT 2 /* VRX_TO_SPKOUTR */
+#define WM8993_VRX_TO_SPKOUTR_WIDTH 1 /* VRX_TO_SPKOUTR */
+#define WM8993_SPKMIXL_TO_SPKOUTR 0x0002 /* SPKMIXL_TO_SPKOUTR */
+#define WM8993_SPKMIXL_TO_SPKOUTR_MASK 0x0002 /* SPKMIXL_TO_SPKOUTR */
+#define WM8993_SPKMIXL_TO_SPKOUTR_SHIFT 1 /* SPKMIXL_TO_SPKOUTR */
+#define WM8993_SPKMIXL_TO_SPKOUTR_WIDTH 1 /* SPKMIXL_TO_SPKOUTR */
+#define WM8993_SPKMIXR_TO_SPKOUTR 0x0001 /* SPKMIXR_TO_SPKOUTR */
+#define WM8993_SPKMIXR_TO_SPKOUTR_MASK 0x0001 /* SPKMIXR_TO_SPKOUTR */
+#define WM8993_SPKMIXR_TO_SPKOUTR_SHIFT 0 /* SPKMIXR_TO_SPKOUTR */
+#define WM8993_SPKMIXR_TO_SPKOUTR_WIDTH 1 /* SPKMIXR_TO_SPKOUTR */
+
+/*
+ * R37 (0x25) - SPKOUT Boost
+ */
+#define WM8993_SPKOUTL_BOOST_MASK 0x0038 /* SPKOUTL_BOOST - [5:3] */
+#define WM8993_SPKOUTL_BOOST_SHIFT 3 /* SPKOUTL_BOOST - [5:3] */
+#define WM8993_SPKOUTL_BOOST_WIDTH 3 /* SPKOUTL_BOOST - [5:3] */
+#define WM8993_SPKOUTR_BOOST_MASK 0x0007 /* SPKOUTR_BOOST - [2:0] */
+#define WM8993_SPKOUTR_BOOST_SHIFT 0 /* SPKOUTR_BOOST - [2:0] */
+#define WM8993_SPKOUTR_BOOST_WIDTH 3 /* SPKOUTR_BOOST - [2:0] */
+
+/*
+ * R38 (0x26) - Speaker Volume Left
+ */
+#define WM8993_SPKOUT_VU 0x0100 /* SPKOUT_VU */
+#define WM8993_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */
+#define WM8993_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */
+#define WM8993_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */
+#define WM8993_SPKOUTL_ZC 0x0080 /* SPKOUTL_ZC */
+#define WM8993_SPKOUTL_ZC_MASK 0x0080 /* SPKOUTL_ZC */
+#define WM8993_SPKOUTL_ZC_SHIFT 7 /* SPKOUTL_ZC */
+#define WM8993_SPKOUTL_ZC_WIDTH 1 /* SPKOUTL_ZC */
+#define WM8993_SPKOUTL_MUTE_N 0x0040 /* SPKOUTL_MUTE_N */
+#define WM8993_SPKOUTL_MUTE_N_MASK 0x0040 /* SPKOUTL_MUTE_N */
+#define WM8993_SPKOUTL_MUTE_N_SHIFT 6 /* SPKOUTL_MUTE_N */
+#define WM8993_SPKOUTL_MUTE_N_WIDTH 1 /* SPKOUTL_MUTE_N */
+#define WM8993_SPKOUTL_VOL_MASK 0x003F /* SPKOUTL_VOL - [5:0] */
+#define WM8993_SPKOUTL_VOL_SHIFT 0 /* SPKOUTL_VOL - [5:0] */
+#define WM8993_SPKOUTL_VOL_WIDTH 6 /* SPKOUTL_VOL - [5:0] */
+
+/*
+ * R39 (0x27) - Speaker Volume Right
+ */
+#define WM8993_SPKOUT_VU 0x0100 /* SPKOUT_VU */
+#define WM8993_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */
+#define WM8993_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */
+#define WM8993_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */
+#define WM8993_SPKOUTR_ZC 0x0080 /* SPKOUTR_ZC */
+#define WM8993_SPKOUTR_ZC_MASK 0x0080 /* SPKOUTR_ZC */
+#define WM8993_SPKOUTR_ZC_SHIFT 7 /* SPKOUTR_ZC */
+#define WM8993_SPKOUTR_ZC_WIDTH 1 /* SPKOUTR_ZC */
+#define WM8993_SPKOUTR_MUTE_N 0x0040 /* SPKOUTR_MUTE_N */
+#define WM8993_SPKOUTR_MUTE_N_MASK 0x0040 /* SPKOUTR_MUTE_N */
+#define WM8993_SPKOUTR_MUTE_N_SHIFT 6 /* SPKOUTR_MUTE_N */
+#define WM8993_SPKOUTR_MUTE_N_WIDTH 1 /* SPKOUTR_MUTE_N */
+#define WM8993_SPKOUTR_VOL_MASK 0x003F /* SPKOUTR_VOL - [5:0] */
+#define WM8993_SPKOUTR_VOL_SHIFT 0 /* SPKOUTR_VOL - [5:0] */
+#define WM8993_SPKOUTR_VOL_WIDTH 6 /* SPKOUTR_VOL - [5:0] */
+
+/*
+ * R40 (0x28) - Input Mixer2
+ */
+#define WM8993_IN2LP_TO_IN2L 0x0080 /* IN2LP_TO_IN2L */
+#define WM8993_IN2LP_TO_IN2L_MASK 0x0080 /* IN2LP_TO_IN2L */
+#define WM8993_IN2LP_TO_IN2L_SHIFT 7 /* IN2LP_TO_IN2L */
+#define WM8993_IN2LP_TO_IN2L_WIDTH 1 /* IN2LP_TO_IN2L */
+#define WM8993_IN2LN_TO_IN2L 0x0040 /* IN2LN_TO_IN2L */
+#define WM8993_IN2LN_TO_IN2L_MASK 0x0040 /* IN2LN_TO_IN2L */
+#define WM8993_IN2LN_TO_IN2L_SHIFT 6 /* IN2LN_TO_IN2L */
+#define WM8993_IN2LN_TO_IN2L_WIDTH 1 /* IN2LN_TO_IN2L */
+#define WM8993_IN1LP_TO_IN1L 0x0020 /* IN1LP_TO_IN1L */
+#define WM8993_IN1LP_TO_IN1L_MASK 0x0020 /* IN1LP_TO_IN1L */
+#define WM8993_IN1LP_TO_IN1L_SHIFT 5 /* IN1LP_TO_IN1L */
+#define WM8993_IN1LP_TO_IN1L_WIDTH 1 /* IN1LP_TO_IN1L */
+#define WM8993_IN1LN_TO_IN1L 0x0010 /* IN1LN_TO_IN1L */
+#define WM8993_IN1LN_TO_IN1L_MASK 0x0010 /* IN1LN_TO_IN1L */
+#define WM8993_IN1LN_TO_IN1L_SHIFT 4 /* IN1LN_TO_IN1L */
+#define WM8993_IN1LN_TO_IN1L_WIDTH 1 /* IN1LN_TO_IN1L */
+#define WM8993_IN2RP_TO_IN2R 0x0008 /* IN2RP_TO_IN2R */
+#define WM8993_IN2RP_TO_IN2R_MASK 0x0008 /* IN2RP_TO_IN2R */
+#define WM8993_IN2RP_TO_IN2R_SHIFT 3 /* IN2RP_TO_IN2R */
+#define WM8993_IN2RP_TO_IN2R_WIDTH 1 /* IN2RP_TO_IN2R */
+#define WM8993_IN2RN_TO_IN2R 0x0004 /* IN2RN_TO_IN2R */
+#define WM8993_IN2RN_TO_IN2R_MASK 0x0004 /* IN2RN_TO_IN2R */
+#define WM8993_IN2RN_TO_IN2R_SHIFT 2 /* IN2RN_TO_IN2R */
+#define WM8993_IN2RN_TO_IN2R_WIDTH 1 /* IN2RN_TO_IN2R */
+#define WM8993_IN1RP_TO_IN1R 0x0002 /* IN1RP_TO_IN1R */
+#define WM8993_IN1RP_TO_IN1R_MASK 0x0002 /* IN1RP_TO_IN1R */
+#define WM8993_IN1RP_TO_IN1R_SHIFT 1 /* IN1RP_TO_IN1R */
+#define WM8993_IN1RP_TO_IN1R_WIDTH 1 /* IN1RP_TO_IN1R */
+#define WM8993_IN1RN_TO_IN1R 0x0001 /* IN1RN_TO_IN1R */
+#define WM8993_IN1RN_TO_IN1R_MASK 0x0001 /* IN1RN_TO_IN1R */
+#define WM8993_IN1RN_TO_IN1R_SHIFT 0 /* IN1RN_TO_IN1R */
+#define WM8993_IN1RN_TO_IN1R_WIDTH 1 /* IN1RN_TO_IN1R */
+
+/*
+ * R41 (0x29) - Input Mixer3
+ */
+#define WM8993_IN2L_TO_MIXINL 0x0100 /* IN2L_TO_MIXINL */
+#define WM8993_IN2L_TO_MIXINL_MASK 0x0100 /* IN2L_TO_MIXINL */
+#define WM8993_IN2L_TO_MIXINL_SHIFT 8 /* IN2L_TO_MIXINL */
+#define WM8993_IN2L_TO_MIXINL_WIDTH 1 /* IN2L_TO_MIXINL */
+#define WM8993_IN2L_MIXINL_VOL 0x0080 /* IN2L_MIXINL_VOL */
+#define WM8993_IN2L_MIXINL_VOL_MASK 0x0080 /* IN2L_MIXINL_VOL */
+#define WM8993_IN2L_MIXINL_VOL_SHIFT 7 /* IN2L_MIXINL_VOL */
+#define WM8993_IN2L_MIXINL_VOL_WIDTH 1 /* IN2L_MIXINL_VOL */
+#define WM8993_IN1L_TO_MIXINL 0x0020 /* IN1L_TO_MIXINL */
+#define WM8993_IN1L_TO_MIXINL_MASK 0x0020 /* IN1L_TO_MIXINL */
+#define WM8993_IN1L_TO_MIXINL_SHIFT 5 /* IN1L_TO_MIXINL */
+#define WM8993_IN1L_TO_MIXINL_WIDTH 1 /* IN1L_TO_MIXINL */
+#define WM8993_IN1L_MIXINL_VOL 0x0010 /* IN1L_MIXINL_VOL */
+#define WM8993_IN1L_MIXINL_VOL_MASK 0x0010 /* IN1L_MIXINL_VOL */
+#define WM8993_IN1L_MIXINL_VOL_SHIFT 4 /* IN1L_MIXINL_VOL */
+#define WM8993_IN1L_MIXINL_VOL_WIDTH 1 /* IN1L_MIXINL_VOL */
+#define WM8993_MIXOUTL_MIXINL_VOL_MASK 0x0007 /* MIXOUTL_MIXINL_VOL - [2:0] */
+#define WM8993_MIXOUTL_MIXINL_VOL_SHIFT 0 /* MIXOUTL_MIXINL_VOL - [2:0] */
+#define WM8993_MIXOUTL_MIXINL_VOL_WIDTH 3 /* MIXOUTL_MIXINL_VOL - [2:0] */
+
+/*
+ * R42 (0x2A) - Input Mixer4
+ */
+#define WM8993_IN2R_TO_MIXINR 0x0100 /* IN2R_TO_MIXINR */
+#define WM8993_IN2R_TO_MIXINR_MASK 0x0100 /* IN2R_TO_MIXINR */
+#define WM8993_IN2R_TO_MIXINR_SHIFT 8 /* IN2R_TO_MIXINR */
+#define WM8993_IN2R_TO_MIXINR_WIDTH 1 /* IN2R_TO_MIXINR */
+#define WM8993_IN2R_MIXINR_VOL 0x0080 /* IN2R_MIXINR_VOL */
+#define WM8993_IN2R_MIXINR_VOL_MASK 0x0080 /* IN2R_MIXINR_VOL */
+#define WM8993_IN2R_MIXINR_VOL_SHIFT 7 /* IN2R_MIXINR_VOL */
+#define WM8993_IN2R_MIXINR_VOL_WIDTH 1 /* IN2R_MIXINR_VOL */
+#define WM8993_IN1R_TO_MIXINR 0x0020 /* IN1R_TO_MIXINR */
+#define WM8993_IN1R_TO_MIXINR_MASK 0x0020 /* IN1R_TO_MIXINR */
+#define WM8993_IN1R_TO_MIXINR_SHIFT 5 /* IN1R_TO_MIXINR */
+#define WM8993_IN1R_TO_MIXINR_WIDTH 1 /* IN1R_TO_MIXINR */
+#define WM8993_IN1R_MIXINR_VOL 0x0010 /* IN1R_MIXINR_VOL */
+#define WM8993_IN1R_MIXINR_VOL_MASK 0x0010 /* IN1R_MIXINR_VOL */
+#define WM8993_IN1R_MIXINR_VOL_SHIFT 4 /* IN1R_MIXINR_VOL */
+#define WM8993_IN1R_MIXINR_VOL_WIDTH 1 /* IN1R_MIXINR_VOL */
+#define WM8993_MIXOUTR_MIXINR_VOL_MASK 0x0007 /* MIXOUTR_MIXINR_VOL - [2:0] */
+#define WM8993_MIXOUTR_MIXINR_VOL_SHIFT 0 /* MIXOUTR_MIXINR_VOL - [2:0] */
+#define WM8993_MIXOUTR_MIXINR_VOL_WIDTH 3 /* MIXOUTR_MIXINR_VOL - [2:0] */
+
+/*
+ * R43 (0x2B) - Input Mixer5
+ */
+#define WM8993_IN1LP_MIXINL_VOL_MASK 0x01C0 /* IN1LP_MIXINL_VOL - [8:6] */
+#define WM8993_IN1LP_MIXINL_VOL_SHIFT 6 /* IN1LP_MIXINL_VOL - [8:6] */
+#define WM8993_IN1LP_MIXINL_VOL_WIDTH 3 /* IN1LP_MIXINL_VOL - [8:6] */
+#define WM8993_VRX_MIXINL_VOL_MASK 0x0007 /* VRX_MIXINL_VOL - [2:0] */
+#define WM8993_VRX_MIXINL_VOL_SHIFT 0 /* VRX_MIXINL_VOL - [2:0] */
+#define WM8993_VRX_MIXINL_VOL_WIDTH 3 /* VRX_MIXINL_VOL - [2:0] */
+
+/*
+ * R44 (0x2C) - Input Mixer6
+ */
+#define WM8993_IN1RP_MIXINR_VOL_MASK 0x01C0 /* IN1RP_MIXINR_VOL - [8:6] */
+#define WM8993_IN1RP_MIXINR_VOL_SHIFT 6 /* IN1RP_MIXINR_VOL - [8:6] */
+#define WM8993_IN1RP_MIXINR_VOL_WIDTH 3 /* IN1RP_MIXINR_VOL - [8:6] */
+#define WM8993_VRX_MIXINR_VOL_MASK 0x0007 /* VRX_MIXINR_VOL - [2:0] */
+#define WM8993_VRX_MIXINR_VOL_SHIFT 0 /* VRX_MIXINR_VOL - [2:0] */
+#define WM8993_VRX_MIXINR_VOL_WIDTH 3 /* VRX_MIXINR_VOL - [2:0] */
+
+/*
+ * R45 (0x2D) - Output Mixer1
+ */
+#define WM8993_DACL_TO_HPOUT1L 0x0100 /* DACL_TO_HPOUT1L */
+#define WM8993_DACL_TO_HPOUT1L_MASK 0x0100 /* DACL_TO_HPOUT1L */
+#define WM8993_DACL_TO_HPOUT1L_SHIFT 8 /* DACL_TO_HPOUT1L */
+#define WM8993_DACL_TO_HPOUT1L_WIDTH 1 /* DACL_TO_HPOUT1L */
+#define WM8993_MIXINR_TO_MIXOUTL 0x0080 /* MIXINR_TO_MIXOUTL */
+#define WM8993_MIXINR_TO_MIXOUTL_MASK 0x0080 /* MIXINR_TO_MIXOUTL */
+#define WM8993_MIXINR_TO_MIXOUTL_SHIFT 7 /* MIXINR_TO_MIXOUTL */
+#define WM8993_MIXINR_TO_MIXOUTL_WIDTH 1 /* MIXINR_TO_MIXOUTL */
+#define WM8993_MIXINL_TO_MIXOUTL 0x0040 /* MIXINL_TO_MIXOUTL */
+#define WM8993_MIXINL_TO_MIXOUTL_MASK 0x0040 /* MIXINL_TO_MIXOUTL */
+#define WM8993_MIXINL_TO_MIXOUTL_SHIFT 6 /* MIXINL_TO_MIXOUTL */
+#define WM8993_MIXINL_TO_MIXOUTL_WIDTH 1 /* MIXINL_TO_MIXOUTL */
+#define WM8993_IN2RN_TO_MIXOUTL 0x0020 /* IN2RN_TO_MIXOUTL */
+#define WM8993_IN2RN_TO_MIXOUTL_MASK 0x0020 /* IN2RN_TO_MIXOUTL */
+#define WM8993_IN2RN_TO_MIXOUTL_SHIFT 5 /* IN2RN_TO_MIXOUTL */
+#define WM8993_IN2RN_TO_MIXOUTL_WIDTH 1 /* IN2RN_TO_MIXOUTL */
+#define WM8993_IN2LN_TO_MIXOUTL 0x0010 /* IN2LN_TO_MIXOUTL */
+#define WM8993_IN2LN_TO_MIXOUTL_MASK 0x0010 /* IN2LN_TO_MIXOUTL */
+#define WM8993_IN2LN_TO_MIXOUTL_SHIFT 4 /* IN2LN_TO_MIXOUTL */
+#define WM8993_IN2LN_TO_MIXOUTL_WIDTH 1 /* IN2LN_TO_MIXOUTL */
+#define WM8993_IN1R_TO_MIXOUTL 0x0008 /* IN1R_TO_MIXOUTL */
+#define WM8993_IN1R_TO_MIXOUTL_MASK 0x0008 /* IN1R_TO_MIXOUTL */
+#define WM8993_IN1R_TO_MIXOUTL_SHIFT 3 /* IN1R_TO_MIXOUTL */
+#define WM8993_IN1R_TO_MIXOUTL_WIDTH 1 /* IN1R_TO_MIXOUTL */
+#define WM8993_IN1L_TO_MIXOUTL 0x0004 /* IN1L_TO_MIXOUTL */
+#define WM8993_IN1L_TO_MIXOUTL_MASK 0x0004 /* IN1L_TO_MIXOUTL */
+#define WM8993_IN1L_TO_MIXOUTL_SHIFT 2 /* IN1L_TO_MIXOUTL */
+#define WM8993_IN1L_TO_MIXOUTL_WIDTH 1 /* IN1L_TO_MIXOUTL */
+#define WM8993_IN2LP_TO_MIXOUTL 0x0002 /* IN2LP_TO_MIXOUTL */
+#define WM8993_IN2LP_TO_MIXOUTL_MASK 0x0002 /* IN2LP_TO_MIXOUTL */
+#define WM8993_IN2LP_TO_MIXOUTL_SHIFT 1 /* IN2LP_TO_MIXOUTL */
+#define WM8993_IN2LP_TO_MIXOUTL_WIDTH 1 /* IN2LP_TO_MIXOUTL */
+#define WM8993_DACL_TO_MIXOUTL 0x0001 /* DACL_TO_MIXOUTL */
+#define WM8993_DACL_TO_MIXOUTL_MASK 0x0001 /* DACL_TO_MIXOUTL */
+#define WM8993_DACL_TO_MIXOUTL_SHIFT 0 /* DACL_TO_MIXOUTL */
+#define WM8993_DACL_TO_MIXOUTL_WIDTH 1 /* DACL_TO_MIXOUTL */
+
+/*
+ * R46 (0x2E) - Output Mixer2
+ */
+#define WM8993_DACR_TO_HPOUT1R 0x0100 /* DACR_TO_HPOUT1R */
+#define WM8993_DACR_TO_HPOUT1R_MASK 0x0100 /* DACR_TO_HPOUT1R */
+#define WM8993_DACR_TO_HPOUT1R_SHIFT 8 /* DACR_TO_HPOUT1R */
+#define WM8993_DACR_TO_HPOUT1R_WIDTH 1 /* DACR_TO_HPOUT1R */
+#define WM8993_MIXINL_TO_MIXOUTR 0x0080 /* MIXINL_TO_MIXOUTR */
+#define WM8993_MIXINL_TO_MIXOUTR_MASK 0x0080 /* MIXINL_TO_MIXOUTR */
+#define WM8993_MIXINL_TO_MIXOUTR_SHIFT 7 /* MIXINL_TO_MIXOUTR */
+#define WM8993_MIXINL_TO_MIXOUTR_WIDTH 1 /* MIXINL_TO_MIXOUTR */
+#define WM8993_MIXINR_TO_MIXOUTR 0x0040 /* MIXINR_TO_MIXOUTR */
+#define WM8993_MIXINR_TO_MIXOUTR_MASK 0x0040 /* MIXINR_TO_MIXOUTR */
+#define WM8993_MIXINR_TO_MIXOUTR_SHIFT 6 /* MIXINR_TO_MIXOUTR */
+#define WM8993_MIXINR_TO_MIXOUTR_WIDTH 1 /* MIXINR_TO_MIXOUTR */
+#define WM8993_IN2LN_TO_MIXOUTR 0x0020 /* IN2LN_TO_MIXOUTR */
+#define WM8993_IN2LN_TO_MIXOUTR_MASK 0x0020 /* IN2LN_TO_MIXOUTR */
+#define WM8993_IN2LN_TO_MIXOUTR_SHIFT 5 /* IN2LN_TO_MIXOUTR */
+#define WM8993_IN2LN_TO_MIXOUTR_WIDTH 1 /* IN2LN_TO_MIXOUTR */
+#define WM8993_IN2RN_TO_MIXOUTR 0x0010 /* IN2RN_TO_MIXOUTR */
+#define WM8993_IN2RN_TO_MIXOUTR_MASK 0x0010 /* IN2RN_TO_MIXOUTR */
+#define WM8993_IN2RN_TO_MIXOUTR_SHIFT 4 /* IN2RN_TO_MIXOUTR */
+#define WM8993_IN2RN_TO_MIXOUTR_WIDTH 1 /* IN2RN_TO_MIXOUTR */
+#define WM8993_IN1L_TO_MIXOUTR 0x0008 /* IN1L_TO_MIXOUTR */
+#define WM8993_IN1L_TO_MIXOUTR_MASK 0x0008 /* IN1L_TO_MIXOUTR */
+#define WM8993_IN1L_TO_MIXOUTR_SHIFT 3 /* IN1L_TO_MIXOUTR */
+#define WM8993_IN1L_TO_MIXOUTR_WIDTH 1 /* IN1L_TO_MIXOUTR */
+#define WM8993_IN1R_TO_MIXOUTR 0x0004 /* IN1R_TO_MIXOUTR */
+#define WM8993_IN1R_TO_MIXOUTR_MASK 0x0004 /* IN1R_TO_MIXOUTR */
+#define WM8993_IN1R_TO_MIXOUTR_SHIFT 2 /* IN1R_TO_MIXOUTR */
+#define WM8993_IN1R_TO_MIXOUTR_WIDTH 1 /* IN1R_TO_MIXOUTR */
+#define WM8993_IN2RP_TO_MIXOUTR 0x0002 /* IN2RP_TO_MIXOUTR */
+#define WM8993_IN2RP_TO_MIXOUTR_MASK 0x0002 /* IN2RP_TO_MIXOUTR */
+#define WM8993_IN2RP_TO_MIXOUTR_SHIFT 1 /* IN2RP_TO_MIXOUTR */
+#define WM8993_IN2RP_TO_MIXOUTR_WIDTH 1 /* IN2RP_TO_MIXOUTR */
+#define WM8993_DACR_TO_MIXOUTR 0x0001 /* DACR_TO_MIXOUTR */
+#define WM8993_DACR_TO_MIXOUTR_MASK 0x0001 /* DACR_TO_MIXOUTR */
+#define WM8993_DACR_TO_MIXOUTR_SHIFT 0 /* DACR_TO_MIXOUTR */
+#define WM8993_DACR_TO_MIXOUTR_WIDTH 1 /* DACR_TO_MIXOUTR */
+
+/*
+ * R47 (0x2F) - Output Mixer3
+ */
+#define WM8993_IN2LP_MIXOUTL_VOL_MASK 0x0E00 /* IN2LP_MIXOUTL_VOL - [11:9] */
+#define WM8993_IN2LP_MIXOUTL_VOL_SHIFT 9 /* IN2LP_MIXOUTL_VOL - [11:9] */
+#define WM8993_IN2LP_MIXOUTL_VOL_WIDTH 3 /* IN2LP_MIXOUTL_VOL - [11:9] */
+#define WM8993_IN2LN_MIXOUTL_VOL_MASK 0x01C0 /* IN2LN_MIXOUTL_VOL - [8:6] */
+#define WM8993_IN2LN_MIXOUTL_VOL_SHIFT 6 /* IN2LN_MIXOUTL_VOL - [8:6] */
+#define WM8993_IN2LN_MIXOUTL_VOL_WIDTH 3 /* IN2LN_MIXOUTL_VOL - [8:6] */
+#define WM8993_IN1R_MIXOUTL_VOL_MASK 0x0038 /* IN1R_MIXOUTL_VOL - [5:3] */
+#define WM8993_IN1R_MIXOUTL_VOL_SHIFT 3 /* IN1R_MIXOUTL_VOL - [5:3] */
+#define WM8993_IN1R_MIXOUTL_VOL_WIDTH 3 /* IN1R_MIXOUTL_VOL - [5:3] */
+#define WM8993_IN1L_MIXOUTL_VOL_MASK 0x0007 /* IN1L_MIXOUTL_VOL - [2:0] */
+#define WM8993_IN1L_MIXOUTL_VOL_SHIFT 0 /* IN1L_MIXOUTL_VOL - [2:0] */
+#define WM8993_IN1L_MIXOUTL_VOL_WIDTH 3 /* IN1L_MIXOUTL_VOL - [2:0] */
+
+/*
+ * R48 (0x30) - Output Mixer4
+ */
+#define WM8993_IN2RP_MIXOUTR_VOL_MASK 0x0E00 /* IN2RP_MIXOUTR_VOL - [11:9] */
+#define WM8993_IN2RP_MIXOUTR_VOL_SHIFT 9 /* IN2RP_MIXOUTR_VOL - [11:9] */
+#define WM8993_IN2RP_MIXOUTR_VOL_WIDTH 3 /* IN2RP_MIXOUTR_VOL - [11:9] */
+#define WM8993_IN2RN_MIXOUTR_VOL_MASK 0x01C0 /* IN2RN_MIXOUTR_VOL - [8:6] */
+#define WM8993_IN2RN_MIXOUTR_VOL_SHIFT 6 /* IN2RN_MIXOUTR_VOL - [8:6] */
+#define WM8993_IN2RN_MIXOUTR_VOL_WIDTH 3 /* IN2RN_MIXOUTR_VOL - [8:6] */
+#define WM8993_IN1L_MIXOUTR_VOL_MASK 0x0038 /* IN1L_MIXOUTR_VOL - [5:3] */
+#define WM8993_IN1L_MIXOUTR_VOL_SHIFT 3 /* IN1L_MIXOUTR_VOL - [5:3] */
+#define WM8993_IN1L_MIXOUTR_VOL_WIDTH 3 /* IN1L_MIXOUTR_VOL - [5:3] */
+#define WM8993_IN1R_MIXOUTR_VOL_MASK 0x0007 /* IN1R_MIXOUTR_VOL - [2:0] */
+#define WM8993_IN1R_MIXOUTR_VOL_SHIFT 0 /* IN1R_MIXOUTR_VOL - [2:0] */
+#define WM8993_IN1R_MIXOUTR_VOL_WIDTH 3 /* IN1R_MIXOUTR_VOL - [2:0] */
+
+/*
+ * R49 (0x31) - Output Mixer5
+ */
+#define WM8993_DACL_MIXOUTL_VOL_MASK 0x0E00 /* DACL_MIXOUTL_VOL - [11:9] */
+#define WM8993_DACL_MIXOUTL_VOL_SHIFT 9 /* DACL_MIXOUTL_VOL - [11:9] */
+#define WM8993_DACL_MIXOUTL_VOL_WIDTH 3 /* DACL_MIXOUTL_VOL - [11:9] */
+#define WM8993_IN2RN_MIXOUTL_VOL_MASK 0x01C0 /* IN2RN_MIXOUTL_VOL - [8:6] */
+#define WM8993_IN2RN_MIXOUTL_VOL_SHIFT 6 /* IN2RN_MIXOUTL_VOL - [8:6] */
+#define WM8993_IN2RN_MIXOUTL_VOL_WIDTH 3 /* IN2RN_MIXOUTL_VOL - [8:6] */
+#define WM8993_MIXINR_MIXOUTL_VOL_MASK 0x0038 /* MIXINR_MIXOUTL_VOL - [5:3] */
+#define WM8993_MIXINR_MIXOUTL_VOL_SHIFT 3 /* MIXINR_MIXOUTL_VOL - [5:3] */
+#define WM8993_MIXINR_MIXOUTL_VOL_WIDTH 3 /* MIXINR_MIXOUTL_VOL - [5:3] */
+#define WM8993_MIXINL_MIXOUTL_VOL_MASK 0x0007 /* MIXINL_MIXOUTL_VOL - [2:0] */
+#define WM8993_MIXINL_MIXOUTL_VOL_SHIFT 0 /* MIXINL_MIXOUTL_VOL - [2:0] */
+#define WM8993_MIXINL_MIXOUTL_VOL_WIDTH 3 /* MIXINL_MIXOUTL_VOL - [2:0] */
+
+/*
+ * R50 (0x32) - Output Mixer6
+ */
+#define WM8993_DACR_MIXOUTR_VOL_MASK 0x0E00 /* DACR_MIXOUTR_VOL - [11:9] */
+#define WM8993_DACR_MIXOUTR_VOL_SHIFT 9 /* DACR_MIXOUTR_VOL - [11:9] */
+#define WM8993_DACR_MIXOUTR_VOL_WIDTH 3 /* DACR_MIXOUTR_VOL - [11:9] */
+#define WM8993_IN2LN_MIXOUTR_VOL_MASK 0x01C0 /* IN2LN_MIXOUTR_VOL - [8:6] */
+#define WM8993_IN2LN_MIXOUTR_VOL_SHIFT 6 /* IN2LN_MIXOUTR_VOL - [8:6] */
+#define WM8993_IN2LN_MIXOUTR_VOL_WIDTH 3 /* IN2LN_MIXOUTR_VOL - [8:6] */
+#define WM8993_MIXINL_MIXOUTR_VOL_MASK 0x0038 /* MIXINL_MIXOUTR_VOL - [5:3] */
+#define WM8993_MIXINL_MIXOUTR_VOL_SHIFT 3 /* MIXINL_MIXOUTR_VOL - [5:3] */
+#define WM8993_MIXINL_MIXOUTR_VOL_WIDTH 3 /* MIXINL_MIXOUTR_VOL - [5:3] */
+#define WM8993_MIXINR_MIXOUTR_VOL_MASK 0x0007 /* MIXINR_MIXOUTR_VOL - [2:0] */
+#define WM8993_MIXINR_MIXOUTR_VOL_SHIFT 0 /* MIXINR_MIXOUTR_VOL - [2:0] */
+#define WM8993_MIXINR_MIXOUTR_VOL_WIDTH 3 /* MIXINR_MIXOUTR_VOL - [2:0] */
+
+/*
+ * R51 (0x33) - HPOUT2 Mixer
+ */
+#define WM8993_VRX_TO_HPOUT2 0x0020 /* VRX_TO_HPOUT2 */
+#define WM8993_VRX_TO_HPOUT2_MASK 0x0020 /* VRX_TO_HPOUT2 */
+#define WM8993_VRX_TO_HPOUT2_SHIFT 5 /* VRX_TO_HPOUT2 */
+#define WM8993_VRX_TO_HPOUT2_WIDTH 1 /* VRX_TO_HPOUT2 */
+#define WM8993_MIXOUTLVOL_TO_HPOUT2 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */
+#define WM8993_MIXOUTLVOL_TO_HPOUT2_MASK 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */
+#define WM8993_MIXOUTLVOL_TO_HPOUT2_SHIFT 4 /* MIXOUTLVOL_TO_HPOUT2 */
+#define WM8993_MIXOUTLVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTLVOL_TO_HPOUT2 */
+#define WM8993_MIXOUTRVOL_TO_HPOUT2 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */
+#define WM8993_MIXOUTRVOL_TO_HPOUT2_MASK 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */
+#define WM8993_MIXOUTRVOL_TO_HPOUT2_SHIFT 3 /* MIXOUTRVOL_TO_HPOUT2 */
+#define WM8993_MIXOUTRVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTRVOL_TO_HPOUT2 */
+
+/*
+ * R52 (0x34) - Line Mixer1
+ */
+#define WM8993_MIXOUTL_TO_LINEOUT1N 0x0040 /* MIXOUTL_TO_LINEOUT1N */
+#define WM8993_MIXOUTL_TO_LINEOUT1N_MASK 0x0040 /* MIXOUTL_TO_LINEOUT1N */
+#define WM8993_MIXOUTL_TO_LINEOUT1N_SHIFT 6 /* MIXOUTL_TO_LINEOUT1N */
+#define WM8993_MIXOUTL_TO_LINEOUT1N_WIDTH 1 /* MIXOUTL_TO_LINEOUT1N */
+#define WM8993_MIXOUTR_TO_LINEOUT1N 0x0020 /* MIXOUTR_TO_LINEOUT1N */
+#define WM8993_MIXOUTR_TO_LINEOUT1N_MASK 0x0020 /* MIXOUTR_TO_LINEOUT1N */
+#define WM8993_MIXOUTR_TO_LINEOUT1N_SHIFT 5 /* MIXOUTR_TO_LINEOUT1N */
+#define WM8993_MIXOUTR_TO_LINEOUT1N_WIDTH 1 /* MIXOUTR_TO_LINEOUT1N */
+#define WM8993_LINEOUT1_MODE 0x0010 /* LINEOUT1_MODE */
+#define WM8993_LINEOUT1_MODE_MASK 0x0010 /* LINEOUT1_MODE */
+#define WM8993_LINEOUT1_MODE_SHIFT 4 /* LINEOUT1_MODE */
+#define WM8993_LINEOUT1_MODE_WIDTH 1 /* LINEOUT1_MODE */
+#define WM8993_IN1R_TO_LINEOUT1P 0x0004 /* IN1R_TO_LINEOUT1P */
+#define WM8993_IN1R_TO_LINEOUT1P_MASK 0x0004 /* IN1R_TO_LINEOUT1P */
+#define WM8993_IN1R_TO_LINEOUT1P_SHIFT 2 /* IN1R_TO_LINEOUT1P */
+#define WM8993_IN1R_TO_LINEOUT1P_WIDTH 1 /* IN1R_TO_LINEOUT1P */
+#define WM8993_IN1L_TO_LINEOUT1P 0x0002 /* IN1L_TO_LINEOUT1P */
+#define WM8993_IN1L_TO_LINEOUT1P_MASK 0x0002 /* IN1L_TO_LINEOUT1P */
+#define WM8993_IN1L_TO_LINEOUT1P_SHIFT 1 /* IN1L_TO_LINEOUT1P */
+#define WM8993_IN1L_TO_LINEOUT1P_WIDTH 1 /* IN1L_TO_LINEOUT1P */
+#define WM8993_MIXOUTL_TO_LINEOUT1P 0x0001 /* MIXOUTL_TO_LINEOUT1P */
+#define WM8993_MIXOUTL_TO_LINEOUT1P_MASK 0x0001 /* MIXOUTL_TO_LINEOUT1P */
+#define WM8993_MIXOUTL_TO_LINEOUT1P_SHIFT 0 /* MIXOUTL_TO_LINEOUT1P */
+#define WM8993_MIXOUTL_TO_LINEOUT1P_WIDTH 1 /* MIXOUTL_TO_LINEOUT1P */
+
+/*
+ * R53 (0x35) - Line Mixer2
+ */
+#define WM8993_MIXOUTR_TO_LINEOUT2N 0x0040 /* MIXOUTR_TO_LINEOUT2N */
+#define WM8993_MIXOUTR_TO_LINEOUT2N_MASK 0x0040 /* MIXOUTR_TO_LINEOUT2N */
+#define WM8993_MIXOUTR_TO_LINEOUT2N_SHIFT 6 /* MIXOUTR_TO_LINEOUT2N */
+#define WM8993_MIXOUTR_TO_LINEOUT2N_WIDTH 1 /* MIXOUTR_TO_LINEOUT2N */
+#define WM8993_MIXOUTL_TO_LINEOUT2N 0x0020 /* MIXOUTL_TO_LINEOUT2N */
+#define WM8993_MIXOUTL_TO_LINEOUT2N_MASK 0x0020 /* MIXOUTL_TO_LINEOUT2N */
+#define WM8993_MIXOUTL_TO_LINEOUT2N_SHIFT 5 /* MIXOUTL_TO_LINEOUT2N */
+#define WM8993_MIXOUTL_TO_LINEOUT2N_WIDTH 1 /* MIXOUTL_TO_LINEOUT2N */
+#define WM8993_LINEOUT2_MODE 0x0010 /* LINEOUT2_MODE */
+#define WM8993_LINEOUT2_MODE_MASK 0x0010 /* LINEOUT2_MODE */
+#define WM8993_LINEOUT2_MODE_SHIFT 4 /* LINEOUT2_MODE */
+#define WM8993_LINEOUT2_MODE_WIDTH 1 /* LINEOUT2_MODE */
+#define WM8993_IN1L_TO_LINEOUT2P 0x0004 /* IN1L_TO_LINEOUT2P */
+#define WM8993_IN1L_TO_LINEOUT2P_MASK 0x0004 /* IN1L_TO_LINEOUT2P */
+#define WM8993_IN1L_TO_LINEOUT2P_SHIFT 2 /* IN1L_TO_LINEOUT2P */
+#define WM8993_IN1L_TO_LINEOUT2P_WIDTH 1 /* IN1L_TO_LINEOUT2P */
+#define WM8993_IN1R_TO_LINEOUT2P 0x0002 /* IN1R_TO_LINEOUT2P */
+#define WM8993_IN1R_TO_LINEOUT2P_MASK 0x0002 /* IN1R_TO_LINEOUT2P */
+#define WM8993_IN1R_TO_LINEOUT2P_SHIFT 1 /* IN1R_TO_LINEOUT2P */
+#define WM8993_IN1R_TO_LINEOUT2P_WIDTH 1 /* IN1R_TO_LINEOUT2P */
+#define WM8993_MIXOUTR_TO_LINEOUT2P 0x0001 /* MIXOUTR_TO_LINEOUT2P */
+#define WM8993_MIXOUTR_TO_LINEOUT2P_MASK 0x0001 /* MIXOUTR_TO_LINEOUT2P */
+#define WM8993_MIXOUTR_TO_LINEOUT2P_SHIFT 0 /* MIXOUTR_TO_LINEOUT2P */
+#define WM8993_MIXOUTR_TO_LINEOUT2P_WIDTH 1 /* MIXOUTR_TO_LINEOUT2P */
+
+/*
+ * R54 (0x36) - Speaker Mixer
+ */
+#define WM8993_SPKAB_REF_SEL 0x0100 /* SPKAB_REF_SEL */
+#define WM8993_SPKAB_REF_SEL_MASK 0x0100 /* SPKAB_REF_SEL */
+#define WM8993_SPKAB_REF_SEL_SHIFT 8 /* SPKAB_REF_SEL */
+#define WM8993_SPKAB_REF_SEL_WIDTH 1 /* SPKAB_REF_SEL */
+#define WM8993_MIXINL_TO_SPKMIXL 0x0080 /* MIXINL_TO_SPKMIXL */
+#define WM8993_MIXINL_TO_SPKMIXL_MASK 0x0080 /* MIXINL_TO_SPKMIXL */
+#define WM8993_MIXINL_TO_SPKMIXL_SHIFT 7 /* MIXINL_TO_SPKMIXL */
+#define WM8993_MIXINL_TO_SPKMIXL_WIDTH 1 /* MIXINL_TO_SPKMIXL */
+#define WM8993_MIXINR_TO_SPKMIXR 0x0040 /* MIXINR_TO_SPKMIXR */
+#define WM8993_MIXINR_TO_SPKMIXR_MASK 0x0040 /* MIXINR_TO_SPKMIXR */
+#define WM8993_MIXINR_TO_SPKMIXR_SHIFT 6 /* MIXINR_TO_SPKMIXR */
+#define WM8993_MIXINR_TO_SPKMIXR_WIDTH 1 /* MIXINR_TO_SPKMIXR */
+#define WM8993_IN1LP_TO_SPKMIXL 0x0020 /* IN1LP_TO_SPKMIXL */
+#define WM8993_IN1LP_TO_SPKMIXL_MASK 0x0020 /* IN1LP_TO_SPKMIXL */
+#define WM8993_IN1LP_TO_SPKMIXL_SHIFT 5 /* IN1LP_TO_SPKMIXL */
+#define WM8993_IN1LP_TO_SPKMIXL_WIDTH 1 /* IN1LP_TO_SPKMIXL */
+#define WM8993_IN1RP_TO_SPKMIXR 0x0010 /* IN1RP_TO_SPKMIXR */
+#define WM8993_IN1RP_TO_SPKMIXR_MASK 0x0010 /* IN1RP_TO_SPKMIXR */
+#define WM8993_IN1RP_TO_SPKMIXR_SHIFT 4 /* IN1RP_TO_SPKMIXR */
+#define WM8993_IN1RP_TO_SPKMIXR_WIDTH 1 /* IN1RP_TO_SPKMIXR */
+#define WM8993_MIXOUTL_TO_SPKMIXL 0x0008 /* MIXOUTL_TO_SPKMIXL */
+#define WM8993_MIXOUTL_TO_SPKMIXL_MASK 0x0008 /* MIXOUTL_TO_SPKMIXL */
+#define WM8993_MIXOUTL_TO_SPKMIXL_SHIFT 3 /* MIXOUTL_TO_SPKMIXL */
+#define WM8993_MIXOUTL_TO_SPKMIXL_WIDTH 1 /* MIXOUTL_TO_SPKMIXL */
+#define WM8993_MIXOUTR_TO_SPKMIXR 0x0004 /* MIXOUTR_TO_SPKMIXR */
+#define WM8993_MIXOUTR_TO_SPKMIXR_MASK 0x0004 /* MIXOUTR_TO_SPKMIXR */
+#define WM8993_MIXOUTR_TO_SPKMIXR_SHIFT 2 /* MIXOUTR_TO_SPKMIXR */
+#define WM8993_MIXOUTR_TO_SPKMIXR_WIDTH 1 /* MIXOUTR_TO_SPKMIXR */
+#define WM8993_DACL_TO_SPKMIXL 0x0002 /* DACL_TO_SPKMIXL */
+#define WM8993_DACL_TO_SPKMIXL_MASK 0x0002 /* DACL_TO_SPKMIXL */
+#define WM8993_DACL_TO_SPKMIXL_SHIFT 1 /* DACL_TO_SPKMIXL */
+#define WM8993_DACL_TO_SPKMIXL_WIDTH 1 /* DACL_TO_SPKMIXL */
+#define WM8993_DACR_TO_SPKMIXR 0x0001 /* DACR_TO_SPKMIXR */
+#define WM8993_DACR_TO_SPKMIXR_MASK 0x0001 /* DACR_TO_SPKMIXR */
+#define WM8993_DACR_TO_SPKMIXR_SHIFT 0 /* DACR_TO_SPKMIXR */
+#define WM8993_DACR_TO_SPKMIXR_WIDTH 1 /* DACR_TO_SPKMIXR */
+
+/*
+ * R55 (0x37) - Additional Control
+ */
+#define WM8993_LINEOUT1_FB 0x0080 /* LINEOUT1_FB */
+#define WM8993_LINEOUT1_FB_MASK 0x0080 /* LINEOUT1_FB */
+#define WM8993_LINEOUT1_FB_SHIFT 7 /* LINEOUT1_FB */
+#define WM8993_LINEOUT1_FB_WIDTH 1 /* LINEOUT1_FB */
+#define WM8993_LINEOUT2_FB 0x0040 /* LINEOUT2_FB */
+#define WM8993_LINEOUT2_FB_MASK 0x0040 /* LINEOUT2_FB */
+#define WM8993_LINEOUT2_FB_SHIFT 6 /* LINEOUT2_FB */
+#define WM8993_LINEOUT2_FB_WIDTH 1 /* LINEOUT2_FB */
+#define WM8993_VROI 0x0001 /* VROI */
+#define WM8993_VROI_MASK 0x0001 /* VROI */
+#define WM8993_VROI_SHIFT 0 /* VROI */
+#define WM8993_VROI_WIDTH 1 /* VROI */
+
+/*
+ * R56 (0x38) - AntiPOP1
+ */
+#define WM8993_LINEOUT_VMID_BUF_ENA 0x0080 /* LINEOUT_VMID_BUF_ENA */
+#define WM8993_LINEOUT_VMID_BUF_ENA_MASK 0x0080 /* LINEOUT_VMID_BUF_ENA */
+#define WM8993_LINEOUT_VMID_BUF_ENA_SHIFT 7 /* LINEOUT_VMID_BUF_ENA */
+#define WM8993_LINEOUT_VMID_BUF_ENA_WIDTH 1 /* LINEOUT_VMID_BUF_ENA */
+#define WM8993_HPOUT2_IN_ENA 0x0040 /* HPOUT2_IN_ENA */
+#define WM8993_HPOUT2_IN_ENA_MASK 0x0040 /* HPOUT2_IN_ENA */
+#define WM8993_HPOUT2_IN_ENA_SHIFT 6 /* HPOUT2_IN_ENA */
+#define WM8993_HPOUT2_IN_ENA_WIDTH 1 /* HPOUT2_IN_ENA */
+#define WM8993_LINEOUT1_DISCH 0x0020 /* LINEOUT1_DISCH */
+#define WM8993_LINEOUT1_DISCH_MASK 0x0020 /* LINEOUT1_DISCH */
+#define WM8993_LINEOUT1_DISCH_SHIFT 5 /* LINEOUT1_DISCH */
+#define WM8993_LINEOUT1_DISCH_WIDTH 1 /* LINEOUT1_DISCH */
+#define WM8993_LINEOUT2_DISCH 0x0010 /* LINEOUT2_DISCH */
+#define WM8993_LINEOUT2_DISCH_MASK 0x0010 /* LINEOUT2_DISCH */
+#define WM8993_LINEOUT2_DISCH_SHIFT 4 /* LINEOUT2_DISCH */
+#define WM8993_LINEOUT2_DISCH_WIDTH 1 /* LINEOUT2_DISCH */
+
+/*
+ * R57 (0x39) - AntiPOP2
+ */
+#define WM8993_VMID_RAMP_MASK 0x0060 /* VMID_RAMP - [6:5] */
+#define WM8993_VMID_RAMP_SHIFT 5 /* VMID_RAMP - [6:5] */
+#define WM8993_VMID_RAMP_WIDTH 2 /* VMID_RAMP - [6:5] */
+#define WM8993_VMID_BUF_ENA 0x0008 /* VMID_BUF_ENA */
+#define WM8993_VMID_BUF_ENA_MASK 0x0008 /* VMID_BUF_ENA */
+#define WM8993_VMID_BUF_ENA_SHIFT 3 /* VMID_BUF_ENA */
+#define WM8993_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */
+#define WM8993_STARTUP_BIAS_ENA 0x0004 /* STARTUP_BIAS_ENA */
+#define WM8993_STARTUP_BIAS_ENA_MASK 0x0004 /* STARTUP_BIAS_ENA */
+#define WM8993_STARTUP_BIAS_ENA_SHIFT 2 /* STARTUP_BIAS_ENA */
+#define WM8993_STARTUP_BIAS_ENA_WIDTH 1 /* STARTUP_BIAS_ENA */
+#define WM8993_BIAS_SRC 0x0002 /* BIAS_SRC */
+#define WM8993_BIAS_SRC_MASK 0x0002 /* BIAS_SRC */
+#define WM8993_BIAS_SRC_SHIFT 1 /* BIAS_SRC */
+#define WM8993_BIAS_SRC_WIDTH 1 /* BIAS_SRC */
+#define WM8993_VMID_DISCH 0x0001 /* VMID_DISCH */
+#define WM8993_VMID_DISCH_MASK 0x0001 /* VMID_DISCH */
+#define WM8993_VMID_DISCH_SHIFT 0 /* VMID_DISCH */
+#define WM8993_VMID_DISCH_WIDTH 1 /* VMID_DISCH */
+
+/*
+ * R58 (0x3A) - MICBIAS
+ */
+#define WM8993_JD_SCTHR_MASK 0x00C0 /* JD_SCTHR - [7:6] */
+#define WM8993_JD_SCTHR_SHIFT 6 /* JD_SCTHR - [7:6] */
+#define WM8993_JD_SCTHR_WIDTH 2 /* JD_SCTHR - [7:6] */
+#define WM8993_JD_THR_MASK 0x0030 /* JD_THR - [5:4] */
+#define WM8993_JD_THR_SHIFT 4 /* JD_THR - [5:4] */
+#define WM8993_JD_THR_WIDTH 2 /* JD_THR - [5:4] */
+#define WM8993_JD_ENA 0x0004 /* JD_ENA */
+#define WM8993_JD_ENA_MASK 0x0004 /* JD_ENA */
+#define WM8993_JD_ENA_SHIFT 2 /* JD_ENA */
+#define WM8993_JD_ENA_WIDTH 1 /* JD_ENA */
+#define WM8993_MICB2_LVL 0x0002 /* MICB2_LVL */
+#define WM8993_MICB2_LVL_MASK 0x0002 /* MICB2_LVL */
+#define WM8993_MICB2_LVL_SHIFT 1 /* MICB2_LVL */
+#define WM8993_MICB2_LVL_WIDTH 1 /* MICB2_LVL */
+#define WM8993_MICB1_LVL 0x0001 /* MICB1_LVL */
+#define WM8993_MICB1_LVL_MASK 0x0001 /* MICB1_LVL */
+#define WM8993_MICB1_LVL_SHIFT 0 /* MICB1_LVL */
+#define WM8993_MICB1_LVL_WIDTH 1 /* MICB1_LVL */
+
+/*
+ * R60 (0x3C) - FLL Control 1
+ */
+#define WM8993_FLL_FRAC 0x0004 /* FLL_FRAC */
+#define WM8993_FLL_FRAC_MASK 0x0004 /* FLL_FRAC */
+#define WM8993_FLL_FRAC_SHIFT 2 /* FLL_FRAC */
+#define WM8993_FLL_FRAC_WIDTH 1 /* FLL_FRAC */
+#define WM8993_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */
+#define WM8993_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */
+#define WM8993_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */
+#define WM8993_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */
+#define WM8993_FLL_ENA 0x0001 /* FLL_ENA */
+#define WM8993_FLL_ENA_MASK 0x0001 /* FLL_ENA */
+#define WM8993_FLL_ENA_SHIFT 0 /* FLL_ENA */
+#define WM8993_FLL_ENA_WIDTH 1 /* FLL_ENA */
+
+/*
+ * R61 (0x3D) - FLL Control 2
+ */
+#define WM8993_FLL_OUTDIV_MASK 0x0700 /* FLL_OUTDIV - [10:8] */
+#define WM8993_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [10:8] */
+#define WM8993_FLL_OUTDIV_WIDTH 3 /* FLL_OUTDIV - [10:8] */
+#define WM8993_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */
+#define WM8993_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */
+#define WM8993_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */
+#define WM8993_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */
+#define WM8993_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */
+#define WM8993_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */
+
+/*
+ * R62 (0x3E) - FLL Control 3
+ */
+#define WM8993_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */
+#define WM8993_FLL_K_SHIFT 0 /* FLL_K - [15:0] */
+#define WM8993_FLL_K_WIDTH 16 /* FLL_K - [15:0] */
+
+/*
+ * R63 (0x3F) - FLL Control 4
+ */
+#define WM8993_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */
+#define WM8993_FLL_N_SHIFT 5 /* FLL_N - [14:5] */
+#define WM8993_FLL_N_WIDTH 10 /* FLL_N - [14:5] */
+#define WM8993_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */
+#define WM8993_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */
+#define WM8993_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */
+
+/*
+ * R64 (0x40) - FLL Control 5
+ */
+#define WM8993_FLL_FRC_NCO_VAL_MASK 0x1F80 /* FLL_FRC_NCO_VAL - [12:7] */
+#define WM8993_FLL_FRC_NCO_VAL_SHIFT 7 /* FLL_FRC_NCO_VAL - [12:7] */
+#define WM8993_FLL_FRC_NCO_VAL_WIDTH 6 /* FLL_FRC_NCO_VAL - [12:7] */
+#define WM8993_FLL_FRC_NCO 0x0040 /* FLL_FRC_NCO */
+#define WM8993_FLL_FRC_NCO_MASK 0x0040 /* FLL_FRC_NCO */
+#define WM8993_FLL_FRC_NCO_SHIFT 6 /* FLL_FRC_NCO */
+#define WM8993_FLL_FRC_NCO_WIDTH 1 /* FLL_FRC_NCO */
+#define WM8993_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM8993_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM8993_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM8993_FLL_CLK_SRC_MASK 0x0003 /* FLL_CLK_SRC - [1:0] */
+#define WM8993_FLL_CLK_SRC_SHIFT 0 /* FLL_CLK_SRC - [1:0] */
+#define WM8993_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */
+
+/*
+ * R65 (0x41) - Clocking 3
+ */
+#define WM8993_CLK_DCS_DIV_MASK 0x3C00 /* CLK_DCS_DIV - [13:10] */
+#define WM8993_CLK_DCS_DIV_SHIFT 10 /* CLK_DCS_DIV - [13:10] */
+#define WM8993_CLK_DCS_DIV_WIDTH 4 /* CLK_DCS_DIV - [13:10] */
+#define WM8993_SAMPLE_RATE_MASK 0x0380 /* SAMPLE_RATE - [9:7] */
+#define WM8993_SAMPLE_RATE_SHIFT 7 /* SAMPLE_RATE - [9:7] */
+#define WM8993_SAMPLE_RATE_WIDTH 3 /* SAMPLE_RATE - [9:7] */
+#define WM8993_CLK_SYS_RATE_MASK 0x001E /* CLK_SYS_RATE - [4:1] */
+#define WM8993_CLK_SYS_RATE_SHIFT 1 /* CLK_SYS_RATE - [4:1] */
+#define WM8993_CLK_SYS_RATE_WIDTH 4 /* CLK_SYS_RATE - [4:1] */
+#define WM8993_CLK_DSP_ENA 0x0001 /* CLK_DSP_ENA */
+#define WM8993_CLK_DSP_ENA_MASK 0x0001 /* CLK_DSP_ENA */
+#define WM8993_CLK_DSP_ENA_SHIFT 0 /* CLK_DSP_ENA */
+#define WM8993_CLK_DSP_ENA_WIDTH 1 /* CLK_DSP_ENA */
+
+/*
+ * R66 (0x42) - Clocking 4
+ */
+#define WM8993_DAC_DIV4 0x0200 /* DAC_DIV4 */
+#define WM8993_DAC_DIV4_MASK 0x0200 /* DAC_DIV4 */
+#define WM8993_DAC_DIV4_SHIFT 9 /* DAC_DIV4 */
+#define WM8993_DAC_DIV4_WIDTH 1 /* DAC_DIV4 */
+#define WM8993_CLK_256K_DIV_MASK 0x007E /* CLK_256K_DIV - [6:1] */
+#define WM8993_CLK_256K_DIV_SHIFT 1 /* CLK_256K_DIV - [6:1] */
+#define WM8993_CLK_256K_DIV_WIDTH 6 /* CLK_256K_DIV - [6:1] */
+#define WM8993_SR_MODE 0x0001 /* SR_MODE */
+#define WM8993_SR_MODE_MASK 0x0001 /* SR_MODE */
+#define WM8993_SR_MODE_SHIFT 0 /* SR_MODE */
+#define WM8993_SR_MODE_WIDTH 1 /* SR_MODE */
+
+/*
+ * R67 (0x43) - MW Slave Control
+ */
+#define WM8993_MASK_WRITE_ENA 0x0001 /* MASK_WRITE_ENA */
+#define WM8993_MASK_WRITE_ENA_MASK 0x0001 /* MASK_WRITE_ENA */
+#define WM8993_MASK_WRITE_ENA_SHIFT 0 /* MASK_WRITE_ENA */
+#define WM8993_MASK_WRITE_ENA_WIDTH 1 /* MASK_WRITE_ENA */
+
+/*
+ * R69 (0x45) - Bus Control 1
+ */
+#define WM8993_CLK_SYS_ENA 0x0002 /* CLK_SYS_ENA */
+#define WM8993_CLK_SYS_ENA_MASK 0x0002 /* CLK_SYS_ENA */
+#define WM8993_CLK_SYS_ENA_SHIFT 1 /* CLK_SYS_ENA */
+#define WM8993_CLK_SYS_ENA_WIDTH 1 /* CLK_SYS_ENA */
+
+/*
+ * R70 (0x46) - Write Sequencer 0
+ */
+#define WM8993_WSEQ_ENA 0x0100 /* WSEQ_ENA */
+#define WM8993_WSEQ_ENA_MASK 0x0100 /* WSEQ_ENA */
+#define WM8993_WSEQ_ENA_SHIFT 8 /* WSEQ_ENA */
+#define WM8993_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define WM8993_WSEQ_WRITE_INDEX_MASK 0x001F /* WSEQ_WRITE_INDEX - [4:0] */
+#define WM8993_WSEQ_WRITE_INDEX_SHIFT 0 /* WSEQ_WRITE_INDEX - [4:0] */
+#define WM8993_WSEQ_WRITE_INDEX_WIDTH 5 /* WSEQ_WRITE_INDEX - [4:0] */
+
+/*
+ * R71 (0x47) - Write Sequencer 1
+ */
+#define WM8993_WSEQ_DATA_WIDTH_MASK 0x7000 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM8993_WSEQ_DATA_WIDTH_SHIFT 12 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM8993_WSEQ_DATA_WIDTH_WIDTH 3 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM8993_WSEQ_DATA_START_MASK 0x0F00 /* WSEQ_DATA_START - [11:8] */
+#define WM8993_WSEQ_DATA_START_SHIFT 8 /* WSEQ_DATA_START - [11:8] */
+#define WM8993_WSEQ_DATA_START_WIDTH 4 /* WSEQ_DATA_START - [11:8] */
+#define WM8993_WSEQ_ADDR_MASK 0x00FF /* WSEQ_ADDR - [7:0] */
+#define WM8993_WSEQ_ADDR_SHIFT 0 /* WSEQ_ADDR - [7:0] */
+#define WM8993_WSEQ_ADDR_WIDTH 8 /* WSEQ_ADDR - [7:0] */
+
+/*
+ * R72 (0x48) - Write Sequencer 2
+ */
+#define WM8993_WSEQ_EOS 0x4000 /* WSEQ_EOS */
+#define WM8993_WSEQ_EOS_MASK 0x4000 /* WSEQ_EOS */
+#define WM8993_WSEQ_EOS_SHIFT 14 /* WSEQ_EOS */
+#define WM8993_WSEQ_EOS_WIDTH 1 /* WSEQ_EOS */
+#define WM8993_WSEQ_DELAY_MASK 0x0F00 /* WSEQ_DELAY - [11:8] */
+#define WM8993_WSEQ_DELAY_SHIFT 8 /* WSEQ_DELAY - [11:8] */
+#define WM8993_WSEQ_DELAY_WIDTH 4 /* WSEQ_DELAY - [11:8] */
+#define WM8993_WSEQ_DATA_MASK 0x00FF /* WSEQ_DATA - [7:0] */
+#define WM8993_WSEQ_DATA_SHIFT 0 /* WSEQ_DATA - [7:0] */
+#define WM8993_WSEQ_DATA_WIDTH 8 /* WSEQ_DATA - [7:0] */
+
+/*
+ * R73 (0x49) - Write Sequencer 3
+ */
+#define WM8993_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */
+#define WM8993_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */
+#define WM8993_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */
+#define WM8993_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define WM8993_WSEQ_START 0x0100 /* WSEQ_START */
+#define WM8993_WSEQ_START_MASK 0x0100 /* WSEQ_START */
+#define WM8993_WSEQ_START_SHIFT 8 /* WSEQ_START */
+#define WM8993_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define WM8993_WSEQ_START_INDEX_MASK 0x003F /* WSEQ_START_INDEX - [5:0] */
+#define WM8993_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [5:0] */
+#define WM8993_WSEQ_START_INDEX_WIDTH 6 /* WSEQ_START_INDEX - [5:0] */
+
+/*
+ * R74 (0x4A) - Write Sequencer 4
+ */
+#define WM8993_WSEQ_BUSY 0x0001 /* WSEQ_BUSY */
+#define WM8993_WSEQ_BUSY_MASK 0x0001 /* WSEQ_BUSY */
+#define WM8993_WSEQ_BUSY_SHIFT 0 /* WSEQ_BUSY */
+#define WM8993_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+
+/*
+ * R75 (0x4B) - Write Sequencer 5
+ */
+#define WM8993_WSEQ_CURRENT_INDEX_MASK 0x003F /* WSEQ_CURRENT_INDEX - [5:0] */
+#define WM8993_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [5:0] */
+#define WM8993_WSEQ_CURRENT_INDEX_WIDTH 6 /* WSEQ_CURRENT_INDEX - [5:0] */
+
+/*
+ * R76 (0x4C) - Charge Pump 1
+ */
+#define WM8993_CP_ENA 0x8000 /* CP_ENA */
+#define WM8993_CP_ENA_MASK 0x8000 /* CP_ENA */
+#define WM8993_CP_ENA_SHIFT 15 /* CP_ENA */
+#define WM8993_CP_ENA_WIDTH 1 /* CP_ENA */
+
+/*
+ * R81 (0x51) - Class W 0
+ */
+#define WM8993_CP_DYN_FREQ 0x0002 /* CP_DYN_FREQ */
+#define WM8993_CP_DYN_FREQ_MASK 0x0002 /* CP_DYN_FREQ */
+#define WM8993_CP_DYN_FREQ_SHIFT 1 /* CP_DYN_FREQ */
+#define WM8993_CP_DYN_FREQ_WIDTH 1 /* CP_DYN_FREQ */
+#define WM8993_CP_DYN_V 0x0001 /* CP_DYN_V */
+#define WM8993_CP_DYN_V_MASK 0x0001 /* CP_DYN_V */
+#define WM8993_CP_DYN_V_SHIFT 0 /* CP_DYN_V */
+#define WM8993_CP_DYN_V_WIDTH 1 /* CP_DYN_V */
+
+/*
+ * R84 (0x54) - DC Servo 0
+ */
+#define WM8993_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM8993_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM8993_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */
+#define WM8993_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */
+#define WM8993_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM8993_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM8993_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */
+#define WM8993_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */
+#define WM8993_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM8993_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM8993_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */
+#define WM8993_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */
+#define WM8993_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM8993_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM8993_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */
+#define WM8993_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */
+#define WM8993_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM8993_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM8993_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */
+#define WM8993_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */
+#define WM8993_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM8993_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM8993_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */
+#define WM8993_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */
+#define WM8993_DCS_TRIG_DAC_WR_1 0x0008 /* DCS_TRIG_DAC_WR_1 */
+#define WM8993_DCS_TRIG_DAC_WR_1_MASK 0x0008 /* DCS_TRIG_DAC_WR_1 */
+#define WM8993_DCS_TRIG_DAC_WR_1_SHIFT 3 /* DCS_TRIG_DAC_WR_1 */
+#define WM8993_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */
+#define WM8993_DCS_TRIG_DAC_WR_0 0x0004 /* DCS_TRIG_DAC_WR_0 */
+#define WM8993_DCS_TRIG_DAC_WR_0_MASK 0x0004 /* DCS_TRIG_DAC_WR_0 */
+#define WM8993_DCS_TRIG_DAC_WR_0_SHIFT 2 /* DCS_TRIG_DAC_WR_0 */
+#define WM8993_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */
+#define WM8993_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM8993_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM8993_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */
+#define WM8993_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */
+#define WM8993_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM8993_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM8993_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */
+#define WM8993_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */
+
+/*
+ * R85 (0x55) - DC Servo 1
+ */
+#define WM8993_DCS_SERIES_NO_01_MASK 0x0FE0 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM8993_DCS_SERIES_NO_01_SHIFT 5 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM8993_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM8993_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8993_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8993_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */
+
+/*
+ * R87 (0x57) - DC Servo 3
+ */
+#define WM8993_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8993_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8993_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8993_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8993_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8993_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */
+
+/*
+ * R88 (0x58) - DC Servo Readback 0
+ */
+#define WM8993_DCS_DATAPATH_BUSY 0x4000 /* DCS_DATAPATH_BUSY */
+#define WM8993_DCS_DATAPATH_BUSY_MASK 0x4000 /* DCS_DATAPATH_BUSY */
+#define WM8993_DCS_DATAPATH_BUSY_SHIFT 14 /* DCS_DATAPATH_BUSY */
+#define WM8993_DCS_DATAPATH_BUSY_WIDTH 1 /* DCS_DATAPATH_BUSY */
+#define WM8993_DCS_CHANNEL_MASK 0x3000 /* DCS_CHANNEL - [13:12] */
+#define WM8993_DCS_CHANNEL_SHIFT 12 /* DCS_CHANNEL - [13:12] */
+#define WM8993_DCS_CHANNEL_WIDTH 2 /* DCS_CHANNEL - [13:12] */
+#define WM8993_DCS_CAL_COMPLETE_MASK 0x0300 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM8993_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM8993_DCS_CAL_COMPLETE_WIDTH 2 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM8993_DCS_DAC_WR_COMPLETE_MASK 0x0030 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM8993_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM8993_DCS_DAC_WR_COMPLETE_WIDTH 2 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM8993_DCS_STARTUP_COMPLETE_MASK 0x0003 /* DCS_STARTUP_COMPLETE - [1:0] */
+#define WM8993_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [1:0] */
+#define WM8993_DCS_STARTUP_COMPLETE_WIDTH 2 /* DCS_STARTUP_COMPLETE - [1:0] */
+
+/*
+ * R89 (0x59) - DC Servo Readback 1
+ */
+#define WM8993_DCS_INTEG_CHAN_1_MASK 0x00FF /* DCS_INTEG_CHAN_1 - [7:0] */
+#define WM8993_DCS_INTEG_CHAN_1_SHIFT 0 /* DCS_INTEG_CHAN_1 - [7:0] */
+#define WM8993_DCS_INTEG_CHAN_1_WIDTH 8 /* DCS_INTEG_CHAN_1 - [7:0] */
+
+/*
+ * R90 (0x5A) - DC Servo Readback 2
+ */
+#define WM8993_DCS_INTEG_CHAN_0_MASK 0x00FF /* DCS_INTEG_CHAN_0 - [7:0] */
+#define WM8993_DCS_INTEG_CHAN_0_SHIFT 0 /* DCS_INTEG_CHAN_0 - [7:0] */
+#define WM8993_DCS_INTEG_CHAN_0_WIDTH 8 /* DCS_INTEG_CHAN_0 - [7:0] */
+
+/*
+ * R96 (0x60) - Analogue HP 0
+ */
+#define WM8993_HPOUT1_AUTO_PU 0x0100 /* HPOUT1_AUTO_PU */
+#define WM8993_HPOUT1_AUTO_PU_MASK 0x0100 /* HPOUT1_AUTO_PU */
+#define WM8993_HPOUT1_AUTO_PU_SHIFT 8 /* HPOUT1_AUTO_PU */
+#define WM8993_HPOUT1_AUTO_PU_WIDTH 1 /* HPOUT1_AUTO_PU */
+#define WM8993_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */
+#define WM8993_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */
+#define WM8993_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */
+#define WM8993_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */
+#define WM8993_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */
+#define WM8993_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */
+#define WM8993_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */
+#define WM8993_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */
+#define WM8993_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */
+#define WM8993_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */
+#define WM8993_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */
+#define WM8993_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */
+#define WM8993_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */
+#define WM8993_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */
+#define WM8993_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */
+#define WM8993_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */
+#define WM8993_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */
+#define WM8993_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */
+#define WM8993_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */
+#define WM8993_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */
+#define WM8993_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */
+#define WM8993_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */
+#define WM8993_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */
+#define WM8993_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */
+
+/*
+ * R98 (0x62) - EQ1
+ */
+#define WM8993_EQ_ENA 0x0001 /* EQ_ENA */
+#define WM8993_EQ_ENA_MASK 0x0001 /* EQ_ENA */
+#define WM8993_EQ_ENA_SHIFT 0 /* EQ_ENA */
+#define WM8993_EQ_ENA_WIDTH 1 /* EQ_ENA */
+
+/*
+ * R99 (0x63) - EQ2
+ */
+#define WM8993_EQ_B1_GAIN_MASK 0x001F /* EQ_B1_GAIN - [4:0] */
+#define WM8993_EQ_B1_GAIN_SHIFT 0 /* EQ_B1_GAIN - [4:0] */
+#define WM8993_EQ_B1_GAIN_WIDTH 5 /* EQ_B1_GAIN - [4:0] */
+
+/*
+ * R100 (0x64) - EQ3
+ */
+#define WM8993_EQ_B2_GAIN_MASK 0x001F /* EQ_B2_GAIN - [4:0] */
+#define WM8993_EQ_B2_GAIN_SHIFT 0 /* EQ_B2_GAIN - [4:0] */
+#define WM8993_EQ_B2_GAIN_WIDTH 5 /* EQ_B2_GAIN - [4:0] */
+
+/*
+ * R101 (0x65) - EQ4
+ */
+#define WM8993_EQ_B3_GAIN_MASK 0x001F /* EQ_B3_GAIN - [4:0] */
+#define WM8993_EQ_B3_GAIN_SHIFT 0 /* EQ_B3_GAIN - [4:0] */
+#define WM8993_EQ_B3_GAIN_WIDTH 5 /* EQ_B3_GAIN - [4:0] */
+
+/*
+ * R102 (0x66) - EQ5
+ */
+#define WM8993_EQ_B4_GAIN_MASK 0x001F /* EQ_B4_GAIN - [4:0] */
+#define WM8993_EQ_B4_GAIN_SHIFT 0 /* EQ_B4_GAIN - [4:0] */
+#define WM8993_EQ_B4_GAIN_WIDTH 5 /* EQ_B4_GAIN - [4:0] */
+
+/*
+ * R103 (0x67) - EQ6
+ */
+#define WM8993_EQ_B5_GAIN_MASK 0x001F /* EQ_B5_GAIN - [4:0] */
+#define WM8993_EQ_B5_GAIN_SHIFT 0 /* EQ_B5_GAIN - [4:0] */
+#define WM8993_EQ_B5_GAIN_WIDTH 5 /* EQ_B5_GAIN - [4:0] */
+
+/*
+ * R104 (0x68) - EQ7
+ */
+#define WM8993_EQ_B1_A_MASK 0xFFFF /* EQ_B1_A - [15:0] */
+#define WM8993_EQ_B1_A_SHIFT 0 /* EQ_B1_A - [15:0] */
+#define WM8993_EQ_B1_A_WIDTH 16 /* EQ_B1_A - [15:0] */
+
+/*
+ * R105 (0x69) - EQ8
+ */
+#define WM8993_EQ_B1_B_MASK 0xFFFF /* EQ_B1_B - [15:0] */
+#define WM8993_EQ_B1_B_SHIFT 0 /* EQ_B1_B - [15:0] */
+#define WM8993_EQ_B1_B_WIDTH 16 /* EQ_B1_B - [15:0] */
+
+/*
+ * R106 (0x6A) - EQ9
+ */
+#define WM8993_EQ_B1_PG_MASK 0xFFFF /* EQ_B1_PG - [15:0] */
+#define WM8993_EQ_B1_PG_SHIFT 0 /* EQ_B1_PG - [15:0] */
+#define WM8993_EQ_B1_PG_WIDTH 16 /* EQ_B1_PG - [15:0] */
+
+/*
+ * R107 (0x6B) - EQ10
+ */
+#define WM8993_EQ_B2_A_MASK 0xFFFF /* EQ_B2_A - [15:0] */
+#define WM8993_EQ_B2_A_SHIFT 0 /* EQ_B2_A - [15:0] */
+#define WM8993_EQ_B2_A_WIDTH 16 /* EQ_B2_A - [15:0] */
+
+/*
+ * R108 (0x6C) - EQ11
+ */
+#define WM8993_EQ_B2_B_MASK 0xFFFF /* EQ_B2_B - [15:0] */
+#define WM8993_EQ_B2_B_SHIFT 0 /* EQ_B2_B - [15:0] */
+#define WM8993_EQ_B2_B_WIDTH 16 /* EQ_B2_B - [15:0] */
+
+/*
+ * R109 (0x6D) - EQ12
+ */
+#define WM8993_EQ_B2_C_MASK 0xFFFF /* EQ_B2_C - [15:0] */
+#define WM8993_EQ_B2_C_SHIFT 0 /* EQ_B2_C - [15:0] */
+#define WM8993_EQ_B2_C_WIDTH 16 /* EQ_B2_C - [15:0] */
+
+/*
+ * R110 (0x6E) - EQ13
+ */
+#define WM8993_EQ_B2_PG_MASK 0xFFFF /* EQ_B2_PG - [15:0] */
+#define WM8993_EQ_B2_PG_SHIFT 0 /* EQ_B2_PG - [15:0] */
+#define WM8993_EQ_B2_PG_WIDTH 16 /* EQ_B2_PG - [15:0] */
+
+/*
+ * R111 (0x6F) - EQ14
+ */
+#define WM8993_EQ_B3_A_MASK 0xFFFF /* EQ_B3_A - [15:0] */
+#define WM8993_EQ_B3_A_SHIFT 0 /* EQ_B3_A - [15:0] */
+#define WM8993_EQ_B3_A_WIDTH 16 /* EQ_B3_A - [15:0] */
+
+/*
+ * R112 (0x70) - EQ15
+ */
+#define WM8993_EQ_B3_B_MASK 0xFFFF /* EQ_B3_B - [15:0] */
+#define WM8993_EQ_B3_B_SHIFT 0 /* EQ_B3_B - [15:0] */
+#define WM8993_EQ_B3_B_WIDTH 16 /* EQ_B3_B - [15:0] */
+
+/*
+ * R113 (0x71) - EQ16
+ */
+#define WM8993_EQ_B3_C_MASK 0xFFFF /* EQ_B3_C - [15:0] */
+#define WM8993_EQ_B3_C_SHIFT 0 /* EQ_B3_C - [15:0] */
+#define WM8993_EQ_B3_C_WIDTH 16 /* EQ_B3_C - [15:0] */
+
+/*
+ * R114 (0x72) - EQ17
+ */
+#define WM8993_EQ_B3_PG_MASK 0xFFFF /* EQ_B3_PG - [15:0] */
+#define WM8993_EQ_B3_PG_SHIFT 0 /* EQ_B3_PG - [15:0] */
+#define WM8993_EQ_B3_PG_WIDTH 16 /* EQ_B3_PG - [15:0] */
+
+/*
+ * R115 (0x73) - EQ18
+ */
+#define WM8993_EQ_B4_A_MASK 0xFFFF /* EQ_B4_A - [15:0] */
+#define WM8993_EQ_B4_A_SHIFT 0 /* EQ_B4_A - [15:0] */
+#define WM8993_EQ_B4_A_WIDTH 16 /* EQ_B4_A - [15:0] */
+
+/*
+ * R116 (0x74) - EQ19
+ */
+#define WM8993_EQ_B4_B_MASK 0xFFFF /* EQ_B4_B - [15:0] */
+#define WM8993_EQ_B4_B_SHIFT 0 /* EQ_B4_B - [15:0] */
+#define WM8993_EQ_B4_B_WIDTH 16 /* EQ_B4_B - [15:0] */
+
+/*
+ * R117 (0x75) - EQ20
+ */
+#define WM8993_EQ_B4_C_MASK 0xFFFF /* EQ_B4_C - [15:0] */
+#define WM8993_EQ_B4_C_SHIFT 0 /* EQ_B4_C - [15:0] */
+#define WM8993_EQ_B4_C_WIDTH 16 /* EQ_B4_C - [15:0] */
+
+/*
+ * R118 (0x76) - EQ21
+ */
+#define WM8993_EQ_B4_PG_MASK 0xFFFF /* EQ_B4_PG - [15:0] */
+#define WM8993_EQ_B4_PG_SHIFT 0 /* EQ_B4_PG - [15:0] */
+#define WM8993_EQ_B4_PG_WIDTH 16 /* EQ_B4_PG - [15:0] */
+
+/*
+ * R119 (0x77) - EQ22
+ */
+#define WM8993_EQ_B5_A_MASK 0xFFFF /* EQ_B5_A - [15:0] */
+#define WM8993_EQ_B5_A_SHIFT 0 /* EQ_B5_A - [15:0] */
+#define WM8993_EQ_B5_A_WIDTH 16 /* EQ_B5_A - [15:0] */
+
+/*
+ * R120 (0x78) - EQ23
+ */
+#define WM8993_EQ_B5_B_MASK 0xFFFF /* EQ_B5_B - [15:0] */
+#define WM8993_EQ_B5_B_SHIFT 0 /* EQ_B5_B - [15:0] */
+#define WM8993_EQ_B5_B_WIDTH 16 /* EQ_B5_B - [15:0] */
+
+/*
+ * R121 (0x79) - EQ24
+ */
+#define WM8993_EQ_B5_PG_MASK 0xFFFF /* EQ_B5_PG - [15:0] */
+#define WM8993_EQ_B5_PG_SHIFT 0 /* EQ_B5_PG - [15:0] */
+#define WM8993_EQ_B5_PG_WIDTH 16 /* EQ_B5_PG - [15:0] */
+
+/*
+ * R122 (0x7A) - Digital Pulls
+ */
+#define WM8993_MCLK_PU 0x0080 /* MCLK_PU */
+#define WM8993_MCLK_PU_MASK 0x0080 /* MCLK_PU */
+#define WM8993_MCLK_PU_SHIFT 7 /* MCLK_PU */
+#define WM8993_MCLK_PU_WIDTH 1 /* MCLK_PU */
+#define WM8993_MCLK_PD 0x0040 /* MCLK_PD */
+#define WM8993_MCLK_PD_MASK 0x0040 /* MCLK_PD */
+#define WM8993_MCLK_PD_SHIFT 6 /* MCLK_PD */
+#define WM8993_MCLK_PD_WIDTH 1 /* MCLK_PD */
+#define WM8993_DACDAT_PU 0x0020 /* DACDAT_PU */
+#define WM8993_DACDAT_PU_MASK 0x0020 /* DACDAT_PU */
+#define WM8993_DACDAT_PU_SHIFT 5 /* DACDAT_PU */
+#define WM8993_DACDAT_PU_WIDTH 1 /* DACDAT_PU */
+#define WM8993_DACDAT_PD 0x0010 /* DACDAT_PD */
+#define WM8993_DACDAT_PD_MASK 0x0010 /* DACDAT_PD */
+#define WM8993_DACDAT_PD_SHIFT 4 /* DACDAT_PD */
+#define WM8993_DACDAT_PD_WIDTH 1 /* DACDAT_PD */
+#define WM8993_LRCLK_PU 0x0008 /* LRCLK_PU */
+#define WM8993_LRCLK_PU_MASK 0x0008 /* LRCLK_PU */
+#define WM8993_LRCLK_PU_SHIFT 3 /* LRCLK_PU */
+#define WM8993_LRCLK_PU_WIDTH 1 /* LRCLK_PU */
+#define WM8993_LRCLK_PD 0x0004 /* LRCLK_PD */
+#define WM8993_LRCLK_PD_MASK 0x0004 /* LRCLK_PD */
+#define WM8993_LRCLK_PD_SHIFT 2 /* LRCLK_PD */
+#define WM8993_LRCLK_PD_WIDTH 1 /* LRCLK_PD */
+#define WM8993_BCLK_PU 0x0002 /* BCLK_PU */
+#define WM8993_BCLK_PU_MASK 0x0002 /* BCLK_PU */
+#define WM8993_BCLK_PU_SHIFT 1 /* BCLK_PU */
+#define WM8993_BCLK_PU_WIDTH 1 /* BCLK_PU */
+#define WM8993_BCLK_PD 0x0001 /* BCLK_PD */
+#define WM8993_BCLK_PD_MASK 0x0001 /* BCLK_PD */
+#define WM8993_BCLK_PD_SHIFT 0 /* BCLK_PD */
+#define WM8993_BCLK_PD_WIDTH 1 /* BCLK_PD */
+
+/*
+ * R123 (0x7B) - DRC Control 1
+ */
+#define WM8993_DRC_ENA 0x8000 /* DRC_ENA */
+#define WM8993_DRC_ENA_MASK 0x8000 /* DRC_ENA */
+#define WM8993_DRC_ENA_SHIFT 15 /* DRC_ENA */
+#define WM8993_DRC_ENA_WIDTH 1 /* DRC_ENA */
+#define WM8993_DRC_DAC_PATH 0x4000 /* DRC_DAC_PATH */
+#define WM8993_DRC_DAC_PATH_MASK 0x4000 /* DRC_DAC_PATH */
+#define WM8993_DRC_DAC_PATH_SHIFT 14 /* DRC_DAC_PATH */
+#define WM8993_DRC_DAC_PATH_WIDTH 1 /* DRC_DAC_PATH */
+#define WM8993_DRC_SMOOTH_ENA 0x0800 /* DRC_SMOOTH_ENA */
+#define WM8993_DRC_SMOOTH_ENA_MASK 0x0800 /* DRC_SMOOTH_ENA */
+#define WM8993_DRC_SMOOTH_ENA_SHIFT 11 /* DRC_SMOOTH_ENA */
+#define WM8993_DRC_SMOOTH_ENA_WIDTH 1 /* DRC_SMOOTH_ENA */
+#define WM8993_DRC_QR_ENA 0x0400 /* DRC_QR_ENA */
+#define WM8993_DRC_QR_ENA_MASK 0x0400 /* DRC_QR_ENA */
+#define WM8993_DRC_QR_ENA_SHIFT 10 /* DRC_QR_ENA */
+#define WM8993_DRC_QR_ENA_WIDTH 1 /* DRC_QR_ENA */
+#define WM8993_DRC_ANTICLIP_ENA 0x0200 /* DRC_ANTICLIP_ENA */
+#define WM8993_DRC_ANTICLIP_ENA_MASK 0x0200 /* DRC_ANTICLIP_ENA */
+#define WM8993_DRC_ANTICLIP_ENA_SHIFT 9 /* DRC_ANTICLIP_ENA */
+#define WM8993_DRC_ANTICLIP_ENA_WIDTH 1 /* DRC_ANTICLIP_ENA */
+#define WM8993_DRC_HYST_ENA 0x0100 /* DRC_HYST_ENA */
+#define WM8993_DRC_HYST_ENA_MASK 0x0100 /* DRC_HYST_ENA */
+#define WM8993_DRC_HYST_ENA_SHIFT 8 /* DRC_HYST_ENA */
+#define WM8993_DRC_HYST_ENA_WIDTH 1 /* DRC_HYST_ENA */
+#define WM8993_DRC_THRESH_HYST_MASK 0x0030 /* DRC_THRESH_HYST - [5:4] */
+#define WM8993_DRC_THRESH_HYST_SHIFT 4 /* DRC_THRESH_HYST - [5:4] */
+#define WM8993_DRC_THRESH_HYST_WIDTH 2 /* DRC_THRESH_HYST - [5:4] */
+#define WM8993_DRC_MINGAIN_MASK 0x000C /* DRC_MINGAIN - [3:2] */
+#define WM8993_DRC_MINGAIN_SHIFT 2 /* DRC_MINGAIN - [3:2] */
+#define WM8993_DRC_MINGAIN_WIDTH 2 /* DRC_MINGAIN - [3:2] */
+#define WM8993_DRC_MAXGAIN_MASK 0x0003 /* DRC_MAXGAIN - [1:0] */
+#define WM8993_DRC_MAXGAIN_SHIFT 0 /* DRC_MAXGAIN - [1:0] */
+#define WM8993_DRC_MAXGAIN_WIDTH 2 /* DRC_MAXGAIN - [1:0] */
+
+/*
+ * R124 (0x7C) - DRC Control 2
+ */
+#define WM8993_DRC_ATTACK_RATE_MASK 0xF000 /* DRC_ATTACK_RATE - [15:12] */
+#define WM8993_DRC_ATTACK_RATE_SHIFT 12 /* DRC_ATTACK_RATE - [15:12] */
+#define WM8993_DRC_ATTACK_RATE_WIDTH 4 /* DRC_ATTACK_RATE - [15:12] */
+#define WM8993_DRC_DECAY_RATE_MASK 0x0F00 /* DRC_DECAY_RATE - [11:8] */
+#define WM8993_DRC_DECAY_RATE_SHIFT 8 /* DRC_DECAY_RATE - [11:8] */
+#define WM8993_DRC_DECAY_RATE_WIDTH 4 /* DRC_DECAY_RATE - [11:8] */
+#define WM8993_DRC_THRESH_COMP_MASK 0x00FC /* DRC_THRESH_COMP - [7:2] */
+#define WM8993_DRC_THRESH_COMP_SHIFT 2 /* DRC_THRESH_COMP - [7:2] */
+#define WM8993_DRC_THRESH_COMP_WIDTH 6 /* DRC_THRESH_COMP - [7:2] */
+
+/*
+ * R125 (0x7D) - DRC Control 3
+ */
+#define WM8993_DRC_AMP_COMP_MASK 0xF800 /* DRC_AMP_COMP - [15:11] */
+#define WM8993_DRC_AMP_COMP_SHIFT 11 /* DRC_AMP_COMP - [15:11] */
+#define WM8993_DRC_AMP_COMP_WIDTH 5 /* DRC_AMP_COMP - [15:11] */
+#define WM8993_DRC_R0_SLOPE_COMP_MASK 0x0700 /* DRC_R0_SLOPE_COMP - [10:8] */
+#define WM8993_DRC_R0_SLOPE_COMP_SHIFT 8 /* DRC_R0_SLOPE_COMP - [10:8] */
+#define WM8993_DRC_R0_SLOPE_COMP_WIDTH 3 /* DRC_R0_SLOPE_COMP - [10:8] */
+#define WM8993_DRC_FF_DELAY 0x0080 /* DRC_FF_DELAY */
+#define WM8993_DRC_FF_DELAY_MASK 0x0080 /* DRC_FF_DELAY */
+#define WM8993_DRC_FF_DELAY_SHIFT 7 /* DRC_FF_DELAY */
+#define WM8993_DRC_FF_DELAY_WIDTH 1 /* DRC_FF_DELAY */
+#define WM8993_DRC_THRESH_QR_MASK 0x000C /* DRC_THRESH_QR - [3:2] */
+#define WM8993_DRC_THRESH_QR_SHIFT 2 /* DRC_THRESH_QR - [3:2] */
+#define WM8993_DRC_THRESH_QR_WIDTH 2 /* DRC_THRESH_QR - [3:2] */
+#define WM8993_DRC_RATE_QR_MASK 0x0003 /* DRC_RATE_QR - [1:0] */
+#define WM8993_DRC_RATE_QR_SHIFT 0 /* DRC_RATE_QR - [1:0] */
+#define WM8993_DRC_RATE_QR_WIDTH 2 /* DRC_RATE_QR - [1:0] */
+
+/*
+ * R126 (0x7E) - DRC Control 4
+ */
+#define WM8993_DRC_R1_SLOPE_COMP_MASK 0xE000 /* DRC_R1_SLOPE_COMP - [15:13] */
+#define WM8993_DRC_R1_SLOPE_COMP_SHIFT 13 /* DRC_R1_SLOPE_COMP - [15:13] */
+#define WM8993_DRC_R1_SLOPE_COMP_WIDTH 3 /* DRC_R1_SLOPE_COMP - [15:13] */
+#define WM8993_DRC_STARTUP_GAIN_MASK 0x1F00 /* DRC_STARTUP_GAIN - [12:8] */
+#define WM8993_DRC_STARTUP_GAIN_SHIFT 8 /* DRC_STARTUP_GAIN - [12:8] */
+#define WM8993_DRC_STARTUP_GAIN_WIDTH 5 /* DRC_STARTUP_GAIN - [12:8] */
+
+#endif
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 86fc57e25f97..c64e55aa63b6 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -165,87 +165,23 @@ struct wm9081_priv {
int master;
int fll_fref;
int fll_fout;
+ int tdm_width;
struct wm9081_retune_mobile_config *retune;
};

-static int wm9081_reg_is_volatile(int reg)
+static int wm9081_volatile_register(unsigned int reg)
{
switch (reg) {
+ case WM9081_SOFTWARE_RESET:
+ return 1;
default:
return 0;
}
}

-static unsigned int wm9081_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- u16 *cache = codec->reg_cache;
- BUG_ON(reg > WM9081_MAX_REGISTER);
- return cache[reg];
-}
-
-static unsigned int wm9081_read_hw(struct snd_soc_codec *codec, u8 reg)
-{
- struct i2c_msg xfer[2];
- u16 data;
- int ret;
- struct i2c_client *client = codec->control_data;
-
- BUG_ON(reg > WM9081_MAX_REGISTER);
-
- /* Write register */
- xfer[0].addr = client->addr;
- xfer[0].flags = 0;
- xfer[0].len = 1;
- xfer[0].buf = &reg;
-
- /* Read data */
- xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = 2;
- xfer[1].buf = (u8 *)&data;
-
- ret = i2c_transfer(client->adapter, xfer, 2);
- if (ret != 2) {
- dev_err(&client->dev, "i2c_transfer() returned %d\n", ret);
- return 0;
- }
-
- return (data >> 8) | ((data & 0xff) << 8);
-}
-
-static unsigned int wm9081_read(struct snd_soc_codec *codec, unsigned int reg)
-{
- if (wm9081_reg_is_volatile(reg))
- return wm9081_read_hw(codec, reg);
- else
- return wm9081_read_reg_cache(codec, reg);
-}
-
-static int wm9081_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- u16 *cache = codec->reg_cache;
- u8 data[3];
-
- BUG_ON(reg > WM9081_MAX_REGISTER);
-
- if (!wm9081_reg_is_volatile(reg))
- cache[reg] = value;
-
- data[0] = reg;
- data[1] = value >> 8;
- data[2] = value & 0x00ff;
-
- if (codec->hw_write(codec->control_data, data, 3) == 3)
- return 0;
- else
- return -EIO;
-}
-
static int wm9081_reset(struct snd_soc_codec *codec)
{
- return wm9081_write(codec, WM9081_SOFTWARE_RESET, 0);
+ return snd_soc_write(codec, WM9081_SOFTWARE_RESET, 0);
}

static const DECLARE_TLV_DB_SCALE(drc_in_tlv, -4500, 75, 0);
@@ -356,7 +292,7 @@ static int speaker_mode_get(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
unsigned int reg;
- reg = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_2);
+ reg = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_2);
if (reg & WM9081_SPK_MODE)
ucontrol->value.integer.value[0] = 1;
else
@@ -375,8 +311,8 @@ static int speaker_mode_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- unsigned int reg_pwr = wm9081_read(codec, WM9081_POWER_MANAGEMENT);
- unsigned int reg2 = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_2);
+ unsigned int reg_pwr = snd_soc_read(codec, WM9081_POWER_MANAGEMENT);
+ unsigned int reg2 = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_2);
/* Are we changing anything? */
if (ucontrol->value.integer.value[0] ==
@@ -397,7 +333,7 @@ static int speaker_mode_put(struct snd_kcontrol *kcontrol,
reg2 &= ~WM9081_SPK_MODE;
}

- wm9081_write(codec, WM9081_ANALOGUE_SPEAKER_2, reg2);
+ snd_soc_write(codec, WM9081_ANALOGUE_SPEAKER_2, reg2);
return 0;
}
@@ -456,7 +392,7 @@ static int speaker_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- unsigned int reg = wm9081_read(codec, WM9081_POWER_MANAGEMENT);
+ unsigned int reg = snd_soc_read(codec, WM9081_POWER_MANAGEMENT);
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -468,7 +404,7 @@ static int speaker_event(struct snd_soc_dapm_widget *w,
break;
}

- wm9081_write(codec, WM9081_POWER_MANAGEMENT, reg);
+ snd_soc_write(codec, WM9081_POWER_MANAGEMENT, reg);
return 0;
}
@@ -607,7 +543,7 @@ static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id,
if (ret != 0)
return ret;
- reg5 = wm9081_read(codec, WM9081_FLL_CONTROL_5);
+ reg5 = snd_soc_read(codec, WM9081_FLL_CONTROL_5);
reg5 &= ~WM9081_FLL_CLK_SRC_MASK;
switch (fll_id) {
@@ -621,44 +557,44 @@ static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id,
}
/* Disable CLK_SYS while we reconfigure */
- clk_sys_reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_3);
+ clk_sys_reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_3);
if (clk_sys_reg & WM9081_CLK_SYS_ENA)
- wm9081_write(codec, WM9081_CLOCK_CONTROL_3,
+ snd_soc_write(codec, WM9081_CLOCK_CONTROL_3,
clk_sys_reg & ~WM9081_CLK_SYS_ENA);
/* Any FLL configuration change requires that the FLL be
* disabled first. */
- reg1 = wm9081_read(codec, WM9081_FLL_CONTROL_1);
+ reg1 = snd_soc_read(codec, WM9081_FLL_CONTROL_1);
reg1 &= ~WM9081_FLL_ENA;
- wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1);
+ snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1);
/* Apply the configuration */
if (fll_div.k)
reg1 |= WM9081_FLL_FRAC_MASK;
else
reg1 &= ~WM9081_FLL_FRAC_MASK;
- wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1);
+ snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1);
- wm9081_write(codec, WM9081_FLL_CONTROL_2,
+ snd_soc_write(codec, WM9081_FLL_CONTROL_2,
(fll_div.fll_outdiv << WM9081_FLL_OUTDIV_SHIFT) |
(fll_div.fll_fratio << WM9081_FLL_FRATIO_SHIFT));
- wm9081_write(codec, WM9081_FLL_CONTROL_3, fll_div.k);
+ snd_soc_write(codec, WM9081_FLL_CONTROL_3, fll_div.k);
- reg4 = wm9081_read(codec, WM9081_FLL_CONTROL_4);
+ reg4 = snd_soc_read(codec, WM9081_FLL_CONTROL_4);
reg4 &= ~WM9081_FLL_N_MASK;
reg4 |= fll_div.n << WM9081_FLL_N_SHIFT;
- wm9081_write(codec, WM9081_FLL_CONTROL_4, reg4);
+ snd_soc_write(codec, WM9081_FLL_CONTROL_4, reg4);
reg5 &= ~WM9081_FLL_CLK_REF_DIV_MASK;
reg5 |= fll_div.fll_clk_ref_div << WM9081_FLL_CLK_REF_DIV_SHIFT;
- wm9081_write(codec, WM9081_FLL_CONTROL_5, reg5);
+ snd_soc_write(codec, WM9081_FLL_CONTROL_5, reg5);
/* Enable the FLL */
- wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA);
+ snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA);
/* Then bring CLK_SYS up again if it was disabled */
if (clk_sys_reg & WM9081_CLK_SYS_ENA)
- wm9081_write(codec, WM9081_CLOCK_CONTROL_3, clk_sys_reg);
+ snd_soc_write(codec, WM9081_CLOCK_CONTROL_3, clk_sys_reg);
dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);
@@ -707,6 +643,10 @@ static int configure_clock(struct snd_soc_codec *codec)
target > 3000000)
break;
}
+
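+	/* No entry in the CLK_SYS rate table satisfied the constraints for this MCLK */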
+ if (i == ARRAY_SIZE(clk_sys_rates))
+ return -EINVAL;
+
} else if (wm9081->fs) {
for (i = 0; i < ARRAY_SIZE(clk_sys_rates); i++) {
new_sysclk = clk_sys_rates[i].ratio
@@ -714,6 +654,10 @@ static int configure_clock(struct snd_soc_codec *codec)
if (new_sysclk > 3000000)
break;
}
+
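+	/* No usable CLK_SYS rate could be derived from the sample rate */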
+ if (i == ARRAY_SIZE(clk_sys_rates))
+ return -EINVAL;
+
} else {
new_sysclk = 12288000;
}
@@ -734,19 +678,19 @@ static int configure_clock(struct snd_soc_codec *codec)
return -EINVAL;
}
- reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_1);
if (mclkdiv)
reg |= WM9081_MCLKDIV2;
else
reg &= ~WM9081_MCLKDIV2;
- wm9081_write(codec, WM9081_CLOCK_CONTROL_1, reg);
+ snd_soc_write(codec, WM9081_CLOCK_CONTROL_1, reg);
- reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_3);
+ reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_3);
if (fll)
reg |= WM9081_CLK_SRC_SEL;
else
reg &= ~WM9081_CLK_SRC_SEL;
- wm9081_write(codec, WM9081_CLOCK_CONTROL_3, reg);
+ snd_soc_write(codec, WM9081_CLOCK_CONTROL_3, reg);
dev_dbg(codec->dev, "CLK_SYS is %dHz\n", wm9081->sysclk_rate);
@@ -846,76 +790,76 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
/* VMID=2*40k */
- reg = wm9081_read(codec, WM9081_VMID_CONTROL);
+ reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
reg &= ~WM9081_VMID_SEL_MASK;
reg |= 0x2;
- wm9081_write(codec, WM9081_VMID_CONTROL, reg);
+ snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
/* Normal bias current */
- reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg &= ~WM9081_STBY_BIAS_ENA;
- wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
+ snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
break;
case SND_SOC_BIAS_STANDBY:
/* Initial cold start */
if (codec->bias_level == SND_SOC_BIAS_OFF) {
/* Disable LINEOUT discharge */
- reg = wm9081_read(codec, WM9081_ANTI_POP_CONTROL);
+ reg = snd_soc_read(codec, WM9081_ANTI_POP_CONTROL);
reg &= ~WM9081_LINEOUT_DISCH;
- wm9081_write(codec, WM9081_ANTI_POP_CONTROL, reg);
+ snd_soc_write(codec, WM9081_ANTI_POP_CONTROL, reg);
/* Select startup bias source */
- reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg |= WM9081_BIAS_SRC | WM9081_BIAS_ENA;
- wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
+ snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
/* VMID 2*4k; Soft VMID ramp enable */
- reg = wm9081_read(codec, WM9081_VMID_CONTROL);
+ reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
reg |= WM9081_VMID_RAMP | 0x6;
- wm9081_write(codec, WM9081_VMID_CONTROL, reg);
+ snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
mdelay(100);
/* Normal bias enable & soft start off */
reg |= WM9081_BIAS_ENA;
reg &= ~WM9081_VMID_RAMP;
- wm9081_write(codec, WM9081_VMID_CONTROL, reg);
+ snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
/* Standard bias source */
- reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg &= ~WM9081_BIAS_SRC;
- wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
+ snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
}
/* VMID 2*240k */
- reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg &= ~WM9081_VMID_SEL_MASK;
reg |= 0x40;
- wm9081_write(codec, WM9081_VMID_CONTROL, reg);
+ snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
/* Standby bias current on */
- reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg |= WM9081_STBY_BIAS_ENA;
- wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
+ snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
break;
case SND_SOC_BIAS_OFF:
/* Startup bias source */
- reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1);
+ reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
reg |= WM9081_BIAS_SRC;
- wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg);
+ snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
/* Disable VMID and biases with soft ramping */
- reg = wm9081_read(codec, WM9081_VMID_CONTROL);
+ reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA);
reg |= WM9081_VMID_RAMP;
- wm9081_write(codec, WM9081_VMID_CONTROL, reg);
+ snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
/* Actively discharge LINEOUT */
- reg = wm9081_read(codec, WM9081_ANTI_POP_CONTROL);
+ reg = snd_soc_read(codec, WM9081_ANTI_POP_CONTROL);
reg |= WM9081_LINEOUT_DISCH;
- wm9081_write(codec, WM9081_ANTI_POP_CONTROL, reg);
+ snd_soc_write(codec, WM9081_ANTI_POP_CONTROL, reg);
break;
}
@@ -929,7 +873,7 @@ static int wm9081_set_dai_fmt(struct snd_soc_dai *dai,
{
struct snd_soc_codec *codec = dai->codec;
struct wm9081_priv *wm9081 = codec->private_data;
- unsigned int aif2 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_2);
+ unsigned int aif2 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_2);
aif2 &= ~(WM9081_AIF_BCLK_INV | WM9081_AIF_LRCLK_INV |
WM9081_BCLK_DIR | WM9081_LRCLK_DIR | WM9081_AIF_FMT_MASK);
@@ -1010,7 +954,7 @@ static int wm9081_set_dai_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- wm9081_write(codec, WM9081_AUDIO_INTERFACE_2, aif2);
+ snd_soc_write(codec, WM9081_AUDIO_INTERFACE_2, aif2);
return 0;
}
@@ -1024,47 +968,51 @@ static int wm9081_hw_params(struct snd_pcm_substream *substream,
int ret, i, best, best_val, cur_val;
unsigned int clk_ctrl2, aif1, aif2, aif3, aif4;
- clk_ctrl2 = wm9081_read(codec, WM9081_CLOCK_CONTROL_2);
+ clk_ctrl2 = snd_soc_read(codec, WM9081_CLOCK_CONTROL_2);
clk_ctrl2 &= ~(WM9081_CLK_SYS_RATE_MASK | WM9081_SAMPLE_RATE_MASK);
- aif1 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_1);
+ aif1 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_1);
- aif2 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_2);
+ aif2 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_2);
aif2 &= ~WM9081_AIF_WL_MASK;
- aif3 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_3);
+ aif3 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_3);
aif3 &= ~WM9081_BCLK_DIV_MASK;
- aif4 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_4);
+ aif4 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_4);
aif4 &= ~WM9081_LRCLK_RATE_MASK;
- /* What BCLK do we need? */
wm9081->fs = params_rate(params);
- wm9081->bclk = 2 * wm9081->fs;
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- wm9081->bclk *= 16;
- break;
- case SNDRV_PCM_FORMAT_S20_3LE:
- wm9081->bclk *= 20;
- aif2 |= 0x4;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- wm9081->bclk *= 24;
- aif2 |= 0x8;
- break;
- case SNDRV_PCM_FORMAT_S32_LE:
- wm9081->bclk *= 32;
- aif2 |= 0xc;
- break;
- default:
- return -EINVAL;
- }
- if (aif1 & WM9081_AIFDAC_TDM_MODE_MASK) {
+ if (wm9081->tdm_width) {
+ /* If TDM is set up then that fixes our BCLK. */
int slots = ((aif1 & WM9081_AIFDAC_TDM_MODE_MASK) >>
WM9081_AIFDAC_TDM_MODE_SHIFT) + 1;
- wm9081->bclk *= slots;
+
+ wm9081->bclk = wm9081->fs * wm9081->tdm_width * slots;
+ } else {
+ /* Otherwise work out a BCLK from the sample size */
+ wm9081->bclk = 2 * wm9081->fs;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ wm9081->bclk *= 16;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ wm9081->bclk *= 20;
+ aif2 |= 0x4;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ wm9081->bclk *= 24;
+ aif2 |= 0x8;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ wm9081->bclk *= 32;
+ aif2 |= 0xc;
+ break;
+ default:
+ return -EINVAL;
+ }
}
dev_dbg(codec->dev, "Target BCLK is %dHz\n", wm9081->bclk);
@@ -1149,22 +1097,22 @@ static int wm9081_hw_params(struct snd_pcm_substream *substream,
s->name, s->rate);
/* If the EQ is enabled then disable it while we write out */
- eq1 = wm9081_read(codec, WM9081_EQ_1) & WM9081_EQ_ENA;
+ eq1 = snd_soc_read(codec, WM9081_EQ_1) & WM9081_EQ_ENA;
if (eq1 & WM9081_EQ_ENA)
- wm9081_write(codec, WM9081_EQ_1, 0);
+ snd_soc_write(codec, WM9081_EQ_1, 0);
/* Write out the other values */
for (i = 1; i < ARRAY_SIZE(s->config); i++)
- wm9081_write(codec, WM9081_EQ_1 + i, s->config[i]);
+ snd_soc_write(codec, WM9081_EQ_1 + i, s->config[i]);
eq1 |= (s->config[0] & ~WM9081_EQ_ENA);
- wm9081_write(codec, WM9081_EQ_1, eq1);
+ snd_soc_write(codec, WM9081_EQ_1, eq1);
}
- wm9081_write(codec, WM9081_CLOCK_CONTROL_2, clk_ctrl2);
- wm9081_write(codec, WM9081_AUDIO_INTERFACE_2, aif2);
- wm9081_write(codec, WM9081_AUDIO_INTERFACE_3, aif3);
- wm9081_write(codec, WM9081_AUDIO_INTERFACE_4, aif4);
+ snd_soc_write(codec, WM9081_CLOCK_CONTROL_2, clk_ctrl2);
+ snd_soc_write(codec, WM9081_AUDIO_INTERFACE_2, aif2);
+ snd_soc_write(codec, WM9081_AUDIO_INTERFACE_3, aif3);
+ snd_soc_write(codec, WM9081_AUDIO_INTERFACE_4, aif4);
return 0;
}
@@ -1174,14 +1122,14 @@ static int wm9081_digital_mute(struct snd_soc_dai *codec_dai, int mute)
struct snd_soc_codec *codec = codec_dai->codec;
unsigned int reg;
- reg = wm9081_read(codec, WM9081_DAC_DIGITAL_2);
+ reg = snd_soc_read(codec, WM9081_DAC_DIGITAL_2);
if (mute)
reg |= WM9081_DAC_MUTE;
else
reg &= ~WM9081_DAC_MUTE;
- wm9081_write(codec, WM9081_DAC_DIGITAL_2, reg);
+ snd_soc_write(codec, WM9081_DAC_DIGITAL_2, reg);
return 0;
}
@@ -1207,19 +1155,25 @@ static int wm9081_set_sysclk(struct snd_soc_dai *codec_dai,
}
static int wm9081_set_tdm_slot(struct snd_soc_dai *dai,
- unsigned int mask, int slots)
+ unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
struct snd_soc_codec *codec = dai->codec;
- unsigned int aif1 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_1);
+ struct wm9081_priv *wm9081 = codec->private_data;
+ unsigned int aif1 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_1);
aif1 &= ~(WM9081_AIFDAC_TDM_SLOT_MASK | WM9081_AIFDAC_TDM_MODE_MASK);
- if (slots < 1 || slots > 4)
+ if (slots < 0 || slots > 4)
return -EINVAL;
+ wm9081->tdm_width = slot_width;
+
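+	/* A slot count of 0 disables TDM; program the interface for a single slot */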
+ if (slots == 0)
+ slots = 1;
+
aif1 |= (slots - 1) << WM9081_AIFDAC_TDM_MODE_SHIFT;
- switch (mask) {
+ switch (rx_mask) {
case 1:
break;
case 2:
@@ -1235,7 +1189,7 @@ static int wm9081_set_tdm_slot(struct snd_soc_dai *dai,
return -EINVAL;
}
- wm9081_write(codec, WM9081_AUDIO_INTERFACE_1, aif1);
+ snd_soc_write(codec, WM9081_AUDIO_INTERFACE_1, aif1);
return 0;
}
@@ -1357,7 +1311,7 @@ static int wm9081_resume(struct platform_device *pdev)
if (i == WM9081_SOFTWARE_RESET)
continue;
- wm9081_write(codec, i, reg_cache[i]);
+ snd_soc_write(codec, i, reg_cache[i]);
}
wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1377,7 +1331,8 @@ struct snd_soc_codec_device soc_codec_dev_wm9081 = {
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm9081);
-static int wm9081_register(struct wm9081_priv *wm9081)
+static int wm9081_register(struct wm9081_priv *wm9081,
+ enum snd_soc_control_type control)
{
struct snd_soc_codec *codec = &wm9081->codec;
int ret;
@@ -1396,19 +1351,24 @@ static int wm9081_register(struct wm9081_priv *wm9081)
codec->private_data = wm9081;
codec->name = "WM9081";
codec->owner = THIS_MODULE;
- codec->read = wm9081_read;
- codec->write = wm9081_write;
codec->dai = &wm9081_dai;
codec->num_dai = 1;
codec->reg_cache_size = ARRAY_SIZE(wm9081->reg_cache);
codec->reg_cache = &wm9081->reg_cache;
codec->bias_level = SND_SOC_BIAS_OFF;
codec->set_bias_level = wm9081_set_bias_level;
+ codec->volatile_register = wm9081_volatile_register;
memcpy(codec->reg_cache, wm9081_reg_defaults,
sizeof(wm9081_reg_defaults));
- reg = wm9081_read_hw(codec, WM9081_SOFTWARE_RESET);
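+	/* 8-bit register addresses, 16-bit register values, I/O via the ASoC register cache */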
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+
+ reg = snd_soc_read(codec, WM9081_SOFTWARE_RESET);
if (reg != 0x9081) {
dev_err(codec->dev, "Device is not a WM9081: ID=0x%x\n", reg);
ret = -EINVAL;
@@ -1424,10 +1384,10 @@ static int wm9081_register(struct wm9081_priv *wm9081)
wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Enable zero cross by default */
- reg = wm9081_read(codec, WM9081_ANALOGUE_LINEOUT);
- wm9081_write(codec, WM9081_ANALOGUE_LINEOUT, reg | WM9081_LINEOUTZC);
- reg = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_PGA);
- wm9081_write(codec, WM9081_ANALOGUE_SPEAKER_PGA,
+ reg = snd_soc_read(codec, WM9081_ANALOGUE_LINEOUT);
+ snd_soc_write(codec, WM9081_ANALOGUE_LINEOUT, reg | WM9081_LINEOUTZC);
+ reg = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_PGA);
+ snd_soc_write(codec, WM9081_ANALOGUE_SPEAKER_PGA,
reg | WM9081_SPKPGAZC);
wm9081_dai.dev = codec->dev;
@@ -1482,7 +1442,7 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
codec->dev = &i2c->dev;
- return wm9081_register(wm9081);
+ return wm9081_register(wm9081, SND_SOC_I2C);
}
static __devexit int wm9081_i2c_remove(struct i2c_client *client)
@@ -1492,6 +1452,21 @@ static __devexit int wm9081_i2c_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
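+/* Hand the I2C bus PM callbacks off to the ASoC core device suspend/resume helpers */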
+static int wm9081_i2c_suspend(struct i2c_client *client, pm_message_t msg)
+{
+ return snd_soc_suspend_device(&client->dev);
+}
+
+static int wm9081_i2c_resume(struct i2c_client *client)
+{
+ return snd_soc_resume_device(&client->dev);
+}
+#else
+#define wm9081_i2c_suspend NULL
+#define wm9081_i2c_resume NULL
+#endif
+
static const struct i2c_device_id wm9081_i2c_id[] = {
{ "wm9081", 0 },
{ }
@@ -1505,6 +1480,8 @@ static struct i2c_driver wm9081_i2c_driver = {
},
.probe = wm9081_i2c_probe,
.remove = __devexit_p(wm9081_i2c_remove),
+ .suspend = wm9081_i2c_suspend,
+ .resume = wm9081_i2c_resume,
.id_table = wm9081_i2c_id,
};
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index fa88b463e71f..e7d2840d9e59 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -406,7 +406,7 @@ static int wm9705_soc_probe(struct platform_device *pdev)
ret = snd_soc_init_card(socdev);
if (ret < 0) {
printk(KERN_ERR "wm9705: failed to register card\n");
- goto pcm_err;
+ goto reset_err;
}
return 0;
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
new file mode 100644
index 000000000000..e542027eea89
--- /dev/null
+++ b/sound/soc/codecs/wm_hubs.c
@@ -0,0 +1,743 @@
+/*
+ * wm_hubs.c -- WM8993/4 common code
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8993.h"
+#include "wm_hubs.h"
+
+const DECLARE_TLV_DB_SCALE(wm_hubs_spkmix_tlv, -300, 300, 0);
+EXPORT_SYMBOL_GPL(wm_hubs_spkmix_tlv);
+
+static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(inmix_sw_tlv, 0, 3000, 0);
+static const DECLARE_TLV_DB_SCALE(inmix_tlv, -1500, 300, 1);
+static const DECLARE_TLV_DB_SCALE(earpiece_tlv, -600, 600, 0);
+static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
+static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
+static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
+static const unsigned int spkboost_tlv[] = {
+ TLV_DB_RANGE_HEAD(2),
+ 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
+ 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(line_tlv, -600, 600, 0);
+
+static const char *speaker_ref_text[] = {
+ "SPKVDD/2",
+ "VMID",
+};
+
+static const struct soc_enum speaker_ref =
+ SOC_ENUM_SINGLE(WM8993_SPEAKER_MIXER, 8, 2, speaker_ref_text);
+
+static const char *speaker_mode_text[] = {
+ "Class D",
+ "Class AB",
+};
+
+static const struct soc_enum speaker_mode =
+ SOC_ENUM_SINGLE(WM8993_SPKMIXR_ATTENUATION, 8, 2, speaker_mode_text);
+
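+/* Poll the DC servo readback until calibration completes, giving up after 1000 polls */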
+static void wait_for_dc_servo(struct snd_soc_codec *codec)
+{
+ unsigned int reg;
+ int count = 0;
+
+ dev_dbg(codec->dev, "Waiting for DC servo...\n");
+ do {
+ count++;
+ msleep(1);
+ reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_0);
+ dev_dbg(codec->dev, "DC servo status: %x\n", reg);
+ } while ((reg & WM8993_DCS_CAL_COMPLETE_MASK)
+ != WM8993_DCS_CAL_COMPLETE_MASK && count < 1000);
+
+ if ((reg & WM8993_DCS_CAL_COMPLETE_MASK)
+ != WM8993_DCS_CAL_COMPLETE_MASK)
+ dev_err(codec->dev, "Timed out waiting for DC Servo\n");
+}
+
+/*
+ * Update the DC servo calibration on gain changes
+ */
+static int wm8993_put_dc_servo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int ret;
+
+ ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
+
+ /* Only need to do this if the outputs are active */
+ if (snd_soc_read(codec, WM8993_POWER_MANAGEMENT_1)
+ & (WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA))
+ snd_soc_update_bits(codec,
+ WM8993_DC_SERVO_0,
+ WM8993_DCS_TRIG_SINGLE_0 |
+ WM8993_DCS_TRIG_SINGLE_1,
+ WM8993_DCS_TRIG_SINGLE_0 |
+ WM8993_DCS_TRIG_SINGLE_1);
+
+ return ret;
+}
+
+static const struct snd_kcontrol_new analogue_snd_controls[] = {
+SOC_SINGLE_TLV("IN1L Volume", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
+ inpga_tlv),
+SOC_SINGLE("IN1L Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
+SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
+
+SOC_SINGLE_TLV("IN1R Volume", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
+ inpga_tlv),
+SOC_SINGLE("IN1R Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
+SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
+
+
+SOC_SINGLE_TLV("IN2L Volume", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
+ inpga_tlv),
+SOC_SINGLE("IN2L Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
+SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
+
+SOC_SINGLE_TLV("IN2R Volume", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
+ inpga_tlv),
+SOC_SINGLE("IN2R Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
+SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
+
+SOC_SINGLE_TLV("MIXINL IN2L Volume", WM8993_INPUT_MIXER3, 7, 1, 0,
+ inmix_sw_tlv),
+SOC_SINGLE_TLV("MIXINL IN1L Volume", WM8993_INPUT_MIXER3, 4, 1, 0,
+ inmix_sw_tlv),
+SOC_SINGLE_TLV("MIXINL Output Record Volume", WM8993_INPUT_MIXER3, 0, 7, 0,
+ inmix_tlv),
+SOC_SINGLE_TLV("MIXINL IN1LP Volume", WM8993_INPUT_MIXER5, 6, 7, 0, inmix_tlv),
+SOC_SINGLE_TLV("MIXINL Direct Voice Volume", WM8993_INPUT_MIXER5, 0, 6, 0,
+ inmix_tlv),
+
+SOC_SINGLE_TLV("MIXINR IN2R Volume", WM8993_INPUT_MIXER4, 7, 1, 0,
+ inmix_sw_tlv),
+SOC_SINGLE_TLV("MIXINR IN1R Volume", WM8993_INPUT_MIXER4, 4, 1, 0,
+ inmix_sw_tlv),
+SOC_SINGLE_TLV("MIXINR Output Record Volume", WM8993_INPUT_MIXER4, 0, 7, 0,
+ inmix_tlv),
+SOC_SINGLE_TLV("MIXINR IN1RP Volume", WM8993_INPUT_MIXER6, 6, 7, 0, inmix_tlv),
+SOC_SINGLE_TLV("MIXINR Direct Voice Volume", WM8993_INPUT_MIXER6, 0, 6, 0,
+ inmix_tlv),
+
+SOC_SINGLE_TLV("Left Output Mixer IN2RN Volume", WM8993_OUTPUT_MIXER5, 6, 7, 1,
+ outmix_tlv),
+SOC_SINGLE_TLV("Left Output Mixer IN2LN Volume", WM8993_OUTPUT_MIXER3, 6, 7, 1,
+ outmix_tlv),
+SOC_SINGLE_TLV("Left Output Mixer IN2LP Volume", WM8993_OUTPUT_MIXER3, 9, 7, 1,
+ outmix_tlv),
+SOC_SINGLE_TLV("Left Output Mixer IN1L Volume", WM8993_OUTPUT_MIXER3, 0, 7, 1,
+ outmix_tlv),
+SOC_SINGLE_TLV("Left Output Mixer IN1R Volume", WM8993_OUTPUT_MIXER3, 3, 7, 1,
+ outmix_tlv),
+SOC_SINGLE_TLV("Left Output Mixer Right Input Volume",
+ WM8993_OUTPUT_MIXER5, 3, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Left Output Mixer Left Input Volume",
+ WM8993_OUTPUT_MIXER5, 0, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Left Output Mixer DAC Volume", WM8993_OUTPUT_MIXER5, 9, 7, 1,
+ outmix_tlv),
+
+SOC_SINGLE_TLV("Right Output Mixer IN2LN Volume",
+ WM8993_OUTPUT_MIXER6, 6, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Right Output Mixer IN2RN Volume",
+ WM8993_OUTPUT_MIXER4, 6, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Right Output Mixer IN1L Volume",
+ WM8993_OUTPUT_MIXER4, 3, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Right Output Mixer IN1R Volume",
+ WM8993_OUTPUT_MIXER4, 0, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Right Output Mixer IN2RP Volume",
+ WM8993_OUTPUT_MIXER4, 9, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Right Output Mixer Left Input Volume",
+ WM8993_OUTPUT_MIXER6, 3, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Right Output Mixer Right Input Volume",
+ WM8993_OUTPUT_MIXER6, 6, 7, 1, outmix_tlv),
+SOC_SINGLE_TLV("Right Output Mixer DAC Volume",
+ WM8993_OUTPUT_MIXER6, 9, 7, 1, outmix_tlv),
+
+SOC_DOUBLE_R_TLV("Output Volume", WM8993_LEFT_OPGA_VOLUME,
+ WM8993_RIGHT_OPGA_VOLUME, 0, 63, 0, outpga_tlv),
+SOC_DOUBLE_R("Output Switch", WM8993_LEFT_OPGA_VOLUME,
+ WM8993_RIGHT_OPGA_VOLUME, 6, 1, 0),
+SOC_DOUBLE_R("Output ZC Switch", WM8993_LEFT_OPGA_VOLUME,
+ WM8993_RIGHT_OPGA_VOLUME, 7, 1, 0),
+
+SOC_SINGLE("Earpiece Switch", WM8993_HPOUT2_VOLUME, 5, 1, 1),
+SOC_SINGLE_TLV("Earpiece Volume", WM8993_HPOUT2_VOLUME, 4, 1, 1, earpiece_tlv),
+
+SOC_SINGLE_TLV("SPKL Input Volume", WM8993_SPKMIXL_ATTENUATION,
+ 5, 1, 1, wm_hubs_spkmix_tlv),
+SOC_SINGLE_TLV("SPKL IN1LP Volume", WM8993_SPKMIXL_ATTENUATION,
+ 4, 1, 1, wm_hubs_spkmix_tlv),
+SOC_SINGLE_TLV("SPKL Output Volume", WM8993_SPKMIXL_ATTENUATION,
+ 3, 1, 1, wm_hubs_spkmix_tlv),
+
+SOC_SINGLE_TLV("SPKR Input Volume", WM8993_SPKMIXR_ATTENUATION,
+ 5, 1, 1, wm_hubs_spkmix_tlv),
+SOC_SINGLE_TLV("SPKR IN1RP Volume", WM8993_SPKMIXR_ATTENUATION,
+ 4, 1, 1, wm_hubs_spkmix_tlv),
+SOC_SINGLE_TLV("SPKR Output Volume", WM8993_SPKMIXR_ATTENUATION,
+ 3, 1, 1, wm_hubs_spkmix_tlv),
+
+SOC_DOUBLE_R_TLV("Speaker Mixer Volume",
+ WM8993_SPKMIXL_ATTENUATION, WM8993_SPKMIXR_ATTENUATION,
+ 0, 3, 1, spkmixout_tlv),
+SOC_DOUBLE_R_TLV("Speaker Volume",
+ WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
+ 0, 63, 0, outpga_tlv),
+SOC_DOUBLE_R("Speaker Switch",
+ WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
+ 6, 1, 0),
+SOC_DOUBLE_R("Speaker ZC Switch",
+ WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
+ 7, 1, 0),
+SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 0, 3, 7, 0,
+ spkboost_tlv),
+SOC_ENUM("Speaker Reference", speaker_ref),
+SOC_ENUM("Speaker Mode", speaker_mode),
+
+{
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Volume",
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .tlv.p = outpga_tlv,
+ .info = snd_soc_info_volsw_2r,
+ .get = snd_soc_get_volsw_2r, .put = wm8993_put_dc_servo,
+ .private_value = (unsigned long)&(struct soc_mixer_control) {
+ .reg = WM8993_LEFT_OUTPUT_VOLUME,
+ .rreg = WM8993_RIGHT_OUTPUT_VOLUME,
+ .shift = 0, .max = 63
+ },
+},
+SOC_DOUBLE_R("Headphone Switch", WM8993_LEFT_OUTPUT_VOLUME,
+ WM8993_RIGHT_OUTPUT_VOLUME, 6, 1, 0),
+SOC_DOUBLE_R("Headphone ZC Switch", WM8993_LEFT_OUTPUT_VOLUME,
+ WM8993_RIGHT_OUTPUT_VOLUME, 7, 1, 0),
+
+SOC_SINGLE("LINEOUT1N Switch", WM8993_LINE_OUTPUTS_VOLUME, 6, 1, 1),
+SOC_SINGLE("LINEOUT1P Switch", WM8993_LINE_OUTPUTS_VOLUME, 5, 1, 1),
+SOC_SINGLE_TLV("LINEOUT1 Volume", WM8993_LINE_OUTPUTS_VOLUME, 4, 1, 1,
+ line_tlv),
+
+SOC_SINGLE("LINEOUT2N Switch", WM8993_LINE_OUTPUTS_VOLUME, 2, 1, 1),
+SOC_SINGLE("LINEOUT2P Switch", WM8993_LINE_OUTPUTS_VOLUME, 1, 1, 1),
+SOC_SINGLE_TLV("LINEOUT2 Volume", WM8993_LINE_OUTPUTS_VOLUME, 0, 1, 1,
+ line_tlv),
+};
+
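+/* Bring up the charge pump and headphone output stages, run the DC servo, then remove
+ * the output shorts; tear down in the reverse order on power-down.
+ */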
+static int hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ unsigned int reg = snd_soc_read(codec, WM8993_ANALOGUE_HP_0);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
+ WM8993_CP_ENA, WM8993_CP_ENA);
+
+ msleep(5);
+
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
+ WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA,
+ WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA);
+
+ reg |= WM8993_HPOUT1L_DLY | WM8993_HPOUT1R_DLY;
+ snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg);
+
+ /* Start the DC servo */
+ snd_soc_update_bits(codec, WM8993_DC_SERVO_0,
+ 0xFFFF,
+ WM8993_DCS_ENA_CHAN_0 |
+ WM8993_DCS_ENA_CHAN_1 |
+ WM8993_DCS_TRIG_STARTUP_1 |
+ WM8993_DCS_TRIG_STARTUP_0);
+ wait_for_dc_servo(codec);
+
+ reg |= WM8993_HPOUT1R_OUTP | WM8993_HPOUT1R_RMV_SHORT |
+ WM8993_HPOUT1L_OUTP | WM8993_HPOUT1L_RMV_SHORT;
+ snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ reg &= ~(WM8993_HPOUT1L_RMV_SHORT |
+ WM8993_HPOUT1L_DLY |
+ WM8993_HPOUT1L_OUTP |
+ WM8993_HPOUT1R_RMV_SHORT |
+ WM8993_HPOUT1R_DLY |
+ WM8993_HPOUT1R_OUTP);
+
+ snd_soc_update_bits(codec, WM8993_DC_SERVO_0,
+ 0xffff, 0);
+
+ snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg);
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
+ WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA,
+ 0);
+
+ snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
+ WM8993_CP_ENA, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int earpiece_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *control, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ u16 reg = snd_soc_read(codec, WM8993_ANTIPOP1) & ~WM8993_HPOUT2_IN_ENA;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ reg |= WM8993_HPOUT2_IN_ENA;
+ snd_soc_write(codec, WM8993_ANTIPOP1, reg);
+ udelay(50);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_write(codec, WM8993_ANTIPOP1, reg);
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new in1l_pga[] = {
+SOC_DAPM_SINGLE("IN1LP Switch", WM8993_INPUT_MIXER2, 5, 1, 0),
+SOC_DAPM_SINGLE("IN1LN Switch", WM8993_INPUT_MIXER2, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new in1r_pga[] = {
+SOC_DAPM_SINGLE("IN1RP Switch", WM8993_INPUT_MIXER2, 1, 1, 0),
+SOC_DAPM_SINGLE("IN1RN Switch", WM8993_INPUT_MIXER2, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new in2l_pga[] = {
+SOC_DAPM_SINGLE("IN2LP Switch", WM8993_INPUT_MIXER2, 7, 1, 0),
+SOC_DAPM_SINGLE("IN2LN Switch", WM8993_INPUT_MIXER2, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new in2r_pga[] = {
+SOC_DAPM_SINGLE("IN2RP Switch", WM8993_INPUT_MIXER2, 3, 1, 0),
+SOC_DAPM_SINGLE("IN2RN Switch", WM8993_INPUT_MIXER2, 2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mixinl[] = {
+SOC_DAPM_SINGLE("IN2L Switch", WM8993_INPUT_MIXER3, 8, 1, 0),
+SOC_DAPM_SINGLE("IN1L Switch", WM8993_INPUT_MIXER3, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new mixinr[] = {
+SOC_DAPM_SINGLE("IN2R Switch", WM8993_INPUT_MIXER4, 8, 1, 0),
+SOC_DAPM_SINGLE("IN1R Switch", WM8993_INPUT_MIXER4, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new left_output_mixer[] = {
+SOC_DAPM_SINGLE("Right Input Switch", WM8993_OUTPUT_MIXER1, 7, 1, 0),
+SOC_DAPM_SINGLE("Left Input Switch", WM8993_OUTPUT_MIXER1, 6, 1, 0),
+SOC_DAPM_SINGLE("IN2RN Switch", WM8993_OUTPUT_MIXER1, 5, 1, 0),
+SOC_DAPM_SINGLE("IN2LN Switch", WM8993_OUTPUT_MIXER1, 4, 1, 0),
+SOC_DAPM_SINGLE("IN2LP Switch", WM8993_OUTPUT_MIXER1, 1, 1, 0),
+SOC_DAPM_SINGLE("IN1R Switch", WM8993_OUTPUT_MIXER1, 3, 1, 0),
+SOC_DAPM_SINGLE("IN1L Switch", WM8993_OUTPUT_MIXER1, 2, 1, 0),
+SOC_DAPM_SINGLE("DAC Switch", WM8993_OUTPUT_MIXER1, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new right_output_mixer[] = {
+SOC_DAPM_SINGLE("Left Input Switch", WM8993_OUTPUT_MIXER2, 7, 1, 0),
+SOC_DAPM_SINGLE("Right Input Switch", WM8993_OUTPUT_MIXER2, 6, 1, 0),
+SOC_DAPM_SINGLE("IN2LN Switch", WM8993_OUTPUT_MIXER2, 5, 1, 0),
+SOC_DAPM_SINGLE("IN2RN Switch", WM8993_OUTPUT_MIXER2, 4, 1, 0),
+SOC_DAPM_SINGLE("IN1L Switch", WM8993_OUTPUT_MIXER2, 3, 1, 0),
+SOC_DAPM_SINGLE("IN1R Switch", WM8993_OUTPUT_MIXER2, 2, 1, 0),
+SOC_DAPM_SINGLE("IN2RP Switch", WM8993_OUTPUT_MIXER2, 1, 1, 0),
+SOC_DAPM_SINGLE("DAC Switch", WM8993_OUTPUT_MIXER2, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new earpiece_mixer[] = {
+SOC_DAPM_SINGLE("Direct Voice Switch", WM8993_HPOUT2_MIXER, 5, 1, 0),
+SOC_DAPM_SINGLE("Left Output Switch", WM8993_HPOUT2_MIXER, 4, 1, 0),
+SOC_DAPM_SINGLE("Right Output Switch", WM8993_HPOUT2_MIXER, 3, 1, 0),
+};
+
+static const struct snd_kcontrol_new left_speaker_boost[] = {
+SOC_DAPM_SINGLE("Direct Voice Switch", WM8993_SPKOUT_MIXERS, 5, 1, 0),
+SOC_DAPM_SINGLE("SPKL Switch", WM8993_SPKOUT_MIXERS, 4, 1, 0),
+SOC_DAPM_SINGLE("SPKR Switch", WM8993_SPKOUT_MIXERS, 3, 1, 0),
+};
+
+static const struct snd_kcontrol_new right_speaker_boost[] = {
+SOC_DAPM_SINGLE("Direct Voice Switch", WM8993_SPKOUT_MIXERS, 2, 1, 0),
+SOC_DAPM_SINGLE("SPKL Switch", WM8993_SPKOUT_MIXERS, 1, 1, 0),
+SOC_DAPM_SINGLE("SPKR Switch", WM8993_SPKOUT_MIXERS, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new line1_mix[] = {
+SOC_DAPM_SINGLE("IN1R Switch", WM8993_LINE_MIXER1, 2, 1, 0),
+SOC_DAPM_SINGLE("IN1L Switch", WM8993_LINE_MIXER1, 1, 1, 0),
+SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER1, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new line1n_mix[] = {
+SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER1, 6, 1, 0),
+SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER1, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new line1p_mix[] = {
+SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER1, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new line2_mix[] = {
+SOC_DAPM_SINGLE("IN2R Switch", WM8993_LINE_MIXER2, 2, 1, 0),
+SOC_DAPM_SINGLE("IN2L Switch", WM8993_LINE_MIXER2, 1, 1, 0),
+SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new line2n_mix[] = {
+SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
+SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new line2p_mix[] = {
+SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget analogue_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("IN1LN"),
+SND_SOC_DAPM_INPUT("IN1LP"),
+SND_SOC_DAPM_INPUT("IN2LN"),
+SND_SOC_DAPM_INPUT("IN2LP/VXRN"),
+SND_SOC_DAPM_INPUT("IN1RN"),
+SND_SOC_DAPM_INPUT("IN1RP"),
+SND_SOC_DAPM_INPUT("IN2RN"),
+SND_SOC_DAPM_INPUT("IN2RP/VXRP"),
+
+SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0),
+SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0),
+
+SND_SOC_DAPM_MIXER("IN1L PGA", WM8993_POWER_MANAGEMENT_2, 6, 0,
+ in1l_pga, ARRAY_SIZE(in1l_pga)),
+SND_SOC_DAPM_MIXER("IN1R PGA", WM8993_POWER_MANAGEMENT_2, 4, 0,
+ in1r_pga, ARRAY_SIZE(in1r_pga)),
+
+SND_SOC_DAPM_MIXER("IN2L PGA", WM8993_POWER_MANAGEMENT_2, 7, 0,
+ in2l_pga, ARRAY_SIZE(in2l_pga)),
+SND_SOC_DAPM_MIXER("IN2R PGA", WM8993_POWER_MANAGEMENT_2, 5, 0,
+ in2r_pga, ARRAY_SIZE(in2r_pga)),
+
+/* Dummy widgets to represent differential paths */
+SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+SND_SOC_DAPM_MIXER("MIXINL", WM8993_POWER_MANAGEMENT_2, 9, 0,
+ mixinl, ARRAY_SIZE(mixinl)),
+SND_SOC_DAPM_MIXER("MIXINR", WM8993_POWER_MANAGEMENT_2, 8, 0,
+ mixinr, ARRAY_SIZE(mixinr)),
+
+SND_SOC_DAPM_MIXER("Left Output Mixer", WM8993_POWER_MANAGEMENT_3, 5, 0,
+ left_output_mixer, ARRAY_SIZE(left_output_mixer)),
+SND_SOC_DAPM_MIXER("Right Output Mixer", WM8993_POWER_MANAGEMENT_3, 4, 0,
+ right_output_mixer, ARRAY_SIZE(right_output_mixer)),
+
+SND_SOC_DAPM_PGA("Left Output PGA", WM8993_POWER_MANAGEMENT_3, 7, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right Output PGA", WM8993_POWER_MANAGEMENT_3, 6, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0,
+ NULL, 0,
+ hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+SND_SOC_DAPM_MIXER("Earpiece Mixer", SND_SOC_NOPM, 0, 0,
+ earpiece_mixer, ARRAY_SIZE(earpiece_mixer)),
+SND_SOC_DAPM_PGA_E("Earpiece Driver", WM8993_POWER_MANAGEMENT_1, 11, 0,
+ NULL, 0, earpiece_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+SND_SOC_DAPM_MIXER("SPKL Boost", SND_SOC_NOPM, 0, 0,
+ left_speaker_boost, ARRAY_SIZE(left_speaker_boost)),
+SND_SOC_DAPM_MIXER("SPKR Boost", SND_SOC_NOPM, 0, 0,
+ right_speaker_boost, ARRAY_SIZE(right_speaker_boost)),
+
+SND_SOC_DAPM_PGA("SPKL Driver", WM8993_POWER_MANAGEMENT_1, 12, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("SPKR Driver", WM8993_POWER_MANAGEMENT_1, 13, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_MIXER("LINEOUT1 Mixer", SND_SOC_NOPM, 0, 0,
+ line1_mix, ARRAY_SIZE(line1_mix)),
+SND_SOC_DAPM_MIXER("LINEOUT2 Mixer", SND_SOC_NOPM, 0, 0,
+ line2_mix, ARRAY_SIZE(line2_mix)),
+
+SND_SOC_DAPM_MIXER("LINEOUT1N Mixer", SND_SOC_NOPM, 0, 0,
+ line1n_mix, ARRAY_SIZE(line1n_mix)),
+SND_SOC_DAPM_MIXER("LINEOUT1P Mixer", SND_SOC_NOPM, 0, 0,
+ line1p_mix, ARRAY_SIZE(line1p_mix)),
+SND_SOC_DAPM_MIXER("LINEOUT2N Mixer", SND_SOC_NOPM, 0, 0,
+ line2n_mix, ARRAY_SIZE(line2n_mix)),
+SND_SOC_DAPM_MIXER("LINEOUT2P Mixer", SND_SOC_NOPM, 0, 0,
+ line2p_mix, ARRAY_SIZE(line2p_mix)),
+
+SND_SOC_DAPM_PGA("LINEOUT1N Driver", WM8993_POWER_MANAGEMENT_3, 13, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LINEOUT1P Driver", WM8993_POWER_MANAGEMENT_3, 12, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LINEOUT2N Driver", WM8993_POWER_MANAGEMENT_3, 11, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LINEOUT2P Driver", WM8993_POWER_MANAGEMENT_3, 10, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_OUTPUT("SPKOUTLP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTLN"),
+SND_SOC_DAPM_OUTPUT("SPKOUTRP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTRN"),
+SND_SOC_DAPM_OUTPUT("HPOUT1L"),
+SND_SOC_DAPM_OUTPUT("HPOUT1R"),
+SND_SOC_DAPM_OUTPUT("HPOUT2P"),
+SND_SOC_DAPM_OUTPUT("HPOUT2N"),
+SND_SOC_DAPM_OUTPUT("LINEOUT1P"),
+SND_SOC_DAPM_OUTPUT("LINEOUT1N"),
+SND_SOC_DAPM_OUTPUT("LINEOUT2P"),
+SND_SOC_DAPM_OUTPUT("LINEOUT2N"),
+};
+
+static const struct snd_soc_dapm_route analogue_routes[] = {
+ { "IN1L PGA", "IN1LP Switch", "IN1LP" },
+ { "IN1L PGA", "IN1LN Switch", "IN1LN" },
+
+ { "IN1R PGA", "IN1RP Switch", "IN1RP" },
+ { "IN1R PGA", "IN1RN Switch", "IN1RN" },
+
+ { "IN2L PGA", "IN2LP Switch", "IN2LP/VXRN" },
+ { "IN2L PGA", "IN2LN Switch", "IN2LN" },
+
+ { "IN2R PGA", "IN2RP Switch", "IN2RP/VXRP" },
+ { "IN2R PGA", "IN2RN Switch", "IN2RN" },
+
+ { "Direct Voice", NULL, "IN2LP/VXRN" },
+ { "Direct Voice", NULL, "IN2RP/VXRP" },
+
+ { "MIXINL", "IN1L Switch", "IN1L PGA" },
+ { "MIXINL", "IN2L Switch", "IN2L PGA" },
+ { "MIXINL", NULL, "Direct Voice" },
+ { "MIXINL", NULL, "IN1LP" },
+ { "MIXINL", NULL, "Left Output Mixer" },
+
+ { "MIXINR", "IN1R Switch", "IN1R PGA" },
+ { "MIXINR", "IN2R Switch", "IN2R PGA" },
+ { "MIXINR", NULL, "Direct Voice" },
+ { "MIXINR", NULL, "IN1RP" },
+ { "MIXINR", NULL, "Right Output Mixer" },
+
+ { "ADCL", NULL, "MIXINL" },
+ { "ADCR", NULL, "MIXINR" },
+
+ { "Left Output Mixer", "Left Input Switch", "MIXINL" },
+ { "Left Output Mixer", "Right Input Switch", "MIXINR" },
+ { "Left Output Mixer", "IN2RN Switch", "IN2RN" },
+ { "Left Output Mixer", "IN2LN Switch", "IN2LN" },
+ { "Left Output Mixer", "IN2LP Switch", "IN2LP/VXRN" },
+ { "Left Output Mixer", "IN1L Switch", "IN1L PGA" },
+ { "Left Output Mixer", "IN1R Switch", "IN1R PGA" },
+
+ { "Right Output Mixer", "Left Input Switch", "MIXINL" },
+ { "Right Output Mixer", "Right Input Switch", "MIXINR" },
+ { "Right Output Mixer", "IN2LN Switch", "IN2LN" },
+ { "Right Output Mixer", "IN2RN Switch", "IN2RN" },
+ { "Right Output Mixer", "IN2RP Switch", "IN2RP/VXRP" },
+ { "Right Output Mixer", "IN1L Switch", "IN1L PGA" },
+ { "Right Output Mixer", "IN1R Switch", "IN1R PGA" },
+
+ { "Left Output PGA", NULL, "Left Output Mixer" },
+ { "Left Output PGA", NULL, "TOCLK" },
+
+ { "Right Output PGA", NULL, "Right Output Mixer" },
+ { "Right Output PGA", NULL, "TOCLK" },
+
+ { "Earpiece Mixer", "Direct Voice Switch", "Direct Voice" },
+ { "Earpiece Mixer", "Left Output Switch", "Left Output PGA" },
+ { "Earpiece Mixer", "Right Output Switch", "Right Output PGA" },
+
+ { "Earpiece Driver", NULL, "Earpiece Mixer" },
+ { "HPOUT2N", NULL, "Earpiece Driver" },
+ { "HPOUT2P", NULL, "Earpiece Driver" },
+
+ { "SPKL", "Input Switch", "MIXINL" },
+ { "SPKL", "IN1LP Switch", "IN1LP" },
+ { "SPKL", "Output Switch", "Left Output Mixer" },
+ { "SPKL", NULL, "TOCLK" },
+
+ { "SPKR", "Input Switch", "MIXINR" },
+ { "SPKR", "IN1RP Switch", "IN1RP" },
+ { "SPKR", "Output Switch", "Right Output Mixer" },
+ { "SPKR", NULL, "TOCLK" },
+
+ { "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
+ { "SPKL Boost", "SPKL Switch", "SPKL" },
+ { "SPKL Boost", "SPKR Switch", "SPKR" },
+
+ { "SPKR Boost", "Direct Voice Switch", "Direct Voice" },
+ { "SPKR Boost", "SPKR Switch", "SPKR" },
+ { "SPKR Boost", "SPKL Switch", "SPKL" },
+
+ { "SPKL Driver", NULL, "SPKL Boost" },
+ { "SPKL Driver", NULL, "CLK_SYS" },
+
+ { "SPKR Driver", NULL, "SPKR Boost" },
+ { "SPKR Driver", NULL, "CLK_SYS" },
+
+ { "SPKOUTLP", NULL, "SPKL Driver" },
+ { "SPKOUTLN", NULL, "SPKL Driver" },
+ { "SPKOUTRP", NULL, "SPKR Driver" },
+ { "SPKOUTRN", NULL, "SPKR Driver" },
+
+ { "Left Headphone Mux", "Mixer", "Left Output Mixer" },
+ { "Right Headphone Mux", "Mixer", "Right Output Mixer" },
+
+ { "Headphone PGA", NULL, "Left Headphone Mux" },
+ { "Headphone PGA", NULL, "Right Headphone Mux" },
+ { "Headphone PGA", NULL, "CLK_SYS" },
+
+ { "HPOUT1L", NULL, "Headphone PGA" },
+ { "HPOUT1R", NULL, "Headphone PGA" },
+
+ { "LINEOUT1N", NULL, "LINEOUT1N Driver" },
+ { "LINEOUT1P", NULL, "LINEOUT1P Driver" },
+ { "LINEOUT2N", NULL, "LINEOUT2N Driver" },
+ { "LINEOUT2P", NULL, "LINEOUT2P Driver" },
+};
+
+static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
+ { "LINEOUT1 Mixer", "IN1L Switch", "IN1L PGA" },
+ { "LINEOUT1 Mixer", "IN1R Switch", "IN1R PGA" },
+ { "LINEOUT1 Mixer", "Output Switch", "Left Output Mixer" },
+
+ { "LINEOUT1N Driver", NULL, "LINEOUT1 Mixer" },
+ { "LINEOUT1P Driver", NULL, "LINEOUT1 Mixer" },
+};
+
+static const struct snd_soc_dapm_route lineout1_se_routes[] = {
+ { "LINEOUT1N Mixer", "Left Output Switch", "Left Output Mixer" },
+ { "LINEOUT1N Mixer", "Right Output Switch", "Left Output Mixer" },
+
+ { "LINEOUT1P Mixer", "Left Output Switch", "Left Output Mixer" },
+
+ { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
+ { "LINEOUT1P Driver", NULL, "LINEOUT1P Mixer" },
+};
+
+static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
+ { "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
+ { "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
+ { "LINEOUT2 Mixer", "Output Switch", "Right Output Mixer" },
+
+ { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
+ { "LINEOUT2P Driver", NULL, "LINEOUT2 Mixer" },
+};
+
+static const struct snd_soc_dapm_route lineout2_se_routes[] = {
+ { "LINEOUT2N Mixer", "Left Output Switch", "Left Output Mixer" },
+ { "LINEOUT2N Mixer", "Right Output Switch", "Left Output Mixer" },
+
+ { "LINEOUT2P Mixer", "Right Output Switch", "Right Output Mixer" },
+
+ { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
+ { "LINEOUT2P Driver", NULL, "LINEOUT2P Mixer" },
+};
+
+int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec)
+{
+ /* Latch volume update bits & default ZC on */
+ snd_soc_update_bits(codec, WM8993_LEFT_LINE_INPUT_1_2_VOLUME,
+ WM8993_IN1_VU, WM8993_IN1_VU);
+ snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_1_2_VOLUME,
+ WM8993_IN1_VU, WM8993_IN1_VU);
+ snd_soc_update_bits(codec, WM8993_LEFT_LINE_INPUT_3_4_VOLUME,
+ WM8993_IN2_VU, WM8993_IN2_VU);
+ snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_3_4_VOLUME,
+ WM8993_IN2_VU, WM8993_IN2_VU);
+
+ snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_RIGHT,
+ WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
+
+ snd_soc_update_bits(codec, WM8993_LEFT_OUTPUT_VOLUME,
+ WM8993_HPOUT1L_ZC, WM8993_HPOUT1L_ZC);
+ snd_soc_update_bits(codec, WM8993_RIGHT_OUTPUT_VOLUME,
+ WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC,
+ WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC);
+
+ snd_soc_update_bits(codec, WM8993_LEFT_OPGA_VOLUME,
+ WM8993_MIXOUTL_ZC, WM8993_MIXOUTL_ZC);
+ snd_soc_update_bits(codec, WM8993_RIGHT_OPGA_VOLUME,
+ WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU,
+ WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU);
+
+ snd_soc_add_controls(codec, analogue_snd_controls,
+ ARRAY_SIZE(analogue_snd_controls));
+
+ snd_soc_dapm_new_controls(codec, analogue_dapm_widgets,
+ ARRAY_SIZE(analogue_dapm_widgets));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm_hubs_add_analogue_controls);
+
+int wm_hubs_add_analogue_routes(struct snd_soc_codec *codec,
+ int lineout1_diff, int lineout2_diff)
+{
+ snd_soc_dapm_add_routes(codec, analogue_routes,
+ ARRAY_SIZE(analogue_routes));
+
+ if (lineout1_diff)
+ snd_soc_dapm_add_routes(codec,
+ lineout1_diff_routes,
+ ARRAY_SIZE(lineout1_diff_routes));
+ else
+ snd_soc_dapm_add_routes(codec,
+ lineout1_se_routes,
+ ARRAY_SIZE(lineout1_se_routes));
+
+ if (lineout2_diff)
+ snd_soc_dapm_add_routes(codec,
+ lineout2_diff_routes,
+ ARRAY_SIZE(lineout2_diff_routes));
+ else
+ snd_soc_dapm_add_routes(codec,
+ lineout2_se_routes,
+ ARRAY_SIZE(lineout2_se_routes));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm_hubs_add_analogue_routes);
+
+MODULE_DESCRIPTION("Shared support for Wolfson hubs products");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
new file mode 100644
index 000000000000..ec09cb6a2939
--- /dev/null
+++ b/sound/soc/codecs/wm_hubs.h
@@ -0,0 +1,24 @@
+/*
+ * wm_hubs.h -- WM899x common code
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM_HUBS_H
+#define _WM_HUBS_H
+
+struct snd_soc_codec;
+
+extern const unsigned int wm_hubs_spkmix_tlv[];
+
+extern int wm_hubs_add_analogue_controls(struct snd_soc_codec *);
+extern int wm_hubs_add_analogue_routes(struct snd_soc_codec *, int, int);
+
+#endif
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index 411a710be660..4dfd4ad9d90e 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -9,6 +9,9 @@ config SND_DAVINCI_SOC
config SND_DAVINCI_SOC_I2S
tristate
+config SND_DAVINCI_SOC_MCASP
+ tristate
+
config SND_DAVINCI_SOC_EVM
tristate "SoC Audio support for DaVinci DM6446 or DM355 EVM"
depends on SND_DAVINCI_SOC
@@ -19,6 +22,16 @@ config SND_DAVINCI_SOC_EVM
Say Y if you want to add support for SoC audio on TI
DaVinci DM6446 or DM355 EVM platforms.
+config SND_DM6467_SOC_EVM
+ tristate "SoC Audio support for DaVinci DM6467 EVM"
+ depends on SND_DAVINCI_SOC && MACH_DAVINCI_DM6467_EVM
+ select SND_DAVINCI_SOC_MCASP
+ select SND_SOC_TLV320AIC3X
+ select SND_SOC_SPDIF
+
+ help
+ Say Y if you want to add support for SoC audio on TI
+
config SND_DAVINCI_SOC_SFFSDR
tristate "SoC Audio support for SFFSDR"
depends on SND_DAVINCI_SOC && MACH_SFFSDR
@@ -28,3 +41,23 @@ config SND_DAVINCI_SOC_SFFSDR
help
Say Y if you want to add support for SoC audio on
Lyrtech SFFSDR board.
+
+config SND_DA830_SOC_EVM
+ tristate "SoC Audio support for DA830/OMAP-L137 EVM"
+ depends on SND_DAVINCI_SOC && MACH_DAVINCI_DA830_EVM
+ select SND_DAVINCI_SOC_MCASP
+ select SND_SOC_TLV320AIC3X
+
+ help
+ Say Y if you want to add support for SoC audio on TI
+ DA830/OMAP-L137 EVM
+
+config SND_DA850_SOC_EVM
+ tristate "SoC Audio support for DA850/OMAP-L138 EVM"
+ depends on SND_DAVINCI_SOC && MACH_DAVINCI_DA850_EVM
+ select SND_DAVINCI_SOC_MCASP
+ select SND_SOC_TLV320AIC3X
+ help
+ Say Y if you want to add support for SoC audio on TI
+ DA850/OMAP-L138 EVM
+
diff --git a/sound/soc/davinci/Makefile b/sound/soc/davinci/Makefile
index ca8bae1fc3f6..a6939d71b988 100644
--- a/sound/soc/davinci/Makefile
+++ b/sound/soc/davinci/Makefile
@@ -1,13 +1,18 @@
# DAVINCI Platform Support
snd-soc-davinci-objs := davinci-pcm.o
snd-soc-davinci-i2s-objs := davinci-i2s.o
+snd-soc-davinci-mcasp-objs:= davinci-mcasp.o
obj-$(CONFIG_SND_DAVINCI_SOC) += snd-soc-davinci.o
obj-$(CONFIG_SND_DAVINCI_SOC_I2S) += snd-soc-davinci-i2s.o
+obj-$(CONFIG_SND_DAVINCI_SOC_MCASP) += snd-soc-davinci-mcasp.o
# DAVINCI Machine Support
snd-soc-evm-objs := davinci-evm.o
snd-soc-sffsdr-objs := davinci-sffsdr.o
obj-$(CONFIG_SND_DAVINCI_SOC_EVM) += snd-soc-evm.o
+obj-$(CONFIG_SND_DM6467_SOC_EVM) += snd-soc-evm.o
+obj-$(CONFIG_SND_DA830_SOC_EVM) += snd-soc-evm.o
+obj-$(CONFIG_SND_DA850_SOC_EVM) += snd-soc-evm.o
obj-$(CONFIG_SND_DAVINCI_SOC_SFFSDR) += snd-soc-sffsdr.o
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 58fd1cbedd88..67414f659405 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -14,6 +14,7 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -27,9 +28,10 @@
#include <mach/mux.h>
#include "../codecs/tlv320aic3x.h"
+#include "../codecs/spdif_transciever.h"
#include "davinci-pcm.h"
#include "davinci-i2s.h"
-
+#include "davinci-mcasp.h"
#define AUDIO_FORMAT (SND_SOC_DAIFMT_DSP_B | \
SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF)
@@ -43,7 +45,7 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
unsigned sysclk;
/* ASP1 on DM355 EVM is clocked by an external oscillator */
- if (machine_is_davinci_dm355_evm())
+ if (machine_is_davinci_dm355_evm() || machine_is_davinci_dm6467_evm())
sysclk = 27000000;
/* ASP0 in DM6446 EVM is clocked by U55, as configured by
@@ -53,6 +55,10 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
else if (machine_is_davinci_evm())
sysclk = 12288000;
+ else if (machine_is_davinci_da830_evm() ||
+ machine_is_davinci_da850_evm())
+ sysclk = 24576000;
+
else
return -EINVAL;
@@ -144,6 +150,32 @@ static struct snd_soc_dai_link evm_dai = {
.ops = &evm_ops,
};
+static struct snd_soc_dai_link dm6467_evm_dai[] = {
+ {
+ .name = "TLV320AIC3X",
+ .stream_name = "AIC3X",
+ .cpu_dai = &davinci_mcasp_dai[DAVINCI_MCASP_I2S_DAI],
+ .codec_dai = &aic3x_dai,
+ .init = evm_aic3x_init,
+ .ops = &evm_ops,
+ },
+ {
+ .name = "McASP",
+ .stream_name = "spdif",
+ .cpu_dai = &davinci_mcasp_dai[DAVINCI_MCASP_DIT_DAI],
+ .codec_dai = &dit_stub_dai,
+ .ops = &evm_ops,
+ },
+};
+static struct snd_soc_dai_link da8xx_evm_dai = {
+ .name = "TLV320AIC3X",
+ .stream_name = "AIC3X",
+ .cpu_dai = &davinci_mcasp_dai[DAVINCI_MCASP_I2S_DAI],
+ .codec_dai = &aic3x_dai,
+ .init = evm_aic3x_init,
+ .ops = &evm_ops,
+};
+
/* davinci-evm audio machine driver */
static struct snd_soc_card snd_soc_card_evm = {
.name = "DaVinci EVM",
@@ -152,73 +184,80 @@ static struct snd_soc_card snd_soc_card_evm = {
.num_links = 1,
};
-/* evm audio private data */
-static struct aic3x_setup_data evm_aic3x_setup = {
- .i2c_bus = 1,
- .i2c_address = 0x1b,
+/* davinci dm6467 evm audio machine driver */
+static struct snd_soc_card dm6467_snd_soc_card_evm = {
+ .name = "DaVinci DM6467 EVM",
+ .platform = &davinci_soc_platform,
+ .dai_link = dm6467_evm_dai,
+ .num_links = ARRAY_SIZE(dm6467_evm_dai),
};
+static struct snd_soc_card da830_snd_soc_card = {
+ .name = "DA830/OMAP-L137 EVM",
+ .dai_link = &da8xx_evm_dai,
+ .platform = &davinci_soc_platform,
+ .num_links = 1,
+};
+
+static struct snd_soc_card da850_snd_soc_card = {
+ .name = "DA850/OMAP-L138 EVM",
+ .dai_link = &da8xx_evm_dai,
+ .platform = &davinci_soc_platform,
+ .num_links = 1,
+};
+
+static struct aic3x_setup_data aic3x_setup;
+
/* evm audio subsystem */
static struct snd_soc_device evm_snd_devdata = {
.card = &snd_soc_card_evm,
.codec_dev = &soc_codec_dev_aic3x,
- .codec_data = &evm_aic3x_setup,
-};
-
-/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */
-static struct resource evm_snd_resources[] = {
- {
- .start = DAVINCI_ASP0_BASE,
- .end = DAVINCI_ASP0_BASE + SZ_8K - 1,
- .flags = IORESOURCE_MEM,
- },
+ .codec_data = &aic3x_setup,
};
-static struct evm_snd_platform_data evm_snd_data = {
- .tx_dma_ch = DAVINCI_DMA_ASP0_TX,
- .rx_dma_ch = DAVINCI_DMA_ASP0_RX,
+/* evm audio subsystem */
+static struct snd_soc_device dm6467_evm_snd_devdata = {
+ .card = &dm6467_snd_soc_card_evm,
+ .codec_dev = &soc_codec_dev_aic3x,
+ .codec_data = &aic3x_setup,
};
-/* DM335 EVM uses ASP1; line-out is a stereo mini-jack */
-static struct resource dm335evm_snd_resources[] = {
- {
- .start = DAVINCI_ASP1_BASE,
- .end = DAVINCI_ASP1_BASE + SZ_8K - 1,
- .flags = IORESOURCE_MEM,
- },
+/* evm audio subsystem */
+static struct snd_soc_device da830_evm_snd_devdata = {
+ .card = &da830_snd_soc_card,
+ .codec_dev = &soc_codec_dev_aic3x,
+ .codec_data = &aic3x_setup,
};
-static struct evm_snd_platform_data dm335evm_snd_data = {
- .tx_dma_ch = DAVINCI_DMA_ASP1_TX,
- .rx_dma_ch = DAVINCI_DMA_ASP1_RX,
+static struct snd_soc_device da850_evm_snd_devdata = {
+ .card = &da850_snd_soc_card,
+ .codec_dev = &soc_codec_dev_aic3x,
+ .codec_data = &aic3x_setup,
};
static struct platform_device *evm_snd_device;
static int __init evm_init(void)
{
- struct resource *resources;
- unsigned num_resources;
- struct evm_snd_platform_data *data;
+ struct snd_soc_device *evm_snd_dev_data;
int index;
int ret;
if (machine_is_davinci_evm()) {
- davinci_cfg_reg(DM644X_MCBSP);
-
- resources = evm_snd_resources;
- num_resources = ARRAY_SIZE(evm_snd_resources);
- data = &evm_snd_data;
+ evm_snd_dev_data = &evm_snd_devdata;
index = 0;
} else if (machine_is_davinci_dm355_evm()) {
- /* we don't use ASP1 IRQs, or we'd need to mux them ... */
- davinci_cfg_reg(DM355_EVT8_ASP1_TX);
- davinci_cfg_reg(DM355_EVT9_ASP1_RX);
-
- resources = dm335evm_snd_resources;
- num_resources = ARRAY_SIZE(dm335evm_snd_resources);
- data = &dm335evm_snd_data;
+ evm_snd_dev_data = &evm_snd_devdata;
+ index = 1;
+ } else if (machine_is_davinci_dm6467_evm()) {
+ evm_snd_dev_data = &dm6467_evm_snd_devdata;
+ index = 0;
+ } else if (machine_is_davinci_da830_evm()) {
+ evm_snd_dev_data = &da830_evm_snd_devdata;
index = 1;
+ } else if (machine_is_davinci_da850_evm()) {
+ evm_snd_dev_data = &da850_evm_snd_devdata;
+ index = 0;
} else
return -EINVAL;
@@ -226,17 +265,8 @@ static int __init evm_init(void)
if (!evm_snd_device)
return -ENOMEM;
- platform_set_drvdata(evm_snd_device, &evm_snd_devdata);
- evm_snd_devdata.dev = &evm_snd_device->dev;
- platform_device_add_data(evm_snd_device, data, sizeof(*data));
-
- ret = platform_device_add_resources(evm_snd_device, resources,
- num_resources);
- if (ret) {
- platform_device_put(evm_snd_device);
- return ret;
- }
-
+ platform_set_drvdata(evm_snd_device, evm_snd_dev_data);
+ evm_snd_dev_data->dev = &evm_snd_device->dev;
ret = platform_device_add(evm_snd_device);
if (ret)
platform_device_put(evm_snd_device);
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index b1ea52fc83c7..12a6c549ee6e 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -22,6 +22,8 @@
#include <sound/initval.h>
#include <sound/soc.h>
+#include <mach/asp.h>
+
#include "davinci-pcm.h"
@@ -63,6 +65,7 @@
#define DAVINCI_MCBSP_RCR_RWDLEN1(v) ((v) << 5)
#define DAVINCI_MCBSP_RCR_RFRLEN1(v) ((v) << 8)
#define DAVINCI_MCBSP_RCR_RDATDLY(v) ((v) << 16)
+#define DAVINCI_MCBSP_RCR_RFIG (1 << 18)
#define DAVINCI_MCBSP_RCR_RWDLEN2(v) ((v) << 21)
#define DAVINCI_MCBSP_XCR_XWDLEN1(v) ((v) << 5)
@@ -85,14 +88,6 @@
#define DAVINCI_MCBSP_PCR_FSRM (1 << 10)
#define DAVINCI_MCBSP_PCR_FSXM (1 << 11)
-#define MOD_REG_BIT(val, mask, set) do { \
- if (set) { \
- val |= mask; \
- } else { \
- val &= ~mask; \
- } \
-} while (0)
-
enum {
DAVINCI_MCBSP_WORD_8 = 0,
DAVINCI_MCBSP_WORD_12,
@@ -112,6 +107,10 @@ static struct davinci_pcm_dma_params davinci_i2s_pcm_in = {
struct davinci_mcbsp_dev {
void __iomem *base;
+#define MOD_DSP_A 0
+#define MOD_DSP_B 1
+ int mode;
+ u32 pcr;
struct clk *clk;
struct davinci_pcm_dma_params *dma_params[2];
};
@@ -127,96 +126,100 @@ static inline u32 davinci_mcbsp_read_reg(struct davinci_mcbsp_dev *dev, int reg)
return __raw_readl(dev->base + reg);
}
-static void davinci_mcbsp_start(struct snd_pcm_substream *substream)
+static void toggle_clock(struct davinci_mcbsp_dev *dev, int playback)
+{
+ u32 m = playback ? DAVINCI_MCBSP_PCR_CLKXP : DAVINCI_MCBSP_PCR_CLKRP;
+ /* The clock needs to toggle to complete reset.
+ * So, fake it by toggling the clk polarity.
+ */
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr ^ m);
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr);
+}
+
+static void davinci_mcbsp_start(struct davinci_mcbsp_dev *dev,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct davinci_mcbsp_dev *dev = rtd->dai->cpu_dai->private_data;
struct snd_soc_device *socdev = rtd->socdev;
struct snd_soc_platform *platform = socdev->card->platform;
- u32 w;
- int ret;
-
- /* Start the sample generator and enable transmitter/receiver */
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_GRST, 1);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
+ int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ u32 spcr;
+ u32 mask = playback ? DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST;
+ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+ if (spcr & mask) {
+ /* start off disabled */
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG,
+ spcr & ~mask);
+ toggle_clock(dev, playback);
+ }
+ if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM |
+ DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM)) {
+ /* Start the sample generator */
+ spcr |= DAVINCI_MCBSP_SPCR_GRST;
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+ }
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (playback) {
/* Stop the DMA to avoid data loss */
/* while the transmitter is out of reset to handle XSYNCERR */
if (platform->pcm_ops->trigger) {
- ret = platform->pcm_ops->trigger(substream,
+ int ret = platform->pcm_ops->trigger(substream,
SNDRV_PCM_TRIGGER_STOP);
if (ret < 0)
printk(KERN_DEBUG "Playback DMA stop failed\n");
}
/* Enable the transmitter */
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_XRST, 1);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
+ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+ spcr |= DAVINCI_MCBSP_SPCR_XRST;
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
/* wait for any unexpected frame sync error to occur */
udelay(100);
/* Disable the transmitter to clear any outstanding XSYNCERR */
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_XRST, 0);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
+ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+ spcr &= ~DAVINCI_MCBSP_SPCR_XRST;
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+ toggle_clock(dev, playback);
/* Restart the DMA */
if (platform->pcm_ops->trigger) {
- ret = platform->pcm_ops->trigger(substream,
+ int ret = platform->pcm_ops->trigger(substream,
SNDRV_PCM_TRIGGER_START);
if (ret < 0)
printk(KERN_DEBUG "Playback DMA start failed\n");
}
- /* Enable the transmitter */
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_XRST, 1);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
-
- } else {
-
- /* Enable the reciever */
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_RRST, 1);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
}
+ /* Enable transmitter or receiver */
+ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+ spcr |= mask;
- /* Start frame sync */
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_FRST, 1);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
+ if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM)) {
+ /* Start frame sync */
+ spcr |= DAVINCI_MCBSP_SPCR_FRST;
+ }
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
}
-static void davinci_mcbsp_stop(struct snd_pcm_substream *substream)
+static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct davinci_mcbsp_dev *dev = rtd->dai->cpu_dai->private_data;
- u32 w;
+ u32 spcr;
/* Reset transmitter/receiver and sample rate/frame sync generators */
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_GRST |
- DAVINCI_MCBSP_SPCR_FRST, 0);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_XRST, 0);
- else
- MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_RRST, 0);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
+ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+ spcr &= ~(DAVINCI_MCBSP_SPCR_GRST | DAVINCI_MCBSP_SPCR_FRST);
+ spcr &= playback ? ~DAVINCI_MCBSP_SPCR_XRST : ~DAVINCI_MCBSP_SPCR_RRST;
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
+ toggle_clock(dev, playback);
}
static int davinci_i2s_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+ struct snd_soc_dai *cpu_dai)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
- struct davinci_mcbsp_dev *dev = rtd->dai->cpu_dai->private_data;
-
+ struct davinci_mcbsp_dev *dev = cpu_dai->private_data;
cpu_dai->dma_data = dev->dma_params[substream->stream];
-
return 0;
}
@@ -228,12 +231,11 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
struct davinci_mcbsp_dev *dev = cpu_dai->private_data;
unsigned int pcr;
unsigned int srgr;
- unsigned int rcr;
- unsigned int xcr;
srgr = DAVINCI_MCBSP_SRGR_FSGM |
DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) |
DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1);
+ /* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
/* cpu is master */
@@ -258,11 +260,8 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
return -EINVAL;
}
- rcr = DAVINCI_MCBSP_RCR_RFRLEN1(1);
- xcr = DAVINCI_MCBSP_XCR_XFIG | DAVINCI_MCBSP_XCR_XFRLEN1(1);
+ /* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
- case SND_SOC_DAIFMT_DSP_B:
- break;
case SND_SOC_DAIFMT_I2S:
/* Davinci doesn't support TRUE I2S, but some codecs will have
* the left and right channels contiguous. This allows
@@ -282,8 +281,10 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
*/
fmt ^= SND_SOC_DAIFMT_NB_IF;
case SND_SOC_DAIFMT_DSP_A:
- rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1);
- xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1);
+ dev->mode = MOD_DSP_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ dev->mode = MOD_DSP_B;
break;
default:
printk(KERN_ERR "%s:bad format\n", __func__);
@@ -343,9 +344,8 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
return -EINVAL;
}
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
+ dev->pcr = pcr;
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr);
return 0;
}
@@ -353,31 +353,40 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct davinci_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data;
- struct davinci_mcbsp_dev *dev = rtd->dai->cpu_dai->private_data;
+ struct davinci_pcm_dma_params *dma_params = dai->dma_data;
+ struct davinci_mcbsp_dev *dev = dai->private_data;
struct snd_interval *i = NULL;
int mcbsp_word_length;
- u32 w;
+ unsigned int rcr, xcr, srgr;
+ u32 spcr;
/* general line settings */
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
+ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- w |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
+ spcr |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
} else {
- w |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
+ spcr |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
}
i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
- w = DAVINCI_MCBSP_SRGR_FSGM;
- MOD_REG_BIT(w, DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1), 1);
+ srgr = DAVINCI_MCBSP_SRGR_FSGM;
+ srgr |= DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1);
i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS);
- MOD_REG_BIT(w, DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1), 1);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, w);
+ srgr |= DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1);
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
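+ /*
+ * DSP_B places data in the same bit clock as frame sync (no data
+ * delay); DSP_A delays data by one bit clock, hence the R/XDATDLY
+ * values below.
+ */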
+ rcr = DAVINCI_MCBSP_RCR_RFIG;
+ xcr = DAVINCI_MCBSP_XCR_XFIG;
+ if (dev->mode == MOD_DSP_B) {
+ rcr |= DAVINCI_MCBSP_RCR_RDATDLY(0);
+ xcr |= DAVINCI_MCBSP_XCR_XDATDLY(0);
+ } else {
+ rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1);
+ xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1);
+ }
/* Determine xfer data type */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
@@ -397,18 +406,31 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_RCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) |
- DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length), 1);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, w);
+ dma_params->acnt = dma_params->data_type;
+ rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(1);
+ xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(1);
- } else {
- w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_XCR_REG);
- MOD_REG_BIT(w, DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) |
- DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length), 1);
- davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, w);
+ rcr |= DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) |
+ DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length);
+ xcr |= DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) |
+ DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr);
+ else
+ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr);
+ return 0;
+}
+
+static int davinci_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct davinci_mcbsp_dev *dev = dai->private_data;
+ int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ davinci_mcbsp_stop(dev, playback);
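+ /*
+ * When the codec supplies frame sync (FSXM clear), the trigger
+ * callback below is a no-op, so the McBSP has to be brought up
+ * here in prepare instead.
+ */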
+ if ((dev->pcr & DAVINCI_MCBSP_PCR_FSXM) == 0) {
+ /* codec is master */
+ davinci_mcbsp_start(dev, substream);
}
return 0;
}
@@ -416,35 +438,72 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
+ struct davinci_mcbsp_dev *dev = dai->private_data;
int ret = 0;
+ int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ if ((dev->pcr & DAVINCI_MCBSP_PCR_FSXM) == 0)
+ return 0; /* return if codec is master */
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- davinci_mcbsp_start(substream);
+ davinci_mcbsp_start(dev, substream);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- davinci_mcbsp_stop(substream);
+ davinci_mcbsp_stop(dev, playback);
break;
default:
ret = -EINVAL;
}
-
return ret;
}
-static int davinci_i2s_probe(struct platform_device *pdev,
- struct snd_soc_dai *dai)
+static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct davinci_mcbsp_dev *dev = dai->private_data;
+ int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ davinci_mcbsp_stop(dev, playback);
+}
+
+#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000
+
+static struct snd_soc_dai_ops davinci_i2s_dai_ops = {
+ .startup = davinci_i2s_startup,
+ .shutdown = davinci_i2s_shutdown,
+ .prepare = davinci_i2s_prepare,
+ .trigger = davinci_i2s_trigger,
+ .hw_params = davinci_i2s_hw_params,
+ .set_fmt = davinci_i2s_set_dai_fmt,
+
+};
+
+struct snd_soc_dai davinci_i2s_dai = {
+ .name = "davinci-i2s",
+ .id = 0,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = DAVINCI_I2S_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,},
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = DAVINCI_I2S_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,},
+ .ops = &davinci_i2s_dai_ops,
+
+};
+EXPORT_SYMBOL_GPL(davinci_i2s_dai);
+
+static int davinci_i2s_probe(struct platform_device *pdev)
{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_card *card = socdev->card;
- struct snd_soc_dai *cpu_dai = card->dai_link->cpu_dai;
+ struct snd_platform_data *pdata = pdev->dev.platform_data;
struct davinci_mcbsp_dev *dev;
- struct resource *mem, *ioarea;
- struct evm_snd_platform_data *pdata;
+ struct resource *mem, *ioarea, *res;
int ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -466,8 +525,6 @@ static int davinci_i2s_probe(struct platform_device *pdev,
goto err_release_region;
}
- cpu_dai->private_data = dev;
-
dev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk)) {
ret = -ENODEV;
@@ -476,18 +533,37 @@ static int davinci_i2s_probe(struct platform_device *pdev,
clk_enable(dev->clk);
dev->base = (void __iomem *)IO_ADDRESS(mem->start);
- pdata = pdev->dev.platform_data;
dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &davinci_i2s_pcm_out;
- dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->channel = pdata->tx_dma_ch;
dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->dma_addr =
(dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DXR_REG);
dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &davinci_i2s_pcm_in;
- dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->channel = pdata->rx_dma_ch;
dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->dma_addr =
(dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DRR_REG);
+ /* first TX, then RX */
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENXIO;
+ goto err_free_mem;
+ }
+ dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->channel = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENXIO;
+ goto err_free_mem;
+ }
+ dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->channel = res->start;
+
+ davinci_i2s_dai.private_data = dev;
+ ret = snd_soc_register_dai(&davinci_i2s_dai);
+ if (ret != 0)
+ goto err_free_mem;
+
return 0;
err_free_mem:
@@ -498,62 +574,40 @@ err_release_region:
return ret;
}
-static void davinci_i2s_remove(struct platform_device *pdev,
- struct snd_soc_dai *dai)
+static int davinci_i2s_remove(struct platform_device *pdev)
{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_card *card = socdev->card;
- struct snd_soc_dai *cpu_dai = card->dai_link->cpu_dai;
- struct davinci_mcbsp_dev *dev = cpu_dai->private_data;
+ struct davinci_mcbsp_dev *dev = davinci_i2s_dai.private_data;
struct resource *mem;
+ snd_soc_unregister_dai(&davinci_i2s_dai);
clk_disable(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
-
kfree(dev);
-
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, (mem->end - mem->start) + 1);
-}
-#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000
-
-static struct snd_soc_dai_ops davinci_i2s_dai_ops = {
- .startup = davinci_i2s_startup,
- .trigger = davinci_i2s_trigger,
- .hw_params = davinci_i2s_hw_params,
- .set_fmt = davinci_i2s_set_dai_fmt,
-};
+ return 0;
+}
-struct snd_soc_dai davinci_i2s_dai = {
- .name = "davinci-i2s",
- .id = 0,
- .probe = davinci_i2s_probe,
- .remove = davinci_i2s_remove,
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = DAVINCI_I2S_RATES,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,},
- .capture = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = DAVINCI_I2S_RATES,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,},
- .ops = &davinci_i2s_dai_ops,
+static struct platform_driver davinci_mcbsp_driver = {
+ .probe = davinci_i2s_probe,
+ .remove = davinci_i2s_remove,
+ .driver = {
+ .name = "davinci-asp",
+ .owner = THIS_MODULE,
+ },
};
-EXPORT_SYMBOL_GPL(davinci_i2s_dai);
static int __init davinci_i2s_init(void)
{
- return snd_soc_register_dai(&davinci_i2s_dai);
+ return platform_driver_register(&davinci_mcbsp_driver);
}
module_init(davinci_i2s_init);
static void __exit davinci_i2s_exit(void)
{
- snd_soc_unregister_dai(&davinci_i2s_dai);
+ platform_driver_unregister(&davinci_mcbsp_driver);
}
module_exit(davinci_i2s_exit);
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
new file mode 100644
index 000000000000..eca22d7829d2
--- /dev/null
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -0,0 +1,973 @@
+/*
+ * ALSA SoC McASP Audio Layer for TI DAVINCI processor
+ *
+ * Multi-channel Audio Serial Port Driver
+ *
+ * Author: Nirmal Pandey <n-pandey@ti.com>,
+ * Suresh Rajashekara <suresh.r@ti.com>
+ * Steve Chen <schen@.mvista.com>
+ *
+ * Copyright: (C) 2009 MontaVista Software, Inc., <source@mvista.com>
+ * Copyright: (C) 2009 Texas Instruments, India
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include "davinci-pcm.h"
+#include "davinci-mcasp.h"
+
+/*
+ * McASP register definitions
+ */
+#define DAVINCI_MCASP_PID_REG 0x00
+#define DAVINCI_MCASP_PWREMUMGT_REG 0x04
+
+#define DAVINCI_MCASP_PFUNC_REG 0x10
+#define DAVINCI_MCASP_PDIR_REG 0x14
+#define DAVINCI_MCASP_PDOUT_REG 0x18
+#define DAVINCI_MCASP_PDSET_REG 0x1c
+
+#define DAVINCI_MCASP_PDCLR_REG 0x20
+
+#define DAVINCI_MCASP_TLGC_REG 0x30
+#define DAVINCI_MCASP_TLMR_REG 0x34
+
+#define DAVINCI_MCASP_GBLCTL_REG 0x44
+#define DAVINCI_MCASP_AMUTE_REG 0x48
+#define DAVINCI_MCASP_LBCTL_REG 0x4c
+
+#define DAVINCI_MCASP_TXDITCTL_REG 0x50
+
+#define DAVINCI_MCASP_GBLCTLR_REG 0x60
+#define DAVINCI_MCASP_RXMASK_REG 0x64
+#define DAVINCI_MCASP_RXFMT_REG 0x68
+#define DAVINCI_MCASP_RXFMCTL_REG 0x6c
+
+#define DAVINCI_MCASP_ACLKRCTL_REG 0x70
+#define DAVINCI_MCASP_AHCLKRCTL_REG 0x74
+#define DAVINCI_MCASP_RXTDM_REG 0x78
+#define DAVINCI_MCASP_EVTCTLR_REG 0x7c
+
+#define DAVINCI_MCASP_RXSTAT_REG 0x80
+#define DAVINCI_MCASP_RXTDMSLOT_REG 0x84
+#define DAVINCI_MCASP_RXCLKCHK_REG 0x88
+#define DAVINCI_MCASP_REVTCTL_REG 0x8c
+
+#define DAVINCI_MCASP_GBLCTLX_REG 0xa0
+#define DAVINCI_MCASP_TXMASK_REG 0xa4
+#define DAVINCI_MCASP_TXFMT_REG 0xa8
+#define DAVINCI_MCASP_TXFMCTL_REG 0xac
+
+#define DAVINCI_MCASP_ACLKXCTL_REG 0xb0
+#define DAVINCI_MCASP_AHCLKXCTL_REG 0xb4
+#define DAVINCI_MCASP_TXTDM_REG 0xb8
+#define DAVINCI_MCASP_EVTCTLX_REG 0xbc
+
+#define DAVINCI_MCASP_TXSTAT_REG 0xc0
+#define DAVINCI_MCASP_TXTDMSLOT_REG 0xc4
+#define DAVINCI_MCASP_TXCLKCHK_REG 0xc8
+#define DAVINCI_MCASP_XEVTCTL_REG 0xcc
+
+/* Left(even TDM Slot) Channel Status Register File */
+#define DAVINCI_MCASP_DITCSRA_REG 0x100
+/* Right(odd TDM slot) Channel Status Register File */
+#define DAVINCI_MCASP_DITCSRB_REG 0x118
+/* Left(even TDM slot) User Data Register File */
+#define DAVINCI_MCASP_DITUDRA_REG 0x130
+/* Right(odd TDM Slot) User Data Register File */
+#define DAVINCI_MCASP_DITUDRB_REG 0x148
+
+/* Serializer n Control Register */
+#define DAVINCI_MCASP_XRSRCTL_BASE_REG 0x180
+#define DAVINCI_MCASP_XRSRCTL_REG(n) (DAVINCI_MCASP_XRSRCTL_BASE_REG + \
+ (n << 2))
+
+/* Transmit Buffer for Serializer n */
+#define DAVINCI_MCASP_TXBUF_REG 0x200
+/* Receive Buffer for Serializer n */
+#define DAVINCI_MCASP_RXBUF_REG 0x280
+
+/* McASP FIFO Registers */
+#define DAVINCI_MCASP_WFIFOCTL (0x1010)
+#define DAVINCI_MCASP_WFIFOSTS (0x1014)
+#define DAVINCI_MCASP_RFIFOCTL (0x1018)
+#define DAVINCI_MCASP_RFIFOSTS (0x101C)
+
+/*
+ * DAVINCI_MCASP_PWREMUMGT_REG - Power Down and Emulation Management
+ * Register Bits
+ */
+#define MCASP_FREE BIT(0)
+#define MCASP_SOFT BIT(1)
+
+/*
+ * DAVINCI_MCASP_PFUNC_REG - Pin Function / GPIO Enable Register Bits
+ */
+#define AXR(n) (1<<n)
+#define PFUNC_AMUTE BIT(25)
+#define ACLKX BIT(26)
+#define AHCLKX BIT(27)
+#define AFSX BIT(28)
+#define ACLKR BIT(29)
+#define AHCLKR BIT(30)
+#define AFSR BIT(31)
+
+/*
+ * DAVINCI_MCASP_PDIR_REG - Pin Direction Register Bits
+ */
+#define AXR(n) (1<<n)
+#define PDIR_AMUTE BIT(25)
+#define ACLKX BIT(26)
+#define AHCLKX BIT(27)
+#define AFSX BIT(28)
+#define ACLKR BIT(29)
+#define AHCLKR BIT(30)
+#define AFSR BIT(31)
+
+/*
+ * DAVINCI_MCASP_TXDITCTL_REG - Transmit DIT Control Register Bits
+ */
+#define DITEN BIT(0) /* Transmit DIT mode enable/disable */
+#define VA BIT(2)
+#define VB BIT(3)
+
+/*
+ * DAVINCI_MCASP_TXFMT_REG - Transmit Bitstream Format Register Bits
+ */
+#define TXROT(val) (val)
+#define TXSEL BIT(3)
+#define TXSSZ(val) (val<<4)
+#define TXPBIT(val) (val<<8)
+#define TXPAD(val) (val<<13)
+#define TXORD BIT(15)
+#define FSXDLY(val) (val<<16)
+
+/*
+ * DAVINCI_MCASP_RXFMT_REG - Receive Bitstream Format Register Bits
+ */
+#define RXROT(val) (val)
+#define RXSEL BIT(3)
+#define RXSSZ(val) (val<<4)
+#define RXPBIT(val) (val<<8)
+#define RXPAD(val) (val<<13)
+#define RXORD BIT(15)
+#define FSRDLY(val) (val<<16)
+
+/*
+ * DAVINCI_MCASP_TXFMCTL_REG - Transmit Frame Control Register Bits
+ */
+#define FSXPOL BIT(0)
+#define AFSXE BIT(1)
+#define FSXDUR BIT(4)
+#define FSXMOD(val) (val<<7)
+
+/*
+ * DAVINCI_MCASP_RXFMCTL_REG - Receive Frame Control Register Bits
+ */
+#define FSRPOL BIT(0)
+#define AFSRE BIT(1)
+#define FSRDUR BIT(4)
+#define FSRMOD(val) (val<<7)
+
+/*
+ * DAVINCI_MCASP_ACLKXCTL_REG - Transmit Clock Control Register Bits
+ */
+#define ACLKXDIV(val) (val)
+#define ACLKXE BIT(5)
+#define TX_ASYNC BIT(6)
+#define ACLKXPOL BIT(7)
+
+/*
+ * DAVINCI_MCASP_ACLKRCTL_REG - Receive Clock Control Register Bits
+ */
+#define ACLKRDIV(val) (val)
+#define ACLKRE BIT(5)
+#define RX_ASYNC BIT(6)
+#define ACLKRPOL BIT(7)
+
+/*
+ * DAVINCI_MCASP_AHCLKXCTL_REG - High Frequency Transmit Clock Control
+ * Register Bits
+ */
+#define AHCLKXDIV(val) (val)
+#define AHCLKXPOL BIT(14)
+#define AHCLKXE BIT(15)
+
+/*
+ * DAVINCI_MCASP_AHCLKRCTL_REG - High Frequency Receive Clock Control
+ * Register Bits
+ */
+#define AHCLKRDIV(val) (val)
+#define AHCLKRPOL BIT(14)
+#define AHCLKRE BIT(15)
+
+/*
+ * DAVINCI_MCASP_XRSRCTL_BASE_REG - Serializer Control Register Bits
+ */
+#define MODE(val) (val)
+#define DISMOD(val)	(val<<2)
+#define TXSTATE BIT(4)
+#define RXSTATE BIT(5)
+
+/*
+ * DAVINCI_MCASP_LBCTL_REG - Loop Back Control Register Bits
+ */
+#define LBEN BIT(0)
+#define LBORD BIT(1)
+#define LBGENMODE(val) (val<<2)
+
+/*
+ * DAVINCI_MCASP_TXTDMSLOT_REG - Transmit TDM Slot Register configuration
+ */
+#define TXTDMS(n) (1<<n)
+
+/*
+ * DAVINCI_MCASP_RXTDMSLOT_REG - Receive TDM Slot Register configuration
+ */
+#define RXTDMS(n) (1<<n)
+
+/*
+ * DAVINCI_MCASP_GBLCTL_REG - Global Control Register Bits
+ */
+#define RXCLKRST BIT(0) /* Receiver Clock Divider Reset */
+#define RXHCLKRST BIT(1) /* Receiver High Frequency Clock Divider */
+#define RXSERCLR BIT(2) /* Receiver Serializer Clear */
+#define RXSMRST BIT(3) /* Receiver State Machine Reset */
+#define RXFSRST BIT(4) /* Frame Sync Generator Reset */
+#define TXCLKRST BIT(8) /* Transmitter Clock Divider Reset */
+#define TXHCLKRST BIT(9) /* Transmitter High Frequency Clock Divider*/
+#define TXSERCLR BIT(10) /* Transmit Serializer Clear */
+#define TXSMRST BIT(11) /* Transmitter State Machine Reset */
+#define TXFSRST BIT(12) /* Frame Sync Generator Reset */
+
+/*
+ * DAVINCI_MCASP_AMUTE_REG - Mute Control Register Bits
+ */
+#define MUTENA(val) (val)
+#define MUTEINPOL BIT(2)
+#define MUTEINENA BIT(3)
+#define MUTEIN BIT(4)
+#define MUTER BIT(5)
+#define MUTEX BIT(6)
+#define MUTEFSR BIT(7)
+#define MUTEFSX BIT(8)
+#define MUTEBADCLKR BIT(9)
+#define MUTEBADCLKX BIT(10)
+#define MUTERXDMAERR BIT(11)
+#define MUTETXDMAERR BIT(12)
+
+/*
+ * DAVINCI_MCASP_REVTCTL_REG - Receiver DMA Event Control Register bits
+ */
+#define RXDATADMADIS BIT(0)
+
+/*
+ * DAVINCI_MCASP_XEVTCTL_REG - Transmitter DMA Event Control Register bits
+ */
+#define TXDATADMADIS BIT(0)
+
+/*
+ * DAVINCI_MCASP_W[R]FIFOCTL - Write/Read FIFO Control Register bits
+ */
+#define FIFO_ENABLE BIT(16)
+#define NUMEVT_MASK (0xFF << 8)
+#define NUMDMA_MASK (0xFF)
+
+#define DAVINCI_MCASP_NUM_SERIALIZER 16
+
+static inline void mcasp_set_bits(void __iomem *reg, u32 val)
+{
+ __raw_writel(__raw_readl(reg) | val, reg);
+}
+
+static inline void mcasp_clr_bits(void __iomem *reg, u32 val)
+{
+ __raw_writel((__raw_readl(reg) & ~(val)), reg);
+}
+
+static inline void mcasp_mod_bits(void __iomem *reg, u32 val, u32 mask)
+{
+ __raw_writel((__raw_readl(reg) & ~mask) | val, reg);
+}
+
+static inline void mcasp_set_reg(void __iomem *reg, u32 val)
+{
+ __raw_writel(val, reg);
+}
+
+static inline u32 mcasp_get_reg(void __iomem *reg)
+{
+ return (unsigned int)__raw_readl(reg);
+}
+
+static inline void mcasp_set_ctl_reg(void __iomem *regs, u32 val)
+{
+ int i = 0;
+
+ mcasp_set_bits(regs, val);
+
+ /* programming GBLCTL needs to read back from GBLCTL and verify */
+ /* the loop count avoids a lock-up */
+ for (i = 0; i < 1000; i++) {
+ if ((mcasp_get_reg(regs) & val) == val)
+ break;
+ }
+
+ if (i == 1000 && ((mcasp_get_reg(regs) & val) != val))
+ printk(KERN_ERR "GBLCTL write error\n");
+}
+
+static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct davinci_audio_dev *dev = cpu_dai->private_data;
+ cpu_dai->dma_data = dev->dma_params[substream->stream];
+ return 0;
+}
+
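+/*
+ * Release the receive-side reset bits in GBLCTL one at a time
+ * (high-frequency clock, bit clock, serializers, then state machine
+ * and frame sync); mcasp_set_ctl_reg() polls GBLCTL until each bit
+ * reads back set.
+ */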
+static void mcasp_start_rx(struct davinci_audio_dev *dev)
+{
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST);
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXBUF_REG, 0);
+
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXBUF_REG, 0);
+
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
+}
+
+static void mcasp_start_tx(struct davinci_audio_dev *dev)
+{
+ u8 offset = 0, i;
+ u32 cnt;
+
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
+
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
+ mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
+ for (i = 0; i < dev->num_serializer; i++) {
+ if (dev->serial_dir[i] == TX_MODE) {
+ offset = i;
+ break;
+ }
+ }
+
+ /* wait for TX ready */
+ cnt = 0;
+ while (!(mcasp_get_reg(dev->base + DAVINCI_MCASP_XRSRCTL_REG(offset)) &
+ TXSTATE) && (cnt < 100000))
+ cnt++;
+
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
+}
+
+static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
+{
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ mcasp_start_tx(dev);
+ else
+ mcasp_start_rx(dev);
+
+ /* enable FIFO */
+ if (dev->txnumevt)
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
+
+ if (dev->rxnumevt)
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
+}
+
+static void mcasp_stop_rx(struct davinci_audio_dev *dev)
+{
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, 0);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
+}
+
+static void mcasp_stop_tx(struct davinci_audio_dev *dev)
+{
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, 0);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
+}
+
+static void davinci_mcasp_stop(struct davinci_audio_dev *dev, int stream)
+{
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ mcasp_stop_tx(dev);
+ else
+ mcasp_stop_rx(dev);
+
+ /* disable FIFO */
+ if (dev->txnumevt)
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
+
+ if (dev->rxnumevt)
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
+}
+
+static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+{
+ struct davinci_audio_dev *dev = cpu_dai->private_data;
+ void __iomem *base = dev->base;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ /* codec is clock and frame slave */
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+ mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
+
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+ mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
+
+ mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x7 << 26));
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ /* codec is clock master and frame slave */
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+ mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
+
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+ mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
+
+ mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x2d << 26));
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* codec is clock and frame master */
+ mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+ mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
+
+ mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+ mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
+
+ mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, (0x3f << 26));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_NF:
+ mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
+ mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
+
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+ mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
+ break;
+
+ case SND_SOC_DAIFMT_NB_IF:
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
+ mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
+
+ mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+ mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
+ break;
+
+ case SND_SOC_DAIFMT_IB_IF:
+ mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
+ mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
+
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+ mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
+ break;
+
+ case SND_SOC_DAIFMT_NB_NF:
+ mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
+ mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
+
+ mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+ mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int davinci_config_channel_size(struct davinci_audio_dev *dev,
+ int channel_size)
+{
+ u32 fmt = 0;
+
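+ /*
+ * RSSZ/XSSZ slot-size field: the encodings below follow the
+ * pattern value = (slot width in bits) / 2 - 1.
+ */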
+ switch (channel_size) {
+ case DAVINCI_AUDIO_WORD_8:
+ fmt = 0x03;
+ break;
+
+ case DAVINCI_AUDIO_WORD_12:
+ fmt = 0x05;
+ break;
+
+ case DAVINCI_AUDIO_WORD_16:
+ fmt = 0x07;
+ break;
+
+ case DAVINCI_AUDIO_WORD_20:
+ fmt = 0x09;
+ break;
+
+ case DAVINCI_AUDIO_WORD_24:
+ fmt = 0x0B;
+ break;
+
+ case DAVINCI_AUDIO_WORD_28:
+ fmt = 0x0D;
+ break;
+
+ case DAVINCI_AUDIO_WORD_32:
+ fmt = 0x0F;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
+ RXSSZ(fmt), RXSSZ(0x0F));
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
+ TXSSZ(fmt), TXSSZ(0x0F));
+ return 0;
+}
+
+static void davinci_hw_common_param(struct davinci_audio_dev *dev, int stream)
+{
+ int i;
+ u8 tx_ser = 0;
+ u8 rx_ser = 0;
+
+ /* Default configuration */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
+
+ /* All PINS as McASP */
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_PFUNC_REG, 0x00000000);
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_XEVTCTL_REG,
+ TXDATADMADIS);
+ } else {
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_REVTCTL_REG,
+ RXDATADMADIS);
+ }
+
+ for (i = 0; i < dev->num_serializer; i++) {
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_XRSRCTL_REG(i),
+ dev->serial_dir[i]);
+ if (dev->serial_dir[i] == TX_MODE) {
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
+ AXR(i));
+ tx_ser++;
+ } else if (dev->serial_dir[i] == RX_MODE) {
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
+ AXR(i));
+ rx_ser++;
+ }
+ }
+
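+ /*
+ * Cap the FIFO event count: numevt * active serializers may not
+ * exceed 64, otherwise fall back to one event per DMA request.
+ */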
+ if (dev->txnumevt && stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (dev->txnumevt * tx_ser > 64)
+ dev->txnumevt = 1;
+
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, tx_ser,
+ NUMDMA_MASK);
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+ ((dev->txnumevt * tx_ser) << 8), NUMEVT_MASK);
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
+ }
+
+ if (dev->rxnumevt && stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (dev->rxnumevt * rx_ser > 64)
+ dev->rxnumevt = 1;
+
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, rx_ser,
+ NUMDMA_MASK);
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+ ((dev->rxnumevt * rx_ser) << 8), NUMEVT_MASK);
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
+ }
+}
+
+static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
+{
+ int i, active_slots;
+ u32 mask = 0;
+
+ active_slots = (dev->tdm_slots > 31) ? 32 : dev->tdm_slots;
+ for (i = 0; i < active_slots; i++)
+ mask |= (1 << i);
+
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC);
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* bit stream is MSB first with no delay */
+ /* DSP_B mode */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG,
+ AHCLKXE);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask);
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD);
+
+ if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
+ FSXMOD(dev->tdm_slots), FSXMOD(0x1FF));
+ else
+ printk(KERN_ERR "playback tdm slot %d not supported\n",
+ dev->tdm_slots);
+
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, 0xFFFFFFFF);
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
+ } else {
+ /* bit stream is MSB first with no delay */
+ /* DSP_B mode */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXORD);
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG,
+ AHCLKRE);
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask);
+
+ if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
+ mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG,
+ FSRMOD(dev->tdm_slots), FSRMOD(0x1FF));
+ else
+ printk(KERN_ERR "capture tdm slot %d not supported\n",
+ dev->tdm_slots);
+
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, 0xFFFFFFFF);
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
+ }
+}
+
+/* S/PDIF */
+static void davinci_hw_dit_param(struct davinci_audio_dev *dev)
+{
+ /* Set the PDIR for Serialiser as output */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG, AFSX);
+
+ /* TXMASK for 24 bits */
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, 0x00FFFFFF);
+
+ /* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
+ and LSB first */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
+ TXROT(6) | TXSSZ(15));
+
+ /* Set TX frame synch : DIT Mode, 1 bit width, internal, rising edge */
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
+ AFSXE | FSXMOD(0x180));
+
+ /* Set the TX tdm : for all the slots */
+ mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, 0xFFFFFFFF);
+
+ /* Set the TX clock controls : div = 1 and internal */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG,
+ ACLKXE | TX_ASYNC);
+
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
+
+ /* Only 44100 and 48000 are valid, both have the same setting */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXDIV(3));
+
+ /* Enable the DIT */
+ mcasp_set_bits(dev->base + DAVINCI_MCASP_TXDITCTL_REG, DITEN);
+}
+
+static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct davinci_audio_dev *dev = cpu_dai->private_data;
+ struct davinci_pcm_dma_params *dma_params =
+ dev->dma_params[substream->stream];
+ int word_length;
+ u8 numevt;
+
+ davinci_hw_common_param(dev, substream->stream);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ numevt = dev->txnumevt;
+ else
+ numevt = dev->rxnumevt;
+
+ if (!numevt)
+ numevt = 1;
+
+ if (dev->op_mode == DAVINCI_MCASP_DIT_MODE)
+ davinci_hw_dit_param(dev);
+ else
+ davinci_hw_param(dev, substream->stream);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ dma_params->data_type = 1;
+ word_length = DAVINCI_AUDIO_WORD_8;
+ break;
+
+ case SNDRV_PCM_FORMAT_S16_LE:
+ dma_params->data_type = 2;
+ word_length = DAVINCI_AUDIO_WORD_16;
+ break;
+
+ case SNDRV_PCM_FORMAT_S32_LE:
+ dma_params->data_type = 4;
+ word_length = DAVINCI_AUDIO_WORD_32;
+ break;
+
+ default:
+ printk(KERN_WARNING "davinci-mcasp: unsupported PCM format\n");
+ return -EINVAL;
+ }
+
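+ /*
+ * With the version 2 FIFO, each DMA event transfers numevt 32-bit
+ * words, so scale the element size and acnt accordingly.
+ */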
+ if (dev->version == MCASP_VERSION_2) {
+ dma_params->data_type *= numevt;
+ dma_params->acnt = 4 * numevt;
+ } else
+ dma_params->acnt = dma_params->data_type;
+
+ davinci_config_channel_size(dev, word_length);
+
+ return 0;
+}
+
+static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *cpu_dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct davinci_audio_dev *dev = rtd->dai->cpu_dai->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ davinci_mcasp_start(dev, substream->stream);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ davinci_mcasp_stop(dev, substream->stream);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
+ .startup = davinci_mcasp_startup,
+ .trigger = davinci_mcasp_trigger,
+ .hw_params = davinci_mcasp_hw_params,
+ .set_fmt = davinci_mcasp_set_dai_fmt,
+
+};
+
+struct snd_soc_dai davinci_mcasp_dai[] = {
+ {
+ .name = "davinci-i2s",
+ .id = 0,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = DAVINCI_MCASP_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = DAVINCI_MCASP_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &davinci_mcasp_dai_ops,
+
+ },
+ {
+ .name = "davinci-dit",
+ .id = 1,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 384,
+ .rates = DAVINCI_MCASP_RATES,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &davinci_mcasp_dai_ops,
+ },
+
+};
+EXPORT_SYMBOL_GPL(davinci_mcasp_dai);
+
+static int davinci_mcasp_probe(struct platform_device *pdev)
+{
+ struct davinci_pcm_dma_params *dma_data;
+ struct resource *mem, *ioarea, *res;
+ struct snd_platform_data *pdata;
+ struct davinci_audio_dev *dev;
+ int count = 0;
+ int ret = 0;
+
+ dev = kzalloc(sizeof(struct davinci_audio_dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dma_data = kzalloc(sizeof(struct davinci_pcm_dma_params) * 2,
+ GFP_KERNEL);
+ if (!dma_data) {
+ ret = -ENOMEM;
+ goto err_release_dev;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ ret = -ENODEV;
+ goto err_release_data;
+ }
+
+ ioarea = request_mem_region(mem->start,
+ (mem->end - mem->start) + 1, pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "Audio region already claimed\n");
+ ret = -EBUSY;
+ goto err_release_data;
+ }
+
+ pdata = pdev->dev.platform_data;
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ ret = -ENODEV;
+ goto err_release_region;
+ }
+
+ clk_enable(dev->clk);
+
+ dev->base = (void __iomem *)IO_ADDRESS(mem->start);
+ dev->op_mode = pdata->op_mode;
+ dev->tdm_slots = pdata->tdm_slots;
+ dev->num_serializer = pdata->num_serializer;
+ dev->serial_dir = pdata->serial_dir;
+ dev->codec_fmt = pdata->codec_fmt;
+ dev->version = pdata->version;
+ dev->txnumevt = pdata->txnumevt;
+ dev->rxnumevt = pdata->rxnumevt;
+
+ dma_data[count].name = "I2S PCM Stereo out";
+ dma_data[count].eventq_no = pdata->eventq_no;
+ dma_data[count].dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
+ io_v2p(dev->base));
+ dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &dma_data[count];
+
+ /* first TX, then RX */
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENXIO;
+ goto err_release_region;
+ }
+
+ dma_data[count].channel = res->start;
+ count++;
+ dma_data[count].name = "I2S PCM Stereo in";
+ dma_data[count].eventq_no = pdata->eventq_no;
+ dma_data[count].dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
+ io_v2p(dev->base));
+ dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &dma_data[count];
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "no DMA resource\n");
+ ret = -ENXIO;
+ goto err_release_region;
+ }
+
+ dma_data[count].channel = res->start;
+ davinci_mcasp_dai[pdata->op_mode].private_data = dev;
+ davinci_mcasp_dai[pdata->op_mode].dev = &pdev->dev;
+ ret = snd_soc_register_dai(&davinci_mcasp_dai[pdata->op_mode]);
+
+ if (ret != 0)
+ goto err_release_region;
+ return 0;
+
+err_release_region:
+ release_mem_region(mem->start, (mem->end - mem->start) + 1);
+err_release_data:
+ kfree(dma_data);
+err_release_dev:
+ kfree(dev);
+
+ return ret;
+}
+
+static int davinci_mcasp_remove(struct platform_device *pdev)
+{
+ struct snd_platform_data *pdata = pdev->dev.platform_data;
+ struct davinci_pcm_dma_params *dma_data;
+ struct davinci_audio_dev *dev;
+ struct resource *mem;
+
+ snd_soc_unregister_dai(&davinci_mcasp_dai[pdata->op_mode]);
+ dev = davinci_mcasp_dai[pdata->op_mode].private_data;
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, (mem->end - mem->start) + 1);
+
+ dma_data = dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
+ kfree(dma_data);
+ kfree(dev);
+
+ return 0;
+}
+
+static struct platform_driver davinci_mcasp_driver = {
+ .probe = davinci_mcasp_probe,
+ .remove = davinci_mcasp_remove,
+ .driver = {
+ .name = "davinci-mcasp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init davinci_mcasp_init(void)
+{
+ return platform_driver_register(&davinci_mcasp_driver);
+}
+module_init(davinci_mcasp_init);
+
+static void __exit davinci_mcasp_exit(void)
+{
+ platform_driver_unregister(&davinci_mcasp_driver);
+}
+module_exit(davinci_mcasp_exit);
+
+MODULE_AUTHOR("Steve Chen");
+MODULE_DESCRIPTION("TI DAVINCI McASP SoC Interface");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
new file mode 100644
index 000000000000..554354c1cc2f
--- /dev/null
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -0,0 +1,60 @@
+/*
+ * ALSA SoC McASP Audio Layer for TI DAVINCI processor
+ *
+ * MCASP related definitions
+ *
+ * Author: Nirmal Pandey <n-pandey@ti.com>,
+ * Suresh Rajashekara <suresh.r@ti.com>
+ * Steve Chen <schen@.mvista.com>
+ *
+ * Copyright: (C) 2009 MontaVista Software, Inc., <source@mvista.com>
+ * Copyright: (C) 2009 Texas Instruments, India
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DAVINCI_MCASP_H
+#define DAVINCI_MCASP_H
+
+#include <linux/io.h>
+#include <mach/asp.h>
+#include "davinci-pcm.h"
+
+extern struct snd_soc_dai davinci_mcasp_dai[];
+
+#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_96000
+#define DAVINCI_MCASP_I2S_DAI 0
+#define DAVINCI_MCASP_DIT_DAI 1
+
+enum {
+ DAVINCI_AUDIO_WORD_8 = 0,
+ DAVINCI_AUDIO_WORD_12,
+ DAVINCI_AUDIO_WORD_16,
+ DAVINCI_AUDIO_WORD_20,
+ DAVINCI_AUDIO_WORD_24,
+ DAVINCI_AUDIO_WORD_32,
+ DAVINCI_AUDIO_WORD_28, /* This is only valid for McASP */
+};
+
+struct davinci_audio_dev {
+ void __iomem *base;
+ int sample_rate;
+ struct clk *clk;
+ struct davinci_pcm_dma_params *dma_params[2];
+ unsigned int codec_fmt;
+
+ /* McASP specific data */
+ int tdm_slots;
+ u8 op_mode;
+ u8 num_serializer;
+ u8 *serial_dir;
+ u8 version;
+
+ /* McASP FIFO related */
+ u8 txnumevt;
+ u8 rxnumevt;
+};
+
+#endif /* DAVINCI_MCASP_H */
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index a05996588489..091dacb78b4d 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -67,6 +67,7 @@ static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream)
dma_addr_t src, dst;
unsigned short src_bidx, dst_bidx;
unsigned int data_type;
+ unsigned short acnt;
unsigned int count;
period_size = snd_pcm_lib_period_bytes(substream);
@@ -91,11 +92,12 @@ static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream)
dst_bidx = data_type;
}
+ acnt = prtd->params->acnt;
edma_set_src(lch, src, INCR, W8BIT);
edma_set_dest(lch, dst, INCR, W8BIT);
edma_set_src_index(lch, src_bidx, 0);
edma_set_dest_index(lch, dst_bidx, 0);
- edma_set_transfer_params(lch, data_type, count, 1, 0, ASYNC);
+ edma_set_transfer_params(lch, acnt, count, 1, 0, ASYNC);
prtd->period++;
if (unlikely(prtd->period >= runtime->periods))
@@ -206,6 +208,7 @@ static int davinci_pcm_prepare(struct snd_pcm_substream *substream)
/* Copy self-linked parameter RAM entry into master channel */
edma_read_slot(prtd->slave_lch, &temp);
edma_write_slot(prtd->master_lch, &temp);
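+ /* Queue the first period so the transfer is programmed before trigger */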
+ davinci_pcm_enqueue_dma(substream);
return 0;
}
@@ -243,6 +246,11 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream)
int ret = 0;
snd_soc_set_runtime_hwparams(substream, &davinci_pcm_hardware);
+ /* ensure that buffer size is a multiple of period size */
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
prtd = kzalloc(sizeof(struct davinci_runtime_data), GFP_KERNEL);
if (prtd == NULL)
diff --git a/sound/soc/davinci/davinci-pcm.h b/sound/soc/davinci/davinci-pcm.h
index 62cb4eb07e34..63d96253c73a 100644
--- a/sound/soc/davinci/davinci-pcm.h
+++ b/sound/soc/davinci/davinci-pcm.h
@@ -12,17 +12,20 @@
#ifndef _DAVINCI_PCM_H
#define _DAVINCI_PCM_H
+#include <mach/edma.h>
+#include <mach/asp.h>
+
+
struct davinci_pcm_dma_params {
- char *name; /* stream identifier */
- int channel; /* sync dma channel ID */
- dma_addr_t dma_addr; /* device physical address for DMA */
- unsigned int data_type; /* xfer data type */
+ char *name; /* stream identifier */
+ int channel; /* sync dma channel ID */
+ unsigned short acnt;
+ dma_addr_t dma_addr; /* device physical address for DMA */
+ enum dma_event_q eventq_no; /* event queue number */
+ unsigned char data_type; /* xfer data type */
+ unsigned char convert_mono_stereo;
};
-struct evm_snd_platform_data {
- int tx_dma_ch;
- int rx_dma_ch;
-};
extern struct snd_soc_platform davinci_soc_platform;
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index f0a2d4071998..9ff62e3a9b1d 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -69,6 +69,23 @@ static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s)
{
+ if (s->appl_ptr > s->runtime->control->appl_ptr) {
+ /*
+ * In this case s->runtime->control->appl_ptr has wrapped around.
+ * Play the data to the end of the boundary, then wrap our own
+ * appl_ptr back around.
+ */
+ while (s->appl_ptr < s->runtime->boundary) {
+ if (bcom_queue_full(s->bcom_task))
+ return;
+
+ s->appl_ptr += s->period_size;
+
+ psc_dma_bcom_enqueue_next_buffer(s);
+ }
+ s->appl_ptr -= s->runtime->boundary;
+ }
+
while (s->appl_ptr < s->runtime->control->appl_ptr) {
if (bcom_queue_full(s->bcom_task))
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
index 7eb549985d49..c4ae3e096bb9 100644
--- a/sound/soc/fsl/mpc5200_psc_ac97.c
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/delay.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -112,7 +113,7 @@ static void psc_ac97_cold_reset(struct snd_ac97 *ac97)
out_8(&regs->op1, MPC52xx_PSC_OP_RES);
udelay(10);
out_8(&regs->op0, MPC52xx_PSC_OP_RES);
- udelay(50);
+ msleep(1);
psc_ac97_warm_reset(ac97);
}
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
new file mode 100644
index 000000000000..a700562e8692
--- /dev/null
+++ b/sound/soc/imx/Kconfig
@@ -0,0 +1,21 @@
+config SND_MX1_MX2_SOC
+ tristate "SoC Audio for Freecale i.MX1x i.MX2x CPUs"
+ depends on ARCH_MX2 || ARCH_MX1
+ select SND_PCM
+ help
+ Say Y or M if you want to add support for codecs attached to
+ the MX1 or MX2 SSI interface.
+
+config SND_MXC_SOC_SSI
+ tristate
+
+config SND_SOC_MX27VIS_WM8974
+ tristate "SoC Audio support for MX27 - WM8974 Visstrim_sm10 board"
+ depends on SND_MX1_MX2_SOC && MACH_MX27 && MACH_IMX27_VISSTRIM_M10
+ select SND_MXC_SOC_SSI
+ select SND_SOC_WM8974
+ help
+ Say Y if you want to add support for SoC audio on Visstrim SM10
+ board with WM8974.
+
+
diff --git a/sound/soc/imx/Makefile b/sound/soc/imx/Makefile
new file mode 100644
index 000000000000..c2ffd2c8df5a
--- /dev/null
+++ b/sound/soc/imx/Makefile
@@ -0,0 +1,10 @@
+# i.MX Platform Support
+snd-soc-mx1_mx2-objs := mx1_mx2-pcm.o
+snd-soc-mxc-ssi-objs := mxc-ssi.o
+
+obj-$(CONFIG_SND_MX1_MX2_SOC) += snd-soc-mx1_mx2.o
+obj-$(CONFIG_SND_MXC_SOC_SSI) += snd-soc-mxc-ssi.o
+
+# i.MX Machine Support
+snd-soc-mx27vis-wm8974-objs := mx27vis_wm8974.o
+obj-$(CONFIG_SND_SOC_MX27VIS_WM8974) += snd-soc-mx27vis-wm8974.o
diff --git a/sound/soc/imx/mx1_mx2-pcm.c b/sound/soc/imx/mx1_mx2-pcm.c
new file mode 100644
index 000000000000..b83866529397
--- /dev/null
+++ b/sound/soc/imx/mx1_mx2-pcm.c
@@ -0,0 +1,488 @@
+/*
+ * mx1_mx2-pcm.c -- ALSA SoC interface for Freescale i.MX1x, i.MX2x CPUs
+ *
+ * Copyright 2009 Vista Silicon S.L.
+ * Author: Javier Martin
+ * javier.martin@vista-silicon.com
+ *
+ * Based on mxc-pcm.c by Liam Girdwood.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <asm/dma.h>
+#include <mach/hardware.h>
+#include <mach/dma-mx1-mx2.h>
+
+#include "mx1_mx2-pcm.h"
+
+
+static const struct snd_pcm_hardware mx1_mx2_pcm_hardware = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .buffer_bytes_max = 32 * 1024,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8 * 1024,
+ .periods_min = 2,
+ .periods_max = 255,
+ .fifo_size = 0,
+};
+
+struct mx1_mx2_runtime_data {
+ int dma_ch;
+ int active;
+ unsigned int period;
+ unsigned int periods;
+ int tx_spin;
+ spinlock_t dma_lock;
+ struct mx1_mx2_pcm_dma_params *dma_params;
+};
+
+
+/**
+ * This function stops the current dma transfer for playback
+ * and clears the dma pointers.
+ *
+ * @param substream pointer to the structure of the current stream.
+ *
+ */
+static int audio_stop_dma(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct mx1_mx2_runtime_data *prtd = runtime->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prtd->dma_lock, flags);
+
+ pr_debug("%s\n", __func__);
+
+ prtd->active = 0;
+ prtd->period = 0;
+ prtd->periods = 0;
+
+ /* this stops the dma channel and clears the buffer ptrs */
+
+ imx_dma_disable(prtd->dma_ch);
+
+ spin_unlock_irqrestore(&prtd->dma_lock, flags);
+
+ return 0;
+}
+
+/**
+ * This function is called whenever a new audio block needs to be
+ * transferred to the codec. The function receives the address and the size
+ * of the new block and starts a new DMA transfer.
+ *
+ * @param substream pointer to the structure of the current stream.
+ *
+ */
+static int dma_new_period(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct mx1_mx2_runtime_data *prtd = runtime->private_data;
+ unsigned int dma_size;
+ unsigned int offset;
+ int ret = 0;
+ dma_addr_t mem_addr;
+ unsigned int dev_addr;
+
+ if (prtd->active) {
+ dma_size = frames_to_bytes(runtime, runtime->period_size);
+ offset = dma_size * prtd->period;
+
+ pr_debug("%s: period (%d) out of (%d)\n", __func__,
+ prtd->period,
+ runtime->periods);
+ pr_debug("period_size %d frames\n offset %d bytes\n",
+ (unsigned int)runtime->period_size,
+ offset);
+ pr_debug("dma_size %d bytes\n", dma_size);
+
+ snd_BUG_ON(dma_size > mx1_mx2_pcm_hardware.period_bytes_max);
+
+ mem_addr = (dma_addr_t)(runtime->dma_addr + offset);
+ dev_addr = prtd->dma_params->per_address;
+ pr_debug("%s: mem_addr is %x\n dev_addr is %x\n",
+ __func__, mem_addr, dev_addr);
+
+ ret = imx_dma_setup_single(prtd->dma_ch, mem_addr,
+ dma_size, dev_addr,
+ prtd->dma_params->transfer_type);
+ if (ret < 0) {
+ printk(KERN_ERR "Error %d configuring DMA\n", ret);
+ return ret;
+ }
+ imx_dma_enable(prtd->dma_ch);
+
+ pr_debug("%s: transfer enabled\nmem_addr = %x\n",
+ __func__, (unsigned int) mem_addr);
+ pr_debug("dev_addr = %x\ndma_size = %d\n",
+ (unsigned int) dev_addr, dma_size);
+
+ prtd->tx_spin = 1; /* FGA little trick to retrieve DMA pos */
+ prtd->period++;
+ prtd->period %= runtime->periods;
+ }
+ return ret;
+}
+
+
+/**
+ * This is a callback which will be called
+ * when a TX transfer finishes. The call occurs
+ * in interrupt context.
+ *
+ * @param data pointer to the structure of the current stream.
+ *
+ */
+static void audio_dma_irq(int channel, void *data)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
+ struct mx1_mx2_runtime_data *prtd;
+ unsigned int dma_size;
+ unsigned int previous_period;
+ unsigned int offset;
+
+ substream = data;
+ runtime = substream->runtime;
+ prtd = runtime->private_data;
+ previous_period = prtd->periods;
+ dma_size = frames_to_bytes(runtime, runtime->period_size);
+ offset = dma_size * previous_period;
+
+ prtd->tx_spin = 0;
+ prtd->periods++;
+ prtd->periods %= runtime->periods;
+
+ pr_debug("%s: irq per %d offset %x\n", __func__, prtd->periods, offset);
+
+ /*
+ * If we are getting a callback for an active stream then we inform
+ * the PCM middle layer we've finished a period
+ */
+ if (prtd->active)
+ snd_pcm_period_elapsed(substream);
+
+ /*
+ * Trigger the next DMA transfer
+ */
+ dma_new_period(substream);
+}
+
+/**
+ * This function configures the hardware to allow audio
+ * playback operations. It is called by the ALSA framework.
+ *
+ * @param substream pointer to the structure of the current stream.
+ *
+ * @return 0 on success, -1 otherwise.
+ */
+static int
+snd_mx1_mx2_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct mx1_mx2_runtime_data *prtd = runtime->private_data;
+
+ prtd->period = 0;
+ prtd->periods = 0;
+
+ return 0;
+}
+
+static int mx1_mx2_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret;
+
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+ if (ret < 0) {
+ printk(KERN_ERR "%s: Error %d failed to malloc pcm pages \n",
+ __func__, ret);
+ return ret;
+ }
+
+ pr_debug("%s: snd_imx1_mx2_audio_hw_params runtime->dma_addr 0x(%x)\n",
+ __func__, (unsigned int)runtime->dma_addr);
+ pr_debug("%s: snd_imx1_mx2_audio_hw_params runtime->dma_area 0x(%x)\n",
+ __func__, (unsigned int)runtime->dma_area);
+ pr_debug("%s: snd_imx1_mx2_audio_hw_params runtime->dma_bytes 0x(%x)\n",
+ __func__, (unsigned int)runtime->dma_bytes);
+
+ return ret;
+}
+
+static int mx1_mx2_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct mx1_mx2_runtime_data *prtd = runtime->private_data;
+
+ imx_dma_free(prtd->dma_ch);
+
+ snd_pcm_lib_free_pages(substream);
+
+ return 0;
+}
+
+static int mx1_mx2_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct mx1_mx2_runtime_data *prtd = substream->runtime->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ prtd->tx_spin = 0;
+ /* requested stream startup */
+ prtd->active = 1;
+ pr_debug("%s: starting dma_new_period\n", __func__);
+ ret = dma_new_period(substream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ /* requested stream shutdown */
+ pr_debug("%s: stopping dma transfer\n", __func__);
+ ret = audio_stop_dma(substream);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static snd_pcm_uframes_t
+mx1_mx2_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct mx1_mx2_runtime_data *prtd = runtime->private_data;
+ unsigned int offset = 0;
+
+ /* tx_spin value is used here to check if a transfer is active */
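+ /*
+ * The DMA position is not read back directly; while a transfer is
+ * in flight, report half a period past the last completed period
+ * as an estimate.
+ */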
+ if (prtd->tx_spin) {
+ offset = (runtime->period_size * (prtd->periods)) +
+ (runtime->period_size >> 1);
+ if (offset >= runtime->buffer_size)
+ offset = runtime->period_size >> 1;
+ } else {
+ offset = (runtime->period_size * (prtd->periods));
+ if (offset >= runtime->buffer_size)
+ offset = 0;
+ }
+ pr_debug("%s: pointer offset %x\n", __func__, offset);
+
+ return offset;
+}
+
+static int mx1_mx2_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct mx1_mx2_runtime_data *prtd;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mx1_mx2_pcm_dma_params *dma_data = rtd->dai->cpu_dai->dma_data;
+ int ret;
+
+ snd_soc_set_runtime_hwparams(substream, &mx1_mx2_pcm_hardware);
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
+ prtd = kzalloc(sizeof(struct mx1_mx2_runtime_data), GFP_KERNEL);
+ if (prtd == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ runtime->private_data = prtd;
+
+ if (!dma_data)
+ return -ENODEV;
+
+ prtd->dma_params = dma_data;
+
+ pr_debug("%s: Requesting dma channel (%s)\n", __func__,
+ prtd->dma_params->name);
+ prtd->dma_ch = imx_dma_request_by_prio(prtd->dma_params->name,
+ DMA_PRIO_HIGH);
+ if (prtd->dma_ch < 0) {
+ printk(KERN_ERR "Error %d requesting dma channel\n", ret);
+ return ret;
+ }
+ imx_dma_config_burstlen(prtd->dma_ch,
+ prtd->dma_params->watermark_level);
+
+ ret = imx_dma_config_channel(prtd->dma_ch,
+ prtd->dma_params->per_config,
+ prtd->dma_params->mem_config,
+ prtd->dma_params->event_id, 0);
+
+ if (ret) {
+ printk(KERN_ERR "Error %d configuring dma channel %d\n",
+ ret, prtd->dma_ch);
+ return ret;
+ }
+
+ pr_debug("%s: Setting tx dma callback function\n", __func__);
+ ret = imx_dma_setup_handlers(prtd->dma_ch,
+ audio_dma_irq, NULL,
+ (void *)substream);
+ if (ret < 0) {
+ printk(KERN_ERR "Error %d setting dma callback function\n", ret);
+ return ret;
+ }
+ return 0;
+
+ out:
+ return ret;
+}
+
+static int mx1_mx2_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct mx1_mx2_runtime_data *prtd = runtime->private_data;
+
+ kfree(prtd);
+
+ return 0;
+}
+
+static int mx1_mx2_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ return dma_mmap_writecombine(substream->pcm->card->dev, vma,
+ runtime->dma_area,
+ runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static struct snd_pcm_ops mx1_mx2_pcm_ops = {
+ .open = mx1_mx2_pcm_open,
+ .close = mx1_mx2_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = mx1_mx2_pcm_hw_params,
+ .hw_free = mx1_mx2_pcm_hw_free,
+ .prepare = snd_mx1_mx2_prepare,
+ .trigger = mx1_mx2_pcm_trigger,
+ .pointer = mx1_mx2_pcm_pointer,
+ .mmap = mx1_mx2_pcm_mmap,
+};
+
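+/*
+ * 32-bit DMA mask, installed as both the streaming and the coherent
+ * mask in mx1_mx2_pcm_new() below.
+ */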
+static u64 mx1_mx2_pcm_dmamask = 0xffffffff;
+
+static int mx1_mx2_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size = mx1_mx2_pcm_hardware.buffer_bytes_max;
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = pcm->card->dev;
+ buf->private_data = NULL;
+
+ /* Reserve an uncached, write-combined memory area for DMA */
+ buf->area = dma_alloc_writecombine(pcm->card->dev, size,
+ &buf->addr, GFP_KERNEL);
+
+ pr_debug("%s: preallocate_dma_buffer: area=%p, addr=%p, size=%d\n",
+ __func__, (void *) buf->area, (void *) buf->addr, size);
+
+ if (!buf->area)
+ return -ENOMEM;
+
+ buf->bytes = size;
+ return 0;
+}
+
+static void mx1_mx2_pcm_free_dma_buffers(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *buf;
+ int stream;
+
+ for (stream = 0; stream < 2; stream++) {
+ substream = pcm->streams[stream].substream;
+ if (!substream)
+ continue;
+
+ buf = &substream->dma_buffer;
+ if (!buf->area)
+ continue;
+
+ dma_free_writecombine(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+ }
+}
+
+static int mx1_mx2_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
+ struct snd_pcm *pcm)
+{
+ int ret = 0;
+
+ if (!card->dev->dma_mask)
+ card->dev->dma_mask = &mx1_mx2_pcm_dmamask;
+ if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = 0xffffffff;
+
+ if (dai->playback.channels_min) {
+ ret = mx1_mx2_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ pr_debug("%s: preallocate playback buffer\n", __func__);
+ if (ret)
+ goto out;
+ }
+
+ if (dai->capture.channels_min) {
+ ret = mx1_mx2_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE);
+ pr_debug("%s: preallocate capture buffer\n", __func__);
+ if (ret)
+ goto out;
+ }
+ out:
+ return ret;
+}
+
+struct snd_soc_platform mx1_mx2_soc_platform = {
+ .name = "mx1_mx2-audio",
+ .pcm_ops = &mx1_mx2_pcm_ops,
+ .pcm_new = mx1_mx2_pcm_new,
+ .pcm_free = mx1_mx2_pcm_free_dma_buffers,
+};
+EXPORT_SYMBOL_GPL(mx1_mx2_soc_platform);
+
+static int __init mx1_mx2_soc_platform_init(void)
+{
+ return snd_soc_register_platform(&mx1_mx2_soc_platform);
+}
+module_init(mx1_mx2_soc_platform_init);
+
+static void __exit mx1_mx2_soc_platform_exit(void)
+{
+ snd_soc_unregister_platform(&mx1_mx2_soc_platform);
+}
+module_exit(mx1_mx2_soc_platform_exit);
+
+MODULE_AUTHOR("Javier Martin, javier.martin@vista-silicon.com");
+MODULE_DESCRIPTION("Freescale i.MX2x, i.MX1x PCM DMA module");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/imx/mx1_mx2-pcm.h b/sound/soc/imx/mx1_mx2-pcm.h
new file mode 100644
index 000000000000..2e528106570b
--- /dev/null
+++ b/sound/soc/imx/mx1_mx2-pcm.h
@@ -0,0 +1,26 @@
+/*
+ * mx1_mx2-pcm.h :- ASoC platform header for Freescale i.MX1x, i.MX2x
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MX1_MX2_PCM_H
+#define _MX1_MX2_PCM_H
+
+/* DMA information for mx1_mx2 platforms */
+struct mx1_mx2_pcm_dma_params {
+ char *name; /* stream identifier */
+ unsigned int transfer_type; /* READ or WRITE DMA transfer */
+ dma_addr_t per_address; /* physical address of SSI fifo */
+ int event_id; /* fixed DMA number for SSI fifo */
+ int watermark_level; /* SSI fifo watermark level */
+ int per_config; /* DMA Config flags for peripheral */
+ int mem_config; /* DMA Config flags for RAM */
+};
+
+/* platform data */
+extern struct snd_soc_platform mx1_mx2_soc_platform;
+
+#endif
diff --git a/sound/soc/imx/mx27vis_wm8974.c b/sound/soc/imx/mx27vis_wm8974.c
new file mode 100644
index 000000000000..e4dcb539108a
--- /dev/null
+++ b/sound/soc/imx/mx27vis_wm8974.c
@@ -0,0 +1,317 @@
+/*
+ * mx27vis_wm8974.c -- SoC audio for mx27vis
+ *
+ * Copyright 2009 Vista Silicon S.L.
+ * Author: Javier Martin
+ * javier.martin@vista-silicon.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+
+#include "../codecs/wm8974.h"
+#include "mx1_mx2-pcm.h"
+#include "mxc-ssi.h"
+#include <mach/gpio.h>
+#include <mach/iomux.h>
+
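+/*
+ * Placeholder for codec DAI callback arguments (such as the set_pll id)
+ * that this board does not make use of.
+ */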
+#define IGNORED_ARG 0
+
+
+static struct snd_soc_card mx27vis;
+
+/*
+ * This function connects SSI1 (HPCR1) as slave to
+ * SSI1 external signals (PPCR1)
+ * As slave, HPCR1 must set TFSDIR and TCLKDIR as inputs from
+ * port 4
+ */
+void audmux_connect_1_4(void)
+{
+ pr_debug("AUDMUX: normal operation mode\n");
+ /* Reset HPCR1 and PPCR1 */
+
+ DAM_HPCR1 = 0x00000000;
+ DAM_PPCR1 = 0x00000000;
+
+ /* set to synchronous */
+ DAM_HPCR1 |= AUDMUX_HPCR_SYN;
+ DAM_PPCR1 |= AUDMUX_PPCR_SYN;
+
+
+ /* set Rx sources 1 <--> 4 */
+ DAM_HPCR1 |= AUDMUX_HPCR_RXDSEL(3); /* port 4 */
+ DAM_PPCR1 |= AUDMUX_PPCR_RXDSEL(0); /* port 1 */
+
+ /* set Tx frame and Clock direction and source 4 --> 1 output */
+ DAM_HPCR1 |= AUDMUX_HPCR_TFSDIR | AUDMUX_HPCR_TCLKDIR;
+ DAM_HPCR1 |= AUDMUX_HPCR_TFCSEL(3); /* TxDS and TxCclk from port 4 */
+
+ return;
+}
+
+static int mx27vis_hifi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ unsigned int pll_out = 0, bclk = 0, fmt = 0, mclk = 0;
+ int ret = 0;
+
+ /*
+ * The WM8974 is better at generating accurate audio clocks than the
+ * MX27 SSI controller, so we will use it as master when we can.
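+ * The pll_out values below belong to the two standard audio clock
+ * families: multiples of 12.288 MHz (256 * 48 kHz) for the 8/16/48/96 kHz
+ * rates and 11.2896 MHz (256 * 44.1 kHz) for the 11.025/22.05/44.1/88.2 kHz
+ * rates.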
+ */
+ switch (params_rate(params)) {
+ case 8000:
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ mclk = WM8974_MCLKDIV_12;
+ pll_out = 24576000;
+ break;
+ case 16000:
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ pll_out = 12288000;
+ break;
+ case 48000:
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ bclk = WM8974_BCLKDIV_4;
+ pll_out = 12288000;
+ break;
+ case 96000:
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ bclk = WM8974_BCLKDIV_2;
+ pll_out = 12288000;
+ break;
+ case 11025:
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ bclk = WM8974_BCLKDIV_16;
+ pll_out = 11289600;
+ break;
+ case 22050:
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ bclk = WM8974_BCLKDIV_8;
+ pll_out = 11289600;
+ break;
+ case 44100:
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ bclk = WM8974_BCLKDIV_4;
+ mclk = WM8974_MCLKDIV_2;
+ pll_out = 11289600;
+ break;
+ case 88200:
+ fmt = SND_SOC_DAIFMT_CBM_CFM;
+ bclk = WM8974_BCLKDIV_2;
+ pll_out = 11289600;
+ break;
+ default:
+ printk(KERN_ERR "%s: unsupported rate %d\n", __func__,
+ params_rate(params));
+ return -EINVAL;
+ }
+
+ /* set codec DAI configuration */
+ ret = codec_dai->ops->set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
+ SND_SOC_DAIFMT_SYNC | fmt);
+ if (ret < 0) {
+ printk(KERN_ERR "Error from codec DAI configuration\n");
+ return ret;
+ }
+
+ /* set cpu DAI configuration */
+ ret = cpu_dai->ops->set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_SYNC | fmt);
+ if (ret < 0) {
+ printk(KERN_ERR "Error from cpu DAI configuration\n");
+ return ret;
+ }
+
+ /* Set the DC field of STCCR to 1, i.e. two time slots per frame */
+ ret = cpu_dai->ops->set_tdm_slot(cpu_dai, 0, 2);
+
+ /* set the SSI system clock as input */
+ ret = cpu_dai->ops->set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ printk(KERN_ERR "Error when setting system SSI clk\n");
+ return ret;
+ }
+
+ /* set codec BCLK division for sample rate */
+ ret = codec_dai->ops->set_clkdiv(codec_dai, WM8974_BCLKDIV, bclk);
+ if (ret < 0) {
+ printk(KERN_ERR "Error when setting BCLK division\n");
+ return ret;
+ }
+
+
+ /* codec PLL input is 25 MHz */
+ ret = codec_dai->ops->set_pll(codec_dai, IGNORED_ARG,
+ 25000000, pll_out);
+ if (ret < 0) {
+ printk(KERN_ERR "Error when setting PLL input\n");
+ return ret;
+ }
+
+ /* set codec MCLK division for sample rate */
+ ret = codec_dai->ops->set_clkdiv(codec_dai, WM8974_MCLKDIV, mclk);
+ if (ret < 0) {
+ printk(KERN_ERR "Error when setting MCLK division\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mx27vis_hifi_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+
+ /* disable the PLL */
+ return codec_dai->ops->set_pll(codec_dai, IGNORED_ARG, 0, 0);
+}
+
+/*
+ * mx27vis WM8974 HiFi DAI operations.
+ */
+static struct snd_soc_ops mx27vis_hifi_ops = {
+ .hw_params = mx27vis_hifi_hw_params,
+ .hw_free = mx27vis_hifi_hw_free,
+};
+
+
+static int mx27vis_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mx27vis_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int mx27vis_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ ret = get_ssi_clk(0, &pdev->dev);
+
+ if (ret < 0) {
+ printk(KERN_ERR "%s: cant get ssi clock\n", __func__);
+ return ret;
+ }
+
+
+ return 0;
+}
+
+static int mx27vis_remove(struct platform_device *pdev)
+{
+ put_ssi_clk(0);
+ return 0;
+}
+
+static struct snd_soc_dai_link mx27vis_dai[] = {
+{ /* Hifi Playback*/
+ .name = "WM8974",
+ .stream_name = "WM8974 HiFi",
+ .cpu_dai = &imx_ssi_pcm_dai[0],
+ .codec_dai = &wm8974_dai,
+ .ops = &mx27vis_hifi_ops,
+},
+};
+
+static struct snd_soc_card mx27vis = {
+ .name = "mx27vis",
+ .platform = &mx1_mx2_soc_platform,
+ .probe = mx27vis_probe,
+ .remove = mx27vis_remove,
+ .suspend_pre = mx27vis_suspend,
+ .resume_post = mx27vis_resume,
+ .dai_link = mx27vis_dai,
+ .num_links = ARRAY_SIZE(mx27vis_dai),
+};
+
+static struct snd_soc_device mx27vis_snd_devdata = {
+ .card = &mx27vis,
+ .codec_dev = &soc_codec_dev_wm8974,
+};
+
+static struct platform_device *mx27vis_snd_device;
+
+/* Temporary definition of board specific behaviour */
+void gpio_ssi_active(int ssi_num)
+{
+ int ret = 0;
+
+ unsigned int ssi1_pins[] = {
+ PC20_PF_SSI1_FS,
+ PC21_PF_SSI1_RXD,
+ PC22_PF_SSI1_TXD,
+ PC23_PF_SSI1_CLK,
+ };
+ unsigned int ssi2_pins[] = {
+ PC24_PF_SSI2_FS,
+ PC25_PF_SSI2_RXD,
+ PC26_PF_SSI2_TXD,
+ PC27_PF_SSI2_CLK,
+ };
+ if (ssi_num == 0)
+ ret = mxc_gpio_setup_multiple_pins(ssi1_pins,
+ ARRAY_SIZE(ssi1_pins), "SSI1");
+ else
+ ret = mxc_gpio_setup_multiple_pins(ssi2_pins,
+ ARRAY_SIZE(ssi2_pins), "SSI2");
+ if (ret)
+ printk(KERN_ERR "Error requesting ssi %d pins\n", ssi_num);
+}
+
+
+static int __init mx27vis_init(void)
+{
+ int ret;
+
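+ /*
+ * Register the ASoC "soc-audio" platform device that binds the
+ * mx27vis card defined above to the platform and codec drivers.
+ */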
+ mx27vis_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!mx27vis_snd_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(mx27vis_snd_device, &mx27vis_snd_devdata);
+ mx27vis_snd_devdata.dev = &mx27vis_snd_device->dev;
+ ret = platform_device_add(mx27vis_snd_device);
+
+ if (ret) {
+ printk(KERN_ERR "ASoC: Platform device add failed\n");
+ platform_device_put(mx27vis_snd_device);
+ return ret;
+ }
+
+ /* WM8974 uses SSI1 (HPCR1) via AUDMUX port 4 for audio (PPCR1) */
+ gpio_ssi_active(0);
+ audmux_connect_1_4();
+
+ return ret;
+}
+
+static void __exit mx27vis_exit(void)
+{
+ /* TODO: release the SSI pins requested in gpio_ssi_active(), e.g. via a matching gpio_ssi_inactive() helper */
+}
+
+module_init(mx27vis_init);
+module_exit(mx27vis_exit);
+
+
+MODULE_AUTHOR("Javier Martin, javier.martin@vista-silicon.com");
+MODULE_DESCRIPTION("ALSA SoC WM8974 mx27vis");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/imx/mxc-ssi.c b/sound/soc/imx/mxc-ssi.c
new file mode 100644
index 000000000000..3806ff2c0cd4
--- /dev/null
+++ b/sound/soc/imx/mxc-ssi.c
@@ -0,0 +1,868 @@
+/*
+ * mxc-ssi.c -- SSI driver for Freescale IMX
+ *
+ * Copyright 2006 Wolfson Microelectronics PLC.
+ * Author: Liam Girdwood
+ * liam.girdwood@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ * Based on mxc-alsa-mc13783 (C) 2006 Freescale.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * TODO:
+ * Need to rework SSI register defs when new defs go into mainline.
+ * Add support for TDM and FIFO 1.
+ * Add support for i.mx3x DMA interface.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <mach/dma-mx1-mx2.h>
+#include <asm/mach-types.h>
+
+#include "mxc-ssi.h"
+#include "mx1_mx2-pcm.h"
+
+#define SSI1_PORT 0
+#define SSI2_PORT 1
+
+static int ssi_active[2] = {0, 0};
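+/*
+ * Per-port use count: the SSI port is reset and its FIFO watermarks
+ * programmed only when the first stream on that port starts up, and the
+ * port is shut down again when the last stream closes.
+ */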
+
+/* DMA information for mx1_mx2 platforms */
+static struct mx1_mx2_pcm_dma_params imx_ssi1_pcm_stereo_out0 = {
+ .name = "SSI1 PCM Stereo out 0",
+ .transfer_type = DMA_MODE_WRITE,
+ .per_address = SSI1_BASE_ADDR + STX0,
+ .event_id = DMA_REQ_SSI1_TX0,
+ .watermark_level = TXFIFO_WATERMARK,
+ .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
+ .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+};
+
+static struct mx1_mx2_pcm_dma_params imx_ssi1_pcm_stereo_out1 = {
+ .name = "SSI1 PCM Stereo out 1",
+ .transfer_type = DMA_MODE_WRITE,
+ .per_address = SSI1_BASE_ADDR + STX1,
+ .event_id = DMA_REQ_SSI1_TX1,
+ .watermark_level = TXFIFO_WATERMARK,
+ .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
+ .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+};
+
+static struct mx1_mx2_pcm_dma_params imx_ssi1_pcm_stereo_in0 = {
+ .name = "SSI1 PCM Stereo in 0",
+ .transfer_type = DMA_MODE_READ,
+ .per_address = SSI1_BASE_ADDR + SRX0,
+ .event_id = DMA_REQ_SSI1_RX0,
+ .watermark_level = RXFIFO_WATERMARK,
+ .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
+ .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+};
+
+static struct mx1_mx2_pcm_dma_params imx_ssi1_pcm_stereo_in1 = {
+ .name = "SSI1 PCM Stereo in 1",
+ .transfer_type = DMA_MODE_READ,
+ .per_address = SSI1_BASE_ADDR + SRX1,
+ .event_id = DMA_REQ_SSI1_RX1,
+ .watermark_level = RXFIFO_WATERMARK,
+ .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
+ .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+};
+
+static struct mx1_mx2_pcm_dma_params imx_ssi2_pcm_stereo_out0 = {
+ .name = "SSI2 PCM Stereo out 0",
+ .transfer_type = DMA_MODE_WRITE,
+ .per_address = SSI2_BASE_ADDR + STX0,
+ .event_id = DMA_REQ_SSI2_TX0,
+ .watermark_level = TXFIFO_WATERMARK,
+ .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
+ .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+};
+
+static struct mx1_mx2_pcm_dma_params imx_ssi2_pcm_stereo_out1 = {
+ .name = "SSI2 PCM Stereo out 1",
+ .transfer_type = DMA_MODE_WRITE,
+ .per_address = SSI2_BASE_ADDR + STX1,
+ .event_id = DMA_REQ_SSI2_TX1,
+ .watermark_level = TXFIFO_WATERMARK,
+ .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
+ .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+};
+
+static struct mx1_mx2_pcm_dma_params imx_ssi2_pcm_stereo_in0 = {
+ .name = "SSI2 PCM Stereo in 0",
+ .transfer_type = DMA_MODE_READ,
+ .per_address = SSI2_BASE_ADDR + SRX0,
+ .event_id = DMA_REQ_SSI2_RX0,
+ .watermark_level = RXFIFO_WATERMARK,
+ .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
+ .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+};
+
+static struct mx1_mx2_pcm_dma_params imx_ssi2_pcm_stereo_in1 = {
+ .name = "SSI2 PCM Stereo in 1",
+ .transfer_type = DMA_MODE_READ,
+ .per_address = SSI2_BASE_ADDR + SRX1,
+ .event_id = DMA_REQ_SSI2_RX1,
+ .watermark_level = RXFIFO_WATERMARK,
+ .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
+ .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
+};
+
+static struct clk *ssi_clk0, *ssi_clk1;
+
+int get_ssi_clk(int ssi, struct device *dev)
+{
+ switch (ssi) {
+ case 0:
+ ssi_clk0 = clk_get(dev, "ssi1");
+ if (IS_ERR(ssi_clk0))
+ return PTR_ERR(ssi_clk0);
+ return 0;
+ case 1:
+ ssi_clk1 = clk_get(dev, "ssi2");
+ if (IS_ERR(ssi_clk1))
+ return PTR_ERR(ssi_clk1);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(get_ssi_clk);
+
+void put_ssi_clk(int ssi)
+{
+ switch (ssi) {
+ case 0:
+ clk_put(ssi_clk0);
+ ssi_clk0 = NULL;
+ break;
+ case 1:
+ clk_put(ssi_clk1);
+ ssi_clk1 = NULL;
+ break;
+ }
+}
+EXPORT_SYMBOL(put_ssi_clk);
+
+/*
+ * SSI system clock configuration.
+ * Should only be called when port is inactive (i.e. SSIEN = 0).
+ */
+static int imx_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ u32 scr;
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ scr = SSI1_SCR;
+ pr_debug("%s: SCR for SSI1 is %x\n", __func__, scr);
+ } else {
+ scr = SSI2_SCR;
+ pr_debug("%s: SCR for SSI2 is %x\n", __func__, scr);
+ }
+
+ if (scr & SSI_SCR_SSIEN) {
+ printk(KERN_WARNING "Warning ssi already enabled\n");
+ return 0;
+ }
+
+ switch (clk_id) {
+ case IMX_SSP_SYS_CLK:
+ if (dir == SND_SOC_CLOCK_OUT) {
+ scr |= SSI_SCR_SYS_CLK_EN;
+ pr_debug("%s: SYS_CLK is output\n", __func__);
+ } else {
+ scr &= ~SSI_SCR_SYS_CLK_EN;
+ pr_debug("%s: SYS_CLK is input\n", __func__);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ pr_debug("%s: writeback of SSI1_SCR\n", __func__);
+ SSI1_SCR = scr;
+ } else {
+ pr_debug("%s: writeback of SSI2_SCR\n", __func__);
+ SSI2_SCR = scr;
+ }
+
+ return 0;
+}
+
+/*
+ * SSI Clock dividers
+ * Should only be called when port is inactive (i.e. SSIEN = 0).
+ */
+static int imx_ssi_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
+ int div_id, int div)
+{
+ u32 stccr, srccr;
+
+ pr_debug("%s\n", __func__);
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ if (SSI1_SCR & SSI_SCR_SSIEN)
+ return 0;
+ srccr = SSI1_SRCCR;
+ stccr = SSI1_STCCR;
+ } else {
+ if (SSI2_SCR & SSI_SCR_SSIEN)
+ return 0;
+ srccr = SSI2_SRCCR;
+ stccr = SSI2_STCCR;
+ }
+
+ switch (div_id) {
+ case IMX_SSI_TX_DIV_2:
+ stccr &= ~SSI_STCCR_DIV2;
+ stccr |= div;
+ break;
+ case IMX_SSI_TX_DIV_PSR:
+ stccr &= ~SSI_STCCR_PSR;
+ stccr |= div;
+ break;
+ case IMX_SSI_TX_DIV_PM:
+ stccr &= ~0xff;
+ stccr |= SSI_STCCR_PM(div);
+ break;
+ case IMX_SSI_RX_DIV_2:
+ srccr &= ~SSI_SRCCR_DIV2;
+ srccr |= div;
+ break;
+ case IMX_SSI_RX_DIV_PSR:
+ srccr &= ~SSI_SRCCR_PSR;
+ srccr |= div;
+ break;
+ case IMX_SSI_RX_DIV_PM:
+ srccr &= ~0xff;
+ srccr |= SSI_SRCCR_PM(div);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ SSI1_STCCR = stccr;
+ SSI1_SRCCR = srccr;
+ } else {
+ SSI2_STCCR = stccr;
+ SSI2_SRCCR = srccr;
+ }
+ return 0;
+}
+
+/*
+ * SSI Network Mode or TDM slots configuration.
+ * Should only be called when port is inactive (i.e. SSIEN = 0).
+ */
+static int imx_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
+ unsigned int mask, int slots)
+{
+ u32 stmsk, srmsk, stccr;
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ if (SSI1_SCR & SSI_SCR_SSIEN) {
+ printk(KERN_WARNING "Warning ssi already enabled\n");
+ return 0;
+ }
+ stccr = SSI1_STCCR;
+ } else {
+ if (SSI2_SCR & SSI_SCR_SSIEN) {
+ printk(KERN_WARNING "Warning ssi already enabled\n");
+ return 0;
+ }
+ stccr = SSI2_STCCR;
+ }
+
+ stmsk = srmsk = mask;
+ stccr &= ~SSI_STCCR_DC_MASK;
+ stccr |= SSI_STCCR_DC(slots - 1);
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ SSI1_STMSK = stmsk;
+ SSI1_SRMSK = srmsk;
+ SSI1_SRCCR = SSI1_STCCR = stccr;
+ } else {
+ SSI2_STMSK = stmsk;
+ SSI2_SRMSK = srmsk;
+ SSI2_SRCCR = SSI2_STCCR = stccr;
+ }
+
+ return 0;
+}
+
+/*
+ * SSI DAI format configuration.
+ * Should only be called when port is inactive (i.e. SSIEN = 0).
+ * Note: We don't use the I2S modes but instead manually configure the
+ * SSI for I2S.
+ */
+static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+{
+ u32 stcr = 0, srcr = 0, scr;
+
+ /*
+ * Read STCR back first so that this function does not clobber
+ * previously configured transmit settings.
+ */
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ stcr = SSI1_STCR;
+ scr = SSI1_SCR & ~(SSI_SCR_SYN | SSI_SCR_NET);
+ } else {
+ stcr = SSI2_STCR;
+ scr = SSI2_SCR & ~(SSI_SCR_SYN | SSI_SCR_NET);
+ }
+
+ if (scr & SSI_SCR_SSIEN) {
+ printk(KERN_WARNING "Warning ssi already enabled\n");
+ return 0;
+ }
+
+ /* DAI mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ /* data on rising edge of bclk, frame low 1clk before data */
+ stcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0;
+ srcr |= SSI_SRCR_RFSI | SSI_SRCR_REFS | SSI_SRCR_RXBIT0;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ /* data on rising edge of bclk, frame high with data */
+ stcr |= SSI_STCR_TXBIT0;
+ srcr |= SSI_SRCR_RXBIT0;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ /* data on rising edge of bclk, frame high with data */
+ stcr |= SSI_STCR_TFSL;
+ srcr |= SSI_SRCR_RFSL;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ /* data on rising edge of bclk, frame high 1clk before data */
+ stcr |= SSI_STCR_TFSL | SSI_STCR_TEFS;
+ srcr |= SSI_SRCR_RFSL | SSI_SRCR_REFS;
+ break;
+ }
+
+ /* DAI clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_IF:
+ stcr |= SSI_STCR_TFSI;
+ stcr &= ~SSI_STCR_TSCKP;
+ srcr |= SSI_SRCR_RFSI;
+ srcr &= ~SSI_SRCR_RSCKP;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ stcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI);
+ srcr &= ~(SSI_SRCR_RSCKP | SSI_SRCR_RFSI);
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ stcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP;
+ srcr |= SSI_SRCR_RFSI | SSI_SRCR_RSCKP;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ stcr &= ~SSI_STCR_TFSI;
+ stcr |= SSI_STCR_TSCKP;
+ srcr &= ~SSI_SRCR_RFSI;
+ srcr |= SSI_SRCR_RSCKP;
+ break;
+ }
+
+ /* DAI clock master masks */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ stcr |= SSI_STCR_TFDIR | SSI_STCR_TXDIR;
+ srcr |= SSI_SRCR_RFDIR | SSI_SRCR_RXDIR;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ stcr |= SSI_STCR_TFDIR;
+ srcr |= SSI_SRCR_RFDIR;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ stcr |= SSI_STCR_TXDIR;
+ srcr |= SSI_SRCR_RXDIR;
+ break;
+ }
+
+ /* sync */
+ if (!(fmt & SND_SOC_DAIFMT_ASYNC))
+ scr |= SSI_SCR_SYN;
+
+ /* TDM - only stereo supported at the moment */
+ if (fmt & SND_SOC_DAIFMT_TDM)
+ scr |= SSI_SCR_NET;
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ SSI1_STCR = stcr;
+ SSI1_SRCR = srcr;
+ SSI1_SCR = scr;
+ } else {
+ SSI2_STCR = stcr;
+ SSI2_SRCR = srcr;
+ SSI2_SCR = scr;
+ }
+
+ return 0;
+}
+
+static int imx_ssi_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* set up TX DMA params */
+ switch (cpu_dai->id) {
+ case IMX_DAI_SSI0:
+ cpu_dai->dma_data = &imx_ssi1_pcm_stereo_out0;
+ break;
+ case IMX_DAI_SSI1:
+ cpu_dai->dma_data = &imx_ssi1_pcm_stereo_out1;
+ break;
+ case IMX_DAI_SSI2:
+ cpu_dai->dma_data = &imx_ssi2_pcm_stereo_out0;
+ break;
+ case IMX_DAI_SSI3:
+ cpu_dai->dma_data = &imx_ssi2_pcm_stereo_out1;
+ }
+ pr_debug("%s: (playback)\n", __func__);
+ } else {
+ /* set up RX DMA params */
+ switch (cpu_dai->id) {
+ case IMX_DAI_SSI0:
+ cpu_dai->dma_data = &imx_ssi1_pcm_stereo_in0;
+ break;
+ case IMX_DAI_SSI1:
+ cpu_dai->dma_data = &imx_ssi1_pcm_stereo_in1;
+ break;
+ case IMX_DAI_SSI2:
+ cpu_dai->dma_data = &imx_ssi2_pcm_stereo_in0;
+ break;
+ case IMX_DAI_SSI3:
+ cpu_dai->dma_data = &imx_ssi2_pcm_stereo_in1;
+ }
+ pr_debug("%s: (capture)\n", __func__);
+ }
+
+ /*
+ * We can't really change any SSI values after the SSI is enabled;
+ * this needs fixing in software for maximum flexibility - lrg
+ */
+ if (cpu_dai->active) {
+ printk(KERN_WARNING "Warning ssi already enabled\n");
+ return 0;
+ }
+
+ /* reset the SSI port - Sect 45.4.4 */
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+
+ if (!ssi_clk0)
+ return -EINVAL;
+
+ if (ssi_active[SSI1_PORT]++) {
+ pr_debug("%s: exit before reset\n", __func__);
+ return 0;
+ }
+
+ /* SSI1 Reset */
+ SSI1_SCR = 0;
+
+ SSI1_SFCSR = SSI_SFCSR_RFWM1(RXFIFO_WATERMARK) |
+ SSI_SFCSR_RFWM0(RXFIFO_WATERMARK) |
+ SSI_SFCSR_TFWM1(TXFIFO_WATERMARK) |
+ SSI_SFCSR_TFWM0(TXFIFO_WATERMARK);
+ } else {
+
+ if (!ssi_clk1)
+ return -EINVAL;
+
+ if (ssi_active[SSI2_PORT]++) {
+ pr_debug("%s: exit before reset\n", __func__);
+ return 0;
+ }
+
+ /* SSI2 Reset */
+ SSI2_SCR = 0;
+
+ SSI2_SFCSR = SSI_SFCSR_RFWM1(RXFIFO_WATERMARK) |
+ SSI_SFCSR_RFWM0(RXFIFO_WATERMARK) |
+ SSI_SFCSR_TFWM1(TXFIFO_WATERMARK) |
+ SSI_SFCSR_TFWM0(TXFIFO_WATERMARK);
+ }
+
+ return 0;
+}
+
+int imx_ssi_hw_tx_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ u32 stccr, stcr, sier;
+
+ pr_debug("%s\n", __func__);
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ stccr = SSI1_STCCR & ~SSI_STCCR_WL_MASK;
+ stcr = SSI1_STCR;
+ sier = SSI1_SIER;
+ } else {
+ stccr = SSI2_STCCR & ~SSI_STCCR_WL_MASK;
+ stcr = SSI2_STCR;
+ sier = SSI2_SIER;
+ }
+
+ /* DAI data (word) size */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ stccr |= SSI_STCCR_WL(16);
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ stccr |= SSI_STCCR_WL(20);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ stccr |= SSI_STCCR_WL(24);
+ break;
+ }
+
+ /* enable the TX FIFO and DMA requests */
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
+ stcr |= SSI_STCR_TFEN0;
+ else
+ stcr |= SSI_STCR_TFEN1;
+ sier |= SSI_SIER_TDMAE;
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ SSI1_STCR = stcr;
+ SSI1_STCCR = stccr;
+ SSI1_SIER = sier;
+ } else {
+ SSI2_STCR = stcr;
+ SSI2_STCCR = stccr;
+ SSI2_SIER = sier;
+ }
+
+ return 0;
+}
+
+int imx_ssi_hw_rx_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ u32 srccr, srcr, sier;
+
+ pr_debug("%s\n", __func__);
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ srccr = SSI1_SRCCR & ~SSI_SRCCR_WL_MASK;
+ srcr = SSI1_SRCR;
+ sier = SSI1_SIER;
+ } else {
+ srccr = SSI2_SRCCR & ~SSI_SRCCR_WL_MASK;
+ srcr = SSI2_SRCR;
+ sier = SSI2_SIER;
+ }
+
+ /* DAI data (word) size */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ srccr |= SSI_SRCCR_WL(16);
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ srccr |= SSI_SRCCR_WL(20);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ srccr |= SSI_SRCCR_WL(24);
+ break;
+ }
+
+ /* enable the RX FIFO and DMA requests */
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
+ srcr |= SSI_SRCR_RFEN0;
+ else
+ srcr |= SSI_SRCR_RFEN1;
+ sier |= SSI_SIER_RDMAE;
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ SSI1_SRCR = srcr;
+ SSI1_SRCCR = srccr;
+ SSI1_SIER = sier;
+ } else {
+ SSI2_SRCR = srcr;
+ SSI2_SRCCR = srccr;
+ SSI2_SIER = sier;
+ }
+
+ return 0;
+}
+
+/*
+ * Should only be called when port is inactive (i.e. SSIEN = 0),
+ * although can be called multiple times by upper layers.
+ */
+int imx_ssi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+
+ int ret;
+
+ /* can't change any parameters while the SSI is running */
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ if (SSI1_SCR & SSI_SCR_SSIEN) {
+ printk(KERN_WARNING "Warning ssi already enabled\n");
+ return 0;
+ }
+ } else {
+ if (SSI2_SCR & SSI_SCR_SSIEN) {
+ printk(KERN_WARNING "Warning ssi already enabled\n");
+ return 0;
+ }
+ }
+
+ /*
+ * Configure both tx and rx with the same settings. This is really
+ * a hardware restriction: the SSI must be disabled before those
+ * values can be changed, so if there is an active audio stream in
+ * one direction, enabling the other direction with different
+ * settings would mean disturbing the running one.
+ */
+ ret = imx_ssi_hw_tx_params(substream, params);
+ if (ret < 0)
+ return ret;
+ return imx_ssi_hw_rx_params(substream, params);
+}
+
+int imx_ssi_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ /* Enable the clocks here to follow the SSI recommended init sequence */
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
+ ret = clk_enable(ssi_clk0);
+ if (ret < 0)
+ printk(KERN_ERR "Unable to enable ssi_clk0\n");
+ } else {
+ ret = clk_enable(ssi_clk1);
+ if (ret < 0)
+ printk(KERN_ERR "Unable to enable ssi_clk1\n");
+ }
+
+ return ret;
+}
+
+static int imx_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ u32 scr;
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
+ scr = SSI1_SCR;
+ else
+ scr = SSI2_SCR;
+
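+ /*
+ * On start the whole port (SSIEN) is enabled together with the
+ * transmitter or receiver; on stop only TE/RE is cleared so that a
+ * stream running in the opposite direction is not disturbed.
+ */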
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ scr |= SSI_SCR_TE | SSI_SCR_SSIEN;
+ else
+ scr |= SSI_SCR_RE | SSI_SCR_SSIEN;
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ scr &= ~SSI_SCR_TE;
+ else
+ scr &= ~SSI_SCR_RE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
+ SSI1_SCR = scr;
+ else
+ SSI2_SCR = scr;
+
+ return 0;
+}
+
+static void imx_ssi_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+
+ /* shut down the SSI if neither Tx nor Rx is active */
+ if (!cpu_dai->active) {
+
+ if (cpu_dai->id == IMX_DAI_SSI0 ||
+ cpu_dai->id == IMX_DAI_SSI2) {
+
+ if (--ssi_active[SSI1_PORT])
+ return;
+
+ SSI1_SCR = 0;
+ clk_disable(ssi_clk0);
+ } else {
+ if (--ssi_active[SSI2_PORT])
+ return;
+ SSI2_SCR = 0;
+ clk_disable(ssi_clk1);
+ }
+ }
+}
+
+#ifdef CONFIG_PM
+static int imx_ssi_suspend(struct platform_device *dev,
+ struct snd_soc_dai *dai)
+{
+ return 0;
+}
+
+static int imx_ssi_resume(struct platform_device *pdev,
+ struct snd_soc_dai *dai)
+{
+ return 0;
+}
+
+#else
+#define imx_ssi_suspend NULL
+#define imx_ssi_resume NULL
+#endif
+
+#define IMX_SSI_RATES \
+ (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | \
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | \
+ SNDRV_PCM_RATE_96000)
+
+#define IMX_SSI_BITS \
+ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops imx_ssi_pcm_dai_ops = {
+ .startup = imx_ssi_startup,
+ .shutdown = imx_ssi_shutdown,
+ .trigger = imx_ssi_trigger,
+ .prepare = imx_ssi_prepare,
+ .hw_params = imx_ssi_hw_params,
+ .set_sysclk = imx_ssi_set_dai_sysclk,
+ .set_clkdiv = imx_ssi_set_dai_clkdiv,
+ .set_fmt = imx_ssi_set_dai_fmt,
+ .set_tdm_slot = imx_ssi_set_dai_tdm_slot,
+};
+
+struct snd_soc_dai imx_ssi_pcm_dai[] = {
+{
+ .name = "imx-i2s-1-0",
+ .id = IMX_DAI_SSI0,
+ .suspend = imx_ssi_suspend,
+ .resume = imx_ssi_resume,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .formats = IMX_SSI_BITS,
+ .rates = IMX_SSI_RATES,},
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .formats = IMX_SSI_BITS,
+ .rates = IMX_SSI_RATES,},
+ .ops = &imx_ssi_pcm_dai_ops,
+},
+{
+ .name = "imx-i2s-2-0",
+ .id = IMX_DAI_SSI1,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .formats = IMX_SSI_BITS,
+ .rates = IMX_SSI_RATES,},
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .formats = IMX_SSI_BITS,
+ .rates = IMX_SSI_RATES,},
+ .ops = &imx_ssi_pcm_dai_ops,
+},
+{
+ .name = "imx-i2s-1-1",
+ .id = IMX_DAI_SSI2,
+ .suspend = imx_ssi_suspend,
+ .resume = imx_ssi_resume,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .formats = IMX_SSI_BITS,
+ .rates = IMX_SSI_RATES,},
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .formats = IMX_SSI_BITS,
+ .rates = IMX_SSI_RATES,},
+ .ops = &imx_ssi_pcm_dai_ops,
+},
+{
+ .name = "imx-i2s-2-1",
+ .id = IMX_DAI_SSI3,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .formats = IMX_SSI_BITS,
+ .rates = IMX_SSI_RATES,},
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .formats = IMX_SSI_BITS,
+ .rates = IMX_SSI_RATES,},
+ .ops = &imx_ssi_pcm_dai_ops,
+},
+};
+EXPORT_SYMBOL_GPL(imx_ssi_pcm_dai);
+
+static int __init imx_ssi_init(void)
+{
+ return snd_soc_register_dais(imx_ssi_pcm_dai,
+ ARRAY_SIZE(imx_ssi_pcm_dai));
+}
+
+static void __exit imx_ssi_exit(void)
+{
+ snd_soc_unregister_dais(imx_ssi_pcm_dai,
+ ARRAY_SIZE(imx_ssi_pcm_dai));
+}
+
+module_init(imx_ssi_init);
+module_exit(imx_ssi_exit);
+MODULE_AUTHOR("Liam Girdwood, liam.girdwood@wolfsonmicro.com");
+MODULE_DESCRIPTION("i.MX ASoC I2S driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/imx/mxc-ssi.h b/sound/soc/imx/mxc-ssi.h
new file mode 100644
index 000000000000..12bbdc9c7ecd
--- /dev/null
+++ b/sound/soc/imx/mxc-ssi.h
@@ -0,0 +1,238 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _IMX_SSI_H
+#define _IMX_SSI_H
+
+#include <mach/hardware.h>
+
+/* SSI regs definition - MOVE to /arch/arm/plat-mxc/include/mach/ when stable */
+#define SSI1_IO_BASE_ADDR IO_ADDRESS(SSI1_BASE_ADDR)
+#define SSI2_IO_BASE_ADDR IO_ADDRESS(SSI2_BASE_ADDR)
+
+#define STX0 0x00
+#define STX1 0x04
+#define SRX0 0x08
+#define SRX1 0x0c
+#define SCR 0x10
+#define SISR 0x14
+#define SIER 0x18
+#define STCR 0x1c
+#define SRCR 0x20
+#define STCCR 0x24
+#define SRCCR 0x28
+#define SFCSR 0x2c
+#define STR 0x30
+#define SOR 0x34
+#define SACNT 0x38
+#define SACADD 0x3c
+#define SACDAT 0x40
+#define SATAG 0x44
+#define STMSK 0x48
+#define SRMSK 0x4c
+
+#define SSI1_STX0 (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STX0)))
+#define SSI1_STX1 (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STX1)))
+#define SSI1_SRX0 (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRX0)))
+#define SSI1_SRX1 (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRX1)))
+#define SSI1_SCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SCR)))
+#define SSI1_SISR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SISR)))
+#define SSI1_SIER (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SIER)))
+#define SSI1_STCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STCR)))
+#define SSI1_SRCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRCR)))
+#define SSI1_STCCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STCCR)))
+#define SSI1_SRCCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRCCR)))
+#define SSI1_SFCSR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SFCSR)))
+#define SSI1_STR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STR)))
+#define SSI1_SOR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SOR)))
+#define SSI1_SACNT (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SACNT)))
+#define SSI1_SACADD (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SACADD)))
+#define SSI1_SACDAT (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SACDAT)))
+#define SSI1_SATAG (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SATAG)))
+#define SSI1_STMSK (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STMSK)))
+#define SSI1_SRMSK (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRMSK)))
+
+
+#define SSI2_STX0 (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STX0)))
+#define SSI2_STX1 (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STX1)))
+#define SSI2_SRX0 (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRX0)))
+#define SSI2_SRX1 (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRX1)))
+#define SSI2_SCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SCR)))
+#define SSI2_SISR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SISR)))
+#define SSI2_SIER (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SIER)))
+#define SSI2_STCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STCR)))
+#define SSI2_SRCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRCR)))
+#define SSI2_STCCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STCCR)))
+#define SSI2_SRCCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRCCR)))
+#define SSI2_SFCSR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SFCSR)))
+#define SSI2_STR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STR)))
+#define SSI2_SOR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SOR)))
+#define SSI2_SACNT (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SACNT)))
+#define SSI2_SACADD (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SACADD)))
+#define SSI2_SACDAT (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SACDAT)))
+#define SSI2_SATAG (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SATAG)))
+#define SSI2_STMSK (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STMSK)))
+#define SSI2_SRMSK (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRMSK)))
+
+#define SSI_SCR_CLK_IST (1 << 9)
+#define SSI_SCR_TCH_EN (1 << 8)
+#define SSI_SCR_SYS_CLK_EN (1 << 7)
+#define SSI_SCR_I2S_MODE_NORM (0 << 5)
+#define SSI_SCR_I2S_MODE_MSTR (1 << 5)
+#define SSI_SCR_I2S_MODE_SLAVE (2 << 5)
+#define SSI_SCR_SYN (1 << 4)
+#define SSI_SCR_NET (1 << 3)
+#define SSI_SCR_RE (1 << 2)
+#define SSI_SCR_TE (1 << 1)
+#define SSI_SCR_SSIEN (1 << 0)
+
+#define SSI_SISR_CMDAU (1 << 18)
+#define SSI_SISR_CMDDU (1 << 17)
+#define SSI_SISR_RXT (1 << 16)
+#define SSI_SISR_RDR1 (1 << 15)
+#define SSI_SISR_RDR0 (1 << 14)
+#define SSI_SISR_TDE1 (1 << 13)
+#define SSI_SISR_TDE0 (1 << 12)
+#define SSI_SISR_ROE1 (1 << 11)
+#define SSI_SISR_ROE0 (1 << 10)
+#define SSI_SISR_TUE1 (1 << 9)
+#define SSI_SISR_TUE0 (1 << 8)
+#define SSI_SISR_TFS (1 << 7)
+#define SSI_SISR_RFS (1 << 6)
+#define SSI_SISR_TLS (1 << 5)
+#define SSI_SISR_RLS (1 << 4)
+#define SSI_SISR_RFF1 (1 << 3)
+#define SSI_SISR_RFF0 (1 << 2)
+#define SSI_SISR_TFE1 (1 << 1)
+#define SSI_SISR_TFE0 (1 << 0)
+
+#define SSI_SIER_RDMAE (1 << 22)
+#define SSI_SIER_RIE (1 << 21)
+#define SSI_SIER_TDMAE (1 << 20)
+#define SSI_SIER_TIE (1 << 19)
+#define SSI_SIER_CMDAU_EN (1 << 18)
+#define SSI_SIER_CMDDU_EN (1 << 17)
+#define SSI_SIER_RXT_EN (1 << 16)
+#define SSI_SIER_RDR1_EN (1 << 15)
+#define SSI_SIER_RDR0_EN (1 << 14)
+#define SSI_SIER_TDE1_EN (1 << 13)
+#define SSI_SIER_TDE0_EN (1 << 12)
+#define SSI_SIER_ROE1_EN (1 << 11)
+#define SSI_SIER_ROE0_EN (1 << 10)
+#define SSI_SIER_TUE1_EN (1 << 9)
+#define SSI_SIER_TUE0_EN (1 << 8)
+#define SSI_SIER_TFS_EN (1 << 7)
+#define SSI_SIER_RFS_EN (1 << 6)
+#define SSI_SIER_TLS_EN (1 << 5)
+#define SSI_SIER_RLS_EN (1 << 4)
+#define SSI_SIER_RFF1_EN (1 << 3)
+#define SSI_SIER_RFF0_EN (1 << 2)
+#define SSI_SIER_TFE1_EN (1 << 1)
+#define SSI_SIER_TFE0_EN (1 << 0)
+
+#define SSI_STCR_TXBIT0 (1 << 9)
+#define SSI_STCR_TFEN1 (1 << 8)
+#define SSI_STCR_TFEN0 (1 << 7)
+#define SSI_STCR_TFDIR (1 << 6)
+#define SSI_STCR_TXDIR (1 << 5)
+#define SSI_STCR_TSHFD (1 << 4)
+#define SSI_STCR_TSCKP (1 << 3)
+#define SSI_STCR_TFSI (1 << 2)
+#define SSI_STCR_TFSL (1 << 1)
+#define SSI_STCR_TEFS (1 << 0)
+
+#define SSI_SRCR_RXBIT0 (1 << 9)
+#define SSI_SRCR_RFEN1 (1 << 8)
+#define SSI_SRCR_RFEN0 (1 << 7)
+#define SSI_SRCR_RFDIR (1 << 6)
+#define SSI_SRCR_RXDIR (1 << 5)
+#define SSI_SRCR_RSHFD (1 << 4)
+#define SSI_SRCR_RSCKP (1 << 3)
+#define SSI_SRCR_RFSI (1 << 2)
+#define SSI_SRCR_RFSL (1 << 1)
+#define SSI_SRCR_REFS (1 << 0)
+
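+/* The WL field encodes the word length as (bits - 2) / 2, e.g. WL(16) = 7 */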
+#define SSI_STCCR_DIV2 (1 << 18)
+#define SSI_STCCR_PSR (1 << 15)
+#define SSI_STCCR_WL(x) ((((x) - 2) >> 1) << 13)
+#define SSI_STCCR_DC(x) (((x) & 0x1f) << 8)
+#define SSI_STCCR_PM(x) (((x) & 0xff) << 0)
+#define SSI_STCCR_WL_MASK (0xf << 13)
+#define SSI_STCCR_DC_MASK (0x1f << 8)
+#define SSI_STCCR_PM_MASK (0xff << 0)
+
+#define SSI_SRCCR_DIV2 (1 << 18)
+#define SSI_SRCCR_PSR (1 << 15)
+#define SSI_SRCCR_WL(x) ((((x) - 2) >> 1) << 13)
+#define SSI_SRCCR_DC(x) (((x) & 0x1f) << 8)
+#define SSI_SRCCR_PM(x) (((x) & 0xff) << 0)
+#define SSI_SRCCR_WL_MASK (0xf << 13)
+#define SSI_SRCCR_DC_MASK (0x1f << 8)
+#define SSI_SRCCR_PM_MASK (0xff << 0)
+
+
+#define SSI_SFCSR_RFCNT1(x) (((x) & 0xf) << 28)
+#define SSI_SFCSR_TFCNT1(x) (((x) & 0xf) << 24)
+#define SSI_SFCSR_RFWM1(x) (((x) & 0xf) << 20)
+#define SSI_SFCSR_TFWM1(x) (((x) & 0xf) << 16)
+#define SSI_SFCSR_RFCNT0(x) (((x) & 0xf) << 12)
+#define SSI_SFCSR_TFCNT0(x) (((x) & 0xf) << 8)
+#define SSI_SFCSR_RFWM0(x) (((x) & 0xf) << 4)
+#define SSI_SFCSR_TFWM0(x) (((x) & 0xf) << 0)
+
+#define SSI_STR_TEST (1 << 15)
+#define SSI_STR_RCK2TCK (1 << 14)
+#define SSI_STR_RFS2TFS (1 << 13)
+#define SSI_STR_RXSTATE(x) (((x) & 0xf) << 8)
+#define SSI_STR_TXD2RXD (1 << 7)
+#define SSI_STR_TCK2RCK (1 << 6)
+#define SSI_STR_TFS2RFS (1 << 5)
+#define SSI_STR_TXSTATE(x) (((x) & 0xf) << 0)
+
+#define SSI_SOR_CLKOFF (1 << 6)
+#define SSI_SOR_RX_CLR (1 << 5)
+#define SSI_SOR_TX_CLR (1 << 4)
+#define SSI_SOR_INIT (1 << 3)
+#define SSI_SOR_WAIT(x) (((x) & 0x3) << 1)
+#define SSI_SOR_SYNRST (1 << 0)
+
+#define SSI_SACNT_FRDIV(x) (((x) & 0x3f) << 5)
+#define SSI_SACNT_WR (1 << 4)
+#define SSI_SACNT_RD (1 << 3)
+#define SSI_SACNT_TIF (1 << 2)
+#define SSI_SACNT_FV (1 << 1)
+#define SSI_SACNT_AC97EN (1 << 0)
+
+/* Watermarks for FIFO's */
+#define TXFIFO_WATERMARK 0x4
+#define RXFIFO_WATERMARK 0x4
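+/*
+ * The same watermark value is programmed into SFCSR by mxc-ssi.c and used
+ * as the DMA burst length by mx1_mx2-pcm.c.
+ */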
+
+/* i.MX DAI SSP ID's */
+#define IMX_DAI_SSI0 0 /* SSI1 FIFO 0 */
+#define IMX_DAI_SSI1 1 /* SSI1 FIFO 1 */
+#define IMX_DAI_SSI2 2 /* SSI2 FIFO 0 */
+#define IMX_DAI_SSI3 3 /* SSI2 FIFO 1 */
+
+/* SSI clock sources */
+#define IMX_SSP_SYS_CLK 0
+
+/* SSI audio dividers */
+#define IMX_SSI_TX_DIV_2 0
+#define IMX_SSI_TX_DIV_PSR 1
+#define IMX_SSI_TX_DIV_PM 2
+#define IMX_SSI_RX_DIV_2 3
+#define IMX_SSI_RX_DIV_PSR 4
+#define IMX_SSI_RX_DIV_PM 5
+
+
+/* SSI Div 2 */
+#define IMX_SSI_DIV_2_OFF (~SSI_STCCR_DIV2)
+#define IMX_SSI_DIV_2_ON SSI_STCCR_DIV2
+
+extern struct snd_soc_dai imx_ssi_pcm_dai[4];
+extern int get_ssi_clk(int ssi, struct device *dev);
+extern void put_ssi_clk(int ssi);
+#endif /* _IMX_SSI_H */
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index b771238662b6..2dee9839be86 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -15,6 +15,14 @@ config SND_OMAP_SOC_N810
help
Say Y if you want to add support for SoC audio on Nokia N810.
+config SND_OMAP_SOC_AMS_DELTA
+ tristate "SoC Audio support for Amstrad E3 (Delta) videophone"
+ depends on SND_OMAP_SOC && MACH_AMS_DELTA
+ select SND_OMAP_SOC_MCBSP
+ select SND_SOC_CX20442
+ help
+ Say Y if you want to add support for SoC audio on Amstrad Delta.
+
config SND_OMAP_SOC_OSK5912
tristate "SoC Audio support for omap osk5912"
depends on SND_OMAP_SOC && MACH_OMAP_OSK && I2C
@@ -72,4 +80,11 @@ config SND_OMAP_SOC_OMAP3_BEAGLE
help
Say Y if you want to add support for SoC audio on the Beagleboard.
+config SND_OMAP_SOC_ZOOM2
+ tristate "SoC Audio support for Zoom2"
+ depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP_ZOOM2
+ select SND_OMAP_SOC_MCBSP
+ select SND_SOC_TWL4030
+ help
+ Say Y if you want to add support for SoC audio on the Zoom2 board.
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index a37f49862389..02d69471dcb5 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SND_OMAP_SOC_MCBSP) += snd-soc-omap-mcbsp.o
# OMAP Machine Support
snd-soc-n810-objs := n810.o
+snd-soc-ams-delta-objs := ams-delta.o
snd-soc-osk5912-objs := osk5912.o
snd-soc-overo-objs := overo.o
snd-soc-omap2evm-objs := omap2evm.o
@@ -14,8 +15,10 @@ snd-soc-omap3evm-objs := omap3evm.o
snd-soc-sdp3430-objs := sdp3430.o
snd-soc-omap3pandora-objs := omap3pandora.o
snd-soc-omap3beagle-objs := omap3beagle.o
+snd-soc-zoom2-objs := zoom2.o
obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
+obj-$(CONFIG_SND_OMAP_SOC_AMS_DELTA) += snd-soc-ams-delta.o
obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o
obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o
obj-$(CONFIG_MACH_OMAP2EVM) += snd-soc-omap2evm.o
@@ -23,3 +26,4 @@ obj-$(CONFIG_MACH_OMAP3EVM) += snd-soc-omap3evm.o
obj-$(CONFIG_SND_OMAP_SOC_SDP3430) += snd-soc-sdp3430.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
+obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
new file mode 100644
index 000000000000..5a5166ac7279
--- /dev/null
+++ b/sound/soc/omap/ams-delta.c
@@ -0,0 +1,646 @@
+/*
+ * ams-delta.c -- SoC audio for Amstrad E3 (Delta) videophone
+ *
+ * Copyright (C) 2009 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *
+ * Initially based on sound/soc/omap/osk5912.x
+ * Copyright (C) 2008 Mistral Solutions
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/tty.h>
+
+#include <sound/soc-dapm.h>
+#include <sound/jack.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/board-ams-delta.h>
+#include <mach/mcbsp.h>
+
+#include "omap-mcbsp.h"
+#include "omap-pcm.h"
+#include "../codecs/cx20442.h"
+
+
+/* Board specific DAPM widgets */
+const struct snd_soc_dapm_widget ams_delta_dapm_widgets[] = {
+ /* Handset */
+ SND_SOC_DAPM_MIC("Mouthpiece", NULL),
+ SND_SOC_DAPM_HP("Earpiece", NULL),
+ /* Handsfree/Speakerphone */
+ SND_SOC_DAPM_MIC("Microphone", NULL),
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+};
+
+/* How they are connected to codec pins */
+static const struct snd_soc_dapm_route ams_delta_audio_map[] = {
+ {"TELIN", NULL, "Mouthpiece"},
+ {"Earpiece", NULL, "TELOUT"},
+
+ {"MIC", NULL, "Microphone"},
+ {"Speaker", NULL, "SPKOUT"},
+};
+
+/*
+ * Controls, functional after the modem line discipline is activated.
+ */
+
+/* Virtual switch: audio input/output constellations */
+static const char *ams_delta_audio_mode[] =
+ {"Mixed", "Handset", "Handsfree", "Speakerphone"};
+
+/* Selection <-> pin translation */
+#define AMS_DELTA_MOUTHPIECE 0
+#define AMS_DELTA_EARPIECE 1
+#define AMS_DELTA_MICROPHONE 2
+#define AMS_DELTA_SPEAKER 3
+#define AMS_DELTA_AGC 4
+
+#define AMS_DELTA_MIXED ((1 << AMS_DELTA_EARPIECE) | \
+ (1 << AMS_DELTA_MICROPHONE))
+#define AMS_DELTA_HANDSET ((1 << AMS_DELTA_MOUTHPIECE) | \
+ (1 << AMS_DELTA_EARPIECE))
+#define AMS_DELTA_HANDSFREE ((1 << AMS_DELTA_MICROPHONE) | \
+ (1 << AMS_DELTA_SPEAKER))
+#define AMS_DELTA_SPEAKERPHONE (AMS_DELTA_HANDSFREE | (1 << AMS_DELTA_AGC))
+
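+/*
+ * Pin bitmaps applied for each entry of ams_delta_audio_mode[] above,
+ * kept in the same order as that array.
+ */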
+unsigned short ams_delta_audio_mode_pins[] = {
+ AMS_DELTA_MIXED,
+ AMS_DELTA_HANDSET,
+ AMS_DELTA_HANDSFREE,
+ AMS_DELTA_SPEAKERPHONE,
+};
+
+static unsigned short ams_delta_audio_agc;
+
+static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *control = (struct soc_enum *)kcontrol->private_value;
+ unsigned short pins;
+ int pin, changed = 0;
+
+ /* Refuse any mode changes if we are not able to control the codec. */
+ if (!codec->control_data)
+ return -EUNATCH;
+
+ if (ucontrol->value.enumerated.item[0] >= control->max)
+ return -EINVAL;
+
+ mutex_lock(&codec->mutex);
+
+ /* Translate selection to bitmap */
+ pins = ams_delta_audio_mode_pins[ucontrol->value.enumerated.item[0]];
+
+ /* Setup pins after corresponding bits if changed */
+ pin = !!(pins & (1 << AMS_DELTA_MOUTHPIECE));
+ if (pin != snd_soc_dapm_get_pin_status(codec, "Mouthpiece")) {
+ changed = 1;
+ if (pin)
+ snd_soc_dapm_enable_pin(codec, "Mouthpiece");
+ else
+ snd_soc_dapm_disable_pin(codec, "Mouthpiece");
+ }
+ pin = !!(pins & (1 << AMS_DELTA_EARPIECE));
+ if (pin != snd_soc_dapm_get_pin_status(codec, "Earpiece")) {
+ changed = 1;
+ if (pin)
+ snd_soc_dapm_enable_pin(codec, "Earpiece");
+ else
+ snd_soc_dapm_disable_pin(codec, "Earpiece");
+ }
+ pin = !!(pins & (1 << AMS_DELTA_MICROPHONE));
+ if (pin != snd_soc_dapm_get_pin_status(codec, "Microphone")) {
+ changed = 1;
+ if (pin)
+ snd_soc_dapm_enable_pin(codec, "Microphone");
+ else
+ snd_soc_dapm_disable_pin(codec, "Microphone");
+ }
+ pin = !!(pins & (1 << AMS_DELTA_SPEAKER));
+ if (pin != snd_soc_dapm_get_pin_status(codec, "Speaker")) {
+ changed = 1;
+ if (pin)
+ snd_soc_dapm_enable_pin(codec, "Speaker");
+ else
+ snd_soc_dapm_disable_pin(codec, "Speaker");
+ }
+ pin = !!(pins & (1 << AMS_DELTA_AGC));
+ if (pin != ams_delta_audio_agc) {
+ ams_delta_audio_agc = pin;
+ changed = 1;
+ if (pin)
+ snd_soc_dapm_enable_pin(codec, "AGCIN");
+ else
+ snd_soc_dapm_disable_pin(codec, "AGCIN");
+ }
+ if (changed)
+ snd_soc_dapm_sync(codec);
+
+ mutex_unlock(&codec->mutex);
+
+ return changed;
+}
+
+static int ams_delta_get_audio_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned short pins, mode;
+
+ pins = ((snd_soc_dapm_get_pin_status(codec, "Mouthpiece") <<
+ AMS_DELTA_MOUTHPIECE) |
+ (snd_soc_dapm_get_pin_status(codec, "Earpiece") <<
+ AMS_DELTA_EARPIECE));
+ if (pins)
+ pins |= (snd_soc_dapm_get_pin_status(codec, "Microphone") <<
+ AMS_DELTA_MICROPHONE);
+ else
+ pins = ((snd_soc_dapm_get_pin_status(codec, "Microphone") <<
+ AMS_DELTA_MICROPHONE) |
+ (snd_soc_dapm_get_pin_status(codec, "Speaker") <<
+ AMS_DELTA_SPEAKER) |
+ (ams_delta_audio_agc << AMS_DELTA_AGC));
+
+ for (mode = 0; mode < ARRAY_SIZE(ams_delta_audio_mode); mode++)
+ if (pins == ams_delta_audio_mode_pins[mode])
+ break;
+
+ if (mode >= ARRAY_SIZE(ams_delta_audio_mode))
+ return -EINVAL;
+
+ ucontrol->value.enumerated.item[0] = mode;
+
+ return 0;
+}
+
+static const struct soc_enum ams_delta_audio_enum[] = {
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ams_delta_audio_mode),
+ ams_delta_audio_mode),
+};
+
+static const struct snd_kcontrol_new ams_delta_audio_controls[] = {
+ SOC_ENUM_EXT("Audio Mode", ams_delta_audio_enum[0],
+ ams_delta_get_audio_mode, ams_delta_set_audio_mode),
+};
+
+/* Hook switch */
+static struct snd_soc_jack ams_delta_hook_switch;
+static struct snd_soc_jack_gpio ams_delta_hook_switch_gpios[] = {
+ {
+ .gpio = 4,
+ .name = "hook_switch",
+ .report = SND_JACK_HEADSET,
+ .invert = 1,
+ .debounce_time = 150,
+ }
+};
+
+/* After we are able to control the codec over the modem,
+ * the hook switch can be used for dynamic DAPM reconfiguration. */
+static struct snd_soc_jack_pin ams_delta_hook_switch_pins[] = {
+ /* Handset */
+ {
+ .pin = "Mouthpiece",
+ .mask = SND_JACK_MICROPHONE,
+ },
+ {
+ .pin = "Earpiece",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ /* Handsfree */
+ {
+ .pin = "Microphone",
+ .mask = SND_JACK_MICROPHONE,
+ .invert = 1,
+ },
+ {
+ .pin = "Speaker",
+ .mask = SND_JACK_HEADPHONE,
+ .invert = 1,
+ },
+};
+
+
+/*
+ * Modem line discipline, required for making above controls functional.
+ * Activated from userspace with ldattach, possibly invoked from udev rule.
+ */
+
+/* To actually apply any modem controlled configuration changes to the codec,
+ * we must connect codec DAI pins to the modem for a moment. Be careful not
+ * to interfere with our digital mute function that shares the same hardware. */
+static struct timer_list cx81801_timer;
+static bool cx81801_cmd_pending;
+static bool ams_delta_muted;
+static DEFINE_SPINLOCK(ams_delta_lock);
+
+static void cx81801_timeout(unsigned long data)
+{
+ int muted;
+
+ spin_lock(&ams_delta_lock);
+ cx81801_cmd_pending = 0;
+ muted = ams_delta_muted;
+ spin_unlock(&ams_delta_lock);
+
+ /* Reconnect the codec DAI back from the modem to the CPU DAI
+ * only if digital mute still off */
+ if (!muted)
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_CODEC, 0);
+}
+
+/* Line discipline .open() */
+static int cx81801_open(struct tty_struct *tty)
+{
+ return v253_ops.open(tty);
+}
+
+/* Line discipline .close() */
+static void cx81801_close(struct tty_struct *tty)
+{
+ struct snd_soc_codec *codec = tty->disc_data;
+
+ del_timer_sync(&cx81801_timer);
+
+ v253_ops.close(tty);
+
+ /* Prevent the hook switch from further changing the DAPM pins */
+ INIT_LIST_HEAD(&ams_delta_hook_switch.pins);
+
+ /* Revert back to default audio input/output constellation */
+ snd_soc_dapm_disable_pin(codec, "Mouthpiece");
+ snd_soc_dapm_enable_pin(codec, "Earpiece");
+ snd_soc_dapm_enable_pin(codec, "Microphone");
+ snd_soc_dapm_disable_pin(codec, "Speaker");
+ snd_soc_dapm_disable_pin(codec, "AGCIN");
+ snd_soc_dapm_sync(codec);
+}
+
+/* Line discipline .hangup() */
+static int cx81801_hangup(struct tty_struct *tty)
+{
+ cx81801_close(tty);
+ return 0;
+}
+
+/* Line discipline .receive_buf() */
+static void cx81801_receive(struct tty_struct *tty,
+ const unsigned char *cp, char *fp, int count)
+{
+ struct snd_soc_codec *codec = tty->disc_data;
+ const unsigned char *c;
+ int apply, ret;
+
+ if (!codec->control_data) {
+ /* First modem response, complete setup procedure */
+
+ /* Initialize timer used for config pulse generation */
+ setup_timer(&cx81801_timer, cx81801_timeout, 0);
+
+ v253_ops.receive_buf(tty, cp, fp, count);
+
+ /* Link hook switch to DAPM pins */
+ ret = snd_soc_jack_add_pins(&ams_delta_hook_switch,
+ ARRAY_SIZE(ams_delta_hook_switch_pins),
+ ams_delta_hook_switch_pins);
+ if (ret)
+ dev_warn(codec->socdev->card->dev,
+ "Failed to link hook switch to DAPM pins, "
+ "will continue with hook switch unlinked.\n");
+
+ return;
+ }
+
+ v253_ops.receive_buf(tty, cp, fp, count);
+
+ for (c = &cp[count - 1]; c >= cp; c--) {
+ if (*c != '\r')
+ continue;
+ /* Complete modem response received, apply config to codec */
+
+ spin_lock_bh(&ams_delta_lock);
+ mod_timer(&cx81801_timer, jiffies + msecs_to_jiffies(150));
+ apply = !ams_delta_muted && !cx81801_cmd_pending;
+ cx81801_cmd_pending = 1;
+ spin_unlock_bh(&ams_delta_lock);
+
+ /* Apply config pulse by connecting the codec to the modem
+ * if not already done */
+ if (apply)
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_CODEC,
+ AMS_DELTA_LATCH2_MODEM_CODEC);
+ break;
+ }
+}
+
+/* Line discipline .write_wakeup() */
+static void cx81801_wakeup(struct tty_struct *tty)
+{
+ v253_ops.write_wakeup(tty);
+}
+
+static struct tty_ldisc_ops cx81801_ops = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "cx81801",
+ .owner = THIS_MODULE,
+ .open = cx81801_open,
+ .close = cx81801_close,
+ .hangup = cx81801_hangup,
+ .receive_buf = cx81801_receive,
+ .write_wakeup = cx81801_wakeup,
+};
+
+
+/*
+ * Even if not very useful, the sound card can still work without any of the
+ * above functionality activated. You can still control its audio input/output
+ * constellation and speakerphone gain from userspace by issuing AT commands
+ * over the modem port.
+ */
+
+static int ams_delta_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ /* Set cpu DAI configuration */
+ return snd_soc_dai_set_fmt(rtd->dai->cpu_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+}
+
+static struct snd_soc_ops ams_delta_ops = {
+ .hw_params = ams_delta_hw_params,
+};
+
+
+/* Board specific codec bias level control */
+static int ams_delta_set_bias_level(struct snd_soc_card *card,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_codec *codec = card->codec;
+
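+ /* The modem is held in reset (NRESET low) whenever the codec bias is off */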
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF)
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
+ AMS_DELTA_LATCH2_MODEM_NRESET);
+ break;
+ case SND_SOC_BIAS_OFF:
+ if (codec->bias_level != SND_SOC_BIAS_OFF)
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
+ 0);
+ }
+ codec->bias_level = level;
+
+ return 0;
+}
+
+/* Digital mute implemented using modem/CPU multiplexer.
+ * Shares hardware with codec config pulse generation */
+static bool ams_delta_muted = 1;
+
+static int ams_delta_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ int apply;
+
+ if (ams_delta_muted == mute)
+ return 0;
+
+ spin_lock_bh(&ams_delta_lock);
+ ams_delta_muted = mute;
+ apply = !cx81801_cmd_pending;
+ spin_unlock_bh(&ams_delta_lock);
+
+ if (apply)
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_CODEC,
+ mute ? AMS_DELTA_LATCH2_MODEM_CODEC : 0);
+ return 0;
+}
+
+/* Our codec DAI probably doesn't have its own .ops structure */
+static struct snd_soc_dai_ops ams_delta_dai_ops = {
+ .digital_mute = ams_delta_digital_mute,
+};
+
+/* Will be used if the codec ever has its own digital_mute function */
+static int ams_delta_startup(struct snd_pcm_substream *substream)
+{
+ return ams_delta_digital_mute(NULL, 0);
+}
+
+static void ams_delta_shutdown(struct snd_pcm_substream *substream)
+{
+ ams_delta_digital_mute(NULL, 1);
+}
+
+
+/*
+ * Card initialization
+ */
+
+static int ams_delta_cx20442_init(struct snd_soc_codec *codec)
+{
+ struct snd_soc_dai *codec_dai = codec->dai;
+ struct snd_soc_card *card = codec->socdev->card;
+ int ret;
+ /* Codec is ready, now add/activate board specific controls */
+
+ /* Set up digital mute if not provided by the codec */
+ if (!codec_dai->ops) {
+ codec_dai->ops = &ams_delta_dai_ops;
+ } else if (!codec_dai->ops->digital_mute) {
+ codec_dai->ops->digital_mute = ams_delta_digital_mute;
+ } else {
+ ams_delta_ops.startup = ams_delta_startup;
+ ams_delta_ops.shutdown = ams_delta_shutdown;
+ }
+
+ /* Set codec bias level */
+ ams_delta_set_bias_level(card, SND_SOC_BIAS_STANDBY);
+
+ /* Add hook switch - can be used to control the codec from userspace
+ * even if line discipline fails */
+ ret = snd_soc_jack_new(card, "hook_switch",
+ SND_JACK_HEADSET, &ams_delta_hook_switch);
+ if (ret)
+ dev_warn(card->dev,
+ "Failed to allocate resources for hook switch, "
+ "will continue without one.\n");
+ else {
+ ret = snd_soc_jack_add_gpios(&ams_delta_hook_switch,
+ ARRAY_SIZE(ams_delta_hook_switch_gpios),
+ ams_delta_hook_switch_gpios);
+ if (ret)
+ dev_warn(card->dev,
+ "Failed to set up hook switch GPIO line, "
+ "will continue with hook switch inactive.\n");
+ }
+
+ /* Register optional line discipline for over the modem control */
+ ret = tty_register_ldisc(N_V253, &cx81801_ops);
+ if (ret) {
+ dev_warn(card->dev,
+ "Failed to register line discipline, "
+ "will continue without any controls.\n");
+ return 0;
+ }
+
+ /* Add board specific DAPM widgets and routes */
+ ret = snd_soc_dapm_new_controls(codec, ams_delta_dapm_widgets,
+ ARRAY_SIZE(ams_delta_dapm_widgets));
+ if (ret) {
+ dev_warn(card->dev,
+ "Failed to register DAPM controls, "
+ "will continue without any.\n");
+ return 0;
+ }
+
+ ret = snd_soc_dapm_add_routes(codec, ams_delta_audio_map,
+ ARRAY_SIZE(ams_delta_audio_map));
+ if (ret) {
+ dev_warn(card->dev,
+ "Failed to set up DAPM routes, "
+ "will continue with codec default map.\n");
+ return 0;
+ }
+
+ /* Set up initial pin constellation */
+ snd_soc_dapm_disable_pin(codec, "Mouthpiece");
+ snd_soc_dapm_enable_pin(codec, "Earpiece");
+ snd_soc_dapm_enable_pin(codec, "Microphone");
+ snd_soc_dapm_disable_pin(codec, "Speaker");
+ snd_soc_dapm_disable_pin(codec, "AGCIN");
+ snd_soc_dapm_disable_pin(codec, "AGCOUT");
+ snd_soc_dapm_sync(codec);
+
+ /* Add virtual switch */
+ ret = snd_soc_add_controls(codec, ams_delta_audio_controls,
+ ARRAY_SIZE(ams_delta_audio_controls));
+ if (ret)
+ dev_warn(card->dev,
+ "Failed to register audio mode control, "
+ "will continue without it.\n");
+
+ return 0;
+}
+
+/* DAI glue - connects codec <--> CPU */
+static struct snd_soc_dai_link ams_delta_dai_link = {
+ .name = "CX20442",
+ .stream_name = "CX20442",
+ .cpu_dai = &omap_mcbsp_dai[0],
+ .codec_dai = &cx20442_dai,
+ .init = ams_delta_cx20442_init,
+ .ops = &ams_delta_ops,
+};
+
+/* Audio card driver */
+static struct snd_soc_card ams_delta_audio_card = {
+ .name = "AMS_DELTA",
+ .platform = &omap_soc_platform,
+ .dai_link = &ams_delta_dai_link,
+ .num_links = 1,
+ .set_bias_level = ams_delta_set_bias_level,
+};
+
+/* Audio subsystem */
+static struct snd_soc_device ams_delta_snd_soc_device = {
+ .card = &ams_delta_audio_card,
+ .codec_dev = &cx20442_codec_dev,
+};
+
+/* Module init/exit */
+static struct platform_device *ams_delta_audio_platform_device;
+static struct platform_device *cx20442_platform_device;
+
+static int __init ams_delta_module_init(void)
+{
+ int ret;
+
+ if (!(machine_is_ams_delta()))
+ return -ENODEV;
+
+ ams_delta_audio_platform_device =
+ platform_device_alloc("soc-audio", -1);
+ if (!ams_delta_audio_platform_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(ams_delta_audio_platform_device,
+ &ams_delta_snd_soc_device);
+ ams_delta_snd_soc_device.dev = &ams_delta_audio_platform_device->dev;
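+ /* Use McBSP1 as the CPU DAI port */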
+ *(unsigned int *)ams_delta_dai_link.cpu_dai->private_data = OMAP_MCBSP1;
+
+ ret = platform_device_add(ams_delta_audio_platform_device);
+ if (ret)
+ goto err;
+
+ /*
+ * Codec platform device could be registered from elsewhere (board?),
+ * but I do it here as it makes sense only if used with the card.
+ */
+ cx20442_platform_device = platform_device_register_simple("cx20442",
+ -1, NULL, 0);
+ return 0;
+err:
+ platform_device_put(ams_delta_audio_platform_device);
+ return ret;
+}
+module_init(ams_delta_module_init);
+
+static void __exit ams_delta_module_exit(void)
+{
+ struct snd_soc_codec *codec;
+ struct tty_struct *tty;
+
+ if (ams_delta_audio_card.codec) {
+ codec = ams_delta_audio_card.codec;
+
+ if (codec->control_data) {
+ tty = codec->control_data;
+
+ tty_hangup(tty);
+ }
+ }
+
+ if (tty_unregister_ldisc(N_V253) != 0)
+ dev_warn(&ams_delta_audio_platform_device->dev,
+ "failed to unregister V253 line discipline\n");
+
+ snd_soc_jack_free_gpios(&ams_delta_hook_switch,
+ ARRAY_SIZE(ams_delta_hook_switch_gpios),
+ ams_delta_hook_switch_gpios);
+
+ /* Keep modem power on */
+ ams_delta_set_bias_level(&ams_delta_audio_card, SND_SOC_BIAS_STANDBY);
+
+ platform_device_unregister(cx20442_platform_device);
+ platform_device_unregister(ams_delta_audio_platform_device);
+}
+module_exit(ams_delta_module_exit);
+
+MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_DESCRIPTION("ALSA SoC driver for Amstrad E3 (Delta) videophone");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index b60b1dfbc435..0a505938e42b 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -22,6 +22,7 @@
*/
#include <linux/clk.h>
+#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -322,8 +323,6 @@ static struct snd_soc_card snd_soc_n810 = {
/* Audio private data */
static struct aic3x_setup_data n810_aic33_setup = {
- .i2c_bus = 2,
- .i2c_address = 0x18,
.gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED,
.gpio_func[1] = AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT,
};
@@ -337,6 +336,13 @@ static struct snd_soc_device n810_snd_devdata = {
static struct platform_device *n810_snd_device;
+/* temporary i2c device creation until this can be moved into the machine
+ * support file.
+*/
+static struct i2c_board_info i2c_device[] = {
+ { I2C_BOARD_INFO("tlv320aic3x", 0x1b), }
+};
+
static int __init n810_soc_init(void)
{
int err;
@@ -345,6 +351,8 @@ static int __init n810_soc_init(void)
if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax()))
return -ENODEV;
+ i2c_register_board_info(1, i2c_device, ARRAY_SIZE(i2c_device));
+
n810_snd_device = platform_device_alloc("soc-audio", -1);
if (!n810_snd_device)
return -ENOMEM;
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index a5d46a7b196a..3341f49402ca 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -139,27 +139,67 @@ static const unsigned long omap34xx_mcbsp_port[][2] = {
static const unsigned long omap34xx_mcbsp_port[][2] = {};
#endif
+static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+ int dma_op_mode = omap_mcbsp_get_dma_op_mode(mcbsp_data->bus_id);
+ int samples;
+
+ /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
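+ /* Threshold mode: one period worth of 16-bit samples; element mode: a single sample */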
+ if (dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
+ samples = snd_pcm_lib_period_bytes(substream) >> 1;
+ else
+ samples = 1;
+
+ /* Configure McBSP internal buffer usage */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ omap_mcbsp_set_tx_threshold(mcbsp_data->bus_id, samples - 1);
+ else
+ omap_mcbsp_set_rx_threshold(mcbsp_data->bus_id, samples - 1);
+}
+
static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+ int bus_id = mcbsp_data->bus_id;
int err = 0;
- if (cpu_is_omap343x() && mcbsp_data->bus_id == 1) {
+ if (!cpu_dai->active)
+ err = omap_mcbsp_request(bus_id);
+
+ if (cpu_is_omap343x()) {
+ int dma_op_mode = omap_mcbsp_get_dma_op_mode(bus_id);
+ int max_period;
+
/*
* McBSP2 in OMAP3 has 1024 * 32-bit internal audio buffer.
* Set constraint for minimum buffer size to the same as the FIFO
* size in order to avoid underruns in playback startup because
* HW is keeping the DMA request active until FIFO is filled.
*/
- snd_pcm_hw_constraint_minmax(substream->runtime,
- SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 4096, UINT_MAX);
- }
+ if (bus_id == 1)
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ 4096, UINT_MAX);
- if (!cpu_dai->active)
- err = omap_mcbsp_request(mcbsp_data->bus_id);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ max_period = omap_mcbsp_get_max_tx_threshold(bus_id);
+ else
+ max_period = omap_mcbsp_get_max_rx_threshold(bus_id);
+
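+ /* Thresholds are zero-based counts of 16-bit words; convert to a byte limit */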
+ max_period++;
+ max_period <<= 1;
+
+ if (dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ 32, max_period);
+ }
return err;
}
@@ -183,21 +223,21 @@ static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
- int err = 0;
+ int err = 0, play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!mcbsp_data->active++)
- omap_mcbsp_start(mcbsp_data->bus_id);
+ mcbsp_data->active++;
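+ /* Start only the direction used by this stream: TX for playback, RX for capture */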
+ omap_mcbsp_start(mcbsp_data->bus_id, play, !play);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (!--mcbsp_data->active)
- omap_mcbsp_stop(mcbsp_data->bus_id);
+ omap_mcbsp_stop(mcbsp_data->bus_id, play, !play);
+ mcbsp_data->active--;
break;
default:
err = -EINVAL;
@@ -215,7 +255,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id;
- int wlen, channels, wpf;
+ int wlen, channels, wpf, sync_mode = OMAP_DMA_SYNC_ELEMENT;
unsigned long port;
unsigned int format;
@@ -231,6 +271,12 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
} else if (cpu_is_omap343x()) {
dma = omap24xx_dma_reqs[bus_id][substream->stream];
port = omap34xx_mcbsp_port[bus_id][substream->stream];
+ omap_mcbsp_dai_dma_params[id][substream->stream].set_threshold =
+ omap_mcbsp_set_threshold;
+ /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
+ if (omap_mcbsp_get_dma_op_mode(bus_id) ==
+ MCBSP_DMA_MODE_THRESHOLD)
+ sync_mode = OMAP_DMA_SYNC_FRAME;
} else {
return -ENODEV;
}
@@ -238,6 +284,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
substream->stream ? "Audio Capture" : "Audio Playback";
omap_mcbsp_dai_dma_params[id][substream->stream].dma_req = dma;
omap_mcbsp_dai_dma_params[id][substream->stream].port_addr = port;
+ omap_mcbsp_dai_dma_params[id][substream->stream].sync_mode = sync_mode;
cpu_dai->dma_data = &omap_mcbsp_dai_dma_params[id][substream->stream];
if (mcbsp_data->configured) {
@@ -321,11 +368,14 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
/* Generic McBSP register settings */
regs->spcr2 |= XINTM(3) | FREE;
regs->spcr1 |= RINTM(3);
- regs->rcr2 |= RFIG;
- regs->xcr2 |= XFIG;
+ /* RFIG and XFIG are not defined in 34xx */
+ if (!cpu_is_omap34xx()) {
+ regs->rcr2 |= RFIG;
+ regs->xcr2 |= XFIG;
+ }
if (cpu_is_omap2430() || cpu_is_omap34xx()) {
- regs->xccr = DXENDLY(1) | XDMAEN;
- regs->rccr = RFULL_CYCLE | RDMAEN;
+ regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
+ regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
@@ -462,6 +512,40 @@ static int omap_mcbsp_dai_set_clks_src(struct omap_mcbsp_data *mcbsp_data,
return 0;
}
+static int omap_mcbsp_dai_set_rcvr_src(struct omap_mcbsp_data *mcbsp_data,
+ int clk_id)
+{
+ int sel_bit, set = 0;
+ u16 reg = OMAP2_CONTROL_DEVCONF0;
+
+ if (cpu_class_is_omap1())
+ return -EINVAL; /* TODO: Can this be implemented for OMAP1? */
+ if (mcbsp_data->bus_id != 0)
+ return -EINVAL;
+
+ switch (clk_id) {
+ case OMAP_MCBSP_CLKR_SRC_CLKX:
+ set = 1;
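+ /* fall through */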
+ case OMAP_MCBSP_CLKR_SRC_CLKR:
+ sel_bit = 3;
+ break;
+ case OMAP_MCBSP_FSR_SRC_FSX:
+ set = 1;
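+ /* fall through */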
+ case OMAP_MCBSP_FSR_SRC_FSR:
+ sel_bit = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (set)
+ omap_ctrl_writel(omap_ctrl_readl(reg) | (1 << sel_bit), reg);
+ else
+ omap_ctrl_writel(omap_ctrl_readl(reg) & ~(1 << sel_bit), reg);
+
+ return 0;
+}
+
static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq,
int dir)
@@ -484,6 +568,13 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
case OMAP_MCBSP_SYSCLK_CLKR_EXT:
regs->pcr0 |= SCLKME;
break;
+
+ case OMAP_MCBSP_CLKR_SRC_CLKR:
+ case OMAP_MCBSP_CLKR_SRC_CLKX:
+ case OMAP_MCBSP_FSR_SRC_FSR:
+ case OMAP_MCBSP_FSR_SRC_FSX:
+ err = omap_mcbsp_dai_set_rcvr_src(mcbsp_data, clk_id);
+ break;
default:
err = -ENODEV;
}
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h
index c8147aace813..647d2f981ab0 100644
--- a/sound/soc/omap/omap-mcbsp.h
+++ b/sound/soc/omap/omap-mcbsp.h
@@ -32,6 +32,10 @@ enum omap_mcbsp_clksrg_clk {
OMAP_MCBSP_SYSCLK_CLK, /* Internal ICLK */
OMAP_MCBSP_SYSCLK_CLKX_EXT, /* External CLKX pin */
OMAP_MCBSP_SYSCLK_CLKR_EXT, /* External CLKR pin */
+ OMAP_MCBSP_CLKR_SRC_CLKR, /* CLKR from CLKR pin */
+ OMAP_MCBSP_CLKR_SRC_CLKX, /* CLKR from CLKX pin */
+ OMAP_MCBSP_FSR_SRC_FSR, /* FSR from FSR pin */
+ OMAP_MCBSP_FSR_SRC_FSX, /* FSR from FSX pin */
};
/* McBSP dividers */
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 84a1950880eb..5735945788bf 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -59,16 +59,31 @@ static void omap_pcm_dma_irq(int ch, u16 stat, void *data)
struct omap_runtime_data *prtd = runtime->private_data;
unsigned long flags;
- if (cpu_is_omap1510()) {
+ if ((cpu_is_omap1510()) &&
+ (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)) {
/*
- * OMAP1510 doesn't support DMA chaining so have to restart
- * the transfer after all periods are transferred
+ * OMAP1510 doesn't fully support the DMA progress counter
+ * and there is no software emulation implemented yet,
+ * so we have to maintain our own playback progress counter
+ * that omap_pcm_pointer() can use instead.
*/
spin_lock_irqsave(&prtd->lock, flags);
+ if ((stat == OMAP_DMA_LAST_IRQ) &&
+ (prtd->period_index == runtime->periods - 1)) {
+ /* we are in sync, do nothing */
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ return;
+ }
if (prtd->period_index >= 0) {
- if (++prtd->period_index == runtime->periods) {
+ if (stat & OMAP_DMA_BLOCK_IRQ) {
+ /* end of buffer reached, loop back */
+ prtd->period_index = 0;
+ } else if (stat & OMAP_DMA_LAST_IRQ) {
+ /* update the counter for the last period */
+ prtd->period_index = runtime->periods - 1;
+ } else if (++prtd->period_index >= runtime->periods) {
+ /* end of buffer missed? loop back */
prtd->period_index = 0;
- omap_start_dma(prtd->dma_ch);
}
}
spin_unlock_irqrestore(&prtd->lock, flags);
@@ -100,7 +115,7 @@ static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
prtd->dma_data = dma_data;
err = omap_request_dma(dma_data->dma_req, dma_data->name,
omap_pcm_dma_irq, substream, &prtd->dma_ch);
- if (!err && !cpu_is_omap1510()) {
+ if (!err) {
/*
* Link channel with itself so DMA doesn't need any
* reprogramming while looping the buffer
@@ -119,8 +134,7 @@ static int omap_pcm_hw_free(struct snd_pcm_substream *substream)
if (prtd->dma_data == NULL)
return 0;
- if (!cpu_is_omap1510())
- omap_dma_unlink_lch(prtd->dma_ch, prtd->dma_ch);
+ omap_dma_unlink_lch(prtd->dma_ch, prtd->dma_ch);
omap_free_dma(prtd->dma_ch);
prtd->dma_data = NULL;
@@ -148,7 +162,7 @@ static int omap_pcm_prepare(struct snd_pcm_substream *substream)
*/
dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
dma_params.trigger = dma_data->dma_req;
- dma_params.sync_mode = OMAP_DMA_SYNC_ELEMENT;
+ dma_params.sync_mode = dma_data->sync_mode;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
@@ -174,7 +188,15 @@ static int omap_pcm_prepare(struct snd_pcm_substream *substream)
dma_params.frame_count = runtime->periods;
omap_set_dma_params(prtd->dma_ch, &dma_params);
- omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);
+ if ((cpu_is_omap1510()) &&
+ (substream->stream == SNDRV_PCM_STREAM_PLAYBACK))
+ omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ |
+ OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ);
+ else
+ omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);
+
+ omap_set_dma_src_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);
return 0;
}
@@ -183,6 +205,7 @@ static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct omap_runtime_data *prtd = runtime->private_data;
+ struct omap_pcm_dma_data *dma_data = prtd->dma_data;
unsigned long flags;
int ret = 0;
@@ -192,6 +215,10 @@ static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
prtd->period_index = 0;
+ /* Configure McBSP internal buffer usage */
+ if (dma_data->set_threshold)
+ dma_data->set_threshold(substream);
+
omap_start_dma(prtd->dma_ch);
break;
@@ -288,7 +315,7 @@ static struct snd_pcm_ops omap_pcm_ops = {
.mmap = omap_pcm_mmap,
};
-static u64 omap_pcm_dmamask = DMA_BIT_MASK(32);
+static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
int stream)
@@ -330,7 +357,7 @@ static void omap_pcm_free_dma_buffers(struct snd_pcm *pcm)
}
}
-int omap_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
+static int omap_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
struct snd_pcm *pcm)
{
int ret = 0;
@@ -338,7 +365,7 @@ int omap_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
if (!card->dev->dma_mask)
card->dev->dma_mask = &omap_pcm_dmamask;
if (!card->dev->coherent_dma_mask)
- card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
if (dai->playback.channels_min) {
ret = omap_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/omap/omap-pcm.h b/sound/soc/omap/omap-pcm.h
index 8d9d26916b05..38a821dd4118 100644
--- a/sound/soc/omap/omap-pcm.h
+++ b/sound/soc/omap/omap-pcm.h
@@ -29,6 +29,8 @@ struct omap_pcm_dma_data {
char *name; /* stream identifier */
int dma_req; /* DMA request line */
unsigned long port_addr; /* transmit/receive register */
+ int sync_mode; /* DMA sync mode */
+ void (*set_threshold)(struct snd_pcm_substream *substream);
};
extern struct snd_soc_platform omap_soc_platform;
diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c
index b719e5db4f57..4a3f62d1f295 100644
--- a/sound/soc/omap/sdp3430.c
+++ b/sound/soc/omap/sdp3430.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/i2c/twl4030.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -39,6 +40,11 @@
#include "omap-pcm.h"
#include "../codecs/twl4030.h"
+/* TWL4030 PMBR1 Register */
+#define TWL4030_INTBR_PMBR1 0x0D
+/* TWL4030 PMBR1 Register GPIO6 mux bit */
+#define TWL4030_GPIO6_PWM0_MUTE(value) (value << 2)
+
static struct snd_soc_card snd_soc_sdp3430;
static int sdp3430_hw_params(struct snd_pcm_substream *substream,
@@ -96,7 +102,7 @@ static int sdp3430_hw_voice_params(struct snd_pcm_substream *substream,
ret = snd_soc_dai_set_fmt(codec_dai,
SND_SOC_DAIFMT_DSP_A |
SND_SOC_DAIFMT_IB_NF |
- SND_SOC_DAIFMT_CBS_CFM);
+ SND_SOC_DAIFMT_CBM_CFM);
if (ret) {
printk(KERN_ERR "can't set codec DAI configuration\n");
return ret;
@@ -280,6 +286,7 @@ static struct snd_soc_card snd_soc_sdp3430 = {
static struct twl4030_setup_data twl4030_setup = {
.ramp_delay_value = 3,
.sysclk = 26000,
+ .hs_extmute = 1,
};
/* Audio subsystem */
@@ -294,6 +301,7 @@ static struct platform_device *sdp3430_snd_device;
static int __init sdp3430_soc_init(void)
{
int ret;
+ u8 pin_mux;
if (!machine_is_omap_3430sdp()) {
pr_debug("Not SDP3430!\n");
@@ -312,6 +320,14 @@ static int __init sdp3430_soc_init(void)
*(unsigned int *)sdp3430_dai[0].cpu_dai->private_data = 1; /* McBSP2 */
*(unsigned int *)sdp3430_dai[1].cpu_dai->private_data = 2; /* McBSP3 */
+ /* Set TWL4030 GPIO6 as EXTMUTE signal */
+ twl4030_i2c_read_u8(TWL4030_MODULE_INTBR, &pin_mux,
+ TWL4030_INTBR_PMBR1);
+ pin_mux &= ~TWL4030_GPIO6_PWM0_MUTE(0x03);
+ pin_mux |= TWL4030_GPIO6_PWM0_MUTE(0x02);
+ twl4030_i2c_write_u8(TWL4030_MODULE_INTBR, pin_mux,
+ TWL4030_INTBR_PMBR1);
+
ret = platform_device_add(sdp3430_snd_device);
if (ret)
goto err1;
diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c
new file mode 100644
index 000000000000..f90b45f56220
--- /dev/null
+++ b/sound/soc/omap/zoom2.c
@@ -0,0 +1,314 @@
+/*
+ * zoom2.c -- SoC audio for Zoom2
+ *
+ * Author: Misael Lopez Cruz <x0052729@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+#include <mach/mcbsp.h>
+
+#include "omap-mcbsp.h"
+#include "omap-pcm.h"
+#include "../codecs/twl4030.h"
+
+#define ZOOM2_HEADSET_MUX_GPIO (OMAP_MAX_GPIO_LINES + 15)
+#define ZOOM2_HEADSET_EXTMUTE_GPIO 153
+
+static int zoom2_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int ret;
+
+ /* Set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set codec DAI configuration\n");
+ return ret;
+ }
+
+ /* Set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set cpu DAI configuration\n");
+ return ret;
+ }
+
+ /* Set the codec system clock for DAC and ADC */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set codec system clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops zoom2_ops = {
+ .hw_params = zoom2_hw_params,
+};
+
+static int zoom2_hw_voice_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int ret;
+
+ /* Set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret) {
+ printk(KERN_ERR "can't set codec DAI configuration\n");
+ return ret;
+ }
+
+ /* Set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_IB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set cpu DAI configuration\n");
+ return ret;
+ }
+
+ /* Set the codec system clock for DAC and ADC */
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ printk(KERN_ERR "can't set codec system clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops zoom2_voice_ops = {
+ .hw_params = zoom2_hw_voice_params,
+};
+
+/* Zoom2 machine DAPM */
+static const struct snd_soc_dapm_widget zoom2_twl4030_dapm_widgets[] = {
+ SND_SOC_DAPM_MIC("Ext Mic", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_HP("Headset Stereophone", NULL),
+ SND_SOC_DAPM_LINE("Aux In", NULL),
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+ /* External Mics: MAINMIC, SUBMIC with bias*/
+ {"MAINMIC", NULL, "Mic Bias 1"},
+ {"SUBMIC", NULL, "Mic Bias 2"},
+ {"Mic Bias 1", NULL, "Ext Mic"},
+ {"Mic Bias 2", NULL, "Ext Mic"},
+
+ /* External Speakers: HFL, HFR */
+ {"Ext Spk", NULL, "HFL"},
+ {"Ext Spk", NULL, "HFR"},
+
+ /* Headset Stereophone: HSOL, HSOR */
+ {"Headset Stereophone", NULL, "HSOL"},
+ {"Headset Stereophone", NULL, "HSOR"},
+
+ /* Headset Mic: HSMIC with bias */
+ {"HSMIC", NULL, "Headset Mic Bias"},
+ {"Headset Mic Bias", NULL, "Headset Mic"},
+
+ /* Aux In: AUXL, AUXR */
+ {"Aux In", NULL, "AUXL"},
+ {"Aux In", NULL, "AUXR"},
+};
+
+static int zoom2_twl4030_init(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ /* Add Zoom2 specific widgets */
+ ret = snd_soc_dapm_new_controls(codec, zoom2_twl4030_dapm_widgets,
+ ARRAY_SIZE(zoom2_twl4030_dapm_widgets));
+ if (ret)
+ return ret;
+
+ /* Set up Zoom2 specific audio path audio_map */
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+ /* Zoom2 connected pins */
+ snd_soc_dapm_enable_pin(codec, "Ext Mic");
+ snd_soc_dapm_enable_pin(codec, "Ext Spk");
+ snd_soc_dapm_enable_pin(codec, "Headset Mic");
+ snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
+ snd_soc_dapm_enable_pin(codec, "Aux In");
+
+ /* TWL4030 not connected pins */
+ snd_soc_dapm_nc_pin(codec, "CARKITMIC");
+ snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
+ snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
+
+ snd_soc_dapm_nc_pin(codec, "OUTL");
+ snd_soc_dapm_nc_pin(codec, "OUTR");
+ snd_soc_dapm_nc_pin(codec, "EARPIECE");
+ snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
+ snd_soc_dapm_nc_pin(codec, "PREDRIVER");
+ snd_soc_dapm_nc_pin(codec, "CARKITL");
+ snd_soc_dapm_nc_pin(codec, "CARKITR");
+
+ ret = snd_soc_dapm_sync(codec);
+
+ return ret;
+}
+
+static int zoom2_twl4030_voice_init(struct snd_soc_codec *codec)
+{
+ unsigned short reg;
+
+ /* Enable voice interface */
+ reg = codec->read(codec, TWL4030_REG_VOICE_IF);
+ reg |= TWL4030_VIF_DIN_EN | TWL4030_VIF_DOUT_EN | TWL4030_VIF_EN;
+ codec->write(codec, TWL4030_REG_VOICE_IF, reg);
+
+ return 0;
+}
+
+/* Digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link zoom2_dai[] = {
+ {
+ .name = "TWL4030 I2S",
+ .stream_name = "TWL4030 Audio",
+ .cpu_dai = &omap_mcbsp_dai[0],
+ .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
+ .init = zoom2_twl4030_init,
+ .ops = &zoom2_ops,
+ },
+ {
+ .name = "TWL4030 PCM",
+ .stream_name = "TWL4030 Voice",
+ .cpu_dai = &omap_mcbsp_dai[1],
+ .codec_dai = &twl4030_dai[TWL4030_DAI_VOICE],
+ .init = zoom2_twl4030_voice_init,
+ .ops = &zoom2_voice_ops,
+ },
+};
+
+/* Audio machine driver */
+static struct snd_soc_card snd_soc_zoom2 = {
+ .name = "Zoom2",
+ .platform = &omap_soc_platform,
+ .dai_link = zoom2_dai,
+ .num_links = ARRAY_SIZE(zoom2_dai),
+};
+
+/* EXTMUTE callback function */
+void zoom2_set_hs_extmute(int mute)
+{
+ gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute);
+}
+
+/* twl4030 setup */
+static struct twl4030_setup_data twl4030_setup = {
+ .ramp_delay_value = 3, /* 161 ms */
+ .sysclk = 26000,
+ .hs_extmute = 1,
+ .set_hs_extmute = zoom2_set_hs_extmute,
+};
+
+/* Audio subsystem */
+static struct snd_soc_device zoom2_snd_devdata = {
+ .card = &snd_soc_zoom2,
+ .codec_dev = &soc_codec_dev_twl4030,
+ .codec_data = &twl4030_setup,
+};
+
+static struct platform_device *zoom2_snd_device;
+
+static int __init zoom2_soc_init(void)
+{
+ int ret;
+
+ if (!machine_is_omap_zoom2()) {
+ pr_debug("Not Zoom2!\n");
+ return -ENODEV;
+ }
+ printk(KERN_INFO "Zoom2 SoC init\n");
+
+ zoom2_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!zoom2_snd_device) {
+ printk(KERN_ERR "Platform device allocation failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(zoom2_snd_device, &zoom2_snd_devdata);
+ zoom2_snd_devdata.dev = &zoom2_snd_device->dev;
+ *(unsigned int *)zoom2_dai[0].cpu_dai->private_data = 1; /* McBSP2 */
+ *(unsigned int *)zoom2_dai[1].cpu_dai->private_data = 2; /* McBSP3 */
+
+ ret = platform_device_add(zoom2_snd_device);
+ if (ret)
+ goto err1;
+
+ BUG_ON(gpio_request(ZOOM2_HEADSET_MUX_GPIO, "hs_mux") < 0);
+ gpio_direction_output(ZOOM2_HEADSET_MUX_GPIO, 0);
+
+ BUG_ON(gpio_request(ZOOM2_HEADSET_EXTMUTE_GPIO, "ext_mute") < 0);
+ gpio_direction_output(ZOOM2_HEADSET_EXTMUTE_GPIO, 0);
+
+ return 0;
+
+err1:
+ printk(KERN_ERR "Unable to add platform device\n");
+ platform_device_put(zoom2_snd_device);
+
+ return ret;
+}
+module_init(zoom2_soc_init);
+
+static void __exit zoom2_soc_exit(void)
+{
+ gpio_free(ZOOM2_HEADSET_MUX_GPIO);
+ gpio_free(ZOOM2_HEADSET_EXTMUTE_GPIO);
+
+ platform_device_unregister(zoom2_snd_device);
+}
+module_exit(zoom2_soc_exit);
+
+MODULE_AUTHOR("Misael Lopez Cruz <x0052729@ti.com>");
+MODULE_DESCRIPTION("ALSA SoC Zoom2");
+MODULE_LICENSE("GPL");
+
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 326955dea36c..9f7c61e23daf 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -20,12 +20,14 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
+#include <sound/uda1380.h>
#include <mach/magician.h>
#include <asm/mach-types.h>
@@ -188,7 +190,7 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 1);
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 0, 1, width);
if (ret < 0)
return ret;
@@ -447,34 +449,47 @@ static struct snd_soc_card snd_soc_card_magician = {
.platform = &pxa2xx_soc_platform,
};
-/* magician audio private data */
-static struct uda1380_setup_data magician_uda1380_setup = {
- .i2c_address = 0x18,
- .dac_clk = UDA1380_DAC_CLK_WSPLL,
-};
-
/* magician audio subsystem */
static struct snd_soc_device magician_snd_devdata = {
.card = &snd_soc_card_magician,
.codec_dev = &soc_codec_dev_uda1380,
- .codec_data = &magician_uda1380_setup,
};
static struct platform_device *magician_snd_device;
+/*
+ * FIXME: move into magician board file once merged into the pxa tree
+ */
+static struct uda1380_platform_data uda1380_info = {
+ .gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
+ .gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
+ .dac_clk = UDA1380_DAC_CLK_WSPLL,
+};
+
+static struct i2c_board_info i2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("uda1380", 0x18),
+ .platform_data = &uda1380_info,
+ },
+};
+
static int __init magician_init(void)
{
int ret;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
if (!machine_is_magician())
return -ENODEV;
- ret = gpio_request(EGPIO_MAGICIAN_CODEC_POWER, "CODEC_POWER");
- if (ret)
- goto err_request_power;
- ret = gpio_request(EGPIO_MAGICIAN_CODEC_RESET, "CODEC_RESET");
- if (ret)
- goto err_request_reset;
+ adapter = i2c_get_adapter(0);
+ if (!adapter)
+ return -ENODEV;
+ client = i2c_new_device(adapter, i2c_board_info);
+ i2c_put_adapter(adapter);
+ if (!client)
+ return -ENODEV;
+
ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
if (ret)
goto err_request_spk;
@@ -491,14 +506,8 @@ static int __init magician_init(void)
if (ret)
goto err_request_in_sel1;
- gpio_set_value(EGPIO_MAGICIAN_CODEC_POWER, 1);
gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);
- /* we may need to have the clock running here - pH5 */
- gpio_set_value(EGPIO_MAGICIAN_CODEC_RESET, 1);
- udelay(5);
- gpio_set_value(EGPIO_MAGICIAN_CODEC_RESET, 0);
-
magician_snd_device = platform_device_alloc("soc-audio", -1);
if (!magician_snd_device) {
ret = -ENOMEM;
@@ -526,10 +535,6 @@ err_request_mic:
err_request_ep:
gpio_free(EGPIO_MAGICIAN_SPK_POWER);
err_request_spk:
- gpio_free(EGPIO_MAGICIAN_CODEC_RESET);
-err_request_reset:
- gpio_free(EGPIO_MAGICIAN_CODEC_POWER);
-err_request_power:
return ret;
}
@@ -540,15 +545,12 @@ static void __exit magician_exit(void)
gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);
- gpio_set_value(EGPIO_MAGICIAN_CODEC_POWER, 0);
gpio_free(EGPIO_MAGICIAN_IN_SEL1);
gpio_free(EGPIO_MAGICIAN_IN_SEL0);
gpio_free(EGPIO_MAGICIAN_MIC_POWER);
gpio_free(EGPIO_MAGICIAN_EP_POWER);
gpio_free(EGPIO_MAGICIAN_SPK_POWER);
- gpio_free(EGPIO_MAGICIAN_CODEC_RESET);
- gpio_free(EGPIO_MAGICIAN_CODEC_POWER);
}
module_init(magician_init);
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index e6102fda0a7f..1f96e3227be5 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -17,13 +17,12 @@
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
+#include <sound/jack.h>
#include <asm/mach-types.h>
#include <mach/audio.h>
@@ -33,90 +32,31 @@
#include "pxa2xx-pcm.h"
#include "pxa2xx-ac97.h"
-static int palm27x_jack_func = 1;
-static int palm27x_spk_func = 1;
-static int palm27x_ep_gpio = -1;
+static struct snd_soc_jack hs_jack;
-static void palm27x_ext_control(struct snd_soc_codec *codec)
-{
- if (!palm27x_spk_func)
- snd_soc_dapm_enable_pin(codec, "Speaker");
- else
- snd_soc_dapm_disable_pin(codec, "Speaker");
-
- if (!palm27x_jack_func)
- snd_soc_dapm_enable_pin(codec, "Headphone Jack");
- else
- snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-
- snd_soc_dapm_sync(codec);
-}
-
-static int palm27x_startup(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_codec *codec = rtd->socdev->card->codec;
-
- /* check the jack status at stream startup */
- palm27x_ext_control(codec);
- return 0;
-}
-
-static struct snd_soc_ops palm27x_ops = {
- .startup = palm27x_startup,
+/* Headphones jack detection DAPM pins */
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+ {
+ .pin = "Headphone Jack",
+ .mask = SND_JACK_HEADPHONE,
+ },
};
-static irqreturn_t palm27x_interrupt(int irq, void *v)
-{
- palm27x_spk_func = gpio_get_value(palm27x_ep_gpio);
- palm27x_jack_func = !palm27x_spk_func;
- return IRQ_HANDLED;
-}
-
-static int palm27x_get_jack(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.integer.value[0] = palm27x_jack_func;
- return 0;
-}
-
-static int palm27x_set_jack(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-
- if (palm27x_jack_func == ucontrol->value.integer.value[0])
- return 0;
-
- palm27x_jack_func = ucontrol->value.integer.value[0];
- palm27x_ext_control(codec);
- return 1;
-}
-
-static int palm27x_get_spk(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.integer.value[0] = palm27x_spk_func;
- return 0;
-}
-
-static int palm27x_set_spk(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-
- if (palm27x_spk_func == ucontrol->value.integer.value[0])
- return 0;
-
- palm27x_spk_func = ucontrol->value.integer.value[0];
- palm27x_ext_control(codec);
- return 1;
-}
+/* Headphones jack detection gpios */
+static struct snd_soc_jack_gpio hs_jack_gpios[] = {
+ [0] = {
+ /* gpio is set on per-platform basis */
+ .name = "hp-gpio",
+ .report = SND_JACK_HEADPHONE,
+ .debounce_time = 200,
+ },
+};
-/* PalmTX machine dapm widgets */
+/* Palm27x machine dapm widgets */
static const struct snd_soc_dapm_widget palm27x_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_SPK("Ext. Speaker", NULL),
+ SND_SOC_DAPM_MIC("Ext. Microphone", NULL),
};
/* PalmTX audio map */
@@ -126,46 +66,66 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"Headphone Jack", NULL, "HPOUTR"},
/* ext speaker connected to ROUT2, LOUT2 */
- {"Speaker", NULL, "LOUT2"},
- {"Speaker", NULL, "ROUT2"},
-};
+ {"Ext. Speaker", NULL, "LOUT2"},
+ {"Ext. Speaker", NULL, "ROUT2"},
-static const char *jack_function[] = {"Headphone", "Off"};
-static const char *spk_function[] = {"On", "Off"};
-static const struct soc_enum palm27x_enum[] = {
- SOC_ENUM_SINGLE_EXT(2, jack_function),
- SOC_ENUM_SINGLE_EXT(2, spk_function),
+ /* mic connected to MIC1 */
+ {"Ext. Microphone", NULL, "MIC1"},
};
-static const struct snd_kcontrol_new palm27x_controls[] = {
- SOC_ENUM_EXT("Jack Function", palm27x_enum[0], palm27x_get_jack,
- palm27x_set_jack),
- SOC_ENUM_EXT("Speaker Function", palm27x_enum[1], palm27x_get_spk,
- palm27x_set_spk),
-};
+static struct snd_soc_card palm27x_asoc;
static int palm27x_ac97_init(struct snd_soc_codec *codec)
{
int err;
+ /* add palm27x specific widgets */
+ err = snd_soc_dapm_new_controls(codec, palm27x_dapm_widgets,
+ ARRAY_SIZE(palm27x_dapm_widgets));
+ if (err)
+ return err;
+
+ /* set up palm27x specific audio path audio_map */
+ err = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+ if (err)
+ return err;
+
+ /* connected pins */
+ if (machine_is_palmld())
+ snd_soc_dapm_enable_pin(codec, "MIC1");
+ snd_soc_dapm_enable_pin(codec, "HPOUTL");
+ snd_soc_dapm_enable_pin(codec, "HPOUTR");
+ snd_soc_dapm_enable_pin(codec, "LOUT2");
+ snd_soc_dapm_enable_pin(codec, "ROUT2");
+
+ /* not connected pins */
snd_soc_dapm_nc_pin(codec, "OUT3");
snd_soc_dapm_nc_pin(codec, "MONOOUT");
+ snd_soc_dapm_nc_pin(codec, "LINEINL");
+ snd_soc_dapm_nc_pin(codec, "LINEINR");
+ snd_soc_dapm_nc_pin(codec, "PCBEEP");
+ snd_soc_dapm_nc_pin(codec, "PHONE");
+ snd_soc_dapm_nc_pin(codec, "MIC2");
+
+ err = snd_soc_dapm_sync(codec);
+ if (err)
+ return err;
- /* add palm27x specific controls */
- err = snd_soc_add_controls(codec, palm27x_controls,
- ARRAY_SIZE(palm27x_controls));
- if (err < 0)
+ /* Jack detection API stuff */
+ err = snd_soc_jack_new(&palm27x_asoc, "Headphone Jack",
+ SND_JACK_HEADPHONE, &hs_jack);
+ if (err)
return err;
- /* add palm27x specific widgets */
- snd_soc_dapm_new_controls(codec, palm27x_dapm_widgets,
- ARRAY_SIZE(palm27x_dapm_widgets));
+ err = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
+ hs_jack_pins);
+ if (err)
+ return err;
- /* set up palm27x specific audio path audio_map */
- snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+ err = snd_soc_jack_add_gpios(&hs_jack, ARRAY_SIZE(hs_jack_gpios),
+ hs_jack_gpios);
- snd_soc_dapm_sync(codec);
- return 0;
+ return err;
}
static struct snd_soc_dai_link palm27x_dai[] = {
@@ -175,14 +135,12 @@ static struct snd_soc_dai_link palm27x_dai[] = {
.cpu_dai = &pxa_ac97_dai[PXA2XX_DAI_AC97_HIFI],
.codec_dai = &wm9712_dai[WM9712_DAI_AC97_HIFI],
.init = palm27x_ac97_init,
- .ops = &palm27x_ops,
},
{
.name = "AC97 Aux",
.stream_name = "AC97 Aux",
.cpu_dai = &pxa_ac97_dai[PXA2XX_DAI_AC97_AUX],
.codec_dai = &wm9712_dai[WM9712_DAI_AC97_AUX],
- .ops = &palm27x_ops,
},
};
@@ -208,27 +166,17 @@ static int palm27x_asoc_probe(struct platform_device *pdev)
machine_is_palmld() || machine_is_palmte2()))
return -ENODEV;
- if (pdev->dev.platform_data)
- palm27x_ep_gpio = ((struct palm27x_asoc_info *)
- (pdev->dev.platform_data))->jack_gpio;
-
- ret = gpio_request(palm27x_ep_gpio, "Headphone Jack");
- if (ret)
- return ret;
- ret = gpio_direction_input(palm27x_ep_gpio);
- if (ret)
- goto err_alloc;
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "please supply platform_data\n");
+ return -ENODEV;
+ }
- if (request_irq(gpio_to_irq(palm27x_ep_gpio), palm27x_interrupt,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "Headphone jack", NULL))
- goto err_alloc;
+ hs_jack_gpios[0].gpio = ((struct palm27x_asoc_info *)
+ (pdev->dev.platform_data))->jack_gpio;
palm27x_snd_device = platform_device_alloc("soc-audio", -1);
- if (!palm27x_snd_device) {
- ret = -ENOMEM;
- goto err_dev;
- }
+ if (!palm27x_snd_device)
+ return -ENOMEM;
platform_set_drvdata(palm27x_snd_device, &palm27x_snd_devdata);
palm27x_snd_devdata.dev = &palm27x_snd_device->dev;
@@ -241,18 +189,12 @@ static int palm27x_asoc_probe(struct platform_device *pdev)
put_device:
platform_device_put(palm27x_snd_device);
-err_dev:
- free_irq(gpio_to_irq(palm27x_ep_gpio), NULL);
-err_alloc:
- gpio_free(palm27x_ep_gpio);
return ret;
}
static int __devexit palm27x_asoc_remove(struct platform_device *pdev)
{
- free_irq(gpio_to_irq(palm27x_ep_gpio), NULL);
- gpio_free(palm27x_ep_gpio);
platform_device_unregister(palm27x_snd_device);
return 0;
}
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 19c45409d94c..5b9ed6464789 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -375,21 +375,34 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai,
* Set the active slots in TDM/Network mode
*/
static int pxa_ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
- unsigned int mask, int slots)
+ unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
struct ssp_priv *priv = cpu_dai->private_data;
struct ssp_device *ssp = priv->dev.ssp;
u32 sscr0;
- sscr0 = ssp_read_reg(ssp, SSCR0) & ~SSCR0_SlotsPerFrm(7);
+ sscr0 = ssp_read_reg(ssp, SSCR0);
+ sscr0 &= ~(SSCR0_MOD | SSCR0_SlotsPerFrm(8) | SSCR0_EDSS | SSCR0_DSS);
+
+ /* set slot width */
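+ /* EDSS adds 16 to the programmed data size for slot widths above 16 bits */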
+ if (slot_width > 16)
+ sscr0 |= SSCR0_EDSS | SSCR0_DataSize(slot_width - 16);
+ else
+ sscr0 |= SSCR0_DataSize(slot_width);
+
+ if (slots > 1) {
+ /* enable network mode */
+ sscr0 |= SSCR0_MOD;
- /* set number of active slots */
- sscr0 |= SSCR0_SlotsPerFrm(slots);
+ /* set number of active slots */
+ sscr0 |= SSCR0_SlotsPerFrm(slots);
+
+ /* set active slot mask */
+ ssp_write_reg(ssp, SSTSA, tx_mask);
+ ssp_write_reg(ssp, SSRSA, rx_mask);
+ }
ssp_write_reg(ssp, SSCR0, sscr0);
- /* set active slot mask */
- ssp_write_reg(ssp, SSTSA, mask);
- ssp_write_reg(ssp, SSRSA, mask);
return 0;
}
@@ -457,31 +470,27 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
return -EINVAL;
}
- ssp_write_reg(ssp, SSCR0, sscr0);
- ssp_write_reg(ssp, SSCR1, sscr1);
- ssp_write_reg(ssp, SSPSP, sspsp);
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ sspsp |= SSPSP_SFRMP;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ sspsp |= SSPSP_SCMODE(2);
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
+ break;
+ default:
+ return -EINVAL;
+ }
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
sscr0 |= SSCR0_PSP;
sscr1 |= SSCR1_RWOT | SSCR1_TRAIL;
-
/* See hw_params() */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- sspsp |= SSPSP_SFRMP;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- break;
- case SND_SOC_DAIFMT_IB_IF:
- sspsp |= SSPSP_SCMODE(2);
- break;
- case SND_SOC_DAIFMT_IB_NF:
- sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
- break;
- default:
- return -EINVAL;
- }
break;
case SND_SOC_DAIFMT_DSP_A:
@@ -489,22 +498,6 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
case SND_SOC_DAIFMT_DSP_B:
sscr0 |= SSCR0_MOD | SSCR0_PSP;
sscr1 |= SSCR1_TRAIL | SSCR1_RWOT;
-
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- sspsp |= SSPSP_SFRMP;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- break;
- case SND_SOC_DAIFMT_IB_IF:
- sspsp |= SSPSP_SCMODE(2);
- break;
- case SND_SOC_DAIFMT_IB_NF:
- sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
- break;
- default:
- return -EINVAL;
- }
break;
default:
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index d9c94d71fa61..e9ae7b3a7e00 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -22,6 +22,7 @@
#include <mach/hardware.h>
#include <mach/regs-ac97.h>
#include <mach/dma.h>
+#include <mach/audio.h>
#include "pxa2xx-pcm.h"
#include "pxa2xx-ac97.h"
@@ -241,9 +242,18 @@ EXPORT_SYMBOL_GPL(soc_ac97_ops);
static int __devinit pxa2xx_ac97_dev_probe(struct platform_device *pdev)
{
int i;
+ pxa2xx_audio_ops_t *pdata = pdev->dev.platform_data;
- for (i = 0; i < ARRAY_SIZE(pxa_ac97_dai); i++)
+ if (pdev->id >= 0) {
+ dev_err(&pdev->dev, "PXA2xx has only one AC97 port.\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pxa_ac97_dai); i++) {
pxa_ac97_dai[i].dev = &pdev->dev;
+ if (pdata && pdata->codec_pdata[0])
+ pxa_ac97_dai[i].ac97_pdata = pdata->codec_pdata[0];
+ }
/* Punt most of the init to the SoC probe; we may need the machine
* driver to do interesting things with the clocking to get us up
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
index df494d1e346f..923428fc1adb 100644
--- a/sound/soc/s3c24xx/Kconfig
+++ b/sound/soc/s3c24xx/Kconfig
@@ -1,6 +1,7 @@
config SND_S3C24XX_SOC
tristate "SoC Audio for the Samsung S3CXXXX chips"
- depends on ARCH_S3C2410
+ depends on ARCH_S3C2410 || ARCH_S3C64XX
+ select S3C64XX_DMA if ARCH_S3C64XX
help
Say Y or M if you want to add support for codecs attached to
the S3C24XX AC97 or I2S interfaces. You will also need to
@@ -38,6 +39,15 @@ config SND_S3C24XX_SOC_NEO1973_WM8753
Say Y if you want to add support for SoC audio on smdk2440
with the WM8753.
+config SND_S3C24XX_SOC_NEO1973_GTA02_WM8753
+ tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)"
+ depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA02
+ select SND_S3C24XX_SOC_I2S
+ select SND_SOC_WM8753
+ help
+ This driver provides audio support for the Openmoko Neo FreeRunner
+ smartphone.
+
config SND_S3C24XX_SOC_JIVE_WM8750
tristate "SoC I2S Audio support for Jive"
depends on SND_S3C24XX_SOC && MACH_JIVE
@@ -57,7 +67,7 @@ config SND_S3C24XX_SOC_SMDK2443_WM9710
config SND_S3C24XX_SOC_LN2440SBC_ALC650
tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
- depends on SND_S3C24XX_SOC
+ depends on SND_S3C24XX_SOC && ARCH_S3C2410
select SND_S3C2443_SOC_AC97
select SND_SOC_AC97_CODEC
help
@@ -66,7 +76,26 @@ config SND_S3C24XX_SOC_LN2440SBC_ALC650
config SND_S3C24XX_SOC_S3C24XX_UDA134X
tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
- depends on SND_S3C24XX_SOC
+ depends on SND_S3C24XX_SOC && ARCH_S3C2410
select SND_S3C24XX_SOC_I2S
select SND_SOC_L3
select SND_SOC_UDA134X
+
+config SND_S3C24XX_SOC_SIMTEC
+ tristate
+ help
+ Internal node for common S3C24XX/Simtec support
+
+config SND_S3C24XX_SOC_SIMTEC_TLV320AIC23
+ tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
+ depends on SND_S3C24XX_SOC && ARCH_S3C2410
+ select SND_S3C24XX_SOC_I2S
+ select SND_SOC_TLV320AIC23
+ select SND_S3C24XX_SOC_SIMTEC
+
+config SND_S3C24XX_SOC_SIMTEC_HERMES
+ tristate "SoC I2S Audio support for Simtec Hermes board"
+ depends on SND_S3C24XX_SOC && ARCH_S3C2410
+ select SND_S3C24XX_SOC_I2S
+ select SND_SOC_TLV320AIC3X
+ select SND_S3C24XX_SOC_SIMTEC
diff --git a/sound/soc/s3c24xx/Makefile b/sound/soc/s3c24xx/Makefile
index 07a93a2ebe5f..99f5a7dd3fc6 100644
--- a/sound/soc/s3c24xx/Makefile
+++ b/sound/soc/s3c24xx/Makefile
@@ -16,12 +16,21 @@ obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
# S3C24XX Machine Support
snd-soc-jive-wm8750-objs := jive_wm8750.o
snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o
+snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o
snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o
snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o
snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o
+snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o
+snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o
+snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o
obj-$(CONFIG_SND_S3C24XX_SOC_JIVE_WM8750) += snd-soc-jive-wm8750.o
obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
+obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o
obj-$(CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
obj-$(CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
obj-$(CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
+obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC) += snd-soc-s3c24xx-simtec.o
+obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
+obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o
+
diff --git a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
new file mode 100644
index 000000000000..0c52e36ddd87
--- /dev/null
+++ b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
@@ -0,0 +1,498 @@
+/*
+ * neo1973_gta02_wm8753.c -- SoC audio for Openmoko FreeRunner (GTA02)
+ *
+ * Copyright 2007 Openmoko Inc
+ * Author: Graeme Gregory <graeme@openmoko.org>
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory <linux@wolfsonmicro.com>
+ * Copyright 2009 Wolfson Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <asm/mach-types.h>
+
+#include <plat/regs-iis.h>
+
+#include <mach/regs-clock.h>
+#include <asm/io.h>
+#include <mach/gta02.h>
+#include "../codecs/wm8753.h"
+#include "s3c24xx-pcm.h"
+#include "s3c24xx-i2s.h"
+
+static struct snd_soc_card neo1973_gta02;
+
+static int neo1973_gta02_hifi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ unsigned int pll_out = 0, bclk = 0;
+ int ret = 0;
+ unsigned long iis_clkrate;
+
+ iis_clkrate = s3c24xx_i2s_get_clockrate();
+
+ switch (params_rate(params)) {
+ case 8000:
+ case 16000:
+ pll_out = 12288000;
+ break;
+ case 48000:
+ bclk = WM8753_BCLK_DIV_4;
+ pll_out = 12288000;
+ break;
+ case 96000:
+ bclk = WM8753_BCLK_DIV_2;
+ pll_out = 12288000;
+ break;
+ case 11025:
+ bclk = WM8753_BCLK_DIV_16;
+ pll_out = 11289600;
+ break;
+ case 22050:
+ bclk = WM8753_BCLK_DIV_8;
+ pll_out = 11289600;
+ break;
+ case 44100:
+ bclk = WM8753_BCLK_DIV_4;
+ pll_out = 11289600;
+ break;
+ case 88200:
+ bclk = WM8753_BCLK_DIV_2;
+ pll_out = 11289600;
+ break;
+ }
+
+ /* set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ /* set cpu DAI configuration */
+ ret = snd_soc_dai_set_fmt(cpu_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ return ret;
+
+ /* set the codec system clock for DAC and ADC */
+ ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ return ret;
+
+ /* set MCLK division for sample rate */
+ ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
+ S3C2410_IISMOD_32FS);
+ if (ret < 0)
+ return ret;
+
+ /* set codec BCLK division for sample rate */
+ ret = snd_soc_dai_set_clkdiv(codec_dai,
+ WM8753_BCLKDIV, bclk);
+ if (ret < 0)
+ return ret;
+
+ /* set prescaler division for sample rate */
+ ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+ S3C24XX_PRESCALE(4, 4));
+ if (ret < 0)
+ return ret;
+
+ /* codec PLL input is PCLK/4 */
+ ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1,
+ iis_clkrate / 4, pll_out);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int neo1973_gta02_hifi_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+
+ /* disable the PLL */
+ return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0);
+}
+
+/*
+ * Neo1973 WM8753 HiFi DAI operations.
+ */
+static struct snd_soc_ops neo1973_gta02_hifi_ops = {
+ .hw_params = neo1973_gta02_hifi_hw_params,
+ .hw_free = neo1973_gta02_hifi_hw_free,
+};
+
+static int neo1973_gta02_voice_hw_params(
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ unsigned int pcmdiv = 0;
+ int ret = 0;
+ unsigned long iis_clkrate;
+
+ iis_clkrate = s3c24xx_i2s_get_clockrate();
+
+ if (params_rate(params) != 8000)
+ return -EINVAL;
+ if (params_channels(params) != 1)
+ return -EINVAL;
+
+ pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */
+
+ /* todo: gg check mode (DSP_B) against CSR datasheet */
+ /* set codec DAI configuration */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+ if (ret < 0)
+ return ret;
+
+ /* set the codec system clock for DAC and ADC */
+ ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK,
+ 12288000, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ return ret;
+
+ /* set codec PCM division for sample rate */
+ ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV,
+ pcmdiv);
+ if (ret < 0)
+ return ret;
+
+ /* configure and enable PLL for 12.288MHz output */
+ ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2,
+ iis_clkrate / 4, 12288000);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int neo1973_gta02_voice_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+
+ /* disable the PLL */
+ return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0);
+}
+
+static struct snd_soc_ops neo1973_gta02_voice_ops = {
+ .hw_params = neo1973_gta02_voice_hw_params,
+ .hw_free = neo1973_gta02_voice_hw_free,
+};
+
+#define LM4853_AMP 1
+#define LM4853_SPK 2
+
+static u8 lm4853_state;
+
+/* This has no effect, it exists only to maintain compatibility with
+ * existing ALSA state files.
+ */
+static int lm4853_set_state(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int val = ucontrol->value.integer.value[0];
+
+ if (val)
+ lm4853_state |= LM4853_AMP;
+ else
+ lm4853_state &= ~LM4853_AMP;
+
+ return 0;
+}
+
+static int lm4853_get_state(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = lm4853_state & LM4853_AMP;
+
+ return 0;
+}
+
+static int lm4853_set_spk(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int val = ucontrol->value.integer.value[0];
+
+ if (val) {
+ lm4853_state |= LM4853_SPK;
+ gpio_set_value(GTA02_GPIO_HP_IN, 0);
+ } else {
+ lm4853_state &= ~LM4853_SPK;
+ gpio_set_value(GTA02_GPIO_HP_IN, 1);
+ }
+
+ return 0;
+}
+
+static int lm4853_get_spk(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = (lm4853_state & LM4853_SPK) >> 1;
+
+ return 0;
+}
+
+static int lm4853_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k,
+ int event)
+{
+	gpio_set_value(GTA02_GPIO_AMP_SHUT, SND_SOC_DAPM_EVENT_OFF(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Stereo Out", lm4853_event),
+ SND_SOC_DAPM_LINE("GSM Line Out", NULL),
+ SND_SOC_DAPM_LINE("GSM Line In", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Handset Mic", NULL),
+ SND_SOC_DAPM_SPK("Handset Spk", NULL),
+};
+
+
+/* example machine audio map connections */
+static const struct snd_soc_dapm_route audio_map[] = {
+
+ /* Connections to the lm4853 amp */
+ {"Stereo Out", NULL, "LOUT1"},
+ {"Stereo Out", NULL, "ROUT1"},
+
+ /* Connections to the GSM Module */
+ {"GSM Line Out", NULL, "MONO1"},
+ {"GSM Line Out", NULL, "MONO2"},
+ {"RXP", NULL, "GSM Line In"},
+ {"RXN", NULL, "GSM Line In"},
+
+ /* Connections to Headset */
+ {"MIC1", NULL, "Mic Bias"},
+ {"Mic Bias", NULL, "Headset Mic"},
+
+ /* Call Mic */
+ {"MIC2", NULL, "Mic Bias"},
+ {"MIC2N", NULL, "Mic Bias"},
+ {"Mic Bias", NULL, "Handset Mic"},
+
+ /* Call Speaker */
+ {"Handset Spk", NULL, "LOUT2"},
+ {"Handset Spk", NULL, "ROUT2"},
+
+ /* Connect the ALC pins */
+ {"ACIN", NULL, "ACOP"},
+};
+
+static const struct snd_kcontrol_new wm8753_neo1973_gta02_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Stereo Out"),
+ SOC_DAPM_PIN_SWITCH("GSM Line Out"),
+ SOC_DAPM_PIN_SWITCH("GSM Line In"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Handset Mic"),
+ SOC_DAPM_PIN_SWITCH("Handset Spk"),
+
+ /* This has no effect, it exists only to maintain compatibility with
+ * existing ALSA state files.
+ */
+ SOC_SINGLE_EXT("Amp State Switch", 6, 0, 1, 0,
+ lm4853_get_state,
+ lm4853_set_state),
+ SOC_SINGLE_EXT("Amp Spk Switch", 7, 0, 1, 0,
+ lm4853_get_spk,
+ lm4853_set_spk),
+};
+
+/*
+ * This is an example machine initialisation for a wm8753 connected to a
+ * neo1973 GTA02.
+ */
+static int neo1973_gta02_wm8753_init(struct snd_soc_codec *codec)
+{
+ int err;
+
+ /* set up NC codec pins */
+ snd_soc_dapm_nc_pin(codec, "OUT3");
+ snd_soc_dapm_nc_pin(codec, "OUT4");
+ snd_soc_dapm_nc_pin(codec, "LINE1");
+ snd_soc_dapm_nc_pin(codec, "LINE2");
+
+ /* Add neo1973 gta02 specific widgets */
+ snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
+ ARRAY_SIZE(wm8753_dapm_widgets));
+
+ /* add neo1973 gta02 specific controls */
+ err = snd_soc_add_controls(codec, wm8753_neo1973_gta02_controls,
+ ARRAY_SIZE(wm8753_neo1973_gta02_controls));
+
+ if (err < 0)
+ return err;
+
+ /* set up neo1973 gta02 specific audio path audio_map */
+ snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+
+ /* set endpoints to default off mode */
+ snd_soc_dapm_disable_pin(codec, "Stereo Out");
+ snd_soc_dapm_disable_pin(codec, "GSM Line Out");
+ snd_soc_dapm_disable_pin(codec, "GSM Line In");
+ snd_soc_dapm_disable_pin(codec, "Headset Mic");
+ snd_soc_dapm_disable_pin(codec, "Handset Mic");
+ snd_soc_dapm_disable_pin(codec, "Handset Spk");
+
+ snd_soc_dapm_sync(codec);
+
+ return 0;
+}
+
+/*
+ * BT Codec DAI
+ */
+static struct snd_soc_dai bt_dai = {
+ .name = "Bluetooth",
+ .id = 0,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,},
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,},
+};
+
+static struct snd_soc_dai_link neo1973_gta02_dai[] = {
+{ /* HiFi Playback - for simultaneous use with voice below */
+ .name = "WM8753",
+ .stream_name = "WM8753 HiFi",
+ .cpu_dai = &s3c24xx_i2s_dai,
+ .codec_dai = &wm8753_dai[WM8753_DAI_HIFI],
+ .init = neo1973_gta02_wm8753_init,
+ .ops = &neo1973_gta02_hifi_ops,
+},
+{ /* Voice via BT */
+ .name = "Bluetooth",
+ .stream_name = "Voice",
+ .cpu_dai = &bt_dai,
+ .codec_dai = &wm8753_dai[WM8753_DAI_VOICE],
+ .ops = &neo1973_gta02_voice_ops,
+},
+};
+
+static struct snd_soc_card neo1973_gta02 = {
+ .name = "neo1973-gta02",
+ .platform = &s3c24xx_soc_platform,
+ .dai_link = neo1973_gta02_dai,
+ .num_links = ARRAY_SIZE(neo1973_gta02_dai),
+};
+
+static struct snd_soc_device neo1973_gta02_snd_devdata = {
+ .card = &neo1973_gta02,
+ .codec_dev = &soc_codec_dev_wm8753,
+};
+
+static struct platform_device *neo1973_gta02_snd_device;
+
+static int __init neo1973_gta02_init(void)
+{
+ int ret;
+
+ if (!machine_is_neo1973_gta02()) {
+ printk(KERN_INFO
+ "Only GTA02 is supported by this ASoC driver\n");
+ return -ENODEV;
+ }
+
+ /* register bluetooth DAI here */
+ ret = snd_soc_register_dai(&bt_dai);
+ if (ret)
+ return ret;
+
+ neo1973_gta02_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!neo1973_gta02_snd_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(neo1973_gta02_snd_device,
+ &neo1973_gta02_snd_devdata);
+ neo1973_gta02_snd_devdata.dev = &neo1973_gta02_snd_device->dev;
+ ret = platform_device_add(neo1973_gta02_snd_device);
+
+ if (ret) {
+ platform_device_put(neo1973_gta02_snd_device);
+ return ret;
+ }
+
+ /* Initialise GPIOs used by amp */
+ ret = gpio_request(GTA02_GPIO_HP_IN, "GTA02_HP_IN");
+ if (ret) {
+ pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_HP_IN);
+ goto err_unregister_device;
+ }
+
+	ret = gpio_direction_output(GTA02_GPIO_HP_IN, 1);
+ if (ret) {
+ pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_HP_IN);
+ goto err_free_gpio_hp_in;
+ }
+
+ ret = gpio_request(GTA02_GPIO_AMP_SHUT, "GTA02_AMP_SHUT");
+ if (ret) {
+ pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_AMP_SHUT);
+ goto err_free_gpio_hp_in;
+ }
+
+ ret = gpio_direction_output(GTA02_GPIO_AMP_SHUT, 1);
+ if (ret) {
+ pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_AMP_SHUT);
+ goto err_free_gpio_amp_shut;
+ }
+
+ return 0;
+
+err_free_gpio_amp_shut:
+ gpio_free(GTA02_GPIO_AMP_SHUT);
+err_free_gpio_hp_in:
+ gpio_free(GTA02_GPIO_HP_IN);
+err_unregister_device:
+ platform_device_unregister(neo1973_gta02_snd_device);
+ return ret;
+}
+module_init(neo1973_gta02_init);
+
+static void __exit neo1973_gta02_exit(void)
+{
+ snd_soc_unregister_dai(&bt_dai);
+ platform_device_unregister(neo1973_gta02_snd_device);
+ gpio_free(GTA02_GPIO_HP_IN);
+ gpio_free(GTA02_GPIO_AMP_SHUT);
+}
+module_exit(neo1973_gta02_exit);
+
+/* Module information */
+MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org");
+MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973 GTA02");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/s3c24xx/s3c-i2s-v2.c
index 1a283170ca92..aa7af0b8d421 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.c
+++ b/sound/soc/s3c24xx/s3c-i2s-v2.c
@@ -36,6 +36,7 @@
#include <mach/dma.h>
#include "s3c-i2s-v2.h"
+#include "s3c24xx-pcm.h"
#undef S3C_IIS_V2_SUPPORTED
@@ -357,19 +358,19 @@ static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
#endif
#ifdef CONFIG_PLAT_S3C64XX
- iismod &= ~0x606;
+ iismod &= ~(S3C64XX_IISMOD_BLC_MASK | S3C2412_IISMOD_BCLK_MASK);
/* Sample size */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
/* 8 bit sample, 16fs BCLK */
- iismod |= 0x2004;
+ iismod |= (S3C64XX_IISMOD_BLC_8BIT | S3C2412_IISMOD_BCLK_16FS);
break;
case SNDRV_PCM_FORMAT_S16_LE:
/* 16 bit sample, 32fs BCLK */
break;
case SNDRV_PCM_FORMAT_S24_LE:
/* 24 bit sample, 48fs BCLK */
- iismod |= 0x4002;
+ iismod |= (S3C64XX_IISMOD_BLC_24BIT | S3C2412_IISMOD_BCLK_48FS);
break;
}
#endif
@@ -387,6 +388,8 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
unsigned long irqs;
int ret = 0;
+ int channel = ((struct s3c24xx_pcm_dma_params *)
+ rtd->dai->cpu_dai->dma_data)->channel;
pr_debug("Entered %s\n", __func__);
@@ -416,6 +419,14 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
s3c2412_snd_txctrl(i2s, 1);
local_irq_restore(irqs);
+
+ /*
+	 * Load the next buffer to DMA to meet the requirement
+ * of the auto reload mechanism of S3C24XX.
+ * This call won't bother S3C64XX.
+ */
+ s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
+
break;
case SNDRV_PCM_TRIGGER_STOP:
diff --git a/sound/soc/s3c24xx/s3c2443-ac97.c b/sound/soc/s3c24xx/s3c2443-ac97.c
index 3f03d5ddfacd..fc1beb0930b9 100644
--- a/sound/soc/s3c24xx/s3c2443-ac97.c
+++ b/sound/soc/s3c24xx/s3c2443-ac97.c
@@ -47,7 +47,7 @@ static struct s3c24xx_ac97_info s3c24xx_ac97;
static DECLARE_COMPLETION(ac97_completion);
static u32 codec_ready;
-static DECLARE_MUTEX(ac97_mutex);
+static DEFINE_MUTEX(ac97_mutex);
static unsigned short s3c2443_ac97_read(struct snd_ac97 *ac97,
unsigned short reg)
@@ -56,7 +56,7 @@ static unsigned short s3c2443_ac97_read(struct snd_ac97 *ac97,
u32 ac_codec_cmd;
u32 stat, addr, data;
- down(&ac97_mutex);
+ mutex_lock(&ac97_mutex);
codec_ready = S3C_AC97_GLBSTAT_CODECREADY;
ac_codec_cmd = readl(s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD);
@@ -79,7 +79,7 @@ static unsigned short s3c2443_ac97_read(struct snd_ac97 *ac97,
printk(KERN_ERR "s3c24xx-ac97: req addr = %02x,"
" rep addr = %02x\n", reg, addr);
- up(&ac97_mutex);
+ mutex_unlock(&ac97_mutex);
return (unsigned short)data;
}
@@ -90,7 +90,7 @@ static void s3c2443_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
u32 ac_glbctrl;
u32 ac_codec_cmd;
- down(&ac97_mutex);
+ mutex_lock(&ac97_mutex);
codec_ready = S3C_AC97_GLBSTAT_CODECREADY;
ac_codec_cmd = readl(s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD);
@@ -109,7 +109,7 @@ static void s3c2443_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ;
writel(ac_codec_cmd, s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD);
- up(&ac97_mutex);
+ mutex_unlock(&ac97_mutex);
}
@@ -290,6 +290,9 @@ static int s3c2443_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
u32 ac_glbctrl;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int channel = ((struct s3c24xx_pcm_dma_params *)
+ rtd->dai->cpu_dai->dma_data)->channel;
ac_glbctrl = readl(s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
switch (cmd) {
@@ -312,6 +315,8 @@ static int s3c2443_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
}
writel(ac_glbctrl, s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
+ s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
+
return 0;
}
@@ -334,6 +339,9 @@ static int s3c2443_ac97_mic_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
u32 ac_glbctrl;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int channel = ((struct s3c24xx_pcm_dma_params *)
+ rtd->dai->cpu_dai->dma_data)->channel;
ac_glbctrl = readl(s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
switch (cmd) {
@@ -349,6 +357,8 @@ static int s3c2443_ac97_mic_trigger(struct snd_pcm_substream *substream,
}
writel(ac_glbctrl, s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
+ s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
+
return 0;
}
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.c b/sound/soc/s3c24xx/s3c24xx-i2s.c
index 556e35f0ab73..40e2c4790f0d 100644
--- a/sound/soc/s3c24xx/s3c24xx-i2s.c
+++ b/sound/soc/s3c24xx/s3c24xx-i2s.c
@@ -279,6 +279,9 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int channel = ((struct s3c24xx_pcm_dma_params *)
+ rtd->dai->cpu_dai->dma_data)->channel;
pr_debug("Entered %s\n", __func__);
@@ -296,6 +299,8 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
s3c24xx_snd_rxctrl(1);
else
s3c24xx_snd_txctrl(1);
+
+ s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
diff --git a/sound/soc/s3c24xx/s3c24xx-pcm.c b/sound/soc/s3c24xx/s3c24xx-pcm.c
index eecfa5eba06b..5cbbdc80fde3 100644
--- a/sound/soc/s3c24xx/s3c24xx-pcm.c
+++ b/sound/soc/s3c24xx/s3c24xx-pcm.c
@@ -255,7 +255,6 @@ static int s3c24xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
prtd->state |= ST_RUNNING;
s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
- s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STARTED);
break;
case SNDRV_PCM_TRIGGER_STOP:
@@ -318,6 +317,7 @@ static int s3c24xx_pcm_open(struct snd_pcm_substream *substream)
pr_debug("Entered %s\n", __func__);
+ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
snd_soc_set_runtime_hwparams(substream, &s3c24xx_pcm_hardware);
prtd = kzalloc(sizeof(struct s3c24xx_runtime_data), GFP_KERNEL);
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c
new file mode 100644
index 000000000000..1966e0d5652d
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.c
@@ -0,0 +1,394 @@
+/* sound/soc/s3c24xx/s3c24xx_simtec.c
+ *
+ * Copyright 2009 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <plat/audio-simtec.h>
+
+#include "s3c24xx-pcm.h"
+#include "s3c24xx-i2s.h"
+#include "s3c24xx_simtec.h"
+
+static struct s3c24xx_audio_simtec_pdata *pdata;
+static struct clk *xtal_clk;
+
+static int spk_gain;
+static int spk_unmute;
+
+/**
+ * speaker_gain_get - read the speaker gain setting.
+ * @kcontrol: The control for the speaker gain.
+ * @ucontrol: The value that needs to be updated.
+ *
+ * Read the value for the AMP gain control.
+ */
+static int speaker_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = spk_gain;
+ return 0;
+}
+
+/**
+ * speaker_gain_set - set the value of the speaker amp gain
+ * @value: The value to write.
+ */
+static void speaker_gain_set(int value)
+{
+ gpio_set_value_cansleep(pdata->amp_gain[0], value & 1);
+ gpio_set_value_cansleep(pdata->amp_gain[1], value >> 1);
+}
+
+/**
+ * speaker_gain_put - set the speaker gain setting.
+ * @kcontrol: The control for the speaker gain.
+ * @ucontrol: The value that needs to be set.
+ *
+ * Set the value of the speaker gain from the specified
+ * @ucontrol setting.
+ *
+ * Note: if the speaker amp is muted, we do not set a gain value, as at
+ * least one of the fitted ICs will try to power up even if the main
+ * control is set to off.
+ */
+static int speaker_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int value = ucontrol->value.integer.value[0];
+
+ spk_gain = value;
+
+ if (!spk_unmute)
+ speaker_gain_set(value);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new amp_gain_controls[] = {
+ SOC_SINGLE_EXT("Speaker Gain", 0, 0, 3, 0,
+ speaker_gain_get, speaker_gain_put),
+};
+
+/**
+ * spk_unmute_state - set the unmute state of the speaker
+ * @to: zero to mute, non-zero to unmute.
+ */
+static void spk_unmute_state(int to)
+{
+ pr_debug("%s: to=%d\n", __func__, to);
+
+ spk_unmute = to;
+ gpio_set_value(pdata->amp_gpio, to);
+
+	/* if we're unmuting, also re-set the gain */
+ if (to && pdata->amp_gain[0] > 0)
+ speaker_gain_set(spk_gain);
+}
+
+/**
+ * speaker_unmute_get - read the speaker unmute setting.
+ * @kcontrol: The control for the speaker gain.
+ * @ucontrol: The value that needs to be updated.
+ *
+ * Read the value for the AMP gain control.
+ */
+static int speaker_unmute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = spk_unmute;
+ return 0;
+}
+
+/**
+ * speaker_unmute_put - set the speaker unmute setting.
+ * @kcontrol: The control for the speaker gain.
+ * @ucontrol: The value that needs to be set.
+ *
+ * Set the value of the speaker gain from the specified
+ * @ucontrol setting.
+ */
+static int speaker_unmute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ spk_unmute_state(ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+/* This is added as a manual control as the speaker amps create clicks
+ * when their power state is changed, which are far more noticeable than
+ * anything produced by the CODEC itself.
+ */
+static const struct snd_kcontrol_new amp_unmute_controls[] = {
+ SOC_SINGLE_EXT("Speaker Switch", 0, 0, 1, 0,
+ speaker_unmute_get, speaker_unmute_put),
+};
+
+void simtec_audio_init(struct snd_soc_codec *codec)
+{
+ if (pdata->amp_gpio > 0) {
+ pr_debug("%s: adding amp routes\n", __func__);
+
+ snd_soc_add_controls(codec, amp_unmute_controls,
+ ARRAY_SIZE(amp_unmute_controls));
+ }
+
+ if (pdata->amp_gain[0] > 0) {
+ pr_debug("%s: adding amp controls\n", __func__);
+ snd_soc_add_controls(codec, amp_gain_controls,
+ ARRAY_SIZE(amp_gain_controls));
+ }
+}
+EXPORT_SYMBOL_GPL(simtec_audio_init);
+
+#define CODEC_CLOCK 12000000
+
+/**
+ * simtec_hw_params - update hardware parameters
+ * @substream: The audio substream instance.
+ * @params: The parameters requested.
+ *
+ * Update the codec data routing and configuration settings
+ * from the supplied data.
+ */
+static int simtec_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int ret;
+
+ /* Set the CODEC as the bus clock master, I2S */
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret) {
+ pr_err("%s: failed set cpu dai format\n", __func__);
+ return ret;
+ }
+
+ /* Set the CODEC as the bus clock master */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+ if (ret) {
+ pr_err("%s: failed set codec dai format\n", __func__);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0,
+ CODEC_CLOCK, SND_SOC_CLOCK_IN);
+ if (ret) {
+ pr_err( "%s: failed setting codec sysclk\n", __func__);
+ return ret;
+ }
+
+ if (pdata->use_mpllin) {
+ ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_MPLL,
+ 0, SND_SOC_CLOCK_OUT);
+
+ if (ret) {
+ pr_err("%s: failed to set MPLLin as clksrc\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (pdata->output_cdclk) {
+ int cdclk_scale;
+
+ cdclk_scale = clk_get_rate(xtal_clk) / CODEC_CLOCK;
+ cdclk_scale--;
+
+ ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+ cdclk_scale);
+ }
+
+ return 0;
+}
+
+static int simtec_call_startup(struct s3c24xx_audio_simtec_pdata *pd)
+{
+	/* call any board-supplied startup code; this currently only
+	 * covers the BAST/VR1000, which have a CPLD in the way of the
+	 * LRCLK */
+ if (pd->startup)
+ pd->startup();
+
+ return 0;
+}
+
+static struct snd_soc_ops simtec_snd_ops = {
+ .hw_params = simtec_hw_params,
+};
+
+/**
+ * attach_gpio_amp - get and configure the necessary gpios
+ * @dev: The device we're probing.
+ * @pd: The platform data supplied by the board.
+ *
+ * If there is a GPIO based amplifier attached to the board, claim
+ * the necessary GPIO lines for it, and set default values.
+ */
+static int attach_gpio_amp(struct device *dev,
+ struct s3c24xx_audio_simtec_pdata *pd)
+{
+ int ret;
+
+ /* attach gpio amp gain (if any) */
+ if (pdata->amp_gain[0] > 0) {
+ ret = gpio_request(pd->amp_gain[0], "gpio-amp-gain0");
+ if (ret) {
+ dev_err(dev, "cannot get amp gpio gain0\n");
+ return ret;
+ }
+
+ ret = gpio_request(pd->amp_gain[1], "gpio-amp-gain1");
+ if (ret) {
+ dev_err(dev, "cannot get amp gpio gain1\n");
+ gpio_free(pdata->amp_gain[0]);
+ return ret;
+ }
+
+ gpio_direction_output(pd->amp_gain[0], 0);
+ gpio_direction_output(pd->amp_gain[1], 0);
+ }
+
+	/* note, currently we assume GPA0 isn't a valid amp GPIO */
+ if (pdata->amp_gpio > 0) {
+ ret = gpio_request(pd->amp_gpio, "gpio-amp");
+ if (ret) {
+ dev_err(dev, "cannot get amp gpio %d (%d)\n",
+ pd->amp_gpio, ret);
+ goto err_amp;
+ }
+
+ /* set the amp off at startup */
+ spk_unmute_state(0);
+ }
+
+ return 0;
+
+err_amp:
+ if (pd->amp_gain[0] > 0) {
+ gpio_free(pd->amp_gain[0]);
+ gpio_free(pd->amp_gain[1]);
+ }
+
+ return ret;
+}
+
+static void detach_gpio_amp(struct s3c24xx_audio_simtec_pdata *pd)
+{
+ if (pd->amp_gain[0] > 0) {
+ gpio_free(pd->amp_gain[0]);
+ gpio_free(pd->amp_gain[1]);
+ }
+
+ if (pd->amp_gpio > 0)
+ gpio_free(pd->amp_gpio);
+}
+
+#ifdef CONFIG_PM
+int simtec_audio_resume(struct device *dev)
+{
+ simtec_call_startup(pdata);
+ return 0;
+}
+
+struct dev_pm_ops simtec_audio_pmops = {
+ .resume = simtec_audio_resume,
+};
+EXPORT_SYMBOL_GPL(simtec_audio_pmops);
+#endif
+
+int __devinit simtec_audio_core_probe(struct platform_device *pdev,
+ struct snd_soc_device *socdev)
+{
+ struct platform_device *snd_dev;
+ int ret;
+
+ socdev->card->dai_link->ops = &simtec_snd_ops;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -EINVAL;
+ }
+
+ simtec_call_startup(pdata);
+
+ xtal_clk = clk_get(&pdev->dev, "xtal");
+ if (IS_ERR(xtal_clk)) {
+ dev_err(&pdev->dev, "could not get clkout0\n");
+ return -EINVAL;
+ }
+
+ dev_info(&pdev->dev, "xtal rate is %ld\n", clk_get_rate(xtal_clk));
+
+ ret = attach_gpio_amp(&pdev->dev, pdata);
+ if (ret)
+ goto err_clk;
+
+ snd_dev = platform_device_alloc("soc-audio", -1);
+ if (!snd_dev) {
+ dev_err(&pdev->dev, "failed to alloc soc-audio devicec\n");
+ ret = -ENOMEM;
+ goto err_gpio;
+ }
+
+ platform_set_drvdata(snd_dev, socdev);
+ socdev->dev = &snd_dev->dev;
+
+ ret = platform_device_add(snd_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add soc-audio dev\n");
+ goto err_pdev;
+ }
+
+ platform_set_drvdata(pdev, snd_dev);
+ return 0;
+
+err_pdev:
+ platform_device_put(snd_dev);
+
+err_gpio:
+ detach_gpio_amp(pdata);
+
+err_clk:
+ clk_put(xtal_clk);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(simtec_audio_core_probe);
+
+int __devexit simtec_audio_remove(struct platform_device *pdev)
+{
+ struct platform_device *snd_dev = platform_get_drvdata(pdev);
+
+ platform_device_unregister(snd_dev);
+
+ detach_gpio_amp(pdata);
+ clk_put(xtal_clk);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(simtec_audio_remove);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("ALSA SoC Simtec Audio common support");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/s3c24xx/s3c24xx_simtec.h
new file mode 100644
index 000000000000..2714203af161
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.h
@@ -0,0 +1,22 @@
+/* sound/soc/s3c24xx/s3c24xx_simtec.h
+ *
+ * Copyright 2009 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+extern void simtec_audio_init(struct snd_soc_codec *codec);
+
+extern int simtec_audio_core_probe(struct platform_device *pdev,
+ struct snd_soc_device *socdev);
+
+extern int simtec_audio_remove(struct platform_device *pdev);
+
+#ifdef CONFIG_PM
+extern struct dev_pm_ops simtec_audio_pmops;
+#define simtec_audio_pm &simtec_audio_pmops
+#else
+#define simtec_audio_pm NULL
+#endif
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c b/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
new file mode 100644
index 000000000000..8346bd96eaf5
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
@@ -0,0 +1,153 @@
+/* sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
+ *
+ * Copyright 2009 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <plat/audio-simtec.h>
+
+#include "s3c24xx-pcm.h"
+#include "s3c24xx-i2s.h"
+#include "s3c24xx_simtec.h"
+
+#include "../codecs/tlv320aic3x.h"
+
+static const struct snd_soc_dapm_widget dapm_widgets[] = {
+ SND_SOC_DAPM_LINE("GSM Out", NULL),
+ SND_SOC_DAPM_LINE("GSM In", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
+ SND_SOC_DAPM_LINE("Line Out", NULL),
+ SND_SOC_DAPM_LINE("ZV", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route base_map[] = {
+ /* Headphone connected to HP{L,R}OUT and HP{L,R}COM */
+
+ { "Headphone Jack", NULL, "HPLOUT" },
+ { "Headphone Jack", NULL, "HPLCOM" },
+ { "Headphone Jack", NULL, "HPROUT" },
+ { "Headphone Jack", NULL, "HPRCOM" },
+
+ /* ZV connected to Line1 */
+
+ { "LINE1L", NULL, "ZV" },
+ { "LINE1R", NULL, "ZV" },
+
+ /* Line In connected to Line2 */
+
+ { "LINE2L", NULL, "Line In" },
+ { "LINE2R", NULL, "Line In" },
+
+ /* Microphone connected to MIC3R and MIC_BIAS */
+
+ { "MIC3L", NULL, "Mic Jack" },
+
+ /* GSM connected to MONO_LOUT and MIC3L (in) */
+
+ { "GSM Out", NULL, "MONO_LOUT" },
+ { "MIC3L", NULL, "GSM In" },
+
+	/* Speaker is connected to LINEOUT{LN,LP,RN,RP}; however, we are
+	 * not using DAPM to power it up and down, as it makes a click
+	 * when powering up. */
+};
+
+/**
+ * simtec_hermes_init - initialise and add controls
+ * @codec: The codec instance to attach to.
+ *
+ * Attach our controls and configure the necessary codec
+ * mappings for our sound card instance.
+*/
+static int simtec_hermes_init(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(codec, dapm_widgets,
+ ARRAY_SIZE(dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
+
+ snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+ snd_soc_dapm_enable_pin(codec, "Line In");
+ snd_soc_dapm_enable_pin(codec, "Line Out");
+ snd_soc_dapm_enable_pin(codec, "Mic Jack");
+
+ simtec_audio_init(codec);
+ snd_soc_dapm_sync(codec);
+
+ return 0;
+}
+
+static struct aic3x_setup_data codec_setup = {
+};
+
+static struct snd_soc_dai_link simtec_dai_aic33 = {
+ .name = "tlv320aic33",
+ .stream_name = "TLV320AIC33",
+ .cpu_dai = &s3c24xx_i2s_dai,
+ .codec_dai = &aic3x_dai,
+ .init = simtec_hermes_init,
+};
+
+/* simtec audio machine driver */
+static struct snd_soc_card snd_soc_machine_simtec_aic33 = {
+ .name = "Simtec-Hermes",
+ .platform = &s3c24xx_soc_platform,
+ .dai_link = &simtec_dai_aic33,
+ .num_links = 1,
+};
+
+/* simtec audio subsystem */
+static struct snd_soc_device simtec_snd_devdata_aic33 = {
+ .card = &snd_soc_machine_simtec_aic33,
+ .codec_dev = &soc_codec_dev_aic3x,
+ .codec_data = &codec_setup,
+};
+
+static int __devinit simtec_audio_hermes_probe(struct platform_device *pd)
+{
+ dev_info(&pd->dev, "probing....\n");
+ return simtec_audio_core_probe(pd, &simtec_snd_devdata_aic33);
+}
+
+static struct platform_driver simtec_audio_hermes_platdrv = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "s3c24xx-simtec-hermes-snd",
+ .pm = simtec_audio_pm,
+ },
+ .probe = simtec_audio_hermes_probe,
+ .remove = __devexit_p(simtec_audio_remove),
+};
+
+MODULE_ALIAS("platform:s3c24xx-simtec-hermes-snd");
+
+static int __init simtec_hermes_modinit(void)
+{
+ return platform_driver_register(&simtec_audio_hermes_platdrv);
+}
+
+static void __exit simtec_hermes_modexit(void)
+{
+ platform_driver_unregister(&simtec_audio_hermes_platdrv);
+}
+
+module_init(simtec_hermes_modinit);
+module_exit(simtec_hermes_modexit);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("ALSA SoC Simtec Audio support");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c b/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
new file mode 100644
index 000000000000..25797e096175
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
@@ -0,0 +1,137 @@
+/* sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
+ *
+ * Copyright 2009 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <plat/audio-simtec.h>
+
+#include "s3c24xx-pcm.h"
+#include "s3c24xx-i2s.h"
+#include "s3c24xx_simtec.h"
+
+#include "../codecs/tlv320aic23.h"
+
+/* supported machines:
+ *
+ * Machine Connections AMP
+ * ------- ----------- ---
+ * BAST MIC, HPOUT, LOUT, LIN TPA2001D1 (HPOUTL,R) (gain hardwired)
+ * VR1000 HPOUT, LIN None
+ * VR2000 LIN, LOUT, MIC, HP LM4871 (HPOUTL,R)
+ * DePicture LIN, LOUT, MIC, HP LM4871 (HPOUTL,R)
+ * Anubis LIN, LOUT, MIC, HP TPA2001D1 (HPOUTL,R)
+ */
+
+static const struct snd_soc_dapm_widget dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
+ SND_SOC_DAPM_LINE("Line Out", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route base_map[] = {
+ { "Headphone Jack", NULL, "LHPOUT"},
+ { "Headphone Jack", NULL, "RHPOUT"},
+
+ { "Line Out", NULL, "LOUT" },
+ { "Line Out", NULL, "ROUT" },
+
+ { "LLINEIN", NULL, "Line In"},
+ { "RLINEIN", NULL, "Line In"},
+
+ { "MICIN", NULL, "Mic Jack"},
+};
+
+/**
+ * simtec_tlv320aic23_init - initialise and add controls
+ * @codec: The codec instance to attach to.
+ *
+ * Attach our controls and configure the necessary codec
+ * mappings for our sound card instance.
+*/
+static int simtec_tlv320aic23_init(struct snd_soc_codec *codec)
+{
+ snd_soc_dapm_new_controls(codec, dapm_widgets,
+ ARRAY_SIZE(dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
+
+ snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+ snd_soc_dapm_enable_pin(codec, "Line In");
+ snd_soc_dapm_enable_pin(codec, "Line Out");
+ snd_soc_dapm_enable_pin(codec, "Mic Jack");
+
+ simtec_audio_init(codec);
+ snd_soc_dapm_sync(codec);
+
+ return 0;
+}
+
+static struct snd_soc_dai_link simtec_dai_aic23 = {
+ .name = "tlv320aic23",
+ .stream_name = "TLV320AIC23",
+ .cpu_dai = &s3c24xx_i2s_dai,
+ .codec_dai = &tlv320aic23_dai,
+ .init = simtec_tlv320aic23_init,
+};
+
+/* simtec audio machine driver */
+static struct snd_soc_card snd_soc_machine_simtec_aic23 = {
+ .name = "Simtec",
+ .platform = &s3c24xx_soc_platform,
+ .dai_link = &simtec_dai_aic23,
+ .num_links = 1,
+};
+
+/* simtec audio subsystem */
+static struct snd_soc_device simtec_snd_devdata_aic23 = {
+ .card = &snd_soc_machine_simtec_aic23,
+ .codec_dev = &soc_codec_dev_tlv320aic23,
+};
+
+static int __devinit simtec_audio_tlv320aic23_probe(struct platform_device *pd)
+{
+ return simtec_audio_core_probe(pd, &simtec_snd_devdata_aic23);
+}
+
+static struct platform_driver simtec_audio_tlv320aic23_platdrv = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "s3c24xx-simtec-tlv320aic23",
+ .pm = simtec_audio_pm,
+ },
+ .probe = simtec_audio_tlv320aic23_probe,
+ .remove = __devexit_p(simtec_audio_remove),
+};
+
+MODULE_ALIAS("platform:s3c24xx-simtec-tlv320aic23");
+
+static int __init simtec_tlv320aic23_modinit(void)
+{
+ return platform_driver_register(&simtec_audio_tlv320aic23_platdrv);
+}
+
+static void __exit simtec_tlv320aic23_modexit(void)
+{
+ platform_driver_unregister(&simtec_audio_tlv320aic23_platdrv);
+}
+
+module_init(simtec_tlv320aic23_modinit);
+module_exit(simtec_tlv320aic23_modexit);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("ALSA SoC Simtec Audio support");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
index b5f95f9781c1..c1b40ac22c05 100644
--- a/sound/soc/s6000/s6105-ipcam.c
+++ b/sound/soc/s6000/s6105-ipcam.c
@@ -14,6 +14,7 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -189,8 +190,6 @@ static struct snd_soc_card snd_soc_card_s6105 = {
/* s6105 audio private data */
static struct aic3x_setup_data s6105_aic3x_setup = {
- .i2c_bus = 0,
- .i2c_address = 0x18,
};
/* s6105 audio subsystem */
@@ -211,10 +210,19 @@ static struct s6000_snd_platform_data __initdata s6105_snd_data = {
static struct platform_device *s6105_snd_device;
+/* temporary i2c device creation until this can be moved into the machine
+ * support file.
+*/
+static struct i2c_board_info i2c_device[] = {
+ { I2C_BOARD_INFO("tlv320aic33", 0x18), }
+};
+
static int __init s6105_init(void)
{
int ret;
+ i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device));
+
s6105_snd_device = platform_device_alloc("soc-audio", -1);
if (!s6105_snd_device)
return -ENOMEM;
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 54bd604012af..9154b4363db3 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -20,7 +20,12 @@ config SND_SOC_SH4_HAC
config SND_SOC_SH4_SSI
tristate
-
+config SND_SOC_SH4_FSI
+ tristate "SH4 FSI support"
+ depends on CPU_SUBTYPE_SH7724
+ select SH_DMA
+ help
+ This option enables FSI sound support
##
## Boards
@@ -35,4 +40,12 @@ config SND_SH7760_AC97
This option enables generic sound support for the first
AC97 unit of the SH7760.
+config SND_FSI_AK4642
+ bool "FSI-AK4642 sound support"
+ depends on SND_SOC_SH4_FSI
+ select SND_SOC_AK4642
+ help
+ This option enables generic sound support for the
+ FSI - AK4642 unit
+
endmenu
diff --git a/sound/soc/sh/Makefile b/sound/soc/sh/Makefile
index a8e8ab81cc6a..a6997872f24e 100644
--- a/sound/soc/sh/Makefile
+++ b/sound/soc/sh/Makefile
@@ -5,10 +5,14 @@ obj-$(CONFIG_SND_SOC_PCM_SH7760) += snd-soc-dma-sh7760.o
## audio units found on some SH-4
snd-soc-hac-objs := hac.o
snd-soc-ssi-objs := ssi.o
+snd-soc-fsi-objs := fsi.o
obj-$(CONFIG_SND_SOC_SH4_HAC) += snd-soc-hac.o
obj-$(CONFIG_SND_SOC_SH4_SSI) += snd-soc-ssi.o
+obj-$(CONFIG_SND_SOC_SH4_FSI) += snd-soc-fsi.o
## boards
snd-soc-sh7760-ac97-objs := sh7760-ac97.o
+snd-soc-fsi-ak4642-objs := fsi-ak4642.o
obj-$(CONFIG_SND_SH7760_AC97) += snd-soc-sh7760-ac97.o
+obj-$(CONFIG_SND_FSI_AK4642) += snd-soc-fsi-ak4642.o
diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c
new file mode 100644
index 000000000000..c7af09729c6e
--- /dev/null
+++ b/sound/soc/sh/fsi-ak4642.c
@@ -0,0 +1,107 @@
+/*
+ * FSI-AK464x sound support for ms7724se
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <sound/sh_fsi.h>
+#include <../sound/soc/codecs/ak4642.h>
+
+static struct snd_soc_dai_link fsi_dai_link = {
+ .name = "AK4642",
+ .stream_name = "AK4642",
+ .cpu_dai = &fsi_soc_dai[0], /* fsi */
+ .codec_dai = &ak4642_dai,
+ .ops = NULL,
+};
+
+static struct snd_soc_card fsi_soc_card = {
+ .name = "FSI",
+ .platform = &fsi_soc_platform,
+ .dai_link = &fsi_dai_link,
+ .num_links = 1,
+};
+
+static struct snd_soc_device fsi_snd_devdata = {
+ .card = &fsi_soc_card,
+ .codec_dev = &soc_codec_dev_ak4642,
+};
+
+#define AK4642_BUS 0
+#define AK4642_ADR 0x12
+static int ak4642_add_i2c_device(void)
+{
+ struct i2c_board_info info;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ info.addr = AK4642_ADR;
+ strlcpy(info.type, "ak4642", I2C_NAME_SIZE);
+
+ adapter = i2c_get_adapter(AK4642_BUS);
+ if (!adapter) {
+ printk(KERN_DEBUG "can't get i2c adapter\n");
+ return -ENODEV;
+ }
+
+ client = i2c_new_device(adapter, &info);
+ i2c_put_adapter(adapter);
+ if (!client) {
+ printk(KERN_DEBUG "can't add i2c device\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct platform_device *fsi_snd_device;
+
+static int __init fsi_ak4642_init(void)
+{
+ int ret = -ENOMEM;
+
+ ak4642_add_i2c_device();
+
+ fsi_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!fsi_snd_device)
+ goto out;
+
+ platform_set_drvdata(fsi_snd_device,
+ &fsi_snd_devdata);
+ fsi_snd_devdata.dev = &fsi_snd_device->dev;
+ ret = platform_device_add(fsi_snd_device);
+
+ if (ret)
+ platform_device_put(fsi_snd_device);
+
+out:
+ return ret;
+}
+
+static void __exit fsi_ak4642_exit(void)
+{
+ platform_device_unregister(fsi_snd_device);
+}
+
+module_init(fsi_ak4642_init);
+module_exit(fsi_ak4642_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic SH4 FSI-AK4642 sound card");
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
new file mode 100644
index 000000000000..44123248b630
--- /dev/null
+++ b/sound/soc/sh/fsi.c
@@ -0,0 +1,1004 @@
+/*
+ * Fifo-attached Serial Interface (FSI) support for SH7724
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on ssi.c
+ * Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+#include <sound/sh_fsi.h>
+#include <asm/atomic.h>
+#include <asm/dma.h>
+#include <asm/dma-sh.h>
+
+#define DO_FMT 0x0000
+#define DOFF_CTL 0x0004
+#define DOFF_ST 0x0008
+#define DI_FMT 0x000C
+#define DIFF_CTL 0x0010
+#define DIFF_ST 0x0014
+#define CKG1 0x0018
+#define CKG2 0x001C
+#define DIDT 0x0020
+#define DODT 0x0024
+#define MUTE_ST 0x0028
+#define REG_END MUTE_ST
+
+#define INT_ST 0x0200
+#define IEMSK 0x0204
+#define IMSK 0x0208
+#define MUTE 0x020C
+#define CLK_RST 0x0210
+#define SOFT_RST 0x0214
+#define MREG_START INT_ST
+#define MREG_END SOFT_RST
+
+/* DO_FMT */
+/* DI_FMT */
+#define CR_FMT(param) ((param) << 4)
+# define CR_MONO 0x0
+# define CR_MONO_D 0x1
+# define CR_PCM 0x2
+# define CR_I2S 0x3
+# define CR_TDM 0x4
+# define CR_TDM_D 0x5
+
+/* DOFF_CTL */
+/* DIFF_CTL */
+#define IRQ_HALF 0x00100000
+#define FIFO_CLR 0x00000001
+
+/* DOFF_ST */
+#define ERR_OVER 0x00000010
+#define ERR_UNDER 0x00000001
+
+/* CLK_RST */
+#define B_CLK 0x00000010
+#define A_CLK 0x00000001
+
+/* INT_ST */
+#define INT_B_IN (1 << 12)
+#define INT_B_OUT (1 << 8)
+#define INT_A_IN (1 << 4)
+#define INT_A_OUT (1 << 0)
+
+#define FSI_RATES SNDRV_PCM_RATE_8000_96000
+
+#define FSI_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
+
+/************************************************************************
+
+
+ struct
+
+
+************************************************************************/
+struct fsi_priv {
+ void __iomem *base;
+ struct snd_pcm_substream *substream;
+
+ int fifo_max;
+ int chan;
+ int dma_chan;
+
+ int byte_offset;
+ int period_len;
+ int buffer_len;
+ int periods;
+};
+
+struct fsi_master {
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+ struct fsi_priv fsia;
+ struct fsi_priv fsib;
+ struct sh_fsi_platform_info *info;
+};
+
+static struct fsi_master *master;
+
+/************************************************************************
+
+
+ basic read write function
+
+
+************************************************************************/
+static int __fsi_reg_write(u32 reg, u32 data)
+{
+ /* valid data area is 24bit */
+ data &= 0x00ffffff;
+
+ return ctrl_outl(data, reg);
+}
+
+static u32 __fsi_reg_read(u32 reg)
+{
+ return ctrl_inl(reg);
+}
+
+static int __fsi_reg_mask_set(u32 reg, u32 mask, u32 data)
+{
+ u32 val = __fsi_reg_read(reg);
+
+ val &= ~mask;
+ val |= data & mask;
+
+ return __fsi_reg_write(reg, val);
+}
+
+static int fsi_reg_write(struct fsi_priv *fsi, u32 reg, u32 data)
+{
+ if (reg > REG_END)
+ return -1;
+
+ return __fsi_reg_write((u32)(fsi->base + reg), data);
+}
+
+static u32 fsi_reg_read(struct fsi_priv *fsi, u32 reg)
+{
+ if (reg > REG_END)
+ return 0;
+
+ return __fsi_reg_read((u32)(fsi->base + reg));
+}
+
+static int fsi_reg_mask_set(struct fsi_priv *fsi, u32 reg, u32 mask, u32 data)
+{
+ if (reg > REG_END)
+ return -1;
+
+ return __fsi_reg_mask_set((u32)(fsi->base + reg), mask, data);
+}
+
+static int fsi_master_write(u32 reg, u32 data)
+{
+ if ((reg < MREG_START) ||
+ (reg > MREG_END))
+ return -1;
+
+ return __fsi_reg_write((u32)(master->base + reg), data);
+}
+
+static u32 fsi_master_read(u32 reg)
+{
+ if ((reg < MREG_START) ||
+ (reg > MREG_END))
+ return 0;
+
+ return __fsi_reg_read((u32)(master->base + reg));
+}
+
+static int fsi_master_mask_set(u32 reg, u32 mask, u32 data)
+{
+ if ((reg < MREG_START) ||
+ (reg > MREG_END))
+ return -1;
+
+ return __fsi_reg_mask_set((u32)(master->base + reg), mask, data);
+}
+
+/************************************************************************
+
+
+ basic function
+
+
+************************************************************************/
+static struct fsi_priv *fsi_get(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct fsi_priv *fsi = NULL;
+
+ if (!substream || !master)
+ return NULL;
+
+ rtd = substream->private_data;
+ switch (rtd->dai->cpu_dai->id) {
+ case 0:
+ fsi = &master->fsia;
+ break;
+ case 1:
+ fsi = &master->fsib;
+ break;
+ }
+
+ return fsi;
+}
+
+static int fsi_is_port_a(struct fsi_priv *fsi)
+{
+ /* return
+ * 1 : port a
+ * 0 : port b
+ */
+
+ if (fsi == &master->fsia)
+ return 1;
+
+ return 0;
+}
+
+static u32 fsi_get_info_flags(struct fsi_priv *fsi)
+{
+ int is_porta = fsi_is_port_a(fsi);
+
+ return is_porta ? master->info->porta_flags :
+ master->info->portb_flags;
+}
+
+static int fsi_is_master_mode(struct fsi_priv *fsi, int is_play)
+{
+ u32 mode;
+ u32 flags = fsi_get_info_flags(fsi);
+
+ mode = is_play ? SH_FSI_OUT_SLAVE_MODE : SH_FSI_IN_SLAVE_MODE;
+
+ /* return
+ * 1 : master mode
+ * 0 : slave mode
+ */
+
+ return (mode & flags) != mode;
+}
+
+static u32 fsi_port_ab_io_bit(struct fsi_priv *fsi, int is_play)
+{
+ int is_porta = fsi_is_port_a(fsi);
+ u32 data;
+
+ if (is_porta)
+ data = is_play ? (1 << 0) : (1 << 4);
+ else
+ data = is_play ? (1 << 8) : (1 << 12);
+
+ return data;
+}
+
+static void fsi_stream_push(struct fsi_priv *fsi,
+ struct snd_pcm_substream *substream,
+ u32 buffer_len,
+ u32 period_len)
+{
+ fsi->substream = substream;
+ fsi->buffer_len = buffer_len;
+ fsi->period_len = period_len;
+ fsi->byte_offset = 0;
+ fsi->periods = 0;
+}
+
+static void fsi_stream_pop(struct fsi_priv *fsi)
+{
+ fsi->substream = NULL;
+ fsi->buffer_len = 0;
+ fsi->period_len = 0;
+ fsi->byte_offset = 0;
+ fsi->periods = 0;
+}
+
+static int fsi_get_fifo_residue(struct fsi_priv *fsi, int is_play)
+{
+ u32 status;
+ u32 reg = is_play ? DOFF_ST : DIFF_ST;
+ int residue;
+
+ status = fsi_reg_read(fsi, reg);
+ residue = 0x1ff & (status >> 8);
+ residue *= fsi->chan;
+
+ return residue;
+}
+
+static int fsi_get_residue(struct fsi_priv *fsi, int is_play)
+{
+ int residue;
+ int width;
+ struct snd_pcm_runtime *runtime;
+
+ runtime = fsi->substream->runtime;
+
+ /* get 1 channel data width */
+ width = frames_to_bytes(runtime, 1) / fsi->chan;
+
+ if (2 == width)
+ residue = fsi_get_fifo_residue(fsi, is_play);
+ else
+ residue = get_dma_residue(fsi->dma_chan);
+
+ return residue;
+}
+
+/************************************************************************
+
+
+ basic dma function
+
+
+************************************************************************/
+#define PORTA_DMA 0
+#define PORTB_DMA 1
+
+static int fsi_get_dma_chan(void)
+{
+ if (0 != request_dma(PORTA_DMA, "fsia"))
+ return -EIO;
+
+ if (0 != request_dma(PORTB_DMA, "fsib")) {
+ free_dma(PORTA_DMA);
+ return -EIO;
+ }
+
+ master->fsia.dma_chan = PORTA_DMA;
+ master->fsib.dma_chan = PORTB_DMA;
+
+ return 0;
+}
+
+static void fsi_free_dma_chan(void)
+{
+ dma_wait_for_completion(PORTA_DMA);
+ dma_wait_for_completion(PORTB_DMA);
+ free_dma(PORTA_DMA);
+ free_dma(PORTB_DMA);
+
+ master->fsia.dma_chan = -1;
+ master->fsib.dma_chan = -1;
+}
+
+/************************************************************************
+
+
+ ctrl function
+
+
+************************************************************************/
+static void fsi_irq_enable(struct fsi_priv *fsi, int is_play)
+{
+ u32 data = fsi_port_ab_io_bit(fsi, is_play);
+
+ fsi_master_mask_set(IMSK, data, data);
+ fsi_master_mask_set(IEMSK, data, data);
+}
+
+static void fsi_irq_disable(struct fsi_priv *fsi, int is_play)
+{
+ u32 data = fsi_port_ab_io_bit(fsi, is_play);
+
+ fsi_master_mask_set(IMSK, data, 0);
+ fsi_master_mask_set(IEMSK, data, 0);
+}
+
+static void fsi_clk_ctrl(struct fsi_priv *fsi, int enable)
+{
+ u32 val = fsi_is_port_a(fsi) ? (1 << 0) : (1 << 4);
+
+ if (enable)
+ fsi_master_mask_set(CLK_RST, val, val);
+ else
+ fsi_master_mask_set(CLK_RST, val, 0);
+}
+
+static void fsi_irq_init(struct fsi_priv *fsi, int is_play)
+{
+ u32 data;
+ u32 ctrl;
+
+ data = fsi_port_ab_io_bit(fsi, is_play);
+ ctrl = is_play ? DOFF_CTL : DIFF_CTL;
+
+ /* set IMSK */
+ fsi_irq_disable(fsi, is_play);
+
+ /* set interrupt generation factor */
+ fsi_reg_write(fsi, ctrl, IRQ_HALF);
+
+ /* clear FIFO */
+ fsi_reg_mask_set(fsi, ctrl, FIFO_CLR, FIFO_CLR);
+
+ /* clear interrupt factor */
+ fsi_master_mask_set(INT_ST, data, 0);
+}
+
+static void fsi_soft_all_reset(void)
+{
+ u32 status = fsi_master_read(SOFT_RST);
+
+ /* port AB reset */
+ status &= 0x000000ff;
+ fsi_master_write(SOFT_RST, status);
+ mdelay(10);
+
+ /* soft reset */
+ status &= 0x000000f0;
+ fsi_master_write(SOFT_RST, status);
+ status |= 0x00000001;
+ fsi_master_write(SOFT_RST, status);
+ mdelay(10);
+}
+
+static void fsi_16data_push(struct fsi_priv *fsi,
+ struct snd_pcm_runtime *runtime,
+ int send)
+{
+ u16 *dma_start;
+ u32 snd;
+ int i;
+
+ /* get dma start position for FSI */
+ dma_start = (u16 *)runtime->dma_area;
+ dma_start += fsi->byte_offset / 2;
+
+ /*
+ * soft dma
+	 * FSI cannot use the DMAC for 16-bit samples, so push by PIO
+ */
+ for (i = 0; i < send; i++) {
+ snd = (u32)dma_start[i];
+ fsi_reg_write(fsi, DODT, snd << 8);
+ }
+}
+
+static void fsi_32data_push(struct fsi_priv *fsi,
+ struct snd_pcm_runtime *runtime,
+ int send)
+{
+ u32 *dma_start;
+
+ /* get dma start position for FSI */
+ dma_start = (u32 *)runtime->dma_area;
+ dma_start += fsi->byte_offset / 4;
+
+ dma_wait_for_completion(fsi->dma_chan);
+ dma_configure_channel(fsi->dma_chan, (SM_INC|0x400|TS_32|TM_BUR));
+ dma_write(fsi->dma_chan, (u32)dma_start,
+ (u32)(fsi->base + DODT), send * 4);
+}
+
+/* playback interrupt */
+static int fsi_data_push(struct fsi_priv *fsi)
+{
+ struct snd_pcm_runtime *runtime;
+ struct snd_pcm_substream *substream = NULL;
+ int send;
+ int fifo_free;
+ int width;
+
+ if (!fsi ||
+ !fsi->substream ||
+ !fsi->substream->runtime)
+ return -EINVAL;
+
+ runtime = fsi->substream->runtime;
+
+	/* The FSI FIFO is limited in size, so this driver cannot
+	 * send a whole period's worth of data at a time.
+	 */
+ if (fsi->byte_offset >=
+ fsi->period_len * (fsi->periods + 1)) {
+
+ substream = fsi->substream;
+ fsi->periods = (fsi->periods + 1) % runtime->periods;
+
+ if (0 == fsi->periods)
+ fsi->byte_offset = 0;
+ }
+
+ /* get 1 channel data width */
+ width = frames_to_bytes(runtime, 1) / fsi->chan;
+
+ /* get send size for alsa */
+ send = (fsi->buffer_len - fsi->byte_offset) / width;
+
+ /* get FIFO free size */
+ fifo_free = (fsi->fifo_max * fsi->chan) - fsi_get_fifo_residue(fsi, 1);
+
+ /* size check */
+ if (fifo_free < send)
+ send = fifo_free;
+
+ if (2 == width)
+ fsi_16data_push(fsi, runtime, send);
+ else if (4 == width)
+ fsi_32data_push(fsi, runtime, send);
+ else
+ return -EINVAL;
+
+ fsi->byte_offset += send * width;
+
+ fsi_irq_enable(fsi, 1);
+
+ if (substream)
+ snd_pcm_period_elapsed(substream);
+
+ return 0;
+}
+
+static irqreturn_t fsi_interrupt(int irq, void *data)
+{
+ u32 status = fsi_master_read(SOFT_RST) & ~0x00000010;
+ u32 int_st = fsi_master_read(INT_ST);
+
+ /* clear irq status */
+ fsi_master_write(SOFT_RST, status);
+ fsi_master_write(SOFT_RST, status | 0x00000010);
+
+ if (int_st & INT_A_OUT)
+ fsi_data_push(&master->fsia);
+ if (int_st & INT_B_OUT)
+ fsi_data_push(&master->fsib);
+
+	fsi_master_write(INT_ST, 0x00000000);
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ dai ops
+
+
+************************************************************************/
+static int fsi_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct fsi_priv *fsi = fsi_get(substream);
+ const char *msg;
+ u32 flags = fsi_get_info_flags(fsi);
+ u32 fmt;
+ u32 reg;
+ u32 data;
+ int is_play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int is_master;
+ int ret = 0;
+
+ clk_enable(master->clk);
+
+ /* CKG1 */
+ data = is_play ? (1 << 0) : (1 << 4);
+ is_master = fsi_is_master_mode(fsi, is_play);
+ if (is_master)
+ fsi_reg_mask_set(fsi, CKG1, data, data);
+ else
+ fsi_reg_mask_set(fsi, CKG1, data, 0);
+
+ /* clock inversion (CKG2) */
+ data = 0;
+ switch (SH_FSI_INVERSION_MASK & flags) {
+ case SH_FSI_LRM_INV:
+ data = 1 << 12;
+ break;
+ case SH_FSI_BRM_INV:
+ data = 1 << 8;
+ break;
+ case SH_FSI_LRS_INV:
+ data = 1 << 4;
+ break;
+ case SH_FSI_BRS_INV:
+ data = 1 << 0;
+ break;
+ }
+ fsi_reg_write(fsi, CKG2, data);
+
+ /* do fmt, di fmt */
+ data = 0;
+ reg = is_play ? DO_FMT : DI_FMT;
+ fmt = is_play ? SH_FSI_GET_OFMT(flags) : SH_FSI_GET_IFMT(flags);
+ switch (fmt) {
+ case SH_FSI_FMT_MONO:
+ msg = "MONO";
+ data = CR_FMT(CR_MONO);
+ fsi->chan = 1;
+ break;
+ case SH_FSI_FMT_MONO_DELAY:
+ msg = "MONO Delay";
+ data = CR_FMT(CR_MONO_D);
+ fsi->chan = 1;
+ break;
+ case SH_FSI_FMT_PCM:
+ msg = "PCM";
+ data = CR_FMT(CR_PCM);
+ fsi->chan = 2;
+ break;
+ case SH_FSI_FMT_I2S:
+ msg = "I2S";
+ data = CR_FMT(CR_I2S);
+ fsi->chan = 2;
+ break;
+ case SH_FSI_FMT_TDM:
+ msg = "TDM";
+		fsi->chan = is_play ?
+			SH_FSI_GET_CH_O(flags) : SH_FSI_GET_CH_I(flags);
+		data = CR_FMT(CR_TDM) | (fsi->chan - 1);
+ break;
+ case SH_FSI_FMT_TDM_DELAY:
+ msg = "TDM Delay";
+		fsi->chan = is_play ?
+			SH_FSI_GET_CH_O(flags) : SH_FSI_GET_CH_I(flags);
+		data = CR_FMT(CR_TDM_D) | (fsi->chan - 1);
+ break;
+ default:
+ dev_err(dai->dev, "unknown format.\n");
+ return -EINVAL;
+ }
+
+ switch (fsi->chan) {
+ case 1:
+ fsi->fifo_max = 256;
+ break;
+ case 2:
+ fsi->fifo_max = 128;
+ break;
+ case 3:
+ case 4:
+ fsi->fifo_max = 64;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ fsi->fifo_max = 32;
+ break;
+ default:
+ dev_err(dai->dev, "channel size error.\n");
+ return -EINVAL;
+ }
+
+ fsi_reg_write(fsi, reg, data);
+ dev_dbg(dai->dev, "use %s format (%d channel) use %d DMAC\n",
+ msg, fsi->chan, fsi->dma_chan);
+
+ /*
+ * clear clk reset if master mode
+ */
+ if (is_master)
+ fsi_clk_ctrl(fsi, 1);
+
+ /* irq setting */
+ fsi_irq_init(fsi, is_play);
+
+ return ret;
+}
+
+static void fsi_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct fsi_priv *fsi = fsi_get(substream);
+ int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
+ fsi_irq_disable(fsi, is_play);
+ fsi_clk_ctrl(fsi, 0);
+
+ clk_disable(master->clk);
+}
+
+static int fsi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct fsi_priv *fsi = fsi_get(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ int ret = 0;
+
+ /* capture not supported */
+ if (!is_play)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ fsi_stream_push(fsi, substream,
+ frames_to_bytes(runtime, runtime->buffer_size),
+ frames_to_bytes(runtime, runtime->period_size));
+ ret = fsi_data_push(fsi);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ fsi_irq_disable(fsi, is_play);
+ fsi_stream_pop(fsi);
+ break;
+ }
+
+ return ret;
+}
+
+static struct snd_soc_dai_ops fsi_dai_ops = {
+ .startup = fsi_dai_startup,
+ .shutdown = fsi_dai_shutdown,
+ .trigger = fsi_dai_trigger,
+};
+
+/************************************************************************
+
+
+ pcm ops
+
+
+************************************************************************/
+static struct snd_pcm_hardware fsi_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = FSI_FMTS,
+ .rates = FSI_RATES,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = 64 * 1024,
+ .period_bytes_min = 32,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 32,
+ .fifo_size = 256,
+};
+
+static int fsi_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret = 0;
+
+ snd_soc_set_runtime_hwparams(substream, &fsi_pcm_hardware);
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+
+ return ret;
+}
+
+static int fsi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+}
+
+static int fsi_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct fsi_priv *fsi = fsi_get(substream);
+ int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ long location;
+
+ location = (fsi->byte_offset - 1) - fsi_get_residue(fsi, is_play);
+ if (location < 0)
+ location = 0;
+
+ return bytes_to_frames(runtime, location);
+}
+
+static struct snd_pcm_ops fsi_pcm_ops = {
+ .open = fsi_pcm_open,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = fsi_hw_params,
+ .hw_free = fsi_hw_free,
+ .pointer = fsi_pointer,
+};
+
+/************************************************************************
+
+
+ snd_soc_platform
+
+
+************************************************************************/
+#define PREALLOC_BUFFER (32 * 1024)
+#define PREALLOC_BUFFER_MAX (32 * 1024)
+
+static void fsi_pcm_free(struct snd_pcm *pcm)
+{
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static int fsi_pcm_new(struct snd_card *card,
+ struct snd_soc_dai *dai,
+ struct snd_pcm *pcm)
+{
+ /*
+	 * don't use SNDRV_DMA_TYPE_DEV, since it will oops the SH kernel
+ * in MMAP mode (i.e. aplay -M)
+ */
+ return snd_pcm_lib_preallocate_pages_for_all(
+ pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
+}
+
+/************************************************************************
+
+
+ alsa struct
+
+
+************************************************************************/
+struct snd_soc_dai fsi_soc_dai[] = {
+ {
+ .name = "FSIA",
+ .id = 0,
+ .playback = {
+ .rates = FSI_RATES,
+ .formats = FSI_FMTS,
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ /* capture not supported */
+ .ops = &fsi_dai_ops,
+ },
+ {
+ .name = "FSIB",
+ .id = 1,
+ .playback = {
+ .rates = FSI_RATES,
+ .formats = FSI_FMTS,
+ .channels_min = 1,
+ .channels_max = 8,
+ },
+ /* capture not supported */
+ .ops = &fsi_dai_ops,
+ },
+};
+EXPORT_SYMBOL_GPL(fsi_soc_dai);
+
+struct snd_soc_platform fsi_soc_platform = {
+ .name = "fsi-pcm",
+ .pcm_ops = &fsi_pcm_ops,
+ .pcm_new = fsi_pcm_new,
+ .pcm_free = fsi_pcm_free,
+};
+EXPORT_SYMBOL_GPL(fsi_soc_platform);
+
+/************************************************************************
+
+
+ platform function
+
+
+************************************************************************/
+static int fsi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ char clk_name[8];
+ unsigned int irq;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || !irq) {
+ dev_err(&pdev->dev, "Not enough FSI platform resources.\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master) {
+ dev_err(&pdev->dev, "Could not allocate master\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ master->base = ioremap_nocache(res->start, resource_size(res));
+ if (!master->base) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap FSI registers.\n");
+ goto exit_kfree;
+ }
+
+ master->irq = irq;
+ master->info = pdev->dev.platform_data;
+ master->fsia.base = master->base;
+ master->fsib.base = master->base + 0x40;
+
+ master->fsia.dma_chan = -1;
+ master->fsib.dma_chan = -1;
+
+ ret = fsi_get_dma_chan();
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot get dma api\n");
+ goto exit_iounmap;
+ }
+
+ /* FSI is based on SPU mstp */
+ snprintf(clk_name, sizeof(clk_name), "spu%d", pdev->id);
+ master->clk = clk_get(NULL, clk_name);
+ if (IS_ERR(master->clk)) {
+ dev_err(&pdev->dev, "cannot get %s mstp\n", clk_name);
+ ret = -EIO;
+ goto exit_free_dma;
+ }
+
+ fsi_soc_dai[0].dev = &pdev->dev;
+ fsi_soc_dai[1].dev = &pdev->dev;
+
+ fsi_soft_all_reset();
+
+ ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED, "fsi", master);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request err\n");
+ goto exit_free_dma;
+ }
+
+ ret = snd_soc_register_platform(&fsi_soc_platform);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot snd soc register\n");
+ goto exit_free_irq;
+ }
+
+ return snd_soc_register_dais(fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
+
+exit_free_irq:
+ free_irq(irq, master);
+exit_free_dma:
+ fsi_free_dma_chan();
+exit_iounmap:
+ iounmap(master->base);
+exit_kfree:
+ kfree(master);
+ master = NULL;
+exit:
+ return ret;
+}
+
+static int fsi_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_dais(fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
+ snd_soc_unregister_platform(&fsi_soc_platform);
+
+ clk_put(master->clk);
+
+ fsi_free_dma_chan();
+
+ free_irq(master->irq, master);
+
+ iounmap(master->base);
+ kfree(master);
+ master = NULL;
+ return 0;
+}
+
+static struct platform_driver fsi_driver = {
+ .driver = {
+ .name = "sh_fsi",
+ },
+ .probe = fsi_probe,
+ .remove = fsi_remove,
+};
+
+static int __init fsi_mobile_init(void)
+{
+ return platform_driver_register(&fsi_driver);
+}
+
+static void __exit fsi_mobile_exit(void)
+{
+ platform_driver_unregister(&fsi_driver);
+}
+module_init(fsi_mobile_init);
+module_exit(fsi_mobile_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SuperH onchip FSI audio driver");
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
new file mode 100644
index 000000000000..c8ceddc2a26c
--- /dev/null
+++ b/sound/soc/soc-cache.c
@@ -0,0 +1,218 @@
+/*
+ * soc-cache.c -- ASoC register cache helpers
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <sound/soc.h>
+
+static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u16 *cache = codec->reg_cache;
+ if (reg >= codec->reg_cache_size)
+ return -1;
+ return cache[reg];
+}
+
+static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u16 *cache = codec->reg_cache;
+ u8 data[2];
+ int ret;
+
+ BUG_ON(codec->volatile_register);
+
+ data[0] = (reg << 1) | ((value >> 8) & 0x0001);
+ data[1] = value & 0x00ff;
+
+ if (reg < codec->reg_cache_size)
+ cache[reg] = value;
+ ret = codec->hw_write(codec->control_data, data, 2);
+ if (ret == 2)
+ return 0;
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+#if defined(CONFIG_SPI_MASTER)
+static int snd_soc_7_9_spi_write(void *control_data, const char *data,
+ int len)
+{
+ struct spi_device *spi = control_data;
+ struct spi_transfer t;
+ struct spi_message m;
+ u8 msg[2];
+
+ if (len <= 0)
+ return 0;
+
+ msg[0] = data[0];
+ msg[1] = data[1];
+
+ spi_message_init(&m);
+ memset(&t, 0, (sizeof t));
+
+ t.tx_buf = &msg[0];
+ t.len = len;
+
+ spi_message_add_tail(&t, &m);
+ spi_sync(spi, &m);
+
+ return len;
+}
+#else
+#define snd_soc_7_9_spi_write NULL
+#endif
+
+static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ u16 *reg_cache = codec->reg_cache;
+ u8 data[3];
+
+ data[0] = reg;
+ data[1] = (value >> 8) & 0xff;
+ data[2] = value & 0xff;
+
+ if (!snd_soc_codec_volatile_register(codec, reg))
+ reg_cache[reg] = value;
+
+ if (codec->hw_write(codec->control_data, data, 3) == 3)
+ return 0;
+ else
+ return -EIO;
+}
+
+static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ u16 *cache = codec->reg_cache;
+
+ if (reg >= codec->reg_cache_size ||
+ snd_soc_codec_volatile_register(codec, reg))
+ return codec->hw_read(codec, reg);
+ else
+ return cache[reg];
+}
+
+#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+static unsigned int snd_soc_8_16_read_i2c(struct snd_soc_codec *codec,
+ unsigned int r)
+{
+ struct i2c_msg xfer[2];
+ u8 reg = r;
+ u16 data;
+ int ret;
+ struct i2c_client *client = codec->control_data;
+
+ /* Write register */
+ xfer[0].addr = client->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = client->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 2;
+ xfer[1].buf = (u8 *)&data;
+
+ ret = i2c_transfer(client->adapter, xfer, 2);
+ if (ret != 2) {
+ dev_err(&client->dev, "i2c_transfer() returned %d\n", ret);
+ return 0;
+ }
+
+ return (data >> 8) | ((data & 0xff) << 8);
+}
+#else
+#define snd_soc_8_16_read_i2c NULL
+#endif
+
+static struct {
+ int addr_bits;
+ int data_bits;
+ int (*write)(struct snd_soc_codec *codec, unsigned int, unsigned int);
+ int (*spi_write)(void *, const char *, int);
+ unsigned int (*read)(struct snd_soc_codec *, unsigned int);
+ unsigned int (*i2c_read)(struct snd_soc_codec *, unsigned int);
+} io_types[] = {
+ { 7, 9, snd_soc_7_9_write, snd_soc_7_9_spi_write, snd_soc_7_9_read },
+ { 8, 16, snd_soc_8_16_write, NULL, snd_soc_8_16_read,
+ snd_soc_8_16_read_i2c },
+};
+
+/**
+ * snd_soc_codec_set_cache_io: Set up standard I/O functions.
+ *
+ * @codec: CODEC to configure.
+ * @addr_bits: Number of bits of register address data.
+ * @data_bits: Number of bits of data per register.
+ * @control: Control bus used.
+ *
+ * Register formats are frequently shared between many I2C and SPI
+ * devices. In order to promote code reuse the ASoC core provides
+ * some standard implementations of CODEC read and write operations
+ * which can be set up using this function.
+ *
+ * The caller is responsible for allocating and initialising the
+ * actual cache.
+ *
+ * Note that at present this code cannot be used by CODECs with
+ * volatile registers.
+ */
+int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
+ int addr_bits, int data_bits,
+ enum snd_soc_control_type control)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(io_types); i++)
+ if (io_types[i].addr_bits == addr_bits &&
+ io_types[i].data_bits == data_bits)
+ break;
+ if (i == ARRAY_SIZE(io_types)) {
+ printk(KERN_ERR
+ "No I/O functions for %d bit address %d bit data\n",
+ addr_bits, data_bits);
+ return -EINVAL;
+ }
+
+ codec->write = io_types[i].write;
+ codec->read = io_types[i].read;
+
+ switch (control) {
+ case SND_SOC_CUSTOM:
+ break;
+
+ case SND_SOC_I2C:
+#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
+ codec->hw_write = (hw_write_t)i2c_master_send;
+#endif
+ if (io_types[i].i2c_read)
+ codec->hw_read = io_types[i].i2c_read;
+ break;
+
+ case SND_SOC_SPI:
+ if (io_types[i].spi_write)
+ codec->hw_write = io_types[i].spi_write;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
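For readers unfamiliar with the helper above, the fragment below is a minimal sketch of how a codec driver would be expected to call it: a hypothetical I2C codec using the 7-bit address / 9-bit data format serviced by the snd_soc_7_9_* functions. The my_codec_* names are illustrative assumptions, not part of this patch, and the declaration of snd_soc_codec_set_cache_io() is assumed to be exported via <sound/soc.h>.

#include <linux/i2c.h>
#include <sound/soc.h>

/* Hypothetical codec init fragment: the driver is assumed to have
 * allocated and initialised codec->reg_cache / reg_cache_size already. */
static int my_codec_init(struct snd_soc_codec *codec, struct i2c_client *i2c)
{
	int ret;

	codec->control_data = i2c;

	/* Select the standard cached 7-bit address / 9-bit data I/O over I2C */
	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
	if (ret < 0) {
		dev_err(&i2c->dev, "failed to set cache I/O: %d\n", ret);
		return ret;
	}

	/* codec->read() and codec->write() now go through the cache helpers */
	return 0;
}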
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1d70829464ef..7ff04ad2a97e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -28,6 +28,7 @@
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
+#include <sound/ac97_codec.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -619,8 +620,9 @@ static struct snd_pcm_ops soc_pcm_ops = {
#ifdef CONFIG_PM
/* powers down audio subsystem for suspend */
-static int soc_suspend(struct platform_device *pdev, pm_message_t state)
+static int soc_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_card *card = socdev->card;
struct snd_soc_platform *platform = card->platform;
@@ -656,7 +658,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
snd_pcm_suspend_all(card->dai_link[i].pcm);
if (card->suspend_pre)
- card->suspend_pre(pdev, state);
+ card->suspend_pre(pdev, PMSG_SUSPEND);
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
@@ -682,7 +684,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
}
if (codec_dev->suspend)
- codec_dev->suspend(pdev, state);
+ codec_dev->suspend(pdev, PMSG_SUSPEND);
for (i = 0; i < card->num_links; i++) {
struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
@@ -691,7 +693,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
}
if (card->suspend_post)
- card->suspend_post(pdev, state);
+ card->suspend_post(pdev, PMSG_SUSPEND);
return 0;
}
@@ -765,8 +767,9 @@ static void soc_resume_deferred(struct work_struct *work)
}
/* powers up audio subsystem after a suspend */
-static int soc_resume(struct platform_device *pdev)
+static int soc_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_card *card = socdev->card;
struct snd_soc_dai *cpu_dai = card->dai_link[0].cpu_dai;
@@ -788,6 +791,44 @@ static int soc_resume(struct platform_device *pdev)
return 0;
}
+/**
+ * snd_soc_suspend_device: Notify core of device suspend
+ *
+ * @dev: Device being suspended.
+ *
+ * In order to ensure that the entire audio subsystem is suspended in a
+ * coordinated fashion ASoC devices should suspend themselves when
+ * called by ASoC. When the standard kernel suspend process asks the
+ * device to suspend it should call this function to initiate a suspend
+ * of the entire ASoC card.
+ *
+ * \note Currently this function is stubbed out.
+ */
+int snd_soc_suspend_device(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_suspend_device);
+
+/**
+ * snd_soc_resume_device: Notify core of device resume
+ *
+ * @dev: Device being resumed.
+ *
+ * In order to ensure that the entire audio subsystem is resumed in a
+ * coordinated fashion ASoC devices should resume themselves when called
+ * by ASoC. When the standard kernel resume process asks the device
+ * to resume it should call this function. Once all the components of
+ * the card have notified that they are ready to be resumed the card
+ * will be resumed.
+ *
+ * \note Currently this function is stubbed out.
+ */
+int snd_soc_resume_device(struct device *dev)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_resume_device);
#else
#define soc_suspend NULL
#define soc_resume NULL
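As a sketch of how the two stubs above are meant to be used, a hypothetical ASoC component driver could forward its own dev_pm_ops callbacks to them; since both helpers currently return 0 unconditionally, this only reserves the hook for when card-level coordination is implemented. The my_component_* names are placeholders, not from this patch.

/* Hypothetical component driver forwarding PM callbacks to ASoC
 * (both helpers are stubs for now, so this only reserves the hook). */
static int my_component_suspend(struct device *dev)
{
	return snd_soc_suspend_device(dev);
}

static int my_component_resume(struct device *dev)
{
	return snd_soc_resume_device(dev);
}

static struct dev_pm_ops my_component_pm_ops = {
	.suspend = my_component_suspend,
	.resume = my_component_resume,
};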
@@ -981,16 +1022,39 @@ static int soc_remove(struct platform_device *pdev)
return 0;
}
+static int soc_poweroff(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = socdev->card;
+
+ if (!card->instantiated)
+ return 0;
+
+ /* Flush out pmdown_time work - we actually do want to run it
+ * now, we're shutting down so no imminent restart. */
+ run_delayed_work(&card->delayed_work);
+
+ snd_soc_dapm_shutdown(socdev);
+
+ return 0;
+}
+
+static struct dev_pm_ops soc_pm_ops = {
+ .suspend = soc_suspend,
+ .resume = soc_resume,
+ .poweroff = soc_poweroff,
+};
+
/* ASoC platform driver */
static struct platform_driver soc_driver = {
.driver = {
.name = "soc-audio",
.owner = THIS_MODULE,
+ .pm = &soc_pm_ops,
},
.probe = soc_probe,
.remove = soc_remove,
- .suspend = soc_suspend,
- .resume = soc_resume,
};
/* create a new pcm */
@@ -1062,6 +1126,23 @@ static int soc_new_pcm(struct snd_soc_device *socdev,
return ret;
}
+/**
+ * snd_soc_codec_volatile_register: Report if a register is volatile.
+ *
+ * @codec: CODEC to query.
+ * @reg: Register to query.
+ *
+ * Boolean function indicating if a CODEC register is volatile.
+ */
+int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg)
+{
+ if (codec->volatile_register)
+ return codec->volatile_register(reg);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_codec_volatile_register);
+
/* codec register dump */
static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
{
@@ -1075,6 +1156,9 @@ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
count += sprintf(buf, "%s registers\n", codec->name);
for (i = 0; i < codec->reg_cache_size; i += step) {
+ if (codec->readable_register && !codec->readable_register(i))
+ continue;
+
count += sprintf(buf + count, "%2x: ", i);
if (count >= PAGE_SIZE - 1)
break;
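To illustrate how a codec driver opts in to the two optional callbacks used above, the sketch below fills in volatile_register and readable_register on a hypothetical codec. The register numbers are made-up placeholders, and the callback signatures are inferred from how soc-core invokes them above, so treat this as an assumption rather than a reference implementation.

/* Hypothetical codec: register 0x1a is a volatile status register and
 * registers above 0x30 do not exist on the chip. */
static int my_codec_volatile_register(unsigned int reg)
{
	return reg == 0x1a;
}

static int my_codec_readable_register(unsigned int reg)
{
	return reg <= 0x30;
}

static void my_codec_setup_callbacks(struct snd_soc_codec *codec)
{
	codec->volatile_register = my_codec_volatile_register;
	codec->readable_register = my_codec_readable_register;
}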
@@ -1183,10 +1267,18 @@ static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
if (!codec->debugfs_pop_time)
printk(KERN_WARNING
"Failed to create pop time debugfs file\n");
+
+ codec->debugfs_dapm = debugfs_create_dir("dapm", debugfs_root);
+ if (!codec->debugfs_dapm)
+ printk(KERN_WARNING
+ "Failed to create DAPM debugfs directory\n");
+
+ snd_soc_dapm_debugfs_init(codec);
}
static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
{
+ debugfs_remove_recursive(codec->debugfs_dapm);
debugfs_remove(codec->debugfs_pop_time);
debugfs_remove(codec->debugfs_reg);
}
@@ -1264,10 +1356,10 @@ EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
* Returns 1 for change else 0.
*/
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
- unsigned short mask, unsigned short value)
+ unsigned int mask, unsigned int value)
{
int change;
- unsigned short old, new;
+ unsigned int old, new;
mutex_lock(&io_mutex);
old = snd_soc_read(codec, reg);
@@ -1294,10 +1386,10 @@ EXPORT_SYMBOL_GPL(snd_soc_update_bits);
* Returns 1 for change else 0.
*/
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
- unsigned short mask, unsigned short value)
+ unsigned int mask, unsigned int value)
{
int change;
- unsigned short old, new;
+ unsigned int old, new;
mutex_lock(&io_mutex);
old = snd_soc_read(codec, reg);
@@ -1381,8 +1473,11 @@ int snd_soc_init_card(struct snd_soc_device *socdev)
continue;
}
}
- if (card->dai_link[i].codec_dai->ac97_control)
+ if (card->dai_link[i].codec_dai->ac97_control) {
ac97 = 1;
+ snd_ac97_dev_add_pdata(codec->ac97,
+ card->dai_link[i].cpu_dai->ac97_pdata);
+ }
}
snprintf(codec->card->shortname, sizeof(codec->card->shortname),
"%s", card->name);
@@ -1586,7 +1681,7 @@ int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val, bitmask;
+ unsigned int val, bitmask;
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
;
@@ -1615,8 +1710,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val;
- unsigned short mask, bitmask;
+ unsigned int val;
+ unsigned int mask, bitmask;
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
;
@@ -1652,7 +1747,7 @@ int snd_soc_get_value_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short reg_val, val, mux;
+ unsigned int reg_val, val, mux;
reg_val = snd_soc_read(codec, e->reg);
val = (reg_val >> e->shift_l) & e->mask;
@@ -1691,8 +1786,8 @@ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val;
- unsigned short mask;
+ unsigned int val;
+ unsigned int mask;
if (ucontrol->value.enumerated.item[0] > e->max - 1)
return -EINVAL;
@@ -1852,7 +1947,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- unsigned short val, val2, val_mask;
+ unsigned int val, val2, val_mask;
val = (ucontrol->value.integer.value[0] & mask);
if (invert)
@@ -1918,7 +2013,7 @@ int snd_soc_get_volsw_2r(struct snd_kcontrol *kcontrol,
unsigned int reg2 = mc->rreg;
unsigned int shift = mc->shift;
int max = mc->max;
- unsigned int mask = (1<<fls(max))-1;
+ unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
ucontrol->value.integer.value[0] =
@@ -1958,7 +2053,7 @@ int snd_soc_put_volsw_2r(struct snd_kcontrol *kcontrol,
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
int err;
- unsigned short val, val2, val_mask;
+ unsigned int val, val2, val_mask;
val_mask = mask << shift;
val = (ucontrol->value.integer.value[0] & mask);
@@ -2050,7 +2145,7 @@ int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
unsigned int reg = mc->reg;
int min = mc->min;
- unsigned short val;
+ unsigned int val;
val = (ucontrol->value.integer.value[0]+min) & 0xff;
val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8;
@@ -2136,17 +2231,20 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);
/**
* snd_soc_dai_set_tdm_slot - configure DAI TDM.
* @dai: DAI
- * @mask: DAI specific mask representing used slots.
+ * @tx_mask: bitmask representing active TX slots.
+ * @rx_mask: bitmask representing active RX slots.
* @slots: Number of slots in use.
+ * @slot_width: Width in bits for each slot.
*
 * Configures a DAI for TDM operation. Both masks and slots are codec and DAI
* specific.
*/
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
- unsigned int mask, int slots)
+ unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
if (dai->ops && dai->ops->set_tdm_slot)
- return dai->ops->set_tdm_slot(dai, mask, slots);
+ return dai->ops->set_tdm_slot(dai, tx_mask, rx_mask,
+ slots, slot_width);
else
return -EINVAL;
}
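As a concrete but hypothetical use of the extended API, a machine driver setting up a four-slot, 16-bit TDM bus with transmit on slots 0-1 and receive on slots 2-3 would now pass separate masks; the my_machine_set_tdm name and the slot layout are assumptions for illustration only.

/* Hypothetical machine-driver hw_params step: four 16-bit slots,
 * transmit on slots 0-1, receive on slots 2-3. */
static int my_machine_set_tdm(struct snd_soc_dai *codec_dai)
{
	return snd_soc_dai_set_tdm_slot(codec_dai,
					0x3,	/* tx_mask: slots 0 and 1 */
					0xc,	/* rx_mask: slots 2 and 3 */
					4,	/* slots */
					16);	/* slot_width in bits */
}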
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 21c69074aa17..0d8b08ef8731 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -37,6 +37,7 @@
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
+#include <linux/debugfs.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -52,19 +53,41 @@
/* dapm power sequences - make this per codec in the future */
static int dapm_up_seq[] = {
- snd_soc_dapm_pre, snd_soc_dapm_supply, snd_soc_dapm_micbias,
- snd_soc_dapm_mic, snd_soc_dapm_mux, snd_soc_dapm_value_mux,
- snd_soc_dapm_dac, snd_soc_dapm_mixer, snd_soc_dapm_mixer_named_ctl,
- snd_soc_dapm_pga, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk,
- snd_soc_dapm_post
+ [snd_soc_dapm_pre] = 0,
+ [snd_soc_dapm_supply] = 1,
+ [snd_soc_dapm_micbias] = 2,
+ [snd_soc_dapm_aif_in] = 3,
+ [snd_soc_dapm_aif_out] = 3,
+ [snd_soc_dapm_mic] = 4,
+ [snd_soc_dapm_mux] = 5,
+ [snd_soc_dapm_value_mux] = 5,
+ [snd_soc_dapm_dac] = 6,
+ [snd_soc_dapm_mixer] = 7,
+ [snd_soc_dapm_mixer_named_ctl] = 7,
+ [snd_soc_dapm_pga] = 8,
+ [snd_soc_dapm_adc] = 9,
+ [snd_soc_dapm_hp] = 10,
+ [snd_soc_dapm_spk] = 10,
+ [snd_soc_dapm_post] = 11,
};
static int dapm_down_seq[] = {
- snd_soc_dapm_pre, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk,
- snd_soc_dapm_pga, snd_soc_dapm_mixer_named_ctl, snd_soc_dapm_mixer,
- snd_soc_dapm_dac, snd_soc_dapm_mic, snd_soc_dapm_micbias,
- snd_soc_dapm_mux, snd_soc_dapm_value_mux, snd_soc_dapm_supply,
- snd_soc_dapm_post
+ [snd_soc_dapm_pre] = 0,
+ [snd_soc_dapm_adc] = 1,
+ [snd_soc_dapm_hp] = 2,
+ [snd_soc_dapm_spk] = 2,
+ [snd_soc_dapm_pga] = 4,
+ [snd_soc_dapm_mixer_named_ctl] = 5,
+ [snd_soc_dapm_mixer] = 5,
+ [snd_soc_dapm_dac] = 6,
+ [snd_soc_dapm_mic] = 7,
+ [snd_soc_dapm_micbias] = 8,
+ [snd_soc_dapm_mux] = 9,
+ [snd_soc_dapm_value_mux] = 9,
+ [snd_soc_dapm_aif_in] = 10,
+ [snd_soc_dapm_aif_out] = 10,
+ [snd_soc_dapm_supply] = 11,
+ [snd_soc_dapm_post] = 12,
};
static void pop_wait(u32 pop_time)
@@ -130,8 +153,12 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_device *socdev,
if (card->set_bias_level)
ret = card->set_bias_level(card, level);
- if (ret == 0 && codec->set_bias_level)
- ret = codec->set_bias_level(codec, level);
+ if (ret == 0) {
+ if (codec->set_bias_level)
+ ret = codec->set_bias_level(codec, level);
+ else
+ codec->bias_level = level;
+ }
return ret;
}
@@ -206,6 +233,8 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
case snd_soc_dapm_micbias:
case snd_soc_dapm_vmid:
case snd_soc_dapm_supply:
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
p->connect = 1;
break;
/* does effect routing - dynamically connected */
@@ -268,7 +297,7 @@ static int dapm_connect_mixer(struct snd_soc_codec *codec,
static int dapm_update_bits(struct snd_soc_dapm_widget *widget)
{
int change, power;
- unsigned short old, new;
+ unsigned int old, new;
struct snd_soc_codec *codec = widget->codec;
/* check for valid widgets */
@@ -479,8 +508,14 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget)
if (widget->id == snd_soc_dapm_supply)
return 0;
- if (widget->id == snd_soc_dapm_adc && widget->active)
- return 1;
+ switch (widget->id) {
+ case snd_soc_dapm_adc:
+ case snd_soc_dapm_aif_out:
+ if (widget->active)
+ return 1;
+ default:
+ break;
+ }
if (widget->connected) {
/* connected pin ? */
@@ -519,8 +554,14 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget)
return 0;
/* active stream ? */
- if (widget->id == snd_soc_dapm_dac && widget->active)
- return 1;
+ switch (widget->id) {
+ case snd_soc_dapm_dac:
+ case snd_soc_dapm_aif_in:
+ if (widget->active)
+ return 1;
+ default:
+ break;
+ }
if (widget->connected) {
/* connected pin ? */
@@ -689,53 +730,211 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
return power;
}
-/*
- * Scan a single DAPM widget for a complete audio path and update the
- * power status appropriately.
- */
-static int dapm_power_widget(struct snd_soc_codec *codec, int event,
- struct snd_soc_dapm_widget *w)
+static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
+ struct snd_soc_dapm_widget *b,
+ int sort[])
{
- int ret;
+ if (sort[a->id] != sort[b->id])
+ return sort[a->id] - sort[b->id];
+ if (a->reg != b->reg)
+ return a->reg - b->reg;
- switch (w->id) {
- case snd_soc_dapm_pre:
- if (!w->event)
- return 0;
+ return 0;
+}
- if (event == SND_SOC_DAPM_STREAM_START) {
- ret = w->event(w,
- NULL, SND_SOC_DAPM_PRE_PMU);
+/* Insert a widget in order into a DAPM power sequence. */
+static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget,
+ struct list_head *list,
+ int sort[])
+{
+ struct snd_soc_dapm_widget *w;
+
+ list_for_each_entry(w, list, power_list)
+ if (dapm_seq_compare(new_widget, w, sort) < 0) {
+ list_add_tail(&new_widget->power_list, &w->power_list);
+ return;
+ }
+
+ list_add_tail(&new_widget->power_list, list);
+}
+
+/* Apply the coalesced changes from a DAPM sequence */
+static void dapm_seq_run_coalesced(struct snd_soc_codec *codec,
+ struct list_head *pending)
+{
+ struct snd_soc_dapm_widget *w;
+ int reg, power, ret;
+ unsigned int value = 0;
+ unsigned int mask = 0;
+ unsigned int cur_mask;
+
+ reg = list_first_entry(pending, struct snd_soc_dapm_widget,
+ power_list)->reg;
+
+ list_for_each_entry(w, pending, power_list) {
+ cur_mask = 1 << w->shift;
+ BUG_ON(reg != w->reg);
+
+ if (w->invert)
+ power = !w->power;
+ else
+ power = w->power;
+
+ mask |= cur_mask;
+ if (power)
+ value |= cur_mask;
+
+ pop_dbg(codec->pop_time,
+ "pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
+ w->name, reg, value, mask);
+
+ /* power up pre event */
+ if (w->power && w->event &&
+ (w->event_flags & SND_SOC_DAPM_PRE_PMU)) {
+ pop_dbg(codec->pop_time, "pop test : %s PRE_PMU\n",
+ w->name);
+ ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU);
if (ret < 0)
- return ret;
- } else if (event == SND_SOC_DAPM_STREAM_STOP) {
- ret = w->event(w,
- NULL, SND_SOC_DAPM_PRE_PMD);
+ pr_err("%s: pre event failed: %d\n",
+ w->name, ret);
+ }
+
+ /* power down pre event */
+ if (!w->power && w->event &&
+ (w->event_flags & SND_SOC_DAPM_PRE_PMD)) {
+ pop_dbg(codec->pop_time, "pop test : %s PRE_PMD\n",
+ w->name);
+ ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD);
if (ret < 0)
- return ret;
+ pr_err("%s: pre event failed: %d\n",
+ w->name, ret);
}
- return 0;
- case snd_soc_dapm_post:
- if (!w->event)
- return 0;
+ /* Lower PGA volume to reduce pops */
+ if (w->id == snd_soc_dapm_pga && !w->power)
+ dapm_set_pga(w, w->power);
+ }
- if (event == SND_SOC_DAPM_STREAM_START) {
+ if (reg >= 0) {
+ pop_dbg(codec->pop_time,
+ "pop test : Applying 0x%x/0x%x to %x in %dms\n",
+ value, mask, reg, codec->pop_time);
+ pop_wait(codec->pop_time);
+ snd_soc_update_bits(codec, reg, mask, value);
+ }
+
+ list_for_each_entry(w, pending, power_list) {
+ /* Raise PGA volume to reduce pops */
+ if (w->id == snd_soc_dapm_pga && w->power)
+ dapm_set_pga(w, w->power);
+
+ /* power up post event */
+ if (w->power && w->event &&
+ (w->event_flags & SND_SOC_DAPM_POST_PMU)) {
+ pop_dbg(codec->pop_time, "pop test : %s POST_PMU\n",
+ w->name);
ret = w->event(w,
NULL, SND_SOC_DAPM_POST_PMU);
if (ret < 0)
- return ret;
- } else if (event == SND_SOC_DAPM_STREAM_STOP) {
- ret = w->event(w,
- NULL, SND_SOC_DAPM_POST_PMD);
+ pr_err("%s: post event failed: %d\n",
+ w->name, ret);
+ }
+
+ /* power down post event */
+ if (!w->power && w->event &&
+ (w->event_flags & SND_SOC_DAPM_POST_PMD)) {
+ pop_dbg(codec->pop_time, "pop test : %s POST_PMD\n",
+ w->name);
+ ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD);
if (ret < 0)
- return ret;
+ pr_err("%s: post event failed: %d\n",
+ w->name, ret);
}
- return 0;
+ }
+}
- default:
- return dapm_generic_apply_power(w);
+/* Apply a DAPM power sequence.
+ *
+ * We walk over a pre-sorted list of widgets to apply power to. In
+ * order to minimise the number of writes to the device required,
+ * multiple widgets will be updated in a single write where possible.
+ * Currently anything that requires more than a single write is not
+ * handled.
+ */
+static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list,
+ int event, int sort[])
+{
+ struct snd_soc_dapm_widget *w, *n;
+ LIST_HEAD(pending);
+ int cur_sort = -1;
+ int cur_reg = SND_SOC_NOPM;
+ int ret;
+
+ list_for_each_entry_safe(w, n, list, power_list) {
+ ret = 0;
+
+ /* Do we need to apply any queued changes? */
+ if (sort[w->id] != cur_sort || w->reg != cur_reg) {
+ if (!list_empty(&pending))
+ dapm_seq_run_coalesced(codec, &pending);
+
+ INIT_LIST_HEAD(&pending);
+ cur_sort = -1;
+ cur_reg = SND_SOC_NOPM;
+ }
+
+ switch (w->id) {
+ case snd_soc_dapm_pre:
+ if (!w->event)
+ list_for_each_entry_safe_continue(w, n, list,
+ power_list);
+
+ if (event == SND_SOC_DAPM_STREAM_START)
+ ret = w->event(w,
+ NULL, SND_SOC_DAPM_PRE_PMU);
+ else if (event == SND_SOC_DAPM_STREAM_STOP)
+ ret = w->event(w,
+ NULL, SND_SOC_DAPM_PRE_PMD);
+ break;
+
+ case snd_soc_dapm_post:
+ if (!w->event)
+ list_for_each_entry_safe_continue(w, n, list,
+ power_list);
+
+ if (event == SND_SOC_DAPM_STREAM_START)
+ ret = w->event(w,
+ NULL, SND_SOC_DAPM_POST_PMU);
+ else if (event == SND_SOC_DAPM_STREAM_STOP)
+ ret = w->event(w,
+ NULL, SND_SOC_DAPM_POST_PMD);
+ break;
+
+ case snd_soc_dapm_input:
+ case snd_soc_dapm_output:
+ case snd_soc_dapm_hp:
+ case snd_soc_dapm_mic:
+ case snd_soc_dapm_line:
+ case snd_soc_dapm_spk:
+ /* No register support currently */
+ ret = dapm_generic_apply_power(w);
+ break;
+
+ default:
+ /* Queue it up for application */
+ cur_sort = sort[w->id];
+ cur_reg = w->reg;
+ list_move(&w->power_list, &pending);
+ break;
+ }
+
+ if (ret < 0)
+ pr_err("Failed to apply widget power: %d\n",
+ ret);
}
+
+ if (!list_empty(&pending))
+ dapm_seq_run_coalesced(codec, &pending);
}
/*
@@ -751,23 +950,22 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
{
struct snd_soc_device *socdev = codec->socdev;
struct snd_soc_dapm_widget *w;
+ LIST_HEAD(up_list);
+ LIST_HEAD(down_list);
int ret = 0;
- int i, power;
+ int power;
int sys_power = 0;
- INIT_LIST_HEAD(&codec->up_list);
- INIT_LIST_HEAD(&codec->down_list);
-
/* Check which widgets we need to power and store them in
* lists indicating if they should be powered up or down.
*/
list_for_each_entry(w, &codec->dapm_widgets, list) {
switch (w->id) {
case snd_soc_dapm_pre:
- list_add_tail(&codec->down_list, &w->power_list);
+ dapm_seq_insert(w, &down_list, dapm_down_seq);
break;
case snd_soc_dapm_post:
- list_add_tail(&codec->up_list, &w->power_list);
+ dapm_seq_insert(w, &up_list, dapm_up_seq);
break;
default:
@@ -782,16 +980,31 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
continue;
if (power)
- list_add_tail(&w->power_list, &codec->up_list);
+ dapm_seq_insert(w, &up_list, dapm_up_seq);
else
- list_add_tail(&w->power_list,
- &codec->down_list);
+ dapm_seq_insert(w, &down_list, dapm_down_seq);
w->power = power;
break;
}
}
+ /* If there are no DAPM widgets then try to figure out power from the
+ * event type.
+ */
+ if (list_empty(&codec->dapm_widgets)) {
+ switch (event) {
+ case SND_SOC_DAPM_STREAM_START:
+ case SND_SOC_DAPM_STREAM_RESUME:
+ sys_power = 1;
+ break;
+ case SND_SOC_DAPM_STREAM_NOP:
+ sys_power = codec->bias_level != SND_SOC_BIAS_STANDBY;
+ default:
+ break;
+ }
+ }
+
/* If we're changing to all on or all off then prepare */
if ((sys_power && codec->bias_level == SND_SOC_BIAS_STANDBY) ||
(!sys_power && codec->bias_level == SND_SOC_BIAS_ON)) {
@@ -802,32 +1015,10 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
}
/* Power down widgets first; try to avoid amplifying pops. */
- for (i = 0; i < ARRAY_SIZE(dapm_down_seq); i++) {
- list_for_each_entry(w, &codec->down_list, power_list) {
- /* is widget in stream order */
- if (w->id != dapm_down_seq[i])
- continue;
-
- ret = dapm_power_widget(codec, event, w);
- if (ret != 0)
- pr_err("Failed to power down %s: %d\n",
- w->name, ret);
- }
- }
+ dapm_seq_run(codec, &down_list, event, dapm_down_seq);
/* Now power up. */
- for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++) {
- list_for_each_entry(w, &codec->up_list, power_list) {
- /* is widget in stream order */
- if (w->id != dapm_up_seq[i])
- continue;
-
- ret = dapm_power_widget(codec, event, w);
- if (ret != 0)
- pr_err("Failed to power up %s: %d\n",
- w->name, ret);
- }
- }
+ dapm_seq_run(codec, &up_list, event, dapm_up_seq);
/* If we just powered the last thing off drop to standby bias */
if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) {
@@ -845,6 +1036,9 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
pr_err("Failed to apply active bias: %d\n", ret);
}
+ pop_dbg(codec->pop_time, "DAPM sequencing finished, waiting %dms\n",
+ codec->pop_time);
+
return 0;
}
@@ -881,6 +1075,8 @@ static void dbg_dump_dapm(struct snd_soc_codec* codec, const char *action)
case snd_soc_dapm_mixer:
case snd_soc_dapm_mixer_named_ctl:
case snd_soc_dapm_supply:
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
if (w->name) {
in = is_connected_input_ep(w);
dapm_clear_walk(w->codec);
@@ -906,6 +1102,92 @@ static void dbg_dump_dapm(struct snd_soc_codec* codec, const char *action)
}
#endif
+#ifdef CONFIG_DEBUG_FS
+static int dapm_widget_power_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t dapm_widget_power_read_file(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct snd_soc_dapm_widget *w = file->private_data;
+ char *buf;
+ int in, out;
+ ssize_t ret;
+ struct snd_soc_dapm_path *p = NULL;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ in = is_connected_input_ep(w);
+ dapm_clear_walk(w->codec);
+ out = is_connected_output_ep(w);
+ dapm_clear_walk(w->codec);
+
+ ret = snprintf(buf, PAGE_SIZE, "%s: %s in %d out %d\n",
+ w->name, w->power ? "On" : "Off", in, out);
+
+ if (w->active && w->sname)
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s active\n",
+ w->sname);
+
+ list_for_each_entry(p, &w->sources, list_sink) {
+ if (p->connect)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ " in %s %s\n",
+ p->name ? p->name : "static",
+ p->source->name);
+ }
+ list_for_each_entry(p, &w->sinks, list_source) {
+ if (p->connect)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ " out %s %s\n",
+ p->name ? p->name : "static",
+ p->sink->name);
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations dapm_widget_power_fops = {
+ .open = dapm_widget_power_open_file,
+ .read = dapm_widget_power_read_file,
+};
+
+void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
+{
+ struct snd_soc_dapm_widget *w;
+ struct dentry *d;
+
+ if (!codec->debugfs_dapm)
+ return;
+
+ list_for_each_entry(w, &codec->dapm_widgets, list) {
+ if (!w->name)
+ continue;
+
+ d = debugfs_create_file(w->name, 0444,
+ codec->debugfs_dapm, w,
+ &dapm_widget_power_fops);
+ if (!d)
+ printk(KERN_WARNING
+ "ASoC: Failed to create %s debugfs file\n",
+ w->name);
+ }
+}
+#else
+void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
+{
+}
+#endif
+
/* test and update the power status of a mux widget */
static int dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
struct snd_kcontrol *kcontrol, int mask,
@@ -1138,8 +1420,8 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec,
if (wsink->id == snd_soc_dapm_input) {
if (wsource->id == snd_soc_dapm_micbias ||
wsource->id == snd_soc_dapm_mic ||
- wsink->id == snd_soc_dapm_line ||
- wsink->id == snd_soc_dapm_output)
+ wsource->id == snd_soc_dapm_line ||
+ wsource->id == snd_soc_dapm_output)
wsink->ext = 1;
}
if (wsource->id == snd_soc_dapm_output) {
@@ -1171,6 +1453,8 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec,
case snd_soc_dapm_pre:
case snd_soc_dapm_post:
case snd_soc_dapm_supply:
+ case snd_soc_dapm_aif_in:
+ case snd_soc_dapm_aif_out:
list_add(&path->list, &codec->dapm_paths);
list_add(&path->list_sink, &wsink->sources);
list_add(&path->list_source, &wsource->sinks);
@@ -1273,9 +1557,11 @@ int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec)
dapm_new_mux(codec, w);
break;
case snd_soc_dapm_adc:
+ case snd_soc_dapm_aif_out:
w->power_check = dapm_adc_check_power;
break;
case snd_soc_dapm_dac:
+ case snd_soc_dapm_aif_in:
w->power_check = dapm_dac_check_power;
break;
case snd_soc_dapm_pga:
@@ -1372,7 +1658,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- unsigned short val, val2, val_mask;
+ unsigned int val, val2, val_mask;
int ret;
val = (ucontrol->value.integer.value[0] & mask);
@@ -1436,7 +1722,7 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val, bitmask;
+ unsigned int val, bitmask;
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
;
@@ -1464,8 +1750,8 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val, mux;
- unsigned short mask, bitmask;
+ unsigned int val, mux;
+ unsigned int mask, bitmask;
int ret = 0;
for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
@@ -1523,7 +1809,7 @@ int snd_soc_dapm_get_value_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short reg_val, val, mux;
+ unsigned int reg_val, val, mux;
reg_val = snd_soc_read(widget->codec, e->reg);
val = (reg_val >> e->shift_l) & e->mask;
@@ -1563,8 +1849,8 @@ int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol,
{
struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned short val, mux;
- unsigned short mask;
+ unsigned int val, mux;
+ unsigned int mask;
int ret = 0;
if (ucontrol->value.enumerated.item[0] > e->max - 1)
@@ -1880,6 +2166,36 @@ void snd_soc_dapm_free(struct snd_soc_device *socdev)
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_free);
+/*
+ * snd_soc_dapm_shutdown - callback for system shutdown
+ */
+void snd_soc_dapm_shutdown(struct snd_soc_device *socdev)
+{
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct snd_soc_dapm_widget *w;
+ LIST_HEAD(down_list);
+ int powerdown = 0;
+
+ list_for_each_entry(w, &codec->dapm_widgets, list) {
+ if (w->power) {
+ dapm_seq_insert(w, &down_list, dapm_down_seq);
+ w->power = 0;
+ powerdown = 1;
+ }
+ }
+
+ /* If there were no widgets to power down we're already in
+ * standby.
+ */
+ if (powerdown) {
+ snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_PREPARE);
+ dapm_seq_run(codec, &down_list, 0, dapm_down_seq);
+ snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_STANDBY);
+ }
+
+ snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_OFF);
+}
+
/* Module information */
MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk");
MODULE_DESCRIPTION("Dynamic Audio Power Management core for ALSA SoC");
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 28346fb2e70c..1d455ab79490 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -73,14 +73,15 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
oldstatus = jack->status;
jack->status &= ~mask;
- jack->status |= status;
+ jack->status |= status & mask;
- /* The DAPM sync is expensive enough to be worth skipping */
- if (jack->status == oldstatus)
+ /* The DAPM sync is expensive enough to be worth skipping.
+ * However, empty mask means pin synchronization is desired. */
+ if (mask && (jack->status == oldstatus))
goto out;
list_for_each_entry(pin, &jack->pins, list) {
- enable = pin->mask & status;
+ enable = pin->mask & jack->status;
if (pin->invert)
enable = !enable;
@@ -220,6 +221,9 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
if (ret)
goto err;
+ INIT_WORK(&gpios[i].work, gpio_work);
+ gpios[i].jack = jack;
+
ret = request_irq(gpio_to_irq(gpios[i].gpio),
gpio_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -228,8 +232,13 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
if (ret)
goto err;
- INIT_WORK(&gpios[i].work, gpio_work);
- gpios[i].jack = jack;
+#ifdef CONFIG_GPIO_SYSFS
+ /* Expose GPIO value over sysfs for diagnostic purposes */
+ gpio_export(gpios[i].gpio, false);
+#endif
+
+ /* Update initial jack status */
+ snd_soc_jack_gpio_detect(&gpios[i]);
}
return 0;
@@ -258,6 +267,9 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
int i;
for (i = 0; i < count; i++) {
+#ifdef CONFIG_GPIO_SYSFS
+ gpio_unexport(gpios[i].gpio);
+#endif
free_irq(gpio_to_irq(gpios[i].gpio), &gpios[i]);
gpio_free(gpios[i].gpio);
gpios[i].jack = NULL;
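To make the revised mask semantics concrete: only the status bits covered by the mask are updated, so reporting headphone state no longer disturbs a previously reported microphone bit, and a zero mask forces a pin resynchronisation. The detection handler below is a hypothetical sketch, not taken from this patch.

/* Hypothetical detection handler: only the headphone bit is covered by
 * the mask, so any previously reported microphone bit is left untouched. */
static void my_headphone_event(struct snd_soc_jack *jack, int plugged)
{
	snd_soc_jack_report(jack,
			    plugged ? SND_JACK_HEADPHONE : 0,
			    SND_JACK_HEADPHONE);
}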
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 938a58a5a244..efed64b8b026 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -297,15 +297,17 @@ static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
static bool filter(struct dma_chan *chan, void *param)
{
struct txx9aclc_dmadata *dmadata = param;
- char devname[20 + 2]; /* FIXME: old BUS_ID_SIZE + 2 */
+ char *devname;
+ bool found = false;
- snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name,
+ devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name,
(int)dmadata->dma_res->start);
if (strcmp(dev_name(chan->device->dev), devname) == 0) {
chan->private = &dmadata->dma_slave;
- return true;
+ found = true;
}
- return false;
+ kfree(devname);
+ return found;
}
static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
diff --git a/sound/sound_core.c b/sound/sound_core.c
index a41f8b127f49..bb4b88e606bb 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -128,6 +128,46 @@ extern int msnd_pinnacle_init(void);
#endif
/*
+ * By default, OSS sound_core claims full legacy minor range (0-255)
+ * of SOUND_MAJOR to trap open attempts to any sound minor and
+ * requests modules using custom sound-slot/service-* module aliases.
+ * The only benefit of doing this is allowing use of custom module
+ * aliases instead of the standard char-major-* ones. This behavior
+ * prevents alternative OSS implementation and is scheduled to be
+ * removed.
+ *
+ * CONFIG_SOUND_OSS_CORE_PRECLAIM and soundcore.preclaim_oss kernel
+ * parameter are added to allow distros and developers to try and
+ * switch to alternative implementations without needing to rebuild
+ * the kernel in the meantime. If preclaim_oss is non-zero, the
+ * kernel will behave the same as before. All SOUND_MAJOR minors are
+ * preclaimed and the custom module aliases along with standard chrdev
+ * ones are emitted if a missing device is opened. If preclaim_oss is
+ * zero, sound_core only grabs what's actually in use and for missing
+ * devices only the standard chrdev aliases are requested.
+ *
+ * All of this clutter is scheduled to be removed along with the
+ * sound-slot/service-* module aliases. Please take a look at
+ * feature-removal-schedule.txt for details.
+ */
+#ifdef CONFIG_SOUND_OSS_CORE_PRECLAIM
+static int preclaim_oss = 1;
+#else
+static int preclaim_oss = 0;
+#endif
+
+module_param(preclaim_oss, int, 0444);
+
+static int soundcore_open(struct inode *, struct file *);
+
+static const struct file_operations soundcore_fops =
+{
+ /* We must have an owner or the module locking fails */
+ .owner = THIS_MODULE,
+ .open = soundcore_open,
+};
+
+/*
* Low level list operator. Scan the ordered list, find a hole and
* join into it. Called with the lock asserted
*/
@@ -219,8 +259,9 @@ static int sound_insert_unit(struct sound_unit **list, const struct file_operati
if (!s)
return -ENOMEM;
-
+
spin_lock(&sound_loader_lock);
+retry:
r = __sound_insert_unit(s, list, fops, index, low, top);
spin_unlock(&sound_loader_lock);
@@ -231,11 +272,31 @@ static int sound_insert_unit(struct sound_unit **list, const struct file_operati
else
sprintf(s->name, "sound/%s%d", name, r / SOUND_STEP);
+ if (!preclaim_oss) {
+ /*
+ * Something else might have grabbed the minor. If
+ * first free slot is requested, rescan with @low set
+ * to the next unit; otherwise, -EBUSY.
+ */
+ r = __register_chrdev(SOUND_MAJOR, s->unit_minor, 1, s->name,
+ &soundcore_fops);
+ if (r < 0) {
+ spin_lock(&sound_loader_lock);
+ __sound_remove_unit(list, s->unit_minor);
+ if (index < 0) {
+ low = s->unit_minor + SOUND_STEP;
+ goto retry;
+ }
+ spin_unlock(&sound_loader_lock);
+ return -EBUSY;
+ }
+ }
+
device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor),
NULL, s->name+6);
- return r;
+ return s->unit_minor;
- fail:
+fail:
kfree(s);
return r;
}
@@ -254,6 +315,9 @@ static void sound_remove_unit(struct sound_unit **list, int unit)
p = __sound_remove_unit(list, unit);
spin_unlock(&sound_loader_lock);
if (p) {
+ if (!preclaim_oss)
+ __unregister_chrdev(SOUND_MAJOR, p->unit_minor, 1,
+ p->name);
device_destroy(sound_class, MKDEV(SOUND_MAJOR, p->unit_minor));
kfree(p);
}
@@ -491,19 +555,6 @@ void unregister_sound_dsp(int unit)
EXPORT_SYMBOL(unregister_sound_dsp);
-/*
- * Now our file operations
- */
-
-static int soundcore_open(struct inode *, struct file *);
-
-static const struct file_operations soundcore_fops=
-{
- /* We must have an owner or the module locking fails */
- .owner = THIS_MODULE,
- .open = soundcore_open,
-};
-
static struct sound_unit *__look_for_unit(int chain, int unit)
{
struct sound_unit *s;
@@ -539,8 +590,9 @@ static int soundcore_open(struct inode *inode, struct file *file)
s = __look_for_unit(chain, unit);
if (s)
new_fops = fops_get(s->unit_fops);
- if (!new_fops) {
+ if (preclaim_oss && !new_fops) {
spin_unlock(&sound_loader_lock);
+
/*
* Please, don't change this order or code.
* For ALSA slot means soundcard and OSS emulation code
@@ -550,6 +602,17 @@ static int soundcore_open(struct inode *inode, struct file *file)
*/
request_module("sound-slot-%i", unit>>4);
request_module("sound-service-%i-%i", unit>>4, chain);
+
+ /*
+ * sound-slot/service-* module aliases are scheduled
+ * for removal in favor of the standard char-major-*
+ * module aliases. For the time being, generate both
+ * the legacy and standard module aliases to ease
+ * transition.
+ */
+ if (request_module("char-major-%d-%d", SOUND_MAJOR, unit) > 0)
+ request_module("char-major-%d", SOUND_MAJOR);
+
spin_lock(&sound_loader_lock);
s = __look_for_unit(chain, unit);
if (s)
@@ -593,7 +656,8 @@ static void cleanup_oss_soundcore(void)
static int __init init_oss_soundcore(void)
{
- if (register_chrdev(SOUND_MAJOR, "sound", &soundcore_fops)==-1) {
+ if (preclaim_oss &&
+ register_chrdev(SOUND_MAJOR, "sound", &soundcore_fops) == -1) {
printk(KERN_ERR "soundcore: sound device already in use.\n");
return -EBUSY;
}
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index 44b9cdc8a83b..8db0374e10d5 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -1083,6 +1083,8 @@ static int init_substream_urbs(struct snd_usb_substream *subs, unsigned int peri
} else
urb_packs = 1;
urb_packs *= packs_per_ms;
+ if (subs->syncpipe)
+ urb_packs = min(urb_packs, 1U << subs->syncinterval);
/* decide how many packets to be used */
if (is_playback) {
@@ -2124,8 +2126,8 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
fp = list_entry(p, struct audioformat, list);
snd_iprintf(buffer, " Interface %d\n", fp->iface);
snd_iprintf(buffer, " Altset %d\n", fp->altsetting);
- snd_iprintf(buffer, " Format: %#x (%d bits)\n",
- fp->format, snd_pcm_format_width(fp->format));
+ snd_iprintf(buffer, " Format: %s\n",
+ snd_pcm_format_name(fp->format));
snd_iprintf(buffer, " Channels: %d\n", fp->channels);
snd_iprintf(buffer, " Endpoint: %d %s (%s)\n",
fp->endpoint & USB_ENDPOINT_NUMBER_MASK,
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index 2fb35cc22a30..0eff19ceb7e1 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -45,6 +45,7 @@
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
+#include <linux/wait.h>
#include <sound/core.h>
#include <sound/rawmidi.h>
#include <sound/asequencer.h>
@@ -62,6 +63,9 @@
*/
#define ERROR_DELAY_JIFFIES (HZ / 10)
+#define OUTPUT_URBS 7
+#define INPUT_URBS 7
+
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_DESCRIPTION("USB Audio/MIDI helper module");
@@ -90,7 +94,7 @@ struct snd_usb_midi_endpoint;
struct usb_protocol_ops {
void (*input)(struct snd_usb_midi_in_endpoint*, uint8_t*, int);
- void (*output)(struct snd_usb_midi_out_endpoint*);
+ void (*output)(struct snd_usb_midi_out_endpoint *ep, struct urb *urb);
void (*output_packet)(struct urb*, uint8_t, uint8_t, uint8_t, uint8_t);
void (*init_out_endpoint)(struct snd_usb_midi_out_endpoint*);
void (*finish_out_endpoint)(struct snd_usb_midi_out_endpoint*);
@@ -116,11 +120,15 @@ struct snd_usb_midi {
struct snd_usb_midi_out_endpoint {
struct snd_usb_midi* umidi;
- struct urb* urb;
- int urb_active;
+ struct out_urb_context {
+ struct urb *urb;
+ struct snd_usb_midi_out_endpoint *ep;
+ } urbs[OUTPUT_URBS];
+ unsigned int active_urbs;
+ unsigned int drain_urbs;
int max_transfer; /* size of urb buffer */
struct tasklet_struct tasklet;
-
+ unsigned int next_urb;
spinlock_t buffer_lock;
struct usbmidi_out_port {
@@ -139,11 +147,13 @@ struct snd_usb_midi_out_endpoint {
uint8_t data[2];
} ports[0x10];
int current_port;
+
+ wait_queue_head_t drain_wait;
};
struct snd_usb_midi_in_endpoint {
struct snd_usb_midi* umidi;
- struct urb* urb;
+ struct urb* urbs[INPUT_URBS];
struct usbmidi_in_port {
struct snd_rawmidi_substream *substream;
u8 running_status_length;
@@ -251,10 +261,17 @@ static void snd_usbmidi_in_urb_complete(struct urb* urb)
static void snd_usbmidi_out_urb_complete(struct urb* urb)
{
- struct snd_usb_midi_out_endpoint* ep = urb->context;
+ struct out_urb_context *context = urb->context;
+ struct snd_usb_midi_out_endpoint* ep = context->ep;
+ unsigned int urb_index;
spin_lock(&ep->buffer_lock);
- ep->urb_active = 0;
+ urb_index = context - ep->urbs;
+ ep->active_urbs &= ~(1 << urb_index);
+ if (unlikely(ep->drain_urbs)) {
+ ep->drain_urbs &= ~(1 << urb_index);
+ wake_up(&ep->drain_wait);
+ }
spin_unlock(&ep->buffer_lock);
if (urb->status < 0) {
int err = snd_usbmidi_urb_error(urb->status);
@@ -274,24 +291,38 @@ static void snd_usbmidi_out_urb_complete(struct urb* urb)
*/
static void snd_usbmidi_do_output(struct snd_usb_midi_out_endpoint* ep)
{
- struct urb* urb = ep->urb;
+ unsigned int urb_index;
+ struct urb* urb;
unsigned long flags;
spin_lock_irqsave(&ep->buffer_lock, flags);
- if (ep->urb_active || ep->umidi->chip->shutdown) {
+ if (ep->umidi->chip->shutdown) {
spin_unlock_irqrestore(&ep->buffer_lock, flags);
return;
}
- urb->transfer_buffer_length = 0;
- ep->umidi->usb_protocol_ops->output(ep);
+ urb_index = ep->next_urb;
+ for (;;) {
+ if (!(ep->active_urbs & (1 << urb_index))) {
+ urb = ep->urbs[urb_index].urb;
+ urb->transfer_buffer_length = 0;
+ ep->umidi->usb_protocol_ops->output(ep, urb);
+ if (urb->transfer_buffer_length == 0)
+ break;
- if (urb->transfer_buffer_length > 0) {
- dump_urb("sending", urb->transfer_buffer,
- urb->transfer_buffer_length);
- urb->dev = ep->umidi->chip->dev;
- ep->urb_active = snd_usbmidi_submit_urb(urb, GFP_ATOMIC) >= 0;
+ dump_urb("sending", urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->dev = ep->umidi->chip->dev;
+ if (snd_usbmidi_submit_urb(urb, GFP_ATOMIC) < 0)
+ break;
+ ep->active_urbs |= 1 << urb_index;
+ }
+ if (++urb_index >= OUTPUT_URBS)
+ urb_index = 0;
+ if (urb_index == ep->next_urb)
+ break;
}
+ ep->next_urb = urb_index;
spin_unlock_irqrestore(&ep->buffer_lock, flags);
}
@@ -306,7 +337,7 @@ static void snd_usbmidi_out_tasklet(unsigned long data)
static void snd_usbmidi_error_timer(unsigned long data)
{
struct snd_usb_midi *umidi = (struct snd_usb_midi *)data;
- int i;
+ unsigned int i, j;
spin_lock(&umidi->disc_lock);
if (umidi->disconnected) {
@@ -317,8 +348,10 @@ static void snd_usbmidi_error_timer(unsigned long data)
struct snd_usb_midi_in_endpoint *in = umidi->endpoints[i].in;
if (in && in->error_resubmit) {
in->error_resubmit = 0;
- in->urb->dev = umidi->chip->dev;
- snd_usbmidi_submit_urb(in->urb, GFP_ATOMIC);
+ for (j = 0; j < INPUT_URBS; ++j) {
+ in->urbs[j]->dev = umidi->chip->dev;
+ snd_usbmidi_submit_urb(in->urbs[j], GFP_ATOMIC);
+ }
}
if (umidi->endpoints[i].out)
snd_usbmidi_do_output(umidi->endpoints[i].out);
@@ -330,13 +363,14 @@ static void snd_usbmidi_error_timer(unsigned long data)
static int send_bulk_static_data(struct snd_usb_midi_out_endpoint* ep,
const void *data, int len)
{
- int err;
+ int err = 0;
void *buf = kmemdup(data, len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
dump_urb("sending", buf, len);
- err = usb_bulk_msg(ep->umidi->chip->dev, ep->urb->pipe, buf, len,
- NULL, 250);
+ if (ep->urbs[0].urb)
+ err = usb_bulk_msg(ep->umidi->chip->dev, ep->urbs[0].urb->pipe,
+ buf, len, NULL, 250);
kfree(buf);
return err;
}
@@ -554,9 +588,9 @@ static void snd_usbmidi_transmit_byte(struct usbmidi_out_port* port,
}
}
-static void snd_usbmidi_standard_output(struct snd_usb_midi_out_endpoint* ep)
+static void snd_usbmidi_standard_output(struct snd_usb_midi_out_endpoint* ep,
+ struct urb *urb)
{
- struct urb* urb = ep->urb;
int p;
/* FIXME: lower-numbered ports can starve higher-numbered ports */
@@ -613,14 +647,15 @@ static void snd_usbmidi_novation_input(struct snd_usb_midi_in_endpoint* ep,
snd_usbmidi_input_data(ep, 0, &buffer[2], buffer[0] - 1);
}
-static void snd_usbmidi_novation_output(struct snd_usb_midi_out_endpoint* ep)
+static void snd_usbmidi_novation_output(struct snd_usb_midi_out_endpoint* ep,
+ struct urb *urb)
{
uint8_t* transfer_buffer;
int count;
if (!ep->ports[0].active)
return;
- transfer_buffer = ep->urb->transfer_buffer;
+ transfer_buffer = urb->transfer_buffer;
count = snd_rawmidi_transmit(ep->ports[0].substream,
&transfer_buffer[2],
ep->max_transfer - 2);
@@ -630,7 +665,7 @@ static void snd_usbmidi_novation_output(struct snd_usb_midi_out_endpoint* ep)
}
transfer_buffer[0] = 0;
transfer_buffer[1] = count;
- ep->urb->transfer_buffer_length = 2 + count;
+ urb->transfer_buffer_length = 2 + count;
}
static struct usb_protocol_ops snd_usbmidi_novation_ops = {
@@ -648,20 +683,21 @@ static void snd_usbmidi_raw_input(struct snd_usb_midi_in_endpoint* ep,
snd_usbmidi_input_data(ep, 0, buffer, buffer_length);
}
-static void snd_usbmidi_raw_output(struct snd_usb_midi_out_endpoint* ep)
+static void snd_usbmidi_raw_output(struct snd_usb_midi_out_endpoint* ep,
+ struct urb *urb)
{
int count;
if (!ep->ports[0].active)
return;
count = snd_rawmidi_transmit(ep->ports[0].substream,
- ep->urb->transfer_buffer,
+ urb->transfer_buffer,
ep->max_transfer);
if (count < 1) {
ep->ports[0].active = 0;
return;
}
- ep->urb->transfer_buffer_length = count;
+ urb->transfer_buffer_length = count;
}
static struct usb_protocol_ops snd_usbmidi_raw_ops = {
@@ -681,23 +717,25 @@ static void snd_usbmidi_us122l_input(struct snd_usb_midi_in_endpoint *ep,
snd_usbmidi_input_data(ep, 0, buffer, buffer_length);
}
-static void snd_usbmidi_us122l_output(struct snd_usb_midi_out_endpoint *ep)
+static void snd_usbmidi_us122l_output(struct snd_usb_midi_out_endpoint *ep,
+ struct urb *urb)
{
int count;
if (!ep->ports[0].active)
return;
- count = ep->urb->dev->speed == USB_SPEED_HIGH ? 1 : 2;
+ count = snd_usb_get_speed(ep->umidi->chip->dev) == USB_SPEED_HIGH
+ ? 1 : 2;
count = snd_rawmidi_transmit(ep->ports[0].substream,
- ep->urb->transfer_buffer,
+ urb->transfer_buffer,
count);
if (count < 1) {
ep->ports[0].active = 0;
return;
}
- memset(ep->urb->transfer_buffer + count, 0xFD, 9 - count);
- ep->urb->transfer_buffer_length = count;
+ memset(urb->transfer_buffer + count, 0xFD, 9 - count);
+ urb->transfer_buffer_length = count;
}
static struct usb_protocol_ops snd_usbmidi_122l_ops = {
@@ -786,10 +824,11 @@ static void snd_usbmidi_emagic_input(struct snd_usb_midi_in_endpoint* ep,
}
}
-static void snd_usbmidi_emagic_output(struct snd_usb_midi_out_endpoint* ep)
+static void snd_usbmidi_emagic_output(struct snd_usb_midi_out_endpoint* ep,
+ struct urb *urb)
{
int port0 = ep->current_port;
- uint8_t* buf = ep->urb->transfer_buffer;
+ uint8_t* buf = urb->transfer_buffer;
int buf_free = ep->max_transfer;
int length, i;
@@ -829,7 +868,7 @@ static void snd_usbmidi_emagic_output(struct snd_usb_midi_out_endpoint* ep)
*buf = 0xff;
--buf_free;
}
- ep->urb->transfer_buffer_length = ep->max_transfer - buf_free;
+ urb->transfer_buffer_length = ep->max_transfer - buf_free;
}
static struct usb_protocol_ops snd_usbmidi_emagic_ops = {
@@ -884,6 +923,35 @@ static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream,
}
}
+static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
+{
+ struct usbmidi_out_port* port = substream->runtime->private_data;
+ struct snd_usb_midi_out_endpoint *ep = port->ep;
+ unsigned int drain_urbs;
+ DEFINE_WAIT(wait);
+ long timeout = msecs_to_jiffies(50);
+
+ /*
+ * The substream buffer is empty, but some data might still be in the
+ * currently active URBs, so we have to wait for those to complete.
+ */
+ spin_lock_irq(&ep->buffer_lock);
+ drain_urbs = ep->active_urbs;
+ if (drain_urbs) {
+ ep->drain_urbs |= drain_urbs;
+ do {
+ prepare_to_wait(&ep->drain_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&ep->buffer_lock);
+ timeout = schedule_timeout(timeout);
+ spin_lock_irq(&ep->buffer_lock);
+ drain_urbs &= ep->drain_urbs;
+ } while (drain_urbs && timeout);
+ finish_wait(&ep->drain_wait, &wait);
+ }
+ spin_unlock_irq(&ep->buffer_lock);
+}
+
static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream)
{
return 0;
@@ -908,6 +976,7 @@ static struct snd_rawmidi_ops snd_usbmidi_output_ops = {
.open = snd_usbmidi_output_open,
.close = snd_usbmidi_output_close,
.trigger = snd_usbmidi_output_trigger,
+ .drain = snd_usbmidi_output_drain,
};
static struct snd_rawmidi_ops snd_usbmidi_input_ops = {
@@ -916,19 +985,26 @@ static struct snd_rawmidi_ops snd_usbmidi_input_ops = {
.trigger = snd_usbmidi_input_trigger
};
+static void free_urb_and_buffer(struct snd_usb_midi *umidi, struct urb *urb,
+ unsigned int buffer_length)
+{
+ usb_buffer_free(umidi->chip->dev, buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+}
+
/*
* Frees an input endpoint.
* May be called when ep hasn't been initialized completely.
*/
static void snd_usbmidi_in_endpoint_delete(struct snd_usb_midi_in_endpoint* ep)
{
- if (ep->urb) {
- usb_buffer_free(ep->umidi->chip->dev,
- ep->urb->transfer_buffer_length,
- ep->urb->transfer_buffer,
- ep->urb->transfer_dma);
- usb_free_urb(ep->urb);
- }
+ unsigned int i;
+
+ for (i = 0; i < INPUT_URBS; ++i)
+ if (ep->urbs[i])
+ free_urb_and_buffer(ep->umidi, ep->urbs[i],
+ ep->urbs[i]->transfer_buffer_length);
kfree(ep);
}
@@ -943,6 +1019,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
void* buffer;
unsigned int pipe;
int length;
+ unsigned int i;
rep->in = NULL;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
@@ -950,30 +1027,36 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
return -ENOMEM;
ep->umidi = umidi;
- ep->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!ep->urb) {
- snd_usbmidi_in_endpoint_delete(ep);
- return -ENOMEM;
+ for (i = 0; i < INPUT_URBS; ++i) {
+ ep->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ep->urbs[i]) {
+ snd_usbmidi_in_endpoint_delete(ep);
+ return -ENOMEM;
+ }
}
if (ep_info->in_interval)
pipe = usb_rcvintpipe(umidi->chip->dev, ep_info->in_ep);
else
pipe = usb_rcvbulkpipe(umidi->chip->dev, ep_info->in_ep);
length = usb_maxpacket(umidi->chip->dev, pipe, 0);
- buffer = usb_buffer_alloc(umidi->chip->dev, length, GFP_KERNEL,
- &ep->urb->transfer_dma);
- if (!buffer) {
- snd_usbmidi_in_endpoint_delete(ep);
- return -ENOMEM;
+ for (i = 0; i < INPUT_URBS; ++i) {
+ buffer = usb_buffer_alloc(umidi->chip->dev, length, GFP_KERNEL,
+ &ep->urbs[i]->transfer_dma);
+ if (!buffer) {
+ snd_usbmidi_in_endpoint_delete(ep);
+ return -ENOMEM;
+ }
+ if (ep_info->in_interval)
+ usb_fill_int_urb(ep->urbs[i], umidi->chip->dev,
+ pipe, buffer, length,
+ snd_usbmidi_in_urb_complete,
+ ep, ep_info->in_interval);
+ else
+ usb_fill_bulk_urb(ep->urbs[i], umidi->chip->dev,
+ pipe, buffer, length,
+ snd_usbmidi_in_urb_complete, ep);
+ ep->urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
}
- if (ep_info->in_interval)
- usb_fill_int_urb(ep->urb, umidi->chip->dev, pipe, buffer,
- length, snd_usbmidi_in_urb_complete, ep,
- ep_info->in_interval);
- else
- usb_fill_bulk_urb(ep->urb, umidi->chip->dev, pipe, buffer,
- length, snd_usbmidi_in_urb_complete, ep);
- ep->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
rep->in = ep;
return 0;
@@ -994,12 +1077,12 @@ static unsigned int snd_usbmidi_count_bits(unsigned int x)
*/
static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
{
- if (ep->urb) {
- usb_buffer_free(ep->umidi->chip->dev, ep->max_transfer,
- ep->urb->transfer_buffer,
- ep->urb->transfer_dma);
- usb_free_urb(ep->urb);
- }
+ unsigned int i;
+
+ for (i = 0; i < OUTPUT_URBS; ++i)
+ if (ep->urbs[i].urb)
+ free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
+ ep->max_transfer);
kfree(ep);
}
@@ -1011,7 +1094,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi,
struct snd_usb_midi_endpoint* rep)
{
struct snd_usb_midi_out_endpoint* ep;
- int i;
+ unsigned int i;
unsigned int pipe;
void* buffer;
@@ -1021,38 +1104,46 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi,
return -ENOMEM;
ep->umidi = umidi;
- ep->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!ep->urb) {
- snd_usbmidi_out_endpoint_delete(ep);
- return -ENOMEM;
+ for (i = 0; i < OUTPUT_URBS; ++i) {
+ ep->urbs[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ep->urbs[i].urb) {
+ snd_usbmidi_out_endpoint_delete(ep);
+ return -ENOMEM;
+ }
+ ep->urbs[i].ep = ep;
}
if (ep_info->out_interval)
pipe = usb_sndintpipe(umidi->chip->dev, ep_info->out_ep);
else
pipe = usb_sndbulkpipe(umidi->chip->dev, ep_info->out_ep);
if (umidi->chip->usb_id == USB_ID(0x0a92, 0x1020)) /* ESI M4U */
- /* FIXME: we need more URBs to get reasonable bandwidth here: */
ep->max_transfer = 4;
else
ep->max_transfer = usb_maxpacket(umidi->chip->dev, pipe, 1);
- buffer = usb_buffer_alloc(umidi->chip->dev, ep->max_transfer,
- GFP_KERNEL, &ep->urb->transfer_dma);
- if (!buffer) {
- snd_usbmidi_out_endpoint_delete(ep);
- return -ENOMEM;
+ for (i = 0; i < OUTPUT_URBS; ++i) {
+ buffer = usb_buffer_alloc(umidi->chip->dev,
+ ep->max_transfer, GFP_KERNEL,
+ &ep->urbs[i].urb->transfer_dma);
+ if (!buffer) {
+ snd_usbmidi_out_endpoint_delete(ep);
+ return -ENOMEM;
+ }
+ if (ep_info->out_interval)
+ usb_fill_int_urb(ep->urbs[i].urb, umidi->chip->dev,
+ pipe, buffer, ep->max_transfer,
+ snd_usbmidi_out_urb_complete,
+ &ep->urbs[i], ep_info->out_interval);
+ else
+ usb_fill_bulk_urb(ep->urbs[i].urb, umidi->chip->dev,
+ pipe, buffer, ep->max_transfer,
+ snd_usbmidi_out_urb_complete,
+ &ep->urbs[i]);
+ ep->urbs[i].urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
}
- if (ep_info->out_interval)
- usb_fill_int_urb(ep->urb, umidi->chip->dev, pipe, buffer,
- ep->max_transfer, snd_usbmidi_out_urb_complete,
- ep, ep_info->out_interval);
- else
- usb_fill_bulk_urb(ep->urb, umidi->chip->dev,
- pipe, buffer, ep->max_transfer,
- snd_usbmidi_out_urb_complete, ep);
- ep->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
spin_lock_init(&ep->buffer_lock);
tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep);
+ init_waitqueue_head(&ep->drain_wait);
for (i = 0; i < 0x10; ++i)
if (ep_info->out_cables & (1 << i)) {
@@ -1090,7 +1181,7 @@ static void snd_usbmidi_free(struct snd_usb_midi* umidi)
void snd_usbmidi_disconnect(struct list_head* p)
{
struct snd_usb_midi* umidi;
- int i;
+ unsigned int i, j;
umidi = list_entry(p, struct snd_usb_midi, list);
/*
@@ -1105,13 +1196,15 @@ void snd_usbmidi_disconnect(struct list_head* p)
struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
if (ep->out)
tasklet_kill(&ep->out->tasklet);
- if (ep->out && ep->out->urb) {
- usb_kill_urb(ep->out->urb);
+ if (ep->out) {
+ for (j = 0; j < OUTPUT_URBS; ++j)
+ usb_kill_urb(ep->out->urbs[j].urb);
if (umidi->usb_protocol_ops->finish_out_endpoint)
umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
}
if (ep->in)
- usb_kill_urb(ep->in->urb);
+ for (j = 0; j < INPUT_URBS; ++j)
+ usb_kill_urb(ep->in->urbs[j]);
/* free endpoints here; later call can result in Oops */
if (ep->out) {
snd_usbmidi_out_endpoint_delete(ep->out);
@@ -1692,20 +1785,25 @@ static int snd_usbmidi_create_rawmidi(struct snd_usb_midi* umidi,
void snd_usbmidi_input_stop(struct list_head* p)
{
struct snd_usb_midi* umidi;
- int i;
+ unsigned int i, j;
umidi = list_entry(p, struct snd_usb_midi, list);
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
if (ep->in)
- usb_kill_urb(ep->in->urb);
+ for (j = 0; j < INPUT_URBS; ++j)
+ usb_kill_urb(ep->in->urbs[j]);
}
}
static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep)
{
- if (ep) {
- struct urb* urb = ep->urb;
+ unsigned int i;
+
+ if (!ep)
+ return;
+ for (i = 0; i < INPUT_URBS; ++i) {
+ struct urb* urb = ep->urbs[i];
urb->dev = ep->umidi->chip->dev;
snd_usbmidi_submit_urb(urb, GFP_KERNEL);
}
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index ec9cdf986928..ab5a3ac2ac47 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -86,6 +86,7 @@ struct usb_mixer_interface {
u8 rc_buffer[6];
u8 audigy2nx_leds[3];
+ u8 xonar_u1_status;
};
@@ -461,7 +462,7 @@ static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *_tlv)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
- DECLARE_TLV_DB_SCALE(scale, 0, 0, 0);
+ DECLARE_TLV_DB_MINMAX(scale, 0, 0);
if (size < sizeof(scale))
return -ENOMEM;
@@ -469,7 +470,16 @@ static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
* while ALSA TLV contains in 1/100 dB unit
*/
scale[2] = (convert_signed_value(cval, cval->min) * 100) / 256;
- scale[3] = (convert_signed_value(cval, cval->res) * 100) / 256;
+ scale[3] = (convert_signed_value(cval, cval->max) * 100) / 256;
+ if (scale[3] <= scale[2]) {
+ /* something is wrong; assume it's either from/to 0dB */
+ if (scale[2] < 0)
+ scale[3] = 0;
+ else if (scale[2] > 0)
+ scale[2] = 0;
+ else /* totally crap, return an error */
+ return -EINVAL;
+ }
if (copy_to_user(_tlv, scale, sizeof(scale)))
return -EFAULT;
return 0;
@@ -2033,6 +2043,58 @@ static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
}
}
+static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = !!(mixer->xonar_u1_status & 0x02);
+ return 0;
+}
+
+static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
+ u8 old_status, new_status;
+ int err, changed;
+
+ old_status = mixer->xonar_u1_status;
+ if (ucontrol->value.integer.value[0])
+ new_status = old_status | 0x02;
+ else
+ new_status = old_status & ~0x02;
+ changed = new_status != old_status;
+ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 50, 0, &new_status, 1, 100);
+ if (err < 0)
+ return err;
+ mixer->xonar_u1_status = new_status;
+ return changed;
+}
+
+static struct snd_kcontrol_new snd_xonar_u1_output_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Digital Playback Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = snd_xonar_u1_switch_get,
+ .put = snd_xonar_u1_switch_put,
+};
+
+static int snd_xonar_u1_controls_create(struct usb_mixer_interface *mixer)
+{
+ int err;
+
+ err = snd_ctl_add(mixer->chip->card,
+ snd_ctl_new1(&snd_xonar_u1_output_switch, mixer));
+ if (err < 0)
+ return err;
+ mixer->xonar_u1_status = 0x05;
+ return 0;
+}
+
int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
int ignore_error)
{
@@ -2075,6 +2137,13 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
snd_audigy2nx_proc_read);
}
+ if (mixer->chip->usb_id == USB_ID(0x0b05, 0x1739) ||
+ mixer->chip->usb_id == USB_ID(0x0b05, 0x1743)) {
+ err = snd_xonar_u1_controls_create(mixer);
+ if (err < 0)
+ goto _error;
+ }
+
err = snd_device_new(chip->card, SNDRV_DEV_LOWLEVEL, mixer, &dev_ops);
if (err < 0)
goto _error;
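The TLV fix above converts the device's reported min/max volume into the units ALSA expects: USB audio feature units report volume in 1/256 dB steps, while ALSA TLV entries use 1/100 dB, hence the "* 100) / 256" scaling in the hunk. A small standalone illustration with made-up raw values (not taken from any real device):

	/*
	 * Illustration of the unit conversion used in mixer_vol_tlv() above.
	 * The raw values below are invented examples, not from a real device.
	 */
	#include <stdio.h>

	int main(void)
	{
		int min_raw = -0x2800;	/* -40.00 dB expressed in 1/256 dB units */
		int max_raw = 0x0200;	/*  +2.00 dB expressed in 1/256 dB units */

		int min_cb = min_raw * 100 / 256;	/* -> -4000, i.e. -40.00 dB in 1/100 dB */
		int max_cb = max_raw * 100 / 256;	/* ->   200, i.e.  +2.00 dB in 1/100 dB */

		printf("TLV dB range: %d .. %d (1/100 dB)\n", min_cb, max_cb);
		return 0;
	}

If the converted max does not end up above the min, the hunk falls back to clamping one end to 0 dB, or returns -EINVAL when both ends are zero.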
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 6be696b0a2bb..0ff23de9e453 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -91,6 +91,10 @@ OPTIONS
--no-samples::
Don't sample.
+-R::
+--raw-samples::
+Collect raw sample records from all opened counters (typically for tracepoint counters).
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index e72e93110782..59f0b846cd71 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -27,6 +27,9 @@ OPTIONS
-n
--show-nr-samples
Show the number of samples for each symbol
+-T
+--threads
+ Show per-thread event counters
-C::
--comms=::
Only consider symbols in these comms. CSV that understands
@@ -48,6 +51,16 @@ OPTIONS
all occurrences of this separator in symbol names (and other output)
with a '.' character, so that it is the only non-valid separator.
+-g [type,min]::
+--call-graph::
+ Display callchains using type and min percent threshold.
+ type can be either:
+ - flat: single column, linear exposure of callchains.
+ - graph: use a graph tree, displaying absolute overhead rates.
+ - fractal: like graph, but displays relative rates. Each branch of
+ the tree is considered as a new profiled object. +
+ Default: fractal,0.5.
+
SEE ALSO
--------
linkperf:perf-stat[1]
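The difference between the graph and fractal modes documented above comes down to what a branch's percentage is computed against: the whole profile versus the parent branch. A small standalone illustration, with symbol names and sample counts invented for the example:

	/*
	 * Sketch of the "graph" (absolute) vs "fractal" (relative) rates
	 * described above; node names and counts are invented.
	 */
	#include <stdio.h>

	struct node {
		const char *sym;
		unsigned long hits;
	};

	int main(void)
	{
		unsigned long total_samples = 1000;      /* whole profile */
		struct node parent = { "do_work", 200 }; /* one callchain branch */
		struct node child  = { "helper",   50 }; /* callee within that branch */

		/* graph: absolute rate, always relative to the whole profile */
		printf("graph:   %s %.1f%%\n", child.sym,
		       100.0 * child.hits / total_samples);   /* 5.0%  */

		/* fractal: relative rate, the parent branch is treated as 100% */
		printf("fractal: %s %.1f%%\n", child.sym,
		       100.0 * child.hits / parent.hits);     /* 25.0% */

		return 0;
	}

The min field of -g then prunes branches below that percentage, 0.5% with the documented default of fractal,0.5.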
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index c045b4271e57..9f8d207a91bf 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -166,7 +166,35 @@ endif
# CFLAGS and LDFLAGS are for the users to override from the command line.
-CFLAGS = $(M64) -ggdb3 -Wall -Wextra -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -Werror -O6
+#
+# Include saner warnings here, which can catch bugs:
+#
+
+EXTRA_WARNINGS := -Wcast-align
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-security
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-y2k
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-prototypes
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wnested-externs
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
+
+CFLAGS = $(M64) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
LDFLAGS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS)
ALL_LDFLAGS = $(LDFLAGS)
@@ -310,6 +338,7 @@ LIB_H += util/sigchain.h
LIB_H += util/symbol.h
LIB_H += util/module.h
LIB_H += util/color.h
+LIB_H += util/values.h
LIB_OBJS += util/abspath.o
LIB_OBJS += util/alias.o
@@ -337,6 +366,13 @@ LIB_OBJS += util/color.o
LIB_OBJS += util/pager.o
LIB_OBJS += util/header.o
LIB_OBJS += util/callchain.o
+LIB_OBJS += util/values.o
+LIB_OBJS += util/debug.o
+LIB_OBJS += util/map.o
+LIB_OBJS += util/thread.o
+LIB_OBJS += util/trace-event-parse.o
+LIB_OBJS += util/trace-event-read.o
+LIB_OBJS += util/trace-event-info.o
BUILTIN_OBJS += builtin-annotate.o
BUILTIN_OBJS += builtin-help.o
@@ -345,6 +381,7 @@ BUILTIN_OBJS += builtin-record.o
BUILTIN_OBJS += builtin-report.o
BUILTIN_OBJS += builtin-stat.o
BUILTIN_OBJS += builtin-top.o
+BUILTIN_OBJS += builtin-trace.o
PERFLIBS = $(LIB_FILE)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 5e17de984dc8..043d85b7e254 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -17,16 +17,13 @@
#include "util/string.h"
#include "perf.h"
+#include "util/debug.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
-
-#define SHOW_KERNEL 1
-#define SHOW_USER 2
-#define SHOW_HV 4
+#include "util/thread.h"
static char const *input_name = "perf.data";
-static char *vmlinux = "vmlinux";
static char default_sort_order[] = "comm,symbol";
static char *sort_order = default_sort_order;
@@ -35,13 +32,6 @@ static int force;
static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
-static int dump_trace = 0;
-#define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
-
-static int verbose;
-
-static int modules;
-
static int full_paths;
static int print_line;
@@ -49,39 +39,8 @@ static int print_line;
static unsigned long page_size;
static unsigned long mmap_window = 32;
-struct ip_event {
- struct perf_event_header header;
- u64 ip;
- u32 pid, tid;
-};
-
-struct mmap_event {
- struct perf_event_header header;
- u32 pid, tid;
- u64 start;
- u64 len;
- u64 pgoff;
- char filename[PATH_MAX];
-};
-
-struct comm_event {
- struct perf_event_header header;
- u32 pid, tid;
- char comm[16];
-};
-
-struct fork_event {
- struct perf_event_header header;
- u32 pid, ppid;
-};
-
-typedef union event_union {
- struct perf_event_header header;
- struct ip_event ip;
- struct mmap_event mmap;
- struct comm_event comm;
- struct fork_event fork;
-} event_t;
+static struct rb_root threads;
+static struct thread *last_match;
struct sym_ext {
@@ -90,323 +49,6 @@ struct sym_ext {
char *path;
};
-static LIST_HEAD(dsos);
-static struct dso *kernel_dso;
-static struct dso *vdso;
-
-
-static void dsos__add(struct dso *dso)
-{
- list_add_tail(&dso->node, &dsos);
-}
-
-static struct dso *dsos__find(const char *name)
-{
- struct dso *pos;
-
- list_for_each_entry(pos, &dsos, node)
- if (strcmp(pos->name, name) == 0)
- return pos;
- return NULL;
-}
-
-static struct dso *dsos__findnew(const char *name)
-{
- struct dso *dso = dsos__find(name);
- int nr;
-
- if (dso)
- return dso;
-
- dso = dso__new(name, 0);
- if (!dso)
- goto out_delete_dso;
-
- nr = dso__load(dso, NULL, verbose);
- if (nr < 0) {
- if (verbose)
- fprintf(stderr, "Failed to open: %s\n", name);
- goto out_delete_dso;
- }
- if (!nr && verbose) {
- fprintf(stderr,
- "No symbols found in: %s, maybe install a debug package?\n",
- name);
- }
-
- dsos__add(dso);
-
- return dso;
-
-out_delete_dso:
- dso__delete(dso);
- return NULL;
-}
-
-static void dsos__fprintf(FILE *fp)
-{
- struct dso *pos;
-
- list_for_each_entry(pos, &dsos, node)
- dso__fprintf(pos, fp);
-}
-
-static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
-{
- return dso__find_symbol(dso, ip);
-}
-
-static int load_kernel(void)
-{
- int err;
-
- kernel_dso = dso__new("[kernel]", 0);
- if (!kernel_dso)
- return -1;
-
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
- if (err <= 0) {
- dso__delete(kernel_dso);
- kernel_dso = NULL;
- } else
- dsos__add(kernel_dso);
-
- vdso = dso__new("[vdso]", 0);
- if (!vdso)
- return -1;
-
- vdso->find_symbol = vdso__find_symbol;
-
- dsos__add(vdso);
-
- return err;
-}
-
-struct map {
- struct list_head node;
- u64 start;
- u64 end;
- u64 pgoff;
- u64 (*map_ip)(struct map *, u64);
- struct dso *dso;
-};
-
-static u64 map__map_ip(struct map *map, u64 ip)
-{
- return ip - map->start + map->pgoff;
-}
-
-static u64 vdso__map_ip(struct map *map __used, u64 ip)
-{
- return ip;
-}
-
-static struct map *map__new(struct mmap_event *event)
-{
- struct map *self = malloc(sizeof(*self));
-
- if (self != NULL) {
- const char *filename = event->filename;
-
- self->start = event->start;
- self->end = event->start + event->len;
- self->pgoff = event->pgoff;
-
- self->dso = dsos__findnew(filename);
- if (self->dso == NULL)
- goto out_delete;
-
- if (self->dso == vdso)
- self->map_ip = vdso__map_ip;
- else
- self->map_ip = map__map_ip;
- }
- return self;
-out_delete:
- free(self);
- return NULL;
-}
-
-static struct map *map__clone(struct map *self)
-{
- struct map *map = malloc(sizeof(*self));
-
- if (!map)
- return NULL;
-
- memcpy(map, self, sizeof(*self));
-
- return map;
-}
-
-static int map__overlap(struct map *l, struct map *r)
-{
- if (l->start > r->start) {
- struct map *t = l;
- l = r;
- r = t;
- }
-
- if (l->end > r->start)
- return 1;
-
- return 0;
-}
-
-static size_t map__fprintf(struct map *self, FILE *fp)
-{
- return fprintf(fp, " %Lx-%Lx %Lx %s\n",
- self->start, self->end, self->pgoff, self->dso->name);
-}
-
-
-struct thread {
- struct rb_node rb_node;
- struct list_head maps;
- pid_t pid;
- char *comm;
-};
-
-static struct thread *thread__new(pid_t pid)
-{
- struct thread *self = malloc(sizeof(*self));
-
- if (self != NULL) {
- self->pid = pid;
- self->comm = malloc(32);
- if (self->comm)
- snprintf(self->comm, 32, ":%d", self->pid);
- INIT_LIST_HEAD(&self->maps);
- }
-
- return self;
-}
-
-static int thread__set_comm(struct thread *self, const char *comm)
-{
- if (self->comm)
- free(self->comm);
- self->comm = strdup(comm);
- return self->comm ? 0 : -ENOMEM;
-}
-
-static size_t thread__fprintf(struct thread *self, FILE *fp)
-{
- struct map *pos;
- size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
-
- list_for_each_entry(pos, &self->maps, node)
- ret += map__fprintf(pos, fp);
-
- return ret;
-}
-
-
-static struct rb_root threads;
-static struct thread *last_match;
-
-static struct thread *threads__findnew(pid_t pid)
-{
- struct rb_node **p = &threads.rb_node;
- struct rb_node *parent = NULL;
- struct thread *th;
-
- /*
- * Font-end cache - PID lookups come in blocks,
- * so most of the time we dont have to look up
- * the full rbtree:
- */
- if (last_match && last_match->pid == pid)
- return last_match;
-
- while (*p != NULL) {
- parent = *p;
- th = rb_entry(parent, struct thread, rb_node);
-
- if (th->pid == pid) {
- last_match = th;
- return th;
- }
-
- if (pid < th->pid)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- th = thread__new(pid);
- if (th != NULL) {
- rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &threads);
- last_match = th;
- }
-
- return th;
-}
-
-static void thread__insert_map(struct thread *self, struct map *map)
-{
- struct map *pos, *tmp;
-
- list_for_each_entry_safe(pos, tmp, &self->maps, node) {
- if (map__overlap(pos, map)) {
- list_del_init(&pos->node);
- /* XXX leaks dsos */
- free(pos);
- }
- }
-
- list_add_tail(&map->node, &self->maps);
-}
-
-static int thread__fork(struct thread *self, struct thread *parent)
-{
- struct map *map;
-
- if (self->comm)
- free(self->comm);
- self->comm = strdup(parent->comm);
- if (!self->comm)
- return -ENOMEM;
-
- list_for_each_entry(map, &parent->maps, node) {
- struct map *new = map__clone(map);
- if (!new)
- return -ENOMEM;
- thread__insert_map(self, new);
- }
-
- return 0;
-}
-
-static struct map *thread__find_map(struct thread *self, u64 ip)
-{
- struct map *pos;
-
- if (self == NULL)
- return NULL;
-
- list_for_each_entry(pos, &self->maps, node)
- if (ip >= pos->start && ip <= pos->end)
- return pos;
-
- return NULL;
-}
-
-static size_t threads__fprintf(FILE *fp)
-{
- size_t ret = 0;
- struct rb_node *nd;
-
- for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
- struct thread *pos = rb_entry(nd, struct thread, rb_node);
-
- ret += thread__fprintf(pos, fp);
- }
-
- return ret;
-}
-
/*
* histogram, sorted on item, collects counts
*/
@@ -433,7 +75,7 @@ struct hist_entry {
struct sort_entry {
struct list_head list;
- char *header;
+ const char *header;
int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
@@ -577,7 +219,7 @@ static struct sort_entry sort_sym = {
static int sort__need_collapse = 0;
struct sort_dimension {
- char *name;
+ const char *name;
struct sort_entry *entry;
int taken;
};
@@ -830,17 +472,6 @@ static void output__resort(void)
}
}
-static void register_idle_thread(void)
-{
- struct thread *thread = threads__findnew(0);
-
- if (thread == NULL ||
- thread__set_comm(thread, "[idle]")) {
- fprintf(stderr, "problem inserting idle task.\n");
- exit(-1);
- }
-}
-
static unsigned long total = 0,
total_mmap = 0,
total_comm = 0,
@@ -853,18 +484,20 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
char level;
int show = 0;
struct dso *dso = NULL;
- struct thread *thread = threads__findnew(event->ip.pid);
+ struct thread *thread;
u64 ip = event->ip.ip;
struct map *map = NULL;
- dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
+ thread = threads__findnew(event->ip.pid, &threads, &last_match);
+
+ dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.misc,
event->ip.pid,
(void *)(long)ip);
- dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
if (thread == NULL) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
@@ -878,7 +511,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
dso = kernel_dso;
- dprintf(" ...... dso: %s\n", dso->name);
+ dump_printf(" ...... dso: %s\n", dso->name);
} else if (event->header.misc & PERF_EVENT_MISC_USER) {
@@ -899,12 +532,12 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
if ((long long)ip < 0)
dso = kernel_dso;
}
- dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
+ dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
} else {
show = SHOW_HV;
level = 'H';
- dprintf(" ...... dso: [hypervisor]\n");
+ dump_printf(" ...... dso: [hypervisor]\n");
}
if (show & show_mask) {
@@ -927,10 +560,12 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread = threads__findnew(event->mmap.pid);
- struct map *map = map__new(&event->mmap);
+ struct thread *thread;
+ struct map *map = map__new(&event->mmap, NULL, 0);
+
+ thread = threads__findnew(event->mmap.pid, &threads, &last_match);
- dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
+ dump_printf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->mmap.pid,
@@ -940,7 +575,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
event->mmap.filename);
if (thread == NULL || map == NULL) {
- dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
+ dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n");
return 0;
}
@@ -953,16 +588,17 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread = threads__findnew(event->comm.pid);
+ struct thread *thread;
- dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+ thread = threads__findnew(event->comm.pid, &threads, &last_match);
+ dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->comm.comm, event->comm.pid);
if (thread == NULL ||
thread__set_comm(thread, event->comm.comm)) {
- dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
+ dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
return -1;
}
total_comm++;
@@ -973,10 +609,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread = threads__findnew(event->fork.pid);
- struct thread *parent = threads__findnew(event->fork.ppid);
+ struct thread *thread;
+ struct thread *parent;
- dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
+ thread = threads__findnew(event->fork.pid, &threads, &last_match);
+ parent = threads__findnew(event->fork.ppid, &threads, &last_match);
+ dump_printf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->fork.pid, event->fork.ppid);
@@ -989,7 +627,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
if (!thread || !parent || thread__fork(thread, parent)) {
- dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
+ dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n");
return -1;
}
total_fork++;
@@ -1075,7 +713,7 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
const char *path = NULL;
unsigned int hits = 0;
double percent = 0.0;
- char *color;
+ const char *color;
struct sym_ext *sym_ext = sym->priv;
offset = line_ip - start;
@@ -1157,7 +795,7 @@ static void free_source_line(struct symbol *sym, int len)
/* Get the filename:line for the colored entries */
static void
-get_source_line(struct symbol *sym, u64 start, int len, char *filename)
+get_source_line(struct symbol *sym, u64 start, int len, const char *filename)
{
int i;
char cmd[PATH_MAX * 2];
@@ -1203,7 +841,7 @@ get_source_line(struct symbol *sym, u64 start, int len, char *filename)
}
}
-static void print_summary(char *filename)
+static void print_summary(const char *filename)
{
struct sym_ext *sym_ext;
struct rb_node *node;
@@ -1219,7 +857,7 @@ static void print_summary(char *filename)
node = rb_first(&root_sym_ext);
while (node) {
double percent;
- char *color;
+ const char *color;
char *path;
sym_ext = rb_entry(node, struct sym_ext, node);
@@ -1234,7 +872,7 @@ static void print_summary(char *filename)
static void annotate_sym(struct dso *dso, struct symbol *sym)
{
- char *filename = dso->name, *d_filename;
+ const char *filename = dso->name, *d_filename;
u64 start, end, len;
char command[PATH_MAX*2];
FILE *file;
@@ -1244,7 +882,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym)
if (sym->module)
filename = sym->module->path;
else if (dso == kernel_dso)
- filename = vmlinux;
+ filename = vmlinux_name;
start = sym->obj_start;
if (!start)
@@ -1316,12 +954,12 @@ static int __cmd_annotate(void)
int ret, rc = EXIT_FAILURE;
unsigned long offset = 0;
unsigned long head = 0;
- struct stat stat;
+ struct stat input_stat;
event_t *event;
uint32_t size;
char *buf;
- register_idle_thread();
+ register_idle_thread(&threads, &last_match);
input = open(input_name, O_RDONLY);
if (input < 0) {
@@ -1329,18 +967,18 @@ static int __cmd_annotate(void)
exit(-1);
}
- ret = fstat(input, &stat);
+ ret = fstat(input, &input_stat);
if (ret < 0) {
perror("failed to stat file");
exit(-1);
}
- if (!force && (stat.st_uid != geteuid())) {
- fprintf(stderr, "file: %s not owned by current user\n", input_name);
+ if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+ fprintf(stderr, "file: %s not owned by current user or root\n", input_name);
exit(-1);
}
- if (!stat.st_size) {
+ if (!input_stat.st_size) {
fprintf(stderr, "zero-sized file, nothing to do!\n");
exit(0);
}
@@ -1367,10 +1005,10 @@ more:
if (head + event->header.size >= page_size * mmap_window) {
unsigned long shift = page_size * (head / page_size);
- int ret;
+ int munmap_ret;
- ret = munmap(buf, page_size * mmap_window);
- assert(ret == 0);
+ munmap_ret = munmap(buf, page_size * mmap_window);
+ assert(munmap_ret == 0);
offset += shift;
head -= shift;
@@ -1379,14 +1017,14 @@ more:
size = event->header.size;
- dprintf("%p [%p]: event: %d\n",
+ dump_printf("%p [%p]: event: %d\n",
(void *)(offset + head),
(void *)(long)event->header.size,
event->header.type);
if (!size || process_event(event, offset, head) < 0) {
- dprintf("%p [%p]: skipping unknown header type: %d\n",
+ dump_printf("%p [%p]: skipping unknown header type: %d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.type);
@@ -1406,23 +1044,23 @@ more:
head += size;
- if (offset + head < (unsigned long)stat.st_size)
+ if (offset + head < (unsigned long)input_stat.st_size)
goto more;
rc = EXIT_SUCCESS;
close(input);
- dprintf(" IP events: %10ld\n", total);
- dprintf(" mmap events: %10ld\n", total_mmap);
- dprintf(" comm events: %10ld\n", total_comm);
- dprintf(" fork events: %10ld\n", total_fork);
- dprintf(" unknown events: %10ld\n", total_unknown);
+ dump_printf(" IP events: %10ld\n", total);
+ dump_printf(" mmap events: %10ld\n", total_mmap);
+ dump_printf(" comm events: %10ld\n", total_comm);
+ dump_printf(" fork events: %10ld\n", total_fork);
+ dump_printf(" unknown events: %10ld\n", total_unknown);
if (dump_trace)
return 0;
if (verbose >= 3)
- threads__fprintf(stdout);
+ threads__fprintf(stdout, &threads);
if (verbose >= 2)
dsos__fprintf(stdout);
@@ -1450,7 +1088,7 @@ static const struct option options[] = {
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
- OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
OPT_BOOLEAN('m', "modules", &modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('l', "print-line", &print_line,
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 2599d86a733b..4fb8734a796e 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -456,6 +456,7 @@ int cmd_help(int argc, const char **argv, const char *prefix __used)
break;
case HELP_FORMAT_WEB:
show_html_page(argv[0]);
+ default:
break;
}
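The lone default: label added above is the kind of change the stricter warning set in the Makefile hunk (notably -Wswitch-default) starts demanding; a minimal standalone example of the pattern, mirroring the HELP_FORMAT_* values visible in the hunk but with an invented function, and assuming that warning is indeed the motivation:

	/*
	 * Illustration of -Wswitch-default: without a default label the
	 * switch below draws a warning once that flag is enabled.
	 * Compile with: gcc -Wswitch-default -c switch_example.c
	 */
	enum help_format { HELP_FORMAT_MAN, HELP_FORMAT_INFO, HELP_FORMAT_WEB };

	static const char *format_name(enum help_format fmt)
	{
		switch (fmt) {
		case HELP_FORMAT_MAN:
			return "man";
		case HELP_FORMAT_INFO:
			return "info";
		case HELP_FORMAT_WEB:
			return "web";
		default:		/* keeps -Wswitch-default quiet */
			break;
		}
		return "unknown";
	}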
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 89a5ddcd1ded..99a12fe86e9f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -15,6 +15,9 @@
#include "util/string.h"
#include "util/header.h"
+#include "util/event.h"
+#include "util/debug.h"
+#include "util/trace-event.h"
#include <unistd.h>
#include <sched.h>
@@ -42,7 +45,6 @@ static int inherit = 1;
static int force = 0;
static int append_file = 0;
static int call_graph = 0;
-static int verbose = 0;
static int inherit_stat = 0;
static int no_samples = 0;
static int sample_address = 0;
@@ -62,24 +64,6 @@ static int file_new = 1;
struct perf_header *header;
-struct mmap_event {
- struct perf_event_header header;
- u32 pid;
- u32 tid;
- u64 start;
- u64 len;
- u64 pgoff;
- char filename[PATH_MAX];
-};
-
-struct comm_event {
- struct perf_event_header header;
- u32 pid;
- u32 tid;
- char comm[16];
-};
-
-
struct mmap_data {
int counter;
void *base;
@@ -419,8 +403,11 @@ static void create_counter(int counter, int cpu, pid_t pid)
if (call_graph)
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
- if (raw_samples)
+ if (raw_samples) {
+ attr->sample_type |= PERF_SAMPLE_TIME;
attr->sample_type |= PERF_SAMPLE_RAW;
+ attr->sample_type |= PERF_SAMPLE_CPU;
+ }
attr->mmap = track;
attr->comm = track;
@@ -563,6 +550,17 @@ static int __cmd_record(int argc, const char **argv)
else
header = perf_header__new();
+
+ if (raw_samples) {
+ read_tracing_data(attrs, nr_counters);
+ } else {
+ for (i = 0; i < nr_counters; i++) {
+ if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
+ read_tracing_data(attrs, nr_counters);
+ break;
+ }
+ }
+ }
atexit(atexit_header);
if (!system_wide) {
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8b2ec882e6e0..cdf9a8d27bb9 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -17,19 +17,18 @@
#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"
+#include "util/values.h"
#include "perf.h"
+#include "util/debug.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
-#define SHOW_KERNEL 1
-#define SHOW_USER 2
-#define SHOW_HV 4
+#include "util/thread.h"
static char const *input_name = "perf.data";
-static char *vmlinux = NULL;
static char default_sort_order[] = "comm,dso,symbol";
static char *sort_order = default_sort_order;
@@ -42,18 +41,15 @@ static int force;
static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
-static int dump_trace = 0;
-#define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
-#define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
-
-static int verbose;
-#define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
-
-static int modules;
-
static int full_paths;
static int show_nr_samples;
+static int show_threads;
+static struct perf_read_values show_threads_values;
+
+static char default_pretty_printing_style[] = "normal";
+static char *pretty_printing_style = default_pretty_printing_style;
+
static unsigned long page_size;
static unsigned long mmap_window = 32;
@@ -67,6 +63,15 @@ static char callchain_default_opt[] = "fractal,0.5";
static int callchain;
+static char __cwd[PATH_MAX];
+static char *cwd = __cwd;
+static int cwdlen;
+
+static struct rb_root threads;
+static struct thread *last_match;
+
+static struct perf_header *header;
+
static
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_REL,
@@ -75,59 +80,6 @@ struct callchain_param callchain_param = {
static u64 sample_type;
-struct ip_event {
- struct perf_event_header header;
- u64 ip;
- u32 pid, tid;
- unsigned char __more_data[];
-};
-
-struct mmap_event {
- struct perf_event_header header;
- u32 pid, tid;
- u64 start;
- u64 len;
- u64 pgoff;
- char filename[PATH_MAX];
-};
-
-struct comm_event {
- struct perf_event_header header;
- u32 pid, tid;
- char comm[16];
-};
-
-struct fork_event {
- struct perf_event_header header;
- u32 pid, ppid;
- u32 tid, ptid;
-};
-
-struct lost_event {
- struct perf_event_header header;
- u64 id;
- u64 lost;
-};
-
-struct read_event {
- struct perf_event_header header;
- u32 pid,tid;
- u64 value;
- u64 time_enabled;
- u64 time_running;
- u64 id;
-};
-
-typedef union event_union {
- struct perf_event_header header;
- struct ip_event ip;
- struct mmap_event mmap;
- struct comm_event comm;
- struct fork_event fork;
- struct lost_event lost;
- struct read_event read;
-} event_t;
-
static int repsep_fprintf(FILE *fp, const char *fmt, ...)
{
int n;
@@ -141,6 +93,7 @@ static int repsep_fprintf(FILE *fp, const char *fmt, ...)
n = vasprintf(&bf, fmt, ap);
if (n > 0) {
char *sep = bf;
+
while (1) {
sep = strchr(sep, *field_sep);
if (sep == NULL)
@@ -155,396 +108,10 @@ static int repsep_fprintf(FILE *fp, const char *fmt, ...)
return n;
}
-static LIST_HEAD(dsos);
-static struct dso *kernel_dso;
-static struct dso *vdso;
-static struct dso *hypervisor_dso;
-
-static void dsos__add(struct dso *dso)
-{
- list_add_tail(&dso->node, &dsos);
-}
-
-static struct dso *dsos__find(const char *name)
-{
- struct dso *pos;
-
- list_for_each_entry(pos, &dsos, node)
- if (strcmp(pos->name, name) == 0)
- return pos;
- return NULL;
-}
-
-static struct dso *dsos__findnew(const char *name)
-{
- struct dso *dso = dsos__find(name);
- int nr;
-
- if (dso)
- return dso;
-
- dso = dso__new(name, 0);
- if (!dso)
- goto out_delete_dso;
-
- nr = dso__load(dso, NULL, verbose);
- if (nr < 0) {
- eprintf("Failed to open: %s\n", name);
- goto out_delete_dso;
- }
- if (!nr)
- eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
-
- dsos__add(dso);
-
- return dso;
-
-out_delete_dso:
- dso__delete(dso);
- return NULL;
-}
-
-static void dsos__fprintf(FILE *fp)
-{
- struct dso *pos;
-
- list_for_each_entry(pos, &dsos, node)
- dso__fprintf(pos, fp);
-}
-
-static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
-{
- return dso__find_symbol(dso, ip);
-}
-
-static int load_kernel(void)
-{
- int err;
-
- kernel_dso = dso__new("[kernel]", 0);
- if (!kernel_dso)
- return -1;
-
- err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
- if (err <= 0) {
- dso__delete(kernel_dso);
- kernel_dso = NULL;
- } else
- dsos__add(kernel_dso);
-
- vdso = dso__new("[vdso]", 0);
- if (!vdso)
- return -1;
-
- vdso->find_symbol = vdso__find_symbol;
-
- dsos__add(vdso);
-
- hypervisor_dso = dso__new("[hypervisor]", 0);
- if (!hypervisor_dso)
- return -1;
- dsos__add(hypervisor_dso);
-
- return err;
-}
-
-static char __cwd[PATH_MAX];
-static char *cwd = __cwd;
-static int cwdlen;
-
-static int strcommon(const char *pathname)
-{
- int n = 0;
-
- while (n < cwdlen && pathname[n] == cwd[n])
- ++n;
-
- return n;
-}
-
-struct map {
- struct list_head node;
- u64 start;
- u64 end;
- u64 pgoff;
- u64 (*map_ip)(struct map *, u64);
- struct dso *dso;
-};
-
-static u64 map__map_ip(struct map *map, u64 ip)
-{
- return ip - map->start + map->pgoff;
-}
-
-static u64 vdso__map_ip(struct map *map __used, u64 ip)
-{
- return ip;
-}
-
-static inline int is_anon_memory(const char *filename)
-{
- return strcmp(filename, "//anon") == 0;
-}
-
-static struct map *map__new(struct mmap_event *event)
-{
- struct map *self = malloc(sizeof(*self));
-
- if (self != NULL) {
- const char *filename = event->filename;
- char newfilename[PATH_MAX];
- int anon;
-
- if (cwd) {
- int n = strcommon(filename);
-
- if (n == cwdlen) {
- snprintf(newfilename, sizeof(newfilename),
- ".%s", filename + n);
- filename = newfilename;
- }
- }
-
- anon = is_anon_memory(filename);
-
- if (anon) {
- snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
- filename = newfilename;
- }
-
- self->start = event->start;
- self->end = event->start + event->len;
- self->pgoff = event->pgoff;
-
- self->dso = dsos__findnew(filename);
- if (self->dso == NULL)
- goto out_delete;
-
- if (self->dso == vdso || anon)
- self->map_ip = vdso__map_ip;
- else
- self->map_ip = map__map_ip;
- }
- return self;
-out_delete:
- free(self);
- return NULL;
-}
-
-static struct map *map__clone(struct map *self)
-{
- struct map *map = malloc(sizeof(*self));
-
- if (!map)
- return NULL;
-
- memcpy(map, self, sizeof(*self));
-
- return map;
-}
-
-static int map__overlap(struct map *l, struct map *r)
-{
- if (l->start > r->start) {
- struct map *t = l;
- l = r;
- r = t;
- }
-
- if (l->end > r->start)
- return 1;
-
- return 0;
-}
-
-static size_t map__fprintf(struct map *self, FILE *fp)
-{
- return fprintf(fp, " %Lx-%Lx %Lx %s\n",
- self->start, self->end, self->pgoff, self->dso->name);
-}
-
-
-struct thread {
- struct rb_node rb_node;
- struct list_head maps;
- pid_t pid;
- char *comm;
-};
-
-static struct thread *thread__new(pid_t pid)
-{
- struct thread *self = malloc(sizeof(*self));
-
- if (self != NULL) {
- self->pid = pid;
- self->comm = malloc(32);
- if (self->comm)
- snprintf(self->comm, 32, ":%d", self->pid);
- INIT_LIST_HEAD(&self->maps);
- }
-
- return self;
-}
-
static unsigned int dsos__col_width,
comms__col_width,
threads__col_width;
-static int thread__set_comm(struct thread *self, const char *comm)
-{
- if (self->comm)
- free(self->comm);
- self->comm = strdup(comm);
- if (!self->comm)
- return -ENOMEM;
-
- if (!col_width_list_str && !field_sep &&
- (!comm_list || strlist__has_entry(comm_list, comm))) {
- unsigned int slen = strlen(comm);
- if (slen > comms__col_width) {
- comms__col_width = slen;
- threads__col_width = slen + 6;
- }
- }
-
- return 0;
-}
-
-static size_t thread__fprintf(struct thread *self, FILE *fp)
-{
- struct map *pos;
- size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
-
- list_for_each_entry(pos, &self->maps, node)
- ret += map__fprintf(pos, fp);
-
- return ret;
-}
-
-
-static struct rb_root threads;
-static struct thread *last_match;
-
-static struct thread *threads__findnew(pid_t pid)
-{
- struct rb_node **p = &threads.rb_node;
- struct rb_node *parent = NULL;
- struct thread *th;
-
- /*
- * Font-end cache - PID lookups come in blocks,
- * so most of the time we dont have to look up
- * the full rbtree:
- */
- if (last_match && last_match->pid == pid)
- return last_match;
-
- while (*p != NULL) {
- parent = *p;
- th = rb_entry(parent, struct thread, rb_node);
-
- if (th->pid == pid) {
- last_match = th;
- return th;
- }
-
- if (pid < th->pid)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- th = thread__new(pid);
- if (th != NULL) {
- rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &threads);
- last_match = th;
- }
-
- return th;
-}
-
-static void thread__insert_map(struct thread *self, struct map *map)
-{
- struct map *pos, *tmp;
-
- list_for_each_entry_safe(pos, tmp, &self->maps, node) {
- if (map__overlap(pos, map)) {
- if (verbose >= 2) {
- printf("overlapping maps:\n");
- map__fprintf(map, stdout);
- map__fprintf(pos, stdout);
- }
-
- if (map->start <= pos->start && map->end > pos->start)
- pos->start = map->end;
-
- if (map->end >= pos->end && map->start < pos->end)
- pos->end = map->start;
-
- if (verbose >= 2) {
- printf("after collision:\n");
- map__fprintf(pos, stdout);
- }
-
- if (pos->start >= pos->end) {
- list_del_init(&pos->node);
- free(pos);
- }
- }
- }
-
- list_add_tail(&map->node, &self->maps);
-}
-
-static int thread__fork(struct thread *self, struct thread *parent)
-{
- struct map *map;
-
- if (self->comm)
- free(self->comm);
- self->comm = strdup(parent->comm);
- if (!self->comm)
- return -ENOMEM;
-
- list_for_each_entry(map, &parent->maps, node) {
- struct map *new = map__clone(map);
- if (!new)
- return -ENOMEM;
- thread__insert_map(self, new);
- }
-
- return 0;
-}
-
-static struct map *thread__find_map(struct thread *self, u64 ip)
-{
- struct map *pos;
-
- if (self == NULL)
- return NULL;
-
- list_for_each_entry(pos, &self->maps, node)
- if (ip >= pos->start && ip <= pos->end)
- return pos;
-
- return NULL;
-}
-
-static size_t threads__fprintf(FILE *fp)
-{
- size_t ret = 0;
- struct rb_node *nd;
-
- for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
- struct thread *pos = rb_entry(nd, struct thread, rb_node);
-
- ret += thread__fprintf(pos, fp);
- }
-
- return ret;
-}
-
/*
* histogram, sorted on item, collects counts
*/
@@ -574,7 +141,7 @@ struct hist_entry {
struct sort_entry {
struct list_head list;
- char *header;
+ const char *header;
int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
@@ -758,7 +325,7 @@ static int sort__need_collapse = 0;
static int sort__has_parent = 0;
struct sort_dimension {
- char *name;
+ const char *name;
struct sort_entry *entry;
int taken;
};
@@ -773,7 +340,7 @@ static struct sort_dimension sort_dimensions[] = {
static LIST_HEAD(hist_entry__sort_list);
-static int sort_dimension__add(char *tok)
+static int sort_dimension__add(const char *tok)
{
unsigned int i;
@@ -1032,6 +599,7 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
case CHAIN_GRAPH_REL:
ret += callchain__fprintf_graph(fp, chain,
total_samples, 1, 1);
+ case CHAIN_NONE:
default:
break;
}
@@ -1098,6 +666,34 @@ static void dso__calc_col_width(struct dso *self)
self->slen_calculated = 1;
}
+static void thread__comm_adjust(struct thread *self)
+{
+ char *comm = self->comm;
+
+ if (!col_width_list_str && !field_sep &&
+ (!comm_list || strlist__has_entry(comm_list, comm))) {
+ unsigned int slen = strlen(comm);
+
+ if (slen > comms__col_width) {
+ comms__col_width = slen;
+ threads__col_width = slen + 6;
+ }
+ }
+}
+
+static int thread__set_comm_adjust(struct thread *self, const char *comm)
+{
+ int ret = thread__set_comm(self, comm);
+
+ if (ret)
+ return ret;
+
+ thread__comm_adjust(self);
+
+ return 0;
+}
+
+
static struct symbol *
resolve_symbol(struct thread *thread, struct map **mapp,
struct dso **dsop, u64 *ipp)
@@ -1141,8 +737,8 @@ got_map:
if ((long long)ip < 0)
dso = kernel_dso;
}
- dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
- dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
+ dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
+ dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
*ipp = ip;
if (dsop)
@@ -1398,6 +994,9 @@ static size_t output__fprintf(FILE *fp, u64 total_samples)
size_t ret = 0;
unsigned int width;
char *col_width = col_width_list_str;
+ int raw_printing_style;
+
+ raw_printing_style = !strcmp(pretty_printing_style, "raw");
init_rem_hits();
@@ -1474,18 +1073,11 @@ print_entries:
free(rem_sq_bracket);
- return ret;
-}
+ if (show_threads)
+ perf_read_values_display(fp, &show_threads_values,
+ raw_printing_style);
-static void register_idle_thread(void)
-{
- struct thread *thread = threads__findnew(0);
-
- if (thread == NULL ||
- thread__set_comm(thread, "[idle]")) {
- fprintf(stderr, "problem inserting idle task.\n");
- exit(-1);
- }
+ return ret;
}
static unsigned long total = 0,
@@ -1514,7 +1106,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
char level;
int show = 0;
struct dso *dso = NULL;
- struct thread *thread = threads__findnew(event->ip.pid);
+ struct thread *thread;
u64 ip = event->ip.ip;
u64 period = 1;
struct map *map = NULL;
@@ -1522,12 +1114,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
struct ip_callchain *chain = NULL;
int cpumode;
+ thread = threads__findnew(event->ip.pid, &threads, &last_match);
+
if (sample_type & PERF_SAMPLE_PERIOD) {
period = *(u64 *)more_data;
more_data += sizeof(u64);
}
- dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+ dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.misc,
@@ -1540,7 +1134,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
chain = (void *)more_data;
- dprintf("... chain: nr:%Lu\n", chain->nr);
+ dump_printf("... chain: nr:%Lu\n", chain->nr);
if (validate_chain(chain, event) < 0) {
eprintf("call-chain problem with event, skipping it.\n");
@@ -1549,11 +1143,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
if (dump_trace) {
for (i = 0; i < chain->nr; i++)
- dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
+ dump_printf("..... %2d: %016Lx\n", i, chain->ips[i]);
}
}
- dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
if (thread == NULL) {
eprintf("problem processing %d event, skipping it.\n",
@@ -1572,7 +1166,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
dso = kernel_dso;
- dprintf(" ...... dso: %s\n", dso->name);
+ dump_printf(" ...... dso: %s\n", dso->name);
} else if (cpumode == PERF_EVENT_MISC_USER) {
@@ -1585,7 +1179,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
dso = hypervisor_dso;
- dprintf(" ...... dso: [hypervisor]\n");
+ dump_printf(" ...... dso: [hypervisor]\n");
}
if (show & show_mask) {
@@ -1611,10 +1205,12 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread = threads__findnew(event->mmap.pid);
- struct map *map = map__new(&event->mmap);
+ struct thread *thread;
+ struct map *map = map__new(&event->mmap, cwd, cwdlen);
- dprintf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
+ thread = threads__findnew(event->mmap.pid, &threads, &last_match);
+
+ dump_printf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->mmap.pid,
@@ -1625,7 +1221,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
event->mmap.filename);
if (thread == NULL || map == NULL) {
- dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
+ dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n");
return 0;
}
@@ -1638,16 +1234,18 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread = threads__findnew(event->comm.pid);
+ struct thread *thread;
+
+ thread = threads__findnew(event->comm.pid, &threads, &last_match);
- dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+ dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->comm.comm, event->comm.pid);
if (thread == NULL ||
- thread__set_comm(thread, event->comm.comm)) {
- dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
+ thread__set_comm_adjust(thread, event->comm.comm)) {
+ dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
return -1;
}
total_comm++;
@@ -1658,10 +1256,13 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_task_event(event_t *event, unsigned long offset, unsigned long head)
{
- struct thread *thread = threads__findnew(event->fork.pid);
- struct thread *parent = threads__findnew(event->fork.ppid);
+ struct thread *thread;
+ struct thread *parent;
- dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
+ thread = threads__findnew(event->fork.pid, &threads, &last_match);
+ parent = threads__findnew(event->fork.ppid, &threads, &last_match);
+
+ dump_printf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
@@ -1679,7 +1280,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
if (!thread || !parent || thread__fork(thread, parent)) {
- dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
+ dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n");
return -1;
}
total_fork++;
@@ -1690,7 +1291,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_lost_event(event_t *event, unsigned long offset, unsigned long head)
{
- dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
+ dump_printf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->lost.id,
@@ -1701,67 +1302,24 @@ process_lost_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static void trace_event(event_t *event)
-{
- unsigned char *raw_event = (void *)event;
- char *color = PERF_COLOR_BLUE;
- int i, j;
-
- if (!dump_trace)
- return;
-
- dprintf(".");
- cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
-
- for (i = 0; i < event->header.size; i++) {
- if ((i & 15) == 0) {
- dprintf(".");
- cdprintf(" %04x: ", i);
- }
-
- cdprintf(" %02x", raw_event[i]);
-
- if (((i & 15) == 15) || i == event->header.size-1) {
- cdprintf(" ");
- for (j = 0; j < 15-(i & 15); j++)
- cdprintf(" ");
- for (j = 0; j < (i & 15); j++) {
- if (isprint(raw_event[i-15+j]))
- cdprintf("%c", raw_event[i-15+j]);
- else
- cdprintf(".");
- }
- cdprintf("\n");
- }
- }
- dprintf(".\n");
-}
-
-static struct perf_header *header;
-
-static struct perf_counter_attr *perf_header__find_attr(u64 id)
+static int
+process_read_event(event_t *event, unsigned long offset, unsigned long head)
{
- int i;
+ struct perf_counter_attr *attr;
- for (i = 0; i < header->attrs; i++) {
- struct perf_header_attr *attr = header->attr[i];
- int j;
+ attr = perf_header__find_attr(event->read.id, header);
- for (j = 0; j < attr->ids; j++) {
- if (attr->id[j] == id)
- return &attr->attr;
- }
+ if (show_threads) {
+ const char *name = attr ? __event_name(attr->type, attr->config)
+ : "unknown";
+ perf_read_values_add_value(&show_threads_values,
+ event->read.pid, event->read.tid,
+ event->read.id,
+ name,
+ event->read.value);
}
- return NULL;
-}
-
-static int
-process_read_event(event_t *event, unsigned long offset, unsigned long head)
-{
- struct perf_counter_attr *attr = perf_header__find_attr(event->read.id);
-
- dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
+ dump_printf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->read.pid,
@@ -1813,34 +1371,22 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static u64 perf_header__sample_type(void)
-{
- u64 sample_type = 0;
- int i;
-
- for (i = 0; i < header->attrs; i++) {
- struct perf_header_attr *attr = header->attr[i];
-
- if (!sample_type)
- sample_type = attr->attr.sample_type;
- else if (sample_type != attr->attr.sample_type)
- die("non matching sample_type");
- }
-
- return sample_type;
-}
-
static int __cmd_report(void)
{
int ret, rc = EXIT_FAILURE;
unsigned long offset = 0;
unsigned long head, shift;
- struct stat stat;
+ struct stat input_stat;
+ struct thread *idle;
event_t *event;
uint32_t size;
char *buf;
- register_idle_thread();
+ idle = register_idle_thread(&threads, &last_match);
+ thread__comm_adjust(idle);
+
+ if (show_threads)
+ perf_read_values_init(&show_threads_values);
input = open(input_name, O_RDONLY);
if (input < 0) {
@@ -1851,18 +1397,18 @@ static int __cmd_report(void)
exit(-1);
}
- ret = fstat(input, &stat);
+ ret = fstat(input, &input_stat);
if (ret < 0) {
perror("failed to stat file");
exit(-1);
}
- if (!force && (stat.st_uid != geteuid())) {
- fprintf(stderr, "file: %s not owned by current user\n", input_name);
+ if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+ fprintf(stderr, "file: %s not owned by current user or root\n", input_name);
exit(-1);
}
- if (!stat.st_size) {
+ if (!input_stat.st_size) {
fprintf(stderr, "zero-sized file, nothing to do!\n");
exit(0);
}
@@ -1870,7 +1416,7 @@ static int __cmd_report(void)
header = perf_header__read(input);
head = header->data_offset;
- sample_type = perf_header__sample_type();
+ sample_type = perf_header__sample_type(header);
if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
@@ -1880,7 +1426,7 @@ static int __cmd_report(void)
exit(-1);
}
if (callchain) {
- fprintf(stderr, "selected -c but no callchain data."
+ fprintf(stderr, "selected -g but no callchain data."
" Did you call perf record without"
" -g?\n");
exit(-1);
@@ -1930,12 +1476,12 @@ more:
size = 8;
if (head + event->header.size >= page_size * mmap_window) {
- int ret;
+ int munmap_ret;
shift = page_size * (head / page_size);
- ret = munmap(buf, page_size * mmap_window);
- assert(ret == 0);
+ munmap_ret = munmap(buf, page_size * mmap_window);
+ assert(munmap_ret == 0);
offset += shift;
head -= shift;
@@ -1944,14 +1490,14 @@ more:
size = event->header.size;
- dprintf("\n%p [%p]: event: %d\n",
+ dump_printf("\n%p [%p]: event: %d\n",
(void *)(offset + head),
(void *)(long)event->header.size,
event->header.type);
if (!size || process_event(event, offset, head) < 0) {
- dprintf("%p [%p]: skipping unknown header type: %d\n",
+ dump_printf("%p [%p]: skipping unknown header type: %d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.type);
@@ -1974,25 +1520,25 @@ more:
if (offset + head >= header->data_offset + header->data_size)
goto done;
- if (offset + head < (unsigned long)stat.st_size)
+ if (offset + head < (unsigned long)input_stat.st_size)
goto more;
done:
rc = EXIT_SUCCESS;
close(input);
- dprintf(" IP events: %10ld\n", total);
- dprintf(" mmap events: %10ld\n", total_mmap);
- dprintf(" comm events: %10ld\n", total_comm);
- dprintf(" fork events: %10ld\n", total_fork);
- dprintf(" lost events: %10ld\n", total_lost);
- dprintf(" unknown events: %10ld\n", total_unknown);
+ dump_printf(" IP events: %10ld\n", total);
+ dump_printf(" mmap events: %10ld\n", total_mmap);
+ dump_printf(" comm events: %10ld\n", total_comm);
+ dump_printf(" fork events: %10ld\n", total_fork);
+ dump_printf(" lost events: %10ld\n", total_lost);
+ dump_printf(" unknown events: %10ld\n", total_unknown);
if (dump_trace)
return 0;
if (verbose >= 3)
- threads__fprintf(stdout);
+ threads__fprintf(stdout, &threads);
if (verbose >= 2)
dsos__fprintf(stdout);
@@ -2001,6 +1547,9 @@ done:
output__resort(total);
output__fprintf(stdout, total);
+ if (show_threads)
+ perf_read_values_destroy(&show_threads_values);
+
return rc;
}
@@ -2069,12 +1618,16 @@ static const struct option options[] = {
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
- OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
"Show a column with the number of samples"),
+ OPT_BOOLEAN('T', "threads", &show_threads,
+ "Show per-thread event counters"),
+ OPT_STRING(0, "pretty", &pretty_printing_style, "key",
+ "pretty printing style key: normal raw"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('P', "full-paths", &full_paths,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index b4b06c7903e1..61b828236c11 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -42,6 +42,8 @@
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
+#include "util/event.h"
+#include "util/debug.h"
#include <sys/prctl.h>
#include <math.h>
@@ -60,10 +62,7 @@ static struct perf_counter_attr default_attrs[] = {
};
-#define MAX_RUN 100
-
static int system_wide = 0;
-static int verbose = 0;
static unsigned int nr_cpus = 0;
static int run_idx = 0;
@@ -75,26 +74,56 @@ static int null_run = 0;
static int fd[MAX_NR_CPUS][MAX_COUNTERS];
-static u64 runtime_nsecs[MAX_RUN];
-static u64 walltime_nsecs[MAX_RUN];
-static u64 runtime_cycles[MAX_RUN];
+static int event_scaled[MAX_COUNTERS];
-static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
-static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
+struct stats
+{
+ double n, mean, M2;
+};
-static u64 event_res_avg[MAX_COUNTERS][3];
-static u64 event_res_noise[MAX_COUNTERS][3];
+static void update_stats(struct stats *stats, u64 val)
+{
+ double delta;
-static u64 event_scaled_avg[MAX_COUNTERS];
+ stats->n++;
+ delta = val - stats->mean;
+ stats->mean += delta / stats->n;
+ stats->M2 += delta*(val - stats->mean);
+}
-static u64 runtime_nsecs_avg;
-static u64 runtime_nsecs_noise;
+static double avg_stats(struct stats *stats)
+{
+ return stats->mean;
+}
-static u64 walltime_nsecs_avg;
-static u64 walltime_nsecs_noise;
+/*
+ * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ *
+ * (\Sum n_i^2) - ((\Sum n_i)^2)/n
+ * s^2 = -------------------------------
+ * n - 1
+ *
+ * http://en.wikipedia.org/wiki/Stddev
+ *
+ * The std dev of the mean is related to the std dev by:
+ *
+ * s
+ * s_mean = -------
+ * sqrt(n)
+ *
+ */
+static double stddev_stats(struct stats *stats)
+{
+ double variance = stats->M2 / (stats->n - 1);
+ double variance_mean = variance / stats->n;
+
+ return sqrt(variance_mean);
+}
-static u64 runtime_cycles_avg;
-static u64 runtime_cycles_noise;
+struct stats event_res_stats[MAX_COUNTERS][3];
+struct stats runtime_nsecs_stats;
+struct stats walltime_nsecs_stats;
+struct stats runtime_cycles_stats;
#define MATCH_EVENT(t, c, counter) \
(attrs[counter].type == PERF_TYPE_##t && \
@@ -149,12 +178,11 @@ static inline int nsec_counter(int counter)
*/
static void read_counter(int counter)
{
- u64 *count, single_count[3];
+ u64 count[3], single_count[3];
unsigned int cpu;
size_t res, nv;
int scaled;
-
- count = event_res[run_idx][counter];
+ int i;
count[0] = count[1] = count[2] = 0;
@@ -179,24 +207,33 @@ static void read_counter(int counter)
scaled = 0;
if (scale) {
if (count[2] == 0) {
- event_scaled[run_idx][counter] = -1;
+ event_scaled[counter] = -1;
count[0] = 0;
return;
}
if (count[2] < count[1]) {
- event_scaled[run_idx][counter] = 1;
+ event_scaled[counter] = 1;
count[0] = (unsigned long long)
((double)count[0] * count[1] / count[2] + 0.5);
}
}
+
+ for (i = 0; i < 3; i++)
+ update_stats(&event_res_stats[counter][i], count[i]);
+
+ if (verbose) {
+ fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
+ count[0], count[1], count[2]);
+ }
+
/*
* Save the full runtime - to allow normalization during printout:
*/
if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
- runtime_nsecs[run_idx] = count[0];
+ update_stats(&runtime_nsecs_stats, count[0]);
if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
- runtime_cycles[run_idx] = count[0];
+ update_stats(&runtime_cycles_stats, count[0]);
}
static int run_perf_stat(int argc __used, const char **argv)
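
When a counter was time-multiplexed, read_counter() above extrapolates the raw value from the fraction of time the counter was actually running (count[1] is time_enabled, count[2] is time_running). A hedged, standalone sketch of that extrapolation, with a hypothetical function name:

#include <stdint.h>

typedef uint64_t u64;

/* value * time_enabled / time_running, rounded to nearest, as in read_counter(). */
static u64 scale_count(u64 value, u64 enabled, u64 running)
{
	if (running == 0)	/* counter never ran: nothing to extrapolate */
		return 0;
	return (u64)((double)value * enabled / running + 0.5);
}
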
@@ -270,7 +307,7 @@ static int run_perf_stat(int argc __used, const char **argv)
t1 = rdclock();
- walltime_nsecs[run_idx] = t1 - t0;
+ update_stats(&walltime_nsecs_stats, t1 - t0);
for (counter = 0; counter < nr_counters; counter++)
read_counter(counter);
@@ -278,42 +315,38 @@ static int run_perf_stat(int argc __used, const char **argv)
return WEXITSTATUS(status);
}
-static void print_noise(u64 *count, u64 *noise)
+static void print_noise(int counter, double avg)
{
- if (run_count > 1)
- fprintf(stderr, " ( +- %7.3f%% )",
- (double)noise[0]/(count[0]+1)*100.0);
+ if (run_count == 1)
+ return;
+
+ fprintf(stderr, " ( +- %7.3f%% )",
+ 100 * stddev_stats(&event_res_stats[counter][0]) / avg);
}
-static void nsec_printout(int counter, u64 *count, u64 *noise)
+static void nsec_printout(int counter, double avg)
{
- double msecs = (double)count[0] / 1000000;
+ double msecs = avg / 1e6;
fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter));
if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
- if (walltime_nsecs_avg)
- fprintf(stderr, " # %10.3f CPUs ",
- (double)count[0] / (double)walltime_nsecs_avg);
+ fprintf(stderr, " # %10.3f CPUs ",
+ avg / avg_stats(&walltime_nsecs_stats));
}
- print_noise(count, noise);
}
-static void abs_printout(int counter, u64 *count, u64 *noise)
+static void abs_printout(int counter, double avg)
{
- fprintf(stderr, " %14Ld %-24s", count[0], event_name(counter));
+ fprintf(stderr, " %14.0f %-24s", avg, event_name(counter));
- if (runtime_cycles_avg &&
- MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
+ if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
fprintf(stderr, " # %10.3f IPC ",
- (double)count[0] / (double)runtime_cycles_avg);
+ avg / avg_stats(&runtime_cycles_stats));
} else {
- if (runtime_nsecs_avg) {
- fprintf(stderr, " # %10.3f M/sec",
- (double)count[0]/runtime_nsecs_avg*1000.0);
- }
+ fprintf(stderr, " # %10.3f M/sec",
+ 1000.0 * avg / avg_stats(&runtime_nsecs_stats));
}
- print_noise(count, noise);
}
/*
@@ -321,12 +354,8 @@ static void abs_printout(int counter, u64 *count, u64 *noise)
*/
static void print_counter(int counter)
{
- u64 *count, *noise;
- int scaled;
-
- count = event_res_avg[counter];
- noise = event_res_noise[counter];
- scaled = event_scaled_avg[counter];
+ double avg = avg_stats(&event_res_stats[counter][0]);
+ int scaled = event_scaled[counter];
if (scaled == -1) {
fprintf(stderr, " %14s %-24s\n",
@@ -335,110 +364,29 @@ static void print_counter(int counter)
}
if (nsec_counter(counter))
- nsec_printout(counter, count, noise);
+ nsec_printout(counter, avg);
else
- abs_printout(counter, count, noise);
-
- if (scaled)
- fprintf(stderr, " (scaled from %.2f%%)",
- (double) count[2] / count[1] * 100);
-
- fprintf(stderr, "\n");
-}
+ abs_printout(counter, avg);
-/*
- * normalize_noise noise values down to stddev:
- */
-static void normalize_noise(u64 *val)
-{
- double res;
+ print_noise(counter, avg);
- res = (double)*val / (run_count * sqrt((double)run_count));
+ if (scaled) {
+ double avg_enabled, avg_running;
- *val = (u64)res;
-}
+ avg_enabled = avg_stats(&event_res_stats[counter][1]);
+ avg_running = avg_stats(&event_res_stats[counter][2]);
-static void update_avg(const char *name, int idx, u64 *avg, u64 *val)
-{
- *avg += *val;
-
- if (verbose > 1)
- fprintf(stderr, "debug: %20s[%d]: %Ld\n", name, idx, *val);
-}
-/*
- * Calculate the averages and noises:
- */
-static void calc_avg(void)
-{
- int i, j;
-
- if (verbose > 1)
- fprintf(stderr, "\n");
-
- for (i = 0; i < run_count; i++) {
- update_avg("runtime", 0, &runtime_nsecs_avg, runtime_nsecs + i);
- update_avg("walltime", 0, &walltime_nsecs_avg, walltime_nsecs + i);
- update_avg("runtime_cycles", 0, &runtime_cycles_avg, runtime_cycles + i);
-
- for (j = 0; j < nr_counters; j++) {
- update_avg("counter/0", j,
- event_res_avg[j]+0, event_res[i][j]+0);
- update_avg("counter/1", j,
- event_res_avg[j]+1, event_res[i][j]+1);
- update_avg("counter/2", j,
- event_res_avg[j]+2, event_res[i][j]+2);
- if (event_scaled[i][j] != (u64)-1)
- update_avg("scaled", j,
- event_scaled_avg + j, event_scaled[i]+j);
- else
- event_scaled_avg[j] = -1;
- }
- }
- runtime_nsecs_avg /= run_count;
- walltime_nsecs_avg /= run_count;
- runtime_cycles_avg /= run_count;
-
- for (j = 0; j < nr_counters; j++) {
- event_res_avg[j][0] /= run_count;
- event_res_avg[j][1] /= run_count;
- event_res_avg[j][2] /= run_count;
- }
-
- for (i = 0; i < run_count; i++) {
- runtime_nsecs_noise +=
- abs((s64)(runtime_nsecs[i] - runtime_nsecs_avg));
- walltime_nsecs_noise +=
- abs((s64)(walltime_nsecs[i] - walltime_nsecs_avg));
- runtime_cycles_noise +=
- abs((s64)(runtime_cycles[i] - runtime_cycles_avg));
-
- for (j = 0; j < nr_counters; j++) {
- event_res_noise[j][0] +=
- abs((s64)(event_res[i][j][0] - event_res_avg[j][0]));
- event_res_noise[j][1] +=
- abs((s64)(event_res[i][j][1] - event_res_avg[j][1]));
- event_res_noise[j][2] +=
- abs((s64)(event_res[i][j][2] - event_res_avg[j][2]));
- }
+ fprintf(stderr, " (scaled from %.2f%%)",
+ 100 * avg_running / avg_enabled);
}
- normalize_noise(&runtime_nsecs_noise);
- normalize_noise(&walltime_nsecs_noise);
- normalize_noise(&runtime_cycles_noise);
-
- for (j = 0; j < nr_counters; j++) {
- normalize_noise(&event_res_noise[j][0]);
- normalize_noise(&event_res_noise[j][1]);
- normalize_noise(&event_res_noise[j][2]);
- }
+ fprintf(stderr, "\n");
}
static void print_stat(int argc, const char **argv)
{
int i, counter;
- calc_avg();
-
fflush(stdout);
fprintf(stderr, "\n");
@@ -457,10 +405,11 @@ static void print_stat(int argc, const char **argv)
fprintf(stderr, "\n");
fprintf(stderr, " %14.9f seconds time elapsed",
- (double)walltime_nsecs_avg/1e9);
+ avg_stats(&walltime_nsecs_stats)/1e9);
if (run_count > 1) {
fprintf(stderr, " ( +- %7.3f%% )",
- 100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg);
+ 100*stddev_stats(&walltime_nsecs_stats) /
+ avg_stats(&walltime_nsecs_stats));
}
fprintf(stderr, "\n\n");
}
@@ -515,7 +464,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(stat_usage, options);
- if (run_count <= 0 || run_count > MAX_RUN)
+ if (run_count <= 0)
usage_with_options(stat_usage, options);
/* Set attrs and nr_counters if no event is selected and !null_run */
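
The struct stats code above replaces the old fixed-size MAX_RUN arrays with Welford's online algorithm, so -r can take any repeat count while still reporting a mean and the standard error of that mean. A self-contained sketch of how per-run samples reduce to "mean ( +- % )" output; the helpers mirror the patch, while main() and the sample values are purely illustrative:

#include <math.h>
#include <stdio.h>

struct stats { double n, mean, M2; };

static void update_stats(struct stats *s, double val)
{
	double delta;

	s->n++;
	delta = val - s->mean;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);
}

/* Standard deviation of the mean: sqrt(variance / n). */
static double stddev_mean(const struct stats *s)
{
	double variance = s->M2 / (s->n - 1);

	return sqrt(variance / s->n);
}

int main(void)
{
	struct stats s = { 0, 0, 0 };
	double runs[] = { 100.0, 102.0, 98.0, 101.0, 99.0 };
	unsigned int i;

	for (i = 0; i < sizeof(runs) / sizeof(runs[0]); i++)
		update_stats(&s, runs[i]);

	printf("%.2f ( +- %.3f%% )\n", s.mean,
	       100.0 * stddev_mean(&s) / s.mean);
	return 0;
}
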
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7de28ce9ca26..4002ccb36750 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -27,6 +27,8 @@
#include "util/parse-options.h"
#include "util/parse-events.h"
+#include "util/debug.h"
+
#include <assert.h>
#include <fcntl.h>
@@ -68,8 +70,6 @@ static int group = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 16;
static int freq = 0;
-static int verbose = 0;
-static char *vmlinux = NULL;
static int delay_secs = 2;
static int zero;
@@ -122,7 +122,8 @@ static void parse_source(struct sym_entry *syme)
struct module *module;
struct section *section = NULL;
FILE *file;
- char command[PATH_MAX*2], *path = vmlinux;
+ char command[PATH_MAX*2];
+ const char *path = vmlinux_name;
u64 start, end, len;
if (!syme)
@@ -338,8 +339,6 @@ static void show_details(struct sym_entry *syme)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
}
-struct dso *kernel_dso;
-
/*
* Symbols will be added here in record_ip and will get out
* after decayed.
@@ -484,17 +483,24 @@ static void print_sym_table(void)
if (nr_counters == 1)
printf(" samples pcnt");
else
- printf(" weight samples pcnt");
+ printf(" weight samples pcnt");
- printf(" RIP kernel function\n"
- " ______ _______ _____ ________________ _______________\n\n"
- );
+ if (verbose)
+ printf(" RIP ");
+ printf(" kernel function\n");
+ printf(" %s _______ _____",
+ nr_counters == 1 ? " " : "______");
+ if (verbose)
+ printf(" ________________");
+ printf(" _______________\n\n");
for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
- struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
- struct symbol *sym = (struct symbol *)(syme + 1);
+ struct symbol *sym;
double pcnt;
+ syme = rb_entry(nd, struct sym_entry, rb_node);
+ sym = (struct symbol *)(syme + 1);
+
if (++printed > print_entries || (int)syme->snap_count < count_filter)
continue;
@@ -507,7 +513,9 @@ static void print_sym_table(void)
printf("%9.1f %10ld - ", syme->weight, syme->snap_count);
percent_color_fprintf(stdout, "%4.1f%%", pcnt);
- printf(" - %016llx : %s", sym->start, sym->name);
+ if (verbose)
+ printf(" - %016llx", sym->start);
+ printf(" : %s", sym->name);
if (sym->module)
printf("\t[%s]", sym->module->name);
printf("\n");
@@ -613,7 +621,7 @@ static void print_mapped_keys(void)
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);
- if (vmlinux) {
+ if (vmlinux_name) {
fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
@@ -642,7 +650,9 @@ static int key_mapped(int c)
case 'F':
case 's':
case 'S':
- return vmlinux ? 1 : 0;
+ return vmlinux_name ? 1 : 0;
+ default:
+ break;
}
return 0;
@@ -728,6 +738,8 @@ static void handle_keypress(int c)
case 'z':
zero = ~zero;
break;
+ default:
+ break;
}
}
@@ -816,13 +828,13 @@ static int parse_symbols(void)
{
struct rb_node *node;
struct symbol *sym;
- int modules = vmlinux ? 1 : 0;
+ int use_modules = vmlinux_name ? 1 : 0;
kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
if (kernel_dso == NULL)
return -1;
- if (dso__load_kernel(kernel_dso, vmlinux, symbol_filter, verbose, modules) <= 0)
+ if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0)
goto out_delete_dso;
node = rb_first(&kernel_dso->syms);
@@ -937,26 +949,6 @@ static void mmap_read_counter(struct mmap_data *md)
last_read = this_read;
for (; old != head;) {
- struct ip_event {
- struct perf_event_header header;
- u64 ip;
- u32 pid, target_pid;
- };
- struct mmap_event {
- struct perf_event_header header;
- u32 pid, target_pid;
- u64 start;
- u64 len;
- u64 pgoff;
- char filename[PATH_MAX];
- };
-
- typedef union event_union {
- struct perf_event_header header;
- struct ip_event ip;
- struct mmap_event mmap;
- } event_t;
-
event_t *event = (event_t *)&data[old & md->mask];
event_t event_copy;
@@ -1138,7 +1130,7 @@ static const struct option options[] = {
"system-wide collection from all CPUs"),
OPT_INTEGER('C', "CPU", &profile_cpu,
"CPU to profile on"),
- OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
OPT_INTEGER('m', "mmap-pages", &mmap_pages,
"number of mmap data pages"),
OPT_INTEGER('r', "realtime", &realtime_prio,
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
new file mode 100644
index 000000000000..914ab366e369
--- /dev/null
+++ b/tools/perf/builtin-trace.c
@@ -0,0 +1,297 @@
+#include "builtin.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+
+#include "util/parse-options.h"
+
+#include "perf.h"
+#include "util/debug.h"
+
+#include "util/trace-event.h"
+
+static char const *input_name = "perf.data";
+static int input;
+static unsigned long page_size;
+static unsigned long mmap_window = 32;
+
+static unsigned long total = 0;
+static unsigned long total_comm = 0;
+
+static struct rb_root threads;
+static struct thread *last_match;
+
+static struct perf_header *header;
+static u64 sample_type;
+
+
+static int
+process_comm_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ struct thread *thread;
+
+ thread = threads__findnew(event->comm.pid, &threads, &last_match);
+
+ dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->comm.comm, event->comm.pid);
+
+ if (thread == NULL ||
+ thread__set_comm(thread, event->comm.comm)) {
+ dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
+ return -1;
+ }
+ total_comm++;
+
+ return 0;
+}
+
+static int
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ char level;
+ int show = 0;
+ struct dso *dso = NULL;
+ struct thread *thread;
+ u64 ip = event->ip.ip;
+ u64 timestamp = -1;
+ u32 cpu = -1;
+ u64 period = 1;
+ void *more_data = event->ip.__more_data;
+ int cpumode;
+
+ thread = threads__findnew(event->ip.pid, &threads, &last_match);
+
+ if (sample_type & PERF_SAMPLE_TIME) {
+ timestamp = *(u64 *)more_data;
+ more_data += sizeof(u64);
+ }
+
+ if (sample_type & PERF_SAMPLE_CPU) {
+ cpu = *(u32 *)more_data;
+ more_data += sizeof(u32);
+ more_data += sizeof(u32); /* reserved */
+ }
+
+ if (sample_type & PERF_SAMPLE_PERIOD) {
+ period = *(u64 *)more_data;
+ more_data += sizeof(u64);
+ }
+
+ dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->header.misc,
+ event->ip.pid, event->ip.tid,
+ (void *)(long)ip,
+ (long long)period);
+
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
+ if (thread == NULL) {
+ eprintf("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
+
+ if (cpumode == PERF_EVENT_MISC_KERNEL) {
+ show = SHOW_KERNEL;
+ level = 'k';
+
+ dso = kernel_dso;
+
+ dump_printf(" ...... dso: %s\n", dso->name);
+
+ } else if (cpumode == PERF_EVENT_MISC_USER) {
+
+ show = SHOW_USER;
+ level = '.';
+
+ } else {
+ show = SHOW_HV;
+ level = 'H';
+
+ dso = hypervisor_dso;
+
+ dump_printf(" ...... dso: [hypervisor]\n");
+ }
+
+ if (sample_type & PERF_SAMPLE_RAW) {
+ struct {
+ u32 size;
+ char data[0];
+ } *raw = more_data;
+
+ /*
+ * FIXME: better to resolve the pid from the struct trace_entry
+ * field, although it should be the same as this perf
+ * event's pid
+ */
+ print_event(cpu, raw->data, raw->size, timestamp, thread->comm);
+ }
+ total += period;
+
+ return 0;
+}
+
+static int
+process_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ trace_event(event);
+
+ switch (event->header.type) {
+ case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
+ return 0;
+
+ case PERF_EVENT_COMM:
+ return process_comm_event(event, offset, head);
+
+ case PERF_EVENT_EXIT ... PERF_EVENT_READ:
+ return 0;
+
+ case PERF_EVENT_SAMPLE:
+ return process_sample_event(event, offset, head);
+
+ case PERF_EVENT_MAX:
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int __cmd_trace(void)
+{
+ int ret, rc = EXIT_FAILURE;
+ unsigned long offset = 0;
+ unsigned long head = 0;
+ struct stat perf_stat;
+ event_t *event;
+ uint32_t size;
+ char *buf;
+
+ trace_report();
+ register_idle_thread(&threads, &last_match);
+
+ input = open(input_name, O_RDONLY);
+ if (input < 0) {
+ perror("failed to open file");
+ exit(-1);
+ }
+
+ ret = fstat(input, &perf_stat);
+ if (ret < 0) {
+ perror("failed to stat file");
+ exit(-1);
+ }
+
+ if (!perf_stat.st_size) {
+ fprintf(stderr, "zero-sized file, nothing to do!\n");
+ exit(0);
+ }
+ header = perf_header__read(input);
+ head = header->data_offset;
+ sample_type = perf_header__sample_type(header);
+
+ if (!(sample_type & PERF_SAMPLE_RAW))
+ die("No trace sample to read. Did you call perf record "
+ "without -R?");
+
+ if (load_kernel() < 0) {
+ perror("failed to load kernel symbols");
+ return EXIT_FAILURE;
+ }
+
+remap:
+ buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
+ MAP_SHARED, input, offset);
+ if (buf == MAP_FAILED) {
+ perror("failed to mmap file");
+ exit(-1);
+ }
+
+more:
+ event = (event_t *)(buf + head);
+
+ size = event->header.size;
+ if (!size)
+ size = 8;
+
+ if (head + event->header.size >= page_size * mmap_window) {
+ unsigned long shift = page_size * (head / page_size);
+ int res;
+
+ res = munmap(buf, page_size * mmap_window);
+ assert(res == 0);
+
+ offset += shift;
+ head -= shift;
+ goto remap;
+ }
+
+ size = event->header.size;
+
+
+ if (!size || process_event(event, offset, head) < 0) {
+
+ /*
+ * assume we lost track of the stream, check alignment, and
+ * increment a single u64 in the hope to catch on again 'soon'.
+ */
+
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ if (offset + head < (unsigned long)perf_stat.st_size)
+ goto more;
+
+ rc = EXIT_SUCCESS;
+ close(input);
+
+ return rc;
+}
+
+static const char * const annotate_usage[] = {
+ "perf trace [<options>] <command>",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_BOOLEAN('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_END()
+};
+
+int cmd_trace(int argc, const char **argv, const char *prefix __used)
+{
+ symbol__init();
+ page_size = getpagesize();
+
+ argc = parse_options(argc, argv, options, annotate_usage, 0);
+ if (argc) {
+ /*
+ * Special case: if there's an argument left then assume that
+ * it's a symbol filter:
+ */
+ if (argc > 1)
+ usage_with_options(annotate_usage, options);
+ }
+
+
+ setup_pager();
+
+ return __cmd_trace();
+}
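
__cmd_trace() above reads perf.data through a sliding mmap window: when head runs past the window it unmaps, advances the file offset by a whole number of pages, rebases head, and remaps. A simplified, hedged sketch of that window arithmetic (hypothetical helper name, error handling trimmed):

#include <sys/mman.h>
#include <unistd.h>

/*
 * Slide the read window forward so that *head stays inside the mapping:
 * round the advance down to a page boundary, as __cmd_trace() does at
 * its "remap:" label.
 */
static void *slide_window(void *buf, int fd, unsigned long window,
			  unsigned long *offset, unsigned long *head)
{
	unsigned long page_size = getpagesize();
	unsigned long shift = page_size * (*head / page_size);

	if (munmap(buf, window) != 0)
		return MAP_FAILED;

	*offset += shift;
	*head -= shift;

	return mmap(NULL, window, PROT_READ, MAP_SHARED, fd, *offset);
}
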
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 51d168230ee7..3a63e41fb44e 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -22,5 +22,6 @@ extern int cmd_stat(int argc, const char **argv, const char *prefix);
extern int cmd_top(int argc, const char **argv, const char *prefix);
extern int cmd_version(int argc, const char **argv, const char *prefix);
extern int cmd_list(int argc, const char **argv, const char *prefix);
+extern int cmd_trace(int argc, const char **argv, const char *prefix);
#endif
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 31982ad064b4..fe4589dde950 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -292,6 +292,7 @@ static void handle_internal_command(int argc, const char **argv)
{ "top", cmd_top, 0 },
{ "annotate", cmd_annotate, 0 },
{ "version", cmd_version, 0 },
+ { "trace", cmd_trace, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
diff --git a/tools/perf/util/abspath.c b/tools/perf/util/abspath.c
index 61d33b81fc97..a791dd467261 100644
--- a/tools/perf/util/abspath.c
+++ b/tools/perf/util/abspath.c
@@ -50,7 +50,8 @@ const char *make_absolute_path(const char *path)
die ("Could not get current working directory");
if (last_elem) {
- int len = strlen(buf);
+ len = strlen(buf);
+
if (len + strlen(last_elem) + 2 > PATH_MAX)
die ("Too long path name: '%s/%s'",
buf, last_elem);
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 4b50c412b9c5..6f8ea9d210b6 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -52,7 +52,6 @@ extern const char *perf_mailmap_file;
extern void maybe_flush_or_die(FILE *, const char *);
extern int copy_fd(int ifd, int ofd);
extern int copy_file(const char *dst, const char *src, int mode);
-extern ssize_t read_in_full(int fd, void *buf, size_t count);
extern ssize_t write_in_full(int fd, const void *buf, size_t count);
extern void write_or_die(int fd, const void *buf, size_t count);
extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 011473411642..3b8380f1b478 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -50,6 +50,7 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
else
p = &(*p)->rb_right;
break;
+ case CHAIN_NONE:
default:
break;
}
@@ -143,6 +144,7 @@ int register_callchain_param(struct callchain_param *param)
case CHAIN_FLAT:
param->sort = sort_chain_flat;
break;
+ case CHAIN_NONE:
default:
return -1;
}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index a926ae4f5a16..43cf3ea9e088 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -4,6 +4,7 @@
#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
+#include "util.h"
#include "symbol.h"
enum chain_mode {
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 90a044d1fe7d..e88bca55a599 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -166,7 +166,7 @@ int perf_color_default_config(const char *var, const char *value, void *cb)
return perf_default_config(var, value, cb);
}
-static int color_vfprintf(FILE *fp, const char *color, const char *fmt,
+static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
va_list args, const char *trail)
{
int r = 0;
@@ -191,6 +191,10 @@ static int color_vfprintf(FILE *fp, const char *color, const char *fmt,
return r;
}
+int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
+{
+ return __color_vfprintf(fp, color, fmt, args, NULL);
+}
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
@@ -199,7 +203,7 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
int r;
va_start(args, fmt);
- r = color_vfprintf(fp, color, fmt, args, NULL);
+ r = color_vfprintf(fp, color, fmt, args);
va_end(args);
return r;
}
@@ -209,7 +213,7 @@ int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...)
va_list args;
int r;
va_start(args, fmt);
- r = color_vfprintf(fp, color, fmt, args, "\n");
+ r = __color_vfprintf(fp, color, fmt, args, "\n");
va_end(args);
return r;
}
@@ -242,9 +246,9 @@ int color_fwrite_lines(FILE *fp, const char *color,
return 0;
}
-char *get_percent_color(double percent)
+const char *get_percent_color(double percent)
{
- char *color = PERF_COLOR_NORMAL;
+ const char *color = PERF_COLOR_NORMAL;
/*
* We color high-overhead entries in red, mid-overhead
@@ -263,7 +267,7 @@ char *get_percent_color(double percent)
int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
{
int r;
- char *color;
+ const char *color;
color = get_percent_color(percent);
r = color_fprintf(fp, color, fmt, percent);
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 706cec50bd25..58d597564b99 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -32,10 +32,11 @@ int perf_color_default_config(const char *var, const char *value, void *cb);
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
void color_parse(const char *value, const char *var, char *dst);
void color_parse_mem(const char *value, int len, const char *var, char *dst);
+int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
-char *get_percent_color(double percent);
+const char *get_percent_color(double percent);
#endif /* COLOR_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 780df541006d..8784649109ce 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -160,17 +160,18 @@ static int get_extended_base_var(char *name, int baselen, int c)
name[baselen++] = '.';
for (;;) {
- int c = get_next_char();
- if (c == '\n')
+ int ch = get_next_char();
+
+ if (ch == '\n')
return -1;
- if (c == '"')
+ if (ch == '"')
break;
- if (c == '\\') {
- c = get_next_char();
- if (c == '\n')
+ if (ch == '\\') {
+ ch = get_next_char();
+ if (ch == '\n')
return -1;
}
- name[baselen++] = c;
+ name[baselen++] = ch;
if (baselen > MAXNAME / 2)
return -1;
}
@@ -530,6 +531,8 @@ static int store_aux(const char* key, const char* value, void *cb __used)
store.offset[store.seen] = ftell(config_file);
}
}
+ default:
+ break;
}
return 0;
}
@@ -619,6 +622,7 @@ contline:
switch (contents[offset]) {
case '=': equal_offset = offset; break;
case ']': bracket_offset = offset; break;
+ default: break;
}
if (offset > 0 && contents[offset-1] == '\\') {
offset_ = offset;
@@ -742,9 +746,9 @@ int perf_config_set_multivar(const char* key, const char* value,
goto write_err_out;
} else {
struct stat st;
- char* contents;
+ char *contents;
ssize_t contents_sz, copy_begin, copy_end;
- int i, new_line = 0;
+ int new_line = 0;
if (value_regex == NULL)
store.value_regex = NULL;
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
new file mode 100644
index 000000000000..e8ca98fe0bd4
--- /dev/null
+++ b/tools/perf/util/debug.c
@@ -0,0 +1,95 @@
+/* For general debugging purposes */
+
+#include "../perf.h"
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "color.h"
+#include "event.h"
+#include "debug.h"
+
+int verbose = 0;
+int dump_trace = 0;
+
+int eprintf(const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (verbose) {
+ va_start(args, fmt);
+ ret = vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+int dump_printf(const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (dump_trace) {
+ va_start(args, fmt);
+ ret = vprintf(fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+static int dump_printf_color(const char *fmt, const char *color, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (dump_trace) {
+ va_start(args, color);
+ ret = color_vfprintf(stdout, color, fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+
+void trace_event(event_t *event)
+{
+ unsigned char *raw_event = (void *)event;
+ const char *color = PERF_COLOR_BLUE;
+ int i, j;
+
+ if (!dump_trace)
+ return;
+
+ dump_printf(".");
+ dump_printf_color("\n. ... raw event: size %d bytes\n", color,
+ event->header.size);
+
+ for (i = 0; i < event->header.size; i++) {
+ if ((i & 15) == 0) {
+ dump_printf(".");
+ dump_printf_color(" %04x: ", color, i);
+ }
+
+ dump_printf_color(" %02x", color, raw_event[i]);
+
+ if (((i & 15) == 15) || i == event->header.size-1) {
+ dump_printf_color(" ", color);
+ for (j = 0; j < 15-(i & 15); j++)
+ dump_printf_color(" ", color);
+ for (j = 0; j < (i & 15); j++) {
+ if (isprint(raw_event[i-15+j]))
+ dump_printf_color("%c", color,
+ raw_event[i-15+j]);
+ else
+ dump_printf_color(".", color);
+ }
+ dump_printf_color("\n", color);
+ }
+ }
+ dump_printf(".\n");
+}
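
trace_event() above is a conventional hex-plus-ASCII dump (16 bytes per row with printable characters echoed in a trailing column), decorated with the leading dots and blue color used by -D output. The underlying technique, stripped of that decoration, is a generic sketch rather than the patch's exact output format:

#include <ctype.h>
#include <stdio.h>

/* Dump len bytes, 16 per row: offset, hex column, printable-ASCII column. */
static void hexdump(const unsigned char *buf, unsigned int len)
{
	unsigned int i, j;

	for (i = 0; i < len; i += 16) {
		printf("%04x: ", i);
		for (j = 0; j < 16; j++) {
			if (i + j < len)
				printf(" %02x", buf[i + j]);
			else
				printf("   ");
		}
		printf("  ");
		for (j = 0; j < 16 && i + j < len; j++)
			putchar(isprint(buf[i + j]) ? buf[i + j] : '.');
		putchar('\n');
	}
}
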
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
new file mode 100644
index 000000000000..437eea58ce40
--- /dev/null
+++ b/tools/perf/util/debug.h
@@ -0,0 +1,8 @@
+/* For general debugging purposes */
+
+extern int verbose;
+extern int dump_trace;
+
+int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+void trace_event(event_t *event);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
new file mode 100644
index 000000000000..fa2d4e91d329
--- /dev/null
+++ b/tools/perf/util/event.h
@@ -0,0 +1,96 @@
+#ifndef __PERF_EVENT_H
+#define __PERF_EVENT_H
+#include "../perf.h"
+#include "util.h"
+#include <linux/list.h>
+
+enum {
+ SHOW_KERNEL = 1,
+ SHOW_USER = 2,
+ SHOW_HV = 4,
+};
+
+/*
+ * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
+ */
+struct ip_event {
+ struct perf_event_header header;
+ u64 ip;
+ u32 pid, tid;
+ unsigned char __more_data[];
+};
+
+struct mmap_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
+ char filename[PATH_MAX];
+};
+
+struct comm_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ char comm[16];
+};
+
+struct fork_event {
+ struct perf_event_header header;
+ u32 pid, ppid;
+ u32 tid, ptid;
+};
+
+struct lost_event {
+ struct perf_event_header header;
+ u64 id;
+ u64 lost;
+};
+
+/*
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ */
+struct read_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 value;
+ u64 time_enabled;
+ u64 time_running;
+ u64 id;
+};
+
+typedef union event_union {
+ struct perf_event_header header;
+ struct ip_event ip;
+ struct mmap_event mmap;
+ struct comm_event comm;
+ struct fork_event fork;
+ struct lost_event lost;
+ struct read_event read;
+} event_t;
+
+struct map {
+ struct list_head node;
+ u64 start;
+ u64 end;
+ u64 pgoff;
+ u64 (*map_ip)(struct map *, u64);
+ struct dso *dso;
+};
+
+static inline u64 map__map_ip(struct map *map, u64 ip)
+{
+ return ip - map->start + map->pgoff;
+}
+
+static inline u64 vdso__map_ip(struct map *map __used, u64 ip)
+{
+ return ip;
+}
+
+struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen);
+struct map *map__clone(struct map *self);
+int map__overlap(struct map *l, struct map *r);
+size_t map__fprintf(struct map *self, FILE *fp);
+
+#endif
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
index 34a352867382..2745605dba11 100644
--- a/tools/perf/util/exec_cmd.c
+++ b/tools/perf/util/exec_cmd.c
@@ -6,7 +6,6 @@
#define MAX_ARGS 32
-extern char **environ;
static const char *argv_exec_path;
static const char *argv0_path;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b92a457ca32e..ec4d4c2f9522 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -237,9 +237,44 @@ struct perf_header *perf_header__read(int fd)
self->data_offset = f_header.data.offset;
self->data_size = f_header.data.size;
- lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+ lseek(fd, self->data_offset, SEEK_SET);
self->frozen = 1;
return self;
}
+
+u64 perf_header__sample_type(struct perf_header *header)
+{
+ u64 type = 0;
+ int i;
+
+ for (i = 0; i < header->attrs; i++) {
+ struct perf_header_attr *attr = header->attr[i];
+
+ if (!type)
+ type = attr->attr.sample_type;
+ else if (type != attr->attr.sample_type)
+ die("non matching sample_type");
+ }
+
+ return type;
+}
+
+struct perf_counter_attr *
+perf_header__find_attr(u64 id, struct perf_header *header)
+{
+ int i;
+
+ for (i = 0; i < header->attrs; i++) {
+ struct perf_header_attr *attr = header->attr[i];
+ int j;
+
+ for (j = 0; j < attr->ids; j++) {
+ if (attr->id[j] == id)
+ return &attr->attr;
+ }
+ }
+
+ return NULL;
+}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index bf280449fcfd..5d0a72ecc919 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -31,6 +31,10 @@ struct perf_header_attr *
perf_header_attr__new(struct perf_counter_attr *attr);
void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
+u64 perf_header__sample_type(struct perf_header *header);
+struct perf_counter_attr *
+perf_header__find_attr(u64 id, struct perf_header *header);
+
struct perf_header *perf_header__new(void);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
new file mode 100644
index 000000000000..804e02382739
--- /dev/null
+++ b/tools/perf/util/map.c
@@ -0,0 +1,97 @@
+#include "event.h"
+#include "symbol.h"
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+static inline int is_anon_memory(const char *filename)
+{
+ return strcmp(filename, "//anon") == 0;
+}
+
+static int strcommon(const char *pathname, char *cwd, int cwdlen)
+{
+ int n = 0;
+
+ while (n < cwdlen && pathname[n] == cwd[n])
+ ++n;
+
+ return n;
+}
+
+ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen)
+{
+ struct map *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ const char *filename = event->filename;
+ char newfilename[PATH_MAX];
+ int anon;
+
+ if (cwd) {
+ int n = strcommon(filename, cwd, cwdlen);
+
+ if (n == cwdlen) {
+ snprintf(newfilename, sizeof(newfilename),
+ ".%s", filename + n);
+ filename = newfilename;
+ }
+ }
+
+ anon = is_anon_memory(filename);
+
+ if (anon) {
+ snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
+ filename = newfilename;
+ }
+
+ self->start = event->start;
+ self->end = event->start + event->len;
+ self->pgoff = event->pgoff;
+
+ self->dso = dsos__findnew(filename);
+ if (self->dso == NULL)
+ goto out_delete;
+
+ if (self->dso == vdso || anon)
+ self->map_ip = vdso__map_ip;
+ else
+ self->map_ip = map__map_ip;
+ }
+ return self;
+out_delete:
+ free(self);
+ return NULL;
+}
+
+struct map *map__clone(struct map *self)
+{
+ struct map *map = malloc(sizeof(*self));
+
+ if (!map)
+ return NULL;
+
+ memcpy(map, self, sizeof(*self));
+
+ return map;
+}
+
+int map__overlap(struct map *l, struct map *r)
+{
+ if (l->start > r->start) {
+ struct map *t = l;
+ l = r;
+ r = t;
+ }
+
+ if (l->end > r->start)
+ return 1;
+
+ return 0;
+}
+
+size_t map__fprintf(struct map *self, FILE *fp)
+{
+ return fprintf(fp, " %Lx-%Lx %Lx %s\n",
+ self->start, self->end, self->pgoff, self->dso->name);
+}
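
map__overlap() above treats maps as half-open [start, end) address ranges, and the map__map_ip() helper added in event.h rebases a sampled ip into file space as ip - start + pgoff. A minimal sketch of both operations on a hypothetical plain-integer range type, for illustration only:

#include <stdio.h>

typedef unsigned long long u64;

struct range { u64 start, end, pgoff; };	/* [start, end), like struct map */

/* Two half-open ranges overlap iff the one starting first ends after the
 * other begins; the same swap-and-compare that map__overlap() uses. */
static int ranges_overlap(struct range l, struct range r)
{
	if (l.start > r.start) {
		struct range t = l;
		l = r;
		r = t;
	}
	return l.end > r.start;
}

/* ip - start + pgoff: rebase a sampled address into the mapped file. */
static u64 range_map_ip(struct range m, u64 ip)
{
	return ip - m.start + m.pgoff;
}

int main(void)
{
	struct range a = { 0x1000, 0x3000, 0x0 };
	struct range b = { 0x2800, 0x4000, 0x800 };

	printf("overlap: %d\n", ranges_overlap(a, b));			/* 1 */
	printf("file offset: %#llx\n", range_map_ip(b, 0x3000));	/* 0x1000 */
	return 0;
}
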
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c
index ddabe925d65d..3d567fe59c79 100644
--- a/tools/perf/util/module.c
+++ b/tools/perf/util/module.c
@@ -436,9 +436,9 @@ static int mod_dso__load_module_paths(struct mod_dso *self)
goto out_failure;
while (!feof(file)) {
- char *path, *name, *tmp;
+ char *name, *tmp;
struct module *module;
- int line_len, len;
+ int line_len;
line_len = getline(&line, &n, file);
if (line_len < 0)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 044178408783..a587d41ae3c9 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1,23 +1,21 @@
-#include "../perf.h"
#include "util.h"
+#include "../perf.h"
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
#include "string.h"
#include "cache.h"
-extern char *strcasestr(const char *haystack, const char *needle);
-
int nr_counters;
struct perf_counter_attr attrs[MAX_COUNTERS];
struct event_symbol {
- u8 type;
- u64 config;
- char *symbol;
- char *alias;
+ u8 type;
+ u64 config;
+ const char *symbol;
+ const char *alias;
};
char debugfs_path[MAXPATHLEN];
@@ -51,7 +49,7 @@ static struct event_symbol event_symbols[] = {
#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT)
-static char *hw_event_names[] = {
+static const char *hw_event_names[] = {
"cycles",
"instructions",
"cache-references",
@@ -61,7 +59,7 @@ static char *hw_event_names[] = {
"bus-cycles",
};
-static char *sw_event_names[] = {
+static const char *sw_event_names[] = {
"cpu-clock-msecs",
"task-clock-msecs",
"page-faults",
@@ -73,7 +71,7 @@ static char *sw_event_names[] = {
#define MAX_ALIASES 8
-static char *hw_cache[][MAX_ALIASES] = {
+static const char *hw_cache[][MAX_ALIASES] = {
{ "L1-dcache", "l1-d", "l1d", "L1-data", },
{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
{ "LLC", "L2" },
@@ -82,13 +80,13 @@ static char *hw_cache[][MAX_ALIASES] = {
{ "branch", "branches", "bpu", "btb", "bpc", },
};
-static char *hw_cache_op[][MAX_ALIASES] = {
+static const char *hw_cache_op[][MAX_ALIASES] = {
{ "load", "loads", "read", },
{ "store", "stores", "write", },
{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
-static char *hw_cache_result[][MAX_ALIASES] = {
+static const char *hw_cache_result[][MAX_ALIASES] = {
{ "refs", "Reference", "ops", "access", },
{ "misses", "miss", },
};
@@ -113,11 +111,9 @@ static unsigned long hw_cache_stat[C(MAX)] = {
[C(BPU)] = (CACHE_READ),
};
-#define for_each_subsystem(sys_dir, sys_dirent, sys_next, file, st) \
+#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
- if (snprintf(file, MAXPATHLEN, "%s/%s", debugfs_path, \
- sys_dirent.d_name) && \
- (!stat(file, &st)) && (S_ISDIR(st.st_mode)) && \
+ if (sys_dirent.d_type == DT_DIR && \
(strcmp(sys_dirent.d_name, ".")) && \
(strcmp(sys_dirent.d_name, "..")))
@@ -136,11 +132,9 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
return 0;
}
-#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st) \
+#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
- if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path, \
- sys_dirent.d_name, evt_dirent.d_name) && \
- (!stat(file, &st)) && (S_ISDIR(st.st_mode)) && \
+ if (evt_dirent.d_type == DT_DIR && \
(strcmp(evt_dirent.d_name, ".")) && \
(strcmp(evt_dirent.d_name, "..")) && \
(!tp_event_has_id(&sys_dirent, &evt_dirent)))
@@ -158,34 +152,39 @@ int valid_debugfs_mount(const char *debugfs)
return 0;
}
-static char *tracepoint_id_to_name(u64 config)
+struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
- static char tracepoint_name[2 * MAX_EVENT_LENGTH];
+ struct tracepoint_path *path = NULL;
DIR *sys_dir, *evt_dir;
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
- struct stat st;
char id_buf[4];
- int fd;
+ int sys_dir_fd, fd;
u64 id;
char evt_path[MAXPATHLEN];
if (valid_debugfs_mount(debugfs_path))
- return "unkown";
+ return NULL;
sys_dir = opendir(debugfs_path);
if (!sys_dir)
goto cleanup;
-
- for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
- evt_dir = opendir(evt_path);
- if (!evt_dir)
- goto cleanup;
- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
- evt_path, st) {
- snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id",
- debugfs_path, sys_dirent.d_name,
+ sys_dir_fd = dirfd(sys_dir);
+
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ int dfd = openat(sys_dir_fd, sys_dirent.d_name,
+ O_RDONLY|O_DIRECTORY), evt_dir_fd;
+ if (dfd == -1)
+ continue;
+ evt_dir = fdopendir(dfd);
+ if (!evt_dir) {
+ close(dfd);
+ continue;
+ }
+ evt_dir_fd = dirfd(evt_dir);
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ snprintf(evt_path, MAXPATHLEN, "%s/id",
evt_dirent.d_name);
- fd = open(evt_path, O_RDONLY);
+ fd = openat(evt_dir_fd, evt_path, O_RDONLY);
if (fd < 0)
continue;
if (read(fd, id_buf, sizeof(id_buf)) < 0) {
@@ -197,10 +196,23 @@ static char *tracepoint_id_to_name(u64 config)
if (id == config) {
closedir(evt_dir);
closedir(sys_dir);
- snprintf(tracepoint_name, 2 * MAX_EVENT_LENGTH,
- "%s:%s", sys_dirent.d_name,
- evt_dirent.d_name);
- return tracepoint_name;
+ path = calloc(1, sizeof(*path));
+ path->system = malloc(MAX_EVENT_LENGTH);
+ if (!path->system) {
+ free(path);
+ return NULL;
+ }
+ path->name = malloc(MAX_EVENT_LENGTH);
+ if (!path->name) {
+ free(path->system);
+ free(path);
+ return NULL;
+ }
+ strncpy(path->system, sys_dirent.d_name,
+ MAX_EVENT_LENGTH);
+ strncpy(path->name, evt_dirent.d_name,
+ MAX_EVENT_LENGTH);
+ return path;
}
}
closedir(evt_dir);
@@ -208,7 +220,25 @@ static char *tracepoint_id_to_name(u64 config)
cleanup:
closedir(sys_dir);
- return "unkown";
+ return NULL;
+}
+
+#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
+static const char *tracepoint_id_to_name(u64 config)
+{
+ static char buf[TP_PATH_LEN];
+ struct tracepoint_path *path;
+
+ path = tracepoint_id_to_path(config);
+ if (path) {
+ snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
+ free(path->name);
+ free(path->system);
+ free(path);
+ } else
+ snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
+
+ return buf;
}
static int is_cache_op_valid(u8 cache_type, u8 cache_op)
@@ -235,7 +265,7 @@ static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
return name;
}
-char *event_name(int counter)
+const char *event_name(int counter)
{
u64 config = attrs[counter].config;
int type = attrs[counter].type;
@@ -243,7 +273,7 @@ char *event_name(int counter)
return __event_name(type, config);
}
-char *__event_name(int type, u64 config)
+const char *__event_name(int type, u64 config)
{
static char buf[32];
@@ -294,7 +324,7 @@ char *__event_name(int type, u64 config)
return "unknown";
}
-static int parse_aliases(const char **str, char *names[][MAX_ALIASES], int size)
+static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
{
int i, j;
int n, longest = -1;
@@ -598,7 +628,7 @@ static void print_tracepoint_events(void)
{
DIR *sys_dir, *evt_dir;
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
- struct stat st;
+ int sys_dir_fd;
char evt_path[MAXPATHLEN];
if (valid_debugfs_mount(debugfs_path))
@@ -607,16 +637,23 @@ static void print_tracepoint_events(void)
sys_dir = opendir(debugfs_path);
if (!sys_dir)
goto cleanup;
-
- for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
- evt_dir = opendir(evt_path);
- if (!evt_dir)
- goto cleanup;
- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
- evt_path, st) {
+ sys_dir_fd = dirfd(sys_dir);
+
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ int dfd = openat(sys_dir_fd, sys_dirent.d_name,
+ O_RDONLY|O_DIRECTORY), evt_dir_fd;
+ if (dfd == -1)
+ continue;
+ evt_dir = fdopendir(dfd);
+ if (!evt_dir) {
+ close(dfd);
+ continue;
+ }
+ evt_dir_fd = dirfd(evt_dir);
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
snprintf(evt_path, MAXPATHLEN, "%s:%s",
sys_dirent.d_name, evt_dirent.d_name);
- fprintf(stderr, " %-40s [%s]\n", evt_path,
+ fprintf(stderr, " %-42s [%s]\n", evt_path,
event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
}
closedir(evt_dir);
@@ -650,7 +687,7 @@ void print_events(void)
sprintf(name, "%s OR %s", syms->symbol, syms->alias);
else
strcpy(name, syms->symbol);
- fprintf(stderr, " %-40s [%s]\n", name,
+ fprintf(stderr, " %-42s [%s]\n", name,
event_type_descriptors[type]);
prev_type = type;
@@ -664,7 +701,7 @@ void print_events(void)
continue;
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
- fprintf(stderr, " %-40s [%s]\n",
+ fprintf(stderr, " %-42s [%s]\n",
event_cache_name(type, op, i),
event_type_descriptors[4]);
}
@@ -672,7 +709,7 @@ void print_events(void)
}
fprintf(stderr, "\n");
- fprintf(stderr, " %-40s [raw hardware event descriptor]\n",
+ fprintf(stderr, " %-42s [raw hardware event descriptor]\n",
"rNNN");
fprintf(stderr, "\n");
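
Callers of the new tracepoint_id_to_path() own the returned strings and must free them, as tracepoint_id_to_name() above demonstrates. A hedged usage sketch, assuming the declarations from parse-events.h in this patch, with unsigned long long standing in for u64:

#include <stdio.h>
#include <stdlib.h>

struct tracepoint_path {
	char *system;
	char *name;
	struct tracepoint_path *next;
};
extern struct tracepoint_path *tracepoint_id_to_path(unsigned long long config);

static void print_tracepoint(unsigned long long id)
{
	struct tracepoint_path *path = tracepoint_id_to_path(id);

	if (!path) {
		printf("unknown:unknown\n");
		return;
	}
	printf("%s:%s\n", path->system, path->name);
	free(path->name);
	free(path->system);
	free(path);
}
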
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 192a962e3a0f..60704c15961f 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -1,16 +1,25 @@
-
+#ifndef _PARSE_EVENTS_H
+#define _PARSE_EVENTS_H
/*
* Parse symbolic events/counts passed in as options:
*/
struct option;
+struct tracepoint_path {
+ char *system;
+ char *name;
+ struct tracepoint_path *next;
+};
+
+extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
+
extern int nr_counters;
extern struct perf_counter_attr attrs[MAX_COUNTERS];
-extern char *event_name(int ctr);
-extern char *__event_name(int type, u64 config);
+extern const char *event_name(int ctr);
+extern const char *__event_name(int type, u64 config);
extern int parse_events(const struct option *opt, const char *str, int unset);
@@ -21,3 +30,5 @@ extern void print_events(void);
extern char debugfs_path[];
extern int valid_debugfs_mount(const char *debugfs);
+
+#endif /* _PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 1bf67190c820..6d8af48c925e 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -53,6 +53,12 @@ static int get_value(struct parse_opt_ctx_t *p,
case OPTION_SET_INT:
case OPTION_SET_PTR:
return opterror(opt, "takes no value", flags);
+ case OPTION_END:
+ case OPTION_ARGUMENT:
+ case OPTION_GROUP:
+ case OPTION_STRING:
+ case OPTION_INTEGER:
+ case OPTION_LONG:
default:
break;
}
@@ -130,6 +136,9 @@ static int get_value(struct parse_opt_ctx_t *p,
return opterror(opt, "expects a numerical value", flags);
return 0;
+ case OPTION_END:
+ case OPTION_ARGUMENT:
+ case OPTION_GROUP:
default:
die("should not happen, someone must be hit on the forehead");
}
@@ -296,6 +305,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
return parse_options_usage(usagestr, options);
case -2:
goto unknown;
+ default:
+ break;
}
if (ctx->opt)
check_typos(arg + 1, options);
@@ -314,6 +325,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
ctx->argv[0] = strdup(ctx->opt - 1);
*(char *)ctx->argv[0] = '-';
goto unknown;
+ default:
+ break;
}
}
continue;
@@ -336,6 +349,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
return parse_options_usage(usagestr, options);
case -2:
goto unknown;
+ default:
+ break;
}
continue;
unknown:
@@ -456,6 +471,13 @@ int usage_with_options_internal(const char * const *usagestr,
}
break;
default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */
+ case OPTION_END:
+ case OPTION_GROUP:
+ case OPTION_BIT:
+ case OPTION_BOOLEAN:
+ case OPTION_SET_INT:
+ case OPTION_SET_PTR:
+ case OPTION_LONG:
break;
}
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index a501a40dd2cb..fd1f2faaade4 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -17,7 +17,7 @@ static char bad_path[] = "/bad-path/";
* Two hacks:
*/
-static char *get_perf_dir(void)
+static const char *get_perf_dir(void)
{
return ".";
}
@@ -38,8 +38,9 @@ size_t strlcpy(char *dest, const char *src, size_t size)
static char *get_pathname(void)
{
static char pathname_array[4][PATH_MAX];
- static int index;
- return pathname_array[3 & ++index];
+ static int idx;
+
+ return pathname_array[3 & ++idx];
}
static char *cleanup_path(char *path)
@@ -161,20 +162,24 @@ int perf_mkstemp(char *path, size_t len, const char *template)
}
-const char *make_relative_path(const char *abs, const char *base)
+const char *make_relative_path(const char *abs_path, const char *base)
{
static char buf[PATH_MAX + 1];
int baselen;
+
if (!base)
- return abs;
+ return abs_path;
+
baselen = strlen(base);
- if (prefixcmp(abs, base))
- return abs;
- if (abs[baselen] == '/')
+ if (prefixcmp(abs_path, base))
+ return abs_path;
+ if (abs_path[baselen] == '/')
baselen++;
else if (base[baselen - 1] != '/')
- return abs;
- strcpy(buf, abs + baselen);
+ return abs_path;
+
+ strcpy(buf, abs_path + baselen);
+
return buf;
}
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c
index a3935343091a..2b615acf94d7 100644
--- a/tools/perf/util/run-command.c
+++ b/tools/perf/util/run-command.c
@@ -262,7 +262,7 @@ int run_hook(const char *index_file, const char *name, ...)
{
struct child_process hook;
const char **argv = NULL, *env[2];
- char index[PATH_MAX];
+ char idx[PATH_MAX];
va_list args;
int ret;
size_t i = 0, alloc = 0;
@@ -284,8 +284,8 @@ int run_hook(const char *index_file, const char *name, ...)
hook.no_stdin = 1;
hook.stdout_to_stderr = 1;
if (index_file) {
- snprintf(index, sizeof(index), "PERF_INDEX_FILE=%s", index_file);
- env[0] = index;
+ snprintf(idx, sizeof(idx), "PERF_INDEX_FILE=%s", index_file);
+ env[0] = idx;
env[1] = NULL;
hook.env = env;
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5c0f42e6b33b..fd3d9c8e90fc 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -3,6 +3,8 @@
#include "string.h"
#include "symbol.h"
+#include "debug.h"
+
#include <libelf.h>
#include <gelf.h>
#include <elf.h>
@@ -21,7 +23,7 @@ enum dso_origin {
static struct symbol *symbol__new(u64 start, u64 len,
const char *name, unsigned int priv_size,
- u64 obj_start, int verbose)
+ u64 obj_start, int v)
{
size_t namelen = strlen(name) + 1;
struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen);
@@ -29,7 +31,7 @@ static struct symbol *symbol__new(u64 start, u64 len,
if (!self)
return NULL;
- if (verbose >= 2)
+ if (v >= 2)
printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n",
(u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start);
@@ -156,7 +158,7 @@ size_t dso__fprintf(struct dso *self, FILE *fp)
return ret;
}
-static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verbose)
+static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v)
{
struct rb_node *nd, *prevnd;
char *line = NULL;
@@ -198,7 +200,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb
* Well fix up the end later, when we have all sorted.
*/
sym = symbol__new(start, 0xdead, line + len + 2,
- self->sym_priv_size, 0, verbose);
+ self->sym_priv_size, 0, v);
if (sym == NULL)
goto out_delete_line;
@@ -239,7 +241,7 @@ out_failure:
return -1;
}
-static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int verbose)
+static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v)
{
char *line = NULL;
size_t n;
@@ -277,7 +279,7 @@ static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int verb
continue;
sym = symbol__new(start, size, line + len,
- self->sym_priv_size, start, verbose);
+ self->sym_priv_size, start, v);
if (sym == NULL)
goto out_delete_line;
@@ -305,13 +307,13 @@ out_failure:
* elf_symtab__for_each_symbol - iterate thru all the symbols
*
* @self: struct elf_symtab instance to iterate
- * @index: uint32_t index
+ * @idx: uint32_t idx
* @sym: GElf_Sym iterator
*/
-#define elf_symtab__for_each_symbol(syms, nr_syms, index, sym) \
- for (index = 0, gelf_getsym(syms, index, &sym);\
- index < nr_syms; \
- index++, gelf_getsym(syms, index, &sym))
+#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
+ for (idx = 0, gelf_getsym(syms, idx, &sym);\
+ idx < nr_syms; \
+ idx++, gelf_getsym(syms, idx, &sym))
static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
@@ -354,7 +356,7 @@ static inline const char *elf_sym__name(const GElf_Sym *sym,
static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
GElf_Shdr *shp, const char *name,
- size_t *index)
+ size_t *idx)
{
Elf_Scn *sec = NULL;
size_t cnt = 1;
@@ -365,8 +367,8 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
gelf_getshdr(sec, shp);
str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
if (!strcmp(name, str)) {
- if (index)
- *index = cnt;
+ if (idx)
+ *idx = cnt;
break;
}
++cnt;
@@ -392,7 +394,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
* And always look at the original dso, not at debuginfo packages, that
* have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
*/
-static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
+static int dso__synthesize_plt_symbols(struct dso *self, int v)
{
uint32_t nr_rel_entries, idx;
GElf_Sym sym;
@@ -442,7 +444,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
goto out_elf_end;
/*
- * Fetch the relocation section to find the indexes to the GOT
+ * Fetch the relocation section to find the idxes to the GOT
* and the symbols in the .dynsym they refer to.
*/
reldata = elf_getdata(scn_plt_rel, NULL);
@@ -476,7 +478,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
"%s@plt", elf_sym__name(&sym, symstrs));
f = symbol__new(plt_offset, shdr_plt.sh_entsize,
- sympltname, self->sym_priv_size, 0, verbose);
+ sympltname, self->sym_priv_size, 0, v);
if (!f)
goto out_elf_end;
@@ -494,7 +496,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
"%s@plt", elf_sym__name(&sym, symstrs));
f = symbol__new(plt_offset, shdr_plt.sh_entsize,
- sympltname, self->sym_priv_size, 0, verbose);
+ sympltname, self->sym_priv_size, 0, v);
if (!f)
goto out_elf_end;
@@ -518,12 +520,12 @@ out:
}
static int dso__load_sym(struct dso *self, int fd, const char *name,
- symbol_filter_t filter, int verbose, struct module *mod)
+ symbol_filter_t filter, int v, struct module *mod)
{
Elf_Data *symstrs, *secstrs;
uint32_t nr_syms;
int err = -1;
- uint32_t index;
+ uint32_t idx;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
Elf_Data *syms;
@@ -534,14 +536,14 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
- if (verbose)
+ if (v)
fprintf(stderr, "%s: cannot read %s ELF file.\n",
__func__, name);
goto out_close;
}
if (gelf_getehdr(elf, &ehdr) == NULL) {
- if (verbose)
+ if (v)
fprintf(stderr, "%s: cannot get elf header.\n", __func__);
goto out_elf_end;
}
@@ -583,9 +585,9 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
NULL) != NULL);
} else self->adjust_symbols = 0;
- elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
+ elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
struct symbol *f;
- const char *name;
+ const char *elf_name;
char *demangled;
u64 obj_start;
struct section *section = NULL;
@@ -608,7 +610,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
obj_start = sym.st_value;
if (self->adjust_symbols) {
- if (verbose >= 2)
+ if (v >= 2)
printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n",
(u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset);
@@ -630,13 +632,13 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
* DWARF DW_compile_unit has this, but we don't always have access
* to it...
*/
- name = elf_sym__name(&sym, symstrs);
- demangled = bfd_demangle(NULL, name, DMGL_PARAMS | DMGL_ANSI);
+ elf_name = elf_sym__name(&sym, symstrs);
+ demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
if (demangled != NULL)
- name = demangled;
+ elf_name = demangled;
- f = symbol__new(sym.st_value, sym.st_size, name,
- self->sym_priv_size, obj_start, verbose);
+ f = symbol__new(sym.st_value, sym.st_size, elf_name,
+ self->sym_priv_size, obj_start, v);
free(demangled);
if (!f)
goto out_elf_end;
@@ -659,7 +661,7 @@ out_close:
#define BUILD_ID_SIZE 128
-static char *dso__read_build_id(struct dso *self, int verbose)
+static char *dso__read_build_id(struct dso *self, int v)
{
int i;
GElf_Ehdr ehdr;
@@ -676,14 +678,14 @@ static char *dso__read_build_id(struct dso *self, int verbose)
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
- if (verbose)
+ if (v)
fprintf(stderr, "%s: cannot read %s ELF file.\n",
__func__, self->name);
goto out_close;
}
if (gelf_getehdr(elf, &ehdr) == NULL) {
- if (verbose)
+ if (v)
fprintf(stderr, "%s: cannot get elf header.\n", __func__);
goto out_elf_end;
}
@@ -706,7 +708,7 @@ static char *dso__read_build_id(struct dso *self, int verbose)
++raw;
bid += 2;
}
- if (verbose >= 2)
+ if (v >= 2)
printf("%s(%s): %s\n", __func__, self->name, build_id);
out_elf_end:
elf_end(elf);
@@ -732,7 +734,7 @@ char dso__symtab_origin(const struct dso *self)
return origin[self->origin];
}
-int dso__load(struct dso *self, symbol_filter_t filter, int verbose)
+int dso__load(struct dso *self, symbol_filter_t filter, int v)
{
int size = PATH_MAX;
char *name = malloc(size), *build_id = NULL;
@@ -745,7 +747,7 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose)
self->adjust_symbols = 0;
if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
- ret = dso__load_perf_map(self, filter, verbose);
+ ret = dso__load_perf_map(self, filter, v);
self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
DSO__ORIG_NOT_FOUND;
return ret;
@@ -764,7 +766,7 @@ more:
snprintf(name, size, "/usr/lib/debug%s", self->name);
break;
case DSO__ORIG_BUILDID:
- build_id = dso__read_build_id(self, verbose);
+ build_id = dso__read_build_id(self, v);
if (build_id != NULL) {
snprintf(name, size,
"/usr/lib/debug/.build-id/%.2s/%s.debug",
@@ -785,7 +787,7 @@ more:
fd = open(name, O_RDONLY);
} while (fd < 0);
- ret = dso__load_sym(self, fd, name, filter, verbose, NULL);
+ ret = dso__load_sym(self, fd, name, filter, v, NULL);
close(fd);
/*
@@ -795,7 +797,7 @@ more:
goto more;
if (ret > 0) {
- int nr_plt = dso__synthesize_plt_symbols(self, verbose);
+ int nr_plt = dso__synthesize_plt_symbols(self, v);
if (nr_plt > 0)
ret += nr_plt;
}
@@ -807,7 +809,7 @@ out:
}
static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name,
- symbol_filter_t filter, int verbose)
+ symbol_filter_t filter, int v)
{
struct module *mod = mod_dso__find_module(mods, name);
int err = 0, fd;
@@ -820,13 +822,13 @@ static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *
if (fd < 0)
return err;
- err = dso__load_sym(self, fd, name, filter, verbose, mod);
+ err = dso__load_sym(self, fd, name, filter, v, mod);
close(fd);
return err;
}
-int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose)
+int dso__load_modules(struct dso *self, symbol_filter_t filter, int v)
{
struct mod_dso *mods = mod_dso__new_dso("modules");
struct module *pos;
@@ -844,7 +846,7 @@ int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose)
next = rb_first(&mods->mods);
while (next) {
pos = rb_entry(next, struct module, rb_node);
- err = dso__load_module(self, mods, pos->name, filter, verbose);
+ err = dso__load_module(self, mods, pos->name, filter, v);
if (err < 0)
break;
@@ -887,14 +889,14 @@ static inline void dso__fill_symbol_holes(struct dso *self)
}
static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose)
+ symbol_filter_t filter, int v)
{
int err, fd = open(vmlinux, O_RDONLY);
if (fd < 0)
return -1;
- err = dso__load_sym(self, fd, vmlinux, filter, verbose, NULL);
+ err = dso__load_sym(self, fd, vmlinux, filter, v, NULL);
if (err > 0)
dso__fill_symbol_holes(self);
@@ -905,18 +907,18 @@ static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
}
int dso__load_kernel(struct dso *self, const char *vmlinux,
- symbol_filter_t filter, int verbose, int modules)
+ symbol_filter_t filter, int v, int use_modules)
{
int err = -1;
if (vmlinux) {
- err = dso__load_vmlinux(self, vmlinux, filter, verbose);
- if (err > 0 && modules)
- err = dso__load_modules(self, filter, verbose);
+ err = dso__load_vmlinux(self, vmlinux, filter, v);
+ if (err > 0 && use_modules)
+ err = dso__load_modules(self, filter, v);
}
if (err <= 0)
- err = dso__load_kallsyms(self, filter, verbose);
+ err = dso__load_kallsyms(self, filter, v);
if (err > 0)
self->origin = DSO__ORIG_KERNEL;
@@ -924,6 +926,103 @@ int dso__load_kernel(struct dso *self, const char *vmlinux,
return err;
}
+LIST_HEAD(dsos);
+struct dso *kernel_dso;
+struct dso *vdso;
+struct dso *hypervisor_dso;
+
+const char *vmlinux_name = "vmlinux";
+int modules;
+
+static void dsos__add(struct dso *dso)
+{
+ list_add_tail(&dso->node, &dsos);
+}
+
+static struct dso *dsos__find(const char *name)
+{
+ struct dso *pos;
+
+ list_for_each_entry(pos, &dsos, node)
+ if (strcmp(pos->name, name) == 0)
+ return pos;
+ return NULL;
+}
+
+struct dso *dsos__findnew(const char *name)
+{
+ struct dso *dso = dsos__find(name);
+ int nr;
+
+ if (dso)
+ return dso;
+
+ dso = dso__new(name, 0);
+ if (!dso)
+ goto out_delete_dso;
+
+ nr = dso__load(dso, NULL, verbose);
+ if (nr < 0) {
+ eprintf("Failed to open: %s\n", name);
+ goto out_delete_dso;
+ }
+ if (!nr)
+ eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
+
+ dsos__add(dso);
+
+ return dso;
+
+out_delete_dso:
+ dso__delete(dso);
+ return NULL;
+}
+
+void dsos__fprintf(FILE *fp)
+{
+ struct dso *pos;
+
+ list_for_each_entry(pos, &dsos, node)
+ dso__fprintf(pos, fp);
+}
+
+static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
+{
+ return dso__find_symbol(dso, ip);
+}
+
+int load_kernel(void)
+{
+ int err;
+
+ kernel_dso = dso__new("[kernel]", 0);
+ if (!kernel_dso)
+ return -1;
+
+ err = dso__load_kernel(kernel_dso, vmlinux_name, NULL, verbose, modules);
+ if (err <= 0) {
+ dso__delete(kernel_dso);
+ kernel_dso = NULL;
+ } else
+ dsos__add(kernel_dso);
+
+ vdso = dso__new("[vdso]", 0);
+ if (!vdso)
+ return -1;
+
+ vdso->find_symbol = vdso__find_symbol;
+
+ dsos__add(vdso);
+
+ hypervisor_dso = dso__new("[hypervisor]", 0);
+ if (!hypervisor_dso)
+ return -1;
+ dsos__add(hypervisor_dso);
+
+ return err;
+}
+
+
void symbol__init(void)
{
elf_version(EV_CURRENT);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index b53bf0125c1b..6e8490716408 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include "module.h"
+#include "event.h"
#ifdef HAVE_CPLUS_DEMANGLE
extern char *cplus_demangle(const char *, int);
@@ -54,7 +55,7 @@ struct dso {
char name[0];
};
-const char *sym_hist_filter;
+extern const char *sym_hist_filter;
typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym);
@@ -72,9 +73,20 @@ int dso__load_kernel(struct dso *self, const char *vmlinux,
symbol_filter_t filter, int verbose, int modules);
int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose);
int dso__load(struct dso *self, symbol_filter_t filter, int verbose);
+struct dso *dsos__findnew(const char *name);
+void dsos__fprintf(FILE *fp);
size_t dso__fprintf(struct dso *self, FILE *fp);
char dso__symtab_origin(const struct dso *self);
+int load_kernel(void);
+
void symbol__init(void);
+
+extern struct list_head dsos;
+extern struct dso *kernel_dso;
+extern struct dso *vdso;
+extern struct dso *hypervisor_dso;
+extern const char *vmlinux_name;
+extern int modules;
#endif /* _PERF_SYMBOL_ */
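The symbol.c/symbol.h hunks above move the per-builtin DSO bookkeeping into one shared place: a global "dsos" list, dsos__findnew() for lazy symbol loading, and load_kernel() to set up kernel_dso, vdso and hypervisor_dso. A minimal caller sketch of that interface is shown below; resolve_sample() is hypothetical and not part of this patch, and it assumes the usual perf headers (u64, printf) are already available.

	/* hypothetical caller -- not part of this patch */
	#include "util/symbol.h"

	static int resolve_sample(const char *dso_name, u64 ip)
	{
		struct dso *dso;
		struct symbol *sym;

		if (load_kernel() < 0)		/* sets up kernel_dso, vdso, hypervisor_dso */
			return -1;

		dso = dsos__findnew(dso_name);	/* loads symbols once, caches the dso in 'dsos' */
		if (dso == NULL)
			return -1;

		sym = dso__find_symbol(dso, ip);
		printf("%#llx: %s\n", (unsigned long long)ip, sym ? sym->name : "<unknown>");
		return 0;
	}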
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
new file mode 100644
index 000000000000..7635928ca278
--- /dev/null
+++ b/tools/perf/util/thread.c
@@ -0,0 +1,175 @@
+#include "../perf.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "thread.h"
+#include "util.h"
+#include "debug.h"
+
+static struct thread *thread__new(pid_t pid)
+{
+ struct thread *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ self->pid = pid;
+ self->comm = malloc(32);
+ if (self->comm)
+ snprintf(self->comm, 32, ":%d", self->pid);
+ INIT_LIST_HEAD(&self->maps);
+ }
+
+ return self;
+}
+
+int thread__set_comm(struct thread *self, const char *comm)
+{
+ if (self->comm)
+ free(self->comm);
+ self->comm = strdup(comm);
+ return self->comm ? 0 : -ENOMEM;
+}
+
+static size_t thread__fprintf(struct thread *self, FILE *fp)
+{
+ struct map *pos;
+ size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
+
+ list_for_each_entry(pos, &self->maps, node)
+ ret += map__fprintf(pos, fp);
+
+ return ret;
+}
+
+struct thread *
+threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
+{
+ struct rb_node **p = &threads->rb_node;
+ struct rb_node *parent = NULL;
+ struct thread *th;
+
+ /*
+ * Front-end cache - PID lookups come in blocks,
+ * so most of the time we don't have to look up
+ * the full rbtree:
+ */
+ if (*last_match && (*last_match)->pid == pid)
+ return *last_match;
+
+ while (*p != NULL) {
+ parent = *p;
+ th = rb_entry(parent, struct thread, rb_node);
+
+ if (th->pid == pid) {
+ *last_match = th;
+ return th;
+ }
+
+ if (pid < th->pid)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ th = thread__new(pid);
+ if (th != NULL) {
+ rb_link_node(&th->rb_node, parent, p);
+ rb_insert_color(&th->rb_node, threads);
+ *last_match = th;
+ }
+
+ return th;
+}
+
+struct thread *
+register_idle_thread(struct rb_root *threads, struct thread **last_match)
+{
+ struct thread *thread = threads__findnew(0, threads, last_match);
+
+ if (!thread || thread__set_comm(thread, "[init]")) {
+ fprintf(stderr, "problem inserting idle task.\n");
+ exit(-1);
+ }
+
+ return thread;
+}
+
+void thread__insert_map(struct thread *self, struct map *map)
+{
+ struct map *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &self->maps, node) {
+ if (map__overlap(pos, map)) {
+ if (verbose >= 2) {
+ printf("overlapping maps:\n");
+ map__fprintf(map, stdout);
+ map__fprintf(pos, stdout);
+ }
+
+ if (map->start <= pos->start && map->end > pos->start)
+ pos->start = map->end;
+
+ if (map->end >= pos->end && map->start < pos->end)
+ pos->end = map->start;
+
+ if (verbose >= 2) {
+ printf("after collision:\n");
+ map__fprintf(pos, stdout);
+ }
+
+ if (pos->start >= pos->end) {
+ list_del_init(&pos->node);
+ free(pos);
+ }
+ }
+ }
+
+ list_add_tail(&map->node, &self->maps);
+}
+
+int thread__fork(struct thread *self, struct thread *parent)
+{
+ struct map *map;
+
+ if (self->comm)
+ free(self->comm);
+ self->comm = strdup(parent->comm);
+ if (!self->comm)
+ return -ENOMEM;
+
+ list_for_each_entry(map, &parent->maps, node) {
+ struct map *new = map__clone(map);
+ if (!new)
+ return -ENOMEM;
+ thread__insert_map(self, new);
+ }
+
+ return 0;
+}
+
+struct map *thread__find_map(struct thread *self, u64 ip)
+{
+ struct map *pos;
+
+ if (self == NULL)
+ return NULL;
+
+ list_for_each_entry(pos, &self->maps, node)
+ if (ip >= pos->start && ip <= pos->end)
+ return pos;
+
+ return NULL;
+}
+
+size_t threads__fprintf(FILE *fp, struct rb_root *threads)
+{
+ size_t ret = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
+ struct thread *pos = rb_entry(nd, struct thread, rb_node);
+
+ ret += thread__fprintf(pos, fp);
+ }
+
+ return ret;
+}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
new file mode 100644
index 000000000000..634f2809a342
--- /dev/null
+++ b/tools/perf/util/thread.h
@@ -0,0 +1,21 @@
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <unistd.h>
+#include "symbol.h"
+
+struct thread {
+ struct rb_node rb_node;
+ struct list_head maps;
+ pid_t pid;
+ char *comm;
+};
+
+int thread__set_comm(struct thread *self, const char *comm);
+struct thread *
+threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match);
+struct thread *
+register_idle_thread(struct rb_root *threads, struct thread **last_match);
+void thread__insert_map(struct thread *self, struct map *map);
+int thread__fork(struct thread *self, struct thread *parent);
+struct map *thread__find_map(struct thread *self, u64 ip);
+size_t threads__fprintf(FILE *fp, struct rb_root *threads);
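The new thread.c/thread.h pair factors out the per-tool thread and map tracking: threads live in a caller-owned rbtree keyed by pid, threads__findnew() keeps a one-entry lookup cache, thread__insert_map() resolves overlapping maps, and thread__find_map() maps an instruction pointer back to a mapping. A sketch of the intended event-loop usage follows; handle_mmap_event() and resolve_ip() are hypothetical names, not part of this patch.

	/* hypothetical event-loop fragment -- not part of this patch */
	#include "util/thread.h"

	static struct rb_root threads = RB_ROOT;
	static struct thread *last_match;

	static void handle_mmap_event(pid_t pid, struct map *map)
	{
		struct thread *th = threads__findnew(pid, &threads, &last_match);

		if (th != NULL)
			thread__insert_map(th, map);	/* clips or drops overlapping maps */
	}

	static struct map *resolve_ip(pid_t pid, u64 ip)
	{
		struct thread *th = threads__findnew(pid, &threads, &last_match);

		return thread__find_map(th, ip);	/* NULL when no map covers ip */
	}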
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
new file mode 100644
index 000000000000..6c9302a7274c
--- /dev/null
+++ b/tools/perf/util/trace-event-info.c
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) 2008,2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define _GNU_SOURCE
+#include <dirent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include "../perf.h"
+#include "trace-event.h"
+
+
+#define VERSION "0.5"
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+#define MAX_PATH 256
+
+#define TRACE_CTRL "tracing_on"
+#define TRACE "trace"
+#define AVAILABLE "available_tracers"
+#define CURRENT "current_tracer"
+#define ITER_CTRL "trace_options"
+#define MAX_LATENCY "tracing_max_latency"
+
+unsigned int page_size;
+
+static const char *output_file = "trace.info";
+static int output_fd;
+
+struct event_list {
+ struct event_list *next;
+ const char *event;
+};
+
+struct events {
+ struct events *sibling;
+ struct events *children;
+ struct events *next;
+ char *name;
+};
+
+
+
+static void die(const char *fmt, ...)
+{
+ va_list ap;
+ int ret = errno;
+
+ if (errno)
+ perror("trace-cmd");
+ else
+ ret = -1;
+
+ va_start(ap, fmt);
+ fprintf(stderr, " ");
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+
+ fprintf(stderr, "\n");
+ exit(ret);
+}
+
+void *malloc_or_die(unsigned int size)
+{
+ void *data;
+
+ data = malloc(size);
+ if (!data)
+ die("malloc");
+ return data;
+}
+
+static const char *find_debugfs(void)
+{
+ static char debugfs[MAX_PATH+1];
+ static int debugfs_found;
+ char type[100];
+ FILE *fp;
+
+ if (debugfs_found)
+ return debugfs;
+
+ if ((fp = fopen("/proc/mounts","r")) == NULL)
+ die("Can't open /proc/mounts for read");
+
+ while (fscanf(fp, "%*s %"
+ STR(MAX_PATH)
+ "s %99s %*s %*d %*d\n",
+ debugfs, type) == 2) {
+ if (strcmp(type, "debugfs") == 0)
+ break;
+ }
+ fclose(fp);
+
+ if (strcmp(type, "debugfs") != 0)
+ die("debugfs not mounted, please mount");
+
+ debugfs_found = 1;
+
+ return debugfs;
+}
+
+/*
+ * Finds the path to the debugfs/tracing
+ * Allocates the string and stores it.
+ */
+static const char *find_tracing_dir(void)
+{
+ static char *tracing;
+ static int tracing_found;
+ const char *debugfs;
+
+ if (tracing_found)
+ return tracing;
+
+ debugfs = find_debugfs();
+
+ tracing = malloc_or_die(strlen(debugfs) + 9);
+
+ sprintf(tracing, "%s/tracing", debugfs);
+
+ tracing_found = 1;
+ return tracing;
+}
+
+static char *get_tracing_file(const char *name)
+{
+ const char *tracing;
+ char *file;
+
+ tracing = find_tracing_dir();
+ if (!tracing)
+ return NULL;
+
+ file = malloc_or_die(strlen(tracing) + strlen(name) + 2);
+
+ sprintf(file, "%s/%s", tracing, name);
+ return file;
+}
+
+static void put_tracing_file(char *file)
+{
+ free(file);
+}
+
+static ssize_t write_or_die(const void *buf, size_t len)
+{
+ int ret;
+
+ ret = write(output_fd, buf, len);
+ if (ret < 0)
+ die("writing to '%s'", output_file);
+
+ return ret;
+}
+
+int bigendian(void)
+{
+ unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0};
+ unsigned int *ptr;
+
+ ptr = (unsigned int *)(void *)str;
+ return *ptr == 0x01020304;
+}
+
+static unsigned long long copy_file_fd(int fd)
+{
+ unsigned long long size = 0;
+ char buf[BUFSIZ];
+ int r;
+
+ do {
+ r = read(fd, buf, BUFSIZ);
+ if (r > 0) {
+ size += r;
+ write_or_die(buf, r);
+ }
+ } while (r > 0);
+
+ return size;
+}
+
+static unsigned long long copy_file(const char *file)
+{
+ unsigned long long size = 0;
+ int fd;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ die("Can't read '%s'", file);
+ size = copy_file_fd(fd);
+ close(fd);
+
+ return size;
+}
+
+static unsigned long get_size_fd(int fd)
+{
+ unsigned long long size = 0;
+ char buf[BUFSIZ];
+ int r;
+
+ do {
+ r = read(fd, buf, BUFSIZ);
+ if (r > 0)
+ size += r;
+ } while (r > 0);
+
+ lseek(fd, 0, SEEK_SET);
+
+ return size;
+}
+
+static unsigned long get_size(const char *file)
+{
+ unsigned long long size = 0;
+ int fd;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ die("Can't read '%s'", file);
+ size = get_size_fd(fd);
+ close(fd);
+
+ return size;
+}
+
+static void read_header_files(void)
+{
+ unsigned long long size, check_size;
+ char *path;
+ int fd;
+
+ path = get_tracing_file("events/header_page");
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ die("can't read '%s'", path);
+
+ /* unfortunately, you can not stat debugfs files for size */
+ size = get_size_fd(fd);
+
+ write_or_die("header_page", 12);
+ write_or_die(&size, 8);
+ check_size = copy_file_fd(fd);
+ if (size != check_size)
+ die("wrong size for '%s' size=%lld read=%lld",
+ path, size, check_size);
+ put_tracing_file(path);
+
+ path = get_tracing_file("events/header_event");
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ die("can't read '%s'", path);
+
+ size = get_size_fd(fd);
+
+ write_or_die("header_event", 13);
+ write_or_die(&size, 8);
+ check_size = copy_file_fd(fd);
+ if (size != check_size)
+ die("wrong size for '%s'", path);
+ put_tracing_file(path);
+}
+
+static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
+{
+ while (tps) {
+ if (!strcmp(sys, tps->name))
+ return true;
+ tps = tps->next;
+ }
+
+ return false;
+}
+
+static void copy_event_system(const char *sys, struct tracepoint_path *tps)
+{
+ unsigned long long size, check_size;
+ struct dirent *dent;
+ struct stat st;
+ char *format;
+ DIR *dir;
+ int count = 0;
+ int ret;
+
+ dir = opendir(sys);
+ if (!dir)
+ die("can't read directory '%s'", sys);
+
+ while ((dent = readdir(dir))) {
+ if (strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ !name_in_tp_list(dent->d_name, tps))
+ continue;
+ format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
+ sprintf(format, "%s/%s/format", sys, dent->d_name);
+ ret = stat(format, &st);
+ free(format);
+ if (ret < 0)
+ continue;
+ count++;
+ }
+
+ write_or_die(&count, 4);
+
+ rewinddir(dir);
+ while ((dent = readdir(dir))) {
+ if (strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ !name_in_tp_list(dent->d_name, tps))
+ continue;
+ format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
+ sprintf(format, "%s/%s/format", sys, dent->d_name);
+ ret = stat(format, &st);
+
+ if (ret >= 0) {
+ /* unfortunately, you can not stat debugfs files for size */
+ size = get_size(format);
+ write_or_die(&size, 8);
+ check_size = copy_file(format);
+ if (size != check_size)
+ die("error in size of file '%s'", format);
+ }
+
+ free(format);
+ }
+}
+
+static void read_ftrace_files(struct tracepoint_path *tps)
+{
+ char *path;
+
+ path = get_tracing_file("events/ftrace");
+
+ copy_event_system(path, tps);
+
+ put_tracing_file(path);
+}
+
+static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
+{
+ while (tps) {
+ if (!strcmp(sys, tps->system))
+ return true;
+ tps = tps->next;
+ }
+
+ return false;
+}
+
+static void read_event_files(struct tracepoint_path *tps)
+{
+ struct dirent *dent;
+ struct stat st;
+ char *path;
+ char *sys;
+ DIR *dir;
+ int count = 0;
+ int ret;
+
+ path = get_tracing_file("events");
+
+ dir = opendir(path);
+ if (!dir)
+ die("can't read directory '%s'", path);
+
+ while ((dent = readdir(dir))) {
+ if (strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ strcmp(dent->d_name, "ftrace") == 0 ||
+ !system_in_tp_list(dent->d_name, tps))
+ continue;
+ sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
+ sprintf(sys, "%s/%s", path, dent->d_name);
+ ret = stat(sys, &st);
+ free(sys);
+ if (ret < 0)
+ continue;
+ if (S_ISDIR(st.st_mode))
+ count++;
+ }
+
+ write_or_die(&count, 4);
+
+ rewinddir(dir);
+ while ((dent = readdir(dir))) {
+ if (strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ strcmp(dent->d_name, "ftrace") == 0 ||
+ !system_in_tp_list(dent->d_name, tps))
+ continue;
+ sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
+ sprintf(sys, "%s/%s", path, dent->d_name);
+ ret = stat(sys, &st);
+ if (ret >= 0) {
+ if (S_ISDIR(st.st_mode)) {
+ write_or_die(dent->d_name, strlen(dent->d_name) + 1);
+ copy_event_system(sys, tps);
+ }
+ }
+ free(sys);
+ }
+
+ put_tracing_file(path);
+}
+
+static void read_proc_kallsyms(void)
+{
+ unsigned int size, check_size;
+ const char *path = "/proc/kallsyms";
+ struct stat st;
+ int ret;
+
+ ret = stat(path, &st);
+ if (ret < 0) {
+ /* not found */
+ size = 0;
+ write_or_die(&size, 4);
+ return;
+ }
+ size = get_size(path);
+ write_or_die(&size, 4);
+ check_size = copy_file(path);
+ if (size != check_size)
+ die("error in size of file '%s'", path);
+
+}
+
+static void read_ftrace_printk(void)
+{
+ unsigned int size, check_size;
+ const char *path;
+ struct stat st;
+ int ret;
+
+ path = get_tracing_file("printk_formats");
+ ret = stat(path, &st);
+ if (ret < 0) {
+ /* not found */
+ size = 0;
+ write_or_die(&size, 4);
+ return;
+ }
+ size = get_size(path);
+ write_or_die(&size, 4);
+ check_size = copy_file(path);
+ if (size != check_size)
+ die("error in size of file '%s'", path);
+
+}
+
+static struct tracepoint_path *
+get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
+{
+ struct tracepoint_path path, *ppath = &path;
+ int i;
+
+ for (i = 0; i < nb_counters; i++) {
+ if (pattrs[i].type != PERF_TYPE_TRACEPOINT)
+ continue;
+ ppath->next = tracepoint_id_to_path(pattrs[i].config);
+ if (!ppath->next)
+ die("%s\n", "No memory to alloc tracepoints list");
+ ppath = ppath->next;
+ }
+
+ return path.next;
+}
+void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
+{
+ char buf[BUFSIZ];
+ struct tracepoint_path *tps;
+
+ output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644);
+ if (output_fd < 0)
+ die("creating file '%s'", output_file);
+
+ buf[0] = 23;
+ buf[1] = 8;
+ buf[2] = 68;
+ memcpy(buf + 3, "tracing", 7);
+
+ write_or_die(buf, 10);
+
+ write_or_die(VERSION, strlen(VERSION) + 1);
+
+ /* save endian */
+ if (bigendian())
+ buf[0] = 1;
+ else
+ buf[0] = 0;
+
+ write_or_die(buf, 1);
+
+ /* save size of long */
+ buf[0] = sizeof(long);
+ write_or_die(buf, 1);
+
+ /* save page_size */
+ page_size = getpagesize();
+ write_or_die(&page_size, 4);
+
+ tps = get_tracepoints_path(pattrs, nb_counters);
+
+ read_header_files();
+ read_ftrace_files(tps);
+ read_event_files(tps);
+ read_proc_kallsyms();
+ read_ftrace_printk();
+}
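read_tracing_data() above writes trace.info in a fixed order: three magic bytes (23, 8, 68) plus the string "tracing" (ten bytes total), a NUL-terminated version string ("0.5"), one byte of endianness, one byte holding sizeof(long), four bytes of page size, and then the copied header, ftrace, event, kallsyms and printk_formats data. A sketch of a consumer that validates that header is shown below; check_trace_info_header() is hypothetical and not part of this patch.

	/* hypothetical trace.info consumer -- not part of this patch */
	#include <unistd.h>
	#include <string.h>

	static int check_trace_info_header(int fd)
	{
		char buf[10];

		if (read(fd, buf, 10) != 10)
			return -1;

		/* magic written by read_tracing_data(): 23, 8, 68, "tracing" */
		if (buf[0] != 23 || buf[1] != 8 || buf[2] != 68 ||
		    memcmp(buf + 3, "tracing", 7) != 0)
			return -1;

		/*
		 * What follows: a NUL-terminated version string ("0.5"),
		 * one byte of endianness, one byte with sizeof(long),
		 * four bytes of page size, then the copied format files.
		 */
		return 0;
	}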
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
new file mode 100644
index 000000000000..629e602d9405
--- /dev/null
+++ b/tools/perf/util/trace-event-parse.c
@@ -0,0 +1,2942 @@
+/*
+ * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The parts for function graph printing was taken and modified from the
+ * Linux Kernel that were written by Frederic Weisbecker.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#undef _GNU_SOURCE
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+int header_page_ts_offset;
+int header_page_ts_size;
+int header_page_size_offset;
+int header_page_size_size;
+int header_page_data_offset;
+int header_page_data_size;
+
+static char *input_buf;
+static unsigned long long input_buf_ptr;
+static unsigned long long input_buf_siz;
+
+static int cpus;
+static int long_size;
+
+static void init_input_buf(char *buf, unsigned long long size)
+{
+ input_buf = buf;
+ input_buf_siz = size;
+ input_buf_ptr = 0;
+}
+
+struct cmdline {
+ char *comm;
+ int pid;
+};
+
+static struct cmdline *cmdlines;
+static int cmdline_count;
+
+static int cmdline_cmp(const void *a, const void *b)
+{
+ const struct cmdline *ca = a;
+ const struct cmdline *cb = b;
+
+ if (ca->pid < cb->pid)
+ return -1;
+ if (ca->pid > cb->pid)
+ return 1;
+
+ return 0;
+}
+
+void parse_cmdlines(char *file, int size __unused)
+{
+ struct cmdline_list {
+ struct cmdline_list *next;
+ char *comm;
+ int pid;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ item = malloc_or_die(sizeof(*item));
+ sscanf(line, "%d %as", &item->pid,
+ (float *)(void *)&item->comm); /* workaround gcc warning */
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ cmdline_count++;
+ }
+
+ cmdlines = malloc_or_die(sizeof(*cmdlines) * cmdline_count);
+
+ i = 0;
+ while (list) {
+ cmdlines[i].pid = list->pid;
+ cmdlines[i].comm = list->comm;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(cmdlines, cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+}
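parse_cmdlines() above sorts the cmdlines array by pid via cmdline_cmp(), which allows later pid-to-comm lookups with bsearch(). The lookup helper itself is not part of this hunk; a hypothetical sketch against the fields above could look like this.

	/* hypothetical pid -> comm lookup over the sorted array -- not part of this patch */
	static const char *comm_from_pid(int pid)
	{
		struct cmdline key = { .comm = NULL, .pid = pid };
		struct cmdline *c;

		c = bsearch(&key, cmdlines, cmdline_count,
			    sizeof(*cmdlines), cmdline_cmp);

		return c ? c->comm : NULL;
	}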
+
+static struct func_map {
+ unsigned long long addr;
+ char *func;
+ char *mod;
+} *func_list;
+static unsigned int func_count;
+
+static int func_cmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if (fa->addr < fb->addr)
+ return -1;
+ if (fa->addr > fb->addr)
+ return 1;
+
+ return 0;
+}
+
+void parse_proc_kallsyms(char *file, unsigned int size __unused)
+{
+ struct func_list {
+ struct func_list *next;
+ unsigned long long addr;
+ char *func;
+ char *mod;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ char *addr_str;
+ char ch;
+ int ret;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ item = malloc_or_die(sizeof(*item));
+ item->mod = NULL;
+ ret = sscanf(line, "%as %c %as\t[%as",
+ (float *)(void *)&addr_str, /* workaround gcc warning */
+ &ch,
+ (float *)(void *)&item->func,
+ (float *)(void *)&item->mod);
+ item->addr = strtoull(addr_str, NULL, 16);
+ free(addr_str);
+
+ /* truncate the extra ']' */
+ if (item->mod)
+ item->mod[strlen(item->mod) - 1] = 0;
+
+
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ func_count++;
+ }
+
+	func_list = malloc_or_die(sizeof(*func_list) * (func_count + 1));
+
+ i = 0;
+ while (list) {
+ func_list[i].func = list->func;
+ func_list[i].addr = list->addr;
+ func_list[i].mod = list->mod;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(func_list, func_count, sizeof(*func_list), func_cmp);
+
+ /*
+ * Add a special record at the end.
+ */
+ func_list[func_count].func = NULL;
+ func_list[func_count].addr = 0;
+ func_list[func_count].mod = NULL;
+}
+
+/*
+ * We are searching for a record in between, not an exact
+ * match.
+ */
+static int func_bcmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if ((fa->addr == fb->addr) ||
+
+ (fa->addr > fb->addr &&
+ fa->addr < (fb+1)->addr))
+ return 0;
+
+ if (fa->addr < fb->addr)
+ return -1;
+
+ return 1;
+}
+
+static struct func_map *find_func(unsigned long long addr)
+{
+ struct func_map *func;
+ struct func_map key;
+
+ key.addr = addr;
+
+ func = bsearch(&key, func_list, func_count, sizeof(*func_list),
+ func_bcmp);
+
+ return func;
+}
+
+void print_funcs(void)
+{
+ int i;
+
+ for (i = 0; i < (int)func_count; i++) {
+ printf("%016llx %s",
+ func_list[i].addr,
+ func_list[i].func);
+ if (func_list[i].mod)
+ printf(" [%s]\n", func_list[i].mod);
+ else
+ printf("\n");
+ }
+}
+
+static struct printk_map {
+ unsigned long long addr;
+ char *printk;
+} *printk_list;
+static unsigned int printk_count;
+
+static int printk_cmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if (fa->addr < fb->addr)
+ return -1;
+ if (fa->addr > fb->addr)
+ return 1;
+
+ return 0;
+}
+
+static struct printk_map *find_printk(unsigned long long addr)
+{
+ struct printk_map *printk;
+ struct printk_map key;
+
+ key.addr = addr;
+
+ printk = bsearch(&key, printk_list, printk_count, sizeof(*printk_list),
+ printk_cmp);
+
+ return printk;
+}
+
+void parse_ftrace_printk(char *file, unsigned int size __unused)
+{
+ struct printk_list {
+ struct printk_list *next;
+ unsigned long long addr;
+ char *printk;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ char *addr_str;
+ int ret;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ item = malloc_or_die(sizeof(*item));
+ ret = sscanf(line, "%as : %as",
+ (float *)(void *)&addr_str, /* workaround gcc warning */
+ (float *)(void *)&item->printk);
+ item->addr = strtoull(addr_str, NULL, 16);
+ free(addr_str);
+
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ printk_count++;
+ }
+
+ printk_list = malloc_or_die(sizeof(*printk_list) * printk_count + 1);
+
+ i = 0;
+ while (list) {
+ printk_list[i].printk = list->printk;
+ printk_list[i].addr = list->addr;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(printk_list, printk_count, sizeof(*printk_list), printk_cmp);
+}
+
+void print_printk(void)
+{
+ int i;
+
+ for (i = 0; i < (int)printk_count; i++) {
+ printf("%016llx %s\n",
+ printk_list[i].addr,
+ printk_list[i].printk);
+ }
+}
+
+static struct event *alloc_event(void)
+{
+ struct event *event;
+
+ event = malloc_or_die(sizeof(*event));
+ memset(event, 0, sizeof(*event));
+
+ return event;
+}
+
+enum event_type {
+ EVENT_ERROR,
+ EVENT_NONE,
+ EVENT_SPACE,
+ EVENT_NEWLINE,
+ EVENT_OP,
+ EVENT_DELIM,
+ EVENT_ITEM,
+ EVENT_DQUOTE,
+ EVENT_SQUOTE,
+};
+
+static struct event *event_list;
+
+static void add_event(struct event *event)
+{
+ event->next = event_list;
+ event_list = event;
+}
+
+static int event_item_type(enum event_type type)
+{
+ switch (type) {
+ case EVENT_ITEM ... EVENT_SQUOTE:
+ return 1;
+ case EVENT_ERROR ... EVENT_DELIM:
+ default:
+ return 0;
+ }
+}
+
+static void free_arg(struct print_arg *arg)
+{
+ if (!arg)
+ return;
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ if (arg->atom.atom)
+ free(arg->atom.atom);
+ break;
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_OP:
+ default:
+ /* todo */
+ break;
+ }
+
+ free(arg);
+}
+
+static enum event_type get_type(int ch)
+{
+ if (ch == '\n')
+ return EVENT_NEWLINE;
+ if (isspace(ch))
+ return EVENT_SPACE;
+ if (isalnum(ch) || ch == '_')
+ return EVENT_ITEM;
+ if (ch == '\'')
+ return EVENT_SQUOTE;
+ if (ch == '"')
+ return EVENT_DQUOTE;
+ if (!isprint(ch))
+ return EVENT_NONE;
+ if (ch == '(' || ch == ')' || ch == ',')
+ return EVENT_DELIM;
+
+ return EVENT_OP;
+}
+
+static int __read_char(void)
+{
+ if (input_buf_ptr >= input_buf_siz)
+ return -1;
+
+ return input_buf[input_buf_ptr++];
+}
+
+static int __peek_char(void)
+{
+ if (input_buf_ptr >= input_buf_siz)
+ return -1;
+
+ return input_buf[input_buf_ptr];
+}
+
+static enum event_type __read_token(char **tok)
+{
+ char buf[BUFSIZ];
+ int ch, last_ch, quote_ch, next_ch;
+ int i = 0;
+ int tok_size = 0;
+ enum event_type type;
+
+ *tok = NULL;
+
+
+ ch = __read_char();
+ if (ch < 0)
+ return EVENT_NONE;
+
+ type = get_type(ch);
+ if (type == EVENT_NONE)
+ return type;
+
+ buf[i++] = ch;
+
+ switch (type) {
+ case EVENT_NEWLINE:
+ case EVENT_DELIM:
+ *tok = malloc_or_die(2);
+ (*tok)[0] = ch;
+ (*tok)[1] = 0;
+ return type;
+
+ case EVENT_OP:
+ switch (ch) {
+ case '-':
+ next_ch = __peek_char();
+ if (next_ch == '>') {
+ buf[i++] = __read_char();
+ break;
+ }
+ /* fall through */
+ case '+':
+ case '|':
+ case '&':
+ case '>':
+ case '<':
+ last_ch = ch;
+ ch = __peek_char();
+ if (ch != last_ch)
+ goto test_equal;
+ buf[i++] = __read_char();
+ switch (last_ch) {
+ case '>':
+ case '<':
+ goto test_equal;
+ default:
+ break;
+ }
+ break;
+ case '!':
+ case '=':
+ goto test_equal;
+ default: /* what should we do instead? */
+ break;
+ }
+ buf[i] = 0;
+ *tok = strdup(buf);
+ return type;
+
+ test_equal:
+ ch = __peek_char();
+ if (ch == '=')
+ buf[i++] = __read_char();
+ break;
+
+ case EVENT_DQUOTE:
+ case EVENT_SQUOTE:
+ /* don't keep quotes */
+ i--;
+ quote_ch = ch;
+ last_ch = 0;
+ do {
+ if (i == (BUFSIZ - 1)) {
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + BUFSIZ);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+
+ if (!*tok)
+ return EVENT_NONE;
+ tok_size += BUFSIZ;
+ i = 0;
+ }
+ last_ch = ch;
+ ch = __read_char();
+ buf[i++] = ch;
+ } while (ch != quote_ch && last_ch != '\\');
+ /* remove the last quote */
+ i--;
+ goto out;
+
+ case EVENT_ERROR ... EVENT_SPACE:
+ case EVENT_ITEM:
+ default:
+ break;
+ }
+
+ while (get_type(__peek_char()) == type) {
+ if (i == (BUFSIZ - 1)) {
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + BUFSIZ);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+
+ if (!*tok)
+ return EVENT_NONE;
+ tok_size += BUFSIZ;
+ i = 0;
+ }
+ ch = __read_char();
+ buf[i++] = ch;
+ }
+
+ out:
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + i);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+ if (!*tok)
+ return EVENT_NONE;
+
+ return type;
+}
+
+static void free_token(char *tok)
+{
+ if (tok)
+ free(tok);
+}
+
+static enum event_type read_token(char **tok)
+{
+ enum event_type type;
+
+ for (;;) {
+ type = __read_token(tok);
+ if (type != EVENT_SPACE)
+ return type;
+
+ free_token(*tok);
+ }
+
+ /* not reached */
+ return EVENT_NONE;
+}
+
+/* no newline */
+static enum event_type read_token_item(char **tok)
+{
+ enum event_type type;
+
+ for (;;) {
+ type = __read_token(tok);
+ if (type != EVENT_SPACE && type != EVENT_NEWLINE)
+ return type;
+
+ free_token(*tok);
+ }
+
+ /* not reached */
+ return EVENT_NONE;
+}
+
+static int test_type(enum event_type type, enum event_type expect)
+{
+ if (type != expect) {
+ die("Error: expected type %d but read %d",
+ expect, type);
+ return -1;
+ }
+ return 0;
+}
+
+static int test_type_token(enum event_type type, char *token,
+ enum event_type expect, char *expect_tok)
+{
+ if (type != expect) {
+ die("Error: expected type %d but read %d",
+ expect, type);
+ return -1;
+ }
+
+ if (strcmp(token, expect_tok) != 0) {
+ die("Error: expected '%s' but read '%s'",
+ expect_tok, token);
+ return -1;
+ }
+ return 0;
+}
+
+static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
+{
+ enum event_type type;
+
+ if (newline_ok)
+ type = read_token(tok);
+ else
+ type = read_token_item(tok);
+ return test_type(type, expect);
+}
+
+static int read_expect_type(enum event_type expect, char **tok)
+{
+ return __read_expect_type(expect, tok, 1);
+}
+
+static int __read_expected(enum event_type expect, char *str, int newline_ok)
+{
+ enum event_type type;
+ char *token;
+ int ret;
+
+ if (newline_ok)
+ type = read_token(&token);
+ else
+ type = read_token_item(&token);
+
+ ret = test_type_token(type, token, expect, str);
+
+ free_token(token);
+
+	return ret;
+}
+
+static int read_expected(enum event_type expect, char *str)
+{
+ return __read_expected(expect, str, 1);
+}
+
+static int read_expected_item(enum event_type expect, char *str)
+{
+ return __read_expected(expect, str, 0);
+}
+
+static char *event_read_name(void)
+{
+ char *token;
+
+ if (read_expected(EVENT_ITEM, (char *)"name") < 0)
+ return NULL;
+
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ return NULL;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ return token;
+
+ fail:
+ free_token(token);
+ return NULL;
+}
+
+static int event_read_id(void)
+{
+ char *token;
+ int id;
+
+ if (read_expected_item(EVENT_ITEM, (char *)"ID") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ id = strtoul(token, NULL, 0);
+ free_token(token);
+ return id;
+
+ fail:
+ free_token(token);
+ return -1;
+}
+
+static int event_read_fields(struct event *event, struct format_field **fields)
+{
+ struct format_field *field = NULL;
+ enum event_type type;
+ char *token;
+ char *last_token;
+ int count = 0;
+
+ do {
+ type = read_token(&token);
+ if (type == EVENT_NEWLINE) {
+ free_token(token);
+ return count;
+ }
+
+ count++;
+
+ if (test_type_token(type, token, EVENT_ITEM, (char *)"field"))
+ goto fail;
+ free_token(token);
+
+ type = read_token(&token);
+ /*
+ * The ftrace fields may still use the "special" name.
+ * Just ignore it.
+ */
+ if (event->flags & EVENT_FL_ISFTRACE &&
+ type == EVENT_ITEM && strcmp(token, "special") == 0) {
+ free_token(token);
+ type = read_token(&token);
+ }
+
+ if (test_type_token(type, token, EVENT_OP, (char *)":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ last_token = token;
+
+ field = malloc_or_die(sizeof(*field));
+ memset(field, 0, sizeof(*field));
+
+ /* read the rest of the type */
+ for (;;) {
+ type = read_token(&token);
+ if (type == EVENT_ITEM ||
+ (type == EVENT_OP && strcmp(token, "*") == 0) ||
+ /*
+ * Some of the ftrace fields are broken and have
+ * an illegal "." in them.
+ */
+ (event->flags & EVENT_FL_ISFTRACE &&
+ type == EVENT_OP && strcmp(token, ".") == 0)) {
+
+ if (strcmp(token, "*") == 0)
+ field->flags |= FIELD_IS_POINTER;
+
+ if (field->type) {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(last_token) + 2);
+ strcat(field->type, " ");
+ strcat(field->type, last_token);
+ } else
+ field->type = last_token;
+ last_token = token;
+ continue;
+ }
+
+ break;
+ }
+
+ if (!field->type) {
+ die("no type found");
+ goto fail;
+ }
+ field->name = last_token;
+
+ if (test_type(type, EVENT_OP))
+ goto fail;
+
+ if (strcmp(token, "[") == 0) {
+ enum event_type last_type = type;
+ char *brackets = token;
+ int len;
+
+ field->flags |= FIELD_IS_ARRAY;
+
+ type = read_token(&token);
+ while (strcmp(token, "]") != 0) {
+ if (last_type == EVENT_ITEM &&
+ type == EVENT_ITEM)
+ len = 2;
+ else
+ len = 1;
+ last_type = type;
+
+ brackets = realloc(brackets,
+ strlen(brackets) +
+ strlen(token) + len);
+ if (len == 2)
+ strcat(brackets, " ");
+ strcat(brackets, token);
+ free_token(token);
+ type = read_token(&token);
+ if (type == EVENT_NONE) {
+ die("failed to find token");
+ goto fail;
+ }
+ }
+
+ free_token(token);
+
+ brackets = realloc(brackets, strlen(brackets) + 2);
+ strcat(brackets, "]");
+
+ /* add brackets to type */
+
+ type = read_token(&token);
+ /*
+ * If the next token is not an OP, then it is of
+ * the format: type [] item;
+ */
+ if (type == EVENT_ITEM) {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(field->name) +
+ strlen(brackets) + 2);
+ strcat(field->type, " ");
+ strcat(field->type, field->name);
+ free_token(field->name);
+ strcat(field->type, brackets);
+ field->name = token;
+ type = read_token(&token);
+ } else {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(brackets) + 1);
+ strcat(field->type, brackets);
+ }
+ free(brackets);
+ }
+
+ if (test_type_token(type, token, EVENT_OP, (char *)";"))
+ goto fail;
+ free_token(token);
+
+ if (read_expected(EVENT_ITEM, (char *)"offset") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+ field->offset = strtoul(token, NULL, 0);
+ free_token(token);
+
+ if (read_expected(EVENT_OP, (char *)";") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_ITEM, (char *)"size") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+ field->size = strtoul(token, NULL, 0);
+ free_token(token);
+
+ if (read_expected(EVENT_OP, (char *)";") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_NEWLINE, &token) < 0)
+ goto fail;
+ free_token(token);
+
+ *fields = field;
+ fields = &field->next;
+
+ } while (1);
+
+ return 0;
+
+fail:
+ free_token(token);
+fail_expect:
+ if (field)
+ free(field);
+ return -1;
+}
+
+static int event_read_format(struct event *event)
+{
+ char *token;
+ int ret;
+
+ if (read_expected_item(EVENT_ITEM, (char *)"format") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_NEWLINE, &token))
+ goto fail;
+ free_token(token);
+
+ ret = event_read_fields(event, &event->format.common_fields);
+ if (ret < 0)
+ return ret;
+ event->format.nr_common = ret;
+
+ ret = event_read_fields(event, &event->format.fields);
+ if (ret < 0)
+ return ret;
+ event->format.nr_fields = ret;
+
+ return 0;
+
+ fail:
+ free_token(token);
+ return -1;
+}
+
+enum event_type
+process_arg_token(struct event *event, struct print_arg *arg,
+ char **tok, enum event_type type);
+
+static enum event_type
+process_arg(struct event *event, struct print_arg *arg, char **tok)
+{
+ enum event_type type;
+ char *token;
+
+ type = read_token(&token);
+ *tok = token;
+
+ return process_arg_token(event, arg, tok, type);
+}
+
+static enum event_type
+process_cond(struct event *event, struct print_arg *top, char **tok)
+{
+ struct print_arg *arg, *left, *right;
+ enum event_type type;
+ char *token = NULL;
+
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ left = malloc_or_die(sizeof(*left));
+
+ right = malloc_or_die(sizeof(*right));
+
+ arg->type = PRINT_OP;
+ arg->op.left = left;
+ arg->op.right = right;
+
+ *tok = NULL;
+ type = process_arg(event, left, &token);
+ if (test_type_token(type, token, EVENT_OP, (char *)":"))
+ goto out_free;
+
+ arg->op.op = token;
+
+ type = process_arg(event, right, &token);
+
+ top->op.right = arg;
+
+ *tok = token;
+ return type;
+
+out_free:
+ free_token(*tok);
+ free(right);
+ free(left);
+ free_arg(arg);
+ return EVENT_ERROR;
+}
+
+static int get_op_prio(char *op)
+{
+ if (!op[1]) {
+ switch (op[0]) {
+ case '*':
+ case '/':
+ case '%':
+ return 6;
+ case '+':
+ case '-':
+ return 7;
+ /* '>>' and '<<' are 8 */
+ case '<':
+ case '>':
+ return 9;
+ /* '==' and '!=' are 10 */
+ case '&':
+ return 11;
+ case '^':
+ return 12;
+ case '|':
+ return 13;
+ case '?':
+ return 16;
+ default:
+ die("unknown op '%c'", op[0]);
+ return -1;
+ }
+ } else {
+ if (strcmp(op, "++") == 0 ||
+ strcmp(op, "--") == 0) {
+ return 3;
+ } else if (strcmp(op, ">>") == 0 ||
+ strcmp(op, "<<") == 0) {
+ return 8;
+ } else if (strcmp(op, ">=") == 0 ||
+ strcmp(op, "<=") == 0) {
+ return 9;
+ } else if (strcmp(op, "==") == 0 ||
+ strcmp(op, "!=") == 0) {
+ return 10;
+ } else if (strcmp(op, "&&") == 0) {
+ return 14;
+ } else if (strcmp(op, "||") == 0) {
+ return 15;
+ } else {
+ die("unknown op '%s'", op);
+ return -1;
+ }
+ }
+}
+
+static void set_op_prio(struct print_arg *arg)
+{
+
+ /* single ops are the greatest */
+ if (!arg->op.left || arg->op.left->type == PRINT_NULL) {
+ arg->op.prio = 0;
+ return;
+ }
+
+ arg->op.prio = get_op_prio(arg->op.op);
+}
+
+static enum event_type
+process_op(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *left, *right = NULL;
+ enum event_type type;
+ char *token;
+
+ /* the op is passed in via tok */
+ token = *tok;
+
+ if (arg->type == PRINT_OP && !arg->op.left) {
+ /* handle single op */
+ if (token[1]) {
+ die("bad op token %s", token);
+ return EVENT_ERROR;
+ }
+ switch (token[0]) {
+ case '!':
+ case '+':
+ case '-':
+ break;
+ default:
+ die("bad op token %s", token);
+ return EVENT_ERROR;
+ }
+
+ /* make an empty left */
+ left = malloc_or_die(sizeof(*left));
+ left->type = PRINT_NULL;
+ arg->op.left = left;
+
+ right = malloc_or_die(sizeof(*right));
+ arg->op.right = right;
+
+ type = process_arg(event, right, tok);
+
+ } else if (strcmp(token, "?") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+ /* copy the top arg to the left */
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+ arg->op.prio = 0;
+
+ type = process_cond(event, arg, tok);
+
+ } else if (strcmp(token, ">>") == 0 ||
+ strcmp(token, "<<") == 0 ||
+ strcmp(token, "&") == 0 ||
+ strcmp(token, "|") == 0 ||
+ strcmp(token, "&&") == 0 ||
+ strcmp(token, "||") == 0 ||
+ strcmp(token, "-") == 0 ||
+ strcmp(token, "+") == 0 ||
+ strcmp(token, "*") == 0 ||
+ strcmp(token, "^") == 0 ||
+ strcmp(token, "/") == 0 ||
+ strcmp(token, "==") == 0 ||
+ strcmp(token, "!=") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+
+ /* copy the top arg to the left */
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+
+ set_op_prio(arg);
+
+ right = malloc_or_die(sizeof(*right));
+
+ type = process_arg(event, right, tok);
+
+ arg->op.right = right;
+
+ } else {
+ die("unknown op '%s'", token);
+ /* the arg is now the left side */
+ return EVENT_NONE;
+ }
+
+
+ if (type == EVENT_OP) {
+ int prio;
+
+ /* higher prios need to be closer to the root */
+ prio = get_op_prio(*tok);
+
+ if (prio > arg->op.prio)
+ return process_op(event, arg, tok);
+
+ return process_op(event, right, tok);
+ }
+
+ return type;
+}
+
+static enum event_type
+process_entry(struct event *event __unused, struct print_arg *arg,
+ char **tok)
+{
+ enum event_type type;
+ char *field;
+ char *token;
+
+ if (read_expected(EVENT_OP, (char *)"->") < 0)
+ return EVENT_ERROR;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+ field = token;
+
+ arg->type = PRINT_FIELD;
+ arg->field.name = field;
+
+ type = read_token(&token);
+ *tok = token;
+
+ return type;
+
+fail:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static char *arg_eval (struct print_arg *arg);
+
+static long long arg_num_eval(struct print_arg *arg)
+{
+ long long left, right;
+ long long val = 0;
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ val = strtoll(arg->atom.atom, NULL, 0);
+ break;
+ case PRINT_TYPE:
+ val = arg_num_eval(arg->typecast.item);
+ break;
+ case PRINT_OP:
+ switch (arg->op.op[0]) {
+ case '|':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ if (arg->op.op[1])
+ val = left || right;
+ else
+ val = left | right;
+ break;
+ case '&':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ if (arg->op.op[1])
+ val = left && right;
+ else
+ val = left & right;
+ break;
+ case '<':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left < right;
+ break;
+ case '<':
+ val = left << right;
+ break;
+ case '=':
+ val = left <= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '>':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left > right;
+ break;
+ case '>':
+ val = left >> right;
+ break;
+ case '=':
+ val = left >= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '=':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+
+ if (arg->op.op[1] != '=')
+ die("unknown op '%s'", arg->op.op);
+
+ val = left == right;
+ break;
+ case '!':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+
+ switch (arg->op.op[1]) {
+ case '=':
+ val = left != right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_SYMBOL:
+ case PRINT_STRING:
+ default:
+ die("invalid eval type %d", arg->type);
+
+ }
+ return val;
+}
+
+static char *arg_eval (struct print_arg *arg)
+{
+ long long val;
+ static char buf[20];
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ return arg->atom.atom;
+ case PRINT_TYPE:
+ return arg_eval(arg->typecast.item);
+ case PRINT_OP:
+ val = arg_num_eval(arg);
+ sprintf(buf, "%lld", val);
+ return buf;
+
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_SYMBOL:
+ case PRINT_STRING:
+ default:
+ die("invalid eval type %d", arg->type);
+ break;
+ }
+
+ return NULL;
+}
+
+static enum event_type
+process_fields(struct event *event, struct print_flag_sym **list, char **tok)
+{
+ enum event_type type;
+ struct print_arg *arg = NULL;
+ struct print_flag_sym *field;
+ char *token = NULL;
+ char *value;
+
+ do {
+ free_token(token);
+ type = read_token_item(&token);
+ if (test_type_token(type, token, EVENT_OP, (char *)"{"))
+ break;
+
+ arg = malloc_or_die(sizeof(*arg));
+
+ free_token(token);
+ type = process_arg(event, arg, &token);
+ if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ goto out_free;
+
+ field = malloc_or_die(sizeof(*field));
+		memset(field, 0, sizeof(*field));
+
+ value = arg_eval(arg);
+ field->value = strdup(value);
+
+ free_token(token);
+ type = process_arg(event, arg, &token);
+ if (test_type_token(type, token, EVENT_OP, (char *)"}"))
+ goto out_free;
+
+ value = arg_eval(arg);
+ field->str = strdup(value);
+ free_arg(arg);
+ arg = NULL;
+
+ *list = field;
+ list = &field->next;
+
+ free_token(token);
+ type = read_token_item(&token);
+ } while (type == EVENT_DELIM && strcmp(token, ",") == 0);
+
+ *tok = token;
+ return type;
+
+out_free:
+ free_arg(arg);
+ free_token(token);
+
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_flags(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *field;
+ enum event_type type;
+ char *token;
+
+ memset(arg, 0, sizeof(*arg));
+ arg->type = PRINT_FLAGS;
+
+ if (read_expected_item(EVENT_DELIM, (char *)"(") < 0)
+ return EVENT_ERROR;
+
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ goto out_free;
+
+ arg->flags.field = field;
+
+ type = read_token_item(&token);
+ if (event_item_type(type)) {
+ arg->flags.delim = token;
+ type = read_token_item(&token);
+ }
+
+ if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ goto out_free;
+
+ type = process_fields(event, &arg->flags.flags, &token);
+ if (test_type_token(type, token, EVENT_DELIM, (char *)")"))
+ goto out_free;
+
+ free_token(token);
+ type = read_token_item(tok);
+ return type;
+
+out_free:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_symbols(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *field;
+ enum event_type type;
+ char *token;
+
+ memset(arg, 0, sizeof(*arg));
+ arg->type = PRINT_SYMBOL;
+
+ if (read_expected_item(EVENT_DELIM, (char *)"(") < 0)
+ return EVENT_ERROR;
+
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ goto out_free;
+
+ arg->symbol.field = field;
+
+ type = process_fields(event, &arg->symbol.symbols, &token);
+ if (test_type_token(type, token, EVENT_DELIM, (char *)")"))
+ goto out_free;
+
+ free_token(token);
+ type = read_token_item(tok);
+ return type;
+
+out_free:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_paren(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *item_arg;
+ enum event_type type;
+ int ptr_cast = 0;
+ char *token;
+
+ type = process_arg(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ return EVENT_ERROR;
+
+ if (type == EVENT_OP) {
+ /* handle the ptr casts */
+ if (!strcmp(token, "*")) {
+ /*
+			 * FIXME: should we zap whitespace before ')' ?
+ * (may require a peek_token_item())
+ */
+ if (__peek_char() == ')') {
+ ptr_cast = 1;
+ free_token(token);
+ type = read_token_item(&token);
+ }
+ }
+ if (!ptr_cast) {
+ type = process_op(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ return EVENT_ERROR;
+ }
+ }
+
+ if (test_type_token(type, token, EVENT_DELIM, (char *)")")) {
+ free_token(token);
+ return EVENT_ERROR;
+ }
+
+ free_token(token);
+ type = read_token_item(&token);
+
+ /*
+ * If the next token is an item or another open paren, then
+ * this was a typecast.
+ */
+ if (event_item_type(type) ||
+ (type == EVENT_DELIM && strcmp(token, "(") == 0)) {
+
+		/* make this a typecast and continue */
+
+		/* previous must be an atom */
+ if (arg->type != PRINT_ATOM)
+ die("previous needed to be PRINT_ATOM");
+
+ item_arg = malloc_or_die(sizeof(*item_arg));
+
+ arg->type = PRINT_TYPE;
+ if (ptr_cast) {
+ char *old = arg->atom.atom;
+
+			arg->atom.atom = malloc_or_die(strlen(old) + 3);
+ sprintf(arg->atom.atom, "%s *", old);
+ free(old);
+ }
+ arg->typecast.type = arg->atom.atom;
+ arg->typecast.item = item_arg;
+ type = process_arg_token(event, item_arg, &token, type);
+
+ }
+
+ *tok = token;
+ return type;
+}
+
+
+static enum event_type
+process_str(struct event *event __unused, struct print_arg *arg, char **tok)
+{
+ enum event_type type;
+ char *token;
+
+ if (read_expected(EVENT_DELIM, (char *)"(") < 0)
+ return EVENT_ERROR;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ arg->type = PRINT_STRING;
+ arg->string.string = token;
+ arg->string.offset = -1;
+
+ if (read_expected(EVENT_DELIM, (char *)")") < 0)
+ return EVENT_ERROR;
+
+ type = read_token(&token);
+ *tok = token;
+
+ return type;
+fail:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+enum event_type
+process_arg_token(struct event *event, struct print_arg *arg,
+ char **tok, enum event_type type)
+{
+ char *token;
+ char *atom;
+
+ token = *tok;
+
+ switch (type) {
+ case EVENT_ITEM:
+ if (strcmp(token, "REC") == 0) {
+ free_token(token);
+ type = process_entry(event, arg, &token);
+ } else if (strcmp(token, "__print_flags") == 0) {
+ free_token(token);
+ type = process_flags(event, arg, &token);
+ } else if (strcmp(token, "__print_symbolic") == 0) {
+ free_token(token);
+ type = process_symbols(event, arg, &token);
+ } else if (strcmp(token, "__get_str") == 0) {
+ free_token(token);
+ type = process_str(event, arg, &token);
+ } else {
+ atom = token;
+ /* test the next token */
+ type = read_token_item(&token);
+
+ /* atoms can be more than one token long */
+ while (type == EVENT_ITEM) {
+ atom = realloc(atom, strlen(atom) + strlen(token) + 2);
+ strcat(atom, " ");
+ strcat(atom, token);
+ free_token(token);
+ type = read_token_item(&token);
+ }
+
+ /* todo, test for function */
+
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = atom;
+ }
+ break;
+ case EVENT_DQUOTE:
+ case EVENT_SQUOTE:
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = token;
+ type = read_token_item(&token);
+ break;
+ case EVENT_DELIM:
+ if (strcmp(token, "(") == 0) {
+ free_token(token);
+ type = process_paren(event, arg, &token);
+ break;
+ }
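+ /* fall through */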
+ case EVENT_OP:
+ /* handle single ops */
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = NULL;
+ type = process_op(event, arg, &token);
+
+ break;
+
+ case EVENT_ERROR ... EVENT_NEWLINE:
+ default:
+ die("unexpected type %d", type);
+ }
+ *tok = token;
+
+ return type;
+}
+
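+/*
+ * Parse the comma separated argument list that follows the print
+ * format string.  Returns the number of arguments parsed, or -1 on
+ * error.
+ */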
+static int event_read_print_args(struct event *event, struct print_arg **list)
+{
+ enum event_type type;
+ struct print_arg *arg;
+ char *token;
+ int args = 0;
+
+ do {
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ type = process_arg(event, arg, &token);
+
+ if (type == EVENT_ERROR) {
+ free_arg(arg);
+ return -1;
+ }
+
+ *list = arg;
+ args++;
+
+ if (type == EVENT_OP) {
+ type = process_op(event, arg, &token);
+ list = &arg->next;
+ continue;
+ }
+
+ if (type == EVENT_DELIM && strcmp(token, ",") == 0) {
+ free_token(token);
+ *list = arg;
+ list = &arg->next;
+ continue;
+ }
+ break;
+ } while (type != EVENT_NONE);
+
+ if (type != EVENT_NONE)
+ free_token(token);
+
+ return args;
+}
+
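+/*
+ * Parse the 'print fmt: "..."[, args]' line of an event format file
+ * and store the format string and its argument list in the event.
+ */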
+static int event_read_print(struct event *event)
+{
+ enum event_type type;
+ char *token;
+ int ret;
+
+ if (read_expected_item(EVENT_ITEM, (char *)"print") < 0)
+ return -1;
+
+ if (read_expected(EVENT_ITEM, (char *)"fmt") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_DQUOTE, &token) < 0)
+ goto fail;
+
+ event->print_fmt.format = token;
+ event->print_fmt.args = NULL;
+
+ /* ok to have no arg */
+ type = read_token_item(&token);
+
+ if (type == EVENT_NONE)
+ return 0;
+
+ if (test_type_token(type, token, EVENT_DELIM, (char *)","))
+ goto fail;
+
+ free_token(token);
+
+ ret = event_read_print_args(event, &event->print_fmt.args);
+ if (ret < 0)
+ return -1;
+
+ return 0;
+
+ fail:
+ free_token(token);
+ return -1;
+}
+
+static struct format_field *
+find_common_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ for (format = event->format.common_fields;
+ format; format = format->next) {
+ if (strcmp(format->name, name) == 0)
+ break;
+ }
+
+ return format;
+}
+
+static struct format_field *
+find_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ for (format = event->format.fields;
+ format; format = format->next) {
+ if (strcmp(format->name, name) == 0)
+ break;
+ }
+
+ return format;
+}
+
+static struct format_field *
+find_any_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ format = find_common_field(event, name);
+ if (format)
+ return format;
+ return find_field(event, name);
+}
+
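+/*
+ * Read a 1, 2, 4 or 8 byte value from the raw event data and convert
+ * it to host byte order.
+ */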
+static unsigned long long read_size(void *ptr, int size)
+{
+ switch (size) {
+ case 1:
+ return *(unsigned char *)ptr;
+ case 2:
+ return data2host2(ptr);
+ case 4:
+ return data2host4(ptr);
+ case 8:
+ return data2host8(ptr);
+ default:
+ /* BUG! */
+ return 0;
+ }
+}
+
+static int get_common_info(const char *type, int *offset, int *size)
+{
+ struct event *event;
+ struct format_field *field;
+
+ /*
+ * All events should have the same common elements.
+ * Pick any event to find where this field is.
+ */
+ if (!event_list)
+ die("no event_list!");
+
+ event = event_list;
+ field = find_common_field(event, type);
+ if (!field)
+ die("field '%s' not found", type);
+
+ *offset = field->offset;
+ *size = field->size;
+
+ return 0;
+}
+
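+/*
+ * Extract the event type id from the common header of a raw event.
+ * The field offset and size are looked up once and then cached.
+ */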
+static int parse_common_type(void *data)
+{
+ static int type_offset;
+ static int type_size;
+ int ret;
+
+ if (!type_size) {
+ ret = get_common_info("common_type",
+ &type_offset,
+ &type_size);
+ if (ret < 0)
+ return ret;
+ }
+ return read_size(data + type_offset, type_size);
+}
+
+static int parse_common_pid(void *data)
+{
+ static int pid_offset;
+ static int pid_size;
+ int ret;
+
+ if (!pid_size) {
+ ret = get_common_info("common_pid",
+ &pid_offset,
+ &pid_size);
+ if (ret < 0)
+ return ret;
+ }
+
+ return read_size(data + pid_offset, pid_size);
+}
+
+static struct event *find_event(int id)
+{
+ struct event *event;
+
+ for (event = event_list; event; event = event->next) {
+ if (event->id == id)
+ break;
+ }
+ return event;
+}
+
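+/*
+ * Recursively evaluate a print argument to a numeric value, reading
+ * field contents from the raw event data and applying any operators.
+ */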
+static unsigned long long eval_num_arg(void *data, int size,
+ struct event *event, struct print_arg *arg)
+{
+ unsigned long long val = 0;
+ unsigned long long left, right;
+
+ switch (arg->type) {
+ case PRINT_NULL:
+ /* ?? */
+ return 0;
+ case PRINT_ATOM:
+ return strtoull(arg->atom.atom, NULL, 0);
+ case PRINT_FIELD:
+ if (!arg->field.field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ if (!arg->field.field)
+ die("field %s not found", arg->field.name);
+ }
+ /* must be a number */
+ val = read_size(data + arg->field.field->offset,
+ arg->field.field->size);
+ break;
+ case PRINT_FLAGS:
+ case PRINT_SYMBOL:
+ break;
+ case PRINT_TYPE:
+ return eval_num_arg(data, size, event, arg->typecast.item);
+ case PRINT_STRING:
+ return 0;
+ break;
+ case PRINT_OP:
+ left = eval_num_arg(data, size, event, arg->op.left);
+ right = eval_num_arg(data, size, event, arg->op.right);
+ switch (arg->op.op[0]) {
+ case '|':
+ if (arg->op.op[1])
+ val = left || right;
+ else
+ val = left | right;
+ break;
+ case '&':
+ if (arg->op.op[1])
+ val = left && right;
+ else
+ val = left & right;
+ break;
+ case '<':
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left < right;
+ break;
+ case '<':
+ val = left << right;
+ break;
+ case '=':
+ val = left <= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '>':
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left > right;
+ break;
+ case '>':
+ val = left >> right;
+ break;
+ case '=':
+ val = left >= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '=':
+ if (arg->op.op[1] != '=')
+ die("unknown op '%s'", arg->op.op);
+ val = left == right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ default: /* not sure what to do there */
+ return 0;
+ }
+ return val;
+}
+
+struct flag {
+ const char *name;
+ unsigned long long value;
+};
+
+static const struct flag flags[] = {
+ { "HI_SOFTIRQ", 0 },
+ { "TIMER_SOFTIRQ", 1 },
+ { "NET_TX_SOFTIRQ", 2 },
+ { "NET_RX_SOFTIRQ", 3 },
+ { "BLOCK_SOFTIRQ", 4 },
+ { "TASKLET_SOFTIRQ", 5 },
+ { "SCHED_SOFTIRQ", 6 },
+ { "HRTIMER_SOFTIRQ", 7 },
+ { "RCU_SOFTIRQ", 8 },
+
+ { "HRTIMER_NORESTART", 0 },
+ { "HRTIMER_RESTART", 1 },
+};
+
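+/*
+ * Convert a flag name from the format file into a numeric value.
+ * Numeric strings are parsed directly; otherwise fall back to a small
+ * table of well known kernel flag names.
+ */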
+static unsigned long long eval_flag(const char *flag)
+{
+ int i;
+
+ /*
+ * Some flags in the format files do not get converted.
+ * If the flag is not numeric, see if it is something that
+ * we already know about.
+ */
+ if (isdigit(flag[0]))
+ return strtoull(flag, NULL, 0);
+
+ for (i = 0; i < (int)(sizeof(flags)/sizeof(flags[0])); i++)
+ if (strcmp(flags[i].name, flag) == 0)
+ return flags[i].value;
+
+ return 0;
+}
+
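+/*
+ * Print an argument that produces text: atoms, string fields,
+ * __print_flags()/__print_symbolic() translations and the ternary
+ * "? :" operator are handled here.
+ */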
+static void print_str_arg(void *data, int size,
+ struct event *event, struct print_arg *arg)
+{
+ struct print_flag_sym *flag;
+ unsigned long long val, fval;
+ char *str;
+ int print;
+
+ switch (arg->type) {
+ case PRINT_NULL:
+ /* ?? */
+ return;
+ case PRINT_ATOM:
+ printf("%s", arg->atom.atom);
+ return;
+ case PRINT_FIELD:
+ if (!arg->field.field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ if (!arg->field.field)
+ die("field %s not found", arg->field.name);
+ }
+ str = malloc_or_die(arg->field.field->size + 1);
+ memcpy(str, data + arg->field.field->offset,
+ arg->field.field->size);
+ str[arg->field.field->size] = 0;
+ printf("%s", str);
+ free(str);
+ break;
+ case PRINT_FLAGS:
+ val = eval_num_arg(data, size, event, arg->flags.field);
+ print = 0;
+ for (flag = arg->flags.flags; flag; flag = flag->next) {
+ fval = eval_flag(flag->value);
+ if (!val && !fval) {
+ printf("%s", flag->str);
+ break;
+ }
+ if (fval && (val & fval) == fval) {
+ if (print && arg->flags.delim)
+ printf("%s", arg->flags.delim);
+ printf("%s", flag->str);
+ print = 1;
+ val &= ~fval;
+ }
+ }
+ break;
+ case PRINT_SYMBOL:
+ val = eval_num_arg(data, size, event, arg->symbol.field);
+ for (flag = arg->symbol.symbols; flag; flag = flag->next) {
+ fval = eval_flag(flag->value);
+ if (val == fval) {
+ printf("%s", flag->str);
+ break;
+ }
+ }
+ break;
+
+ case PRINT_TYPE:
+ break;
+ case PRINT_STRING: {
+ int str_offset;
+
+ if (arg->string.offset == -1) {
+ struct format_field *f;
+
+ f = find_any_field(event, arg->string.string);
+ arg->string.offset = f->offset;
+ }
+ str_offset = *(int *)(data + arg->string.offset);
+ str_offset &= 0xffff;
+ printf("%s", ((char *)data) + str_offset);
+ break;
+ }
+ case PRINT_OP:
+ /*
+ * The only op for string should be ? :
+ */
+ if (arg->op.op[0] != '?')
+ return;
+ val = eval_num_arg(data, size, event, arg->op.left);
+ if (val)
+ print_str_arg(data, size, event, arg->op.right->op.left);
+ else
+ print_str_arg(data, size, event, arg->op.right->op.right);
+ break;
+ default:
+ /* well... */
+ break;
+ }
+}
+
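+/*
+ * Rebuild the argument list of a binary printk (bprint) event by
+ * walking its format string and pulling each value out of the
+ * recorded buffer.
+ */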
+static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event *event)
+{
+ static struct format_field *field, *ip_field;
+ struct print_arg *args, *arg, **next;
+ unsigned long long ip, val;
+ char *ptr;
+ void *bptr;
+
+ if (!field) {
+ field = find_field(event, "buf");
+ if (!field)
+ die("can't find buffer field for binary printk");
+ ip_field = find_field(event, "ip");
+ if (!ip_field)
+ die("can't find ip field for binary printk");
+ }
+
+ ip = read_size(data + ip_field->offset, ip_field->size);
+
+ /*
+ * The first arg is the IP pointer.
+ */
+ args = malloc_or_die(sizeof(*args));
+ arg = args;
+ arg->next = NULL;
+ next = &arg->next;
+
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = malloc_or_die(32);
+ sprintf(arg->atom.atom, "%lld", ip);
+
+ /* skip the first "%pf : " */
+ for (ptr = fmt + 6, bptr = data + field->offset;
+ bptr < data + size && *ptr; ptr++) {
+ int ls = 0;
+
+ if (*ptr == '%') {
+ process_again:
+ ptr++;
+ switch (*ptr) {
+ case '%':
+ break;
+ case 'l':
+ ls++;
+ goto process_again;
+ case 'L':
+ ls = 2;
+ goto process_again;
+ case '0' ... '9':
+ goto process_again;
+ case 'p':
+ ls = 1;
+ /* fall through */
+ case 'd':
+ case 'u':
+ case 'x':
+ case 'i':
+ bptr = (void *)(((unsigned long)bptr + (long_size - 1)) &
+ ~(long_size - 1));
+ switch (ls) {
+ case 0:
+ case 1:
+ ls = long_size;
+ break;
+ case 2:
+ ls = 8;
+ default:
+ break;
+ }
+ val = read_size(bptr, ls);
+ bptr += ls;
+ arg = malloc_or_die(sizeof(*arg));
+ arg->next = NULL;
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = malloc_or_die(32);
+ sprintf(arg->atom.atom, "%lld", val);
+ *next = arg;
+ next = &arg->next;
+ break;
+ case 's':
+ arg = malloc_or_die(sizeof(*arg));
+ arg->next = NULL;
+ arg->type = PRINT_STRING;
+ arg->string.string = strdup(bptr);
+ bptr += strlen(bptr) + 1;
+ *next = arg;
+ next = &arg->next;
+ default:
+ break;
+ }
+ }
+ }
+
+ return args;
+}
+
+static void free_args(struct print_arg *args)
+{
+ struct print_arg *next;
+
+ while (args) {
+ next = args->next;
+
+ if (args->type == PRINT_ATOM)
+ free(args->atom.atom);
+ else
+ free(args->string.string);
+ free(args);
+ args = next;
+ }
+}
+
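+/*
+ * Look up the format string of a binary printk event from the
+ * recorded address and turn it into a "%pf : <fmt>" string.
+ */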
+static char *get_bprint_format(void *data, int size __unused, struct event *event)
+{
+ unsigned long long addr;
+ static struct format_field *field;
+ struct printk_map *printk;
+ char *format;
+ char *p;
+
+ if (!field) {
+ field = find_field(event, "fmt");
+ if (!field)
+ die("can't find format field for binary printk");
+ printf("field->offset = %d size=%d\n", field->offset, field->size);
+ }
+
+ addr = read_size(data + field->offset, field->size);
+
+ printk = find_printk(addr);
+ if (!printk) {
+ format = malloc_or_die(45);
+ sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n",
+ addr);
+ return format;
+ }
+
+ p = printk->printk;
+ /* Remove any quotes. */
+ if (*p == '"')
+ p++;
+ format = malloc_or_die(strlen(p) + 10);
+ sprintf(format, "%s : %s", "%pf", p);
+ /* remove ending quotes and new line since we will add one too */
+ p = format + strlen(format) - 1;
+ if (*p == '"')
+ *p = 0;
+
+ p -= 2;
+ if (strcmp(p, "\\n") == 0)
+ *p = 0;
+
+ return format;
+}
+
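+/*
+ * Print one event according to its print format, stepping through the
+ * format string and consuming one parsed argument per conversion.
+ */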
+static void pretty_print(void *data, int size, struct event *event)
+{
+ struct print_fmt *print_fmt = &event->print_fmt;
+ struct print_arg *arg = print_fmt->args;
+ struct print_arg *args = NULL;
+ const char *ptr = print_fmt->format;
+ unsigned long long val;
+ struct func_map *func;
+ const char *saveptr;
+ char *bprint_fmt = NULL;
+ char format[32];
+ int show_func;
+ int len;
+ int ls;
+
+ if (event->flags & EVENT_FL_ISFUNC)
+ ptr = " %pF <-- %pF";
+
+ if (event->flags & EVENT_FL_ISBPRINT) {
+ bprint_fmt = get_bprint_format(data, size, event);
+ args = make_bprint_args(bprint_fmt, data, size, event);
+ arg = args;
+ ptr = bprint_fmt;
+ }
+
+ for (; *ptr; ptr++) {
+ ls = 0;
+ if (*ptr == '%') {
+ saveptr = ptr;
+ show_func = 0;
+ cont_process:
+ ptr++;
+ switch (*ptr) {
+ case '%':
+ printf("%%");
+ break;
+ case 'l':
+ ls++;
+ goto cont_process;
+ case 'L':
+ ls = 2;
+ goto cont_process;
+ case 'z':
+ case 'Z':
+ case '0' ... '9':
+ goto cont_process;
+ case 'p':
+ if (long_size == 4)
+ ls = 1;
+ else
+ ls = 2;
+
+ if (*(ptr+1) == 'F' ||
+ *(ptr+1) == 'f') {
+ ptr++;
+ show_func = *ptr;
+ }
+
+ /* fall through */
+ case 'd':
+ case 'i':
+ case 'x':
+ case 'X':
+ case 'u':
+ if (!arg)
+ die("no argument match");
+
+ len = ((unsigned long)ptr + 1) -
+ (unsigned long)saveptr;
+
+ /* should never happen */
+ if (len > 32)
+ die("bad format!");
+
+ memcpy(format, saveptr, len);
+ format[len] = 0;
+
+ val = eval_num_arg(data, size, event, arg);
+ arg = arg->next;
+
+ if (show_func) {
+ func = find_func(val);
+ if (func) {
+ printf("%s", func->func);
+ if (show_func == 'F')
+ printf("+0x%llx",
+ val - func->addr);
+ break;
+ }
+ }
+ switch (ls) {
+ case 0:
+ printf(format, (int)val);
+ break;
+ case 1:
+ printf(format, (long)val);
+ break;
+ case 2:
+ printf(format, (long long)val);
+ break;
+ default:
+ die("bad count (%d)", ls);
+ }
+ break;
+ case 's':
+ if (!arg)
+ die("no matching argument");
+
+ print_str_arg(data, size, event, arg);
+ arg = arg->next;
+ break;
+ default:
+ printf(">%c<", *ptr);
+
+ }
+ } else
+ printf("%c", *ptr);
+ }
+
+ if (args) {
+ free_args(args);
+ free(bprint_fmt);
+ }
+}
+
+static inline int log10_cpu(int nb)
+{
+ if (nb / 100)
+ return 3;
+ if (nb / 10)
+ return 2;
+ return 1;
+}
+
+/* taken from Linux, written by Frederic Weisbecker */
+static void print_graph_cpu(int cpu)
+{
+ int i;
+ int log10_this = log10_cpu(cpu);
+ int log10_all = log10_cpu(cpus);
+
+
+ /*
+ * Start with a space character - to make it stand out
+ * to the right a bit when trace output is pasted into
+ * email:
+ */
+ printf(" ");
+
+ /*
+ * Tricky - we space the CPU field according to the max
+ * number of online CPUs. On a 2-cpu system it would take
+ * a maximum of 1 digit - on a 128 cpu system it would
+ * take up to 3 digits:
+ */
+ for (i = 0; i < log10_all - log10_this; i++)
+ printf(" ");
+
+ printf("%d) ", cpu);
+}
+
+#define TRACE_GRAPH_PROCINFO_LENGTH 14
+#define TRACE_GRAPH_INDENT 2
+
+static void print_graph_proc(int pid, const char *comm)
+{
+ /* sign + log10(MAX_INT) + '\0' */
+ char pid_str[11];
+ int spaces = 0;
+ int len;
+ int i;
+
+ sprintf(pid_str, "%d", pid);
+
+ /* 1 stands for the "-" character */
+ len = strlen(comm) + strlen(pid_str) + 1;
+
+ if (len < TRACE_GRAPH_PROCINFO_LENGTH)
+ spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
+
+ /* First spaces to align center */
+ for (i = 0; i < spaces / 2; i++)
+ printf(" ");
+
+ printf("%s-%s", comm, pid_str);
+
+ /* Last spaces to align center */
+ for (i = 0; i < spaces - (spaces / 2); i++)
+ printf(" ");
+}
+
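+/*
+ * If the next record on this CPU is the matching function return for
+ * the current entry (same pid and function), consume it and return it
+ * so the pair can be printed as a single leaf line.
+ */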
+static struct record *
+get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func,
+ struct record *next)
+{
+ struct format_field *field;
+ struct event *event;
+ unsigned long val;
+ int type;
+ int pid;
+
+ type = parse_common_type(next->data);
+ event = find_event(type);
+ if (!event)
+ return NULL;
+
+ if (!(event->flags & EVENT_FL_ISFUNCRET))
+ return NULL;
+
+ pid = parse_common_pid(next->data);
+ field = find_field(event, "func");
+ if (!field)
+ die("function return does not have field func");
+
+ val = read_size(next->data + field->offset, field->size);
+
+ if (cur_pid != pid || cur_func != val)
+ return NULL;
+
+ /* this is a leaf, now advance the iterator */
+ return trace_read_data(cpu);
+}
+
+/* Signal an execution time overhead to the output */
+static void print_graph_overhead(unsigned long long duration)
+{
+ /* Non nested entry or return */
+ if (duration == ~0ULL)
+ return (void)printf(" ");
+
+ /* Duration exceeded 100 msecs */
+ if (duration > 100000ULL)
+ return (void)printf("! ");
+
+ /* Duration exceeded 10 msecs */
+ if (duration > 10000ULL)
+ return (void)printf("+ ");
+
+ printf(" ");
+}
+
+static void print_graph_duration(unsigned long long duration)
+{
+ unsigned long usecs = duration / 1000;
+ unsigned long nsecs_rem = duration % 1000;
+ /* log10(ULONG_MAX) + '\0' */
+ char msecs_str[21];
+ char nsecs_str[5];
+ int len;
+ int i;
+
+ sprintf(msecs_str, "%lu", usecs);
+
+ /* Print usecs */
+ len = printf("%lu", usecs);
+
+ /* Print nsecs (we don't want to exceed 7 digits) */
+ if (len < 7) {
+ snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
+ len += printf(".%s", nsecs_str);
+ }
+
+ printf(" us ");
+
+ /* Print remaining spaces to fit the row's width */
+ for (i = len; i < 7; i++)
+ printf(" ");
+
+ printf("| ");
+}
+
+static void
+print_graph_entry_leaf(struct event *event, void *data, struct record *ret_rec)
+{
+ unsigned long long rettime, calltime;
+ unsigned long long duration, depth;
+ unsigned long long val;
+ struct format_field *field;
+ struct func_map *func;
+ struct event *ret_event;
+ int type;
+ int i;
+
+ type = parse_common_type(ret_rec->data);
+ ret_event = find_event(type);
+
+ field = find_field(ret_event, "rettime");
+ if (!field)
+ die("can't find rettime in return graph");
+ rettime = read_size(ret_rec->data + field->offset, field->size);
+
+ field = find_field(ret_event, "calltime");
+ if (!field)
+ die("can't find calltime in return graph");
+ calltime = read_size(ret_rec->data + field->offset, field->size);
+
+ duration = rettime - calltime;
+
+ /* Overhead */
+ print_graph_overhead(duration);
+
+ /* Duration */
+ print_graph_duration(duration);
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ field = find_field(event, "func");
+ if (!field)
+ die("can't find func in entry graph");
+ val = read_size(data + field->offset, field->size);
+ func = find_func(val);
+
+ if (func)
+ printf("%s();", func->func);
+ else
+ printf("%llx();", val);
+}
+
+static void print_graph_nested(struct event *event, void *data)
+{
+ struct format_field *field;
+ unsigned long long depth;
+ unsigned long long val;
+ struct func_map *func;
+ int i;
+
+ /* No overhead */
+ print_graph_overhead(-1);
+
+ /* No time */
+ printf(" | ");
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ field = find_field(event, "func");
+ if (!field)
+ die("can't find func in entry graph");
+ val = read_size(data + field->offset, field->size);
+ func = find_func(val);
+
+ if (func)
+ printf("%s() {", func->func);
+ else
+ printf("%llx() {", val);
+}
+
+static void
+pretty_print_func_ent(void *data, int size, struct event *event,
+ int cpu, int pid, const char *comm,
+ unsigned long secs, unsigned long usecs)
+{
+ struct format_field *field;
+ struct record *rec;
+ void *copy_data;
+ unsigned long val;
+
+ printf("%5lu.%06lu | ", secs, usecs);
+
+ print_graph_cpu(cpu);
+ print_graph_proc(pid, comm);
+
+ printf(" | ");
+
+ field = find_field(event, "func");
+ if (!field)
+ die("function entry does not have func field");
+
+ val = read_size(data + field->offset, field->size);
+
+ /*
+ * peek_data may unmap the data pointer. Copy it first.
+ */
+ copy_data = malloc_or_die(size);
+ memcpy(copy_data, data, size);
+ data = copy_data;
+
+ rec = trace_peek_data(cpu);
+ if (rec) {
+ rec = get_return_for_leaf(cpu, pid, val, rec);
+ if (rec) {
+ print_graph_entry_leaf(event, data, rec);
+ goto out_free;
+ }
+ }
+ print_graph_nested(event, data);
+out_free:
+ free(data);
+}
+
+static void
+pretty_print_func_ret(void *data, int size __unused, struct event *event,
+ int cpu, int pid, const char *comm,
+ unsigned long secs, unsigned long usecs)
+{
+ unsigned long long rettime, calltime;
+ unsigned long long duration, depth;
+ struct format_field *field;
+ int i;
+
+ printf("%5lu.%06lu | ", secs, usecs);
+
+ print_graph_cpu(cpu);
+ print_graph_proc(pid, comm);
+
+ printf(" | ");
+
+ field = find_field(event, "rettime");
+ if (!field)
+ die("can't find rettime in return graph");
+ rettime = read_size(data + field->offset, field->size);
+
+ field = find_field(event, "calltime");
+ if (!field)
+ die("can't find calltime in return graph");
+ calltime = read_size(data + field->offset, field->size);
+
+ duration = rettime - calltime;
+
+ /* Overhead */
+ print_graph_overhead(duration);
+
+ /* Duration */
+ print_graph_duration(duration);
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ printf("}");
+}
+
+static void
+pretty_print_func_graph(void *data, int size, struct event *event,
+ int cpu, int pid, const char *comm,
+ unsigned long secs, unsigned long usecs)
+{
+ if (event->flags & EVENT_FL_ISFUNCENT)
+ pretty_print_func_ent(data, size, event,
+ cpu, pid, comm, secs, usecs);
+ else if (event->flags & EVENT_FL_ISFUNCRET)
+ pretty_print_func_ret(data, size, event,
+ cpu, pid, comm, secs, usecs);
+ printf("\n");
+}
+
+void print_event(int cpu, void *data, int size, unsigned long long nsecs,
+ char *comm)
+{
+ struct event *event;
+ unsigned long secs;
+ unsigned long usecs;
+ int type;
+ int pid;
+
+ secs = nsecs / NSECS_PER_SEC;
+ nsecs -= secs * NSECS_PER_SEC;
+ usecs = nsecs / NSECS_PER_USEC;
+
+ type = parse_common_type(data);
+
+ event = find_event(type);
+ if (!event)
+ die("ug! no event found for type %d", type);
+
+ pid = parse_common_pid(data);
+
+ if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET))
+ return pretty_print_func_graph(data, size, event, cpu,
+ pid, comm, secs, usecs);
+
+ printf("%16s-%-5d [%03d] %5lu.%09Lu: %s: ",
+ comm, pid, cpu,
+ secs, nsecs, event->name);
+
+ pretty_print(data, size, event);
+ printf("\n");
+}
+
+static void print_fields(struct print_flag_sym *field)
+{
+ printf("{ %s, %s }", field->value, field->str);
+ if (field->next) {
+ printf(", ");
+ print_fields(field->next);
+ }
+}
+
+static void print_args(struct print_arg *args)
+{
+ int print_paren = 1;
+
+ switch (args->type) {
+ case PRINT_NULL:
+ printf("null");
+ break;
+ case PRINT_ATOM:
+ printf("%s", args->atom.atom);
+ break;
+ case PRINT_FIELD:
+ printf("REC->%s", args->field.name);
+ break;
+ case PRINT_FLAGS:
+ printf("__print_flags(");
+ print_args(args->flags.field);
+ printf(", %s, ", args->flags.delim);
+ print_fields(args->flags.flags);
+ printf(")");
+ break;
+ case PRINT_SYMBOL:
+ printf("__print_symbolic(");
+ print_args(args->symbol.field);
+ printf(", ");
+ print_fields(args->symbol.symbols);
+ printf(")");
+ break;
+ case PRINT_STRING:
+ printf("__get_str(%s)", args->string.string);
+ break;
+ case PRINT_TYPE:
+ printf("(%s)", args->typecast.type);
+ print_args(args->typecast.item);
+ break;
+ case PRINT_OP:
+ if (strcmp(args->op.op, ":") == 0)
+ print_paren = 0;
+ if (print_paren)
+ printf("(");
+ print_args(args->op.left);
+ printf(" %s ", args->op.op);
+ print_args(args->op.right);
+ if (print_paren)
+ printf(")");
+ break;
+ default:
+ /* we should warn... */
+ return;
+ }
+ if (args->next) {
+ printf("\n");
+ print_args(args->next);
+ }
+}
+
+static void parse_header_field(char *type,
+ int *offset, int *size)
+{
+ char *token;
+
+ if (read_expected(EVENT_ITEM, (char *)"field") < 0)
+ return;
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ return;
+ /* type */
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ return;
+ free_token(token);
+
+ if (read_expected(EVENT_ITEM, type) < 0)
+ return;
+ if (read_expected(EVENT_OP, (char *)";") < 0)
+ return;
+ if (read_expected(EVENT_ITEM, (char *)"offset") < 0)
+ return;
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ return;
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ return;
+ *offset = atoi(token);
+ free_token(token);
+ if (read_expected(EVENT_OP, (char *)";") < 0)
+ return;
+ if (read_expected(EVENT_ITEM, (char *)"size") < 0)
+ return;
+ if (read_expected(EVENT_OP, (char *)":") < 0)
+ return;
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ return;
+ *size = atoi(token);
+ free_token(token);
+ if (read_expected(EVENT_OP, (char *)";") < 0)
+ return;
+ if (read_expect_type(EVENT_NEWLINE, &token) < 0)
+ return;
+ free_token(token);
+}
+
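+/*
+ * Parse the header_page description to learn where the timestamp,
+ * the commit (data length) and the data area live within a ring
+ * buffer page.
+ */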
+int parse_header_page(char *buf, unsigned long size)
+{
+ init_input_buf(buf, size);
+
+ parse_header_field((char *)"timestamp", &header_page_ts_offset,
+ &header_page_ts_size);
+ parse_header_field((char *)"commit", &header_page_size_offset,
+ &header_page_size_size);
+ parse_header_field((char *)"data", &header_page_data_offset,
+ &header_page_data_size);
+
+ return 0;
+}
+
+int parse_ftrace_file(char *buf, unsigned long size)
+{
+ struct format_field *field;
+ struct print_arg *arg, **list;
+ struct event *event;
+ int ret;
+
+ init_input_buf(buf, size);
+
+ event = alloc_event();
+ if (!event)
+ return -ENOMEM;
+
+ event->flags |= EVENT_FL_ISFTRACE;
+
+ event->name = event_read_name();
+ if (!event->name)
+ die("failed to read ftrace event name");
+
+ if (strcmp(event->name, "function") == 0)
+ event->flags |= EVENT_FL_ISFUNC;
+
+ else if (strcmp(event->name, "funcgraph_entry") == 0)
+ event->flags |= EVENT_FL_ISFUNCENT;
+
+ else if (strcmp(event->name, "funcgraph_exit") == 0)
+ event->flags |= EVENT_FL_ISFUNCRET;
+
+ else if (strcmp(event->name, "bprint") == 0)
+ event->flags |= EVENT_FL_ISBPRINT;
+
+ event->id = event_read_id();
+ if (event->id < 0)
+ die("failed to read ftrace event id");
+
+ add_event(event);
+
+ ret = event_read_format(event);
+ if (ret < 0)
+ die("failed to read ftrace event format");
+
+ ret = event_read_print(event);
+ if (ret < 0)
+ die("failed to read ftrace event print fmt");
+
+ /*
+ * The arguments for ftrace files are parsed by the fields.
+ * Set up the fields as their arguments.
+ */
+ list = &event->print_fmt.args;
+ for (field = event->format.fields; field; field = field->next) {
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+ *list = arg;
+ list = &arg->next;
+ arg->type = PRINT_FIELD;
+ arg->field.name = field->name;
+ arg->field.field = field;
+ }
+ return 0;
+}
+
+int parse_event_file(char *buf, unsigned long size, char *system__unused __unused)
+{
+ struct event *event;
+ int ret;
+
+ init_input_buf(buf, size);
+
+ event = alloc_event();
+ if (!event)
+ return -ENOMEM;
+
+ event->name = event_read_name();
+ if (!event->name)
+ die("failed to read event name");
+
+ event->id = event_read_id();
+ if (event->id < 0)
+ die("failed to read event id");
+
+ ret = event_read_format(event);
+ if (ret < 0)
+ die("failed to read event format");
+
+ ret = event_read_print(event);
+ if (ret < 0)
+ die("failed to read event print fmt");
+
+#define PRINT_ARGS 0
+ if (PRINT_ARGS && event->print_fmt.args)
+ print_args(event->print_fmt.args);
+
+ add_event(event);
+ return 0;
+}
+
+void parse_set_info(int nr_cpus, int long_sz)
+{
+ cpus = nr_cpus;
+ long_size = long_sz;
+}
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
new file mode 100644
index 000000000000..a1217a10632f
--- /dev/null
+++ b/tools/perf/util/trace-event-read.c
@@ -0,0 +1,512 @@
+/*
+ * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define _LARGEFILE64_SOURCE
+
+#include <dirent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+static int input_fd;
+
+static int read_page;
+
+int file_bigendian;
+int host_bigendian;
+static int long_size;
+
+static unsigned long page_size;
+
+static int read_or_die(void *data, int size)
+{
+ int r;
+
+ r = read(input_fd, data, size);
+ if (r != size)
+ die("reading input file (size expected=%d received=%d)",
+ size, r);
+ return r;
+}
+
+static unsigned int read4(void)
+{
+ unsigned int data;
+
+ read_or_die(&data, 4);
+ return __data2host4(data);
+}
+
+static unsigned long long read8(void)
+{
+ unsigned long long data;
+
+ read_or_die(&data, 8);
+ return __data2host8(data);
+}
+
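+/*
+ * Read a NUL terminated string of unknown length from the input file,
+ * then seek back so the file offset ends up just past the terminator.
+ */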
+static char *read_string(void)
+{
+ char buf[BUFSIZ];
+ char *str = NULL;
+ int size = 0;
+ int i;
+ int r;
+
+ for (;;) {
+ r = read(input_fd, buf, BUFSIZ);
+ if (r < 0)
+ die("reading input file");
+
+ if (!r)
+ die("no data");
+
+ for (i = 0; i < r; i++) {
+ if (!buf[i])
+ break;
+ }
+ if (i < r)
+ break;
+
+ if (str) {
+ size += BUFSIZ;
+ str = realloc(str, size);
+ if (!str)
+ die("malloc of size %d", size);
+ memcpy(str + (size - BUFSIZ), buf, BUFSIZ);
+ } else {
+ size = BUFSIZ;
+ str = malloc_or_die(size);
+ memcpy(str, buf, size);
+ }
+ }
+
+ /* trailing \0: */
+ i++;
+
+ /* move the file descriptor to the end of the string */
+ r = lseek(input_fd, -(r - i), SEEK_CUR);
+ if (r < 0)
+ die("lseek");
+
+ if (str) {
+ size += i;
+ str = realloc(str, size);
+ if (!str)
+ die("malloc of size %d", size);
+ memcpy(str + (size - i), buf, i);
+ } else {
+ size = i;
+ str = malloc_or_die(i);
+ memcpy(str, buf, i);
+ }
+
+ return str;
+}
+
+static void read_proc_kallsyms(void)
+{
+ unsigned int size;
+ char *buf;
+
+ size = read4();
+ if (!size)
+ return;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+
+ parse_proc_kallsyms(buf, size);
+
+ free(buf);
+}
+
+static void read_ftrace_printk(void)
+{
+ unsigned int size;
+ char *buf;
+
+ size = read4();
+ if (!size)
+ return;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+
+ parse_ftrace_printk(buf, size);
+
+ free(buf);
+}
+
+static void read_header_files(void)
+{
+ unsigned long long size;
+ char *header_page;
+ char *header_event;
+ char buf[BUFSIZ];
+
+ read_or_die(buf, 12);
+
+ if (memcmp(buf, "header_page", 12) != 0)
+ die("did not read header page");
+
+ size = read8();
+ header_page = malloc_or_die(size);
+ read_or_die(header_page, size);
+ parse_header_page(header_page, size);
+ free(header_page);
+
+ /*
+ * The size field in the header page is a kernel long;
+ * use its size from here on, since it reflects the long size
+ * of the traced kernel.
+ */
+ long_size = header_page_size_size;
+
+ read_or_die(buf, 13);
+ if (memcmp(buf, "header_event", 13) != 0)
+ die("did not read header event");
+
+ size = read8();
+ header_event = malloc_or_die(size);
+ read_or_die(header_event, size);
+ free(header_event);
+}
+
+static void read_ftrace_file(unsigned long long size)
+{
+ char *buf;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+ parse_ftrace_file(buf, size);
+ free(buf);
+}
+
+static void read_event_file(char *sys, unsigned long long size)
+{
+ char *buf;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+ parse_event_file(buf, size, sys);
+ free(buf);
+}
+
+static void read_ftrace_files(void)
+{
+ unsigned long long size;
+ int count;
+ int i;
+
+ count = read4();
+
+ for (i = 0; i < count; i++) {
+ size = read8();
+ read_ftrace_file(size);
+ }
+}
+
+static void read_event_files(void)
+{
+ unsigned long long size;
+ char *sys;
+ int systems;
+ int count;
+ int i, x;
+
+ systems = read4();
+
+ for (i = 0; i < systems; i++) {
+ sys = read_string();
+
+ count = read4();
+ for (x = 0; x < count; x++) {
+ size = read8();
+ read_event_file(sys, size);
+ }
+ }
+}
+
+struct cpu_data {
+ unsigned long long offset;
+ unsigned long long size;
+ unsigned long long timestamp;
+ struct record *next;
+ char *page;
+ int cpu;
+ int index;
+ int page_size;
+};
+
+static struct cpu_data *cpu_data;
+
+static void update_cpu_data_index(int cpu)
+{
+ cpu_data[cpu].offset += page_size;
+ cpu_data[cpu].size -= page_size;
+ cpu_data[cpu].index = 0;
+}
+
+static void get_next_page(int cpu)
+{
+ off64_t save_seek;
+ off64_t ret;
+
+ if (!cpu_data[cpu].page)
+ return;
+
+ if (read_page) {
+ if (cpu_data[cpu].size <= page_size) {
+ free(cpu_data[cpu].page);
+ cpu_data[cpu].page = NULL;
+ return;
+ }
+
+ update_cpu_data_index(cpu);
+
+ /* other parts of the code may expect the pointer to not move */
+ save_seek = lseek64(input_fd, 0, SEEK_CUR);
+
+ ret = lseek64(input_fd, cpu_data[cpu].offset, SEEK_SET);
+ if (ret < 0)
+ die("failed to lseek");
+ ret = read(input_fd, cpu_data[cpu].page, page_size);
+ if (ret < 0)
+ die("failed to read page");
+
+ /* reset the file pointer back */
+ lseek64(input_fd, save_seek, SEEK_SET);
+
+ return;
+ }
+
+ munmap(cpu_data[cpu].page, page_size);
+ cpu_data[cpu].page = NULL;
+
+ if (cpu_data[cpu].size <= page_size)
+ return;
+
+ update_cpu_data_index(cpu);
+
+ cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
+ input_fd, cpu_data[cpu].offset);
+ if (cpu_data[cpu].page == MAP_FAILED)
+ die("failed to mmap cpu %d at offset 0x%llx",
+ cpu, cpu_data[cpu].offset);
+}
+
+static unsigned int type_len4host(unsigned int type_len_ts)
+{
+ if (file_bigendian)
+ return (type_len_ts >> 27) & ((1 << 5) - 1);
+ else
+ return type_len_ts & ((1 << 5) - 1);
+}
+
+static unsigned int ts4host(unsigned int type_len_ts)
+{
+ if (file_bigendian)
+ return type_len_ts & ((1 << 27) - 1);
+ else
+ return type_len_ts >> 5;
+}
+
+static int calc_index(void *ptr, int cpu)
+{
+ return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
+}
+
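+/*
+ * Return the next record for a CPU without consuming it.  Page
+ * headers, padding and time-extend entries are skipped, and the
+ * record timestamp is accumulated from the per-event deltas.
+ */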
+struct record *trace_peek_data(int cpu)
+{
+ struct record *data;
+ void *page = cpu_data[cpu].page;
+ int idx = cpu_data[cpu].index;
+ void *ptr = page + idx;
+ unsigned long long extend;
+ unsigned int type_len_ts;
+ unsigned int type_len;
+ unsigned int delta;
+ unsigned int length = 0;
+
+ if (cpu_data[cpu].next)
+ return cpu_data[cpu].next;
+
+ if (!page)
+ return NULL;
+
+ if (!idx) {
+ /* FIXME: handle header page */
+ if (header_page_ts_size != 8)
+ die("expected a long long type for timestamp");
+ cpu_data[cpu].timestamp = data2host8(ptr);
+ ptr += 8;
+ switch (header_page_size_size) {
+ case 4:
+ cpu_data[cpu].page_size = data2host4(ptr);
+ ptr += 4;
+ break;
+ case 8:
+ cpu_data[cpu].page_size = data2host8(ptr);
+ ptr += 8;
+ break;
+ default:
+ die("bad long size");
+ }
+ ptr = cpu_data[cpu].page + header_page_data_offset;
+ }
+
+read_again:
+ idx = calc_index(ptr, cpu);
+
+ if (idx >= cpu_data[cpu].page_size) {
+ get_next_page(cpu);
+ return trace_peek_data(cpu);
+ }
+
+ type_len_ts = data2host4(ptr);
+ ptr += 4;
+
+ type_len = type_len4host(type_len_ts);
+ delta = ts4host(type_len_ts);
+
+ switch (type_len) {
+ case RINGBUF_TYPE_PADDING:
+ if (!delta)
+ die("error, hit unexpected end of page");
+ length = data2host4(ptr);
+ ptr += 4;
+ length *= 4;
+ ptr += length;
+ goto read_again;
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ extend = data2host4(ptr);
+ ptr += 4;
+ extend <<= TS_SHIFT;
+ extend += delta;
+ cpu_data[cpu].timestamp += extend;
+ goto read_again;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ ptr += 12;
+ break;
+ case 0:
+ length = data2host4(ptr);
+ ptr += 4;
+ die("here! length=%d", length);
+ break;
+ default:
+ length = type_len * 4;
+ break;
+ }
+
+ cpu_data[cpu].timestamp += delta;
+
+ data = malloc_or_die(sizeof(*data));
+ memset(data, 0, sizeof(*data));
+
+ data->ts = cpu_data[cpu].timestamp;
+ data->size = length;
+ data->data = ptr;
+ ptr += length;
+
+ cpu_data[cpu].index = calc_index(ptr, cpu);
+ cpu_data[cpu].next = data;
+
+ return data;
+}
+
+struct record *trace_read_data(int cpu)
+{
+ struct record *data;
+
+ data = trace_peek_data(cpu);
+ cpu_data[cpu].next = NULL;
+
+ return data;
+}
+
+void trace_report(void)
+{
+ const char *input_file = "trace.info";
+ char buf[BUFSIZ];
+ char test[] = { 23, 8, 68 };
+ char *version;
+ int show_funcs = 0;
+ int show_printk = 0;
+
+ input_fd = open(input_file, O_RDONLY);
+ if (input_fd < 0)
+ die("opening '%s'\n", input_file);
+
+ read_or_die(buf, 3);
+ if (memcmp(buf, test, 3) != 0)
+ die("not a trace data file");
+
+ read_or_die(buf, 7);
+ if (memcmp(buf, "tracing", 7) != 0)
+ die("not a trace file (missing tracing)");
+
+ version = read_string();
+ printf("version = %s\n", version);
+ free(version);
+
+ read_or_die(buf, 1);
+ file_bigendian = buf[0];
+ host_bigendian = bigendian();
+
+ read_or_die(buf, 1);
+ long_size = buf[0];
+
+ page_size = read4();
+
+ read_header_files();
+
+ read_ftrace_files();
+ read_event_files();
+ read_proc_kallsyms();
+ read_ftrace_printk();
+
+ if (show_funcs) {
+ print_funcs();
+ return;
+ }
+ if (show_printk) {
+ print_printk();
+ return;
+ }
+
+ return;
+}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
new file mode 100644
index 000000000000..420294a5773e
--- /dev/null
+++ b/tools/perf/util/trace-event.h
@@ -0,0 +1,240 @@
+#ifndef _TRACE_EVENTS_H
+#define _TRACE_EVENTS_H
+
+#include "parse-events.h"
+
+#define __unused __attribute__((unused))
+
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (page_size - 1)
+#endif
+
+enum {
+ RINGBUF_TYPE_PADDING = 29,
+ RINGBUF_TYPE_TIME_EXTEND = 30,
+ RINGBUF_TYPE_TIME_STAMP = 31,
+};
+
+#ifndef TS_SHIFT
+#define TS_SHIFT 27
+#endif
+
+#define NSECS_PER_SEC 1000000000ULL
+#define NSECS_PER_USEC 1000ULL
+
+enum format_flags {
+ FIELD_IS_ARRAY = 1,
+ FIELD_IS_POINTER = 2,
+};
+
+struct format_field {
+ struct format_field *next;
+ char *type;
+ char *name;
+ int offset;
+ int size;
+ unsigned long flags;
+};
+
+struct format {
+ int nr_common;
+ int nr_fields;
+ struct format_field *common_fields;
+ struct format_field *fields;
+};
+
+struct print_arg_atom {
+ char *atom;
+};
+
+struct print_arg_string {
+ char *string;
+ int offset;
+};
+
+struct print_arg_field {
+ char *name;
+ struct format_field *field;
+};
+
+struct print_flag_sym {
+ struct print_flag_sym *next;
+ char *value;
+ char *str;
+};
+
+struct print_arg_typecast {
+ char *type;
+ struct print_arg *item;
+};
+
+struct print_arg_flags {
+ struct print_arg *field;
+ char *delim;
+ struct print_flag_sym *flags;
+};
+
+struct print_arg_symbol {
+ struct print_arg *field;
+ struct print_flag_sym *symbols;
+};
+
+struct print_arg;
+
+struct print_arg_op {
+ char *op;
+ int prio;
+ struct print_arg *left;
+ struct print_arg *right;
+};
+
+struct print_arg_func {
+ char *name;
+ struct print_arg *args;
+};
+
+enum print_arg_type {
+ PRINT_NULL,
+ PRINT_ATOM,
+ PRINT_FIELD,
+ PRINT_FLAGS,
+ PRINT_SYMBOL,
+ PRINT_TYPE,
+ PRINT_STRING,
+ PRINT_OP,
+};
+
+struct print_arg {
+ struct print_arg *next;
+ enum print_arg_type type;
+ union {
+ struct print_arg_atom atom;
+ struct print_arg_field field;
+ struct print_arg_typecast typecast;
+ struct print_arg_flags flags;
+ struct print_arg_symbol symbol;
+ struct print_arg_func func;
+ struct print_arg_string string;
+ struct print_arg_op op;
+ };
+};
+
+struct print_fmt {
+ char *format;
+ struct print_arg *args;
+};
+
+struct event {
+ struct event *next;
+ char *name;
+ int id;
+ int flags;
+ struct format format;
+ struct print_fmt print_fmt;
+};
+
+enum {
+ EVENT_FL_ISFTRACE = 1,
+ EVENT_FL_ISPRINT = 2,
+ EVENT_FL_ISBPRINT = 4,
+ EVENT_FL_ISFUNC = 8,
+ EVENT_FL_ISFUNCENT = 16,
+ EVENT_FL_ISFUNCRET = 32,
+};
+
+struct record {
+ unsigned long long ts;
+ int size;
+ void *data;
+};
+
+struct record *trace_peek_data(int cpu);
+struct record *trace_read_data(int cpu);
+
+void parse_set_info(int nr_cpus, int long_sz);
+
+void trace_report(void);
+
+void *malloc_or_die(unsigned int size);
+
+void parse_cmdlines(char *file, int size);
+void parse_proc_kallsyms(char *file, unsigned int size);
+void parse_ftrace_printk(char *file, unsigned int size);
+
+void print_funcs(void);
+void print_printk(void);
+
+int parse_ftrace_file(char *buf, unsigned long size);
+int parse_event_file(char *buf, unsigned long size, char *system);
+void print_event(int cpu, void *data, int size, unsigned long long nsecs,
+ char *comm);
+
+extern int file_bigendian;
+extern int host_bigendian;
+
+int bigendian(void);
+
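+/*
+ * Byte swap helpers: convert values read from the trace file to host
+ * byte order when the file and host endianness differ.
+ */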
+static inline unsigned short __data2host2(unsigned short data)
+{
+ unsigned short swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 8) |
+ ((data & (0xffULL << 8)) >> 8);
+
+ return swap;
+}
+
+static inline unsigned int __data2host4(unsigned int data)
+{
+ unsigned int swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 24) |
+ ((data & (0xffULL << 8)) << 8) |
+ ((data & (0xffULL << 16)) >> 8) |
+ ((data & (0xffULL << 24)) >> 24);
+
+ return swap;
+}
+
+static inline unsigned long long __data2host8(unsigned long long data)
+{
+ unsigned long long swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 56) |
+ ((data & (0xffULL << 8)) << 40) |
+ ((data & (0xffULL << 16)) << 24) |
+ ((data & (0xffULL << 24)) << 8) |
+ ((data & (0xffULL << 32)) >> 8) |
+ ((data & (0xffULL << 40)) >> 24) |
+ ((data & (0xffULL << 48)) >> 40) |
+ ((data & (0xffULL << 56)) >> 56);
+
+ return swap;
+}
+
+#define data2host2(ptr) __data2host2(*(unsigned short *)ptr)
+#define data2host4(ptr) __data2host4(*(unsigned int *)ptr)
+#define data2host8(ptr) __data2host8(*(unsigned long long *)ptr)
+
+extern int header_page_ts_offset;
+extern int header_page_ts_size;
+extern int header_page_size_offset;
+extern int header_page_size_size;
+extern int header_page_data_offset;
+extern int header_page_data_size;
+
+int parse_header_page(char *buf, unsigned long size);
+
+void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters);
+
+#endif /* _TRACE_EVENTS_H */
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 68fe157d72fb..9de2329dd44d 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -39,10 +39,6 @@
/* Approximation of the length of the decimal representation of this type. */
#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1)
-#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && !defined(_M_UNIX)
-#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */
-#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */
-#endif
#define _ALL_SOURCE 1
#define _GNU_SOURCE 1
#define _BSD_SOURCE 1
@@ -83,6 +79,7 @@
#include <inttypes.h>
#include "../../../include/linux/magic.h"
+
#ifndef NO_ICONV
#include <iconv.h>
#endif
@@ -310,6 +307,7 @@ static inline int has_extension(const char *filename, const char *ext)
#undef isspace
#undef isdigit
#undef isalpha
+#undef isprint
#undef isalnum
#undef tolower
#undef toupper
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
new file mode 100644
index 000000000000..1c15e39f99e3
--- /dev/null
+++ b/tools/perf/util/values.c
@@ -0,0 +1,230 @@
+#include <stdlib.h>
+
+#include "util.h"
+#include "values.h"
+
+void perf_read_values_init(struct perf_read_values *values)
+{
+ values->threads_max = 16;
+ values->pid = malloc(values->threads_max * sizeof(*values->pid));
+ values->tid = malloc(values->threads_max * sizeof(*values->tid));
+ values->value = malloc(values->threads_max * sizeof(*values->value));
+ if (!values->pid || !values->tid || !values->value)
+ die("failed to allocate read_values threads arrays");
+ values->threads = 0;
+
+ values->counters_max = 16;
+ values->counterrawid = malloc(values->counters_max
+ * sizeof(*values->counterrawid));
+ values->countername = malloc(values->counters_max
+ * sizeof(*values->countername));
+ if (!values->counterrawid || !values->countername)
+ die("failed to allocate read_values counters arrays");
+ values->counters = 0;
+}
+
+void perf_read_values_destroy(struct perf_read_values *values)
+{
+ int i;
+
+ if (!values->threads_max || !values->counters_max)
+ return;
+
+ for (i = 0; i < values->threads; i++)
+ free(values->value[i]);
+ free(values->pid);
+ free(values->tid);
+ free(values->counterrawid);
+ for (i = 0; i < values->counters; i++)
+ free(values->countername[i]);
+ free(values->countername);
+}
+
+static void perf_read_values__enlarge_threads(struct perf_read_values *values)
+{
+ values->threads_max *= 2;
+ values->pid = realloc(values->pid,
+ values->threads_max * sizeof(*values->pid));
+ values->tid = realloc(values->tid,
+ values->threads_max * sizeof(*values->tid));
+ values->value = realloc(values->value,
+ values->threads_max * sizeof(*values->value));
+ if (!values->pid || !values->tid || !values->value)
+ die("failed to enlarge read_values threads arrays");
+}
+
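+/*
+ * Return the index of the (pid, tid) pair, growing the arrays and
+ * allocating a per-thread counter array if the pair is not known yet.
+ */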
+static int perf_read_values__findnew_thread(struct perf_read_values *values,
+ u32 pid, u32 tid)
+{
+ int i;
+
+ for (i = 0; i < values->threads; i++)
+ if (values->pid[i] == pid && values->tid[i] == tid)
+ return i;
+
+ if (values->threads == values->threads_max)
+ perf_read_values__enlarge_threads(values);
+
+ i = values->threads++;
+ values->pid[i] = pid;
+ values->tid[i] = tid;
+ values->value[i] = malloc(values->counters_max * sizeof(**values->value));
+ if (!values->value[i])
+ die("failed to allocate read_values counters array");
+
+ return i;
+}
+
+static void perf_read_values__enlarge_counters(struct perf_read_values *values)
+{
+ int i;
+
+ values->counters_max *= 2;
+ values->counterrawid = realloc(values->counterrawid,
+ values->counters_max * sizeof(*values->counterrawid));
+ values->countername = realloc(values->countername,
+ values->counters_max * sizeof(*values->countername));
+ if (!values->counterrawid || !values->countername)
+ die("failed to enlarge read_values counters arrays");
+
+ for (i = 0; i < values->threads; i++) {
+ values->value[i] = realloc(values->value[i],
+ values->counters_max * sizeof(**values->value));
+ if (!values->value[i])
+ die("failed to enlarge read_values counters arrays");
+ }
+}
+
+static int perf_read_values__findnew_counter(struct perf_read_values *values,
+ u64 rawid, const char *name)
+{
+ int i;
+
+ for (i = 0; i < values->counters; i++)
+ if (values->counterrawid[i] == rawid)
+ return i;
+
+ if (values->counters == values->counters_max)
+ perf_read_values__enlarge_counters(values);
+
+ i = values->counters++;
+ values->counterrawid[i] = rawid;
+ values->countername[i] = strdup(name);
+
+ return i;
+}
+
+void perf_read_values_add_value(struct perf_read_values *values,
+ u32 pid, u32 tid,
+ u64 rawid, const char *name, u64 value)
+{
+ int tindex, cindex;
+
+ tindex = perf_read_values__findnew_thread(values, pid, tid);
+ cindex = perf_read_values__findnew_counter(values, rawid, name);
+
+ values->value[tindex][cindex] = value;
+}
+
+static void perf_read_values__display_pretty(FILE *fp,
+ struct perf_read_values *values)
+{
+ int i, j;
+ int pidwidth, tidwidth;
+ int *counterwidth;
+
+ counterwidth = malloc(values->counters * sizeof(*counterwidth));
+ if (!counterwidth)
+ die("failed to allocate counterwidth array");
+ tidwidth = 3;
+ pidwidth = 3;
+ for (j = 0; j < values->counters; j++)
+ counterwidth[j] = strlen(values->countername[j]);
+ for (i = 0; i < values->threads; i++) {
+ int width;
+
+ width = snprintf(NULL, 0, "%d", values->pid[i]);
+ if (width > pidwidth)
+ pidwidth = width;
+ width = snprintf(NULL, 0, "%d", values->tid[i]);
+ if (width > tidwidth)
+ tidwidth = width;
+ for (j = 0; j < values->counters; j++) {
+ width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+ if (width > counterwidth[j])
+ counterwidth[j] = width;
+ }
+ }
+
+ fprintf(fp, "# %*s %*s", pidwidth, "PID", tidwidth, "TID");
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*s", counterwidth[j], values->countername[j]);
+ fprintf(fp, "\n");
+
+ for (i = 0; i < values->threads; i++) {
+ fprintf(fp, " %*d %*d", pidwidth, values->pid[i],
+ tidwidth, values->tid[i]);
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*Lu",
+ counterwidth[j], values->value[i][j]);
+ fprintf(fp, "\n");
+ }
+}
+
+static void perf_read_values__display_raw(FILE *fp,
+ struct perf_read_values *values)
+{
+ int width, pidwidth, tidwidth, namewidth, rawwidth, countwidth;
+ int i, j;
+
+ tidwidth = 3; /* TID */
+ pidwidth = 3; /* PID */
+ namewidth = 4; /* "Name" */
+ rawwidth = 3; /* "Raw" */
+ countwidth = 5; /* "Count" */
+
+ for (i = 0; i < values->threads; i++) {
+ width = snprintf(NULL, 0, "%d", values->pid[i]);
+ if (width > pidwidth)
+ pidwidth = width;
+ width = snprintf(NULL, 0, "%d", values->tid[i]);
+ if (width > tidwidth)
+ tidwidth = width;
+ }
+ for (j = 0; j < values->counters; j++) {
+ width = strlen(values->countername[j]);
+ if (width > namewidth)
+ namewidth = width;
+ width = snprintf(NULL, 0, "%llx", values->counterrawid[j]);
+ if (width > rawwidth)
+ rawwidth = width;
+ }
+ for (i = 0; i < values->threads; i++) {
+ for (j = 0; j < values->counters; j++) {
+ width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+ if (width > countwidth)
+ countwidth = width;
+ }
+ }
+
+ fprintf(fp, "# %*s %*s %*s %*s %*s\n",
+ pidwidth, "PID", tidwidth, "TID",
+ namewidth, "Name", rawwidth, "Raw",
+ countwidth, "Count");
+ for (i = 0; i < values->threads; i++)
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*d %*d %*s %*llx %*Lu\n",
+ pidwidth, values->pid[i],
+ tidwidth, values->tid[i],
+ namewidth, values->countername[j],
+ rawwidth, values->counterrawid[j],
+ countwidth, values->value[i][j]);
+}
+
+void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw)
+{
+ if (raw)
+ perf_read_values__display_raw(fp, values);
+ else
+ perf_read_values__display_pretty(fp, values);
+}
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
new file mode 100644
index 000000000000..cadf8cf2a590
--- /dev/null
+++ b/tools/perf/util/values.h
@@ -0,0 +1,27 @@
+#ifndef _PERF_VALUES_H
+#define _PERF_VALUES_H
+
+#include "types.h"
+
+struct perf_read_values {
+ int threads;
+ int threads_max;
+ u32 *pid, *tid;
+ int counters;
+ int counters_max;
+ u64 *counterrawid;
+ char **countername;
+ u64 **value;
+};
+
+void perf_read_values_init(struct perf_read_values *values);
+void perf_read_values_destroy(struct perf_read_values *values);
+
+void perf_read_values_add_value(struct perf_read_values *values,
+ u32 pid, u32 tid,
+ u64 rawid, const char *name, u64 value);
+
+void perf_read_values_display(FILE *fp, struct perf_read_values *values,
+ int raw);
+
+#endif /* _PERF_VALUES_H */