153 files changed, 1421 insertions, 824 deletions
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6d..9d0ac091a52a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6f7e3a68803a..563cb27e37f5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -161,6 +161,8 @@ good_area:
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 8c1febd7e3f2..c108bb451337 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -166,12 +166,12 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		ethphy1: ethernet-phy@0 {
-			reg = <0>;
+		ethphy1: ethernet-phy@1 {
+			reg = <1>;
 		};
 
-		ethphy2: ethernet-phy@1 {
-			reg = <1>;
+		ethphy2: ethernet-phy@2 {
+			reg = <2>;
 		};
 	};
 };
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 0eca93327195..d223a8b57c1e 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -142,6 +142,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 1790f22e71a2..2686a7aa8ec8 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -176,6 +176,8 @@ retry:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 9a66372fc7c7..ec4917ddf678 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7225dad87094..ba5ba7accd0d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -172,6 +172,8 @@ retry:
 		 */
 		if (fault & VM_FAULT_OOM) {
 			goto out_of_memory;
+		} else if (fault & VM_FAULT_SIGSEGV) {
+			goto bad_area;
 		} else if (fault & VM_FAULT_SIGBUS) {
 			signal = SIGBUS;
 			goto bad_area;
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e9c6a8014bd6..e3d4d4890104 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -200,6 +200,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2bd7487440c4..b2f04aee46ec 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -145,6 +145,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto map_err;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto bus_err;
 		BUG();
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 332680e5ebf2..2de5dc695a87 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -141,6 +141,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index fa4cf52aa7a6..d46a5ebb7570 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -224,6 +224,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42bb1849..70ab5d664332 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -158,6 +158,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 3516cbdf1ee9..0c2cc5d39c8e 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -262,6 +262,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 15a0bb5fc06d..34429d5a0ccd 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -135,6 +135,8 @@ survive:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0703acf7d327..230ac20ae794 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -171,6 +171,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 3ca9c1131cfe..e5120e653240 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -256,6 +256,8 @@ good_area:
 		 */
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto bad_area;
 		BUG();
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 5a236f082c78..1b5305d4bdab 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
 			goto out_unlock;
-		} else if (*flt & VM_FAULT_SIGBUS) {
+		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
 			ret = -EFAULT;
 			goto out_unlock;
 		}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index eb79907f34fa..6154b0a2b063 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -437,6 +437,8 @@ good_area:
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+		if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		rc = mm_fault_error(regs, address, fault);
 		if (rc >= MM_FAULT_RETURN)
 			goto bail;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index b700a329c31d..d2de7d5d7574 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
 	 * all cpus at boot. Get these reg values of current cpu and use the
 	 * same accross all cpus.
	 */
-	uint64_t lpcr_val = mfspr(SPRN_LPCR);
+	uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
 	uint64_t hid0_val = mfspr(SPRN_HID0);
 	uint64_t hid1_val = mfspr(SPRN_HID1);
 	uint64_t hid4_val = mfspr(SPRN_HID4);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 5b150f0c5df9..13c6e200b24e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
 	args.token = rtas_token("set-indicator");
 	if (args.token == RTAS_UNKNOWN_SERVICE)
 		return;
+	args.token = cpu_to_be32(args.token);
 	args.nargs = cpu_to_be32(3);
 	args.nret = cpu_to_be32(1);
 	args.rets = &args.args[3];
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 811937bb90be..9065d5aa3932 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 				do_no_context(regs);
 			else
 				pagefault_out_of_memory();
+		} else if (fault & VM_FAULT_SIGSEGV) {
+			/* Kernel mode? Handle exceptions or die */
+			if (!user_mode(regs))
+				do_no_context(regs);
+			else
+				do_sigsegv(regs, SEGV_MAPERR);
 		} else if (fault & VM_FAULT_SIGBUS) {
 			/* Kernel mode? Handle exceptions or die */
 			if (!user_mode(regs))
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 52238983527d..6860beb2a280 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -114,6 +114,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 541dc6101508..a58fec9b55e0 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	} else {
 		if (fault & VM_FAULT_SIGBUS)
 			do_sigbus(regs, error_code, address);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area(regs, error_code, address);
 		else
 			BUG();
 	}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 908e8c17c902..70d817154fe8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -249,6 +249,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 18fcd7167095..479823249429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -446,6 +446,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 565e25a98334..0f61a73534e6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -442,6 +442,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5678c3571e7c..209617302df8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -80,6 +80,8 @@ good_area:
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
+			} else if (fault & VM_FAULT_SIGSEGV) {
+				goto out;
 			} else if (fault & VM_FAULT_SIGBUS) {
 				err = -EACCES;
 				goto out;
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index d999398928bc..ad754b4411f7 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO) 	:= lzo
 suffix-$(CONFIG_KERNEL_LZ4) 	:= lz4
 
 RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
-	     perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+	     $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
 
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 38dcec403b46..e3ff27a5b634 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
 			     VM_FAULT_HWPOISON_LARGE))
 			do_sigbus(regs, error_code, address, fault);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area_nosemaphore(regs, error_code, address);
 		else
 			BUG();
 	}
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
deleted file mode 100644
index 23210baade2d..000000000000
--- a/arch/x86/tools/calc_run_size.pl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/perl
-#
-# Calculate the amount of space needed to run the kernel, including room for
-# the .bss and .brk sections.
-#
-# Usage:
-# objdump -h a.out | perl calc_run_size.pl
-use strict;
-
-my $mem_size = 0;
-my $file_offset = 0;
-
-my $sections=" *[0-9]+ \.(?:bss|brk) +";
-while (<>) {
-	if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
-		my $size = hex($1);
-		my $offset = hex($2);
-		$mem_size += $size;
-		if ($file_offset == 0) {
-			$file_offset = $offset;
-		} elsif ($file_offset != $offset) {
-			# BFD linker shows the same file offset in ELF.
-			# Gold linker shows them as consecutive.
-			next if ($file_offset + $mem_size == $offset + $size);
-
-			printf STDERR "file_offset: 0x%lx\n", $file_offset;
-			printf STDERR "mem_size: 0x%lx\n", $mem_size;
-			printf STDERR "offset: 0x%lx\n", $offset;
-			printf STDERR "size: 0x%lx\n", $size;
-
-			die ".bss and .brk are non-contiguous\n";
-		}
-	}
-}
-
-if ($file_offset == 0) {
-	die "Never found .bss or .brk file offset\n";
-}
-printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
new file mode 100644
index 000000000000..1a4c17bb3910
--- /dev/null
+++ b/arch/x86/tools/calc_run_size.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | sh calc_run_size.sh
+
+NUM='\([0-9a-fA-F]*[ \t]*\)'
+OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
+if [ -z "$OUT" ] ; then
+	echo "Never found .bss or .brk file offset" >&2
+	exit 1
+fi
+
+OUT=$(echo ${OUT# })
+sizeA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+sizeB=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetB=$(printf "%d" 0x${OUT%% *})
+
+run_size=$(( $offsetA + $sizeA + $sizeB ))
+
+# BFD linker shows the same file offset in ELF.
+if [ "$offsetA" -ne "$offsetB" ] ; then
+	# Gold linker shows them as consecutive.
+	endB=$(( $offsetB + $sizeB ))
+	if [ "$endB" != "$run_size" ] ; then
+		printf "sizeA: 0x%x\n" $sizeA >&2
+		printf "offsetA: 0x%x\n" $offsetA >&2
+		printf "sizeB: 0x%x\n" $sizeB >&2
+		printf "offsetB: 0x%x\n" $offsetB >&2
+		echo ".bss and .brk are non-contiguous" >&2
+		exit 1
+	fi
+fi
+
+printf "%d\n" $run_size
+exit 0
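[Editor's note] The replacement script keeps the Perl tool's rule: the kernel's run size is the .bss/.brk file offset plus both section sizes, for either linker layout. A C restatement of that rule follows as a hypothetical helper (not part of the patch), assuming the BFD case where both sections report one shared file offset and the gold case where they are consecutive:

#include <stdint.h>

/* Hypothetical restatement of calc_run_size.sh: given the .bss and .brk
 * sizes/file offsets from "objdump -h", the run size is
 * offsetA + sizeA + sizeB.  BFD reports one shared offset; gold reports
 * consecutive sections, which must end exactly at that run size.
 */
int64_t calc_run_size(uint64_t size_a, uint64_t off_a,
		      uint64_t size_b, uint64_t off_b)
{
	uint64_t run_size = off_a + size_a + size_b;

	if (off_a != off_b && off_b + size_b != run_size)
		return -1;	/* .bss and .brk are non-contiguous */
	return run_size;
}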
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b57c4f91f487..9e3571a6535c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -117,6 +117,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
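[Editor's note] Every fault-handler hunk above adds the same branch: handle_mm_fault() can now return VM_FAULT_SIGSEGV, and each caller must route it to its segfault path instead of falling through to BUG() or misreporting SIGBUS. A minimal sketch of the shared dispatch (label names vary per architecture; alpha's are used here):

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* let the OOM machinery decide */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new: deliver SIGSEGV (SEGV_MAPERR on s390) */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;		/* deliver SIGBUS */
		BUG();				/* no other error bits are expected */
	}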
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3ec85dfce124..8a86b62466f7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
  * If an image has a non-zero parent overlap, get a reference to its
  * parent.
  *
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened.  We
- * drop it again if there is no overlap.
- *
  * Returns true if the rbd device has a parent with a non-zero
  * overlap and a reference for it was successfully taken, or
  * false otherwise.
  */
 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
 {
-	int counter;
+	int counter = 0;
 
 	if (!rbd_dev->parent_spec)
 		return false;
 
-	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
-	if (counter > 0 && rbd_dev->parent_overlap)
-		return true;
-
-	/* Image was flattened, but parent is not yet torn down */
+	down_read(&rbd_dev->header_rwsem);
+	if (rbd_dev->parent_overlap)
+		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+	up_read(&rbd_dev->header_rwsem);
 
 	if (counter < 0)
 		rbd_warn(rbd_dev, "parent reference overflow");
 
-	return false;
+	return counter > 0;
 }
 
 /*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 		 */
 		if (rbd_dev->parent_overlap) {
 			rbd_dev->parent_overlap = 0;
-			smp_mb();
 			rbd_dev_parent_put(rbd_dev);
 			pr_info("%s: clone image has been flattened\n",
 				rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	 * treat it specially.
 	 */
 	rbd_dev->parent_overlap = overlap;
-	smp_mb();
 
 	if (!overlap) {
 
 		/* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
 	struct rbd_image_header	*header;
 
-	/* Drop parent reference unless it's already been done (or none) */
-
-	if (rbd_dev->parent_overlap)
-		rbd_dev_parent_put(rbd_dev);
+	rbd_dev_parent_put(rbd_dev);
 
 	/* Free dynamic fields from the header, then zero it out */
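[Editor's note] The rbd hunks all serve one change of locking model: parent_overlap and parent_ref are kept consistent by header_rwsem rather than by smp_mb() pairs, so the reference is only taken while the overlap is known to be non-zero. The core of the pattern, as it appears in the patch (atomic_inc_return_safe() is rbd's saturating increment helper):

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)	/* flag is written under the same rwsem */
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);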
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 633532a2e7ec..25bc47f3c1cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->shared_resources = *gpu_resources;
 
 	/* calculate max size of mqds needed for queues */
-	size = max_num_of_processes *
-		max_num_of_queues_per_process *
-		kfd->device_info->mqd_size_aligned;
+	size = max_num_of_queues_per_device *
+			kfd->device_info->mqd_size_aligned;
 
 	/* add another 512KB for all other allocations on gart */
 	size += 512 * 1024;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 30c8fda9622e..0d8694f015c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
 	mutex_lock(&dqm->lock);
 
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		mutex_unlock(&dqm->lock);
+		return -EPERM;
+	}
+
 	if (list_empty(&qpd->queues_list)) {
 		retval = allocate_vmid(dqm, qpd, q);
 		if (retval != 0) {
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	list_add(&q->list, &qpd->queues_list);
 	dqm->queue_count++;
 
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	mutex_unlock(&dqm->lock);
 	return 0;
 }
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 	if (list_empty(&qpd->queues_list))
 		deallocate_vmid(dqm, qpd, q);
 	dqm->queue_count--;
+
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 out:
 	mutex_unlock(&dqm->lock);
 	return retval;
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
 	for (i = 0; i < pipes_num; i++) {
 		inx = i + first_pipe;
+		/*
+		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
+		 * space in GTT for pipelines we don't initialize
+		 */
 		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
 		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
 		/* = log2(bytes/4)-1 */
-		kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
 				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
 	}
 
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
 
 	pr_debug("kfd: In %s\n", __func__);
 
-	retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
 	if (retval != 0)
 		return retval;
 
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	pr_debug("kfd: In func %s\n", __func__);
 
 	mutex_lock(&dqm->lock);
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		mutex_unlock(&dqm->lock);
+		return -EPERM;
+	}
+
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	list_add(&kq->list, &qpd->priv_queue_list);
 	dqm->queue_count++;
 	qpd->is_debug = true;
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	dqm->queue_count--;
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, false);
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type.
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
 
 	mutex_unlock(&dqm->lock);
 }
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
 	mutex_lock(&dqm->lock);
 
+	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+				dqm->total_queue_count);
+		retval = -EPERM;
+		goto out;
+	}
+
 	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
 	if (mqd == NULL) {
 		mutex_unlock(&dqm->lock);
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 		retval = execute_queues_cpsch(dqm, false);
 	}
 
+	/*
+	 * Unconditionally increment this counter, regardless of the queue's
+	 * type or whether the queue is active.
+	 */
+	dqm->total_queue_count++;
+
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 out:
 	mutex_unlock(&dqm->lock);
 	return retval;
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 
+	/*
+	 * Unconditionally decrement this counter, regardless of the queue's
+	 * type
+	 */
+	dqm->total_queue_count--;
+	pr_debug("Total of %d queues are accountable so far\n",
+			dqm->total_queue_count);
+
 	mutex_unlock(&dqm->lock);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8ae35..52035bf0c1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -130,6 +130,7 @@ struct device_queue_manager {
 	struct list_head	queues;
 	unsigned int		processes_count;
 	unsigned int		queue_count;
+	unsigned int		total_queue_count;
 	unsigned int		next_pipe_to_allocate;
 	unsigned int		*allocated_queues;
 	unsigned int		vmid_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 95d5af138e6e..a8be6df85347 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
 MODULE_PARM_DESC(sched_policy,
 	"Kernel cmdline parameter that defines the amdkfd scheduling policy");
 
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
-	"Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
-	"Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
 bool kgd2kfd_init(unsigned interface_version,
 		  const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
 	}
 
 	/* Verify module parameters */
-	if ((max_num_of_processes < 0) ||
-		(max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
-		pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
-		return -1;
-	}
-
-	if ((max_num_of_queues_per_process < 0) ||
-		(max_num_of_queues_per_process >
-			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
-		pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+	if ((max_num_of_queues_per_device < 0) ||
+		(max_num_of_queues_per_device >
+			KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+		pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
 		return -1;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 4c25ef504f79..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
 
 int kfd_pasid_init(void)
 {
-	pasid_limit = max_num_of_processes;
+	pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
 
 	pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
 	if (!pasid_bitmap)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b3dc13c83169..96dc10e8904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
 #define kfd_alloc_struct(ptr_to_struct)	\
 	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
 
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
 #define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
 
 /*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
  */
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
 
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
+	(KFD_MAX_NUM_OF_PROCESSES *			\
+			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 
 #define KFD_KERNEL_QUEUE_SIZE 2048
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 47526780d736..f37cf5efe642 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
 	pr_debug("kfd: in %s\n", __func__);
 
 	found = find_first_zero_bit(pqm->queue_slot_bitmap,
-			max_num_of_queues_per_process);
+			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 
 	pr_debug("kfd: the new slot id %lu\n", found);
 
-	if (found >= max_num_of_queues_per_process) {
+	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
 		pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
 				pqm->process->pasid);
 		return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 
 	INIT_LIST_HEAD(&pqm->queues);
 	pqm->queue_slot_bitmap =
-			kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+			kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
 					BITS_PER_BYTE), GFP_KERNEL);
 	if (pqm->queue_slot_bitmap == NULL)
 		return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		pqn->kq = NULL;
 		retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
 						&q->properties.vmid);
+		pr_debug("DQM returned %d for create_queue\n", retval);
 		print_queue(q);
 		break;
 	case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	}
 
 	if (retval != 0) {
-		pr_err("kfd: error dqm create queue\n");
+		pr_debug("Error dqm create queue\n");
 		goto err_create_queue;
 	}
 
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 err_create_queue:
 	kfree(pqn);
 err_allocate_pqn:
+	/* check if queues list is empty unregister process from device */
 	clear_bit(*qid, pqm->queue_slot_bitmap);
+	if (list_empty(&pqm->queues))
+		dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
 	return retval;
 }
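[Editor's note] Taken together, the amdkfd changes replace the per-process queue cap with a per-device one: every create path refuses new queues once total_queue_count reaches max_num_of_queues_per_device (default 4096; hard ceiling KFD_MAX_NUM_OF_QUEUES_PER_DEVICE = 512 processes x 1024 queues = 524288). A condensed sketch of the guard that now brackets each create path, using the names from the patch:

	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		/* refuse: the device-wide queue budget is exhausted */
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}
	/* ... create the queue ... */
	dqm->total_queue_count++;	/* counted regardless of type or active state */
	mutex_unlock(&dqm->lock);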
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cf775a4449c1..dc386ebe5193 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
+static void remove_from_modeset(struct drm_mode_set *set,
+		struct drm_connector *connector)
+{
+	int i, j;
+
+	for (i = 0; i < set->num_connectors; i++) {
+		if (set->connectors[i] == connector)
+			break;
+	}
+
+	if (i == set->num_connectors)
+		return;
+
+	for (j = i + 1; j < set->num_connectors; j++) {
+		set->connectors[j - 1] = set->connectors[j];
+	}
+	set->num_connectors--;
+
+	/* because i915 is pissy about this..
+	 * TODO maybe need to makes sure we set it back to !=NULL somewhere?
+	 */
+	if (set->num_connectors == 0)
+		set->fb = NULL;
+}
+
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector)
 {
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 	}
 	fb_helper->connector_count--;
 	kfree(fb_helper_connector);
+
+	/* also cleanup dangling references to the connector: */
+	for (i = 0; i < fb_helper->crtc_count; i++)
+		remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
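[Editor's note] remove_from_modeset() is a linear search followed by shift-down compaction of the connector array. A standalone illustration of the same idiom, with hypothetical generic types rather than the DRM structures:

#include <stddef.h>

/* Remove one element from a pointer array by shifting the tail down;
 * mirrors the compaction done in remove_from_modeset().
 */
static void array_remove(void **arr, size_t *count, void *victim)
{
	size_t i, j;

	for (i = 0; i < *count; i++)
		if (arr[i] == victim)
			break;
	if (i == *count)
		return;			/* not present */
	for (j = i + 1; j < *count; j++)
		arr[j - 1] = arr[j];	/* shift the tail down */
	(*count)--;
}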
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
 struct tda998x_priv {
 	struct i2c_client *cec;
 	struct i2c_client *hdmi;
+	struct mutex mutex;
+	struct delayed_work dwork;
 	uint16_t rev;
 	uint8_t current_page;
 	int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
 	uint8_t addr = REG2ADDR(reg);
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	ret = i2c_master_send(client, &addr, sizeof(addr));
 	if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
 	if (ret < 0)
 		goto fail;
 
-	return ret;
+	goto out;
 
 fail:
 	dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 	return ret;
 }
 
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
 	buf[0] = REG2ADDR(reg);
 	memcpy(&buf[1], p, cnt);
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, cnt + 1);
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
 	uint8_t buf[] = {REG2ADDR(reg), val};
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
 	uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
 	int ret;
 
+	mutex_lock(&priv->mutex);
 	ret = set_page(priv, reg);
 	if (ret < 0)
-		return;
+		goto out;
 
 	ret = i2c_master_send(client, buf, sizeof(buf));
 	if (ret < 0)
 		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+	mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
 	reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }
 
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct tda998x_priv *priv =
+			container_of(dwork, struct tda998x_priv, dwork);
+
+	if (priv->encoder && priv->encoder->dev)
+		drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
 /*
  * only 2 interrupts may occur: screen plug/unplug and EDID read
  */
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
 		priv->wq_edid_wait = 0;
 		wake_up(&priv->wq_edid);
 	} else if (cec != 0) {			/* HPD change */
-		if (priv->encoder && priv->encoder->dev)
-			drm_helper_hpd_irq_event(priv->encoder->dev);
+		schedule_delayed_work(&priv->dwork, HZ/10);
 	}
 	return IRQ_HANDLED;
 }
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
 	/* disable all IRQs and free the IRQ handler */
 	cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
 	reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-	if (priv->hdmi->irq)
+	if (priv->hdmi->irq) {
 		free_irq(priv->hdmi->irq, priv);
+		cancel_delayed_work_sync(&priv->dwork);
+	}
 
 	i2c_unregister_device(priv->cec);
 }
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	struct device_node *np = client->dev.of_node;
 	u32 video;
 	int rev_lo, rev_hi, ret;
+	unsigned short cec_addr;
 
 	priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
 	priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
 	priv->current_page = 0xff;
 	priv->hdmi = client;
-	priv->cec = i2c_new_dummy(client->adapter, 0x34);
+	/* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+	cec_addr = 0x34 + (client->addr & 0x03);
+	priv->cec = i2c_new_dummy(client->adapter, cec_addr);
 	if (!priv->cec)
 		return -ENODEV;
 
 	priv->dpms = DRM_MODE_DPMS_OFF;
 
+	mutex_init(&priv->mutex);	/* protect the page access */
+
 	/* wake up the device: */
 	cec_write(priv, REG_CEC_ENAMODS,
 			CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 	if (client->irq) {
 		int irqf_trigger;
 
-		/* init read EDID waitqueue */
+		/* init read EDID waitqueue and HDP work */
 		init_waitqueue_head(&priv->wq_edid);
+		INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
 
 		/* clear pending interrupts */
 		reg_read(priv, REG_INT_FLAGS_0);
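[Editor's note] Two independent fixes land in tda998x_drv.c: every paged register access now holds priv->mutex so that set_page() and the following I2C transaction are atomic, and hotplug events are debounced through a delayed work item instead of being handled directly in the IRQ thread. The teardown ordering matters; a sketch of the lifecycle, using the function names from the patch:

	/* probe: the work item must exist before the IRQ can fire */
	INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);

	/* IRQ thread: debounce plug/unplug by ~100 ms instead of calling
	 * the DRM hotplug helpers directly */
	schedule_delayed_work(&priv->dwork, HZ / 10);

	/* teardown: free the IRQ first so no new work can be queued,
	 * then wait for any queued work to finish */
	free_irq(priv->hdmi->irq, priv);
	cancel_delayed_work_sync(&priv->dwork);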
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 574057cd1d09..7643300828c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(IS_HSW_ULT(dev));
-			} else if (IS_BROADWELL(dev)) {
-				dev_priv->pch_type = PCH_LPT;
-				dev_priv->pch_id =
-					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-				DRM_DEBUG_KMS("This is Broadwell, assuming "
-					      "LynxPoint LP PCH\n");
+				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-				WARN_ON(!IS_HASWELL(dev));
-				WARN_ON(!IS_HSW_ULT(dev));
+				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
 			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_SPT;
 				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e9f891c432f8..9d7a7155bf02 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table {
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
-				 ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
-				 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 76354d3ba925..5f614828d365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		u32 size = i915_gem_obj_ggtt_size(obj);
 		uint64_t val;
 
+		/* Adjust fence size to match tiled area */
+		if (obj->tiling_mode != I915_TILING_NONE) {
+			uint32_t row_size = obj->stride *
+				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+			size = (size / row_size) * row_size;
+		}
+
 		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
 				 0xfffff000) << 32;
 		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
 	for (i = 0; i < NUM_L3_SLICES(dev); i++)
 		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
-	/*
-	 * XXX: Contexts should only be initialized once. Doing a switch to the
-	 * default context switch however is something we'd like to do after
-	 * reset or thaw (the latter may not actually be necessary for HW, but
-	 * goes with our code better). Context switching requires rings (for
-	 * the do_switch), but before enabling PPGTT. So don't move this.
-	 */
-	ret = i915_gem_context_enable(dev_priv);
+	ret = i915_ppgtt_init_hw(dev);
 	if (ret && ret != -EIO) {
-		DRM_ERROR("Context enable failed %d\n", ret);
+		DRM_ERROR("PPGTT enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
-
-		return ret;
 	}
 
-	ret = i915_ppgtt_init_hw(dev);
+	ret = i915_gem_context_enable(dev_priv);
 	if (ret && ret != -EIO) {
-		DRM_ERROR("PPGTT enable failed %d\n", ret);
+		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
+
+		return ret;
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..dfb783a8f2c3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 
 	WARN_ON(panel->backlight.max == 0);
 
-	if (panel->backlight.level == 0) {
+	if (panel->backlight.level <= panel->backlight.min) {
 		panel->backlight.level = panel->backlight.max;
 		if (panel->backlight.device)
 			panel->backlight.device->props.brightness =
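[Editor's note] The i965 fence fix rounds the fence window down to a whole number of tile rows, because an object's GGTT size can exceed its tiled area. Worked numbers for a hypothetical object (not taken from the patch):

	/* hypothetical: 4 KiB stride, Y-tiled => 32 scanlines per tile row */
	uint32_t stride = 4096;
	uint32_t row_size = stride * 32;	/* 131072 bytes per tile row */
	uint32_t size = 200704;			/* GGTT size of the object */
	size = (size / row_size) * row_size;	/* 200704/131072 = 1 row -> 131072 */

The fence then never covers the untiled tail beyond the last complete row.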
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index a0133c74f4cf..42cd0cffe210 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
 		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 			if (flags & R600_PTE_SYSTEM) {
 				value = radeon_vm_map_gart(rdev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
 			} else if (flags & R600_PTE_VALID) {
 				value = addr;
 			} else {
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 4be2bb7cbef3..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
 		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 			if (flags & R600_PTE_SYSTEM) {
 				value = radeon_vm_map_gart(rdev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
 			} else if (flags & R600_PTE_VALID) {
 				value = addr;
 			} else {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 		return r;
 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
 	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
 	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
 	return radeon_gart_table_ram_alloc(rdev);
 }
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
 	WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+	return addr;
+}
+
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr, uint32_t flags)
+			    uint64_t entry)
 {
 	u32 *gtt = rdev->gart.ptr;
-	gtt[i] = cpu_to_le32(lower_32_bits(addr));
+	gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)
 
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-			      uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-	void __iomem *ptr = rdev->gart.ptr;
-
 	addr = (lower_32_bits(addr) >> 8) |
 		((upper_32_bits(addr) & 0xff) << 24);
 	if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
 		addr |= R300_PTE_WRITEABLE;
 	if (!(flags & RADEON_GART_PAGE_SNOOP))
 		addr |= R300_PTE_UNSNOOPED;
+	return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+			      uint64_t entry)
+{
+	void __iomem *ptr = rdev->gart.ptr;
+
 	/* on x86 we want this to be CPU endian, on powerpc
 	 * on powerpc without HW swappers, it'll get swapped on way
 	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
-	writel(addr, ((void __iomem *)ptr) + (i * 4));
+	writel(entry, ((void __iomem *)ptr) + (i * 4));
 }
 
 int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
 		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
 	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
 	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
 	return radeon_gart_table_vram_alloc(rdev);
 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b837afa..3f2a8d3febca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
  * Dummy page
  */
 struct radeon_dummy_page {
+	uint64_t	entry;
 	struct page	*page;
 	dma_addr_t	addr;
 };
@@ -645,7 +646,7 @@ struct radeon_gart {
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
 	struct page			**pages;
-	dma_addr_t			*pages_addr;
+	uint64_t			*pages_entry;
 	bool				ready;
 };
 
@@ -1847,8 +1848,9 @@ struct radeon_asic {
 	/* gart */
 	struct {
 		void (*tlb_flush)(struct radeon_device *rdev);
+		uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
 		void (*set_page)(struct radeon_device *rdev, unsigned i,
-				 uint64_t addr, uint32_t flags);
+				 uint64_t entry);
 	} gart;
 	struct {
 		int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 121aff6a3b41..ed0e10eee2dc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
 		DRM_INFO("Forcing AGP to PCIE mode\n");
 		rdev->flags |= RADEON_IS_PCIE;
 		rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+		rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
 		rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
 	} else {
 		DRM_INFO("Forcing AGP to PCI mode\n");
 		rdev->flags |= RADEON_IS_PCI;
 		rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+		rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
 		rdev->asic->gart.set_page = &r100_pci_gart_set_page;
 	}
 	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
 	.mc_wait_for_idle = &r100_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
 		.set_page = &r100_pci_gart_set_page,
 	},
 	.ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
 	.mc_wait_for_idle = &r100_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
 		.set_page = &r100_pci_gart_set_page,
 	},
 	.ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.get_page_entry = &r100_pci_gart_get_page_entry,
 		.set_page = &r100_pci_gart_set_page,
 	},
 	.ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
 		.set_page = &rv370_pcie_gart_set_page,
 	},
 	.ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
 		.set_page = &rv370_pcie_gart_set_page,
 	},
 	.ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
 	.mc_wait_for_idle = &rs400_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rs400_gart_tlb_flush,
+		.get_page_entry = &rs400_gart_get_page_entry,
 		.set_page = &rs400_gart_set_page,
 	},
 	.ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
 	.mc_wait_for_idle = &rs600_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rs600_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
 	.mc_wait_for_idle = &rs690_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rs400_gart_tlb_flush,
+		.get_page_entry = &rs400_gart_get_page_entry,
 		.set_page = &rs400_gart_set_page,
 	},
 	.ring = {
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
 	.mc_wait_for_idle = &rv515_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
 		.set_page = &rv370_pcie_gart_set_page,
 	},
 	.ring = {
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
 	.mc_wait_for_idle = &r520_mc_wait_for_idle,
 	.gart = {
 		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.get_page_entry = &rv370_pcie_gart_get_page_entry,
 		.set_page = &rv370_pcie_gart_set_page,
 	},
 	.ring = {
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.ring = {
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
 	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
 	.get_gpu_clock_counter = &si_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &si_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
 	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
 	.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
 	.gart = {
 		.tlb_flush = &cik_pcie_gart_tlb_flush,
+		.get_page_entry = &rs600_gart_get_page_entry,
 		.set_page = &rs600_gart_set_page,
 	},
 	.vm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2a45d548d5ec..8d787d115653 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr, uint32_t flags);
+			    uint64_t entry);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
 				struct radeon_fence *fence);
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-				     uint64_t addr, uint32_t flags);
+				     uint64_t entry);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
 extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr, uint32_t flags);
+			 uint64_t entry);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
 void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-			 uint64_t addr, uint32_t flags);
+			 uint64_t entry);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
 		rdev->dummy_page.page = NULL;
 		return -ENOMEM;
 	}
+	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+							    RADEON_GART_PAGE_DUMMY);
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
 		radeon_bo_unpin(rdev->gart.robj);
 	radeon_bo_unreserve(rdev->gart.robj);
 	rdev->gart.table_addr = gpu_addr;
+
+	if (!r) {
+		int i;
+
+		/* We might have dropped some GART table updates while it wasn't
+		 * mapped, restore all entries
+		 */
+		for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+			radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
+
 	return r;
 }
 
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	unsigned t;
 	unsigned p;
 	int i, j;
-	u64 page_base;
 
 	if (!rdev->gart.ready) {
 		WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
 			rdev->gart.pages[p] = NULL;
-			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
-			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
 				if (rdev->gart.ptr) {
-					radeon_gart_set_page(rdev, t, page_base,
-							     RADEON_GART_PAGE_DUMMY);
+					radeon_gart_set_page(rdev, t,
+							     rdev->dummy_page.entry);
 				}
-				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
 	}
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 {
 	unsigned t;
 	unsigned p;
-	uint64_t page_base;
+	uint64_t page_base, page_entry;
 	int i, j;
 
 	if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		rdev->gart.pages_addr[p] = dma_addr[i];
 		rdev->gart.pages[p] = pagelist[i];
-		if (rdev->gart.ptr) {
-			page_base = rdev->gart.pages_addr[p];
-			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, page_base, flags);
-				page_base += RADEON_GPU_PAGE_SIZE;
+		page_base = dma_addr[i];
+		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+			page_entry = radeon_gart_get_page_entry(page_base, flags);
+			rdev->gart.pages_entry[t] = page_entry;
+			if (rdev->gart.ptr) {
+				radeon_gart_set_page(rdev, t, page_entry);
 			}
+			page_base += RADEON_GPU_PAGE_SIZE;
 		}
 	}
 	mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-					rdev->gart.num_cpu_pages);
-	if (rdev->gart.pages_addr == NULL) {
+	rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+					 rdev->gart.num_gpu_pages);
+	if (rdev->gart.pages_entry == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
 	/* set GART entry to point to the dummy page by default */
-	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
-		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
-	}
+	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
 	return 0;
 }
 
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
  */
 void radeon_gart_fini(struct radeon_device *rdev)
 {
-	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+	if (rdev->gart.ready) {
 		/* unbind pages */
 		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
 	}
 	rdev->gart.ready = false;
 	vfree(rdev->gart.pages);
-	vfree(rdev->gart.pages_addr);
+	vfree(rdev->gart.pages_entry);
 	rdev->gart.pages = NULL;
-	rdev->gart.pages_addr = NULL;
+	rdev->gart.pages_entry = NULL;
 
 	radeon_dummy_page_fini(rdev);
 }
uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; +	uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;  	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);  	lock_srbm(kgd, mec, pipe, 0, 0); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index cde48c42b30a..06d2246d07f1 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)  	uint64_t result;  	/* page table offset */ -	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; - -	/* in case cpu page size != gpu page size*/ -	result |= addr & (~PAGE_MASK); +	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; +	result &= ~RADEON_GPU_PAGE_MASK;  	return result;  } diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c5799f16aa4b..34e3235f41d2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)  #define RS400_PTE_WRITEABLE (1 << 2)  #define RS400_PTE_READABLE  (1 << 3) -void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, -			 uint64_t addr, uint32_t flags) +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)  {  	uint32_t entry; -	u32 *gtt = rdev->gart.ptr;  	entry = (lower_32_bits(addr) & PAGE_MASK) |  		((upper_32_bits(addr) & 0xff) << 4); @@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,  		entry |= RS400_PTE_WRITEABLE;  	if (!(flags & RADEON_GART_PAGE_SNOOP))  		entry |= RS400_PTE_UNSNOOPED; -	entry = cpu_to_le32(entry); -	gtt[i] = entry; +	return entry; +} + +void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, +			 uint64_t entry) +{ +	u32 *gtt = rdev->gart.ptr; +	gtt[i] = cpu_to_le32(lower_32_bits(entry));  }  int rs400_mc_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9acb1c3c005b..74bce91aecc1 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)  	radeon_gart_table_vram_free(rdev);  } -void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, -			 uint64_t addr, uint32_t flags) +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)  { -	void __iomem *ptr = (void *)rdev->gart.ptr; -  	addr = addr & 0xFFFFFFFFFFFFF000ULL;  	addr |= R600_PTE_SYSTEM;  	if (flags & RADEON_GART_PAGE_VALID) @@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,  		addr |= R600_PTE_WRITEABLE;  	if (flags & RADEON_GART_PAGE_SNOOP)  		addr |= R600_PTE_SNOOPED; -	writeq(addr, ptr + (i * 8)); +	return addr; +} + +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, +			 uint64_t entry) +{ +	void __iomem *ptr = (void *)rdev->gart.ptr; +	writeq(entry, ptr + (i * 8));  }  int rs600_irq_set(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index aa7b872b2c43..83207929fc62 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,  		for (; ndw > 0; ndw -= 2, --count, pe += 8) {  			if (flags & R600_PTE_SYSTEM) {  				value = radeon_vm_map_gart(rdev, addr); -				value &= 0xFFFFFFFFFFFFF000ULL;  			} else if (flags & R600_PTE_VALID) {  				value = addr;  			} else { diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7b5d22110f25..6c6b655defcf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,  		if (unlikely(ret != 0))  			--dev_priv->num_3d_resources;  	} else if (unhide_svga) { -		mutex_lock(&dev_priv->hw_mutex);  		vmw_write(dev_priv, SVGA_REG_ENABLE,  			  vmw_read(dev_priv, SVGA_REG_ENABLE) &  			  ~SVGA_REG_ENABLE_HIDE); -		mutex_unlock(&dev_priv->hw_mutex);  	}  	mutex_unlock(&dev_priv->release_mutex); @@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,  	mutex_lock(&dev_priv->release_mutex);  	if (unlikely(--dev_priv->num_3d_resources == 0))  		vmw_release_device(dev_priv); -	else if (hide_svga) { -		mutex_lock(&dev_priv->hw_mutex); +	else if (hide_svga)  		vmw_write(dev_priv, SVGA_REG_ENABLE,  			  vmw_read(dev_priv, SVGA_REG_ENABLE) |  			  SVGA_REG_ENABLE_HIDE); -		mutex_unlock(&dev_priv->hw_mutex); -	}  	n3d = (int32_t) dev_priv->num_3d_resources;  	mutex_unlock(&dev_priv->release_mutex); @@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)  	dev_priv->dev = dev;  	dev_priv->vmw_chipset = chipset;  	dev_priv->last_read_seqno = (uint32_t) -100; -	mutex_init(&dev_priv->hw_mutex);  	mutex_init(&dev_priv->cmdbuf_mutex);  	mutex_init(&dev_priv->release_mutex);  	mutex_init(&dev_priv->binding_mutex);  	rwlock_init(&dev_priv->resource_lock);  	ttm_lock_init(&dev_priv->reservation_sem); +	spin_lock_init(&dev_priv->hw_lock); +	spin_lock_init(&dev_priv->waiter_lock); +	spin_lock_init(&dev_priv->cap_lock);  	for (i = vmw_res_context; i < vmw_res_max; ++i) {  		idr_init(&dev_priv->res_idr[i]); @@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)  	dev_priv->enable_fb = enable_fbdev; -	mutex_lock(&dev_priv->hw_mutex); -  	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);  	svga_id = vmw_read(dev_priv, SVGA_REG_ID);  	if (svga_id != SVGA_ID_2) {  		ret = -ENOSYS;  		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); -		mutex_unlock(&dev_priv->hw_mutex);  		goto out_err0;  	} @@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)  		dev_priv->prim_bb_mem = dev_priv->vram_size;  	ret = vmw_dma_masks(dev_priv); -	if (unlikely(ret != 0)) { -		mutex_unlock(&dev_priv->hw_mutex); +	if (unlikely(ret != 0))  		goto out_err0; -	}  	/*  	 * Limit back buffer size to VRAM size.  
Remove this once @@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)  	if (dev_priv->prim_bb_mem > dev_priv->vram_size)  		dev_priv->prim_bb_mem = dev_priv->vram_size; -	mutex_unlock(&dev_priv->hw_mutex); -  	vmw_print_capabilities(dev_priv->capabilities);  	if (dev_priv->capabilities & SVGA_CAP_GMR2) { @@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,  		if (unlikely(ret != 0))  			return ret;  		vmw_kms_save_vga(dev_priv); -		mutex_lock(&dev_priv->hw_mutex);  		vmw_write(dev_priv, SVGA_REG_TRACES, 0); -		mutex_unlock(&dev_priv->hw_mutex);  	}  	if (active) { @@ -1196,9 +1184,7 @@ out_no_active_lock:  	if (!dev_priv->enable_fb) {  		vmw_kms_restore_vga(dev_priv);  		vmw_3d_resource_dec(dev_priv, true); -		mutex_lock(&dev_priv->hw_mutex);  		vmw_write(dev_priv, SVGA_REG_TRACES, 1); -		mutex_unlock(&dev_priv->hw_mutex);  	}  	return ret;  } @@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,  			DRM_ERROR("Unable to clean VRAM on master drop.\n");  		vmw_kms_restore_vga(dev_priv);  		vmw_3d_resource_dec(dev_priv, true); -		mutex_lock(&dev_priv->hw_mutex);  		vmw_write(dev_priv, SVGA_REG_TRACES, 1); -		mutex_unlock(&dev_priv->hw_mutex);  	}  	dev_priv->active_master = &dev_priv->fbdev_master; @@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)  	struct drm_device *dev = pci_get_drvdata(pdev);  	struct vmw_private *dev_priv = vmw_priv(dev); -	mutex_lock(&dev_priv->hw_mutex);  	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);  	(void) vmw_read(dev_priv, SVGA_REG_ID); -	mutex_unlock(&dev_priv->hw_mutex);  	/**  	 * Reclaim 3d reference held by fbdev and potentially diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ee799b43d5d..d26a6daa9719 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -399,7 +399,8 @@ struct vmw_private {  	uint32_t memory_size;  	bool has_gmr;  	bool has_mob; -	struct mutex hw_mutex; +	spinlock_t hw_lock; +	spinlock_t cap_lock;  	/*  	 * VGA registers. @@ -449,8 +450,9 @@ struct vmw_private {  	atomic_t marker_seq;  	wait_queue_head_t fence_queue;  	wait_queue_head_t fifo_queue; -	int fence_queue_waiters; /* Protected by hw_mutex */ -	int goal_queue_waiters; /* Protected by hw_mutex */ +	spinlock_t waiter_lock; +	int fence_queue_waiters; /* Protected by waiter_lock */ +	int goal_queue_waiters; /* Protected by waiter_lock */  	atomic_t fifo_queue_waiters;  	uint32_t last_read_seqno;  	spinlock_t irq_lock; @@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)  	return (struct vmw_master *) master->driver_priv;  } +/* + * The locking here is fine-grained, so that it is performed once + * for every read- and write operation. This is of course costly, but we + * don't perform much register access in the timing critical paths anyway. + * Instead we have the extra benefit of being sure that we don't forget + * the hw lock around register accesses. 
+ */  static inline void vmw_write(struct vmw_private *dev_priv,  			     unsigned int offset, uint32_t value)  { +	unsigned long irq_flags; + +	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);  	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);  	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); +	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);  }  static inline uint32_t vmw_read(struct vmw_private *dev_priv,  				unsigned int offset)  { -	uint32_t val; +	unsigned long irq_flags; +	u32 val; +	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);  	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);  	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); +	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); +  	return val;  } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index b7594cb758af..945f1e0dad92 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -35,7 +35,7 @@ struct vmw_fence_manager {  	struct vmw_private *dev_priv;  	spinlock_t lock;  	struct list_head fence_list; -	struct work_struct work, ping_work; +	struct work_struct work;  	u32 user_fence_size;  	u32 fence_size;  	u32 event_fence_action_size; @@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)  	return "svga";  } -static void vmw_fence_ping_func(struct work_struct *work) -{ -	struct vmw_fence_manager *fman = -		container_of(work, struct vmw_fence_manager, ping_work); - -	vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); -} -  static bool vmw_fence_enable_signaling(struct fence *f)  {  	struct vmw_fence_obj *fence = @@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)  	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)  		return false; -	if (mutex_trylock(&dev_priv->hw_mutex)) { -		vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); -		mutex_unlock(&dev_priv->hw_mutex); -	} else -		schedule_work(&fman->ping_work); +	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);  	return true;  } @@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)  	INIT_LIST_HEAD(&fman->fence_list);  	INIT_LIST_HEAD(&fman->cleanup_list);  	INIT_WORK(&fman->work, &vmw_fence_work_func); -	INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);  	fman->fifo_down = true;  	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));  	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); @@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)  	bool lists_empty;  	(void) cancel_work_sync(&fman->work); -	(void) cancel_work_sync(&fman->ping_work);  	spin_lock_irqsave(&fman->lock, irq_flags);  	lists_empty = list_empty(&fman->fence_list) && diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 09e10aefcd8e..39f2b03888e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)  		if (!dev_priv->has_mob)  			return false; -		mutex_lock(&dev_priv->hw_mutex); +		spin_lock(&dev_priv->cap_lock);  		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);  		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); -		mutex_unlock(&dev_priv->hw_mutex); +		spin_unlock(&dev_priv->cap_lock);  		return (result != 0);  	} @@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)  	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));  	
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); -	mutex_lock(&dev_priv->hw_mutex);  	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);  	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);  	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); @@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)  	mb();  	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); -	mutex_unlock(&dev_priv->hw_mutex);  	max = ioread32(fifo_mem + SVGA_FIFO_MAX);  	min = ioread32(fifo_mem  + SVGA_FIFO_MIN); @@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)  	return vmw_fifo_send_fence(dev_priv, &dummy);  } -void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) +void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)  {  	__le32 __iomem *fifo_mem = dev_priv->mmio_virt; +	static DEFINE_SPINLOCK(ping_lock); +	unsigned long irq_flags; +	/* +	 * The ping_lock is needed because we don't have an atomic +	 * test-and-set of the SVGA_FIFO_BUSY register. +	 */ +	spin_lock_irqsave(&ping_lock, irq_flags);  	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {  		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);  		vmw_write(dev_priv, SVGA_REG_SYNC, reason);  	} -} - -void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) -{ -	mutex_lock(&dev_priv->hw_mutex); - -	vmw_fifo_ping_host_locked(dev_priv, reason); - -	mutex_unlock(&dev_priv->hw_mutex); +	spin_unlock_irqrestore(&ping_lock, irq_flags);  }  void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)  {  	__le32 __iomem *fifo_mem = dev_priv->mmio_virt; -	mutex_lock(&dev_priv->hw_mutex); -  	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);  	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)  		; @@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)  	vmw_write(dev_priv, SVGA_REG_TRACES,  		  dev_priv->traces_state); -	mutex_unlock(&dev_priv->hw_mutex);  	vmw_marker_queue_takedown(&fifo->marker_queue);  	if (likely(fifo->static_buffer != NULL)) { @@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,  		return vmw_fifo_wait_noirq(dev_priv, bytes,  					   interruptible, timeout); -	mutex_lock(&dev_priv->hw_mutex); +	spin_lock(&dev_priv->waiter_lock);  	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {  		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);  		outl(SVGA_IRQFLAG_FIFO_PROGRESS, @@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,  		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);  		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);  	} -	mutex_unlock(&dev_priv->hw_mutex); +	spin_unlock(&dev_priv->waiter_lock);  	if (interruptible)  		ret = wait_event_interruptible_timeout @@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,  	else if (likely(ret > 0))  		ret = 0; -	mutex_lock(&dev_priv->hw_mutex); +	spin_lock(&dev_priv->waiter_lock);  	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {  		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);  		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;  		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);  		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);  	} -	mutex_unlock(&dev_priv->hw_mutex); +	spin_unlock(&dev_priv->waiter_lock);  	return ret;  } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c 
index 37881ecf5d7a..69c8ce23123c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,  		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);  	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; -	mutex_lock(&dev_priv->hw_mutex); +	spin_lock(&dev_priv->cap_lock);  	for (i = 0; i < max_size; ++i) {  		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);  		compat_cap->pairs[i][0] = i;  		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);  	} -	mutex_unlock(&dev_priv->hw_mutex); +	spin_unlock(&dev_priv->cap_lock);  	return 0;  } @@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,  		if (num > SVGA3D_DEVCAP_MAX)  			num = SVGA3D_DEVCAP_MAX; -		mutex_lock(&dev_priv->hw_mutex); +		spin_lock(&dev_priv->cap_lock);  		for (i = 0; i < num; ++i) {  			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);  			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);  		} -		mutex_unlock(&dev_priv->hw_mutex); +		spin_unlock(&dev_priv->cap_lock);  	} else if (gb_objects) {  		ret = vmw_fill_compat_cap(dev_priv, bounce, size);  		if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c423766c441..9fe9827ee499 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)  static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)  { -	uint32_t busy; -	mutex_lock(&dev_priv->hw_mutex); -	busy = vmw_read(dev_priv, SVGA_REG_BUSY); -	mutex_unlock(&dev_priv->hw_mutex); - -	return (busy == 0); +	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);  }  void vmw_update_seqno(struct vmw_private *dev_priv, @@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,  void vmw_seqno_waiter_add(struct vmw_private *dev_priv)  { -	mutex_lock(&dev_priv->hw_mutex); +	spin_lock(&dev_priv->waiter_lock);  	if (dev_priv->fence_queue_waiters++ == 0) {  		unsigned long irq_flags; @@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)  		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);  		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);  	} -	mutex_unlock(&dev_priv->hw_mutex); +	spin_unlock(&dev_priv->waiter_lock);  }  void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)  { -	mutex_lock(&dev_priv->hw_mutex); +	spin_lock(&dev_priv->waiter_lock);  	if (--dev_priv->fence_queue_waiters == 0) {  		unsigned long irq_flags; @@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)  		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);  		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);  	} -	mutex_unlock(&dev_priv->hw_mutex); +	spin_unlock(&dev_priv->waiter_lock);  }  void vmw_goal_waiter_add(struct vmw_private *dev_priv)  { -	mutex_lock(&dev_priv->hw_mutex); +	spin_lock(&dev_priv->waiter_lock);  	if (dev_priv->goal_queue_waiters++ == 0) {  		unsigned long irq_flags; @@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)  		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);  		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);  	} -	mutex_unlock(&dev_priv->hw_mutex); +	spin_unlock(&dev_priv->waiter_lock);  }  void vmw_goal_waiter_remove(struct vmw_private *dev_priv)  { -	mutex_lock(&dev_priv->hw_mutex); +	spin_lock(&dev_priv->waiter_lock);  	if (--dev_priv->goal_queue_waiters == 
0) {  		unsigned long irq_flags; @@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)  		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);  		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);  	} -	mutex_unlock(&dev_priv->hw_mutex); +	spin_unlock(&dev_priv->waiter_lock);  }  int vmw_wait_seqno(struct vmw_private *dev_priv, @@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)  	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))  		return; -	mutex_lock(&dev_priv->hw_mutex);  	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); -	mutex_unlock(&dev_priv->hw_mutex);  	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);  	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3725b521d931..8725b79e7847 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)  	struct vmw_private *dev_priv = vmw_priv(dev);  	struct vmw_display_unit *du = vmw_connector_to_du(connector); -	mutex_lock(&dev_priv->hw_mutex);  	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); -	mutex_unlock(&dev_priv->hw_mutex);  	return ((vmw_connector_to_du(connector)->unit < num_displays &&  		 du->pref_active) ? diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index f722a0c466cf..c48da057dbb1 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {  	.attach_dev	= gart_iommu_attach_dev,  	.detach_dev	= gart_iommu_detach_dev,  	.map		= gart_iommu_map, +	.map_sg		= default_iommu_map_sg,  	.unmap		= gart_iommu_unmap,  	.iova_to_phys	= gart_iommu_iova_to_phys,  	.pgsize_bitmap	= GART_IOMMU_PGSIZES, @@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)  	do_gart_setup(gart, NULL);  	gart_handle = gart; -	bus_set_iommu(&platform_bus_type, &gart_iommu_ops); +  	return 0;  } diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 21b156242e42..c1c010498a21 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,  	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);  	if (!cmd) {  		DMERR("could not allocate metadata struct"); -		return NULL; +		return ERR_PTR(-ENOMEM);  	}  	atomic_set(&cmd->ref_count, 1); @@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,  		return cmd;  	cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); -	if (cmd) { +	if (!IS_ERR(cmd)) {  		mutex_lock(&table_lock);  		cmd2 = lookup(bdev);  		if (cmd2) { @@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,  {  	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,  						       may_format_device, policy_hint_size); -	if (cmd && !same_params(cmd, data_block_size)) { + +	if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {  		dm_cache_metadata_close(cmd); -		return NULL; +		return ERR_PTR(-EINVAL);  	}  	return cmd; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 493478989dbd..07705ee181e3 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)  	struct pool_c *pt = ti->private;  	struct 
pool *pool = pt->pool; +	if (get_pool_mode(pool) >= PM_READ_ONLY) { +		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", +		      dm_device_name(pool->pool_md)); +		return -EINVAL; +	} +  	if (!strcasecmp(argv[0], "create_thin"))  		r = process_create_thin_mesg(argc, argv, pool); diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index f94a9fa60488..c672c4dcffac 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)  	c_can_irq_control(priv, false); +	/* put ctrl to init on stop to end ongoing transmission */ +	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT); +  	/* deactivate pins */  	pinctrl_pm_select_sleep_state(dev->dev.parent);  	priv->can.state = CAN_STATE_STOPPED; diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index c32cd61073bc..7af379ca861b 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,  			  usb_sndbulkpipe(dev->udev,  					  dev->bulk_out->bEndpointAddress),  			  buf, msg->len, -			  kvaser_usb_simple_msg_callback, priv); +			  kvaser_usb_simple_msg_callback, netdev);  	usb_anchor_urb(urb, &priv->tx_submitted);  	err = usb_submit_urb(urb, GFP_ATOMIC); @@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,  	priv = dev->nets[channel];  	stats = &priv->netdev->stats; -	if (status & M16C_STATE_BUS_RESET) { -		kvaser_usb_unlink_tx_urbs(priv); -		return; -	} -  	skb = alloc_can_err_skb(priv->netdev, &cf);  	if (!skb) {  		stats->rx_dropped++; @@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,  	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); -	if (status & M16C_STATE_BUS_OFF) { +	if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {  		cf->can_id |= CAN_ERR_BUSOFF;  		priv->can.can_stats.bus_off++; @@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,  		}  		new_state = CAN_STATE_ERROR_PASSIVE; -	} - -	if (status == M16C_STATE_BUS_ERROR) { +	} else if (status & M16C_STATE_BUS_ERROR) {  		if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&  		    ((txerr >= 96) || (rxerr >= 96))) {  			cf->can_id |= CAN_ERR_CRTL; @@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,  			priv->can.can_stats.error_warning++;  			new_state = CAN_STATE_ERROR_WARNING; -		} else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) { +		} else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) && +			   ((txerr < 96) && (rxerr < 96))) {  			cf->can_id |= CAN_ERR_PROT;  			cf->data[2] = CAN_ERR_PROT_ACTIVE; @@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,  {  	struct kvaser_usb *dev;  	int err = -ENOMEM; -	int i; +	int i, retry = 3;  	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);  	if (!dev) @@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,  	usb_set_intfdata(intf, dev); -	err = kvaser_usb_get_software_info(dev); +	/* On some x86 laptops, plugging a Kvaser device again after +	 * an unplug makes the firmware always ignore the very first +	 * command. For such a case, provide some room for retries +	 * instead of completely exiting the driver. 
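+	 * (With retry = 3 below, the command is attempted up to three
+	 * times in total before the probe gives up.)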
+	 */ +	do { +		err = kvaser_usb_get_software_info(dev); +	} while (--retry && err == -ETIMEDOUT); +  	if (err) {  		dev_err(&intf->dev,  			"Cannot get software infos, error %d\n", err); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 75b08c63d39f..29a09271b64a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -767,16 +767,17 @@  #define MTL_Q_RQOMR			0x40  #define MTL_Q_RQMPOCR			0x44  #define MTL_Q_RQDR			0x4c +#define MTL_Q_RQFCR			0x50  #define MTL_Q_IER			0x70  #define MTL_Q_ISR			0x74  /* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQFCR_RFA_INDEX		1 +#define MTL_Q_RQFCR_RFA_WIDTH		6 +#define MTL_Q_RQFCR_RFD_INDEX		17 +#define MTL_Q_RQFCR_RFD_WIDTH		6  #define MTL_Q_RQOMR_EHFC_INDEX		7  #define MTL_Q_RQOMR_EHFC_WIDTH		1 -#define MTL_Q_RQOMR_RFA_INDEX		8 -#define MTL_Q_RQOMR_RFA_WIDTH		3 -#define MTL_Q_RQOMR_RFD_INDEX		13 -#define MTL_Q_RQOMR_RFD_WIDTH		3  #define MTL_Q_RQOMR_RQS_INDEX		16  #define MTL_Q_RQOMR_RQS_WIDTH		9  #define MTL_Q_RQOMR_RSF_INDEX		5 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 53f5f66ec2ee..4c66cd1d1e60 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)  	for (i = 0; i < pdata->rx_q_count; i++) {  		/* Activate flow control when less than 4k left in fifo */ -		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); +		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);  		/* De-activate flow control when more than 6k left in fifo */ -		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4); +		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);  	}  } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1d1147c93d59..e468ed3f210f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)  		}  #endif  		if (!bnx2x_fp_lock_napi(fp)) -			return work_done; +			return budget;  		for_each_cos_in_tx_queue(fp, cos)  			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b29e027c476e..e356afa44e7d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)  	int err;  	if (!enic_poll_lock_napi(&enic->rq[rq])) -		return work_done; +		return budget;  	/* Service RQ  	 */ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a62fc38f045e..1c75829eb166 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";  #define IS_TSO_HEADER(txq, addr) \  	((addr >= txq->tso_hdrs_dma) && \  	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + +#define DESC_DMA_MAP_SINGLE 0 +#define DESC_DMA_MAP_PAGE 1 +  /*   * RX/TX descriptors.   
*/ @@ -362,6 +366,7 @@ struct tx_queue {  	dma_addr_t tso_hdrs_dma;  	struct tx_desc *tx_desc_area; +	char *tx_desc_mapping; /* array to track the type of the dma mapping */  	dma_addr_t tx_desc_dma;  	int tx_desc_area_size; @@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,  	if (txq->tx_curr_desc == txq->tx_ring_size)  		txq->tx_curr_desc = 0;  	desc = &txq->tx_desc_area[tx_index]; +	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;  	desc->l4i_chk = 0;  	desc->byte_cnt = length; @@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)  		skb_frag_t *this_frag;  		int tx_index;  		struct tx_desc *desc; -		void *addr;  		this_frag = &skb_shinfo(skb)->frags[frag]; -		addr = page_address(this_frag->page.p) + this_frag->page_offset;  		tx_index = txq->tx_curr_desc++;  		if (txq->tx_curr_desc == txq->tx_ring_size)  			txq->tx_curr_desc = 0;  		desc = &txq->tx_desc_area[tx_index]; +		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;  		/*  		 * The last fragment will generate an interrupt @@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)  		desc->l4i_chk = 0;  		desc->byte_cnt = skb_frag_size(this_frag); -		desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr, -					       desc->byte_cnt, DMA_TO_DEVICE); +		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, +						 this_frag, 0, desc->byte_cnt, +						 DMA_TO_DEVICE);  	}  } @@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,  	if (txq->tx_curr_desc == txq->tx_ring_size)  		txq->tx_curr_desc = 0;  	desc = &txq->tx_desc_area[tx_index]; +	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;  	if (nr_frags) {  		txq_submit_frag_skb(txq, skb); @@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)  		int tx_index;  		struct tx_desc *desc;  		u32 cmd_sts; +		char desc_dma_map;  		tx_index = txq->tx_used_desc;  		desc = &txq->tx_desc_area[tx_index]; +		desc_dma_map = txq->tx_desc_mapping[tx_index]; +  		cmd_sts = desc->cmd_sts;  		if (cmd_sts & BUFFER_OWNED_BY_DMA) { @@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)  		reclaimed++;  		txq->tx_desc_count--; -		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) -			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, -					 desc->byte_cnt, DMA_TO_DEVICE); +		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { + +			if (desc_dma_map == DESC_DMA_MAP_PAGE) +				dma_unmap_page(mp->dev->dev.parent, +					       desc->buf_ptr, +					       desc->byte_cnt, +					       DMA_TO_DEVICE); +			else +				dma_unmap_single(mp->dev->dev.parent, +						 desc->buf_ptr, +						 desc->byte_cnt, +						 DMA_TO_DEVICE); +		}  		if (cmd_sts & TX_ENABLE_INTERRUPT) {  			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); @@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)  	struct tx_queue *txq = mp->txq + index;  	struct tx_desc *tx_desc;  	int size; +	int ret;  	int i;  	txq->index = index; @@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)  					nexti * sizeof(struct tx_desc);  	} +	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), +				       GFP_KERNEL); +	if (!txq->tx_desc_mapping) { +		ret = -ENOMEM; +		goto err_free_desc_area; +	} +  	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */  	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,  					   txq->tx_ring_size * TSO_HEADER_SIZE,  					   
&txq->tso_hdrs_dma, GFP_KERNEL);  	if (txq->tso_hdrs == NULL) { -		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, -				  txq->tx_desc_area, txq->tx_desc_dma); -		return -ENOMEM; +		ret = -ENOMEM; +		goto err_free_desc_mapping;  	}  	skb_queue_head_init(&txq->tx_skb);  	return 0; + +err_free_desc_mapping: +	kfree(txq->tx_desc_mapping); +err_free_desc_area: +	if (index == 0 && size <= mp->tx_desc_sram_size) +		iounmap(txq->tx_desc_area); +	else +		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, +				  txq->tx_desc_area, txq->tx_desc_dma); +	return ret;  }  static void txq_deinit(struct tx_queue *txq) @@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)  	else  		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,  				  txq->tx_desc_area, txq->tx_desc_dma); +	kfree(txq->tx_desc_mapping); +  	if (txq->tso_hdrs)  		dma_free_coherent(mp->dev->dev.parent,  				  txq->tx_ring_size * TSO_HEADER_SIZE, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 613037584d08..c531c8ae1be4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)  	work_done = netxen_process_rcv_ring(sds_ring, budget); -	if ((work_done < budget) && tx_complete) { +	if (!tx_complete) +		work_done = budget; + +	if (work_done < budget) {  		napi_complete(&sds_ring->napi);  		if (test_bit(__NX_DEV_UP, &adapter->state))  			netxen_nic_enable_int(sds_ring); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6576243222af..04283fe0e6a7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {  	[TSU_ADRL31]	= 0x01fc,  }; +static void sh_eth_rcv_snd_disable(struct net_device *ndev); +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); +  static bool sh_eth_is_gether(struct sh_eth_private *mdp)  {  	return mdp->reg_offset == sh_eth_offset_gigabit; @@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)  	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;  	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;  	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; +	dma_addr_t dma_addr;  	mdp->cur_rx = 0;  	mdp->cur_tx = 0; @@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)  		/* skb */  		mdp->rx_skbuff[i] = NULL;  		skb = netdev_alloc_skb(ndev, skbuff_size); -		mdp->rx_skbuff[i] = skb;  		if (skb == NULL)  			break;  		sh_eth_set_receive_align(skb); @@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)  		rxdesc = &mdp->rx_ring[i];  		/* The size of the buffer is a multiple of 16 bytes. 
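 		 * For example, a 1522-byte buffer would round up to
 		 * ALIGN(1522, 16) == 1536 (illustrative numbers only).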
*/  		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); -		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, -			       DMA_FROM_DEVICE); -		rxdesc->addr = virt_to_phys(skb->data); +		dma_addr = dma_map_single(&ndev->dev, skb->data, +					  rxdesc->buffer_length, +					  DMA_FROM_DEVICE); +		if (dma_mapping_error(&ndev->dev, dma_addr)) { +			kfree_skb(skb); +			break; +		} +		mdp->rx_skbuff[i] = skb; +		rxdesc->addr = dma_addr;  		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);  		/* Rx descriptor address set */ @@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)  		     RFLR);  	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); -	if (start) +	if (start) { +		mdp->irq_enabled = true;  		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); +	}  	/* PAUSE Prohibition */  	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | @@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)  	return ret;  } +static void sh_eth_dev_exit(struct net_device *ndev) +{ +	struct sh_eth_private *mdp = netdev_priv(ndev); +	int i; + +	/* Deactivate all TX descriptors, so DMA should stop at next +	 * packet boundary if it's currently running +	 */ +	for (i = 0; i < mdp->num_tx_ring; i++) +		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); + +	/* Disable TX FIFO egress to MAC */ +	sh_eth_rcv_snd_disable(ndev); + +	/* Stop RX DMA at next packet boundary */ +	sh_eth_write(ndev, 0, EDRRR); + +	/* Aside from TX DMA, we can't tell when the hardware is +	 * really stopped, so we need to reset to make sure. +	 * Before doing that, wait for long enough to *probably* +	 * finish transmitting the last packet and poll stats. +	 */ +	msleep(2); /* max frame time at 10 Mbps < 1250 us */ +	sh_eth_get_stats(ndev); +	sh_eth_reset(ndev); +} +  /* free Tx skb function */  static int sh_eth_txfree(struct net_device *ndev)  { @@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)  	u16 pkt_len = 0;  	u32 desc_status;  	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; +	dma_addr_t dma_addr;  	boguscnt = min(boguscnt, *quota);  	limit = boguscnt; @@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)  			mdp->rx_skbuff[entry] = NULL;  			if (mdp->cd->rpadir)  				skb_reserve(skb, NET_IP_ALIGN); -			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, -						ALIGN(mdp->rx_buf_sz, 16), -						DMA_FROM_DEVICE); +			dma_unmap_single(&ndev->dev, rxdesc->addr, +					 ALIGN(mdp->rx_buf_sz, 16), +					 DMA_FROM_DEVICE);  			skb_put(skb, pkt_len);  			skb->protocol = eth_type_trans(skb, ndev);  			netif_receive_skb(skb); @@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)  		if (mdp->rx_skbuff[entry] == NULL) {  			skb = netdev_alloc_skb(ndev, skbuff_size); -			mdp->rx_skbuff[entry] = skb;  			if (skb == NULL)  				break;	/* Better luck next round. 
 */  			sh_eth_set_receive_align(skb); -			dma_map_single(&ndev->dev, skb->data, -				       rxdesc->buffer_length, DMA_FROM_DEVICE); +			dma_addr = dma_map_single(&ndev->dev, skb->data, +						  rxdesc->buffer_length, +						  DMA_FROM_DEVICE); +			if (dma_mapping_error(&ndev->dev, dma_addr)) { +				kfree_skb(skb); +				break; +			} +			mdp->rx_skbuff[entry] = skb;  			skb_checksum_none_assert(skb); -			rxdesc->addr = virt_to_phys(skb->data); +			rxdesc->addr = dma_addr;  		}  		if (entry >= mdp->num_rx_ring - 1)  			rxdesc->status |= @@ -1573,7 +1617,6 @@ ignore_link:  		if (intr_status & EESR_RFRMER) {  			/* Receive Frame Overflow int */  			ndev->stats.rx_frame_errors++; -			netif_err(mdp, rx_err, ndev, "Receive Abort\n");  		}  	} @@ -1592,13 +1635,11 @@ ignore_link:  	if (intr_status & EESR_RDE) {  		/* Receive Descriptor Empty int */  		ndev->stats.rx_over_errors++; -		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");  	}  	if (intr_status & EESR_RFE) {  		/* Receive FIFO Overflow int */  		ndev->stats.rx_fifo_errors++; -		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");  	}  	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { @@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)  	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))  		ret = IRQ_HANDLED;  	else -		goto other_irq; +		goto out; + +	if (unlikely(!mdp->irq_enabled)) { +		sh_eth_write(ndev, 0, EESIPR); +		goto out; +	}  	if (intr_status & EESR_RX_CHECK) {  		if (napi_schedule_prep(&mdp->napi)) { @@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)  		sh_eth_error(ndev, intr_status);  	} -other_irq: +out:  	spin_unlock(&mdp->lock);  	return ret; @@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)  	napi_complete(napi);  	/* Reenable Rx interrupts */ -	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); +	if (mdp->irq_enabled) +		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);  out:  	return budget - quota;  } @@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,  		return -EINVAL;  	if (netif_running(ndev)) { +		netif_device_detach(ndev);  		netif_tx_disable(ndev); -		/* Disable interrupts by clearing the interrupt mask. */ -		sh_eth_write(ndev, 0x0000, EESIPR); -		/* Stop the chip's Tx and Rx processes. */ -		sh_eth_write(ndev, 0, EDTRR); -		sh_eth_write(ndev, 0, EDRRR); + +		/* Serialise with the interrupt handler and NAPI, then +		 * disable interrupts.  We have to clear the +		 * irq_enabled flag first to ensure that interrupts +		 * won't be re-enabled. +		 */ +		mdp->irq_enabled = false;  		synchronize_irq(ndev->irq); -	} +		napi_synchronize(&mdp->napi); +		sh_eth_write(ndev, 0x0000, EESIPR); -	/* Free all the skbuffs in the Rx queue. */ -	sh_eth_ring_free(ndev); -	/* Free DMA buffer */ -	sh_eth_free_dma_buffer(mdp); +		sh_eth_dev_exit(ndev); + +		/* Free all the skbuffs in the Rx queue. 
*/ +		sh_eth_ring_free(ndev); +		/* Free DMA buffer */ +		sh_eth_free_dma_buffer(mdp); +	}  	/* Set new parameters */  	mdp->num_rx_ring = ring->rx_pending;  	mdp->num_tx_ring = ring->tx_pending; -	ret = sh_eth_ring_init(ndev); -	if (ret < 0) { -		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__); -		return ret; -	} -	ret = sh_eth_dev_init(ndev, false); -	if (ret < 0) { -		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); -		return ret; -	} -  	if (netif_running(ndev)) { +		ret = sh_eth_ring_init(ndev); +		if (ret < 0) { +			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", +				   __func__); +			return ret; +		} +		ret = sh_eth_dev_init(ndev, false); +		if (ret < 0) { +			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", +				   __func__); +			return ret; +		} + +		mdp->irq_enabled = true;  		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);  		/* Setting the Rx mode will start the Rx process. */  		sh_eth_write(ndev, EDRRR_R, EDRRR); -		netif_wake_queue(ndev); +		netif_device_attach(ndev);  	}  	return 0; @@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)  	}  	spin_unlock_irqrestore(&mdp->lock, flags); +	if (skb_padto(skb, ETH_ZLEN)) +		return NETDEV_TX_OK; +  	entry = mdp->cur_tx % mdp->num_tx_ring;  	mdp->tx_skbuff[entry] = skb;  	txdesc = &mdp->tx_ring[entry]; @@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)  				 skb->len + 2);  	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,  				      DMA_TO_DEVICE); -	if (skb->len < ETH_ZLEN) -		txdesc->buffer_length = ETH_ZLEN; -	else -		txdesc->buffer_length = skb->len; +	if (dma_mapping_error(&ndev->dev, txdesc->addr)) { +		kfree_skb(skb); +		return NETDEV_TX_OK; +	} +	txdesc->buffer_length = skb->len;  	if (entry >= mdp->num_tx_ring - 1)  		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); @@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev)  	netif_stop_queue(ndev); -	/* Disable interrupts by clearing the interrupt mask. */ +	/* Serialise with the interrupt handler and NAPI, then disable +	 * interrupts.  We have to clear the irq_enabled flag first to +	 * ensure that interrupts won't be re-enabled. +	 */ +	mdp->irq_enabled = false; +	synchronize_irq(ndev->irq); +	napi_disable(&mdp->napi);  	sh_eth_write(ndev, 0x0000, EESIPR); -	/* Stop the chip's Tx and Rx processes. */ -	sh_eth_write(ndev, 0, EDTRR); -	sh_eth_write(ndev, 0, EDRRR); +	sh_eth_dev_exit(ndev); -	sh_eth_get_stats(ndev);  	/* PHY Disconnect */  	if (mdp->phydev) {  		phy_stop(mdp->phydev); @@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev)  	free_irq(ndev->irq, ndev); -	napi_disable(&mdp->napi); -  	/* Free all the skbuffs in the Rx queue. */  	sh_eth_ring_free(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 71f5de1171bd..332d3c16d483 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -513,6 +513,7 @@ struct sh_eth_private {  	u32 rx_buf_sz;			/* Based on MTU+slack. */  	int edmac_endian;  	struct napi_struct napi; +	bool irq_enabled;  	/* MII transceiver section. 
*/  	u32 phy_id;			/* PHY ID */  	struct mii_bus *mii_bus;	/* MDIO bus control */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8c6b7c1651e5..cf62ff4c8c56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)   * @addr: iobase memory address   * Description: this is the main probe function used to   * call the alloc_etherdev, allocate the priv structure. + * Return: + * on success the new private structure is returned, otherwise the error + * pointer.   */  struct stmmac_priv *stmmac_dvr_probe(struct device *device,  				     struct plat_stmmacenet_data *plat_dat, @@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,  	ndev = alloc_etherdev(sizeof(struct stmmac_priv));  	if (!ndev) -		return NULL; +		return ERR_PTR(-ENOMEM);  	SET_NETDEV_DEV(ndev, device); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e068d48b0f21..a39131f494ec 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,  	if (vid == priv->data.default_vlan)  		return 0; +	if (priv->data.dual_emac) { +		/* In dual EMAC, reserved VLAN id should not be used for +		 * creating VLAN interfaces as this can break the dual +		 * EMAC port separation +		 */ +		int i; + +		for (i = 0; i < priv->data.slaves; i++) { +			if (vid == priv->slaves[i].port_vlan) +				return -EINVAL; +		} +	} +  	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);  	return cpsw_add_vlan_ale_entry(priv, vid);  } @@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,  	if (vid == priv->data.default_vlan)  		return 0; +	if (priv->data.dual_emac) { +		int i; + +		for (i = 0; i < priv->data.slaves; i++) { +			if (vid == priv->slaves[i].port_vlan) +				return -EINVAL; +		} +	} +  	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);  	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);  	if (ret != 0) diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index a14d87783245..2e195289ddf4 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)  	};  	dst = ip6_route_output(dev_net(dev), NULL, &fl6); -	if (IS_ERR(dst)) +	if (dst->error) { +		ret = dst->error; +		dst_release(dst);  		goto err; - +	}  	skb_dst_drop(skb);  	skb_dst_set(skb, dst);  	err = ip6_local_out(skb); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9a72640237cb..62b0bf4fdf6b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)  	__ath_cancel_work(sc); +	disable_irq(sc->irq);  	tasklet_disable(&sc->intr_tq);  	tasklet_disable(&sc->bcon_tasklet);  	spin_lock_bh(&sc->sc_pcu_lock); @@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)  		r = -EIO;  out: +	enable_irq(sc->irq);  	spin_unlock_bh(&sc->sc_pcu_lock);  	tasklet_enable(&sc->bcon_tasklet);  	tasklet_enable(&sc->intr_tq); @@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)  	if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))  		return 
IRQ_NONE; -	if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) -		return IRQ_NONE; -  	/* shared irq, not for us */  	if (!ath9k_hw_intrpend(ah))  		return IRQ_NONE; @@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)  	ath9k_debug_sync_cause(sc, sync_cause);  	status &= ah->imask;	/* discard unasked-for bits */ -	if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) +	if (test_bit(ATH_OP_HW_RESET, &common->op_flags))  		return IRQ_HANDLED;  	/* diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 1bbe4fc47b97..660ddb1b7d8a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {   * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,   *	regardless of the band or the number of the probes. FW will calculate   *	the actual dwell time. + * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.   */  enum iwl_ucode_tlv_api {  	IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID	= BIT(0), @@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api {  	IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF	= BIT(7),  	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= BIT(8),  	IWL_UCODE_TLV_API_BASIC_DWELL		= BIT(13), +	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= BIT(16),  };  /** diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 201846de94e7..cfc0e65b34a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {  };  /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S - * @flags: enum iwl_scan_channel_flgs - * @non_ebs_ratio: how many regular scan iteration before EBS + * @flags: enum iwl_scan_channel_flags + * @non_ebs_ratio: defines the ratio of scan iterations in which EBS is + *	used. + *	1 - EBS is disabled. + *	2 - every second scan will be a full scan (and so on).  
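+ *	For example (illustrative reading): with non_ebs_ratio = 5, as
+ *	used for IWL_DENSE_EBS_SCAN_RATIO below, every fifth scan
+ *	iteration is a full (non-EBS) scan.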
*/  struct iwl_scan_channel_opt {  	__le16 flags; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index e880f9d4717b..20915587c820 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,  		msk |= mvmsta->tfd_queue_msk;  	} -	if (drop) { -		if (iwl_mvm_flush_tx_path(mvm, msk, true)) -			IWL_ERR(mvm, "flush request fail\n"); -		mutex_unlock(&mvm->mutex); -	} else { -		mutex_unlock(&mvm->mutex); +	msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); -		/* this can take a while, and we may need/want other operations -		 * to succeed while doing this, so do it without the mutex held -		 */ -		iwl_trans_wait_tx_queue_empty(mvm->trans, msk); -	} +	if (iwl_mvm_flush_tx_path(mvm, msk, true)) +		IWL_ERR(mvm, "flush request fail\n"); +	mutex_unlock(&mvm->mutex); + +	/* this can take a while, and we may need/want other operations +	 * to succeed while doing this, so do it without the mutex held +	 */ +	iwl_trans_wait_tx_queue_empty(mvm->trans, msk);  }  const struct ieee80211_ops iwl_mvm_hw_ops = { diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index ec9a8e7bae1d..844bf7c4c8de 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -72,6 +72,8 @@  #define IWL_PLCP_QUIET_THRESH 1  #define IWL_ACTIVE_QUIET_TIME 10 +#define IWL_DENSE_EBS_SCAN_RATIO 5 +#define IWL_SPARSE_EBS_SCAN_RATIO 1  struct iwl_mvm_scan_params {  	u32 max_out_time; @@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)  		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,  					  notify); +	if (mvm->scan_status == IWL_MVM_SCAN_NONE) +		return 0; + +	if (iwl_mvm_is_radio_killed(mvm)) +		goto out; +  	if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&  	    (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||  	     mvm->scan_status != IWL_MVM_SCAN_OS)) { @@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)  	if (mvm->scan_status == IWL_MVM_SCAN_OS)  		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); +out:  	mvm->scan_status = IWL_MVM_SCAN_NONE;  	if (notify) { @@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,  	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);  	cmd->iter_num = cpu_to_le32(1); -	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && -	    mvm->last_ebs_successful) { -		cmd->channel_opt[0].flags = -			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | -				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | -				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); -		cmd->channel_opt[1].flags = -			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | -				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | -				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); -	} -  	if (iwl_mvm_rrm_scan_needed(mvm))  		cmd->scan_flags |=  			cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); @@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,  	cmd->schedule[1].iterations = 0;  	cmd->schedule[1].full_scan_mul = 0; +	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS && +	    mvm->last_ebs_successful) { +		cmd->channel_opt[0].flags = +			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | +				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | +				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); +		cmd->channel_opt[0].non_ebs_ratio = +			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); +		cmd->channel_opt[1].flags = +			
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | +				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | +				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); +		cmd->channel_opt[1].non_ebs_ratio = +			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); +	} +  	for (i = 1; i <= req->req.n_ssids; i++)  		ssid_bitmap |= BIT(i); @@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,  	cmd->schedule[1].iterations = 0xff;  	cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; +	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && +	    mvm->last_ebs_successful) { +		cmd->channel_opt[0].flags = +			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | +				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | +				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); +		cmd->channel_opt[0].non_ebs_ratio = +			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); +		cmd->channel_opt[1].flags = +			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | +				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | +				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); +		cmd->channel_opt[1].non_ebs_ratio = +			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); +	} +  	iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,  				       ssid_bitmap, cmd); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4333306ccdee..c59d07567d90 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,  	if (ieee80211_is_probe_resp(fc))  		tx_flags |= TX_CMD_FLG_TSF; -	else if (ieee80211_is_back_req(fc)) -		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;  	if (ieee80211_has_morefrags(fc))  		tx_flags |= TX_CMD_FLG_MORE_FRAG; @@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,  		u8 *qc = ieee80211_get_qos_ctl(hdr);  		tx_cmd->tid_tspec = qc[0] & 0xf;  		tx_flags &= ~TX_CMD_FLG_SEQ_CTL; +	} else if (ieee80211_is_back_req(fc)) { +		struct ieee80211_bar *bar = (void *)skb->data; +		u16 control = le16_to_cpu(bar->control); + +		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; +		tx_cmd->tid_tspec = (control & +				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >> +			IEEE80211_BAR_CTRL_TID_INFO_SHIFT; +		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);  	} else {  		tx_cmd->tid_tspec = IWL_TID_NON_QOS;  		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index dfd021e8268f..f4cd0b9b2438 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -177,7 +177,7 @@ struct at91_pinctrl {  	struct device		*dev;  	struct pinctrl_dev	*pctl; -	int			nbanks; +	int			nactive_banks;  	uint32_t		*mux_mask;  	int			nmux; @@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,  	int mux;  	/* check if it's a valid config */ -	if (pin->bank >= info->nbanks) { +	if (pin->bank >= gpio_banks) {  		dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", -			name, index, pin->bank, info->nbanks); +			name, index, pin->bank, gpio_banks);  		return -EINVAL;  	} +	if (!gpio_chips[pin->bank]) { +		dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n", +			name, index, pin->bank); +		return -ENXIO; +	} +  	if (pin->pin >= MAX_NB_GPIO_PER_BANK) {  		dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",  			name, index, pin->pin, MAX_NB_GPIO_PER_BANK); @@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,  	for_each_child_of_node(np, child) {  		if 
(of_device_is_compatible(child, gpio_compat)) { -			info->nbanks++; +			if (of_device_is_available(child)) +				info->nactive_banks++;  		} else {  			info->nfunctions++;  			info->ngroups += of_get_child_count(child); @@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,  	}  	size /= sizeof(*list); -	if (!size || size % info->nbanks) { -		dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks); +	if (!size || size % gpio_banks) { +		dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);  		return -EINVAL;  	} -	info->nmux = size / info->nbanks; +	info->nmux = size / gpio_banks;  	info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);  	if (!info->mux_mask) { @@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,  		of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;  	at91_pinctrl_child_count(info, np); -	if (info->nbanks < 1) { +	if (gpio_banks < 1) {  		dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");  		return -EINVAL;  	} @@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,  	dev_dbg(&pdev->dev, "mux-mask\n");  	tmp = info->mux_mask; -	for (i = 0; i < info->nbanks; i++) { +	for (i = 0; i < gpio_banks; i++) {  		for (j = 0; j < info->nmux; j++, tmp++) {  			dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);  		} @@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,  	if (!info->groups)  		return -ENOMEM; -	dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks); +	dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);  	dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);  	dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); @@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)  {  	struct at91_pinctrl *info;  	struct pinctrl_pin_desc *pdesc; -	int ret, i, j, k; +	int ret, i, j, k, ngpio_chips_enabled = 0;  	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);  	if (!info) @@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)  	 * to obtain references to the struct gpio_chip * for them, and we  	 * need this to proceed.  	 
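The available-bank counting introduced here reduces to a small loop; the following standalone sketch (toy types, not the driver's) shows the rule that a bank counts only when its device-tree node is both compatible and marked available:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy device-tree node: a bank is counted only when its node is
	 * both gpio-compatible and available (status = "okay"). */
	struct toy_node {
		bool is_gpio;
		bool available;
	};

	static int count_active_banks(const struct toy_node *nodes, int n)
	{
		int i, nactive = 0;

		for (i = 0; i < n; i++)
			if (nodes[i].is_gpio && nodes[i].available)
				nactive++;
		return nactive;
	}

	int main(void)
	{
		struct toy_node banks[] = {
			{ true, true }, { true, false }, { true, true },
		};

		printf("active banks: %d\n", count_active_banks(banks, 3));
		return 0;
	}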
*/ -	for (i = 0; i < info->nbanks; i++) { -		if (!gpio_chips[i]) { -			dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); -			devm_kfree(&pdev->dev, info); -			return -EPROBE_DEFER; -		} +	for (i = 0; i < gpio_banks; i++) +		if (gpio_chips[i]) +			ngpio_chips_enabled++; + +	if (ngpio_chips_enabled < info->nactive_banks) { +		dev_warn(&pdev->dev, +			 "Not all GPIO chips are registered yet (%d/%d)\n", +			 ngpio_chips_enabled, info->nactive_banks); +		devm_kfree(&pdev->dev, info); +		return -EPROBE_DEFER;  	}  	at91_pinctrl_desc.name = dev_name(&pdev->dev); -	at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK; +	at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;  	at91_pinctrl_desc.pins = pdesc =  		devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);  	if (!at91_pinctrl_desc.pins)  		return -ENOMEM; -	for (i = 0 , k = 0; i < info->nbanks; i++) { +	for (i = 0, k = 0; i < gpio_banks; i++) {  		for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {  			pdesc->number = k;  			pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); @@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)  	}  	/* We will handle a range of GPIO pins */ -	for (i = 0; i < info->nbanks; i++) -		pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); +	for (i = 0; i < gpio_banks; i++) +		if (gpio_chips[i]) +			pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);  	dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");  @@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)  static int at91_gpio_of_irq_setup(struct platform_device *pdev,  				  struct at91_gpio_chip *at91_gpio)  { +	struct gpio_chip	*gpiochip_prev = NULL;  	struct at91_gpio_chip   *prev = NULL;  	struct irq_data		*d = irq_get_irq_data(at91_gpio->pioc_virq); -	int ret; +	int ret, i;  	at91_gpio->pioc_hwirq = irqd_to_hwirq(d); @@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,  		return ret;  	} -	/* Setup chained handler */ -	if (at91_gpio->pioc_idx) -		prev = gpio_chips[at91_gpio->pioc_idx - 1]; -  	/* The top level handler handles one bank of GPIOs, except  	 * on some SoCs it can handle up to three...  	 * We only set up the handler for the first of the list.  	 
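The chaining rule reworked below (at most three banks sharing one parent IRQ) is just a bounded list insertion; here is a standalone model with illustrative types, not the driver's:

	#include <stdio.h>

	struct toy_bank {
		struct toy_bank *next;
	};

	/* Link `bank` behind `first`, which already owns the parent IRQ.
	 * At most two banks may already be chained, so three share the
	 * IRQ in total; returns -1 when the chain is full. */
	static int chain_bank(struct toy_bank *first, struct toy_bank *bank)
	{
		struct toy_bank *prev = first;
		int i;

		for (i = 0; i < 2; i++) {
			if (!prev->next) {
				prev->next = bank;
				return 0;
			}
			prev = prev->next;
		}
		return -1;
	}

	int main(void)
	{
		struct toy_bank a = { 0 }, b = { 0 }, c = { 0 }, d = { 0 };
		int r1 = chain_bank(&a, &b);
		int r2 = chain_bank(&a, &c);
		int r3 = chain_bank(&a, &d);

		printf("%d %d %d\n", r1, r2, r3);	/* 0 0 -1 */
		return 0;
	}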
*/ -	if (prev && prev->next == at91_gpio) +	gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq); +	if (!gpiochip_prev) { +		/* Then register the chain on the parent IRQ */ +		gpiochip_set_chained_irqchip(&at91_gpio->chip, +					     &gpio_irqchip, +					     at91_gpio->pioc_virq, +					     gpio_irq_handler);  		return 0; +	} -	/* Then register the chain on the parent IRQ */ -	gpiochip_set_chained_irqchip(&at91_gpio->chip, -				     &gpio_irqchip, -				     at91_gpio->pioc_virq, -				     gpio_irq_handler); +	prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip); -	return 0; +	/* we can only have 2 banks before */ +	for (i = 0; i < 2; i++) { +		if (prev->next) { +			prev = prev->next; +		} else { +			prev->next = at91_gpio; +			return 0; +		} +	} + +	return -EINVAL;  }  /* This structure is replicated for each GPIO block allocated at probe time */ @@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {  	.ngpio			= MAX_NB_GPIO_PER_BANK,  }; -static void at91_gpio_probe_fixup(void) -{ -	unsigned i; -	struct at91_gpio_chip *at91_gpio, *last = NULL; - -	for (i = 0; i < gpio_banks; i++) { -		at91_gpio = gpio_chips[i]; - -		/* -		 * GPIO controller are grouped on some SoC: -		 * PIOC, PIOD and PIOE can share the same IRQ line -		 */ -		if (last && last->pioc_virq == at91_gpio->pioc_virq) -			last->next = at91_gpio; -		last = at91_gpio; -	} -} -  static struct of_device_id at91_gpio_of_match[] = {  	{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },  	{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, @@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)  	gpio_chips[alias_idx] = at91_chip;  	gpio_banks = max(gpio_banks, alias_idx + 1); -	at91_gpio_probe_fixup(); -  	ret = at91_gpio_of_irq_setup(pdev, at91_chip);  	if (ret)  		goto irq_setup_err; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index e225711bb8bc..9c48fb32f660 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)  }  EXPORT_SYMBOL_GPL(regulator_get_optional); -/* Locks held by regulator_put() */ +/* regulator_list_mutex lock held by regulator_put() */  static void _regulator_put(struct regulator *regulator)  {  	struct regulator_dev *rdev; @@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)  	/* remove any sysfs entries */  	if (regulator->dev)  		sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); +	mutex_lock(&rdev->mutex);  	kfree(regulator->supply_name);  	list_del(®ulator->list);  	kfree(regulator);  	rdev->open_count--;  	rdev->exclusive = 0; +	mutex_unlock(&rdev->mutex);  	module_put(rdev->owner);  } diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 2809ae0d6bcd..ff828117798f 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops;  	.enable_mask	= S2MPS14_ENABLE_MASK			\  } +#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) {	\ +	.name		= "BUCK"#num,				\ +	.id		= S2MPS13_BUCK##num,			\ +	.ops		= &s2mps14_reg_ops,			\ +	.type		= REGULATOR_VOLTAGE,			\ +	.owner		= THIS_MODULE,				\ +	.min_uV		= min,					\ +	.uV_step	= step,					\ +	.linear_min_sel	= min_sel,				\ +	.n_voltages	= S2MPS14_BUCK_N_VOLTAGES,		\ +	.ramp_delay	= S2MPS13_BUCK_RAMP_DELAY,		\ +	.vsel_reg	= S2MPS13_REG_B1OUT + (num) * 2 - 1,	\ +	.vsel_mask	= S2MPS14_BUCK_VSEL_MASK,		\ +	
.enable_reg	= S2MPS13_REG_B1CTRL + (num - 1) * 2,	\ +	.enable_mask	= S2MPS14_ENABLE_MASK			\ +} + +#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) {	\ +	.name		= "BUCK"#num,				\ +	.id		= S2MPS13_BUCK##num,			\ +	.ops		= &s2mps14_reg_ops,			\ +	.type		= REGULATOR_VOLTAGE,			\ +	.owner		= THIS_MODULE,				\ +	.min_uV		= min,					\ +	.uV_step	= step,					\ +	.linear_min_sel	= min_sel,				\ +	.n_voltages	= S2MPS14_BUCK_N_VOLTAGES,		\ +	.ramp_delay	= S2MPS13_BUCK_RAMP_DELAY,		\ +	.vsel_reg	= S2MPS13_REG_B1OUT + (num) * 2 - 1,	\ +	.vsel_mask	= S2MPS14_BUCK_VSEL_MASK,		\ +	.enable_reg	= S2MPS13_REG_B1CTRL + (num) * 2 - 1,	\ +	.enable_mask	= S2MPS14_ENABLE_MASK			\ +} +  static const struct regulator_desc s2mps13_regulators[] = {  	regulator_desc_s2mps13_ldo(1,  MIN_800_MV,  STEP_12_5_MV, 0x00),  	regulator_desc_s2mps13_ldo(2,  MIN_1400_MV, STEP_50_MV,   0x0C), @@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = {  	regulator_desc_s2mps13_buck(4,  MIN_500_MV,  STEP_6_25_MV, 0x10),  	regulator_desc_s2mps13_buck(5,  MIN_500_MV,  STEP_6_25_MV, 0x10),  	regulator_desc_s2mps13_buck(6,  MIN_500_MV,  STEP_6_25_MV, 0x10), -	regulator_desc_s2mps13_buck(7,  MIN_500_MV,  STEP_6_25_MV, 0x10), -	regulator_desc_s2mps13_buck(8,  MIN_1000_MV, STEP_12_5_MV, 0x20), -	regulator_desc_s2mps13_buck(9,  MIN_1000_MV, STEP_12_5_MV, 0x20), -	regulator_desc_s2mps13_buck(10, MIN_500_MV,  STEP_6_25_MV, 0x10), +	regulator_desc_s2mps13_buck7(7,  MIN_500_MV,  STEP_6_25_MV, 0x10), +	regulator_desc_s2mps13_buck8_10(8,  MIN_1000_MV, STEP_12_5_MV, 0x20), +	regulator_desc_s2mps13_buck8_10(9,  MIN_1000_MV, STEP_12_5_MV, 0x20), +	regulator_desc_s2mps13_buck8_10(10, MIN_500_MV,  STEP_6_25_MV, 0x10),  };  static int s2mps14_regulator_enable(struct regulator_dev *rdev) diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index b5e7c4670205..89ac1d5083c6 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c @@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);  static const struct platform_device_id s5m_rtc_id[] = {  	{ "s5m-rtc",		S5M8767X },  	{ "s2mps14-rtc",	S2MPS14X }, +	{ },  };  static struct platform_driver s5m_rtc_driver = { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f407e3763432..642c77c76b84 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,  	QETH_DBF_TEXT(SETUP, 2, "idxanswr");  	card = CARD_FROM_CDEV(channel->ccwdev);  	iob = qeth_get_buffer(channel); +	if (!iob) +		return -ENOMEM;  	iob->callback = idx_reply_cb;  	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));  	channel->ccw.count = QETH_BUFSIZE; @@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,  	QETH_DBF_TEXT(SETUP, 2, "idxactch");  	iob = qeth_get_buffer(channel); +	if (!iob) +		return -ENOMEM;  	iob->callback = idx_reply_cb;  	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));  	channel->ccw.count = IDX_ACTIVATE_SIZE; @@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,  }  EXPORT_SYMBOL_GPL(qeth_prepare_control_data); +/** + * qeth_send_control_data() -	send control command to the card + * @card:			qeth_card structure pointer + * @len:			size of the command buffer + * @iob:			qeth_cmd_buffer pointer + * @reply_cb:			callback function pointer + * @cb_card:			pointer to the qeth_card structure + * @cb_reply:			pointer 
to the qeth_reply structure + * @cb_cmd:			pointer to the original iob for non-IPA + *				commands, or to the qeth_ipa_cmd structure + *				for IPA commands. + * @reply_param:		private pointer passed to the callback + * + * Returns the value of the `return_code' field of the response + * block returned from the hardware, or other error indication. + * A value of zero indicates successful execution of the command. + * + * The callback function is called one or more times, with cb_cmd + * pointing to the response returned by the hardware. It must return + * non-zero if more reply blocks are expected, and zero if the last + * or only reply block is received. The callback can retrieve the + * reply_param pointer from the 'param' field of struct qeth_reply. + */ +  int qeth_send_control_data(struct qeth_card *card, int len,  		struct qeth_cmd_buffer *iob, -		int (*reply_cb)(struct qeth_card *, struct qeth_reply *, -			unsigned long), +		int (*reply_cb)(struct qeth_card *cb_card, +				struct qeth_reply *cb_reply, +				unsigned long cb_cmd),  		void *reply_param)  {  	int rc; @@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,  	struct qeth_cmd_buffer *iob;  	struct qeth_ipa_cmd *cmd; -	iob = qeth_wait_for_buffer(&card->write); -	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); -	qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); +	iob = qeth_get_buffer(&card->write); +	if (iob) { +		cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); +		qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); +	} else { +		dev_warn(&card->gdev->dev, +			 "The qeth driver ran out of channel command buffers\n"); +		QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", +				 dev_name(&card->gdev->dev)); +	}  	return iob;  } @@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,  }  EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); +/** + * qeth_send_ipa_cmd() - send an IPA command + * + * See qeth_send_control_data() for an explanation of the arguments. 
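Under the callback contract documented above, a conforming reply_cb could look like the following; this is a sketch with toy stand-ins for the qeth types (the real structures live in qeth_core.h), mirroring only the return-value convention:

	/* Toy stand-ins, illustrative only. */
	struct toy_reply { void *param; };
	struct toy_ipa_cmd { int more_to_come; };

	/* Conforming reply_cb: returns non-zero while further reply
	 * blocks are expected, and zero once the last (or only) block
	 * arrived. reply->param carries the caller's reply_param. */
	static int toy_reply_cb(void *card, struct toy_reply *reply,
				unsigned long cb_cmd)
	{
		/* the kernel passes the command block as unsigned long */
		struct toy_ipa_cmd *cmd = (struct toy_ipa_cmd *)cb_cmd;
		int *blocks_seen = reply->param;

		(void)card;
		(*blocks_seen)++;
		return cmd->more_to_come ? 1 : 0;
	}

	int main(void)
	{
		struct toy_ipa_cmd last = { 0 };
		int seen = 0;
		struct toy_reply reply = { &seen };

		return toy_reply_cb(0, &reply, (unsigned long)&last);
	}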
+ */ +  int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,  		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,  			unsigned long), @@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)  	QETH_DBF_TEXT(SETUP, 2, "strtlan");  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); +	if (!iob) +		return -ENOMEM;  	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);  	return rc;  } @@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,  				     QETH_PROT_IPV4); -	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); -	cmd->data.setadapterparms.hdr.cmdlength = cmdlen; -	cmd->data.setadapterparms.hdr.command_code = command; -	cmd->data.setadapterparms.hdr.used_total = 1; -	cmd->data.setadapterparms.hdr.seq_no = 1; +	if (iob) { +		cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); +		cmd->data.setadapterparms.hdr.cmdlength = cmdlen; +		cmd->data.setadapterparms.hdr.command_code = command; +		cmd->data.setadapterparms.hdr.used_total = 1; +		cmd->data.setadapterparms.hdr.seq_no = 1; +	}  	return iob;  } @@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)  	QETH_CARD_TEXT(card, 3, "queryadp");  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,  				   sizeof(struct qeth_ipacmd_setadpparms)); +	if (!iob) +		return -ENOMEM;  	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);  	return rc;  } @@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)  	QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); +	if (!iob) +		return -ENOMEM;  	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);  	return rc;  } @@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,  		return -ENOMEDIUM;  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,  				sizeof(struct qeth_ipacmd_setadpparms_hdr)); +	if (!iob) +		return -ENOMEM;  	return qeth_send_ipa_cmd(card, iob,  				qeth_query_switch_attributes_cb, sw_info);  } @@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)  	QETH_DBF_TEXT(SETUP, 2, "qdiagass");  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.diagass.subcmd_len = 16;  	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; @@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)  	QETH_DBF_TEXT(SETUP, 2, "diagtrap");  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.diagass.subcmd_len = 80;  	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; @@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,  			sizeof(struct qeth_ipacmd_setadpparms)); +	if (!iob) +		return;  	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);  	cmd->data.setadapterparms.data.mode = mode;  	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); @@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,  				   sizeof(struct qeth_ipacmd_setadpparms)); +	if (!iob) +		
return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;  	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; @@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,  				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +  				   sizeof(struct qeth_set_access_ctrl)); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;  	access_ctrl_req->subcmd_code = isolation; @@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,  				   QETH_SNMP_SETADP_CMDLENGTH + req_len); +	if (!iob) { +		rc = -ENOMEM; +		goto out; +	}  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);  	rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, @@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)  		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))  			rc = -EFAULT;  	} - +out:  	kfree(ureq);  	kfree(qinfo.udata);  	return rc; @@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,  				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +  				   sizeof(struct qeth_query_oat)); +	if (!iob) { +		rc = -ENOMEM; +		goto out_free; +	}  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	oat_req = &cmd->data.setadapterparms.data.query_oat;  	oat_req->subcmd_code = oat_data.command; @@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,  		return -EOPNOTSUPP;  	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,  		sizeof(struct qeth_ipacmd_setadpparms_hdr)); +	if (!iob) +		return -ENOMEM;  	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,  					(void *)carrier_info);  } @@ -5060,11 +5133,23 @@ retriable:  	card->options.adp.supported_funcs = 0;  	card->options.sbp.supported_funcs = 0;  	card->info.diagass_support = 0; -	qeth_query_ipassists(card, QETH_PROT_IPV4); -	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) -		qeth_query_setadapterparms(card); -	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) -		qeth_query_setdiagass(card); +	rc = qeth_query_ipassists(card, QETH_PROT_IPV4); +	if (rc == -ENOMEM) +		goto out; +	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { +		rc = qeth_query_setadapterparms(card); +		if (rc < 0) { +			QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); +			goto out; +		} +	} +	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { +		rc = qeth_query_setdiagass(card); +		if (rc < 0) { +			QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); +			goto out; +		} +	}  	return 0;  out:  	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d02cd1a67943..ce87ae72edbd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);  static int qeth_l2_stop(struct net_device *);  static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);  static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, -			   enum qeth_ipa_cmds, -			
   int (*reply_cb) (struct qeth_card *, -					    struct qeth_reply*, -					    unsigned long)); +			   enum qeth_ipa_cmds);  static void qeth_l2_set_multicast_list(struct net_device *);  static int qeth_l2_recover(void *);  static void qeth_bridgeport_query_support(struct qeth_card *card); @@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)  	return ndev;  } -static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, -				struct qeth_reply *reply, -				unsigned long data) +static int qeth_setdel_makerc(struct qeth_card *card, int retcode)  { -	struct qeth_ipa_cmd *cmd; -	__u8 *mac; +	int rc; -	QETH_CARD_TEXT(card, 2, "L2Sgmacb"); -	cmd = (struct qeth_ipa_cmd *) data; -	mac = &cmd->data.setdelmac.mac[0]; -	/* MAC already registered, needed in couple/uncouple case */ -	if (cmd->hdr.return_code ==  IPA_RC_L2_DUP_MAC) { -		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n", -			  mac, QETH_CARD_IFNAME(card)); -		cmd->hdr.return_code = 0; +	if (retcode) +		QETH_CARD_TEXT_(card, 2, "err%04x", retcode); +	switch (retcode) { +	case IPA_RC_SUCCESS: +		rc = 0; +		break; +	case IPA_RC_L2_UNSUPPORTED_CMD: +		rc = -ENOSYS; +		break; +	case IPA_RC_L2_ADDR_TABLE_FULL: +		rc = -ENOSPC; +		break; +	case IPA_RC_L2_DUP_MAC: +	case IPA_RC_L2_DUP_LAYER3_MAC: +		rc = -EEXIST; +		break; +	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: +	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: +		rc = -EPERM; +		break; +	case IPA_RC_L2_MAC_NOT_FOUND: +		rc = -ENOENT; +		break; +	case -ENOMEM: +		rc = -ENOMEM; +		break; +	default: +		rc = -EIO; +		break;  	} -	if (cmd->hdr.return_code) -		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n", -			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); -	return 0; +	return rc;  }  static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)  { -	QETH_CARD_TEXT(card, 2, "L2Sgmac"); -	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, -					  qeth_l2_send_setgroupmac_cb); -} - -static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, -				struct qeth_reply *reply, -				unsigned long data) -{ -	struct qeth_ipa_cmd *cmd; -	__u8 *mac; +	int rc; -	QETH_CARD_TEXT(card, 2, "L2Dgmacb"); -	cmd = (struct qeth_ipa_cmd *) data; -	mac = &cmd->data.setdelmac.mac[0]; -	if (cmd->hdr.return_code) -		QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n", -			  mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); -	return 0; +	QETH_CARD_TEXT(card, 2, "L2Sgmac"); +	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, +					IPA_CMD_SETGMAC)); +	if (rc == -EEXIST) +		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n", +			mac, QETH_CARD_IFNAME(card)); +	else if (rc) +		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n", +			mac, QETH_CARD_IFNAME(card), rc); +	return rc;  }  static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)  { +	int rc; +  	QETH_CARD_TEXT(card, 2, "L2Dgmac"); -	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, -					  qeth_l2_send_delgroupmac_cb); +	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, +					IPA_CMD_DELGMAC)); +	if (rc) +		QETH_DBF_MESSAGE(2, +			"Could not delete group MAC %pM on %s: %d\n", +			mac, QETH_CARD_IFNAME(card), rc); +	return rc;  }  static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) @@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)  	mc->is_vmac = vmac;  	if (vmac) { -		rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, -					NULL); +	
	rc = qeth_setdel_makerc(card, +			qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));  	} else { -		rc = qeth_l2_send_setgroupmac(card, mac); +		rc = qeth_setdel_makerc(card, +			qeth_l2_send_setgroupmac(card, mac));  	}  	if (!rc) @@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)  		if (del) {  			if (mc->is_vmac)  				qeth_l2_send_setdelmac(card, mc->mc_addr, -					IPA_CMD_DELVMAC, NULL); +					IPA_CMD_DELVMAC);  			else  				qeth_l2_send_delgroupmac(card, mc->mc_addr);  		} @@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,  	QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);  	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.setdelvlan.vlan_id = i;  	return qeth_send_ipa_cmd(card, iob, @@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,  {  	struct qeth_card *card = dev->ml_priv;  	struct qeth_vlan_vid *id; +	int rc;  	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);  	if (!vid) @@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,  	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);  	if (id) {  		id->vid = vid; -		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); +		rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); +		if (rc) { +			kfree(id); +			return rc; +		}  		spin_lock_bh(&card->vlanlock);  		list_add_tail(&id->list, &card->vid_list);  		spin_unlock_bh(&card->vlanlock); @@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,  {  	struct qeth_vlan_vid *id, *tmpid = NULL;  	struct qeth_card *card = dev->ml_priv; +	int rc = 0;  	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);  	if (card->info.type == QETH_CARD_TYPE_OSM) { @@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,  	}  	spin_unlock_bh(&card->vlanlock);  	if (tmpid) { -		qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); +		rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);  		kfree(tmpid);  	}  	qeth_l2_set_multicast_list(card->dev); -	return 0; +	return rc;  }  static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) @@ -539,91 +560,62 @@ out:  }  static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, -			   enum qeth_ipa_cmds ipacmd, -			   int (*reply_cb) (struct qeth_card *, -					    struct qeth_reply*, -					    unsigned long)) +			   enum qeth_ipa_cmds ipacmd)  {  	struct qeth_ipa_cmd *cmd;  	struct qeth_cmd_buffer *iob;  	QETH_CARD_TEXT(card, 2, "L2sdmac");  	iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;  	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); -	return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); +	return qeth_send_ipa_cmd(card, iob, NULL, NULL);  } -static int qeth_l2_send_setmac_cb(struct qeth_card *card, -			   struct qeth_reply *reply, -			   unsigned long data) +static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)  { -	struct qeth_ipa_cmd *cmd; +	int rc; -	QETH_CARD_TEXT(card, 2, "L2Smaccb"); -	cmd = (struct qeth_ipa_cmd *) data; -	if (cmd->hdr.return_code) { -		QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); +	QETH_CARD_TEXT(card, 2, "L2Setmac"); +	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, +					IPA_CMD_SETVMAC)); +	if (rc == 0) { +		card->info.mac_bits |= 
QETH_LAYER2_MAC_REGISTERED; +		memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); +		dev_info(&card->gdev->dev, +			"MAC address %pM successfully registered on device %s\n", +			card->dev->dev_addr, card->dev->name); +	} else {  		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; -		switch (cmd->hdr.return_code) { -		case IPA_RC_L2_DUP_MAC: -		case IPA_RC_L2_DUP_LAYER3_MAC: +		switch (rc) { +		case -EEXIST:  			dev_warn(&card->gdev->dev, -				"MAC address %pM already exists\n", -				cmd->data.setdelmac.mac); +				"MAC address %pM already exists\n", mac);  			break; -		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: -		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: +		case -EPERM:  			dev_warn(&card->gdev->dev, -				"MAC address %pM is not authorized\n", -				cmd->data.setdelmac.mac); -			break; -		default: +				"MAC address %pM is not authorized\n", mac);  			break;  		} -	} else { -		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; -		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, -		       OSA_ADDR_LEN); -		dev_info(&card->gdev->dev, -			"MAC address %pM successfully registered on device %s\n", -			card->dev->dev_addr, card->dev->name); -	} -	return 0; -} - -static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) -{ -	QETH_CARD_TEXT(card, 2, "L2Setmac"); -	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, -					  qeth_l2_send_setmac_cb); -} - -static int qeth_l2_send_delmac_cb(struct qeth_card *card, -			   struct qeth_reply *reply, -			   unsigned long data) -{ -	struct qeth_ipa_cmd *cmd; - -	QETH_CARD_TEXT(card, 2, "L2Dmaccb"); -	cmd = (struct qeth_ipa_cmd *) data; -	if (cmd->hdr.return_code) { -		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); -		return 0;  	} -	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - -	return 0; +	return rc;  }  static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)  { +	int rc; +  	QETH_CARD_TEXT(card, 2, "L2Delmac");  	if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))  		return 0; -	return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, -					  qeth_l2_send_delmac_cb); +	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, +					IPA_CMD_DELVMAC)); +	if (rc == 0) +		card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; +	return rc;  }  static int qeth_l2_request_initial_mac(struct qeth_card *card) @@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)  		if (rc) {  			QETH_DBF_MESSAGE(2, "couldn't get MAC address on "  				"device %s: x%x\n", CARD_BUS_ID(card), rc); -			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); +			QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);  			return rc;  		}  		QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); @@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)  		return -ERESTARTSYS;  	}  	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); -	if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) +	if (!rc || (rc == -ENOENT))  		rc = qeth_l2_send_setmac(card, addr->sa_data);  	return rc ? 
-EINVAL : 0;  } @@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)  	recover_flag = card->state;  	rc = qeth_core_hardsetup_card(card);  	if (rc) { -		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); +		QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);  		rc = -ENODEV;  		goto out_remove;  	} @@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)  	QETH_CARD_TEXT(card, 2, "brqsuppo");  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); +	if (!iob) +		return;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.sbp.hdr.cmdlength =  		sizeof(struct qeth_ipacmd_sbp_hdr) + @@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,  	if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))  		return -EOPNOTSUPP;  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.sbp.hdr.cmdlength =  		sizeof(struct qeth_ipacmd_sbp_hdr); @@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,  	if (rc)  		return rc;  	rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); -	if (rc) -		return rc; -	return 0; +	return rc;  }  EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); @@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)  	if (!(card->options.sbp.supported_funcs & setcmd))  		return -EOPNOTSUPP;  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.sbp.hdr.cmdlength = cmdlength;  	cmd->data.sbp.hdr.command_code = setcmd; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 625227ad16ee..e2a0ee845399 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,  	QETH_CARD_TEXT(card, 4, "setdelmc");  	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);  	if (addr->proto == QETH_PROT_IPV6) @@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,  	QETH_CARD_TEXT_(card, 4, "flags%02X", flags);  	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	if (addr->proto == QETH_PROT_IPV6) {  		memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, @@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,  	QETH_CARD_TEXT(card, 4, "setroutg");  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.setrtg.type = (type);  	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); @@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(  	QETH_CARD_TEXT(card, 4, "getasscm");  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); -	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); -	cmd->data.setassparms.hdr.assist_no = ipa_func; -	cmd->data.setassparms.hdr.length = 8 + len; -	cmd->data.setassparms.hdr.command_code = cmd_code; -	cmd->data.setassparms.hdr.return_code = 0; -	cmd->data.setassparms.hdr.seq_no = 
0; +	if (iob) { +		cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); +		cmd->data.setassparms.hdr.assist_no = ipa_func; +		cmd->data.setassparms.hdr.length = 8 + len; +		cmd->data.setassparms.hdr.command_code = cmd_code; +		cmd->data.setassparms.hdr.return_code = 0; +		cmd->data.setassparms.hdr.seq_no = 0; +	}  	return iob;  } @@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,  	QETH_CARD_TEXT(card, 4, "simassp6");  	iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,  				       0, QETH_PROT_IPV6); +	if (!iob) +		return -ENOMEM;  	rc = qeth_l3_send_setassparms(card, iob, 0, 0,  				   qeth_l3_default_setassparms_cb, NULL);  	return rc; @@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,  		length = sizeof(__u32);  	iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,  				       length, QETH_PROT_IPV4); +	if (!iob) +		return -ENOMEM;  	rc = qeth_l3_send_setassparms(card, iob, length, data,  				   qeth_l3_default_setassparms_cb, NULL);  	return rc; @@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,  				     QETH_PROT_IPV6); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =  			card->info.unique_id; @@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,  				     QETH_PROT_IPV6); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =  			card->info.unique_id; @@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)  	QETH_DBF_TEXT(SETUP, 2, "diagtrac");  	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.diagass.subcmd_len = 16;  	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; @@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,  			IPA_CMD_ASS_ARP_QUERY_INFO,  			sizeof(struct qeth_arp_query_data) - sizeof(char),  			prot); +	if (!iob) +		return -ENOMEM;  	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);  	cmd->data.setassparms.data.query_arp.request_bits = 0x000F;  	cmd->data.setassparms.data.query_arp.reply_bits = 0; @@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,  				       IPA_CMD_ASS_ARP_ADD_ENTRY,  				       sizeof(struct qeth_arp_cache_entry),  				       QETH_PROT_IPV4); +	if (!iob) +		return -ENOMEM;  	rc = qeth_l3_send_setassparms(card, iob,  				   sizeof(struct qeth_arp_cache_entry),  				   (unsigned long) entry, @@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,  				       IPA_CMD_ASS_ARP_REMOVE_ENTRY,  				       12,  				       QETH_PROT_IPV4); +	if (!iob) +		return -ENOMEM;  	rc = qeth_l3_send_setassparms(card, iob,  				   12, (unsigned long)buf,  				   qeth_l3_default_setassparms_cb, NULL); @@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {  static int qeth_l3_setup_netdev(struct qeth_card *card)  { +	int rc; +  	if (card->info.type == QETH_CARD_TYPE_OSD ||  	    card->info.type == QETH_CARD_TYPE_OSX) {  		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || @@ -3293,7 +3319,9 @@ 
static int qeth_l3_setup_netdev(struct qeth_card *card)  			return -ENODEV;  		card->dev->flags |= IFF_NOARP;  		card->dev->netdev_ops = &qeth_l3_netdev_ops; -		qeth_l3_iqd_read_initial_mac(card); +		rc = qeth_l3_iqd_read_initial_mac(card); +		if (rc) +			return rc;  		if (card->options.hsuid[0])  			memcpy(card->dev->perm_addr, card->options.hsuid, 9);  	} else @@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)  	recover_flag = card->state;  	rc = qeth_core_hardsetup_card(card);  	if (rc) { -		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); +		QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);  		rc = -ENODEV;  		goto out_remove;  	} @@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)  contin:  	rc = qeth_l3_setadapter_parms(card);  	if (rc) -		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); +		QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);  	if (!card->options.sniffer) {  		rc = qeth_l3_start_ipassists(card);  		if (rc) { @@ -3410,10 +3438,10 @@ contin:  		}  		rc = qeth_l3_setrouting_v4(card);  		if (rc) -			QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); +			QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);  		rc = qeth_l3_setrouting_v6(card);  		if (rc) -			QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); +			QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);  	}  	netif_tx_disable(card->dev); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index e02885451425..9b3829931f40 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)  		return -ENXIO;  	if (!get_device(&sdev->sdev_gendev))  		return -ENXIO; -	/* We can fail this if we're doing SCSI operations +	/* We can fail try_module_get if we're doing SCSI operations  	 * from module exit (like cache flush) */ -	try_module_get(sdev->host->hostt->module); +	__module_get(sdev->host->hostt->module);  	return 0;  } @@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);   */  void scsi_device_put(struct scsi_device *sdev)  { -#ifdef CONFIG_MODULE_UNLOAD -	struct module *module = sdev->host->hostt->module; - -	/* The module refcount will be zero if scsi_device_get() -	 * was called from a module removal routine */ -	if (module && module_refcount(module) != 0) -		module_put(module); -#endif +	module_put(sdev->host->hostt->module);  	put_device(&sdev->sdev_gendev);  }  EXPORT_SYMBOL(scsi_device_put); diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 7281316a5ecb..a67d37c7e3c0 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws)  	iounmap(clk_reg);  	dws->num_cs = 16; -	dws->fifo_len = 40;	/* FIFO has 40 words buffer */  #ifdef CONFIG_SPI_DW_MID_DMA  	dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index d0d5542efc06..8edcd1b84562 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)  	if (!dws->fifo_len) {  		u32 fifo; -		for (fifo = 2; fifo <= 257; fifo++) { +		for (fifo = 2; fifo <= 256; fifo++) {  			dw_writew(dws, DW_SPI_TXFLTR, fifo);  			if (fifo != dw_readw(dws, DW_SPI_TXFLTR))  				break;  		} -		dws->fifo_len = (fifo == 257) ? 0 : fifo; +		dws->fifo_len = (fifo == 2) ? 
0 : fifo - 1;  		dw_writew(dws, DW_SPI_TXFLTR, 0);  	}  } @@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)  	if (dws->dma_ops && dws->dma_ops->dma_init) {  		ret = dws->dma_ops->dma_init(dws);  		if (ret) { -			dev_warn(&master->dev, "DMA init failed\n"); +			dev_warn(dev, "DMA init failed\n");  			dws->dma_inited = 0;  		}  	} diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 05c623cfb078..23822e7df6c1 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data)  			cs_deassert(drv_data);  	} -	spi_finalize_current_message(drv_data->master);  	drv_data->cur_chip = NULL; +	spi_finalize_current_message(drv_data->master);  }  static void reset_sccr1(struct driver_data *drv_data) diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 96a5fc0878d8..3ab7a21445fc 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -82,7 +82,7 @@ struct sh_msiof_spi_priv {  #define MDR1_SYNCMD_LR	 0x30000000 /*   L/R mode */  #define MDR1_SYNCAC_SHIFT	 25 /* Sync Polarity (1 = Active-low) */  #define MDR1_BITLSB_SHIFT	 24 /* MSB/LSB First (1 = LSB first) */ -#define MDR1_FLD_MASK	 0x000000c0 /* Frame Sync Signal Interval (0-3) */ +#define MDR1_FLD_MASK	 0x0000000c /* Frame Sync Signal Interval (0-3) */  #define MDR1_FLD_SHIFT		  2  #define MDR1_XXSTP	 0x00000001 /* Transmission/Reception Stop on FIFO */  /* TMDR1 */ diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 930f6010203e..65d610abe06e 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)  		return 0;  	} -	if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { +	if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {  		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);  		return -EFAULT;  	} diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 10bf07280f4a..294692ff83b1 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,   */  ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)  { +	struct inode *inode = iocb->ki_filp->f_mapping->host; + +	/* we only support swap file calling nfs_direct_IO */ +	if (!IS_SWAPFILE(inode)) +		return 0; +  #ifndef CONFIG_NFS_SWAP  	dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",  			iocb->ki_filp, (long long) pos, iter->nr_segs); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 4bffe637ea32..2211f6ba8736 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st  	nfs_attr_check_mountpoint(sb, fattr); -	if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) && -	    !nfs_attr_use_mounted_on_fileid(fattr)) +	if (nfs_attr_use_mounted_on_fileid(fattr)) +		fattr->fileid = fattr->mounted_on_fileid; +	else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)  		goto out_no_inode;  	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)  		goto out_no_inode; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index efaa31c70fbe..b6f34bfa6fe8 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)  	    (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&  	  
   ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))  		return 0; - -	fattr->fileid = fattr->mounted_on_fileid;  	return 1;  } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 953daa44a282..706ad10b8186 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,  			prev = pos;  			status = nfs_wait_client_init_complete(pos); -			if (status == 0) { +			if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {  				nfs4_schedule_lease_recovery(pos);  				status = nfs4_wait_clnt_recover(pos);  			} diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h index ce5dda8958fe..b1fd675fa36f 100644 --- a/include/linux/mfd/samsung/s2mps13.h +++ b/include/linux/mfd/samsung/s2mps13.h @@ -59,6 +59,7 @@ enum s2mps13_reg {  	S2MPS13_REG_B6CTRL,  	S2MPS13_REG_B6OUT,  	S2MPS13_REG_B7CTRL, +	S2MPS13_REG_B7SW,  	S2MPS13_REG_B7OUT,  	S2MPS13_REG_B8CTRL,  	S2MPS13_REG_B8OUT, @@ -102,6 +103,7 @@ enum s2mps13_reg {  	S2MPS13_REG_L26CTRL,  	S2MPS13_REG_L27CTRL,  	S2MPS13_REG_L28CTRL, +	S2MPS13_REG_L29CTRL,  	S2MPS13_REG_L30CTRL,  	S2MPS13_REG_L31CTRL,  	S2MPS13_REG_L32CTRL, diff --git a/include/linux/mm.h b/include/linux/mm.h index 80fc92a49649..dd5ea3016fc4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page)  #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */  #define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */  #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */ +#define VM_FAULT_SIGSEGV 0x0040  #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */  #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */ @@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page)  #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ -#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ -			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE) +#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ +			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ +			 VM_FAULT_FALLBACK)  /* Encode hstate index for a hwpoisoned large page */  #define VM_FAULT_SET_HINDEX(x) ((x) << 12) diff --git a/include/linux/oom.h b/include/linux/oom.h index 853698c721f7..76200984d1e2 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -85,11 +85,6 @@ static inline void oom_killer_enable(void)  	oom_killer_disabled = false;  } -static inline bool oom_gfp_allowed(gfp_t gfp_mask) -{ -	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY); -} -  extern struct task_struct *find_lock_task_mm(struct task_struct *p);  static inline bool task_will_free_mem(struct task_struct *task) diff --git a/include/linux/printk.h b/include/linux/printk.h index c8f170324e64..4d5bf5726578 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -10,9 +10,6 @@  extern const char linux_banner[];  extern const char linux_proc_banner[]; -extern char *log_buf_addr_get(void); -extern u32 log_buf_len_get(void); -  static inline int printk_get_level(const char *buffer)  {  	if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { @@ -163,6 +160,8 @@ extern int kptr_restrict;  extern void wake_up_klogd(void); +char *log_buf_addr_get(void); +u32 log_buf_len_get(void);  void log_buf_kexec_setup(void);  void __init setup_log_buf(int early);  void dump_stack_set_arch_desc(const char *fmt, 
...); @@ -198,6 +197,16 @@ static inline void wake_up_klogd(void)  {  } +static inline char *log_buf_addr_get(void) +{ +	return NULL; +} + +static inline u32 log_buf_len_get(void) +{ +	return 0; +} +  static inline void log_buf_kexec_setup(void)  {  } diff --git a/include/net/ip.h b/include/net/ip.h index 0bb620702929..f7cbd703d15d 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -39,11 +39,12 @@ struct inet_skb_parm {  	struct ip_options	opt;		/* Compiled IP options		*/  	unsigned char		flags; -#define IPSKB_FORWARDED		1 -#define IPSKB_XFRM_TUNNEL_SIZE	2 -#define IPSKB_XFRM_TRANSFORMED	4 -#define IPSKB_FRAG_COMPLETE	8 -#define IPSKB_REROUTED		16 +#define IPSKB_FORWARDED		BIT(0) +#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1) +#define IPSKB_XFRM_TRANSFORMED	BIT(2) +#define IPSKB_FRAG_COMPLETE	BIT(3) +#define IPSKB_REROUTED		BIT(4) +#define IPSKB_DOREDIRECT	BIT(5)  	u16			frag_max_size;  }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 088ac0b1b106..536edc2be307 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)  	int ufd = attr->map_fd;  	struct fd f = fdget(ufd);  	struct bpf_map *map; -	void *key, *value; +	void *key, *value, *ptr;  	int err;  	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) @@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)  	if (copy_from_user(key, ukey, map->key_size) != 0)  		goto free_key; -	err = -ENOENT; -	rcu_read_lock(); -	value = map->ops->map_lookup_elem(map, key); +	err = -ENOMEM; +	value = kmalloc(map->value_size, GFP_USER);  	if (!value) -		goto err_unlock; +		goto free_key; + +	rcu_read_lock(); +	ptr = map->ops->map_lookup_elem(map, key); +	if (ptr) +		memcpy(value, ptr, map->value_size); +	rcu_read_unlock(); + +	err = -ENOENT; +	if (!ptr) +		goto free_value;  	err = -EFAULT;  	if (copy_to_user(uvalue, value, map->value_size) != 0) -		goto err_unlock; +		goto free_value;  	err = 0; -err_unlock: -	rcu_read_unlock(); +free_value: +	kfree(value);  free_key:  	kfree(key);  err_put: diff --git a/kernel/cgroup.c b/kernel/cgroup.c index bb263d0caab3..04cfe8ace520 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)  	 *  	 * And don't kill the default root.  	 */ -	if (css_has_online_children(&root->cgrp.self) || +	if (!list_empty(&root->cgrp.self.children) ||  	    root == &cgrp_dfl_root)  		cgroup_put(&root->cgrp);  	else @@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,  			return -ENOMEM;  		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))  			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; -		if (ret & VM_FAULT_SIGBUS) +		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))  			return -EFAULT;  		BUG();  	} @@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,  			return -ENOMEM;  		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))  			return -EHWPOISON; -		if (ret & VM_FAULT_SIGBUS) +		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))  			return -EFAULT;  		BUG();  	} @@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)  		else  			ret = VM_FAULT_WRITE;  		put_page(page); -	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); +	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));  	/*  	 * We must loop because handle_mm_fault() may back out if there's  	 * any difficulty e.g. 
if pte accessed bit gets updated concurrently. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 851924fa5170..683b4782019b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1477,9 +1477,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)  	pr_info("Task in ");  	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); -	pr_info(" killed as a result of limit of "); +	pr_cont(" killed as a result of limit of ");  	pr_cont_cgroup_path(memcg->css.cgroup); -	pr_info("\n"); +	pr_cont("\n");  	rcu_read_unlock(); diff --git a/mm/memory.c b/mm/memory.c index 54f3a9b00956..2c3536cc6c63 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2632,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,  	/* Check if we need to add a guard page to the stack */  	if (check_stack_guard_page(vma, address) < 0) -		return VM_FAULT_SIGBUS; +		return VM_FAULT_SIGSEGV;  	/* Use the zero-page for reads */  	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7633c503a116..8e20f9c2fa5a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2332,12 +2332,21 @@ static inline struct page *  __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,  	struct zonelist *zonelist, enum zone_type high_zoneidx,  	nodemask_t *nodemask, struct zone *preferred_zone, -	int classzone_idx, int migratetype) +	int classzone_idx, int migratetype, unsigned long *did_some_progress)  {  	struct page *page; -	/* Acquire the per-zone oom lock for each zone */ +	*did_some_progress = 0; + +	if (oom_killer_disabled) +		return NULL; + +	/* +	 * Acquire the per-zone oom lock for each zone.  If that +	 * fails, somebody else is making progress for us. +	 */  	if (!oom_zonelist_trylock(zonelist, gfp_mask)) { +		*did_some_progress = 1;  		schedule_timeout_uninterruptible(1);  		return NULL;  	} @@ -2363,12 +2372,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,  		goto out;  	if (!(gfp_mask & __GFP_NOFAIL)) { +		/* Coredumps can quickly deplete all memory reserves */ +		if (current->flags & PF_DUMPCORE) +			goto out;  		/* The OOM killer will not help higher order allocs */  		if (order > PAGE_ALLOC_COSTLY_ORDER)  			goto out;  		/* The OOM killer does not needlessly kill tasks for lowmem */  		if (high_zoneidx < ZONE_NORMAL)  			goto out; +		/* The OOM killer does not compensate for light reclaim */ +		if (!(gfp_mask & __GFP_FS)) +			goto out;  		/*  		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.  		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM. @@ -2381,7 +2396,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,  	}  	/* Exhausted what can be done so it's blamo time */  	out_of_memory(zonelist, gfp_mask, order, nodemask, false); - +	*did_some_progress = 1;  out:  	oom_zonelist_unlock(zonelist, gfp_mask);  	return page; @@ -2658,7 +2673,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,  	    (gfp_mask & GFP_THISNODE) == GFP_THISNODE)  		goto nopage; -restart: +retry:  	if (!(gfp_mask & __GFP_NO_KSWAPD))  		wake_all_kswapds(order, zonelist, high_zoneidx,  				preferred_zone, nodemask); @@ -2681,7 +2696,6 @@ restart:  		classzone_idx = zonelist_zone_idx(preferred_zoneref);  	} -rebalance:  	/* This is the last chance, in general, before the goto nopage. 
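The OOM-eligibility tests consolidated into __alloc_pages_may_oom() above amount to a single predicate; a standalone sketch follows, with illustrative flag bits rather than the kernel's:

	#include <stdbool.h>

	#define TOY_GFP_FS	 (1u << 0)	/* illustrative bits */
	#define TOY_GFP_NOFAIL	 (1u << 1)
	#define TOY_PF_DUMPCORE	 (1u << 2)
	#define TOY_COSTLY_ORDER 3

	/* Unless the caller insists with __GFP_NOFAIL, skip the OOM
	 * killer for coredumping tasks, costly orders, lowmem requests,
	 * and light (non-__GFP_FS) reclaim, mirroring the checks above. */
	static bool may_oom_kill(unsigned int gfp, unsigned int task_flags,
				 int order, bool lowmem)
	{
		if (gfp & TOY_GFP_NOFAIL)
			return true;
		if (task_flags & TOY_PF_DUMPCORE)
			return false;
		if (order > TOY_COSTLY_ORDER)
			return false;
		if (lowmem)
			return false;
		return gfp & TOY_GFP_FS;
	}

	int main(void)
	{
		return may_oom_kill(TOY_GFP_FS, 0, 0, false) ? 0 : 1;
	}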
*/  	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,  			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, @@ -2788,54 +2802,28 @@ rebalance:  	if (page)  		goto got_pg; -	/* -	 * If we failed to make any progress reclaiming, then we are -	 * running out of options and have to consider going OOM -	 */ -	if (!did_some_progress) { -		if (oom_gfp_allowed(gfp_mask)) { -			if (oom_killer_disabled) -				goto nopage; -			/* Coredumps can quickly deplete all memory reserves */ -			if ((current->flags & PF_DUMPCORE) && -			    !(gfp_mask & __GFP_NOFAIL)) -				goto nopage; -			page = __alloc_pages_may_oom(gfp_mask, order, -					zonelist, high_zoneidx, -					nodemask, preferred_zone, -					classzone_idx, migratetype); -			if (page) -				goto got_pg; - -			if (!(gfp_mask & __GFP_NOFAIL)) { -				/* -				 * The oom killer is not called for high-order -				 * allocations that may fail, so if no progress -				 * is being made, there are no other options and -				 * retrying is unlikely to help. -				 */ -				if (order > PAGE_ALLOC_COSTLY_ORDER) -					goto nopage; -				/* -				 * The oom killer is not called for lowmem -				 * allocations to prevent needlessly killing -				 * innocent tasks. -				 */ -				if (high_zoneidx < ZONE_NORMAL) -					goto nopage; -			} - -			goto restart; -		} -	} -  	/* Check if we should retry the allocation */  	pages_reclaimed += did_some_progress;  	if (should_alloc_retry(gfp_mask, order, did_some_progress,  						pages_reclaimed)) { +		/* +		 * If we fail to make progress by freeing individual +		 * pages, but the allocation wants us to keep going, +		 * start OOM killing tasks. +		 */ +		if (!did_some_progress) { +			page = __alloc_pages_may_oom(gfp_mask, order, zonelist, +						high_zoneidx, nodemask, +						preferred_zone, classzone_idx, +						migratetype,&did_some_progress); +			if (page) +				goto got_pg; +			if (!did_some_progress) +				goto nopage; +		}  		/* Wait for some write requests to complete then retry */  		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); -		goto rebalance; +		goto retry;  	} else {  		/*  		 * High-order allocations do not necessarily loop after diff --git a/mm/vmscan.c b/mm/vmscan.c index ab2505c3ef54..dcd90c891d8e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2656,7 +2656,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,  	 * should make reasonable progress.  	 */  	for_each_zone_zonelist_nodemask(zone, z, zonelist, -					gfp_mask, nodemask) { +					gfp_zone(gfp_mask), nodemask) {  		if (zone_idx(zone) > ZONE_NORMAL)  			continue; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 515569ffde8a..589aafd01fc5 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)  	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",  			ds->index, ds->pd->sw_addr);  	ds->slave_mii_bus->parent = ds->master_dev; +	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;  } diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 3a83ce5efa80..787b3c294ce6 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)  	 *	We now generate an ICMP HOST REDIRECT giving the route  	 *	we calculated.  	 
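
/*
 * Editor's note (annotation, not part of the diff): the page_alloc.c
 * rework above folds OOM killing into the normal retry loop and lets
 * __alloc_pages_may_oom() report through an out-parameter whether
 * anything happened that makes retrying worthwhile.  A freestanding toy
 * with the same calling convention; reclaim_some() is invented.
 */
#include <stdio.h>
#include <stdlib.h>

/* Pretend reclaim step: reports progress through an out-parameter. */
static void reclaim_some(unsigned long *did_some_progress)
{
	*did_some_progress = 0;	/* this round, nothing could be freed */
}

static void *alloc_with_retry(size_t size, int max_retries)
{
	unsigned long progress;

	for (int i = 0; i < max_retries; i++) {
		void *p = malloc(size);
		if (p)
			return p;
		reclaim_some(&progress);
		if (!progress)		/* no progress: retrying is hopeless */
			return NULL;	/* the kernel's "goto nopage" */
	}
	return NULL;
}

int main(void)
{
	void *p = alloc_with_retry(64, 3);

	printf("%s\n", p ? "allocated" : "gave up");
	free(p);
	return 0;
}
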
*/ -	if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb)) +	if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr && +	    !skb_sec_path(skb))  		ip_rt_send_redirect(skb);  	skb->priority = rt_tos2priority(iph->tos); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index c0d82f78d364..2a3720fb5a5f 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb)  	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));  	if (sk != NULL) { +		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); +  		pr_debug("rcv on socket %p\n", sk); -		ping_queue_rcv_skb(sk, skb_get(skb)); +		if (skb2) +			ping_queue_rcv_skb(sk, skb2);  		sock_put(sk);  		return true;  	} diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6a2155b02602..d58dd0ec3e53 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb,  	do_cache = res->fi && !itag;  	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && +	    skb->protocol == htons(ETH_P_IP) &&  	    (IN_DEV_SHARED_MEDIA(out_dev) || -	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { -		flags |= RTCF_DOREDIRECT; -		do_cache = false; -	} +	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) +		IPCB(skb)->flags |= IPSKB_DOREDIRECT;  	if (skb->protocol != htons(ETH_P_IP)) {  		/* Not IP (i.e. ARP). Do not create route, if it is @@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,  	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;  	if (rt->rt_flags & RTCF_NOTIFY)  		r->rtm_flags |= RTM_F_NOTIFY; +	if (IPCB(skb)->flags & IPSKB_DOREDIRECT) +		r->rtm_flags |= RTCF_DOREDIRECT;  	if (nla_put_be32(skb, RTA_DST, dst))  		goto nla_put_failure; diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 7927db0a9279..4a000f1dd757 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin  	s_slot = cb->args[0];  	num = s_num = cb->args[1]; -	for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) { +	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {  		struct sock *sk;  		struct hlist_nulls_node *node;  		struct udp_hslot *hslot = &table->hash[slot]; +		num = 0; +  		if (hlist_nulls_empty(&hslot->head))  			continue; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index b2d1838897c9..f1c6d5e98322 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,  	return 0;  } +static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, +			  struct net *net) +{ +	if (atomic_read(&rt->rt6i_ref) != 1) { +		/* This route is used as dummy address holder in some split +		 * nodes. It is not leaked, but it still holds other resources, +		 * which must be released in time. So, scan ascendant nodes +		 * and replace dummy references to this route with references +		 * to still alive ones. +		 */ +		while (fn) { +			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { +				fn->leaf = fib6_find_prefix(net, fn); +				atomic_inc(&fn->leaf->rt6i_ref); +				rt6_release(rt); +			} +			fn = fn->parent; +		} +		/* No more references are possible at this point. */ +		BUG_ON(atomic_read(&rt->rt6i_ref) != 1); +	} +} +  /*   *	Insert routing information in a node.   
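
/*
 * Editor's note (annotation, not part of the diff): the udp_diag.c hunk
 * above fixes a resumable-dump bug - the per-slot position must restart
 * at zero for every slot, including the slot the dump resumes in, so
 * that the "num < s_num" test really skips entries already reported.
 * Toy version of the fixed loop shape; the table layout is invented.
 */
#include <stdio.h>

#define SLOTS		3
#define PER_SLOT	4

static void dump_from(int s_slot, int s_num)
{
	for (int slot = s_slot, num; slot < SLOTS; s_num = 0, slot++) {
		num = 0;	/* reset for every slot, as in the fix */
		for (int i = 0; i < PER_SLOT; i++, num++) {
			if (num < s_num)
				continue;	/* already dumped earlier */
			printf("slot %d entry %d\n", slot, i);
		}
	}
}

int main(void)
{
	dump_from(1, 2);	/* resume mid-table: slot 1, entry 2 */
	return 0;
}
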
*/ @@ -807,11 +830,12 @@ add:  		rt->dst.rt6_next = iter->dst.rt6_next;  		atomic_inc(&rt->rt6i_ref);  		inet6_rt_notify(RTM_NEWROUTE, rt, info); -		rt6_release(iter);  		if (!(fn->fn_flags & RTN_RTINFO)) {  			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;  			fn->fn_flags |= RTN_RTINFO;  		} +		fib6_purge_rt(iter, fn, info->nl_net); +		rt6_release(iter);  	}  	return 0; @@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,  		fn = fib6_repair_tree(net, fn);  	} -	if (atomic_read(&rt->rt6i_ref) != 1) { -		/* This route is used as dummy address holder in some split -		 * nodes. It is not leaked, but it still holds other resources, -		 * which must be released in time. So, scan ascendant nodes -		 * and replace dummy references to this route with references -		 * to still alive ones. -		 */ -		while (fn) { -			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { -				fn->leaf = fib6_find_prefix(net, fn); -				atomic_inc(&fn->leaf->rt6i_ref); -				rt6_release(rt); -			} -			fn = fn->parent; -		} -		/* No more references are possible at this point. */ -		BUG_ON(atomic_read(&rt->rt6i_ref) != 1); -	} +	fib6_purge_rt(rt, fn, net);  	inet6_rt_notify(RTM_DELROUTE, rt, info);  	rt6_release(rt); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 166e33bed222..495965358d22 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1242,12 +1242,16 @@ restart:  		rt = net->ipv6.ip6_null_entry;  	else if (rt->dst.error) {  		rt = net->ipv6.ip6_null_entry; -	} else if (rt == net->ipv6.ip6_null_entry) { +		goto out; +	} + +	if (rt == net->ipv6.ip6_null_entry) {  		fn = fib6_backtrack(fn, &fl6->saddr);  		if (fn)  			goto restart;  	} +out:  	dst_hold(&rt->dst);  	read_unlock_bh(&table->tb6_lock); diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 5f983644373a..48bf5a06847b 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)  {  	struct flowi6 *fl6 = &fl->u.ip6;  	int onlyproto = 0; -	u16 offset = skb_network_header_len(skb);  	const struct ipv6hdr *hdr = ipv6_hdr(skb); +	u16 offset = sizeof(*hdr);  	struct ipv6_opt_hdr *exthdr;  	const unsigned char *nh = skb_network_header(skb); -	u8 nexthdr = nh[IP6CB(skb)->nhoff]; +	u16 nhoff = IP6CB(skb)->nhoff;  	int oif = 0; +	u8 nexthdr; + +	if (!nhoff) +		nhoff = offsetof(struct ipv6hdr, nexthdr); + +	nexthdr = nh[nhoff];  	if (skb_dst(skb))  		oif = skb_dst(skb)->dev->ifindex; diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c index 612a5ddaf93b..799bafc2af39 100644 --- a/net/llc/sysctl_net_llc.c +++ b/net/llc/sysctl_net_llc.c @@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {  	{  		.procname	= "ack",  		.data		= &sysctl_llc2_ack_timeout, -		.maxlen		= sizeof(long), +		.maxlen		= sizeof(sysctl_llc2_ack_timeout),  		.mode		= 0644,  		.proc_handler   = proc_dointvec_jiffies,  	},  	{  		.procname	= "busy",  		.data		= &sysctl_llc2_busy_timeout, -		.maxlen		= sizeof(long), +		.maxlen		= sizeof(sysctl_llc2_busy_timeout),  		.mode		= 0644,  		.proc_handler   = proc_dointvec_jiffies,  	},  	{  		.procname	= "p",  		.data		= &sysctl_llc2_p_timeout, -		.maxlen		= sizeof(long), +		.maxlen		= sizeof(sysctl_llc2_p_timeout),  		.mode		= 0644,  		.proc_handler   = proc_dointvec_jiffies,  	},  	{  		.procname	= "rej",  		.data		= &sysctl_llc2_rej_timeout, -		.maxlen		= sizeof(long), +		.maxlen		= sizeof(sysctl_llc2_rej_timeout),  		.mode		= 0644,  		.proc_handler   = 
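
/*
 * Editor's note (annotation, not part of the diff): the llc sysctl hunk
 * below matters because the backing variables are ints; with .maxlen set
 * to sizeof(long), proc_dointvec_jiffies() would access 8 bytes through
 * a 4-byte object on 64-bit.  The sizing rule, reduced to a toy struct
 * with invented names: always take sizeof the object itself.
 */
#include <stdio.h>

static int sysctl_demo_timeout = 5;	/* an int, not a long */

struct ctl_entry {
	void	*data;
	size_t	maxlen;
};

int main(void)
{
	struct ctl_entry e = {
		.data	= &sysctl_demo_timeout,
		.maxlen	= sizeof(sysctl_demo_timeout),	/* not sizeof(long) */
	};

	printf("maxlen=%zu, sizeof(long)=%zu\n", e.maxlen, sizeof(long));
	return 0;
}
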
proc_dointvec_jiffies,  	}, diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 4c5192e0d66c..4a95fe3cffbc 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)  		}  	} -	/* tear down aggregation sessions and remove STAs */ -	mutex_lock(&local->sta_mtx); -	list_for_each_entry(sta, &local->sta_list, list) { -		if (sta->uploaded) { -			enum ieee80211_sta_state state; - -			state = sta->sta_state; -			for (; state > IEEE80211_STA_NOTEXIST; state--) -				WARN_ON(drv_sta_state(local, sta->sdata, sta, -						      state, state - 1)); -		} -	} -	mutex_unlock(&local->sta_mtx); -  	/* remove all interfaces that were created in the driver */  	list_for_each_entry(sdata, &local->interfaces, list) {  		if (!ieee80211_sdata_running(sdata)) @@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)  		case NL80211_IFTYPE_STATION:  			ieee80211_mgd_quiesce(sdata);  			break; +		case NL80211_IFTYPE_WDS: +			/* tear down aggregation sessions and remove STAs */ +			mutex_lock(&local->sta_mtx); +			sta = sdata->u.wds.sta; +			if (sta && sta->uploaded) { +				enum ieee80211_sta_state state; + +				state = sta->sta_state; +				for (; state > IEEE80211_STA_NOTEXIST; state--) +					WARN_ON(drv_sta_state(local, sta->sdata, +							      sta, state, +							      state - 1)); +			} +			mutex_unlock(&local->sta_mtx); +			break;  		default:  			break;  		} diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 683b10f46505..d69ca513848e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,  	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)  		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;  	else if (rate) -		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; +		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;  	else  		channel_flags |= IEEE80211_CHAN_2GHZ;  	put_unaligned_le16(channel_flags, pos); diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 84c8219c3e1c..f59adf8a4cd7 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,  	}  	bpf_size = bpf_len * sizeof(*bpf_ops); +	if (bpf_size != nla_len(tb[TCA_BPF_OPS])) { +		ret = -EINVAL; +		goto errout; +	} +  	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);  	if (bpf_ops == NULL) {  		ret = -ENOMEM; @@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,  				   struct cls_bpf_head *head)  {  	unsigned int i = 0x80000000; +	u32 handle;  	do {  		if (++head->hgen == 0x7FFFFFFF)  			head->hgen = 1;  	} while (--i > 0 && cls_bpf_get(tp, head->hgen)); -	if (i == 0) + +	if (unlikely(i == 0)) {  		pr_err("Insufficient number of handles\n"); +		handle = 0; +	} else { +		handle = head->hgen; +	} -	return i; +	return handle;  }  static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index f791edd64d6c..26d06dbcc1c8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,  	asoc->peer.peer_hmacs = new->peer.peer_hmacs;  	new->peer.peer_hmacs = NULL; -	sctp_auth_key_put(asoc->asoc_shared_key);  	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);  } diff --git a/net/socket.c b/net/socket.c index a2c33a4dc7ba..418795caa897 100644 --- 
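
/*
 * Editor's note (annotation, not part of the diff): the cls_bpf.c hunk
 * above adds a cross-check before copying a user-supplied ops array -
 * the element count claimed in one netlink attribute must match the
 * byte length of the payload attribute.  Freestanding version of that
 * check; struct op and the names are invented.
 */
#include <stdint.h>
#include <stdio.h>

struct op { uint16_t code; uint8_t jt, jf; uint32_t k; };

static int validate_ops(uint16_t claimed_count, size_t payload_bytes)
{
	if ((size_t)claimed_count * sizeof(struct op) != payload_bytes)
		return -1;	/* inconsistent: reject before allocating */
	return 0;
}

int main(void)
{
	struct op ops[2] = { { 0 } };

	printf("%d\n", validate_ops(2, sizeof(ops)));	/* 0: consistent */
	printf("%d\n", validate_ops(3, sizeof(ops)));	/* -1: mismatch */
	return 0;
}
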
a/net/socket.c +++ b/net/socket.c @@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,  static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,  					 struct sock_iocb *siocb)  { -	if (!is_sync_kiocb(iocb)) -		BUG(); -  	siocb->kiocb = iocb;  	iocb->private = siocb;  	return siocb; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7ca4b5133123..8887c6e5fca8 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)  	if (!rdev->ops->get_key)  		return -EOPNOTSUPP; +	if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) +		return -ENOENT; +  	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);  	if (!msg)  		return -ENOMEM; @@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)  	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))  		goto nla_put_failure; -	if (pairwise && mac_addr && -	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) -		return -ENOENT; -  	err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,  			   get_key_callback); @@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)  	wdev_lock(dev->ieee80211_ptr);  	err = nl80211_key_allowed(dev->ieee80211_ptr); -	if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr && +	if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&  	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))  		err = -ENOENT; diff --git a/net/wireless/util.c b/net/wireless/util.c index d0ac795445b7..5488c3662f7d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)  		goto out;  	} +	if (ieee80211_is_mgmt(fc)) { +		if (ieee80211_has_order(fc)) +			hdrlen += IEEE80211_HT_CTL_LEN; +		goto out; +	} +  	if (ieee80211_is_ctl(fc)) {  		/*  		 * ACK and CTS are 10 bytes, all others 16. To see how diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c index e286b42307f3..6299ee95cd11 100644 --- a/samples/bpf/test_maps.c +++ b/samples/bpf/test_maps.c @@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)  	/* iterate over two elements */  	assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 && -	       next_key == 2); +	       (next_key == 1 || next_key == 2));  	assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 && -	       next_key == 1); +	       (next_key == 1 || next_key == 2));  	assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&  	       errno == ENOENT); diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c index ec667f158f19..5d905d90d504 100644 --- a/sound/core/seq/seq_dummy.c +++ b/sound/core/seq/seq_dummy.c @@ -82,36 +82,6 @@ struct snd_seq_dummy_port {  static int my_client = -1;  /* - * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events - * to subscribers. - * Note: this callback is called only after all subscribers are removed. 
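
/*
 * Editor's note (annotation, not part of the diff): the nl80211.c change
 * above moves the -ENOENT validity check ahead of nlmsg_new() - the old
 * ordering could return after the reply had been allocated, leaking it.
 * The generic shape of the fix, with invented helpers:
 */
#include <stdlib.h>

static int valid_request(int key) { return key >= 0; }

static int handle_request(int key, char **replyp)
{
	if (!valid_request(key))
		return -1;		/* fail before owning any resources */

	char *reply = malloc(64);
	if (!reply)
		return -1;
	*replyp = reply;		/* caller owns and frees on success */
	return 0;
}

int main(void)
{
	char *r = NULL;

	if (handle_request(7, &r) == 0)
		free(r);
	return 0;
}
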
- */ -static int -dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info) -{ -	struct snd_seq_dummy_port *p; -	int i; -	struct snd_seq_event ev; - -	p = private_data; -	memset(&ev, 0, sizeof(ev)); -	if (p->duplex) -		ev.source.port = p->connect; -	else -		ev.source.port = p->port; -	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; -	ev.type = SNDRV_SEQ_EVENT_CONTROLLER; -	for (i = 0; i < 16; i++) { -		ev.data.control.channel = i; -		ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF; -		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); -		ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS; -		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); -	} -	return 0; -} - -/*   * event input callback - just redirect events to subscribers   */  static int @@ -175,7 +145,6 @@ create_port(int idx, int type)  		| SNDRV_SEQ_PORT_TYPE_PORT;  	memset(&pcb, 0, sizeof(pcb));  	pcb.owner = THIS_MODULE; -	pcb.unuse = dummy_unuse;  	pcb.event_input = dummy_input;  	pcb.private_free = dummy_free;  	pcb.private_data = rec; diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c index 7752860f7230..4c23381727a1 100644 --- a/sound/soc/adi/axi-i2s.c +++ b/sound/soc/adi/axi-i2s.c @@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev)  	if (ret)  		goto err_clk_disable; +	return 0; +  err_clk_disable:  	clk_disable_unprepare(i2s->clk);  	return ret; diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index e5f2fb884bf3..30c673cdc12e 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c @@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);  static const char * const pcm512x_dsp_program_texts[] = {  	"FIR interpolation with de-emphasis",  	"Low latency IIR with de-emphasis", -	"Fixed process flow",  	"High attenuation with de-emphasis", +	"Fixed process flow",  	"Ringing-less low latency FIR",  }; diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index 2cd4fe463102..1d1c7f8a9af2 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c @@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,  		RT286_I2S_CTRL1, 0x0018, d_len_code << 3);  	dev_dbg(codec->dev, "format val = 0x%x\n", val); -	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -		snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); -	else -		snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val); +	snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); +	snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);  	return 0;  } diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index c0fbe1881439..918ada9738b0 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c @@ -2083,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,  	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);  	switch (event) { -	case SND_SOC_DAPM_POST_PMU: +	case SND_SOC_DAPM_PRE_PMU:  		regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2); +		break; + +	case SND_SOC_DAPM_POST_PMU:  		regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0);  		break; +  	default:  		return 0;  	} @@ -2101,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,  	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);  	switch (event) { -	case SND_SOC_DAPM_POST_PMU: +	case SND_SOC_DAPM_PRE_PMU:  		regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2); +		break; + +	case SND_SOC_DAPM_POST_PMU:  		
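
/*
 * Editor's note (annotation, not part of the diff): the axi-i2s.c hunk
 * above inserts a bare "return 0;" because, without it, a successful
 * probe fell straight through into err_clk_disable and undid its own
 * setup.  The goto-unwind idiom in miniature (helper names invented):
 */
#include <stdio.h>

static int  enable_clock(void)    { puts("clk on");  return 0; }
static void disable_clock(void)   { puts("clk off"); }
static int  register_device(void) { puts("registered"); return 0; }

static int probe(void)
{
	int ret;

	ret = enable_clock();
	if (ret)
		return ret;

	ret = register_device();
	if (ret)
		goto err_clk_disable;

	return 0;	/* success must not fall through into the unwind */

err_clk_disable:
	disable_clock();
	return ret;
}

int main(void) { return probe(); }
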
regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0);  		break; +  	default:  		return 0;  	} @@ -2212,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,  static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {  	SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, -		0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU), +		0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU | +		SND_SOC_DAPM_POST_PMU),  	SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT, -		0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU), +		0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU | +		SND_SOC_DAPM_POST_PMU),  	/* Input Side */  	/* micbias */ diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c index 1d1205702d23..9f2dced046de 100644 --- a/sound/soc/codecs/ts3a227e.c +++ b/sound/soc/codecs/ts3a227e.c @@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,  	struct ts3a227e *ts3a227e;  	struct device *dev = &i2c->dev;  	int ret; +	unsigned int acc_reg;  	ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);  	if (ts3a227e == NULL) @@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,  			   INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,  			   ADC_COMPLETE_INT_DISABLE); +	/* Read jack status because chip might not trigger interrupt at boot. */ +	regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg); +	ts3a227e_new_jack_state(ts3a227e, acc_reg); +	ts3a227e_jack_report(ts3a227e); +  	return 0;  } diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 4d2d2b1380d5..75b87c5c0f04 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = {  	{ "Right Capture PGA", NULL, "Right Capture Mux" },  	{ "Right Capture PGA", NULL, "Right Capture Inverting Mux" }, -	{ "AIFOUTL", "Left",  "ADCL" }, -	{ "AIFOUTL", "Right", "ADCR" }, -	{ "AIFOUTR", "Left",  "ADCL" }, -	{ "AIFOUTR", "Right", "ADCR" }, +	{ "AIFOUTL Mux", "Left", "ADCL" }, +	{ "AIFOUTL Mux", "Right", "ADCR" }, +	{ "AIFOUTR Mux", "Left", "ADCL" }, +	{ "AIFOUTR Mux", "Right", "ADCR" }, + +	{ "AIFOUTL", NULL, "AIFOUTL Mux" }, +	{ "AIFOUTR", NULL, "AIFOUTR Mux" },  	{ "ADCL", NULL, "CLK_DSP" },  	{ "ADCL", NULL, "Left Capture PGA" }, @@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = {  };  static const struct snd_soc_dapm_route dac_intercon[] = { -	{ "DACL", "Right", "AIFINR" }, -	{ "DACL", "Left",  "AIFINL" }, +	{ "DACL Mux", "Left", "AIFINL" }, +	{ "DACL Mux", "Right", "AIFINR" }, + +	{ "DACR Mux", "Left", "AIFINL" }, +	{ "DACR Mux", "Right", "AIFINR" }, + +	{ "DACL", NULL, "DACL Mux" },  	{ "DACL", NULL, "CLK_DSP" }, -	{ "DACR", "Right", "AIFINR" }, -	{ "DACR", "Left",  "AIFINL" }, +	{ "DACR", NULL, "DACR Mux" },  	{ "DACR", NULL, "CLK_DSP" },  	{ "Charge pump", NULL, "SYSCLK" }, diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 031a1ae71d94..a96eb497a379 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -556,7 +556,7 @@ static struct {  	{ 22050, 2 },  	{ 24000, 2 },  	{ 16000, 3 }, -	{ 11250, 4 }, +	{ 11025, 4 },  	{ 12000, 4 },  	{  8000, 5 },  }; diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h index 91a550f4a10d..5e793bbb6b02 100644 --- a/sound/soc/fsl/fsl_esai.h +++ b/sound/soc/fsl/fsl_esai.h @@ -302,7 +302,7 @@  #define ESAI_xCCR_xFP_MASK	(((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)  #define 
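
/*
 * Editor's note (annotation, not part of the diff): the ts3a227e.c hunk
 * below reads the accessory status once at probe because a headset that
 * is already plugged in at boot never produces an interrupt edge.  The
 * general pattern - seed event-driven state with one explicit poll - in
 * toy form; read_status()/report() stand in for the regmap and jack
 * calls.
 */
#include <stdio.h>

static int hw_status = 1;	/* pretend the jack is already plugged in */

static int  read_status(void) { return hw_status; }
static void report(int s)     { printf("jack %s\n", s ? "in" : "out"); }

/* An interrupt handler would call report(read_status()) on each edge. */

int main(void)
{
	report(read_status());	/* prime the state at init; no edge needed */
	return 0;
}
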
ESAI_xCCR_xFP(v)	((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)  #define ESAI_xCCR_xDC_SHIFT     9 -#define ESAI_xCCR_xDC_WIDTH	4 +#define ESAI_xCCR_xDC_WIDTH	5  #define ESAI_xCCR_xDC_MASK	(((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)  #define ESAI_xCCR_xDC(v)	((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)  #define ESAI_xCCR_xPSR_SHIFT	8 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index a65f17d57ffb..059496ed9ad7 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)  	}  	ssi_private->irq = platform_get_irq(pdev, 0); -	if (!ssi_private->irq) { +	if (ssi_private->irq < 0) {  		dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); -		return -ENXIO; +		return ssi_private->irq;  	}  	/* Are the RX and the TX clocks locked? */ diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c index 4caacb05a623..cd146d4fa805 100644 --- a/sound/soc/fsl/imx-wm8962.c +++ b/sound/soc/fsl/imx-wm8962.c @@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)  	if (ret)  		goto clk_fail;  	data->card.num_links = 1; +	data->card.owner = THIS_MODULE;  	data->card.dai_link = &data->dai;  	data->card.dapm_widgets = imx_wm8962_dapm_widgets;  	data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets); diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index fb9240fdc9b7..7fe3009b1c43 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,  }  /* Decrease the reference count of the device nodes */ -static int asoc_simple_card_unref(struct platform_device *pdev) +static int asoc_simple_card_unref(struct snd_soc_card *card)  { -	struct snd_soc_card *card = platform_get_drvdata(pdev);  	struct snd_soc_dai_link *dai_link;  	int num_links; @@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)  		return ret;  err: -	asoc_simple_card_unref(pdev); +	asoc_simple_card_unref(&priv->snd_card);  	return ret;  } @@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)  		snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,  					&simple_card_mic_jack_gpio); -	return asoc_simple_card_unref(pdev); +	return asoc_simple_card_unref(card);  }  static const struct of_device_id asoc_simple_of_match[] = { diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c index ef2e8b5766a1..b3f9489794a6 100644 --- a/sound/soc/intel/sst-firmware.c +++ b/sound/soc/intel/sst-firmware.c @@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba  	struct list_head *block_list)  {  	struct sst_mem_block *block, *tmp; +	struct sst_block_allocator ba_tmp = *ba;  	u32 end = ba->offset + ba->size, block_end;  	int err; @@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba  		if (ba->offset >= block->offset && ba->offset < block_end) {  			/* align ba to block boundary */ -			ba->size -= block_end - ba->offset; -			ba->offset = block_end; -			err = block_alloc_contiguous(dsp, ba, block_list); +			ba_tmp.size -= block_end - ba->offset; +			ba_tmp.offset = block_end; +			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);  			if (err < 0)  				return -ENOMEM; @@ -767,10 +768,10 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba  			
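
/*
 * Editor's note (annotation, not part of the diff): the fsl_ssi.c hunk
 * above corrects the error test - platform_get_irq() signals failure
 * with a negative errno, so "if (!irq)" both misses real failures and
 * encodes the wrong fallback.  Shape of the fixed check; get_irq() is a
 * stand-in returning -ENXIO (-6) on failure.
 */
#include <stdio.h>

static int get_irq(int fail) { return fail ? -6 : 42; }

static int probe(int fail)
{
	int irq = get_irq(fail);

	if (irq < 0)		/* not "if (!irq)" */
		return irq;	/* propagate the errno as-is */
	printf("irq %d\n", irq);
	return 0;
}

int main(void)
{
	probe(0);		/* prints "irq 42" */
	return probe(1) < 0 ? 1 : 0;
}
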
list_move(&block->list, &dsp->used_block_list);  			list_add(&block->module_list, block_list);  			/* align ba to block boundary */ -			ba->size -= block_end - ba->offset; -			ba->offset = block_end; +			ba_tmp.size -= block_end - ba->offset; +			ba_tmp.offset = block_end; -			err = block_alloc_contiguous(dsp, ba, block_list); +			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);  			if (err < 0)  				return -ENOMEM; diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 3f8c48231364..5bf14040c24a 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c @@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)  	struct sst_dsp *sst = hsw->dsp;  	unsigned long flags; +	if (!stream) { +		dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n"); +		return 0; +	} +  	/* dont free DSP streams that are not commited */  	if (!stream->commited)  		goto out; @@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)  	u32 header;  	int ret; +	if (!stream) { +		dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n"); +		return 0; +	} + +	if (stream->commited) { +		dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n"); +		return 0; +	} +  	trace_ipc_request("stream alloc", stream->host_id);  	header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM); @@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,  {  	int ret; +	if (!stream) { +		dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n"); +		return 0; +	} +  	trace_ipc_request("stream pause", stream->reply.stream_hw_id);  	ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE, @@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,  {  	int ret; +	if (!stream) { +		dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n"); +		return 0; +	} +  	trace_ipc_request("stream resume", stream->reply.stream_hw_id);  	ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME, @@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)  {  	int ret, tries = 10; +	if (!stream) { +		dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n"); +		return 0; +	} +  	/* dont reset streams that are not commited */  	if (!stream->commited)  		return 0; diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index 8b79cafab1e2..c7eb9dd67f60 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c @@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,  	case SND_SOC_DAIFMT_CBM_CFS:  		/* McBSP slave. 
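
/*
 * Editor's note (annotation, not part of the diff): the sst-firmware.c
 * hunks above stop adjusting the caller's block-allocator descriptor in
 * place and work on a stack copy (ba_tmp), so a failed sub-allocation
 * leaves the caller's view untouched.  The idiom in miniature, with
 * invented types:
 */
#include <stdio.h>

struct region { unsigned int offset, size; };

static int carve(struct region *r, unsigned int bytes)
{
	if (bytes > r->size)
		return -1;
	r->offset += bytes;
	r->size   -= bytes;
	return 0;
}

static int alloc_fixed(const struct region *ba)
{
	struct region tmp = *ba;	/* mutate the copy, never *ba */

	if (carve(&tmp, 128) < 0)
		return -1;		/* caller's descriptor still intact */
	printf("carved to offset %u, %u left\n", tmp.offset, tmp.size);
	return 0;
}

int main(void)
{
	struct region ba = { 0, 512 };

	alloc_fixed(&ba);
	printf("caller still sees offset %u, size %u\n", ba.offset, ba.size);
	return 0;
}
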
FS clock as output */
 		regs->srgr2	|= FSGM;
-		regs->pcr0	|= FSXM;
+		regs->pcr0	|= FSXM | FSRM;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
 		/* McBSP slave */
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 13d8507333b8..dcc26eda0539 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
 			    SNDRV_PCM_FMTBIT_S24_LE),
 	},
 	.ops = &rockchip_i2s_dai_ops,
+	.symmetric_rates = 1,
 };
 
 static const struct snd_soc_component_driver rockchip_i2s_component = {
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 590a82f01d0b..025c38fbe3c0 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 			rtd->dai_link->stream_name);
 
 		ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
-				1, 0, &be_pcm);
+				rtd->dai_link->dpcm_playback,
+				rtd->dai_link->dpcm_capture, &be_pcm);
 		if (ret < 0) {
 			dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
 				rtd->dai_link->name);
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 		rtd->pcm = be_pcm;
 		rtd->fe_compr = 1;
 
-		be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
-		be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+		if (rtd->dai_link->dpcm_playback)
+			be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+		else if (rtd->dai_link->dpcm_capture)
+			be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
 
 		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
 	} else
 		memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
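
/*
 * Editor's note (annotation, not part of the diff): the soc-compress.c
 * hunk above stops hard-coding "one playback stream, zero capture" and
 * instead creates exactly the substreams the DAI link declares, then
 * touches only the substreams that exist.  Reduced to its core, with
 * pcm_new() invented:
 */
#include <stdio.h>

struct link { int playback, capture; };

static void pcm_new(int play, int cap)
{
	printf("pcm created: %d playback, %d capture\n", play, cap);
}

int main(void)
{
	struct link l = { .playback = 0, .capture = 1 };

	/* derive stream counts from the link; don't assume playback */
	pcm_new(l.playback, l.capture);
	return 0;
}
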
