From dc4fbba11e4661a6a77a1f89ba32f9082e6395ff Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Thu, 29 Oct 2015 11:44:05 +1100
Subject: powerpc: Create disable_kernel_{fp,altivec,vsx,spe}()

The enable_kernel_*() functions leave the relevant MSR bits enabled
until we exit the kernel sometime later. Create disable versions that
wrap the kernel use of FP, Altivec, VSX or SPE.

While we don't want to disable the facilities normally, for performance
reasons (MSR writes are slow), the disable calls will be used by a debug
boot option that does exactly that and catches bad uses in other areas
of the kernel.

Signed-off-by: Anton Blanchard
Signed-off-by: Michael Ellerman
---
 lib/raid6/altivec.uc | 1 +
 1 file changed, 1 insertion(+)

(limited to 'lib')

diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index bec27fce7501..682aae8a1fef 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
 
         raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
 
+        disable_kernel_altivec();
         preempt_enable();
 }
-- 
cgit v1.2.3-59-g8ed1b
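The raid6 hunk above shows only the new disable call; for orientation, the
bracketing pattern the series establishes looks roughly like the sketch
below (do_vector_work() is a hypothetical payload, not a function from the
patch):

        static void kernel_altivec_section(void)
        {
                preempt_disable();
                enable_kernel_altivec();        /* grab MSR_VEC for kernel use */

                do_vector_work();               /* hypothetical VMX payload */

                disable_kernel_altivec();       /* new: drop MSR_VEC again */
                preempt_enable();
        }

Normally the extra MSR write in the disable call would be wasted work; it
only pays off under the debug option the commit message describes.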
From de2dd0eb30af55d3893979d5641c50c7a8969c99 Mon Sep 17 00:00:00 2001
From: Zhao Qiang
Date: Mon, 30 Nov 2015 10:48:52 +0800
Subject: genalloc: support memory allocation with byte alignment

Byte alignment is required to manage some special RAM, so add
gen_pool_first_fit_align() to genalloc. Also add gen_pool_alloc_algo(),
so that a caller using more than one algorithm can pass the algorithm
and its data explicitly; gen_pool_alloc() becomes a wrapper around it.

Signed-off-by: Zhao Qiang
Signed-off-by: Scott Wood
---
 include/linux/genalloc.h | 27 +++++++++++++++++----
 lib/genalloc.c           | 61 +++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 78 insertions(+), 10 deletions(-)

(limited to 'lib')

diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 7ff168d06967..3c676ce46ee0 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -30,10 +30,12 @@
 #ifndef __GENALLOC_H__
 #define __GENALLOC_H__
 
+#include
 #include
 
 struct device;
 struct device_node;
+struct gen_pool;
 
 /**
  * Allocation callback function type definition
@@ -47,7 +49,7 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
                         unsigned long size,
                         unsigned long start,
                         unsigned int nr,
-                        void *data);
+                        void *data, struct gen_pool *pool);
 
 /*
  *  General purpose special memory pool descriptor.
@@ -75,6 +77,13 @@ struct gen_pool_chunk {
         unsigned long bits[0];          /* bitmap for allocating memory chunk */
 };
 
+/*
+ * gen_pool data descriptor for gen_pool_first_fit_align.
+ */
+struct genpool_data_align {
+        int align;              /* alignment by bytes for starting address */
+};
+
 extern struct gen_pool *gen_pool_create(int, int);
 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
 extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
@@ -98,6 +107,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
 }
 extern void gen_pool_destroy(struct gen_pool *);
 extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
+                genpool_algo_t algo, void *data);
 extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
                 dma_addr_t *dma);
 extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
@@ -110,14 +121,22 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
                 void *data);
 
 extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
-                unsigned long start, unsigned int nr, void *data);
+                unsigned long start, unsigned int nr, void *data,
+                struct gen_pool *pool);
+
+extern unsigned long gen_pool_first_fit_align(unsigned long *map,
+                unsigned long size, unsigned long start, unsigned int nr,
+                void *data, struct gen_pool *pool);
+
 extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                 unsigned long size, unsigned long start, unsigned int nr,
-                void *data);
+                void *data, struct gen_pool *pool);
 extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-                unsigned long start, unsigned int nr, void *data);
+                unsigned long start, unsigned int nr, void *data,
+                struct gen_pool *pool);
+
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
                 int min_alloc_order, int nid, const char *name);
 
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 116a166b096f..b8cf89d9e17d 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -269,6 +269,25 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+        return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+/**
+ * gen_pool_alloc_algo - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+                genpool_algo_t algo, void *data)
 {
         struct gen_pool_chunk *chunk;
         unsigned long addr = 0;
@@ -290,8 +309,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
                 end_bit = chunk_size(chunk) >> order;
 retry:
-                start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
-                                pool->data);
+                start_bit = algo(chunk->bits, end_bit, start_bit,
+                                 nbits, data, pool);
                 if (start_bit >= end_bit)
                         continue;
                 remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -310,7 +329,7 @@ retry:
         rcu_read_unlock();
         return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_algo);
 
 /**
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
@@ -501,14 +520,41 @@ EXPORT_SYMBOL(gen_pool_set_algo);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
-                unsigned long start, unsigned int nr, void *data)
+                unsigned long start, unsigned int nr, void *data,
+                struct gen_pool *pool)
 {
         return bitmap_find_next_zero_area(map, size, start, nr, 0);
 }
 EXPORT_SYMBOL(gen_pool_first_fit);
 
+/**
+ * gen_pool_first_fit_align - find the first available region
+ * of memory matching the size requirement (alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
+                unsigned long start, unsigned int nr, void *data,
+                struct gen_pool *pool)
+{
+        struct genpool_data_align *alignment;
+        unsigned long align_mask;
+        int order;
+
+        alignment = data;
+        order = pool->min_alloc_order;
+        align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
+        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_align);
+
 /**
  * gen_pool_first_fit_order_align - find the first available region
  * of memory matching the size requirement. The region will be aligned
@@ -518,10 +564,11 @@ EXPORT_SYMBOL(gen_pool_first_fit);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                 unsigned long size, unsigned long start,
-                unsigned int nr, void *data)
+                unsigned int nr, void *data, struct gen_pool *pool)
 {
         unsigned long align_mask = roundup_pow_of_two(nr) - 1;
 
@@ -537,12 +584,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  *
  * Iterate over the bitmap to find the smallest free region
  * which we can allocate the memory.
  */
 unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-                unsigned long start, unsigned int nr, void *data)
+                unsigned long start, unsigned int nr, void *data,
+                struct gen_pool *pool)
 {
         unsigned long start_bit = size;
         unsigned long len = size + 1;
-- 
cgit v1.2.3-59-g8ed1b
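A minimal usage sketch of the new interface (the pool, size and alignment
below are made up; gen_pool_alloc_algo(), gen_pool_first_fit_align() and
struct genpool_data_align are the pieces this patch adds):

        /* Allocate 256 bytes starting on a 64-byte boundary. */
        struct genpool_data_align align_data = { .align = 64 };
        unsigned long addr;

        addr = gen_pool_alloc_algo(pool, 256,
                                   gen_pool_first_fit_align, &align_data);
        if (!addr)
                return -ENOMEM;

For a pool created with min_alloc_order = 0, gen_pool_first_fit_align()
computes align_mask = ((64 + 1 - 1) >> 0) - 1 = 63, which is exactly the
low-bits mask that bitmap_find_next_zero_area() expects for 64-byte
alignment.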
From b26981c8f743d3cb64a6907eb1f5c6c4ba6ca672 Mon Sep 17 00:00:00 2001
From: Zhao Qiang
Date: Mon, 30 Nov 2015 10:48:53 +0800
Subject: genalloc: support allocating a specific region

Add a new allocation algorithm to genalloc that reserves a specific
region of memory.

Signed-off-by: Zhao Qiang
Signed-off-by: Scott Wood
---
 include/linux/genalloc.h | 11 +++++++++++
 lib/genalloc.c           | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)

(limited to 'lib')

diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 3c676ce46ee0..29d4385903d4 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -84,6 +84,13 @@ struct genpool_data_align {
         int align;              /* alignment by bytes for starting address */
 };
 
+/*
+ * gen_pool data descriptor for gen_pool_fixed_alloc.
+ */
+struct genpool_data_fixed {
+        unsigned long offset;           /* The offset of the specific region */
+};
+
 extern struct gen_pool *gen_pool_create(int, int);
 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
 extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
@@ -124,6 +131,10 @@ extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                 unsigned long start, unsigned int nr, void *data,
                 struct gen_pool *pool);
 
+extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
+                unsigned long size, unsigned long start, unsigned int nr,
+                void *data, struct gen_pool *pool);
+
 extern unsigned long gen_pool_first_fit_align(unsigned long *map,
                 unsigned long size, unsigned long start, unsigned int nr,
                 void *data, struct gen_pool *pool);
 
diff --git a/lib/genalloc.c b/lib/genalloc.c
index b8cf89d9e17d..5ec83cd93284 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -555,6 +555,38 @@ unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
 }
 EXPORT_SYMBOL(gen_pool_first_fit_align);
 
+/**
+ * gen_pool_fixed_alloc - reserve a specific region
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
+                unsigned long start, unsigned int nr, void *data,
+                struct gen_pool *pool)
+{
+        struct genpool_data_fixed *fixed_data;
+        int order;
+        unsigned long offset_bit;
+        unsigned long start_bit;
+
+        fixed_data = data;
+        order = pool->min_alloc_order;
+        offset_bit = fixed_data->offset >> order;
+        if (WARN_ON(fixed_data->offset & (1UL << order - 1)))
+                return size;
+
+        start_bit = bitmap_find_next_zero_area(map, size,
+                        start + offset_bit, nr, 0);
+        if (start_bit != offset_bit)
+                start_bit = size;
+        return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_fixed_alloc);
+
 /**
  * gen_pool_first_fit_order_align - find the first available region
  * of memory matching the size requirement. The region will be aligned
-- 
cgit v1.2.3-59-g8ed1b
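A sketch of the caller side (the pool and numbers are hypothetical; the
offset must be a multiple of 1 << min_alloc_order, or the WARN_ON() fires
and the allocation fails):

        /* Reserve 128 bytes at exactly offset 0x200 within the pool. */
        struct genpool_data_fixed fixed_data = { .offset = 0x200 };
        unsigned long addr;

        addr = gen_pool_alloc_algo(pool, 128,
                                   gen_pool_fixed_alloc, &fixed_data);

Note that the alignment check above masks with 1UL << order - 1, which C
precedence parses as 1UL << (order - 1), a single bit rather than the
intended low-bits mask ((1UL << order) - 1). The final hunk of this series
corrects it.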
From 0e6e01ff694ee222acc5a9184211678473c948e3 Mon Sep 17 00:00:00 2001
From: Zhao Qiang
Date: Mon, 30 Nov 2015 10:48:54 +0800
Subject: CPM/QE: use genalloc to manage CPM/QE muram

Use genalloc to manage CPM/QE muram instead of rheap.

Signed-off-by: Zhao Qiang
Signed-off-by: Scott Wood
---
 arch/powerpc/include/asm/cpm.h   |   3 +
 arch/powerpc/platforms/Kconfig   |   4 +-
 arch/powerpc/sysdev/cpm_common.c | 126 +++++++++++++++++++++++++++------------
 lib/genalloc.c                   |   2 +-
 4 files changed, 94 insertions(+), 41 deletions(-)

(limited to 'lib')

diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
index 4398a6cdcf53..46e86b50abf3 100644
--- a/arch/powerpc/include/asm/cpm.h
+++ b/arch/powerpc/include/asm/cpm.h
@@ -2,6 +2,7 @@
 #define __CPM_H
 
 #include
+#include
 #include
 #include
 #include
@@ -161,6 +162,8 @@ int cpm_muram_init(void)
 unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
 int cpm_muram_free(unsigned long offset);
 unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo,
+                void *data);
 void __iomem *cpm_muram_addr(unsigned long offset);
 unsigned long cpm_muram_offset(void __iomem *addr);
 dma_addr_t cpm_muram_dma(void __iomem *addr);
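For orientation, the consumer-facing muram API declared above is unchanged;
a driver still does something like the following sketch (the sizes and the
use of the region are illustrative; IS_ERR_VALUE() matches the
(unsigned long)-ENOMEM failure return that cpm_muram_alloc_common() adopts
later in this patch):

        /* Carve out 64 bytes of muram, 8-byte aligned, and map it. */
        unsigned long off = cpm_muram_alloc(64, 8);
        void __iomem *bd;

        if (IS_ERR_VALUE(off))
                return -ENOMEM;
        bd = cpm_muram_addr(off);       /* virtual address of the region */
        /* ... program buffer descriptors at bd ... */
        cpm_muram_free(off);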
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index b7f9c408bf24..57069eb8f093 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -275,7 +275,7 @@ config TAU_AVERAGE
 config QUICC_ENGINE
         bool "Freescale QUICC Engine (QE) Support"
         depends on FSL_SOC && PPC32
-        select PPC_LIB_RHEAP
+        select GENERIC_ALLOCATOR
         select CRC32
         help
           The QUICC Engine (QE) is a new generation of communications
@@ -295,7 +295,6 @@ config CPM2
         bool "Enable support for the CPM2 (Communications Processor Module)"
         depends on (FSL_SOC_BOOKE && PPC32) || 8260
         select CPM
-        select PPC_LIB_RHEAP
         select PPC_PCI_CHOICE
         select ARCH_REQUIRE_GPIOLIB
         help
@@ -325,6 +324,7 @@ config FSL_ULI1575
 
 config CPM
         bool
+        select GENERIC_ALLOCATOR
 
 config OF_RTC
         bool
 
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index e00a5ee58fd7..fcc83cd9cc2f 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -17,6 +17,7 @@
  * published by the Free Software Foundation.
  */
 
+#include
 #include
 #include
 #include
@@ -27,7 +28,6 @@
 #include
 #include
 
-#include
 #include
 #include
 
@@ -65,14 +65,22 @@ void __init udbg_init_cpm(void)
 }
 #endif
 
+static struct gen_pool *muram_pool;
 static spinlock_t cpm_muram_lock;
-static rh_block_t cpm_boot_muram_rh_block[16];
-static rh_info_t cpm_muram_info;
 static u8 __iomem *muram_vbase;
 static phys_addr_t muram_pbase;
 
-/* Max address size we deal with */
+struct muram_block {
+        struct list_head head;
+        unsigned long start;
+        int size;
+};
+
+static LIST_HEAD(muram_block_list);
+
+/* max address size we deal with */
 #define OF_MAX_ADDR_CELLS       4
+#define GENPOOL_OFFSET          (4096 * 8)
 
 int cpm_muram_init(void)
 {
@@ -87,50 +95,51 @@ int cpm_muram_init(void)
                 return 0;
 
         spin_lock_init(&cpm_muram_lock);
-        /* initialize the info header */
-        rh_init(&cpm_muram_info, 1,
-                sizeof(cpm_boot_muram_rh_block) /
-                sizeof(cpm_boot_muram_rh_block[0]),
-                cpm_boot_muram_rh_block);
-
         np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
         if (!np) {
                 /* try legacy bindings */
                 np = of_find_node_by_name(NULL, "data-only");
                 if (!np) {
-                        printk(KERN_ERR "Cannot find CPM muram data node");
+                        pr_err("Cannot find CPM muram data node");
                         ret = -ENODEV;
-                        goto out;
+                        goto out_muram;
                 }
         }
 
+        muram_pool = gen_pool_create(0, -1);
         muram_pbase = of_translate_address(np, zero);
         if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
-                printk(KERN_ERR "Cannot translate zero through CPM muram node");
+                pr_err("Cannot translate zero through CPM muram node");
                 ret = -ENODEV;
-                goto out;
+                goto out_pool;
         }
 
         while (of_address_to_resource(np, i++, &r) == 0) {
                 if (r.end > max)
                         max = r.end;
-
-                rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
-                                 resource_size(&r));
+                ret = gen_pool_add(muram_pool, r.start - muram_pbase +
+                                   GENPOOL_OFFSET, resource_size(&r), -1);
+                if (ret) {
+                        pr_err("QE: couldn't add muram to pool!\n");
+                        goto out_pool;
+                }
         }
 
         muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
         if (!muram_vbase) {
-                printk(KERN_ERR "Cannot map CPM muram");
+                pr_err("Cannot map QE muram");
                 ret = -ENOMEM;
+                goto out_pool;
         }
-
-out:
+        goto out_muram;
+out_pool:
+        gen_pool_destroy(muram_pool);
+out_muram:
         of_node_put(np);
         return ret;
 }
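The GENPOOL_OFFSET bias deserves a comment, since it looks odd on first
read: gen_pool_alloc() signals failure by returning 0, but 0 is a valid
muram offset. Every region is therefore added to the pool shifted up by
GENPOOL_OFFSET, and the shift is undone after each successful allocation.
A compressed illustration of the scheme (not literal patch code):

        /* seeding: bias each muram region's pool address */
        gen_pool_add(muram_pool, muram_offset + GENPOOL_OFFSET, size, -1);

        /* allocating: 0 now unambiguously means failure */
        start = gen_pool_alloc_algo(muram_pool, size, algo, data);
        if (!start)
                return (unsigned long)-ENOMEM;
        start -= GENPOOL_OFFSET;        /* back to a real muram offset */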
-/**
+/*
  * cpm_muram_alloc - allocate the requested size worth of multi-user ram
  * @size: number of bytes to allocate
  * @align: requested alignment, in bytes
@@ -143,14 +152,13 @@ unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
 {
         unsigned long start;
         unsigned long flags;
+        struct genpool_data_align muram_pool_data;
 
         spin_lock_irqsave(&cpm_muram_lock, flags);
-        cpm_muram_info.alignment = align;
-        start = rh_alloc(&cpm_muram_info, size, "commproc");
-        if (!IS_ERR_VALUE(start))
-                memset_io(cpm_muram_addr(start), 0, size);
+        muram_pool_data.align = align;
+        start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
+                                       &muram_pool_data);
         spin_unlock_irqrestore(&cpm_muram_lock, flags);
-
         return start;
 }
 EXPORT_SYMBOL(cpm_muram_alloc);
@@ -161,23 +169,31 @@ EXPORT_SYMBOL(cpm_muram_alloc);
  */
 int cpm_muram_free(unsigned long offset)
 {
-        int ret;
         unsigned long flags;
+        int size;
+        struct muram_block *tmp;
 
+        size = 0;
         spin_lock_irqsave(&cpm_muram_lock, flags);
-        ret = rh_free(&cpm_muram_info, offset);
+        list_for_each_entry(tmp, &muram_block_list, head) {
+                if (tmp->start == offset) {
+                        size = tmp->size;
+                        list_del(&tmp->head);
+                        kfree(tmp);
+                        break;
+                }
+        }
+        gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
         spin_unlock_irqrestore(&cpm_muram_lock, flags);
-
-        return ret;
+        return size;
 }
 EXPORT_SYMBOL(cpm_muram_free);
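Two things are worth noting in the conversion above. First, gen_pool_free()
(unlike rh_free()) must be told the size of the region being freed, which
is why each allocation is now recorded in a muram_block and looked up here
by offset. Second, the return semantics shift slightly: cpm_muram_free()
now reports the freed size, with 0 for an offset it never handed out, so a
caller that cares could, hypothetically, guard against stale offsets:

        /* hypothetical caller-side check, not part of the patch */
        if (cpm_muram_free(off) == 0)
                pr_warn("muram: free of unknown offset 0x%lx\n", off);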
+/* * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram - * @offset: the offset into the muram area to reserve - * @size: the number of bytes to reserve - * - * This function returns "start" on success, -ENOMEM on failure. + * @offset: offset of allocation start address + * @size: number of bytes to allocate + * This function returns an offset into the muram area * Use cpm_dpram_addr() to get the virtual address of the area. * Use cpm_muram_free() to free the allocation. */ @@ -185,16 +201,50 @@ unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) { unsigned long start; unsigned long flags; + struct genpool_data_fixed muram_pool_data_fixed; spin_lock_irqsave(&cpm_muram_lock, flags); - cpm_muram_info.alignment = 1; - start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc"); + muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET; + start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc, + &muram_pool_data_fixed); spin_unlock_irqrestore(&cpm_muram_lock, flags); - return start; } EXPORT_SYMBOL(cpm_muram_alloc_fixed); +/* + * cpm_muram_alloc_common - cpm_muram_alloc common code + * @size: number of bytes to allocate + * @algo: algorithm for alloc. + * @data: data for genalloc's algorithm. + * + * This function returns an offset into the muram area. + */ +unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo, + void *data) +{ + struct muram_block *entry; + unsigned long start; + + start = gen_pool_alloc_algo(muram_pool, size, algo, data); + if (!start) + goto out2; + start = start - GENPOOL_OFFSET; + memset_io(cpm_muram_addr(start), 0, size); + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + goto out1; + entry->start = start; + entry->size = size; + list_add(&entry->head, &muram_block_list); + + return start; +out1: + gen_pool_free(muram_pool, start, size); +out2: + return (unsigned long)-ENOMEM; +} + /** * cpm_muram_addr - turn a muram offset into a virtual address * @offset: muram offset to convert diff --git a/lib/genalloc.c b/lib/genalloc.c index 5ec83cd93284..0a1139644d32 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -576,7 +576,7 @@ unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, fixed_data = data; order = pool->min_alloc_order; offset_bit = fixed_data->offset >> order; - if (WARN_ON(fixed_data->offset & (1UL << order - 1))) + if (WARN_ON(fixed_data->offset & ((1UL << order) - 1))) return size; start_bit = bitmap_find_next_zero_area(map, size, -- cgit v1.2.3-59-g8ed1b