path: root/lib
author     Linus Torvalds <torvalds@linux-foundation.org>  2016-01-15 13:18:47 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-01-15 13:18:47 -0800
commit     f689b742f217b2ffe7925f8a6521b208ee995309 (patch)
tree       40ed4482ce5808fd5498fe935205b06782bbbca4 /lib
parent     Merge tag 'vfio-v4.5-rc1' of git://github.com/awilliam/linux-vfio (diff)
parent     Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next (diff)
Merge tag 'powerpc-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Core:
   - Ground work for the new Power9 MMU from Aneesh Kumar K.V
   - Optimise FP/VMX/VSX context switching from Anton Blanchard

  Misc:
   - Various cleanups from Krzysztof Kozlowski, John Ogness, Rashmica
     Gupta, Russell Currey, Gavin Shan, Daniel Axtens, Michael Neuling,
     Andrew Donnellan
   - Allow wrapper to work on non-english system from Laurent Vivier
   - Add rN aliases to the pt_regs_offset table from Rashmica Gupta
   - Fix module autoload for rackmeter & axonram drivers from Luis de Bethencourt
   - Include KVM guest test in all interrupt vectors from Paul Mackerras
   - Fix DSCR inheritance over fork() from Anton Blanchard
   - Make value-returning atomics & {cmp}xchg* & their atomic_ versions
     fully ordered from Boqun Feng
   - Print MSR TM bits in oops messages from Michael Neuling
   - Add TM signal return & invalid stack selftests from Michael Neuling
   - Limit EPOW reset event warnings from Vipin K Parashar
   - Remove the Cell QPACE code from Rashmica Gupta
   - Append linux_banner to exception information in xmon from Rashmica Gupta
   - Add selftest to check if VSRs are corrupted from Rashmica Gupta
   - Remove broken GregorianDay() from Daniel Axtens
   - Import Anton's context_switch2 benchmark into selftests from Michael Ellerman
   - Add selftest script to test HMI functionality from Daniel Axtens
   - Remove obsolete OPAL v2 support from Stewart Smith
   - Make enter_rtas() private from Michael Ellerman
   - PPR exception cleanups from Michael Ellerman
   - Add page soft dirty tracking from Laurent Dufour
   - Add support for Nvlink NPUs from Alistair Popple
   - Add support for kexec on 476fpe from Alistair Popple
   - Enable kernel CPU dlpar from sysfs from Nathan Fontenot
   - Copy only required pieces of the mm_context_t to the paca from Michael Neuling
   - Add a kmsg_dumper that flushes OPAL console output on panic from Russell Currey
   - Implement save_stack_trace_regs() to enable kprobe stack tracing from Steven Rostedt
   - Add HWCAP bits for Power9 from Michael Ellerman
   - Fix _PAGE_PTE breaking swapoff from Aneesh Kumar K.V
   - Fix _PAGE_SWP_SOFT_DIRTY breaking swapoff from Hugh Dickins
   - scripts/recordmcount.pl: support data in text section on powerpc from Ulrich Weigand
   - Handle R_PPC64_ENTRY relocations in modules from Ulrich Weigand

  cxl:
   - cxl: Fix possible idr warning when contexts are released from Vaibhav Jain
   - cxl: use correct operator when writing pcie config space values from Andrew Donnellan
   - cxl: Fix DSI misses when the context owning task exits from Vaibhav Jain
   - cxl: fix build for GCC 4.6.x from Brian Norris
   - cxl: use -Werror only with CONFIG_PPC_WERROR from Brian Norris
   - cxl: Enable PCI device ID for future IBM CXL adapter from Uma Krishnan

  Freescale:
   - Freescale updates from Scott: Highlights include moving QE code out
     of arch/powerpc (to be shared with arm), device tree updates, and
     minor fixes"

* tag 'powerpc-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (149 commits)
  powerpc/module: Handle R_PPC64_ENTRY relocations
  scripts/recordmcount.pl: support data in text section on powerpc
  powerpc/powernv: Fix OPAL_CONSOLE_FLUSH prototype and usages
  powerpc/mm: fix _PAGE_SWP_SOFT_DIRTY breaking swapoff
  powerpc/mm: Fix _PAGE_PTE breaking swapoff
  cxl: Enable PCI device ID for future IBM CXL adapter
  cxl: use -Werror only with CONFIG_PPC_WERROR
  cxl: fix build for GCC 4.6.x
  powerpc: Add HWCAP bits for Power9
  powerpc/powernv: Reserve PE#0 on NPU
  powerpc/powernv: Change NPU PE# assignment
  powerpc/powernv: Fix update of NVLink DMA mask
  powerpc/powernv: Remove misleading comment in pci.c
  powerpc: Implement save_stack_trace_regs() to enable kprobe stack tracing
  powerpc: Fix build break due to paca mm_context_t changes
  cxl: Fix DSI misses when the context owning task exits
  MAINTAINERS: Update Scott Wood's e-mail address
  powerpc/powernv: Fix minor off-by-one error in opal_mce_check_early_recovery()
  powerpc: Fix style of self-test config prompts
  powerpc/powernv: Only delay opal_rtc_read() retry when necessary
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/genalloc.c          93
-rw-r--r--  lib/raid6/altivec.uc     1
2 files changed, 88 insertions, 6 deletions
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 116a166b096f..0a1139644d32 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -270,6 +270,25 @@ EXPORT_SYMBOL(gen_pool_destroy);
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
+ return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+/**
+ * gen_pool_alloc_algo - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+ genpool_algo_t algo, void *data)
+{
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
@@ -290,8 +309,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
end_bit = chunk_size(chunk) >> order;
retry:
- start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
- pool->data);
+ start_bit = algo(chunk->bits, end_bit, start_bit,
+ nbits, data, pool);
if (start_bit >= end_bit)
continue;
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -310,7 +329,7 @@ retry:
rcu_read_unlock();
return addr;
}
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_algo);
/**
* gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
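
For illustration only (this snippet is not part of the patch): a minimal sketch of calling the new gen_pool_alloc_algo() entry point, assuming a pool created and populated elsewhere with gen_pool_create() and gen_pool_add(). Passing gen_pool_first_fit with a NULL data pointer behaves like plain gen_pool_alloc() on a pool whose algorithm was never changed with gen_pool_set_algo().

	#include <linux/genalloc.h>

	/* Sketch only: 'pool' is assumed to exist and hold free space. */
	static unsigned long alloc_with_explicit_algo(struct gen_pool *pool)
	{
		/* Ask for 256 bytes, naming the algorithm explicitly instead
		 * of relying on the one stored in the pool. Returns 0 on
		 * failure, like gen_pool_alloc(). */
		return gen_pool_alloc_algo(pool, 256, gen_pool_first_fit, NULL);
	}
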
@@ -501,15 +520,74 @@ EXPORT_SYMBOL(gen_pool_set_algo);
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
+ * @pool: pool to find the fit region memory from
*/
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
- unsigned long start, unsigned int nr, void *data)
+ unsigned long start, unsigned int nr, void *data,
+ struct gen_pool *pool)
{
return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
/**
+ * gen_pool_first_fit_align - find the first available region
+ * of memory matching the size requirement (alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data,
+ struct gen_pool *pool)
+{
+ struct genpool_data_align *alignment;
+ unsigned long align_mask;
+ int order;
+
+ alignment = data;
+ order = pool->min_alloc_order;
+ align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
+ return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_align);
+
+/**
+ * gen_pool_fixed_alloc - reserve a specific region
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data specifying the fixed region offset (struct genpool_data_fixed)
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data,
+ struct gen_pool *pool)
+{
+ struct genpool_data_fixed *fixed_data;
+ int order;
+ unsigned long offset_bit;
+ unsigned long start_bit;
+
+ fixed_data = data;
+ order = pool->min_alloc_order;
+ offset_bit = fixed_data->offset >> order;
+ if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
+ return size;
+
+ start_bit = bitmap_find_next_zero_area(map, size,
+ start + offset_bit, nr, 0);
+ if (start_bit != offset_bit)
+ start_bit = size;
+ return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_fixed_alloc);
+
+/**
* gen_pool_first_fit_order_align - find the first available region
* of memory matching the size requirement. The region will be aligned
* to the order of the size specified.
@@ -518,10 +596,11 @@ EXPORT_SYMBOL(gen_pool_first_fit);
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
+ * @pool: pool to find the fit region memory from
*/
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start,
- unsigned int nr, void *data)
+ unsigned int nr, void *data, struct gen_pool *pool)
{
unsigned long align_mask = roundup_pow_of_two(nr) - 1;
@@ -537,12 +616,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @data: additional data - unused
+ * @pool: pool to find the fit region memory from
*
* Iterate over the bitmap to find the smallest free region
* which we can allocate the memory.
*/
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
- unsigned long start, unsigned int nr, void *data)
+ unsigned long start, unsigned int nr, void *data,
+ struct gen_pool *pool)
{
unsigned long start_bit = size;
unsigned long len = size + 1;
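
The two new algorithms take their parameters through small data structures (struct genpool_data_align with an 'align' field and struct genpool_data_fixed with an 'offset' field, added to include/linux/genalloc.h by the same series but not visible in this lib-only diff). A hedged usage sketch, assuming a single-chunk pool created elsewhere:

	#include <linux/genalloc.h>
	#include <linux/sizes.h>

	/* Sketch only: 'pool' is assumed to have been created with
	 * gen_pool_create() and populated with gen_pool_add(). */
	static void genalloc_algo_examples(struct gen_pool *pool)
	{
		struct genpool_data_align align_data = { .align = 256 };
		struct genpool_data_fixed fixed_data = { .offset = 0x2000 };
		unsigned long addr;

		/* First fit, constrained to 256-byte aligned addresses. */
		addr = gen_pool_alloc_algo(pool, SZ_1K,
					   gen_pool_first_fit_align, &align_data);

		/* Reserve 1 KiB at exactly byte offset 0x2000 within the
		 * chunk; 0 is returned if that region is busy or the offset
		 * is not a multiple of the pool's minimum allocation order. */
		addr = gen_pool_alloc_algo(pool, SZ_1K,
					   gen_pool_fixed_alloc, &fixed_data);
		(void)addr;
	}
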
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index bec27fce7501..682aae8a1fef 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
+ disable_kernel_altivec();
preempt_enable();
}
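
This one-line hunk pairs the existing enable_kernel_altivec() call in the same wrapper with a matching disable, so the kernel's AltiVec state is dropped before preemption is re-enabled. The surrounding lines are not shown in the hunk; the sketch below reconstructs the usual pattern (the real file generates per-unroll variants via the $# placeholder, omitted here), so treat it as illustrative rather than authoritative:

	/* Sketch of the enable/disable pairing around the real syndrome
	 * computation; only the disable_kernel_altivec() line is new. */
	static void raid6_altivec_gen_syndrome(int disks, size_t bytes, void **ptrs)
	{
		preempt_disable();
		enable_kernel_altivec();

		raid6_altivec_gen_syndrome_real(disks, bytes, ptrs);

		disable_kernel_altivec();	/* added by this patch */
		preempt_enable();
	}
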