Diffstat
-rw-r--r--	mm/vmalloc.c	30 ++++++++++++++----------------
1 file changed, 14 insertions(+), 16 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f64632b67196..2b0aa5486092 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1270,19 +1270,15 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(unmap_kernel_range);
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
 {
 	unsigned long addr = (unsigned long)area->addr;
 	unsigned long end = addr + get_vm_area_size(area);
 	int err;
 
-	err = vmap_page_range(addr, end, prot, *pages);
-	if (err > 0) {
-		*pages += err;
-		err = 0;
-	}
+	err = vmap_page_range(addr, end, prot, pages);
 
-	return err;
+	return err > 0 ? 0 : err;
 }
 EXPORT_SYMBOL_GPL(map_vm_area);
 
@@ -1548,7 +1544,7 @@ void *vmap(struct page **pages, unsigned int count,
 	if (!area)
 		return NULL;
 
-	if (map_vm_area(area, prot, &pages)) {
+	if (map_vm_area(area, prot, pages)) {
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -1566,7 +1562,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	const int order = 0;
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
-	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
 
 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
@@ -1589,12 +1586,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	for (i = 0; i < area->nr_pages; i++) {
 		struct page *page;
-		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
 		if (node == NUMA_NO_NODE)
-			page = alloc_page(tmp_mask);
+			page = alloc_page(alloc_mask);
 		else
-			page = alloc_pages_node(node, tmp_mask, order);
+			page = alloc_pages_node(node, alloc_mask, order);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
@@ -1602,9 +1598,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			goto fail;
 		}
 		area->pages[i] = page;
+		if (gfp_mask & __GFP_WAIT)
+			cond_resched();
 	}
 
-	if (map_vm_area(area, prot, &pages))
+	if (map_vm_area(area, prot, pages))
 		goto fail;
 
 	return area->addr;
@@ -2690,14 +2688,14 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
 
 	prev_end = VMALLOC_START;
 
-	spin_lock(&vmap_area_lock);
+	rcu_read_lock();
 
 	if (list_empty(&vmap_area_list)) {
 		vmi->largest_chunk = VMALLOC_TOTAL;
 		goto out;
 	}
 
-	list_for_each_entry(va, &vmap_area_list, list) {
+	list_for_each_entry_rcu(va, &vmap_area_list, list) {
 		unsigned long addr = va->va_start;
 
 		/*
@@ -2724,7 +2722,7 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
 		vmi->largest_chunk = VMALLOC_END - prev_end;
 
 out:
-	spin_unlock(&vmap_area_lock);
+	rcu_read_unlock();
 }
 #endif
 
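
For reference, the interface change above means map_vm_area() now takes the page array directly (struct page **) instead of a struct page *** whose cursor was advanced on partial success; callers simply pass their array, as the vmap() hunk shows. Below is a minimal caller sketch of the new calling convention; the function my_map_buffer() is hypothetical and not part of the patch.

/*
 * Hypothetical sketch, not part of the patch: map an existing array of
 * pages into a fresh vmalloc area using the updated
 * map_vm_area(area, prot, pages) signature from the diff above.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_map_buffer(struct page **pages, unsigned int count)
{
	struct vm_struct *area;

	/* Reserve a virtual address range large enough for 'count' pages. */
	area = get_vm_area((unsigned long)count << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;

	/* Pass the page array itself; no &pages indirection any more. */
	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

This mirrors the vmap() path in the diff: on mapping failure the reserved area is released with vunmap(), exactly as vmap() does.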
