From 3e6628c4b347a558965041290c5a92791dd4c741 Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Mon, 29 Apr 2013 16:21:16 -0700
Subject: idr: introduce idr_alloc_cyclic()

As Tejun points out, there are several users of the IDR facility that
attempt to use it in a cyclic fashion.  These users are likely to see
-ENOSPC errors after the counter wraps one or more times, however.

This patchset adds a new idr_alloc_cyclic routine and converts several
of these users to it.  Many of these users are in obscure parts of the
kernel, and I don't have a good way to test some of them.  The change is
pretty straightforward though, so hopefully it won't be an issue.

There is one other cyclic user of idr_alloc that I didn't touch in
ipc/util.c.  That one is doing some strange stuff that I didn't quite
understand, but it looks like it should probably be converted later
somehow.

This patch:

Thus spake Tejun Heo:

	Ooh, BTW, the cyclic allocation is broken.  It's prone to
	-ENOSPC after the first wraparound.  There are several cyclic
	users in the kernel and I think it probably would be best to
	implement cyclic support in idr.

This patch does that by adding a new idr_alloc_cyclic function that such
users in the kernel can use.  With this, there's no need for a caller to
keep track of the last value used, as that's now tracked internally.
This should prevent the -ENOSPC problems that can hit when the "last
allocated" counter exceeds INT_MAX.

Later patches will convert existing cyclic users to the new interface.

Signed-off-by: Jeff Layton
Reviewed-by: Tejun Heo
Cc: "David S. Miller"
Cc: "J. Bruce Fields"
Cc: Eric Paris
Cc: Jack Morgenstein
Cc: John McCutchan
Cc: Neil Horman
Cc: Or Gerlitz
Cc: Robert Love
Cc: Roland Dreier
Cc: Sridhar Samudrala
Cc: Steve Wise
Cc: Tom Tucker
Cc: Vlad Yasevich
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/idr.h |  2 ++
 lib/idr.c           | 27 +++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/include/linux/idr.h b/include/linux/idr.h
index 2640c7e99e51..a470ac3ef49d 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -42,6 +42,7 @@ struct idr {
 	struct idr_layer	*id_free;
 	int			layers;	/* only valid w/o concurrent changes */
 	int			id_free_cnt;
+	int			cur;	/* current pos for cyclic allocation */
 	spinlock_t		lock;
 };
 
@@ -75,6 +76,7 @@ struct idr {
 void *idr_find_slowpath(struct idr *idp, int id);
 void idr_preload(gfp_t gfp_mask);
 int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
 int idr_for_each(struct idr *idp,
 		 int (*fn)(int id, void *p, void *data), void *data);
 void *idr_get_next(struct idr *idp, int *nextid);
diff --git a/lib/idr.c b/lib/idr.c
index 322e2816f2fb..cca4b9302a71 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -495,6 +495,33 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(idr_alloc);
 
+/**
+ * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Essentially the same as idr_alloc, but prefers to allocate progressively
+ * higher ids if it can.  If the "cur" counter wraps, then it will start again
+ * at the "start" end of the range and allocate one that has already been used.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
+			gfp_t gfp_mask)
+{
+	int id;
+
+	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
+	if (id == -ENOSPC)
+		id = idr_alloc(idr, ptr, start, end, gfp_mask);
+
+	if (likely(id >= 0))
+		idr->cur = id + 1;
+	return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
+
 static void idr_remove_warning(int id)
 {
 	printk(KERN_WARNING
--
cgit v1.2.3-59-g8ed1b
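
For illustration, a minimal sketch of how a caller might use the new helper,
following the idr_preload()/GFP_NOWAIT pattern documented for idr_alloc() in
this era.  This is not part of the patch; the names foo_idr, foo_idr_lock and
foo_assign_id, and the choice of locking, are hypothetical.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(foo_idr);			/* hypothetical id space */
static DEFINE_SPINLOCK(foo_idr_lock);		/* protects foo_idr modifications */

/* Allocate a cyclic id >= 1 for @obj; returns the new id or a negative errno. */
static int foo_assign_id(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* preload so GFP_NOWAIT below can succeed */
	spin_lock(&foo_idr_lock);

	/*
	 * Before this patch, a caller typically kept its own "next id"
	 * cursor and passed it as @start to idr_alloc(), which returns
	 * -ENOSPC once the cursor runs past the top of the range.
	 * idr_alloc_cyclic() keeps the cursor in idr->cur and falls back
	 * to @start on wraparound.
	 */
	id = idr_alloc_cyclic(&foo_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&foo_idr_lock);
	idr_preload_end();

	return id;
}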