Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bitmap.h          |   6
-rw-r--r--  include/linux/bootmem.h         |   2
-rw-r--r--  include/linux/cache.h           |   4
-rw-r--r--  include/linux/capability.h      |   3
-rw-r--r--  include/linux/cpumask.h         |  22
-rw-r--r--  include/linux/cpuset.h          |   4
-rw-r--r--  include/linux/dmi.h             |   1
-rw-r--r--  include/linux/fb.h              |  44
-rw-r--r--  include/linux/fs.h              |   7
-rw-r--r--  include/linux/gfp.h             |  44
-rw-r--r--  include/linux/hid.h             |   4
-rw-r--r--  include/linux/hugetlb.h         |  46
-rw-r--r--  include/linux/i2o.h             |   5
-rw-r--r--  include/linux/init_task.h       |   3
-rw-r--r--  include/linux/kprobes.h         |  34
-rw-r--r--  include/linux/list.h            |   9
-rw-r--r--  include/linux/memory_hotplug.h  |  33
-rw-r--r--  include/linux/mempolicy.h       | 156
-rw-r--r--  include/linux/mm.h              |  57
-rw-r--r--  include/linux/mm_types.h        |   4
-rw-r--r--  include/linux/mmzone.h          | 183
-rw-r--r--  include/linux/msdos_fs.h        |  10
-rw-r--r--  include/linux/ncp_fs.h          |   7
-rw-r--r--  include/linux/nodemask.h        |  22
-rw-r--r--  include/linux/notifier.h        |   1
-rw-r--r--  include/linux/oom.h             |   4
-rw-r--r--  include/linux/page-flags.h      | 319
-rw-r--r--  include/linux/prctl.h           |   9
-rw-r--r--  include/linux/quota.h           |  21
-rw-r--r--  include/linux/quotaops.h        | 137
-rw-r--r--  include/linux/raid/raid5.h      |   7
-rw-r--r--  include/linux/reiserfs_fs.h     |   1
-rw-r--r--  include/linux/sched.h           |   3
-rw-r--r--  include/linux/securebits.h      |  25
-rw-r--r--  include/linux/security.h        |  16
-rw-r--r--  include/linux/serial_8250.h     |   1
-rw-r--r--  include/linux/shmem_fs.h        |   3
-rw-r--r--  include/linux/suspend.h         |  15
-rw-r--r--  include/linux/swap.h            |   4
-rw-r--r--  include/linux/synclink.h        |   4
-rw-r--r--  include/linux/vmalloc.h         |   5
-rw-r--r--  include/linux/vmstat.h          |   6
42 files changed, 852 insertions(+), 439 deletions(-)
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 1dbe074f1c64..43b406def35f 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -46,6 +46,8 @@
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
@@ -121,6 +123,10 @@ extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
+extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+ const unsigned long *relmap, int bits);
+extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+ int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
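The one-line summaries of bitmap_onto() and bitmap_fold() above are terse. As a rough illustration of the fold semantics (every set bit n of orig sets bit n mod sz in dst), here is a minimal single-word userspace sketch; it is not the lib/bitmap.c implementation, which handles multi-word bitmaps:

#include <stdio.h>

/* Single-word model of bitmap_fold(): bit n of orig sets bit (n % sz)
 * of dst. Illustration only; the real helper in lib/bitmap.c operates
 * on arbitrary-length bitmaps. */
static unsigned long fold_word(unsigned long orig, int sz)
{
        unsigned long dst = 0;
        int n;

        for (n = 0; n < (int)(8 * sizeof(orig)); n++)
                if (orig & (1UL << n))
                        dst |= 1UL << (n % sz);
        return dst;
}

int main(void)
{
        /* Bits {0, 9, 17} folded mod 8 land on bits {0, 1}: prints 0x3. */
        printf("%#lx\n", fold_word((1UL << 0) | (1UL << 9) | (1UL << 17), 8));
        return 0;
}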
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 4e4e340592fb..6a5dbdc8a7dc 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -101,6 +101,8 @@ extern void reserve_bootmem_node(pg_data_t *pgdat,
extern void free_bootmem_node(pg_data_t *pgdat,
unsigned long addr,
unsigned long size);
+extern void *alloc_bootmem_section(unsigned long size,
+ unsigned long section_nr);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
#define alloc_bootmem_node(pgdat, x) \
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 4552504c0228..97e24881c4c6 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -60,4 +60,8 @@
#endif
#endif
+#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+#define cache_line_size() L1_CACHE_BYTES
+#endif
+
#endif /* __LINUX_CACHE_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 7d50ff6d269f..eaab759b1460 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -155,6 +155,7 @@ typedef struct kernel_cap_struct {
* Add any capability from current's capability bounding set
* to the current process' inheritable set
* Allow taking bits out of capability bounding set
+ * Allow modification of the securebits for a process
*/
#define CAP_SETPCAP 8
@@ -490,8 +491,6 @@ extern const kernel_cap_t __cap_init_eff_set;
int capable(int cap);
int __capable(struct task_struct *t, int cap);
-extern long cap_prctl_drop(unsigned long cap);
-
#endif /* __KERNEL__ */
#endif /* !_LINUX_CAPABILITY_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 259c8051155d..9650806fe2ea 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -14,6 +14,8 @@
* bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
* For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
* For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
+ * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
+ * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
*
* The available cpumask operations are:
*
@@ -53,7 +55,9 @@
* int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
* int cpulist_parse(buf, map) Parse ascii string as cpulist
* int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
- * int cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
+ * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
+ * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap
+ * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz
*
* for_each_cpu_mask(cpu, mask) for-loop cpu over mask
*
@@ -330,6 +334,22 @@ static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}
+#define cpus_onto(dst, orig, relmap) \
+ __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
+static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
+ const cpumask_t *relmapp, int nbits)
+{
+ bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
+}
+
+#define cpus_fold(dst, orig, sz) \
+ __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
+static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
+ int sz, int nbits)
+{
+ bitmap_fold(dstp->bits, origp->bits, sz, nbits);
+}
+
#if NR_CPUS > 1
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = first_cpu(mask); \
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 726761e24003..038578362b47 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -26,7 +26,7 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
@@ -103,7 +103,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}
-static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
return 1;
}
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 325acdf5c462..2a063b64133f 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -90,6 +90,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
+static inline void dmi_scan_machine(void) { return; }
static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
#define dmi_available 0
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 58c57a33e5dd..72295b099228 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -791,6 +791,17 @@ struct fb_tile_ops {
*/
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
+/*
+ * Host and GPU endianness differ.
+ */
+#define FBINFO_FOREIGN_ENDIAN 0x100000
+/*
+ * Big endian math. This is the same flag value as above, but with a
+ * different meaning: it is set by the fb subsystem depending on the
+ * FOREIGN_ENDIAN flag and host endianness. Drivers should not use this flag.
+ */
+#define FBINFO_BE_MATH 0x100000
+
struct fb_info {
int node;
int flags;
@@ -899,15 +910,11 @@ struct fb_info {
#endif
-#if defined (__BIG_ENDIAN)
-#define FB_LEFT_POS(bpp) (32 - bpp)
-#define FB_SHIFT_HIGH(val, bits) ((val) >> (bits))
-#define FB_SHIFT_LOW(val, bits) ((val) << (bits))
-#else
-#define FB_LEFT_POS(bpp) (0)
-#define FB_SHIFT_HIGH(val, bits) ((val) << (bits))
-#define FB_SHIFT_LOW(val, bits) ((val) >> (bits))
-#endif
+#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0)
+#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
+ (val) << (bits))
+#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? (val) << (bits) : \
+ (val) >> (bits))
/*
* `Generic' versions of the frame buffer device operations
@@ -970,6 +977,25 @@ extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry,
int datasync);
+static inline bool fb_be_math(struct fb_info *info)
+{
+#ifdef CONFIG_FB_FOREIGN_ENDIAN
+#if defined(CONFIG_FB_BOTH_ENDIAN)
+ return info->flags & FBINFO_BE_MATH;
+#elif defined(CONFIG_FB_BIG_ENDIAN)
+ return true;
+#elif defined(CONFIG_FB_LITTLE_ENDIAN)
+ return false;
+#endif /* CONFIG_FB_BOTH_ENDIAN */
+#else
+#ifdef __BIG_ENDIAN
+ return true;
+#else
+ return false;
+#endif /* __BIG_ENDIAN */
+#endif /* CONFIG_FB_FOREIGN_ENDIAN */
+}
+
/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
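What the new macros buy is run-time rather than compile-time selection of the shift direction. A hedged userspace model of the CONFIG_FB_BOTH_ENDIAN case follows, with fb_info reduced to a single flags word and the constant copied from above:

#include <stdio.h>
#include <stdbool.h>

#define FBINFO_BE_MATH  0x100000

struct fb_info { int flags; };          /* reduced for illustration */

/* Models only the CONFIG_FB_BOTH_ENDIAN branch of fb_be_math() above. */
static bool fb_be_math(struct fb_info *p)
{
        return p->flags & FBINFO_BE_MATH;
}

#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
                                                     (val) << (bits))

int main(void)
{
        struct fb_info le = { 0 };
        struct fb_info be = { FBINFO_BE_MATH };

        /* Same call site, opposite shift direction per framebuffer. */
        printf("le: %#x\n", FB_SHIFT_HIGH(&le, 0x10, 4));       /* 0x100 */
        printf("be: %#x\n", FB_SHIFT_HIGH(&be, 0x10, 4));       /* 0x1 */
        return 0;
}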
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d6d7c52055c6..2c925747bc49 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -474,8 +474,8 @@ struct address_space_operations {
int (*releasepage) (struct page *, gfp_t);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
- struct page* (*get_xip_page)(struct address_space *, sector_t,
- int);
+ int (*get_xip_mem)(struct address_space *, pgoff_t, int,
+ void **, unsigned long *);
/* migrate the contents of a page to the specified target */
int (*migratepage) (struct address_space *,
struct page *, struct page *);
@@ -1178,7 +1178,8 @@ struct block_device_operations {
int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
long (*compat_ioctl) (struct file *, unsigned, unsigned long);
- int (*direct_access) (struct block_device *, sector_t, unsigned long *);
+ int (*direct_access) (struct block_device *, sector_t,
+ void **, unsigned long *);
int (*media_changed) (struct gendisk *);
int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9da3c1b..c37653b6843f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -119,35 +119,22 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
static inline enum zone_type gfp_zone(gfp_t flags)
{
- int base = 0;
-
-#ifdef CONFIG_NUMA
- if (flags & __GFP_THISNODE)
- base = MAX_NR_ZONES;
-#endif
-
#ifdef CONFIG_ZONE_DMA
if (flags & __GFP_DMA)
- return base + ZONE_DMA;
+ return ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
if (flags & __GFP_DMA32)
- return base + ZONE_DMA32;
+ return ZONE_DMA32;
#endif
if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
(__GFP_HIGHMEM | __GFP_MOVABLE))
- return base + ZONE_MOVABLE;
+ return ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
if (flags & __GFP_HIGHMEM)
- return base + ZONE_HIGHMEM;
+ return ZONE_HIGHMEM;
#endif
- return base + ZONE_NORMAL;
-}
-
-static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
-{
- BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
- return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
+ return ZONE_NORMAL;
}
/*
@@ -157,13 +144,27 @@ static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
* virtual kernel addresses to the allocated page(s).
*/
+static inline int gfp_zonelist(gfp_t flags)
+{
+ if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
+ return 1;
+
+ return 0;
+}
+
/*
* We get the zone list from the current node and the gfp_mask.
* This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * There are two zonelists per node, one for all zones with memory and
+ * one containing just zones from the node the zonelist belongs to.
*
* For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
* optimized to &contig_page_data at compile-time.
*/
+static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
+{
+ return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
+}
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
@@ -174,6 +175,10 @@ static inline void arch_alloc_page(struct page *page, int order) { }
extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *);
+extern struct page *
+__alloc_pages_nodemask(gfp_t, unsigned int,
+ struct zonelist *, nodemask_t *nodemask);
+
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
@@ -184,8 +189,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
if (nid < 0)
nid = numa_node_id();
- return __alloc_pages(gfp_mask, order,
- NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
+ return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
#ifdef CONFIG_NUMA
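The indexing that node_zonelist() performs is easy to model: each node now carries exactly two zonelists, and gfp_zonelist() chooses between them on __GFP_THISNODE alone. A userspace sketch with a made-up flag value and a stripped-down pglist_data (the real function takes a node id and goes through NODE_DATA()):

#include <stdio.h>

#define __GFP_THISNODE  0x40000u        /* illustrative value only */

struct zonelist { const char *label; };

struct pglist_data {
        /* [0]: fallback across all nodes, [1]: this node only */
        struct zonelist node_zonelists[2];
};

static int gfp_zonelist(unsigned int flags)
{
        return (flags & __GFP_THISNODE) ? 1 : 0;
}

static struct zonelist *node_zonelist(struct pglist_data *pgdat,
                                      unsigned int flags)
{
        return pgdat->node_zonelists + gfp_zonelist(flags);
}

int main(void)
{
        struct pglist_data node = {
                .node_zonelists = { { "fallback" }, { "thisnode" } },
        };

        printf("%s\n", node_zonelist(&node, 0)->label);
        printf("%s\n", node_zonelist(&node, __GFP_THISNODE)->label);
        return 0;
}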
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d951ec411241..4ce3b7a979ba 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -498,13 +498,13 @@ struct hid_parser {
struct hid_class_descriptor {
__u8 bDescriptorType;
- __u16 wDescriptorLength;
+ __le16 wDescriptorLength;
} __attribute__ ((packed));
struct hid_descriptor {
__u8 bLength;
__u8 bDescriptorType;
- __u16 bcdHID;
+ __le16 bcdHID;
__u8 bCountryCode;
__u8 bNumDescriptors;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index addca4cd4f11..a79e80b689d8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -8,6 +8,7 @@
#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
+#include <asm/hugetlb.h>
struct ctl_table;
@@ -51,51 +52,6 @@ int pmd_huge(pmd_t pmd);
void hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
-#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define is_hugepage_only_range(mm, addr, len) 0
-#endif
-
-#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
-#define hugetlb_free_pgd_range free_pgd_range
-#else
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
- unsigned long end, unsigned long floor,
- unsigned long ceiling);
-#endif
-
-#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-#else
-int prepare_hugepage_range(unsigned long addr, unsigned long len);
-#endif
-
-#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
-#define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte)
-#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
-#else
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
-#endif
-
-#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
-#define hugetlb_prefault_arch_hook(mm) do { } while (0)
-#else
-void hugetlb_prefault_arch_hook(struct mm_struct *mm);
-#endif
-
#else /* !CONFIG_HUGETLB_PAGE */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index e92170dda245..f65e58a1d925 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -613,14 +613,9 @@ struct i2o_sys_tbl {
extern struct list_head i2o_controllers;
/* Message functions */
-static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
-static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
-static inline int i2o_msg_post_wait(struct i2o_controller *,
- struct i2o_message *, unsigned long);
extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
unsigned long, struct i2o_dma *);
-static inline void i2o_flush_reply(struct i2o_controller *, u32);
/* IOP functions */
extern int i2o_status_get(struct i2o_controller *);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 37a6f5bc4a92..bf6b8a61f8db 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -9,6 +9,7 @@
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
+#include <linux/securebits.h>
#include <net/net_namespace.h>
#define INIT_FDTABLE \
@@ -172,7 +173,7 @@ extern struct group_info init_groups;
.cap_inheritable = CAP_INIT_INH_SET, \
.cap_permitted = CAP_FULL_SET, \
.cap_bset = CAP_INIT_BSET, \
- .keep_capabilities = 0, \
+ .securebits = SECUREBITS_DEFAULT, \
.user = INIT_USER, \
.comm = "swapper", \
.thread = INIT_THREAD, \
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0f28486f6360..1036631ff4fa 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -173,6 +173,13 @@ struct kretprobe_blackpoint {
const char *name;
void *addr;
};
+
+struct kprobe_blackpoint {
+ const char *name;
+ unsigned long start_addr;
+ unsigned long range;
+};
+
extern struct kretprobe_blackpoint kretprobe_blacklist[];
static inline void kretprobe_assert(struct kretprobe_instance *ri,
@@ -227,15 +234,21 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
+int register_kprobes(struct kprobe **kps, int num);
+void unregister_kprobes(struct kprobe **kps, int num);
int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
int longjmp_break_handler(struct kprobe *, struct pt_regs *);
int register_jprobe(struct jprobe *p);
void unregister_jprobe(struct jprobe *p);
+int register_jprobes(struct jprobe **jps, int num);
+void unregister_jprobes(struct jprobe **jps, int num);
void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
+int register_kretprobes(struct kretprobe **rps, int num);
+void unregister_kretprobes(struct kretprobe **rps, int num);
void kprobe_flush_task(struct task_struct *tk);
void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
@@ -254,16 +267,30 @@ static inline int register_kprobe(struct kprobe *p)
{
return -ENOSYS;
}
+static inline int register_kprobes(struct kprobe **kps, int num)
+{
+ return -ENOSYS;
+}
static inline void unregister_kprobe(struct kprobe *p)
{
}
+static inline void unregister_kprobes(struct kprobe **kps, int num)
+{
+}
static inline int register_jprobe(struct jprobe *p)
{
return -ENOSYS;
}
+static inline int register_jprobes(struct jprobe **jps, int num)
+{
+ return -ENOSYS;
+}
static inline void unregister_jprobe(struct jprobe *p)
{
}
+static inline void unregister_jprobes(struct jprobe **jps, int num)
+{
+}
static inline void jprobe_return(void)
{
}
@@ -271,9 +298,16 @@ static inline int register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
}
+static inline int register_kretprobes(struct kretprobe **rps, int num)
+{
+ return -ENOSYS;
+}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
}
+static inline void unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
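For context, a sketch of how a module might use the new batch interfaces. The probed symbols and the empty handler are invented for illustration, and the usual module boilerplate is assumed:

#include <linux/module.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
        return 0;       /* observe only */
}

static struct kprobe kp_fork = {
        .symbol_name = "do_fork",       /* illustrative symbol */
        .pre_handler = handler_pre,
};

static struct kprobe kp_exit = {
        .symbol_name = "do_exit",       /* illustrative symbol */
        .pre_handler = handler_pre,
};

static struct kprobe *kps[] = { &kp_fork, &kp_exit };

static int __init batch_init(void)
{
        /* One call instead of a register_kprobe() loop. */
        return register_kprobes(kps, ARRAY_SIZE(kps));
}

static void __exit batch_exit(void)
{
        unregister_kprobes(kps, ARRAY_SIZE(kps));
}

module_init(batch_init);
module_exit(batch_exit);
MODULE_LICENSE("GPL");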
diff --git a/include/linux/list.h b/include/linux/list.h
index dac16f99c701..b4a939b6b625 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -319,6 +319,15 @@ static inline int list_empty_careful(const struct list_head *head)
return (next == head) && (next == head->prev);
}
+/**
+ * list_is_singular - tests whether a list has just one entry.
+ * @head: the list to test.
+ */
+static inline int list_is_singular(const struct list_head *head)
+{
+ return !list_empty(head) && (head->next == head->prev);
+}
+
static inline void __list_splice(struct list_head *list,
struct list_head *head)
{
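Because an empty list also satisfies head->next == head->prev, the extra list_empty() check in list_is_singular() is what makes the predicate correct. A self-contained userspace check, with the list primitives re-declared minimally:

#include <stdio.h>

/* Minimal userspace model of the list_head API, just enough to
 * exercise list_is_singular() as defined above. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *head)
{
        head->next = head->prev = head;
}

static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

static int list_is_singular(const struct list_head *head)
{
        return !list_empty(head) && (head->next == head->prev);
}

int main(void)
{
        struct list_head head, a, b;

        INIT_LIST_HEAD(&head);
        printf("empty:     singular=%d\n", list_is_singular(&head)); /* 0 */
        list_add(&a, &head);
        printf("one item:  singular=%d\n", list_is_singular(&head)); /* 1 */
        list_add(&b, &head);
        printf("two items: singular=%d\n", list_is_singular(&head)); /* 0 */
        return 0;
}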
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8fee7a45736b..73e358612eaf 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -8,8 +8,18 @@
struct page;
struct zone;
struct pglist_data;
+struct mem_section;
#ifdef CONFIG_MEMORY_HOTPLUG
+
+/*
+ * Magic number for free bootmem.
+ * The normal smallest mapcount is -1. Here is smaller value than it.
+ */
+#define SECTION_INFO 0xfffffffe
+#define MIX_INFO 0xfffffffd
+#define NODE_INFO 0xfffffffc
+
/*
* pgdat resizing functions
*/
@@ -64,9 +74,11 @@ extern int offline_pages(unsigned long, unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
+extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages);
/*
- * Walk thorugh all memory which is registered as resource.
+ * Walk through all memory which is registered as resource.
* arg is (start_pfn, nr_pages, private_arg_pointer)
*/
extern int walk_memory_resource(unsigned long start_pfn,
@@ -142,6 +154,18 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+static inline void put_page_bootmem(struct page *page)
+{
+}
+#else
+extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
+extern void put_page_bootmem(struct page *page);
+#endif
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -169,6 +193,10 @@ static inline int mhp_notimplemented(const char *func)
return -ENOSYS;
}
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
#endif /* ! CONFIG_MEMORY_HOTPLUG */
extern int add_memory(int nid, u64 start, u64 size);
@@ -176,5 +204,8 @@ extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
int nr_pages);
+extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
+extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
+ unsigned long pnum);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 59c4865bc85f..3a39570b81b8 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -8,15 +8,32 @@
* Copyright 2003,2004 Andi Kleen SuSE Labs
*/
+/*
+ * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
+ * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
+ * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
+ */
+
/* Policies */
-#define MPOL_DEFAULT 0
-#define MPOL_PREFERRED 1
-#define MPOL_BIND 2
-#define MPOL_INTERLEAVE 3
+enum {
+ MPOL_DEFAULT,
+ MPOL_PREFERRED,
+ MPOL_BIND,
+ MPOL_INTERLEAVE,
+ MPOL_MAX, /* always last member of enum */
+};
-#define MPOL_MAX MPOL_INTERLEAVE
+/* Flags for set_mempolicy */
+#define MPOL_F_STATIC_NODES (1 << 15)
+#define MPOL_F_RELATIVE_NODES (1 << 14)
-/* Flags for get_mem_policy */
+/*
+ * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
+ * either set_mempolicy() or mbind().
+ */
+#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
+
+/* Flags for get_mempolicy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
@@ -27,6 +44,14 @@
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
+/*
+ * Internal flags that share the struct mempolicy flags word with
+ * "mode flags". These flags are allocated from bit 0 up, as they
+ * are never OR'ed into the mode in mempolicy API arguments.
+ */
+#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
+#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
+
#ifdef __KERNEL__
#include <linux/mmzone.h>
@@ -35,7 +60,6 @@
#include <linux/spinlock.h>
#include <linux/nodemask.h>
-struct vm_area_struct;
struct mm_struct;
#ifdef CONFIG_NUMA
@@ -54,22 +78,27 @@ struct mm_struct;
* mmap_sem.
*
* Freeing policy:
- * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
- * All other policies don't have any external state. mpol_free() handles this.
+ * Mempolicy objects are reference counted. A mempolicy will be freed when
+ * mpol_put() decrements the reference count to zero.
*
- * Copying policy objects:
- * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
+ * to the new storage. The reference count of the new object is initialized
+ * to 1, representing the caller of mpol_dup().
*/
struct mempolicy {
atomic_t refcnt;
- short policy; /* See MPOL_* above */
+ unsigned short mode; /* See MPOL_* above */
+ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
union {
- struct zonelist *zonelist; /* bind */
short preferred_node; /* preferred */
- nodemask_t nodes; /* interleave */
+ nodemask_t nodes; /* interleave/bind */
/* undefined for default */
} v;
- nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */
+ union {
+ nodemask_t cpuset_mems_allowed; /* relative to these nodes */
+ nodemask_t user_nodemask; /* nodemask passed by user */
+ } w;
};
/*
@@ -77,18 +106,43 @@ struct mempolicy {
* The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
*/
-extern void __mpol_free(struct mempolicy *pol);
-static inline void mpol_free(struct mempolicy *pol)
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
{
if (pol)
- __mpol_free(pol);
+ __mpol_put(pol);
}
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+/*
+ * Does mempolicy pol need explicit unref after use?
+ * Currently only needed for shared policies.
+ */
+static inline int mpol_needs_cond_ref(struct mempolicy *pol)
+{
+ return (pol && (pol->flags & MPOL_F_SHARED));
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+ if (mpol_needs_cond_ref(pol))
+ __mpol_put(pol);
+}
+
+extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+ struct mempolicy *frompol);
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
+ struct mempolicy *frompol)
+{
+ if (!frompol)
+ return frompol;
+ return __mpol_cond_copy(tompol, frompol);
+}
+
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
if (pol)
- pol = __mpol_copy(pol);
+ pol = __mpol_dup(pol);
return pol;
}
@@ -108,11 +162,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
return 1;
return __mpol_equal(a, b);
}
-#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
-
-/* Could later add inheritance of the process policy here. */
-
-#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
/*
* Tree of shared policies for a shared memory region.
@@ -133,8 +182,7 @@ struct shared_policy {
spinlock_t lock;
};
-void mpol_shared_policy_init(struct shared_policy *info, int policy,
- nodemask_t *nodes);
+void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma,
struct mempolicy *new);
@@ -149,9 +197,9 @@ extern void mpol_rebind_task(struct task_struct *tsk,
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);
-extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
- unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
+ unsigned long addr, gfp_t gfp_flags,
+ struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -165,6 +213,13 @@ static inline void check_highest_zone(enum zone_type k)
int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+
+#ifdef CONFIG_TMPFS
+extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
+
+extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+ int no_context);
+#endif
#else
struct mempolicy {};
@@ -173,19 +228,26 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
return 1;
}
-#define vma_mpol_equal(a,b) 1
-#define mpol_set_vma_default(vma) do {} while(0)
+static inline void mpol_put(struct mempolicy *p)
+{
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+}
-static inline void mpol_free(struct mempolicy *p)
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
+ struct mempolicy *from)
{
+ return from;
}
static inline void mpol_get(struct mempolicy *pol)
{
}
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
return NULL;
}
@@ -199,8 +261,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
return -EINVAL;
}
-static inline void mpol_shared_policy_init(struct shared_policy *info,
- int policy, nodemask_t *nodes)
+static inline void mpol_shared_policy_init(struct shared_policy *sp,
+ struct mempolicy *mpol)
{
}
@@ -239,9 +301,12 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
}
static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
- unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
+ unsigned long addr, gfp_t gfp_flags,
+ struct mempolicy **mpol, nodemask_t **nodemask)
{
- return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
+ *mpol = NULL;
+ *nodemask = NULL;
+ return node_zonelist(0, gfp_flags);
}
static inline int do_migrate_pages(struct mm_struct *mm,
@@ -254,6 +319,21 @@ static inline int do_migrate_pages(struct mm_struct *mm,
static inline void check_highest_zone(int k)
{
}
+
+#ifdef CONFIG_TMPFS
+static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
+ int no_context)
+{
+ return 1; /* error */
+}
+
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+ int no_context)
+{
+ return 0;
+}
+#endif
+
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */
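As the new header comment explains, the mode and the optional mode flags share one 'int' in the set_mempolicy()/mbind() interface. A small userspace sketch of the packing, mirroring the constants above; the split logic is written out here purely for illustration:

#include <stdio.h>

/* Values from the header above. */
#define MPOL_INTERLEAVE         3
#define MPOL_F_STATIC_NODES     (1 << 15)
#define MPOL_F_RELATIVE_NODES   (1 << 14)
#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)

int main(void)
{
        /* Userspace packs the mode and optional mode flags into one
         * int for set_mempolicy(); the kernel splits them back out. */
        int arg = MPOL_INTERLEAVE | MPOL_F_STATIC_NODES;

        unsigned short mode  = arg & ~MPOL_MODE_FLAGS;
        unsigned short flags = arg & MPOL_MODE_FLAGS;

        printf("mode=%u static=%d relative=%d\n", mode,
               !!(flags & MPOL_F_STATIC_NODES),
               !!(flags & MPOL_F_RELATIVE_NODES));
        return 0;
}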
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 286d31521605..8b7f4a5d4f6a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -107,6 +107,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -164,8 +165,6 @@ struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
- struct page *(*nopage)(struct vm_area_struct *area,
- unsigned long address, int *type);
unsigned long (*nopfn)(struct vm_area_struct *area,
unsigned long address);
@@ -173,7 +172,25 @@ struct vm_operations_struct {
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
#ifdef CONFIG_NUMA
+ /*
+ * set_policy() op must add a reference to any non-NULL @new mempolicy
+ * to hold the policy upon return. Caller should pass NULL @new to
+ * remove a policy and fall back to surrounding context--i.e. do not
+ * install a MPOL_DEFAULT policy, nor the task or system default
+ * mempolicy.
+ */
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+ /*
+ * get_policy() op must add reference [mpol_get()] to any policy at
+ * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
+ * in mm/mempolicy.c will do this automatically.
+ * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+ * must return NULL--i.e., do not "fallback" to task or system default
+ * policy.
+ */
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
@@ -397,11 +414,11 @@ static inline void set_compound_order(struct page *page, unsigned long order)
* we have run out of space and have to fall back to an
* alternate (slower) way of determining the node.
*
- * No sparsemem: | NODE | ZONE | ... | FLAGS |
- * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
- * no space for node: | SECTION | ZONE | ... | FLAGS |
+ * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
*/
-#ifdef CONFIG_SPARSEMEM
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH 0
@@ -409,9 +426,12 @@ static inline void set_compound_order(struct page *page, unsigned long order)
#define ZONES_WIDTH ZONES_SHIFT
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
#else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
#define NODES_WIDTH 0
#endif
@@ -454,8 +474,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
@@ -504,10 +524,12 @@ static inline struct zone *page_zone(struct page *page)
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline unsigned long page_to_section(struct page *page)
{
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
+#endif
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
@@ -602,9 +624,12 @@ static inline struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
+#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else
+#endif
+ if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
}
@@ -649,12 +674,6 @@ static inline int page_mapped(struct page *page)
}
/*
- * Error return values for the *_nopage functions
- */
-#define NOPAGE_SIGBUS (NULL)
-#define NOPAGE_OOM ((struct page *) (-1))
-
-/*
* Error return values for the *_nopfn functions
*/
#define NOPFN_SIGBUS ((unsigned long) -1)
@@ -720,7 +739,9 @@ struct zap_details {
unsigned long truncate_count; /* Compare vm_truncate_count */
};
-struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -1149,6 +1170,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
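To make the reworked layout comment concrete, here is a worked sketch of the field packing: fields stack downward from the top bit, flag bits grow up from bit 0, and the new #if fires when the two regions would collide. The widths below are invented for the example (and a 64-bit unsigned long is assumed), not taken from any real configuration:

#include <stdio.h>

/* Invented widths for illustration only; the kernel derives these
 * from the configuration. Fields pack down from bit N-1, flags
 * (NR_PAGEFLAGS of them) grow up from bit 0. */
#define BITS_PER_LONG   64
#define NODES_WIDTH     6
#define ZONES_WIDTH     2
#define NR_PAGEFLAGS    20

#define NODES_PGSHIFT   (BITS_PER_LONG - NODES_WIDTH)   /* 58 */
#define ZONES_PGSHIFT   (NODES_PGSHIFT - ZONES_WIDTH)   /* 56 */

#if NODES_WIDTH + ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error fields and flags would overlap   /* the check the patch adds */
#endif

int main(void)
{
        unsigned long flags = 0;

        flags |= 3UL << NODES_PGSHIFT;  /* node 3 */
        flags |= 1UL << ZONES_PGSHIFT;  /* zone 1 */
        flags |= 1UL << 4;              /* some page flag bit */

        printf("node=%lu zone=%lu\n",
               (flags >> NODES_PGSHIFT) & ((1UL << NODES_WIDTH) - 1),
               (flags >> ZONES_PGSHIFT) & ((1UL << ZONES_WIDTH) - 1));
        return 0;
}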
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index af190ceab971..29adaa781cb6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -172,6 +172,7 @@ struct mm_struct {
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
int map_count; /* number of VMAs */
+ int core_waiters;
struct rw_semaphore mmap_sem;
spinlock_t page_table_lock; /* Protects page tables and some counters */
@@ -216,11 +217,10 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access the bits */
/* coredumping support */
- int core_waiters;
struct completion *core_startup_done, core_done;
/* aio bits */
- rwlock_t ioctx_list_lock;
+ rwlock_t ioctx_list_lock; /* aio lock */
struct kioctx *ioctx_list;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
struct mem_cgroup *mem_cgroup;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f274a687c7e..aad98003176f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+#ifndef __GENERATING_BOUNDS_H
#include <linux/spinlock.h>
#include <linux/list.h>
@@ -15,6 +16,7 @@
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
+#include <linux/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>
@@ -129,6 +131,8 @@ struct per_cpu_pageset {
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif
+#endif /* !__GENERATING_BOUNDS_H */
+
enum zone_type {
#ifdef CONFIG_ZONE_DMA
/*
@@ -177,9 +181,11 @@ enum zone_type {
ZONE_HIGHMEM,
#endif
ZONE_MOVABLE,
- MAX_NR_ZONES
+ __MAX_NR_ZONES
};
+#ifndef __GENERATING_BOUNDS_H
+
/*
* When a memory allocation must conform to specific limitations (such
* as being suitable for DMA) the caller will pass in hints to the
@@ -188,28 +194,15 @@ enum zone_type {
* match the requested limits. See gfp_zone() in include/linux/gfp.h
*/
-/*
- * Count the active zones. Note that the use of defined(X) outside
- * #if and family is not necessarily defined so ensure we cannot use
- * it later. Use __ZONE_COUNT to work out how many shift bits we need.
- */
-#define __ZONE_COUNT ( \
- defined(CONFIG_ZONE_DMA) \
- + defined(CONFIG_ZONE_DMA32) \
- + 1 \
- + defined(CONFIG_HIGHMEM) \
- + 1 \
-)
-#if __ZONE_COUNT < 2
+#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
-#elif __ZONE_COUNT <= 2
+#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
-#elif __ZONE_COUNT <= 4
+#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
-#undef __ZONE_COUNT
struct zone {
/* Fields commonly accessed by the page allocator */
@@ -393,10 +386,10 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 * The NUMA zonelists are doubled because we need zonelists that restrict the
* allocations to a single node for GFP_THISNODE.
*
- * [0 .. MAX_NR_ZONES -1] : Zonelists with fallback
- * [MAZ_NR_ZONES ... MAZ_ZONELISTS -1] : No fallback (GFP_THISNODE)
+ * [0] : Zonelist with fallback
+ * [1] : No fallback (GFP_THISNODE)
*/
-#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
+#define MAX_ZONELISTS 2
/*
@@ -464,11 +457,20 @@ struct zonelist_cache {
unsigned long last_full_zap; /* when last zap'd (jiffies) */
};
#else
-#define MAX_ZONELISTS MAX_NR_ZONES
+#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif
/*
+ * This struct contains information about a zone in a zonelist. It is stored
+ * here to avoid dereferences into large structures and lookups of tables
+ */
+struct zoneref {
+ struct zone *zone; /* Pointer to actual zone */
+ int zone_idx; /* zone_idx(zoneref->zone) */
+};
+
+/*
* One allocation request operates on a zonelist. A zonelist
* is a list of zones, the first one is the 'goal' of the
* allocation, the other zones are fallback zones, in decreasing
@@ -476,34 +478,23 @@ struct zonelist_cache;
*
* If zlcache_ptr is not NULL, then it is just the address of zlcache,
* as explained above. If zlcache_ptr is NULL, there is no zlcache.
+ *
+ * To speed the reading of the zonelist, the zonerefs contain the zone index
+ * of the entry being read. Helper functions to access information given
+ * a struct zoneref are
+ *
+ * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
+ * zonelist_zone_idx() - Return the index of the zone for an entry
+ * zonelist_node_idx() - Return the index of the node for an entry
*/
-
struct zonelist {
struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
- struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; // NULL delimited
+ struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
struct zonelist_cache zlcache; // optional ...
#endif
};
-#ifdef CONFIG_NUMA
-/*
- * Only custom zonelists like MPOL_BIND need to be filtered as part of
- * policies. As described in the comment for struct zonelist_cache, these
- * zonelists will not have a zlcache so zlcache_ptr will not be set. Use
- * that to determine if the zonelists needs to be filtered or not.
- */
-static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
-{
- return !zonelist->zlcache_ptr;
-}
-#else
-static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
-{
- return 0;
-}
-#endif /* CONFIG_NUMA */
-
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
unsigned long start_pfn;
@@ -637,9 +628,10 @@ static inline int is_normal_idx(enum zone_type idx)
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
- int zone_idx = zone - zone->zone_pgdat->node_zones;
- return zone_idx == ZONE_HIGHMEM ||
- (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
+ int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
+ return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
+ (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
+ zone_movable_is_highmem());
#else
return 0;
#endif
@@ -730,32 +722,103 @@ extern struct zone *next_zone(struct zone *zone);
zone; \
zone = next_zone(zone))
-#ifdef CONFIG_SPARSEMEM
-#include <asm/sparsemem.h>
-#endif
+static inline struct zone *zonelist_zone(struct zoneref *zoneref)
+{
+ return zoneref->zone;
+}
-#if BITS_PER_LONG == 32
-/*
- * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
- * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
+static inline int zonelist_zone_idx(struct zoneref *zoneref)
+{
+ return zoneref->zone_idx;
+}
+
+static inline int zonelist_node_idx(struct zoneref *zoneref)
+{
+#ifdef CONFIG_NUMA
+ /* zone_to_nid not available in this context */
+ return zoneref->zone->node;
+#else
+ return 0;
+#endif /* CONFIG_NUMA */
+}
+
+/**
+ * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
+ * @z - The cursor used as a starting point for the search
+ * @highest_zoneidx - The zone index of the highest zone to return
+ * @nodes - An optional nodemask to filter the zonelist with
+ * @zone - The first suitable zone found is returned via this parameter
+ *
+ * This function returns the next zone at or below a given zone index that is
+ * within the allowed nodemask using a cursor as the starting point for the
+ * search. The zoneref returned is a cursor that is used as the next starting
+ * point for future calls to next_zones_zonelist().
*/
-#define FLAGS_RESERVED 9
+struct zoneref *next_zones_zonelist(struct zoneref *z,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes,
+ struct zone **zone);
-#elif BITS_PER_LONG == 64
-/*
- * with 64 bit flags field, there's plenty of room.
+/**
+ * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
+ * @zonelist - The zonelist to search for a suitable zone
+ * @highest_zoneidx - The zone index of the highest zone to return
+ * @nodes - An optional nodemask to filter the zonelist with
+ * @zone - The first suitable zone found is returned via this parameter
+ *
+ * This function returns the first zone at or below a given zone index that is
+ * within the allowed nodemask. The zoneref returned is a cursor that can be
+ * used to iterate the zonelist with next_zones_zonelist. The cursor should
+ * not be used by the caller as it does not match the value of the zone
+ * returned.
*/
-#define FLAGS_RESERVED 32
+static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes,
+ struct zone **zone)
+{
+ return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
+ zone);
+}
-#else
+/**
+ * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
+ * @zone - The current zone in the iterator
+ * @z - The current pointer within zonelist->zones being iterated
+ * @zlist - The zonelist being iterated
+ * @highidx - The zone index of the highest zone to return
+ * @nodemask - Nodemask allowed by the allocator
+ *
+ * This iterator iterates through all zones at or below a given zone index and
+ * within a given nodemask
+ */
+#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+ for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
+ zone; \
+ z = next_zones_zonelist(z, highidx, nodemask, &zone)) \
-#error BITS_PER_LONG not defined
+/**
+ * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
+ * @zone - The current zone in the iterator
+ * @z - The current pointer within zonelist->zones being iterated
+ * @zlist - The zonelist being iterated
+ * @highidx - The zone index of the highest zone to return
+ *
+ * This iterator iterates through all zones at or below a given zone index.
+ */
+#define for_each_zone_zonelist(zone, z, zlist, highidx) \
+ for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
#endif
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
-#define early_pfn_to_nid(nid) (0UL)
+static inline unsigned long early_pfn_to_nid(unsigned long pfn)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_FLATMEM
@@ -833,6 +896,7 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
+extern unsigned long usemap_size(void);
/*
* We use the lower bits of the mem_map pointer to store
@@ -938,6 +1002,7 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#define pfn_valid_within(pfn) (1)
#endif
+#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */
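A compact userspace model of the cursor-based walk these helpers introduce: a zonelist becomes an array of zonerefs terminated by a NULL zone, and iteration skips entries whose index exceeds the requested maximum. The nodemask filter is omitted and the cursor advance is written explicitly; this mirrors the idea, not the kernel code:

#include <stdio.h>

struct zone { const char *name; };
struct zoneref { struct zone *zone; int zone_idx; };

/* Skip zonerefs above highest_zoneidx; report the match via *zonep.
 * A NULL zone terminates the list (and ends the walk). */
static struct zoneref *next_zones_zonelist(struct zoneref *z,
                                           int highest_zoneidx,
                                           struct zone **zonep)
{
        while (z->zone && z->zone_idx > highest_zoneidx)
                z++;
        *zonep = z->zone;
        return z;
}

int main(void)
{
        struct zone high = { "HighMem" }, normal = { "Normal" }, dma = { "DMA" };
        struct zoneref zonelist[] = {
                { &high, 2 }, { &normal, 1 }, { &dma, 0 }, { NULL, 0 },
        };
        struct zone *zone;
        struct zoneref *z;

        /* Walk all zones at or below index 1: prints Normal, then DMA. */
        for (z = next_zones_zonelist(zonelist, 1, &zone); zone;
             z = next_zones_zonelist(z + 1, 1, &zone))
                printf("%s\n", zone->name);
        return 0;
}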
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index f950921523f5..b03b27457413 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -58,7 +58,11 @@
#define MSDOS_DOTDOT ".. " /* "..", padded to MSDOS_NAME chars */
/* media of boot sector */
-#define FAT_VALID_MEDIA(x) ((0xF8 <= (x) && (x) <= 0xFF) || (x) == 0xF0)
+static inline int fat_valid_media(u8 media)
+{
+ return 0xf8 <= media || media == 0xf0;
+}
+
#define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \
MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x))
@@ -195,6 +199,7 @@ struct fat_mount_options {
char *iocharset; /* Charset used for filename input/display */
unsigned short shortname; /* flags for shortname display/create rule */
unsigned char name_check; /* r = relaxed, n = normal, s = strict */
+ unsigned short allow_utime;/* permission for setting the [am]time */
unsigned quiet:1, /* set = fake successful chmods and chowns */
showexec:1, /* set = only set x bit for com/exe/bat */
sys_immutable:1, /* set = system files are immutable */
@@ -232,6 +237,7 @@ struct msdos_sb_info {
struct mutex fat_lock;
unsigned int prev_free; /* previously allocated cluster number */
unsigned int free_clusters; /* -1 if undefined */
+ unsigned int free_clus_valid; /* is free_clusters valid? */
struct fat_mount_options options;
struct nls_table *nls_disk; /* Codepage used on disk */
struct nls_table *nls_io; /* Charset used for input and display */
@@ -401,7 +407,7 @@ extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern const struct file_operations fat_file_operations;
extern const struct inode_operations fat_file_inode_operations;
-extern int fat_notify_change(struct dentry * dentry, struct iattr * attr);
+extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
extern void fat_truncate(struct inode *inode);
extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
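The inline fat_valid_media() drops the old macro's "<= 0xFF" bound, which is vacuous for a u8 argument. A quick userspace check that the two forms agree over the whole domain:

#include <stdio.h>

typedef unsigned char u8;

/* The macro being replaced, copied from the diff above. */
#define FAT_VALID_MEDIA(x) ((0xF8 <= (x) && (x) <= 0xFF) || (x) == 0xF0)

static int fat_valid_media(u8 media)
{
        return 0xf8 <= media || media == 0xf0;
}

int main(void)
{
        int m;

        /* Verify both forms agree for every possible media byte. */
        for (m = 0; m <= 0xff; m++)
                if (!FAT_VALID_MEDIA(m) != !fat_valid_media((u8)m))
                        printf("mismatch at %#x\n", m);
        printf("checked all 256 values\n");
        return 0;
}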
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index 88766e43e121..9f2d76347f19 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -204,6 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
/* linux/fs/ncpfs/dir.c */
extern const struct inode_operations ncp_dir_inode_operations;
extern const struct file_operations ncp_dir_operations;
+extern struct dentry_operations ncp_root_dentry_operations;
int ncp_conn_logged_in(struct super_block *);
int ncp_date_dos2unix(__le16 time, __le16 date);
void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
@@ -223,6 +224,12 @@ int ncp_disconnect(struct ncp_server *server);
void ncp_lock_server(struct ncp_server *server);
void ncp_unlock_server(struct ncp_server *server);
+/* linux/fs/ncpfs/symlink.c */
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+extern const struct address_space_operations ncp_symlink_aops;
+int ncp_symlink(struct inode*, struct dentry*, const char*);
+#endif
+
/* linux/fs/ncpfs/file.c */
extern const struct inode_operations ncp_file_inode_operations;
extern const struct file_operations ncp_file_operations;
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 905e18f4b412..848025cd7087 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -14,6 +14,8 @@
* bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
* For details of node_remap(), see bitmap_bitremap in lib/bitmap.c.
* For details of nodes_remap(), see bitmap_remap in lib/bitmap.c.
+ * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c.
+ * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c.
*
* The available nodemask operations are:
*
@@ -55,7 +57,9 @@
* int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
* int nodelist_parse(buf, map) Parse ascii string as nodelist
* int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
- * int nodes_remap(dst, src, old, new) *dst = map(old, new)(dst)
+ * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
+ * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap
+ * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz
*
* for_each_node_mask(node, mask) for-loop node over mask
*
@@ -326,6 +330,22 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}
+#define nodes_onto(dst, orig, relmap) \
+ __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
+static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+ const nodemask_t *relmapp, int nbits)
+{
+ bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
+}
+
+#define nodes_fold(dst, orig, sz) \
+ __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
+static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+ int sz, int nbits)
+{
+ bitmap_fold(dstp->bits, origp->bits, sz, nbits);
+}
+
#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask) \
for ((node) = first_node(mask); \
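The "orig relative to relmap" description of nodes_onto()/bitmap_onto() is dense: bit m of orig selects the m-th set bit of relmap. A single-word userspace sketch of that mapping (the real lib/bitmap.c code handles arbitrary widths):

#include <stdio.h>

/* Single-word model of bitmap_onto(): bit m of orig selects the m-th
 * set bit of relmap. Illustration only. */
static unsigned long onto_word(unsigned long orig, unsigned long relmap)
{
        unsigned long dst = 0;
        int n, m = 0;

        for (n = 0; n < (int)(8 * sizeof(relmap)); n++)
                if (relmap & (1UL << n)) {
                        if (orig & (1UL << m))
                                dst |= 1UL << n;
                        m++;
                }
        return dst;
}

int main(void)
{
        /* orig bits {1,3} picked out of relmap's set bits {8,9,10,11}
         * yield dst bits {9,11}: prints 0xa00. */
        printf("%#lx\n", onto_word(0xa, 0xf00));
        return 0;
}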
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index f4df40038f0c..20dfed590183 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -247,6 +247,7 @@ extern struct blocking_notifier_head reboot_notifier_list;
#define VT_DEALLOCATE 0x0002 /* Console will be deallocated */
#define VT_WRITE 0x0003 /* A char got output */
#define VT_UPDATE 0x0004 /* A bigger update occurred */
+#define VT_PREWRITE 0x0005 /* A char is about to be written to the console */
#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 3852436b652a..a7979baf1e39 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -23,8 +23,8 @@ enum oom_constraint {
CONSTRAINT_MEMORY_POLICY,
};
-extern int try_set_zone_oom(struct zonelist *zonelist);
-extern void clear_zonelist_oom(struct zonelist *zonelist);
+extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
extern int register_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index b5b30f1c1e59..590cff32415d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -6,7 +6,10 @@
#define PAGE_FLAGS_H
#include <linux/types.h>
+#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
+#include <linux/bounds.h>
+#endif /* !__GENERATING_BOUNDS_H */
/*
* Various page->flags bits:
@@ -59,77 +62,138 @@
* extends from the high bits downwards.
*
* | FIELD | ... | FLAGS |
- * N-1 ^ 0
- * (N-FLAGS_RESERVED)
+ * N-1 ^ 0
+ * (NR_PAGEFLAGS)
*
- * The fields area is reserved for fields mapping zone, node and SPARSEMEM
- * section. The boundry between these two areas is defined by
- * FLAGS_RESERVED which defines the width of the fields section
- * (see linux/mmzone.h). New flags must _not_ overlap with this area.
+ * The fields area is reserved for fields mapping zone, node (for NUMA) and
+ * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
+ * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
*/
-#define PG_locked 0 /* Page is locked. Don't touch. */
-#define PG_error 1
-#define PG_referenced 2
-#define PG_uptodate 3
+enum pageflags {
+ PG_locked, /* Page is locked. Don't touch. */
+ PG_error,
+ PG_referenced,
+ PG_uptodate,
+ PG_dirty,
+ PG_lru,
+ PG_active,
+ PG_slab,
+ PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
+ PG_arch_1,
+ PG_reserved,
+ PG_private, /* If pagecache, has fs-private data */
+ PG_writeback, /* Page is under writeback */
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+ PG_head, /* A head page */
+ PG_tail, /* A tail page */
+#else
+ PG_compound, /* A compound page */
+#endif
+ PG_swapcache, /* Swap page: swp_entry_t in private */
+ PG_mappedtodisk, /* Has blocks allocated on-disk */
+ PG_reclaim, /* To be reclaimed asap */
+ PG_buddy, /* Page is free, on buddy lists */
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+ PG_uncached, /* Page has been mapped as uncached */
+#endif
+ __NR_PAGEFLAGS
+};
+
+#ifndef __GENERATING_BOUNDS_H
+
+/*
+ * Macros to create function definitions for page flags
+ */
+#define TESTPAGEFLAG(uname, lname) \
+static inline int Page##uname(struct page *page) \
+ { return test_bit(PG_##lname, &page->flags); }
-#define PG_dirty 4
-#define PG_lru 5
-#define PG_active 6
-#define PG_slab 7 /* slab debug (Suparna wants this) */
+#define SETPAGEFLAG(uname, lname) \
+static inline void SetPage##uname(struct page *page) \
+ { set_bit(PG_##lname, &page->flags); }
-#define PG_owner_priv_1 8 /* Owner use. If pagecache, fs may use*/
-#define PG_arch_1 9
-#define PG_reserved 10
-#define PG_private 11 /* If pagecache, has fs-private data */
+#define CLEARPAGEFLAG(uname, lname) \
+static inline void ClearPage##uname(struct page *page) \
+ { clear_bit(PG_##lname, &page->flags); }
-#define PG_writeback 12 /* Page is under writeback */
-#define PG_compound 14 /* Part of a compound page */
-#define PG_swapcache 15 /* Swap page: swp_entry_t in private */
+#define __SETPAGEFLAG(uname, lname) \
+static inline void __SetPage##uname(struct page *page) \
+ { __set_bit(PG_##lname, &page->flags); }
-#define PG_mappedtodisk 16 /* Has blocks allocated on-disk */
-#define PG_reclaim 17 /* To be reclaimed asap */
-#define PG_buddy 19 /* Page is free, on buddy lists */
+#define __CLEARPAGEFLAG(uname, lname) \
+static inline void __ClearPage##uname(struct page *page) \
+ { __clear_bit(PG_##lname, &page->flags); }
+
+#define TESTSETFLAG(uname, lname) \
+static inline int TestSetPage##uname(struct page *page) \
+ { return test_and_set_bit(PG_##lname, &page->flags); }
+
+#define TESTCLEARFLAG(uname, lname) \
+static inline int TestClearPage##uname(struct page *page) \
+ { return test_and_clear_bit(PG_##lname, &page->flags); }
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
-#define PG_readahead PG_reclaim /* Reminder to do async read-ahead */
-/* PG_owner_priv_1 users should have descriptive aliases */
-#define PG_checked PG_owner_priv_1 /* Used by some filesystems */
-#define PG_pinned PG_owner_priv_1 /* Xen pinned pagetable */
+#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
+ SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
+
+#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
+ __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)
+
+#define PAGEFLAG_FALSE(uname) \
+static inline int Page##uname(struct page *page) \
+ { return 0; }
+
+#define TESTSCFLAG(uname, lname) \
+ TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
+
+struct page; /* forward declaration */
+
+PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+PAGEFLAG(Error, error)
+PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
+PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
+PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
+PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+__PAGEFLAG(Slab, slab)
+PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */
+PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */
+PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
+PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
+ __SETPAGEFLAG(Private, private)
-#if (BITS_PER_LONG > 32)
/*
- * 64-bit-only flags build down from bit 31
- *
- * 32 bit -------------------------------| FIELDS | FLAGS |
- * 64 bit | FIELDS | ?????? FLAGS |
- * 63 32 0
+ * Only the test-and-set variants exist for PG_writeback. The unconditional
+ * operators are risky: they bypass page accounting.
*/
-#define PG_uncached 31 /* Page has been mapped as uncached */
-#endif
+TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
+__PAGEFLAG(Buddy, buddy)
+PAGEFLAG(MappedToDisk, mappedtodisk)
+/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
+PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */
+
+#ifdef CONFIG_HIGHMEM
/*
- * Manipulation of page state flags
+ * Must use a macro here due to header dependency issues. page_zone() is not
+ * available at this point.
*/
-#define PageLocked(page) \
- test_bit(PG_locked, &(page)->flags)
-#define SetPageLocked(page) \
- set_bit(PG_locked, &(page)->flags)
-#define TestSetPageLocked(page) \
- test_and_set_bit(PG_locked, &(page)->flags)
-#define ClearPageLocked(page) \
- clear_bit(PG_locked, &(page)->flags)
-#define TestClearPageLocked(page) \
- test_and_clear_bit(PG_locked, &(page)->flags)
-
-#define PageError(page) test_bit(PG_error, &(page)->flags)
-#define SetPageError(page) set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
-
-#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
-#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
+#define PageHighMem(__p) is_highmem(page_zone(__p))
+#else
+PAGEFLAG_FALSE(HighMem)
+#endif
+
+#ifdef CONFIG_SWAP
+PAGEFLAG(SwapCache, swapcache)
+#else
+PAGEFLAG_FALSE(SwapCache)
+#endif
+
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+PAGEFLAG(Uncached, uncached)
+#else
+PAGEFLAG_FALSE(Uncached)
+#endif
static inline int PageUptodate(struct page *page)
{
@@ -177,97 +241,59 @@ static inline void SetPageUptodate(struct page *page)
#endif
}
-#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
-
-#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
-#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
-#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
-#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
-
-#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
-#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
-#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags)
-#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags)
-
-#define PageActive(page) test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
-#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags)
-
-#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
-#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags)
-#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags)
-
-#ifdef CONFIG_HIGHMEM
-#define PageHighMem(page) is_highmem(page_zone(page))
-#else
-#define PageHighMem(page) 0 /* needed to optimize away at compile time */
-#endif
+CLEARPAGEFLAG(Uptodate, uptodate)
-#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
-
-#define PagePinned(page) test_bit(PG_pinned, &(page)->flags)
-#define SetPagePinned(page) set_bit(PG_pinned, &(page)->flags)
-#define ClearPagePinned(page) clear_bit(PG_pinned, &(page)->flags)
+extern void cancel_dirty_page(struct page *page, unsigned int account_size);
-#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
-#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
-#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags)
-#define __ClearPageReserved(page) __clear_bit(PG_reserved, &(page)->flags)
+int test_clear_page_writeback(struct page *page);
+int test_set_page_writeback(struct page *page);
-#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags)
-#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
-#define PagePrivate(page) test_bit(PG_private, &(page)->flags)
-#define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags)
-#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
+static inline void set_page_writeback(struct page *page)
+{
+ test_set_page_writeback(page);
+}
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
/*
- * Only test-and-set exist for PG_writeback. The unconditional operators are
- * risky: they bypass page accounting.
+ * Systems with lots of page flags available. This allows separate
+ * flags for the PageHead() and PageTail() checks of compound pages so that
+ * bit tests can be used in performance-sensitive paths. PageCompound is
+ * generally not used in hot code paths.
*/
-#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \
- &(page)->flags)
-#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \
- &(page)->flags)
+__PAGEFLAG(Head, head)
+__PAGEFLAG(Tail, tail)
-#define PageBuddy(page) test_bit(PG_buddy, &(page)->flags)
-#define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags)
-#define __ClearPageBuddy(page) __clear_bit(PG_buddy, &(page)->flags)
-
-#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
-#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
-#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
-
-#define PageReadahead(page) test_bit(PG_readahead, &(page)->flags)
-#define SetPageReadahead(page) set_bit(PG_readahead, &(page)->flags)
-#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags)
-
-#define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags)
-#define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags)
-#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
-#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
+static inline int PageCompound(struct page *page)
+{
+ return page->flags & ((1L << PG_head) | (1L << PG_tail));
-#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
-#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
-#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
+}
+#else
+/*
+ * Reduce page flag use as much as possible by overlapping the
+ * compound page flags with the flags used for page cache pages. This is
+ * possible because PG_compound is always set for compound pages and never
+ * for pages on the LRU and/or in the pagecache.
+ */
+TESTPAGEFLAG(Compound, compound)
+__PAGEFLAG(Head, compound)
/*
* PG_reclaim is used in combination with PG_compound to mark the
- * head and tail of a compound page
+ * head and tail of a compound page. This saves one page flag
+ * but makes it impossible to use compound pages for the page cache.
+ * The PG_reclaim bit would have to be used for reclaim or readahead
+ * if compound pages enter the page cache.
*
* PG_compound & PG_reclaim => Tail page
* PG_compound & ~PG_reclaim => Head page
*/
-
#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
-#define PageTail(page) (((page)->flags & PG_head_tail_mask) \
- == PG_head_tail_mask)
+static inline int PageTail(struct page *page)
+{
+ return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
+}
static inline void __SetPageTail(struct page *page)
{
@@ -279,33 +305,6 @@ static inline void __ClearPageTail(struct page *page)
page->flags &= ~PG_head_tail_mask;
}
-#define PageHead(page) (((page)->flags & PG_head_tail_mask) \
- == (1L << PG_compound))
-#define __SetPageHead(page) __SetPageCompound(page)
-#define __ClearPageHead(page) __ClearPageCompound(page)
-
-#ifdef CONFIG_SWAP
-#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
-#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
-#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
-#else
-#define PageSwapCache(page) 0
-#endif
-
-#define PageUncached(page) test_bit(PG_uncached, &(page)->flags)
-#define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
-#define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
-
-struct page; /* forward declaration */
-
-extern void cancel_dirty_page(struct page *page, unsigned int account_size);
-
-int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
-
-static inline void set_page_writeback(struct page *page)
-{
- test_set_page_writeback(page);
-}
-
+#endif /* !PAGEFLAGS_EXTENDED */
+#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
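
For reference, this is roughly what one of the declarations above expands to.
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) generates the same accessors
the old #defines provided, so callers are unchanged:

	static inline int PageDirty(struct page *page)
		{ return test_bit(PG_dirty, &page->flags); }
	static inline void SetPageDirty(struct page *page)
		{ set_bit(PG_dirty, &page->flags); }
	static inline void ClearPageDirty(struct page *page)
		{ clear_bit(PG_dirty, &page->flags); }
	static inline int TestSetPageDirty(struct page *page)
		{ return test_and_set_bit(PG_dirty, &page->flags); }
	static inline int TestClearPageDirty(struct page *page)
		{ return test_and_clear_bit(PG_dirty, &page->flags); }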
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 5c80b1939636..5ad79198d6f9 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -16,7 +16,8 @@
# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */
# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */
-/* Get/set whether or not to drop capabilities on setuid() away from uid 0 */
+/* Get/set whether or not to drop capabilities on setuid() away from
+ * uid 0 (as per security/commoncap.c) */
#define PR_GET_KEEPCAPS 7
#define PR_SET_KEEPCAPS 8
@@ -63,7 +64,7 @@
#define PR_GET_SECCOMP 21
#define PR_SET_SECCOMP 22
-/* Get/set the capability bounding set */
+/* Get/set the capability bounding set (as per security/commoncap.c) */
#define PR_CAPBSET_READ 23
#define PR_CAPBSET_DROP 24
@@ -73,4 +74,8 @@
# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
+/* Get/set securebits (as per security/commoncap.c) */
+#define PR_GET_SECUREBITS 27
+#define PR_SET_SECUREBITS 28
+
#endif /* _LINUX_PRCTL_H */
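
A minimal userspace sketch of the two new options, assuming a kernel that
carries this patch; issecure_mask() and SECURE_NOROOT come from the
securebits.h hunk later in this diff, and changing securebits requires
CAP_SETPCAP:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/securebits.h>	/* issecure_mask(), SECURE_NOROOT */

	#ifndef PR_GET_SECUREBITS
	#define PR_GET_SECUREBITS 27	/* as defined above */
	#define PR_SET_SECUREBITS 28
	#endif

	int main(void)
	{
		long bits = prctl(PR_GET_SECUREBITS, 0, 0, 0, 0);

		if (bits < 0) {
			perror("PR_GET_SECUREBITS");
			return 1;
		}
		/* Request SECURE_NOROOT; fails without CAP_SETPCAP. */
		if (prctl(PR_SET_SECUREBITS,
			  bits | issecure_mask(SECURE_NOROOT), 0, 0, 0) < 0)
			perror("PR_SET_SECUREBITS");
		return 0;
	}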
diff --git a/include/linux/quota.h b/include/linux/quota.h
index eb560d031acd..52e49dce6584 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -202,10 +202,14 @@ struct quota_format_type;
struct mem_dqinfo {
struct quota_format_type *dqi_format;
+ int dqi_fmt_id; /* Id of the dqi_format - used when turning
+ * quotas on after remount RW */
struct list_head dqi_dirty_list; /* List of dirty dquots */
unsigned long dqi_flags;
unsigned int dqi_bgrace;
unsigned int dqi_igrace;
+ qsize_t dqi_maxblimit;
+ qsize_t dqi_maxilimit;
union {
struct v1_mem_dqinfo v1_i;
struct v2_mem_dqinfo v2_i;
@@ -296,8 +300,8 @@ struct dquot_operations {
/* Operations handling requests from userspace */
struct quotactl_ops {
- int (*quota_on)(struct super_block *, int, int, char *);
- int (*quota_off)(struct super_block *, int);
+ int (*quota_on)(struct super_block *, int, int, char *, int);
+ int (*quota_off)(struct super_block *, int, int);
int (*quota_sync)(struct super_block *, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
@@ -318,6 +322,10 @@ struct quota_format_type {
#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */
#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */
+#define DQUOT_USR_SUSPENDED 0x04 /* User diskquotas are off, but
+ * we have the necessary info in
+ * memory to turn them on */
+#define DQUOT_GRP_SUSPENDED 0x08 /* The same for group quotas */
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
@@ -329,17 +337,16 @@ struct quota_info {
struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
};
-/* Inline would be better but we need to dereference super_block which is not defined yet */
-int mark_dquot_dirty(struct dquot *dquot);
-
-#define dquot_dirty(dquot) test_bit(DQ_MOD_B, &(dquot)->dq_flags)
-
#define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? \
(sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED))
#define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \
sb_has_quota_enabled(sb, GRPQUOTA))
+#define sb_has_quota_suspended(sb, type) \
+ ((type) == USRQUOTA ? (sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \
+ (sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED))
+
int register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
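
A small illustrative helper (the name is hypothetical) showing how the new
suspended state is meant to be queried alongside the existing enabled state:

	/* A quota type is "live" if it is enabled, or merely suspended
	 * across an RO remount with its in-memory info preserved. */
	static inline int sb_quota_live(struct super_block *sb, int type)
	{
		return sb_has_quota_enabled(sb, type) ||
		       sb_has_quota_suspended(sb, type);
	}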
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 5110201a4159..f86702053853 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,11 +37,11 @@ extern int dquot_release(struct dquot *dquot);
extern int dquot_commit_info(struct super_block *sb, int type);
extern int dquot_mark_dquot_dirty(struct dquot *dquot);
-extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
+extern int vfs_quota_on(struct super_block *sb, int type, int format_id,
+ char *path, int remount);
extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type);
-extern int vfs_quota_off(struct super_block *sb, int type);
-#define vfs_quota_off_mount(sb, type) vfs_quota_off(sb, type)
+extern int vfs_quota_off(struct super_block *sb, int type, int remount);
extern int vfs_quota_sync(struct super_block *sb, int type);
extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
@@ -59,7 +59,7 @@ extern struct quotactl_ops vfs_quotactl_ops;
/* It is better to call this function outside of any transaction as it might
 * need a lot of space in the journal for dquot structure allocation. */
-static __inline__ void DQUOT_INIT(struct inode *inode)
+static inline void DQUOT_INIT(struct inode *inode)
{
BUG_ON(!inode->i_sb);
if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode))
@@ -67,7 +67,7 @@ static __inline__ void DQUOT_INIT(struct inode *inode)
}
/* The same as with DQUOT_INIT */
-static __inline__ void DQUOT_DROP(struct inode *inode)
+static inline void DQUOT_DROP(struct inode *inode)
{
 /* Here we can get an arbitrary inode from clear_inode() so we have
* to be careful. OTOH we don't need locking as quota operations
@@ -90,7 +90,7 @@ static __inline__ void DQUOT_DROP(struct inode *inode)
/* The following allocation/freeing/transfer functions *must* be called inside
* a transaction (deadlocks possible otherwise) */
-static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_enabled(inode->i_sb)) {
/* Used space is updated in alloc_space() */
@@ -102,7 +102,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t
return 0;
}
-static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
+static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
{
int ret;
if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr)))
@@ -110,7 +110,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
return ret;
}
-static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_enabled(inode->i_sb)) {
/* Used space is updated in alloc_space() */
@@ -122,7 +122,7 @@ static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
return 0;
}
-static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
+static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
{
int ret;
if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr)))
@@ -130,7 +130,7 @@ static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
return ret;
}
-static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode)
+static inline int DQUOT_ALLOC_INODE(struct inode *inode)
{
if (sb_any_quota_enabled(inode->i_sb)) {
DQUOT_INIT(inode);
@@ -140,7 +140,7 @@ static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode)
return 0;
}
-static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_enabled(inode->i_sb))
inode->i_sb->dq_op->free_space(inode, nr);
@@ -148,19 +148,19 @@ static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
inode_sub_bytes(inode, nr);
}
-static __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
+static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
{
DQUOT_FREE_SPACE_NODIRTY(inode, nr);
mark_inode_dirty(inode);
}
-static __inline__ void DQUOT_FREE_INODE(struct inode *inode)
+static inline void DQUOT_FREE_INODE(struct inode *inode)
{
if (sb_any_quota_enabled(inode->i_sb))
inode->i_sb->dq_op->free_inode(inode, 1);
}
-static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
+static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
{
if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) {
DQUOT_INIT(inode);
@@ -171,14 +171,32 @@ static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
}
/* The following two functions cannot be called inside a transaction */
-#define DQUOT_SYNC(sb) sync_dquots(sb, -1)
+static inline void DQUOT_SYNC(struct super_block *sb)
+{
+ sync_dquots(sb, -1);
+}
-static __inline__ int DQUOT_OFF(struct super_block *sb)
+static inline int DQUOT_OFF(struct super_block *sb, int remount)
{
int ret = -ENOSYS;
- if (sb_any_quota_enabled(sb) && sb->s_qcop && sb->s_qcop->quota_off)
- ret = sb->s_qcop->quota_off(sb, -1);
+ if (sb->s_qcop && sb->s_qcop->quota_off)
+ ret = sb->s_qcop->quota_off(sb, -1, remount);
+ return ret;
+}
+
+static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
+{
+ int cnt;
+ int ret = 0, err;
+
+ if (!sb->s_qcop || !sb->s_qcop->quota_on)
+ return -ENOSYS;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
+ if (err < 0 && !ret)
+ ret = err;
+ }
return ret;
}
@@ -189,13 +207,43 @@ static __inline__ int DQUOT_OFF(struct super_block *sb)
*/
#define sb_dquot_ops (NULL)
#define sb_quotactl_ops (NULL)
-#define DQUOT_INIT(inode) do { } while(0)
-#define DQUOT_DROP(inode) do { } while(0)
-#define DQUOT_ALLOC_INODE(inode) (0)
-#define DQUOT_FREE_INODE(inode) do { } while(0)
-#define DQUOT_SYNC(sb) do { } while(0)
-#define DQUOT_OFF(sb) do { } while(0)
-#define DQUOT_TRANSFER(inode, iattr) (0)
+
+static inline void DQUOT_INIT(struct inode *inode)
+{
+}
+
+static inline void DQUOT_DROP(struct inode *inode)
+{
+}
+
+static inline int DQUOT_ALLOC_INODE(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void DQUOT_FREE_INODE(struct inode *inode)
+{
+}
+
+static inline void DQUOT_SYNC(struct super_block *sb)
+{
+}
+
+static inline int DQUOT_OFF(struct super_block *sb, int remount)
+{
+ return 0;
+}
+
+static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
+{
+ return 0;
+}
+
static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
{
inode_add_bytes(inode, nr);
@@ -235,11 +283,38 @@ static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
#endif /* CONFIG_QUOTA */
-#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_PREALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_PREALLOC_BLOCK(inode, nr) DQUOT_PREALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_ALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_ALLOC_BLOCK(inode, nr) DQUOT_ALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) DQUOT_FREE_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_FREE_BLOCK(inode, nr) DQUOT_FREE_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
+static inline int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+ return DQUOT_PREALLOC_SPACE_NODIRTY(inode,
+ nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_PREALLOC_BLOCK(struct inode *inode, qsize_t nr)
+{
+ return DQUOT_PREALLOC_SPACE(inode,
+ nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+ return DQUOT_ALLOC_SPACE_NODIRTY(inode,
+ nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_ALLOC_BLOCK(struct inode *inode, qsize_t nr)
+{
+ return DQUOT_ALLOC_SPACE(inode,
+ nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+ DQUOT_FREE_SPACE_NODIRTY(inode, nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline void DQUOT_FREE_BLOCK(struct inode *inode, qsize_t nr)
+{
+ DQUOT_FREE_SPACE(inode, nr << inode->i_sb->s_blocksize_bits);
+}
#endif /* _LINUX_QUOTAOPS_ */
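
A hedged sketch of the remount flow these changes enable, loosely modeled on
a filesystem's remount path (the function name is illustrative):

	/* On an RO remount, suspend quotas but keep enough state to
	 * restart them; on an RW remount, resume whatever was suspended. */
	static int example_remount(struct super_block *sb, int *flags)
	{
		if (*flags & MS_RDONLY)
			return DQUOT_OFF(sb, 1);	/* suspend, don't tear down */
		return DQUOT_ON_REMOUNT(sb);	/* re-enable suspended types */
	}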
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 93678f57ccbe..f0827d31ae6f 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -252,6 +252,8 @@ struct r6_state {
#define STRIPE_EXPANDING 9
#define STRIPE_EXPAND_SOURCE 10
#define STRIPE_EXPAND_READY 11
+#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
+#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
/*
* Operations flags (in issue order)
*/
@@ -316,12 +318,17 @@ struct raid5_private_data {
int previous_raid_disks;
struct list_head handle_list; /* stripes needing handling */
+ struct list_head hold_list; /* preread ready stripes */
struct list_head delayed_list; /* stripes that have plugged requests */
struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
struct bio *retry_read_aligned; /* currently retrying aligned bios */
struct bio *retry_read_aligned_list; /* aligned bios retry list */
atomic_t preread_active_stripes; /* stripes with scheduled io */
atomic_t active_aligned_reads;
+ atomic_t pending_full_writes; /* full write backlog */
+ int bypass_count; /* bypassed prereads */
+ int bypass_threshold; /* preread nice */
+ struct list_head *last_hold; /* detect hold_list promotions */
atomic_t reshape_stripes; /* stripes with pending writes for reshape */
/* unfortunately we need two cache names as we temporarily have
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 8e7eff2cd0ab..4aacaeecb56f 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2176,6 +2176,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
long reiserfs_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+int reiserfs_unpack(struct inode *inode, struct file *filp);
/* ioctl's command */
#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9a4f3e63e3bf..024d72b47a0c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,6 @@ struct sched_param {
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
-#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
@@ -1133,7 +1132,7 @@ struct task_struct {
gid_t gid,egid,sgid,fsgid;
struct group_info *group_info;
kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
- unsigned keep_capabilities:1;
+ unsigned securebits;
struct user_struct *user;
#ifdef CONFIG_KEYS
struct key *request_key_auth; /* assumed request_key authority */
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index 5b0617840fa4..c1f19dbceb05 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -3,28 +3,39 @@
#define SECUREBITS_DEFAULT 0x00000000
-extern unsigned securebits;
-
 /* When set, UID 0 has no special privileges. When unset, we support
    inheritance of root-permissions and suid-root executables under
compatibility mode. We raise the effective and inheritable bitmasks
*of the executable file* if the effective uid of the new process is
0. If the real uid is 0, we raise the inheritable bitmask of the
executable file. */
-#define SECURE_NOROOT 0
+#define SECURE_NOROOT 0
+#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */
/* When set, setuid to/from uid 0 does not trigger capability-"fixes"
    to be compatible with old programs relying on set*uid to lose
privileges. When unset, setuid doesn't change privileges. */
-#define SECURE_NO_SETUID_FIXUP 2
+#define SECURE_NO_SETUID_FIXUP 2
+#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */
+
+/* When set, a process can retain its capabilities even after
+ transitioning to a non-root user (the set-uid fixup suppressed by
+   bit 2). Bit 4 is cleared when a process calls exec(); setting both
+   bits 4 and 5 creates a barrier through exec() so that no exec()'d
+   child can use this feature again. */
+#define SECURE_KEEP_CAPS 4
+#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */
 /* Each secure setting is implemented using two bits. One bit specifies
    whether the setting is on or off. The other bit specifies whether the
    setting is fixed or not. A setting which is fixed cannot be changed
    from user level. */
+#define issecure_mask(X) (1 << (X))
+#define issecure(X) (issecure_mask(X) & current->securebits)
-#define issecure(X) ( (1 << (X+1)) & SECUREBITS_DEFAULT ? \
- (1 << (X)) & SECUREBITS_DEFAULT : \
- (1 << (X)) & securebits )
+#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
+ issecure_mask(SECURE_NO_SETUID_FIXUP) | \
+ issecure_mask(SECURE_KEEP_CAPS))
+#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1)
#endif /* !_LINUX_SECUREBITS_H */
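
The pairing convention is worth spelling out: each *_LOCKED bit sits one
position above its setting bit (0/1, 2/3, 4/5), which is why SECURE_ALL_LOCKS
can simply be SECURE_ALL_BITS << 1. A minimal sketch of composing a value
that enables NOROOT and makes it immutable:

	/* Enable NOROOT and lock it so it can no longer be flipped. */
	unsigned locked_noroot = issecure_mask(SECURE_NOROOT) |
				 issecure_mask(SECURE_NOROOT_LOCKED);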
diff --git a/include/linux/security.h b/include/linux/security.h
index a90c06376eec..d0a28fd1747a 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -34,8 +34,6 @@
#include <linux/xfrm.h>
#include <net/flow.h>
-extern unsigned securebits;
-
/* Maximum number of letters for an LSM name string */
#define SECURITY_NAME_MAX 10
@@ -61,6 +59,8 @@ extern int cap_inode_need_killpriv(struct dentry *dentry);
extern int cap_inode_killpriv(struct dentry *dentry);
extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
extern void cap_task_reparent_to_init(struct task_struct *p);
+extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5, long *rc_p);
extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
@@ -720,7 +720,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
 * @arg3 contains an argument.
 * @arg4 contains an argument.
 * @arg5 contains an argument.
- * Return 0 if permission is granted.
+ * @rc_p contains a pointer used to communicate back the forced return code.
+ * Return 0 if permission is granted, and non-zero if the security module
+ * has taken responsibility (setting *rc_p) for the prctl call.
* @task_reparent_to_init:
* Set the security attributes in @p->security for a kernel thread that
* is being reparented to the init task.
@@ -1420,7 +1422,7 @@ struct security_operations {
int (*task_wait) (struct task_struct *p);
int (*task_prctl) (int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
- unsigned long arg5);
+ unsigned long arg5, long *rc_p);
void (*task_reparent_to_init) (struct task_struct *p);
void (*task_to_inode) (struct task_struct *p, struct inode *inode);
@@ -1684,7 +1686,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid);
int security_task_wait(struct task_struct *p);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
+ unsigned long arg4, unsigned long arg5, long *rc_p);
void security_task_reparent_to_init(struct task_struct *p);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
@@ -2271,9 +2273,9 @@ static inline int security_task_wait(struct task_struct *p)
static inline int security_task_prctl(int option, unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5)
+ unsigned long arg5, long *rc_p)
{
- return 0;
+ return cap_task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
}
static inline void security_task_reparent_to_init(struct task_struct *p)
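
A hedged sketch of the new hook contract for a security module (the function
and the handled option are illustrative only): set *rc_p and return non-zero
to claim the call, or return 0 to fall through to the commoncap default:

	static int example_task_prctl(int option, unsigned long arg2,
				      unsigned long arg3, unsigned long arg4,
				      unsigned long arg5, long *rc_p)
	{
		if (option == PR_GET_KEEPCAPS) {
			*rc_p = !!issecure(SECURE_KEEP_CAPS);
			return 1;	/* handled: *rc_p carries the result */
		}
		return 0;		/* defer to cap_task_prctl() */
	}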
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 00b65c0a82ca..3d37c94abbc8 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -46,6 +46,7 @@ enum {
PLAT8250_DEV_HUB6,
PLAT8250_DEV_MCA,
PLAT8250_DEV_AU1X00,
+ PLAT8250_DEV_SM501,
};
/*
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 8d5fb36ea047..f2d12d5a21b8 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -34,8 +34,7 @@ struct shmem_sb_info {
uid_t uid; /* Mount uid for root directory */
gid_t gid; /* Mount gid for root directory */
mode_t mode; /* Mount mode for root directory */
- int policy; /* Default NUMA memory alloc policy */
- nodemask_t policy_nodes; /* nodemask for preferred and bind */
+ struct mempolicy *mpol; /* default memory policy for mappings */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 1d7d4c5797ee..a6977423baf7 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -12,11 +12,22 @@
#include <asm/errno.h>
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+extern void pm_set_vt_switch(int);
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
-static inline int pm_prepare_console(void) { return 0; }
-static inline void pm_restore_console(void) {}
+static inline void pm_set_vt_switch(int do_switch)
+{
+}
+
+static inline int pm_prepare_console(void)
+{
+ return 0;
+}
+
+static inline void pm_restore_console(void)
+{
+}
#endif
typedef int __bitwise suspend_state_t;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 878459ae0454..0b3377650c85 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -177,11 +177,11 @@ extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern int lru_add_drain_all(void);
-extern int rotate_reclaimable_page(struct page *page);
+extern void rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask);
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 5562fbf72095..45f6bc82d317 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -13,10 +13,6 @@
#define _SYNCLINK_H_
#define SYNCLINK_H_VERSION 3.6
-#define BOOLEAN int
-#define TRUE 1
-#define FALSE 0
-
#define BIT0 0x0001
#define BIT1 0x0002
#define BIT2 0x0004
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index ce8e7da05807..364789aae9f3 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -31,6 +31,7 @@ struct vm_struct {
struct page **pages;
unsigned int nr_pages;
unsigned long phys_addr;
+ void *caller;
};
/*
@@ -66,6 +67,8 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
+extern struct vm_struct *get_vm_area_caller(unsigned long size,
+ unsigned long flags, void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end);
extern struct vm_struct *get_vm_area_node(unsigned long size,
@@ -87,4 +90,6 @@ extern void free_vm_area(struct vm_struct *area);
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
+extern const struct seq_operations vmalloc_op;
+
#endif /* _LINUX_VMALLOC_H */
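
A hedged sketch of the presumed use of the caller-tracking variant: record
the call site at allocation so it can be reported later, plausibly through
the vmalloc_op seq_file interface added above (the wrapper is illustrative):

	static struct vm_struct *example_get_area(unsigned long size)
	{
		/* Record our caller as the allocation site for diagnostics. */
		return get_vm_area_caller(size, VM_ALLOC,
					  __builtin_return_address(0));
	}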
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9f1b4b46151e..e83b69346d23 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,6 +25,7 @@
#define HIGHMEM_ZONE(xx)
#endif
+
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
@@ -37,6 +38,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGSCAN_DIRECT),
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_HUGETLB_PAGE
+ HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
NR_VM_EVENT_ITEMS
};
@@ -174,7 +178,7 @@ static inline unsigned long node_page_state(int node,
zone_page_state(&zones[ZONE_MOVABLE], item);
}
-extern void zone_statistics(struct zonelist *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *);
#else