Diffstat (limited to 'include')
-rw-r--r--  include/acpi/processor.h                  |   1
-rw-r--r--  include/linux/altera_uart.h               |   4
-rw-r--r--  include/linux/clocksource.h               |   7
-rw-r--r--  include/linux/connector.h                 |   1
-rw-r--r--  include/linux/cpu.h                       |   7
-rw-r--r--  include/linux/device.h                    |  10
-rw-r--r--  include/linux/dynamic_debug.h             |  19
-rw-r--r--  include/linux/errno.h                     |   1
-rw-r--r--  include/linux/ftrace.h                    |  77
-rw-r--r--  include/linux/ftrace_event.h              |   9
-rw-r--r--  include/linux/genhd.h                     |   1
-rw-r--r--  include/linux/hyperv.h                    | 173
-rw-r--r--  include/linux/init_task.h                 |   2
-rw-r--r--  include/linux/interrupt.h                 |   7
-rw-r--r--  include/linux/iocontext.h                 |  10
-rw-r--r--  include/linux/jump_label.h                | 162
-rw-r--r--  include/linux/kbd_kern.h                  |   7
-rw-r--r--  include/linux/kernel.h                    |  13
-rw-r--r--  include/linux/keyboard.h                  |   2
-rw-r--r--  include/linux/math64.h                    |   4
-rw-r--r--  include/linux/mod_devicetable.h           |  21
-rw-r--r--  include/linux/netdevice.h                 |  12
-rw-r--r--  include/linux/netfilter.h                 |   6
-rw-r--r--  include/linux/pci.h                       |  13
-rw-r--r--  include/linux/perf_event.h                | 108
-rw-r--r--  include/linux/platform_data/efm32-uart.h  |  18
-rw-r--r--  include/linux/preempt.h                   |   5
-rw-r--r--  include/linux/printk.h                    |  18
-rw-r--r--  include/linux/rcupdate.h                  |  83
-rw-r--r--  include/linux/rcutiny.h                   |  10
-rw-r--r--  include/linux/rcutree.h                   |  19
-rw-r--r--  include/linux/sched.h                     |  54
-rw-r--r--  include/linux/serial.h                    |   4
-rw-r--r--  include/linux/serialP.h                   | 142
-rw-r--r--  include/linux/serial_core.h               |  12
-rw-r--r--  include/linux/srcu.h                      |  15
-rw-r--r--  include/linux/static_key.h                |   1
-rw-r--r--  include/linux/sunserialcore.h             |  33
-rw-r--r--  include/linux/sys_soc.h                   |  37
-rw-r--r--  include/linux/timex.h                     |  17
-rw-r--r--  include/linux/tracepoint.h                |  28
-rw-r--r--  include/linux/tty.h                       |   6
-rw-r--r--  include/linux/tty_driver.h                |   8
-rw-r--r--  include/linux/vt_kern.h                   |  26
-rw-r--r--  include/linux/wait.h                      |   5
-rw-r--r--  include/linux/workqueue.h                 |   4
-rw-r--r--  include/net/sock.h                        |   6
-rw-r--r--  include/trace/events/power.h              |   2
-rw-r--r--  include/trace/events/printk.h             |  41
-rw-r--r--  include/trace/events/rcu.h                |  63
-rw-r--r--  include/trace/events/sched.h              |  27
-rw-r--r--  include/trace/events/signal.h             |  85
52 files changed, 1047 insertions(+), 399 deletions(-)
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 8cf7e98a2c7b..9d650476d5dc 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -225,6 +225,7 @@ struct acpi_processor_errata {
} piix4;
};
+extern void acpi_processor_load_module(struct acpi_processor *pr);
extern int acpi_processor_preregister_performance(struct
acpi_processor_performance
__percpu *performance);
diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h
index a10a90791976..c022c82db7ca 100644
--- a/include/linux/altera_uart.h
+++ b/include/linux/altera_uart.h
@@ -5,8 +5,6 @@
#ifndef __ALTUART_H
#define __ALTUART_H
-#include <linux/init.h>
-
struct altera_uart_platform_uart {
unsigned long mapbase; /* Physical address base */
unsigned int irq; /* Interrupt vector */
@@ -14,6 +12,4 @@ struct altera_uart_platform_uart {
unsigned int bus_shift; /* Bus shift (address stride) */
};
-int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp);
-
#endif /* __ALTUART_H */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 081147da0564..fbe89e17124e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -319,13 +319,6 @@ static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
__clocksource_updatefreq_scale(cs, 1000, khz);
}
-static inline void
-clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
-{
- return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
- NSEC_PER_SEC, minsec);
-}
-
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 3c9c54fd5690..76384074262d 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -43,6 +43,7 @@
#define CN_IDX_DRBD 0x8
#define CN_VAL_DRBD 0x1
#define CN_KVP_IDX 0x9 /* HyperV KVP */
+#define CN_KVP_VAL 0x1 /* queries from the kernel */
#define CN_NETLINK_USERS 10 /* Highest index + 1 */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 1f6587590a1a..6e53b4823d7f 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -44,6 +44,13 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+extern int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env);
+extern ssize_t arch_print_cpu_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *bufptr);
+#endif
+
/*
* CPU notifier priorities.
*/
diff --git a/include/linux/device.h b/include/linux/device.h
index bccccef520dc..7c46bc32fcbf 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -238,8 +238,6 @@ struct device_driver {
extern int __must_check driver_register(struct device_driver *drv);
extern void driver_unregister(struct device_driver *drv);
-extern struct device_driver *get_driver(struct device_driver *drv);
-extern void put_driver(struct device_driver *drv);
extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
@@ -946,14 +944,14 @@ int _dev_info(const struct device *dev, const char *fmt, ...)
#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
-#if defined(DEBUG)
-#define dev_dbg(dev, format, arg...) \
- dev_printk(KERN_DEBUG, dev, format, ##arg)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, format, ...) \
do { \
dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
+#elif defined(DEBUG)
+#define dev_dbg(dev, format, arg...) \
+ dev_printk(KERN_DEBUG, dev, format, ##arg)
#else
#define dev_dbg(dev, format, arg...) \
({ \
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 0564e3c39882..7e3c53a900d8 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -15,20 +15,24 @@ struct _ddebug {
const char *function;
const char *filename;
const char *format;
- unsigned int lineno:24;
+ unsigned int lineno:18;
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
* writes commands to <debugfs>/dynamic_debug/control
*/
-#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
+#define _DPRINTK_FLAGS_NONE 0
+#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
#define _DPRINTK_FLAGS_INCL_MODNAME (1<<1)
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+#if defined DEBUG
+#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
+#else
#define _DPRINTK_FLAGS_DEFAULT 0
+#endif
unsigned int flags:8;
- char enabled;
} __attribute__((aligned(8)));
@@ -62,21 +66,20 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
- .enabled = false, \
}
#define dynamic_pr_debug(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.enabled)) \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
#define dynamic_dev_dbg(dev, fmt, ...) \
do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.enabled)) \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_dev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -84,7 +87,7 @@ do { \
#define dynamic_netdev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.enabled)) \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_netdev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 46685832ed99..2d09bfa5c262 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -16,6 +16,7 @@
#define ERESTARTNOHAND 514 /* restart if no handler.. */
#define ENOIOCTLCMD 515 /* No ioctl command */
#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
+#define EPROBE_DEFER 517 /* Driver requests probe retry */
/* Defined for the NFSv3 protocol */
#define EBADHANDLE 521 /* Illegal NFS file handle */
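EPROBE_DEFER asks the driver core to retry the probe later, once more of the
device's dependencies have registered. A sketch of a consumer (not part of
this diff; foo_probe() and the "vcc" supply are made up):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int foo_probe(struct platform_device *pdev)
{
	struct regulator *reg = regulator_get(&pdev->dev, "vcc");

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* core will re-probe us later */
		return PTR_ERR(reg);
	}
	regulator_put(reg);
	return 0;
}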
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 028e26f0bf08..72a6cabb4d5b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -31,16 +31,33 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+/*
+ * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
+ * set in the flags member.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * GLOBAL - set manually by ftrace_ops user to denote the ftrace_ops
+ * is part of the global tracers sharing the same filter
+ * via set_ftrace_* debugfs files.
+ * DYNAMIC - set when ftrace_ops is registered to denote dynamically
+ * allocated ftrace_ops which need special care
+ * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
+ * can be controlled by the following calls:
+ * ftrace_function_local_enable
+ * ftrace_function_local_disable
+ */
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_GLOBAL = 1 << 1,
FTRACE_OPS_FL_DYNAMIC = 1 << 2,
+ FTRACE_OPS_FL_CONTROL = 1 << 3,
};
struct ftrace_ops {
ftrace_func_t func;
struct ftrace_ops *next;
unsigned long flags;
+ int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_hash *notrace_hash;
struct ftrace_hash *filter_hash;
@@ -97,6 +114,55 @@ int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on current cpu by decreasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+ return;
+
+ (*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on current cpu by increasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+ return;
+
+ (*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ * on current cpu
+ *
+ * This function returns value of ftrace_ops::disabled on current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+ WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+ return *this_cpu_ptr(ops->disabled);
+}
+
extern void ftrace_stub(unsigned long a0, unsigned long a1);
#else /* !CONFIG_FUNCTION_TRACER */
@@ -178,12 +244,13 @@ struct dyn_ftrace {
};
int ftrace_force_update(void);
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
+void ftrace_free_filter(struct ftrace_ops *ops);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -314,9 +381,6 @@ extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
-static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
-{
-}
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
@@ -340,6 +404,9 @@ static inline int ftrace_text_reserved(void *start, void *end)
*/
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_free_filter(ops) do { } while (0)
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos) { return -ENODEV; }
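To make the CONTROL interface concrete, a sketch of a caller (everything
named my_* is hypothetical; the per-cpu 'disabled' counter is managed by the
ftrace core once the ops is registered):

#include <linux/ftrace.h>

static void my_tracer(unsigned long ip, unsigned long parent_ip)
{
	/* hypothetical handler; skipped while this cpu's counter is non-zero */
}

static struct ftrace_ops my_ops = {
	.func	= my_tracer,
	.flags	= FTRACE_OPS_FL_CONTROL,
};

static void my_quiet_section(void)
{
	preempt_disable();			/* this_cpu_ptr() requirement */
	ftrace_function_local_disable(&my_ops);	/* bump per-cpu count */
	/* work that must not be seen by my_tracer on this cpu */
	ftrace_function_local_enable(&my_ops);	/* drop it again */
	preempt_enable();
}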
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c3da42dd22ba..dd478fc8f9f5 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -146,6 +146,10 @@ enum trace_reg {
TRACE_REG_UNREGISTER,
TRACE_REG_PERF_REGISTER,
TRACE_REG_PERF_UNREGISTER,
+ TRACE_REG_PERF_OPEN,
+ TRACE_REG_PERF_CLOSE,
+ TRACE_REG_PERF_ADD,
+ TRACE_REG_PERF_DEL,
};
struct ftrace_event_call;
@@ -157,7 +161,7 @@ struct ftrace_event_class {
void *perf_probe;
#endif
int (*reg)(struct ftrace_event_call *event,
- enum trace_reg type);
+ enum trace_reg type, void *data);
int (*define_fields)(struct ftrace_event_call *);
struct list_head *(*get_fields)(struct ftrace_event_call *);
struct list_head fields;
@@ -165,7 +169,7 @@ struct ftrace_event_class {
};
extern int ftrace_event_reg(struct ftrace_event_call *event,
- enum trace_reg type);
+ enum trace_reg type, void *data);
enum {
TRACE_EVENT_FL_ENABLED_BIT,
@@ -241,6 +245,7 @@ enum {
FILTER_STATIC_STRING,
FILTER_DYN_STRING,
FILTER_PTR_STRING,
+ FILTER_TRACE_FN,
};
#define EVENT_STORAGE_SIZE 128
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index fe23ee768589..e61d3192448e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -596,6 +596,7 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
extern int disk_expand_part_tbl(struct gendisk *disk, int target);
extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
+extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
int partno, sector_t start,
sector_t len, int flags,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 0ae065a5fcb2..5852545e6bba 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -25,6 +25,173 @@
#ifndef _HYPERV_H
#define _HYPERV_H
+#include <linux/types.h>
+
+/*
+ * An implementation of HyperV key value pair (KVP) functionality for Linux.
+ *
+ *
+ * Copyright (C) 2010, Novell, Inc.
+ * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
+ *
+ */
+
+/*
+ * Maximum value size - used for both key names and value data, and includes
+ * any applicable NULL terminators.
+ *
+ * Note: This limit is somewhat arbitrary, but falls easily within what is
+ * supported for all native guests (back to Win 2000) and what is reasonable
+ * for the IC KVP exchange functionality. Note that Windows Me/98/95 are
+ * limited to 255 character key names.
+ *
+ * MSDN recommends not storing data values larger than 2048 bytes in the
+ * registry.
+ *
+ * Note: This value is used in defining the KVP exchange message - this value
+ * cannot be modified without affecting the message size and compatibility.
+ */
+
+/*
+ * bytes, including any null terminators
+ */
+#define HV_KVP_EXCHANGE_MAX_VALUE_SIZE (2048)
+
+
+/*
+ * Maximum key size - the registry limit for the length of an entry name
+ * is 256 characters, including the null terminator
+ */
+
+#define HV_KVP_EXCHANGE_MAX_KEY_SIZE (512)
+
+/*
+ * In Linux, we implement the KVP functionality in two components:
+ * 1) The kernel component, packaged as part of the hv_utils driver, which
+ * is responsible for communicating with the host and for implementing
+ * the host/guest protocol. 2) A user-level daemon that is
+ * responsible for data gathering.
+ *
+ * Host/Guest Protocol: The host iterates over an index and expects the guest
+ * to assign a key name to the index and also return the value corresponding to
+ * the key. The host will have at most one KVP transaction outstanding at any
+ * given point in time. The host side iteration stops when the guest returns
+ * an error. Microsoft has specified the following mapping of key names to
+ * host specified index:
+ *
+ * Index Key Name
+ * 0 FullyQualifiedDomainName
+ * 1 IntegrationServicesVersion
+ * 2 NetworkAddressIPv4
+ * 3 NetworkAddressIPv6
+ * 4 OSBuildNumber
+ * 5 OSName
+ * 6 OSMajorVersion
+ * 7 OSMinorVersion
+ * 8 OSVersion
+ * 9 ProcessorArchitecture
+ *
+ * The Windows host expects the Key Name and Key Value to be encoded in utf16.
+ *
+ * Guest Kernel/KVP Daemon Protocol: As noted earlier, we implement all of the
+ * data gathering functionality in a user mode daemon. The user level daemon
+ * is also responsible for binding the key name to the index as well. The
+ * kernel and user-level daemon communicate using a connector channel.
+ *
+ * The user mode component first registers with the
+ * kernel component. Subsequently, the kernel component requests data
+ * for the specified keys. In response to this message the user mode component
+ * fills in the value corresponding to the specified key. We overload the
+ * sequence field in the cn_msg header to define our KVP message types.
+ *
+ *
+ * The kernel component simply acts as a conduit for communication between the
+ * Windows host and the user-level daemon. The kernel component passes up the
+ * index received from the Host to the user-level daemon. If the index is
+ * valid (supported), the corresponding key as well as its
+ * value (both are strings) is returned. If the index is invalid
+ * (not supported), a NULL key string is returned.
+ */
+
+
+/*
+ * Registry value types.
+ */
+
+#define REG_SZ 1
+#define REG_U32 4
+#define REG_U64 8
+
+enum hv_kvp_exchg_op {
+ KVP_OP_GET = 0,
+ KVP_OP_SET,
+ KVP_OP_DELETE,
+ KVP_OP_ENUMERATE,
+ KVP_OP_REGISTER,
+ KVP_OP_COUNT /* Number of operations, must be last. */
+};
+
+enum hv_kvp_exchg_pool {
+ KVP_POOL_EXTERNAL = 0,
+ KVP_POOL_GUEST,
+ KVP_POOL_AUTO,
+ KVP_POOL_AUTO_EXTERNAL,
+ KVP_POOL_AUTO_INTERNAL,
+ KVP_POOL_COUNT /* Number of pools, must be last. */
+};
+
+struct hv_kvp_hdr {
+ __u8 operation;
+ __u8 pool;
+ __u16 pad;
+} __attribute__((packed));
+
+struct hv_kvp_exchg_msg_value {
+ __u32 value_type;
+ __u32 key_size;
+ __u32 value_size;
+ __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
+ union {
+ __u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
+ __u32 value_u32;
+ __u64 value_u64;
+ };
+} __attribute__((packed));
+
+struct hv_kvp_msg_enumerate {
+ __u32 index;
+ struct hv_kvp_exchg_msg_value data;
+} __attribute__((packed));
+
+struct hv_kvp_msg_get {
+ struct hv_kvp_exchg_msg_value data;
+};
+
+struct hv_kvp_msg_set {
+ struct hv_kvp_exchg_msg_value data;
+};
+
+struct hv_kvp_msg_delete {
+ __u32 key_size;
+ __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
+};
+
+struct hv_kvp_register {
+ __u8 version[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
+};
+
+struct hv_kvp_msg {
+ struct hv_kvp_hdr kvp_hdr;
+ union {
+ struct hv_kvp_msg_get kvp_get;
+ struct hv_kvp_msg_set kvp_set;
+ struct hv_kvp_msg_delete kvp_delete;
+ struct hv_kvp_msg_enumerate kvp_enum_data;
+ struct hv_kvp_register kvp_register;
+ } body;
+} __attribute__((packed));
+
+#ifdef __KERNEL__
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/uuid.h>
@@ -785,6 +952,7 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
#define HV_S_OK 0x00000000
#define HV_E_FAIL 0x80004005
+#define HV_S_CONT 0x80070103
#define HV_ERROR_NOT_SUPPORTED 0x80070032
#define HV_ERROR_MACHINE_LOCKED 0x800704F7
@@ -870,4 +1038,9 @@ struct hyperv_service_callback {
extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *);
+int hv_kvp_init(struct hv_util_service *);
+void hv_kvp_deinit(void);
+void hv_kvp_onchannelcallback(void *);
+
+#endif /* __KERNEL__ */
#endif /* _HYPERV_H */
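As an illustration of the message layout above (not part of the patch), a
daemon-side reply to an enumerate request for index 0 could be filled in
like this; the real daemon also utf16-encodes both strings before replying
over the connector channel:

#include <string.h>

static void kvp_answer_fqdn(struct hv_kvp_msg *msg, const char *fqdn)
{
	struct hv_kvp_exchg_msg_value *val = &msg->body.kvp_enum_data.data;

	val->value_type = REG_SZ;
	val->key_size = sizeof("FullyQualifiedDomainName");	/* incl. NUL */
	memcpy(val->key, "FullyQualifiedDomainName", val->key_size);
	val->value_size = strlen(fqdn) + 1;
	memcpy(val->value, fqdn, val->value_size);
}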
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9c66b1ada9d7..f994d51f70f2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -149,7 +149,7 @@ extern struct cred init_cred;
}, \
.rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
- .time_slice = HZ, \
+ .time_slice = RR_TIMESLICE, \
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a64b00e286f5..3f830e005118 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -20,7 +20,6 @@
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
-#include <trace/events/irq.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -456,11 +455,7 @@ asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-static inline void __raise_softirq_irqoff(unsigned int nr)
-{
- trace_softirq_raise(nr);
- or_softirq_pending(1UL << nr);
-}
+extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 119773eebe31..1a3018063034 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -6,8 +6,11 @@
#include <linux/workqueue.h>
enum {
- ICQ_IOPRIO_CHANGED,
- ICQ_CGROUP_CHANGED,
+ ICQ_IOPRIO_CHANGED = 1 << 0,
+ ICQ_CGROUP_CHANGED = 1 << 1,
+ ICQ_EXITED = 1 << 2,
+
+ ICQ_CHANGED_MASK = ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
};
/*
@@ -88,7 +91,7 @@ struct io_cq {
struct rcu_head __rcu_head;
};
- unsigned long changed;
+ unsigned int flags;
};
/*
@@ -139,6 +142,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
gfp_t gfp_flags, int node);
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
+unsigned int icq_get_changed(struct io_cq *icq);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 5ce8b140428f..c513a40510f5 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -1,22 +1,69 @@
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H
+/*
+ * Jump label support
+ *
+ * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
+ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Jump labels provide an interface to generate dynamic branches using
+ * self-modifying code. Assuming toolchain and architecture support, the result
+ * of an "if (static_key_false(&key))" statement is an unconditional branch (which
+ * defaults to false - and the true block is placed out of line).
+ *
+ * However at runtime we can change the branch target using
+ * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
+ * object and for as long as there are references all branches referring to
+ * that particular key will point to the (out of line) true block.
+ *
+ * Since this relies on modifying code the static_key_slow_{inc,dec}() functions
+ * must be considered absolute slow paths (machine wide synchronization etc.).
+ * OTOH, since the affected branches are unconditional their runtime overhead
+ * will be absolutely minimal, esp. in the default (off) case where the total
+ * effect is a single NOP of appropriate size. The on case will patch in a jump
+ * to the out-of-line block.
+ *
+ * When the control is directly exposed to userspace it is prudent to delay the
+ * decrement to avoid high frequency code modifications which can (and do)
+ * cause significant performance degradation. Struct static_key_deferred and
+ * static_key_slow_dec_deferred() provide for this.
+ *
+ * Lacking toolchain and or architecture support, it falls back to a simple
+ * conditional branch.
+ *
+ * struct static_key my_key = STATIC_KEY_INIT_TRUE;
+ *
+ * if (static_key_true(&my_key)) {
+ * }
+ *
+ * will result in the true case being in-line, and it starts the key with a single
+ * reference. Mixing static_key_true() and static_key_false() on the same key is not
+ * allowed.
+ *
+ * Not initializing the key (static data is initialized to 0s anyway) is the
+ * same as using STATIC_KEY_INIT_FALSE, and static_key_false() is
+ * equivalent to static_branch().
+ *
+ */
+
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
-struct jump_label_key {
+struct static_key {
atomic_t enabled;
+/* Set lsb bit to 1 if branch is default true, 0 otherwise */
struct jump_entry *entries;
#ifdef CONFIG_MODULES
- struct jump_label_mod *next;
+ struct static_key_mod *next;
#endif
};
-struct jump_label_key_deferred {
- struct jump_label_key key;
+struct static_key_deferred {
+ struct static_key key;
unsigned long timeout;
struct delayed_work work;
};
@@ -34,13 +81,34 @@ struct module;
#ifdef HAVE_JUMP_LABEL
-#ifdef CONFIG_MODULES
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL, NULL}
-#else
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL}
-#endif
+#define JUMP_LABEL_TRUE_BRANCH 1UL
+
+static
+inline struct jump_entry *jump_label_get_entries(struct static_key *key)
+{
+ return (struct jump_entry *)((unsigned long)key->entries
+ & ~JUMP_LABEL_TRUE_BRANCH);
+}
-static __always_inline bool static_branch(struct jump_label_key *key)
+static inline bool jump_label_get_branch_default(struct static_key *key)
+{
+ if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
+ return true;
+ return false;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+ return arch_static_branch(key);
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
+{
+ return !static_key_false(key);
+}
+
+/* Deprecated. Please use static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
{
return arch_static_branch(key);
}
@@ -56,21 +124,23 @@ extern void arch_jump_label_transform(struct jump_entry *entry,
extern void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type);
extern int jump_label_text_reserved(void *start, void *end);
-extern void jump_label_inc(struct jump_label_key *key);
-extern void jump_label_dec(struct jump_label_key *key);
-extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
-extern bool jump_label_enabled(struct jump_label_key *key);
+extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
extern void jump_label_apply_nops(struct module *mod);
-extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
- unsigned long rl);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(0), .entries = (void *)0 })
#else /* !HAVE_JUMP_LABEL */
#include <linux/atomic.h>
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0)}
-
-struct jump_label_key {
+struct static_key {
atomic_t enabled;
};
@@ -78,30 +148,45 @@ static __always_inline void jump_label_init(void)
{
}
-struct jump_label_key_deferred {
- struct jump_label_key key;
+struct static_key_deferred {
+ struct static_key key;
};
-static __always_inline bool static_branch(struct jump_label_key *key)
+static __always_inline bool static_key_false(struct static_key *key)
+{
+ if (unlikely(atomic_read(&key->enabled) > 0))
+ return true;
+ return false;
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
{
- if (unlikely(atomic_read(&key->enabled)))
+ if (likely(atomic_read(&key->enabled) > 0))
return true;
return false;
}
-static inline void jump_label_inc(struct jump_label_key *key)
+/* Deprecated. Please use static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
+{
+ if (unlikely(atomic_read(&key->enabled) > 0))
+ return true;
+ return false;
+}
+
+static inline void static_key_slow_inc(struct static_key *key)
{
atomic_inc(&key->enabled);
}
-static inline void jump_label_dec(struct jump_label_key *key)
+static inline void static_key_slow_dec(struct static_key *key)
{
atomic_dec(&key->enabled);
}
-static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
- jump_label_dec(&key->key);
+ static_key_slow_dec(&key->key);
}
static inline int jump_label_text_reserved(void *start, void *end)
@@ -112,23 +197,30 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
-static inline bool jump_label_enabled(struct jump_label_key *key)
-{
- return !!atomic_read(&key->enabled);
-}
-
static inline int jump_label_apply_nops(struct module *mod)
{
return 0;
}
-static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
}
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(1) })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(0) })
+
#endif /* HAVE_JUMP_LABEL */
-#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
-#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
+static inline bool static_key_enabled(struct static_key *key)
+{
+ return (atomic_read(&key->enabled) > 0);
+}
#endif /* _LINUX_JUMP_LABEL_H */
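Pulling the renamed API together, a minimal sketch of a default-false key on
a hot path (do_feature_work() is hypothetical):

#include <linux/static_key.h>

static void do_feature_work(void);

static struct static_key my_feature = STATIC_KEY_INIT_FALSE;

static void hot_path(void)
{
	if (static_key_false(&my_feature))	/* a NOP until enabled */
		do_feature_work();		/* out-of-line true block */
}

static void feature_ctl(bool on)
{
	if (on)
		static_key_slow_inc(&my_feature);	/* patches every site */
	else
		static_key_slow_dec(&my_feature);
}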
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index ec2d17bc1f1e..daf4a3a40ee0 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -7,8 +7,6 @@
extern struct tasklet_struct keyboard_tasklet;
-extern int shift_state;
-
extern char *func_table[MAX_NR_FUNC];
extern char func_buf[];
extern char *funcbufptr;
@@ -65,8 +63,6 @@ struct kbd_struct {
#define VC_META 4 /* 0 - meta, 1 - meta=prefix with ESC */
};
-extern struct kbd_struct kbd_table[];
-
extern int kbd_init(void);
extern unsigned char getledstate(void);
@@ -79,6 +75,7 @@ extern void (*kbd_ledfunc)(unsigned int led);
extern int set_console(int nr);
extern void schedule_console_callback(void);
+/* FIXME: review locking for vt.c callers */
static inline void set_leds(void)
{
tasklet_schedule(&keyboard_tasklet);
@@ -142,8 +139,6 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
struct console;
-int getkeycode(unsigned int scancode);
-int setkeycode(unsigned int scancode, unsigned int keycode);
void compute_shiftstate(void);
/* defkeymap.c */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e8343422240a..d801acb5e680 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -85,6 +85,19 @@
} \
)
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)( \
+{ \
+ typeof(x) quot = (x) / (denom); \
+ typeof(x) rem = (x) % (denom); \
+ (quot * (numer)) + ((rem * (numer)) / (denom)); \
+} \
+)
+
+
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
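A worked example for mult_frac() (not from the patch): with a 32-bit x,
computing x * 700 first can wrap, while mult_frac() divides first and stays
in range:

static u32 scale_example(void)
{
	u32 x = 3000000000U;

	/*
	 * quot = x / 1024 = 2929687, rem = x % 1024 = 512, so the result is
	 * 2929687 * 700 + (512 * 700) / 1024 = 2050780900 + 350 = 2050781250,
	 * exactly x * 700 / 1024, whereas x * 700 would overflow a u32.
	 */
	return mult_frac(x, 700, 1024);
}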
diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h
index 33a63f62d57f..86e5214ae735 100644
--- a/include/linux/keyboard.h
+++ b/include/linux/keyboard.h
@@ -24,8 +24,6 @@
#ifdef __KERNEL__
struct notifier_block;
-extern const int NR_TYPES;
-extern const int max_vals[];
extern unsigned short *key_maps[MAX_NR_KEYMAPS];
extern unsigned short plain_map[NR_KEYS];
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 23fcdfcba81b..b8ba85544721 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -6,6 +6,8 @@
#if BITS_PER_LONG == 64
+#define div64_long(x,y) div64_s64((x),(y))
+
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
*
@@ -45,6 +47,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
#elif BITS_PER_LONG == 32
+#define div64_long(x,y) div_s64((x),(y))
+
#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 83ac0713ed0a..fb69ad191ad7 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -560,4 +560,25 @@ struct amba_id {
#endif
};
+/*
+ * Match x86 CPUs for CPU specific drivers.
+ * See documentation of "x86_match_cpu" for details.
+ */
+
+struct x86_cpu_id {
+ __u16 vendor;
+ __u16 family;
+ __u16 model;
+ __u16 feature; /* bit index */
+ kernel_ulong_t driver_data;
+};
+
+#define X86_FEATURE_MATCH(x) \
+ { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x }
+
+#define X86_VENDOR_ANY 0xffff
+#define X86_FAMILY_ANY 0
+#define X86_MODEL_ANY 0
+#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
+
#endif /* LINUX_MOD_DEVICETABLE_H */
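A driver would then publish a match table along these lines (a sketch;
X86_FEATURE_AES is one example of a feature bit index, and x86_match_cpu()
is the matching helper referenced in the comment above):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>

static const struct x86_cpu_id aes_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}	/* terminating empty entry */
};
MODULE_DEVICE_TABLE(x86cpu, aes_ids);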
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0eac07c95255..3e5cb2546e4f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -214,8 +214,8 @@ enum {
#include <linux/skbuff.h>
#ifdef CONFIG_RPS
-#include <linux/jump_label.h>
-extern struct jump_label_key rps_needed;
+#include <linux/static_key.h>
+extern struct static_key rps_needed;
#endif
struct neighbour;
@@ -2687,14 +2687,14 @@ int netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
-#if defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
dynamic_netdev_dbg(__dev, format, ##args); \
} while (0)
+#elif defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...) \
({ \
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b809265607d0..29734be334c1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -163,13 +163,13 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#if defined(CONFIG_JUMP_LABEL)
-#include <linux/jump_label.h>
-extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+#include <linux/static_key.h>
+extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
- return static_branch(&nf_hooks_needed[pf][hook]);
+ return static_key_false(&nf_hooks_needed[pf][hook]);
return !list_empty(&nf_hooks[pf][hook]);
}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a16b1df3deff..d4afd703e948 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -946,6 +946,19 @@ int __must_check __pci_register_driver(struct pci_driver *, struct module *,
__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
void pci_unregister_driver(struct pci_driver *dev);
+
+/**
+ * module_pci_driver() - Helper macro for registering a PCI driver
+ * @__pci_driver: pci_driver struct
+ *
+ * Helper macro for PCI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_pci_driver(__pci_driver) \
+ module_driver(__pci_driver, pci_register_driver, \
+ pci_unregister_driver)
+
void pci_remove_behind_bridge(struct pci_dev *dev);
struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
int pci_add_dynid(struct pci_driver *drv,
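With the helper, a driver that does nothing special at module init/exit
collapses to a sketch like this (all foo_* names and IDs hypothetical):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
}

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_pci_driver(foo_driver);	/* stands in for module_init()/module_exit() */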
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index abb2776be1ba..bd9f55a5958d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -129,11 +129,40 @@ enum perf_event_sample_format {
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
PERF_SAMPLE_RAW = 1U << 10,
+ PERF_SAMPLE_BRANCH_STACK = 1U << 11,
- PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 12, /* non-ABI */
};
/*
+ * values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK is set
+ *
+ * If the user does not pass priv level information via branch_sample_type,
+ * the kernel uses the event's priv level. Branch and event priv levels do
+ * not have to match. Branch priv level is checked for permissions.
+ *
+ * The branch types can be combined, however BRANCH_ANY covers all types
+ * of branches and therefore it supersedes all the other types.
+ */
+enum perf_branch_sample_type {
+ PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */
+ PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */
+ PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */
+
+ PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */
+ PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */
+ PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */
+
+ PERF_SAMPLE_BRANCH_MAX = 1U << 7, /* non-ABI */
+};
+
+#define PERF_SAMPLE_BRANCH_PLM_ALL \
+ (PERF_SAMPLE_BRANCH_USER|\
+ PERF_SAMPLE_BRANCH_KERNEL|\
+ PERF_SAMPLE_BRANCH_HV)
+
+/*
* The format of the data returned by read() on a perf event fd,
* as specified by attr.read_format:
*
@@ -163,6 +192,8 @@ enum perf_event_read_format {
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
+#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -240,6 +271,7 @@ struct perf_event_attr {
__u64 bp_len;
__u64 config2; /* extension of config1 */
};
+ __u64 branch_sample_type; /* enum branch_sample_type */
};
/*
@@ -291,12 +323,14 @@ struct perf_event_mmap_page {
__s64 offset; /* add to hardware event value */
__u64 time_enabled; /* time event active */
__u64 time_running; /* time event on cpu */
+ __u32 time_mult, time_shift;
+ __u64 time_offset;
/*
* Hole for extension of the self monitor capabilities
*/
- __u64 __reserved[123]; /* align to 1k */
+ __u64 __reserved[121]; /* align to 1k */
/*
* Control data for the mmap() data buffer.
@@ -456,6 +490,8 @@ enum perf_event_type {
*
* { u32 size;
* char data[size];}&& PERF_SAMPLE_RAW
+ *
+ * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
* };
*/
PERF_RECORD_SAMPLE = 9,
@@ -512,7 +548,7 @@ struct perf_guest_info_callbacks {
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
#include <linux/atomic.h>
#include <asm/local.h>
@@ -528,12 +564,34 @@ struct perf_raw_record {
void *data;
};
+/*
+ * single taken branch record layout:
+ *
+ * from: source instruction (may not always be a branch insn)
+ * to: branch target
+ * mispred: branch target was mispredicted
+ * predicted: branch target was predicted
+ *
+ * support for mispred, predicted is optional. In case it
+ * is not supported, mispred = predicted = 0.
+ */
struct perf_branch_entry {
- __u64 from;
- __u64 to;
- __u64 flags;
+ __u64 from;
+ __u64 to;
+ __u64 mispred:1, /* target mispredicted */
+ predicted:1,/* target predicted */
+ reserved:62;
};
+/*
+ * branch stack layout:
+ * nr: number of taken branches stored in entries[]
+ *
+ * Note that nr can vary from sample to sample.
+ * Branches (to, from) are stored from most recent
+ * to least recent, i.e., entries[0] contains the most
+ * recent branch.
+ */
struct perf_branch_stack {
__u64 nr;
struct perf_branch_entry entries[0];
@@ -564,7 +622,9 @@ struct hw_perf_event {
unsigned long event_base;
int idx;
int last_cpu;
+
struct hw_perf_event_extra extra_reg;
+ struct hw_perf_event_extra branch_reg;
};
struct { /* software */
struct hrtimer hrtimer;
@@ -616,6 +676,7 @@ struct pmu {
struct list_head entry;
struct device *dev;
+ const struct attribute_group **attr_groups;
char *name;
int type;
@@ -681,6 +742,17 @@ struct pmu {
* for each successful ->add() during the transaction.
*/
void (*cancel_txn) (struct pmu *pmu); /* optional */
+
+ /*
+ * Will return the value for perf_event_mmap_page::index for this event,
+ * if no implementation is provided it will default to: event->hw.idx + 1.
+ */
+ int (*event_idx) (struct perf_event *event); /* optional */
+
+ /*
+ * flush branch stack on context-switches (needed in cpu-wide mode)
+ */
+ void (*flush_branch_stack) (void);
};
/**
@@ -850,6 +922,9 @@ struct perf_event {
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call *tp_event;
struct event_filter *filter;
+#ifdef CONFIG_FUNCTION_TRACER
+ struct ftrace_ops ftrace_ops;
+#endif
#endif
#ifdef CONFIG_CGROUP_PERF
@@ -911,7 +986,8 @@ struct perf_event_context {
u64 parent_gen;
u64 generation;
int pin_count;
- int nr_cgroups; /* cgroup events present */
+ int nr_cgroups; /* cgroup evts */
+ int nr_branch_stack; /* branch_stack evt */
struct rcu_head rcu_head;
};
@@ -976,6 +1052,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
+
struct perf_sample_data {
u64 type;
@@ -995,12 +1072,14 @@ struct perf_sample_data {
u64 period;
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
+ struct perf_branch_stack *br_stack;
};
static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
data->addr = addr;
data->raw = NULL;
+ data->br_stack = NULL;
}
extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1029,7 +1108,7 @@ static inline int is_software_event(struct perf_event *event)
return event->pmu->task_ctx_nr == perf_sw_context;
}
-extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
@@ -1057,7 +1136,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct pt_regs hot_regs;
- if (static_branch(&perf_swevent_enabled[event_id])) {
+ if (static_key_false(&perf_swevent_enabled[event_id])) {
if (!regs) {
perf_fetch_caller_regs(&hot_regs);
regs = &hot_regs;
@@ -1066,12 +1145,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
}
}
-extern struct jump_label_key_deferred perf_sched_events;
+extern struct static_key_deferred perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
- if (static_branch(&perf_sched_events.key))
+ if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
}
@@ -1080,7 +1159,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
- if (static_branch(&perf_sched_events.key))
+ if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_out(prev, next);
}
@@ -1139,6 +1218,11 @@ extern void perf_bp_event(struct perf_event *event, void *data);
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif
+static inline bool has_branch_stack(struct perf_event *event)
+{
+ return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+}
+
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
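From the user-space side, an event that asks for branch stacks would be
configured roughly as below (a sketch; whether a given combination is
honoured depends on the PMU, and with no priv bits in branch_sample_type the
kernel falls back to the event's priv level):

#include <string.h>
#include <linux/perf_event.h>

static void setup_branch_sampling(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);	/* >= PERF_ATTR_SIZE_VER2 */
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_period = 100000;
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	attr->branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
}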
diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h
new file mode 100644
index 000000000000..ed0e975b3c54
--- /dev/null
+++ b/include/linux/platform_data/efm32-uart.h
@@ -0,0 +1,18 @@
+/*
+ *
+ *
+ */
+#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__
+#define __LINUX_PLATFORM_DATA_EFM32_UART_H__
+
+#include <linux/types.h>
+
+/**
+ * struct efm32_uart_pdata
+ * @location: pinmux location for the I/O pins (to be written to the ROUTE
+ * register)
+ */
+struct efm32_uart_pdata {
+ u8 location;
+};
+#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 58969b2a8a82..5a710b9c578e 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -48,12 +48,14 @@ do { \
barrier(); \
} while (0)
-#define preempt_enable_no_resched() \
+#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
dec_preempt_count(); \
} while (0)
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+
#define preempt_enable() \
do { \
preempt_enable_no_resched(); \
@@ -92,6 +94,7 @@ do { \
#else /* !CONFIG_PREEMPT_COUNT */
#define preempt_disable() do { } while (0)
+#define sched_preempt_enable_no_resched() do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0)
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f0e22f75143f..0525927f203f 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -101,6 +101,11 @@ asmlinkage __printf(1, 2) __cold
int printk(const char *fmt, ...);
/*
+ * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ */
+__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+
+/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
* with all other unrelated printk_ratelimit() callsites. Instead use
* printk_ratelimited() or plain old __ratelimit().
@@ -127,6 +132,11 @@ int printk(const char *s, ...)
{
return 0;
}
+static inline __printf(1, 2) __cold
+int printk_sched(const char *s, ...)
+{
+ return 0;
+}
static inline int printk_ratelimit(void)
{
return 0;
@@ -180,13 +190,13 @@ extern void dump_stack(void) __cold;
#endif
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(DEBUG)
-#define pr_debug(fmt, ...) \
- printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
#define pr_debug(fmt, ...) \
dynamic_pr_debug(fmt, ##__VA_ARGS__)
+#elif defined(DEBUG)
+#define pr_debug(fmt, ...) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 81c04f4348ec..937217425c47 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -190,6 +190,33 @@ extern void rcu_idle_exit(void);
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
+/**
+ * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
+ * @a: Code that RCU needs to pay attention to.
+ *
+ * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
+ * in the inner idle loop, that is, between the rcu_idle_enter() and
+ * the rcu_idle_exit() -- RCU will happily ignore any such read-side
+ * critical sections. However, things like powertop need tracepoints
+ * in the inner idle loop.
+ *
+ * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, a call to the do_something_with_RCU() function),
+ * and then tell RCU to go back to ignoring this CPU. It is permissible
+ * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
+ * quite limited. If deeper nesting is required, it will be necessary
+ * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ *
+ * This macro may be used from process-level code only.
+ */
+#define RCU_NONIDLE(a) \
+ do { \
+ rcu_idle_exit(); \
+ do { a; } while (0); \
+ rcu_idle_enter(); \
+ } while (0)
+
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -226,6 +253,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
+bool rcu_lockdep_current_cpu_online(void);
+#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+static inline bool rcu_lockdep_current_cpu_online(void)
+{
+ return 1;
+}
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#ifdef CONFIG_PROVE_RCU
@@ -239,13 +275,11 @@ static inline int rcu_is_cpu_idle(void)
static inline void rcu_lock_acquire(struct lockdep_map *map)
{
- WARN_ON_ONCE(rcu_is_cpu_idle());
lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
}
static inline void rcu_lock_release(struct lockdep_map *map)
{
- WARN_ON_ONCE(rcu_is_cpu_idle());
lock_release(map, 1, _THIS_IP_);
}
@@ -270,6 +304,9 @@ extern int debug_lockdep_rcu_enabled(void);
* occur in the same context, for example, it is illegal to invoke
* rcu_read_unlock() in process context if the matching rcu_read_lock()
* was invoked from within an irq handler.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
*/
static inline int rcu_read_lock_held(void)
{
@@ -277,6 +314,8 @@ static inline int rcu_read_lock_held(void)
return 1;
if (rcu_is_cpu_idle())
return 0;
+ if (!rcu_lockdep_current_cpu_online())
+ return 0;
return lock_is_held(&rcu_lock_map);
}
@@ -313,6 +352,9 @@ extern int rcu_read_lock_bh_held(void);
* notice an extended quiescent state to other CPUs that started a grace
* period. Otherwise we would delay any grace period as long as we run in
* the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
*/
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
@@ -323,6 +365,8 @@ static inline int rcu_read_lock_sched_held(void)
return 1;
if (rcu_is_cpu_idle())
return 0;
+ if (!rcu_lockdep_current_cpu_online())
+ return 0;
if (debug_locks)
lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -381,8 +425,22 @@ extern int rcu_my_thread_group_empty(void);
} \
} while (0)
+#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+static inline void rcu_preempt_sleep_check(void)
+{
+ rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+ "Illegal context switch in RCU read-side "
+ "critical section");
+}
+#else /* #ifdef CONFIG_PROVE_RCU */
+static inline void rcu_preempt_sleep_check(void)
+{
+}
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
#define rcu_sleep_check() \
do { \
+ rcu_preempt_sleep_check(); \
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
"Illegal context switch in RCU-bh" \
" read-side critical section"); \
@@ -470,6 +528,13 @@ extern int rcu_my_thread_group_empty(void);
* NULL. Although rcu_access_pointer() may also be used in cases where
* update-side locks prevent the value of the pointer from changing, you
* should instead use rcu_dereference_protected() for this use case.
+ *
+ * It is also permissible to use rcu_access_pointer() when read-side
+ * access to the pointer was removed at least one grace period ago, as
+ * is the case in the context of the RCU callback that is freeing up
+ * the data, or after a synchronize_rcu() returns. This can be useful
+ * when tearing down multi-linked structures after a grace period
+ * has elapsed.
*/
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
@@ -659,6 +724,8 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "rcu_read_lock() used illegally while idle");
}
/*
@@ -678,6 +745,8 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
@@ -705,6 +774,8 @@ static inline void rcu_read_lock_bh(void)
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "rcu_read_lock_bh() used illegally while idle");
}
/*
@@ -714,6 +785,8 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
local_bh_enable();
@@ -737,6 +810,8 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "rcu_read_lock_sched() used illegally while idle");
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -753,6 +828,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
@@ -841,7 +918,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
/* See the kfree_rcu() header comment. */
BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
- call_rcu(head, (rcu_callback)offset);
+ kfree_call_rcu(head, (rcu_callback)offset);
}
/**
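The RCU_NONIDLE() macro added above is used from process-level idle-loop
code like this sketch (trace_my_idle_event() stands in for a real
tracepoint):

static void idle_report(int cpu)
{
	/* briefly tell RCU to pay attention, run the tracepoint,
	 * then return this CPU to RCU-idle */
	RCU_NONIDLE(trace_my_idle_event(cpu));
}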
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 00b7a5e493d2..e93df77176d1 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,13 +27,9 @@
#include <linux/cache.h>
-#ifdef CONFIG_RCU_BOOST
static inline void rcu_init(void)
{
}
-#else /* #ifdef CONFIG_RCU_BOOST */
-void rcu_init(void);
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
static inline void rcu_barrier_bh(void)
{
@@ -83,6 +79,12 @@ static inline void synchronize_sched_expedited(void)
synchronize_sched();
}
+static inline void kfree_call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ call_rcu(head, func);
+}
+
#ifdef CONFIG_TINY_RCU
static inline void rcu_preempt_note_context_switch(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 67458468f1a8..e8ee5dd0854c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -61,6 +61,24 @@ extern void synchronize_rcu_bh(void);
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);
+void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+
+/**
+ * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
+ *
+ * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly. This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * and is thus not recommended for any sort of common-case code. In fact,
+ * if you are using synchronize_rcu_bh_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_rcu_bh() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier. Failing to observe
+ * these restrictions will result in deadlock.
+ */
static inline void synchronize_rcu_bh_expedited(void)
{
synchronize_sched_expedited();
@@ -83,6 +101,7 @@ extern void rcu_sched_force_quiescent_state(void);
/* A context switch is a grace period for RCU-sched and RCU-bh. */
static inline int rcu_blocking_is_gp(void)
{
+ might_sleep(); /* Check for RCU read-side critical section. */
return num_online_cpus() == 1;
}
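Callers normally reach kfree_call_rcu() through kfree_rcu(); as the
rcupdate.h hunk above shows, __kfree_rcu() encodes the rcu_head offset as
the callback. A sketch with a hypothetical structure:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

static void drop_foo(struct foo *p)
{
	/* ends up in kfree_call_rcu(&p->rcu, (rcu_callback)offsetof(struct foo, rcu)) */
	kfree_rcu(p, rcu);
}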
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0657368bd78f..e074e1e54f85 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -361,6 +361,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
struct nsproxy;
@@ -905,6 +906,7 @@ struct sched_group_power {
* single CPU.
*/
unsigned int power, power_orig;
+ unsigned long next_update;
/*
* Number of busy cpus in this group.
*/
@@ -1052,6 +1054,8 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
#else /* CONFIG_SMP */
struct sched_domain_attr;
@@ -1061,6 +1065,12 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
}
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+ return true;
+}
+
#endif /* !CONFIG_SMP */
@@ -1225,6 +1235,12 @@ struct sched_rt_entity {
#endif
};
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE (100 * HZ / 1000)
+
struct rcu_node;
enum perf_event_task_context {
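RR_TIMESLICE above is expressed in jiffies, so its numeric value tracks HZ while the wall-clock length stays roughly 100 ms: HZ=100 gives 10 jiffies, HZ=250 gives 25, and HZ=1000 gives 100. A hedged sanity-check sketch, with rr_timeslice_check() as a made-up init-time helper:

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/sched.h>

    static void __init rr_timeslice_check(void)
    {
            /* ~100 ms for any HZ; exact whenever 1000 is a multiple of HZ */
            pr_info("SCHED_RR timeslice: %u ms (%d jiffies)\n",
                    jiffies_to_msecs(RR_TIMESLICE), RR_TIMESLICE);
    }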
@@ -1319,6 +1335,11 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+#ifdef CONFIG_GENERIC_HARDIRQS
+ /* IRQ handler threads */
+ unsigned irq_thread:1;
+#endif
+
pid_t pid;
pid_t tgid;
@@ -1427,11 +1448,6 @@ struct task_struct {
* mempolicy */
spinlock_t alloc_lock;
-#ifdef CONFIG_GENERIC_HARDIRQS
- /* IRQ handler threads */
- struct irqaction *irqaction;
-#endif
-
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
@@ -1863,8 +1879,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
#ifdef CONFIG_PREEMPT_RCU
#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
static inline void rcu_copy_process(struct task_struct *p)
{
@@ -2048,7 +2063,7 @@ extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
@@ -2065,12 +2080,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+ return tsk->pi_blocked_on != NULL;
+}
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+ return false;
+}
#endif
extern bool yield_to(struct task_struct *p, bool preempt);
@@ -2389,12 +2412,15 @@ static inline void task_unlock(struct task_struct *p)
extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
unsigned long *flags);
-#define lock_task_sighand(tsk, flags) \
-({ struct sighand_struct *__ss; \
- __cond_lock(&(tsk)->sighand->siglock, \
- (__ss = __lock_task_sighand(tsk, flags))); \
- __ss; \
-}) \
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
+{
+ struct sighand_struct *ret;
+
+ ret = __lock_task_sighand(tsk, flags);
+ (void)__cond_lock(&tsk->sighand->siglock, ret);
+ return ret;
+}
static inline void unlock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
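Turning lock_task_sighand() into a real inline keeps sparse's __cond_lock() annotation (the function conditionally acquires siglock) while dropping the statement-expression macro. The calling pattern is unchanged; a hedged sketch with probe_task() as a made-up caller:

    #include <linux/errno.h>
    #include <linux/sched.h>

    static int probe_task(struct task_struct *tsk)
    {
            struct sighand_struct *sighand;
            unsigned long flags;

            sighand = lock_task_sighand(tsk, &flags);
            if (!sighand)
                    return -ESRCH;  /* task is being reaped; no siglock */
            /* ... inspect signal state under tsk->sighand->siglock ... */
            unlock_task_sighand(tsk, &flags);
            return 0;
    }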
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 3d86517fe7d5..441980ecc4e5 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -152,8 +152,8 @@ struct serial_uart_config {
#define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE)
#define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1)
-#define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \
- ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY)
+#define ASYNC_USR_MASK (ASYNC_SPD_MASK|ASYNC_CALLOUT_NOHUP| \
+ ASYNC_LOW_LATENCY)
#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI)
#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI)
#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
diff --git a/include/linux/serialP.h b/include/linux/serialP.h
deleted file mode 100644
index e811a615f696..000000000000
--- a/include/linux/serialP.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Private header file for the (dumb) serial driver
- *
- * Copyright (C) 1997 by Theodore Ts'o.
- *
- * Redistribution of this file is permitted under the terms of the GNU
- * Public License (GPL)
- */
-
-#ifndef _LINUX_SERIALP_H
-#define _LINUX_SERIALP_H
-
-/*
- * This is our internal structure for each serial port's state.
- *
- * Many fields are paralleled by the structure used by the serial_struct
- * structure.
- *
- * For definitions of the flags field, see tty.h
- */
-
-#include <linux/termios.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/circ_buf.h>
-#include <linux/wait.h>
-
-struct serial_state {
- int magic;
- int baud_base;
- unsigned long port;
- int irq;
- int flags;
- int hub6;
- int type;
- int line;
- int revision; /* Chip revision (950) */
- int xmit_fifo_size;
- int custom_divisor;
- int count;
- u8 *iomem_base;
- u16 iomem_reg_shift;
- unsigned short close_delay;
- unsigned short closing_wait; /* time to wait before closing */
- struct async_icount icount;
- int io_type;
- struct async_struct *info;
- struct pci_dev *dev;
-};
-
-struct async_struct {
- int magic;
- unsigned long port;
- int hub6;
- int flags;
- int xmit_fifo_size;
- struct serial_state *state;
- struct tty_struct *tty;
- int read_status_mask;
- int ignore_status_mask;
- int timeout;
- int quot;
- int x_char; /* xon/xoff character */
- int close_delay;
- unsigned short closing_wait;
- unsigned short closing_wait2; /* obsolete */
- int IER; /* Interrupt Enable Register */
- int MCR; /* Modem control register */
- int LCR; /* Line control register */
- int ACR; /* 16950 Additional Control Reg. */
- unsigned long event;
- unsigned long last_active;
- int line;
- int blocked_open; /* # of blocked opens */
- struct circ_buf xmit;
- spinlock_t xmit_lock;
- u8 *iomem_base;
- u16 iomem_reg_shift;
- int io_type;
- struct work_struct work;
- struct tasklet_struct tlet;
-#ifdef DECLARE_WAITQUEUE
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
- wait_queue_head_t delta_msr_wait;
-#else
- struct wait_queue *open_wait;
- struct wait_queue *close_wait;
- struct wait_queue *delta_msr_wait;
-#endif
- struct async_struct *next_port; /* For the linked list */
- struct async_struct *prev_port;
-};
-
-#define CONFIGURED_SERIAL_PORT(info) ((info)->port || ((info)->iomem_base))
-
-#define SERIAL_MAGIC 0x5301
-#define SSTATE_MAGIC 0x5302
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define RS_EVENT_WRITE_WAKEUP 0
-
-/*
- * Multiport serial configuration structure --- internal structure
- */
-struct rs_multiport_struct {
- int port1;
- unsigned char mask1, match1;
- int port2;
- unsigned char mask2, match2;
- int port3;
- unsigned char mask3, match3;
- int port4;
- unsigned char mask4, match4;
- int port_monitor;
-};
-
-#if defined(__alpha__) && !defined(CONFIG_PCI)
-/*
- * Digital did something really horribly wrong with the OUT1 and OUT2
- * lines on at least some ALPHA's. The failure mode is that if either
- * is cleared, the machine locks up with endless interrupts.
- *
- * This is still used by arch/mips/au1000/common/serial.c for some weird
- * reason (mips != alpha!)
- */
-#define ALPHA_KLUDGE_MCR (UART_MCR_OUT2 | UART_MCR_OUT1)
-#elif defined(CONFIG_SBC8560)
-/*
- * WindRiver did something similarly broken on their SBC8560 board. The
- * UART tristates its IRQ output while OUT2 is clear, but they pulled
- * the interrupt line _up_ instead of down, so if we register the IRQ
- * while the UART is in that state, we die in an IRQ storm. */
-#define ALPHA_KLUDGE_MCR (UART_MCR_OUT2)
-#else
-#define ALPHA_KLUDGE_MCR 0
-#endif
-
-#endif /* _LINUX_SERIAL_H */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index c91ace70c21d..f51bf2e70c69 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -210,6 +210,8 @@
/* Atheros AR933X SoC */
#define PORT_AR933X 99
+/* Energy Micro efm32 SoC */
+#define PORT_EFMUART 100
#ifdef __KERNEL__
@@ -381,6 +383,16 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
+static inline int serial_port_in(struct uart_port *up, int offset)
+{
+ return up->serial_in(up, offset);
+}
+
+static inline void serial_port_out(struct uart_port *up, int offset, int value)
+{
+ up->serial_out(up, offset, value);
+}
+
/*
* This is the state information which is persistent across opens.
*/
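The new serial_port_in()/serial_port_out() accessors spare drivers from open-coding up->serial_in(up, offset). A hedged 8250-flavoured sketch, using register names from <linux/serial_reg.h>:

    #include <linux/serial_core.h>
    #include <linux/serial_reg.h>

    static void drain_rx_fifo(struct uart_port *port)
    {
            /* read data for as long as the line-status register has some */
            while (serial_port_in(port, UART_LSR) & UART_LSR_DR)
                    (void)serial_port_in(port, UART_RX);
    }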
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e1b005918bbb..d3d5fa54f25e 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,15 +99,18 @@ long srcu_batches_completed(struct srcu_struct *sp);
* power mode. This way other CPUs that have started a grace period can
* observe our extended quiescent state. Otherwise we would delay any
* grace period as long as we run in the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
*/
static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
- if (rcu_is_cpu_idle())
- return 0;
-
if (!debug_lockdep_rcu_enabled())
return 1;
-
+ if (rcu_is_cpu_idle())
+ return 0;
+ if (!rcu_lockdep_current_cpu_online())
+ return 0;
return lock_is_held(&sp->dep_map);
}
@@ -169,6 +172,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
int retval = __srcu_read_lock(sp);
rcu_lock_acquire(&(sp)->dep_map);
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "srcu_read_lock() used illegally while idle");
return retval;
}
@@ -182,6 +187,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__releases(sp)
{
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "srcu_read_unlock() used illegally while idle");
rcu_lock_release(&(sp)->dep_map);
__srcu_read_unlock(sp, idx);
}
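The new asserts fire when an SRCU read-side critical section is entered from the idle loop, where RCU treats the CPU as being in an extended quiescent state. The read-side pairing they police looks like the sketch below, with my_srcu as a hypothetical domain initialized elsewhere via init_srcu_struct():

    #include <linux/srcu.h>

    static struct srcu_struct my_srcu;

    static void reader(void)
    {
            int idx;

            idx = srcu_read_lock(&my_srcu);         /* illegal while idle */
            /* ... dereference SRCU-protected pointers here ... */
            srcu_read_unlock(&my_srcu, idx);
    }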
diff --git a/include/linux/static_key.h b/include/linux/static_key.h
new file mode 100644
index 000000000000..27bd3f8a0857
--- /dev/null
+++ b/include/linux/static_key.h
@@ -0,0 +1 @@
+#include <linux/jump_label.h>
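This one-line header lets callers write #include <linux/static_key.h> while the implementation still lives in jump_label.h; the tracepoint.h and net/sock.h hunks below already switch over. A hedged sketch of the renamed API it exposes, with my_key and do_slow_path() as made-up names:

    #include <linux/static_key.h>

    static struct static_key my_key = STATIC_KEY_INIT_FALSE;

    static void hot_path(void)
    {
            /* compiles to a patched-out branch until someone calls
             * static_key_slow_inc(&my_key) */
            if (static_key_false(&my_key))
                    do_slow_path();
    }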
diff --git a/include/linux/sunserialcore.h b/include/linux/sunserialcore.h
new file mode 100644
index 000000000000..68e7430bb0fe
--- /dev/null
+++ b/include/linux/sunserialcore.h
@@ -0,0 +1,33 @@
+/* sunserialcore.h
+ *
+ * Generic SUN serial/kbd/ms layer. Based entirely
+ * upon drivers/sbus/char/sunserial.h which is:
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ *
+ * Port to new UART layer is:
+ *
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SERIAL_SUN_H
+#define _SERIAL_SUN_H
+
+/* Serial keyboard defines for L1-A processing... */
+#define SUNKBD_RESET 0xff
+#define SUNKBD_L1 0x01
+#define SUNKBD_UP 0x80
+#define SUNKBD_A 0x4d
+
+extern unsigned int suncore_mouse_baud_cflag_next(unsigned int, int *);
+extern int suncore_mouse_baud_detection(unsigned char, int);
+
+extern int sunserial_register_minors(struct uart_driver *, int);
+extern void sunserial_unregister_minors(struct uart_driver *, int);
+
+extern int sunserial_console_match(struct console *, struct device_node *,
+ struct uart_driver *, int, bool);
+extern void sunserial_console_termios(struct console *,
+ struct device_node *);
+
+#endif /* !(_SERIAL_SUN_H) */
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
new file mode 100644
index 000000000000..2739ccb69571
--- /dev/null
+++ b/include/linux/sys_soc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#ifndef __SOC_BUS_H
+#define __SOC_BUS_H
+
+#include <linux/device.h>
+
+struct soc_device_attribute {
+ const char *machine;
+ const char *family;
+ const char *revision;
+ const char *soc_id;
+};
+
+/**
+ * soc_device_register - register SoC as a device
+ * @soc_plat_dev_attr: Attributes passed from platform to be attributed to a SoC
+ */
+struct soc_device *soc_device_register(
+ struct soc_device_attribute *soc_plat_dev_attr);
+
+/**
+ * soc_device_unregister - unregister SoC device
+ * @dev: SoC device to be unregistered
+ */
+void soc_device_unregister(struct soc_device *soc_dev);
+
+/**
+ * soc_device_to_device - helper function to fetch struct device
+ * @soc: Previously registered SoC device container
+ */
+struct device *soc_device_to_device(struct soc_device *soc);
+
+#endif /* __SOC_BUS_H */
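The sys_soc API above is self-contained: a platform fills in a soc_device_attribute, registers it, and can then hang its own sysfs attributes off the returned device. A hedged sketch in which the attribute strings are placeholders:

    #include <linux/err.h>
    #include <linux/sys_soc.h>

    static struct soc_device_attribute my_soc = {
            .machine  = "example-board",
            .family   = "example-soc",
            .revision = "1.0",
            .soc_id   = "0x0000",
    };

    static int __init my_soc_init(void)
    {
            struct soc_device *soc = soc_device_register(&my_soc);

            if (IS_ERR(soc))
                    return PTR_ERR(soc);
            /* parent extra attributes on soc_device_to_device(soc) */
            return 0;
    }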
diff --git a/include/linux/timex.h b/include/linux/timex.h
index aa60fe7b6ed6..b75e1864ed19 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -234,23 +234,9 @@ struct timex {
extern unsigned long tick_usec; /* USER_HZ period (usec) */
extern unsigned long tick_nsec; /* ACTHZ period (nsec) */
-/*
- * phase-lock loop variables
- */
-extern int time_status; /* clock synchronization status bits */
-
extern void ntp_init(void);
extern void ntp_clear(void);
-/**
- * ntp_synced - Returns 1 if the NTP status is not UNSYNC
- *
- */
-static inline int ntp_synced(void)
-{
- return !(time_status & STA_UNSYNC);
-}
-
/* Required to safely shift negative values */
#define shift_right(x, s) ({ \
__typeof__(x) __x = (x); \
@@ -264,10 +250,9 @@ static inline int ntp_synced(void)
#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
-extern u64 tick_length;
+extern u64 ntp_tick_length(void);
extern void second_overflow(void);
-extern void update_ntp_one_tick(void);
extern int do_adjtimex(struct timex *);
extern void hardpps(const struct timespec *, const struct timespec *);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index df0a779c1bbd..bd96ecd0e05c 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -17,7 +17,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
struct module;
struct tracepoint;
@@ -29,7 +29,7 @@ struct tracepoint_func {
struct tracepoint {
const char *name; /* Tracepoint name */
- struct jump_label_key key;
+ struct static_key key;
void (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;
@@ -114,7 +114,7 @@ static inline void tracepoint_synchronize_unregister(void)
* as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
* "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
*/
-#define __DO_TRACE(tp, proto, args, cond) \
+#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \
do { \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
@@ -122,6 +122,7 @@ static inline void tracepoint_synchronize_unregister(void)
\
if (!(cond)) \
return; \
+ prercu; \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
@@ -132,6 +133,7 @@ static inline void tracepoint_synchronize_unregister(void)
} while ((++it_func_ptr)->func); \
} \
rcu_read_unlock_sched_notrace(); \
+ postrcu; \
} while (0)
/*
@@ -139,15 +141,25 @@ static inline void tracepoint_synchronize_unregister(void)
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
*/
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
{ \
+ if (static_key_false(&__tracepoint_##name.key)) \
+ __DO_TRACE(&__tracepoint_##name, \
+ TP_PROTO(data_proto), \
+ TP_ARGS(data_args), \
+ TP_CONDITION(cond),,); \
+ } \
+ static inline void trace_##name##_rcuidle(proto) \
+ { \
if (static_branch(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
- TP_CONDITION(cond)); \
+ TP_CONDITION(cond), \
+ rcu_idle_exit(), \
+ rcu_idle_enter()); \
} \
static inline int \
register_trace_##name(void (*probe)(data_proto), void *data) \
@@ -176,7 +188,7 @@ static inline void tracepoint_synchronize_unregister(void)
__attribute__((section("__tracepoints_strings"))) = #name; \
struct tracepoint __tracepoint_##name \
__attribute__((section("__tracepoints"))) = \
- { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
+ { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
static struct tracepoint * const __tracepoint_ptr_##name __used \
__attribute__((section("__tracepoints_ptrs"))) = \
&__tracepoint_##name;
@@ -190,9 +202,11 @@ static inline void tracepoint_synchronize_unregister(void)
EXPORT_SYMBOL(__tracepoint_##name)
#else /* !CONFIG_TRACEPOINTS */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
static inline void trace_##name(proto) \
{ } \
+ static inline void trace_##name##_rcuidle(proto) \
+ { } \
static inline int \
register_trace_##name(void (*probe)(data_proto), \
void *data) \
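Each tracepoint now gains a trace_<name>_rcuidle() twin whose __DO_TRACE() is bracketed by rcu_idle_exit()/rcu_idle_enter(), making it legal to trace from contexts where RCU is not watching, such as the idle loop. A hedged sketch of a caller (enter_idle_state() is a made-up wrapper; the power events appear in a later hunk):

    #include <trace/events/power.h>

    static void enter_idle_state(u64 state, u64 cpu)
    {
            /* plain trace_power_start() would trip RCU lockdep here */
            trace_power_start_rcuidle(POWER_CSTATE, state, cpu);
            /* ... architecture-specific idle entry ... */
    }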
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5dbb3cb05a82..a91ff403b3bf 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -52,6 +52,7 @@
* hardcoded at present.)
*/
#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */
+#define NR_UNIX98_PTY_RESERVE 1024 /* Default reserve for main devpts */
#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */
/*
@@ -480,10 +481,11 @@ extern void free_tty_struct(struct tty_struct *tty);
extern void initialize_tty_struct(struct tty_struct *tty,
struct tty_driver *driver, int idx);
extern void deinitialize_tty_struct(struct tty_struct *tty);
-extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
- int first_ok);
+extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
extern int tty_init_termios(struct tty_struct *tty);
+extern int tty_standard_install(struct tty_driver *driver,
+ struct tty_struct *tty);
extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 5cf685086dd3..6e6dbb7447b6 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -50,6 +50,8 @@
* Note that tty_shutdown() is not called if ops->shutdown is defined.
* This means one is responsible for calling ops->remove (e.g.
* via tty_driver_remove_tty) and releasing tty->termios.
+ * Note that this hook may be called from *all* the contexts where one
+ * uses tty refcounting (e.g. tty_port_tty_get).
*
*
* void (*cleanup)(struct tty_struct * tty);
@@ -234,6 +236,7 @@
* if provided (otherwise EINVAL will be returned).
*/
+#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/cdev.h>
@@ -298,7 +301,6 @@ struct tty_driver {
int name_base; /* offset of printed name */
int major; /* major device number */
int minor_start; /* start of minor device number */
- int minor_num; /* number of *possible* devices */
int num; /* number of devices allocated */
short type; /* type of tty driver */
short subtype; /* subtype of tty driver */
@@ -324,7 +326,7 @@ struct tty_driver {
extern struct list_head tty_drivers;
-extern struct tty_driver *alloc_tty_driver(int lines);
+extern struct tty_driver *__alloc_tty_driver(int lines, struct module *owner);
extern void put_tty_driver(struct tty_driver *driver);
extern void tty_set_operations(struct tty_driver *driver,
const struct tty_operations *op);
@@ -332,6 +334,8 @@ extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
extern void tty_driver_kref_put(struct tty_driver *driver);
+#define alloc_tty_driver(lines) __alloc_tty_driver(lines, THIS_MODULE)
+
static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
{
kref_get(&d->kref);
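Making alloc_tty_driver() a macro means THIS_MODULE expands in the calling driver rather than inside the tty core, so the core can record and pin the owning module. Call sites are unchanged; a hedged sketch:

    #include <linux/tty_driver.h>

    static struct tty_driver *example_setup(void)
    {
            /* owner is recorded as this driver's module automatically */
            return alloc_tty_driver(4);
    }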
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index c2164fad0083..e33d77f15bda 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -167,4 +167,28 @@ extern int unregister_vt_notifier(struct notifier_block *nb);
extern void hide_boot_cursor(bool hide);
+/* keyboard provided interfaces */
+extern int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm);
+extern int vt_do_kdskbmode(int console, unsigned int arg);
+extern int vt_do_kdskbmeta(int console, unsigned int arg);
+extern int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc,
+ int perm);
+extern int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe,
+ int perm, int console);
+extern int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb,
+ int perm);
+extern int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm);
+extern int vt_do_kdgkbmode(int console);
+extern int vt_do_kdgkbmeta(int console);
+extern void vt_reset_unicode(int console);
+extern int vt_get_shift_state(void);
+extern void vt_reset_keyboard(int console);
+extern int vt_get_leds(int console, int flag);
+extern int vt_get_kbd_mode_bit(int console, int bit);
+extern void vt_set_kbd_mode_bit(int console, int bit);
+extern void vt_clr_kbd_mode_bit(int console, int bit);
+extern void vt_set_led_state(int console, int leds);
+extern void vt_kbd_con_start(int console);
+extern void vt_kbd_con_stop(int console);
+
#endif /* _VT_KERN_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a9ce45e8501c..7d9a9e990ce6 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -157,7 +157,7 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -170,7 +170,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
-#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL)
+#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
+#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
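__wake_up_locked() now takes an nr_exclusive count, which lets the lock-held variants distinguish waking one exclusive waiter (nr = 1) from waking them all (nr = 0). These helpers assume the caller already holds q->lock; a hedged sketch with signal_waiters() as a made-up name:

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static void signal_waiters(wait_queue_head_t *q, bool everyone)
    {
            unsigned long flags;

            spin_lock_irqsave(&q->lock, flags);
            /* ... update the condition the waiters test, same lock ... */
            if (everyone)
                    wake_up_all_locked(q);  /* nr_exclusive = 0 */
            else
                    wake_up_locked(q);      /* nr_exclusive = 1 */
            spin_unlock_irqrestore(&q->lock, flags);
    }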
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index eb8b9f15f2e0..af155450cabb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -289,12 +289,16 @@ enum {
*
* system_freezable_wq is equivalent to system_wq except that it's
* freezable.
+ *
+ * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
+ * it's freezable.
*/
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
+extern struct workqueue_struct *system_nrt_freezable_wq;
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
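system_nrt_freezable_wq fills out the matrix: non-reentrant like system_nrt_wq, frozen across suspend like system_freezable_wq. A hedged sketch of a user, with my_work/my_fn as illustration names:

    #include <linux/workqueue.h>

    static void my_fn(struct work_struct *work)
    {
            /* never runs concurrently with itself on another CPU, and
             * never runs while the system is frozen for suspend */
    }
    static DECLARE_WORK(my_work, my_fn);

    static void kick(void)
    {
            queue_work(system_nrt_freezable_wq, &my_work);
    }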
diff --git a/include/net/sock.h b/include/net/sock.h
index 91c1c8baf020..dcde2d9268cd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -55,7 +55,7 @@
#include <linux/uaccess.h>
#include <linux/memcontrol.h>
#include <linux/res_counter.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
@@ -924,13 +924,13 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
#endif /* SOCK_REFCNT_DEBUG */
#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
-extern struct jump_label_key memcg_socket_limit_enabled;
+extern struct static_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
struct cg_proto *cg_proto)
{
return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
}
-#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
#else
#define mem_cgroup_sockets_enabled 0
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 1bcc2a8c00e2..14b38940062b 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -151,6 +151,8 @@ enum {
events get removed */
static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
static inline void trace_power_end(u64 cpuid) {};
+static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
+static inline void trace_power_end_rcuidle(u64 cpuid) {};
static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
diff --git a/include/trace/events/printk.h b/include/trace/events/printk.h
new file mode 100644
index 000000000000..94ec79cc011a
--- /dev/null
+++ b/include/trace/events/printk.h
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PRINTK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(console,
+ TP_PROTO(const char *log_buf, unsigned start, unsigned end,
+ unsigned log_buf_len),
+
+ TP_ARGS(log_buf, start, end, log_buf_len),
+
+ TP_CONDITION(start != end),
+
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, end - start + 1)
+ ),
+
+ TP_fast_assign(
+ if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
+ memcpy(__get_dynamic_array(msg),
+ log_buf + (start & (log_buf_len - 1)),
+ log_buf_len - (start & (log_buf_len - 1)));
+ memcpy((char *)__get_dynamic_array(msg) +
+ log_buf_len - (start & (log_buf_len - 1)),
+ log_buf, end & (log_buf_len - 1));
+ } else
+ memcpy(__get_dynamic_array(msg),
+ log_buf + (start & (log_buf_len - 1)),
+ end - start);
+ ((char *)__get_dynamic_array(msg))[end - start] = 0;
+ ),
+
+ TP_printk("%s", __get_str(msg))
+);
+#endif /* _TRACE_PRINTK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
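The TP_fast_assign() above copies a byte range out of the printk ring buffer: start and end are free-running indices masked by the power-of-two log_buf_len, and when the masked start lands above the masked end the record wraps and must be copied in two pieces. The same logic restated as a standalone helper, for clarity only:

    #include <linux/string.h>

    /* copy ring[start..end) into out; len must be a power of two */
    static void ring_copy(char *out, const char *ring, unsigned int len,
                          unsigned int start, unsigned int end)
    {
            unsigned int s = start & (len - 1);
            unsigned int e = end & (len - 1);

            if (s > e) {                    /* record wraps past the end */
                    memcpy(out, ring + s, len - s);
                    memcpy(out + (len - s), ring, e);
            } else {
                    memcpy(out, ring + s, end - start);
            }
            out[end - start] = '\0';
    }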
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d2d88bed891b..337099783f37 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -313,19 +313,22 @@ TRACE_EVENT(rcu_prep_idle,
/*
* Tracepoint for the registration of a single RCU callback function.
* The first argument is the type of RCU, the second argument is
- * a pointer to the RCU callback itself, and the third element is the
- * new RCU callback queue length for the current CPU.
+ * a pointer to the RCU callback itself, the third element is the
+ * number of lazy callbacks queued, and the fourth element is the
+ * total number of callbacks queued.
*/
TRACE_EVENT(rcu_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+ long qlen),
- TP_ARGS(rcuname, rhp, qlen),
+ TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
TP_STRUCT__entry(
__field(char *, rcuname)
__field(void *, rhp)
__field(void *, func)
+ __field(long, qlen_lazy)
__field(long, qlen)
),
@@ -333,11 +336,13 @@ TRACE_EVENT(rcu_callback,
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->func = rhp->func;
+ __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
),
- TP_printk("%s rhp=%p func=%pf %ld",
- __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen)
+ TP_printk("%s rhp=%p func=%pf %ld/%ld",
+ __entry->rcuname, __entry->rhp, __entry->func,
+ __entry->qlen_lazy, __entry->qlen)
);
/*
@@ -345,20 +350,21 @@ TRACE_EVENT(rcu_callback,
* kfree() form. The first argument is the RCU type, the second argument
* is a pointer to the RCU callback, the third argument is the offset
* of the callback within the enclosing RCU-protected data structure,
- * and the fourth argument is the new RCU callback queue length for the
- * current CPU.
+ * the fourth argument is the number of lazy callbacks queued, and the
+ * fifth argument is the total number of callbacks queued.
*/
TRACE_EVENT(rcu_kfree_callback,
TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
- long qlen),
+ long qlen_lazy, long qlen),
- TP_ARGS(rcuname, rhp, offset, qlen),
+ TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
TP_STRUCT__entry(
__field(char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
+ __field(long, qlen_lazy)
__field(long, qlen)
),
@@ -366,41 +372,45 @@ TRACE_EVENT(rcu_kfree_callback,
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->offset = offset;
+ __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
),
- TP_printk("%s rhp=%p func=%ld %ld",
+ TP_printk("%s rhp=%p func=%ld %ld/%ld",
__entry->rcuname, __entry->rhp, __entry->offset,
- __entry->qlen)
+ __entry->qlen_lazy, __entry->qlen)
);
/*
* Tracepoint for marking the beginning rcu_do_batch, performed to start
* RCU callback invocation. The first argument is the RCU flavor,
- * the second is the total number of callbacks (including those that
- * are not yet ready to be invoked), and the third argument is the
- * current RCU-callback batch limit.
+ * the second is the number of lazy callbacks queued, the third is
+ * the total number of callbacks queued, and the fourth argument is
+ * the current RCU-callback batch limit.
*/
TRACE_EVENT(rcu_batch_start,
- TP_PROTO(char *rcuname, long qlen, int blimit),
+ TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
- TP_ARGS(rcuname, qlen, blimit),
+ TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
TP_STRUCT__entry(
__field(char *, rcuname)
+ __field(long, qlen_lazy)
__field(long, qlen)
__field(int, blimit)
),
TP_fast_assign(
__entry->rcuname = rcuname;
+ __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
__entry->blimit = blimit;
),
- TP_printk("%s CBs=%ld bl=%d",
- __entry->rcuname, __entry->qlen, __entry->blimit)
+ TP_printk("%s CBs=%ld/%ld bl=%d",
+ __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
+ __entry->blimit)
);
/*
@@ -531,16 +541,21 @@ TRACE_EVENT(rcu_torture_read,
#else /* #ifdef CONFIG_RCU_TRACE */
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+ qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+ grplo, grphi, gp_tasks) do { } \
+ while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
#define trace_rcu_prep_idle(reason) do { } while (0)
-#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
-#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
-#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
+#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
+ do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
+ do { } while (0)
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e33ed1bfa113..fbc7b1ad929b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/tracepoint.h>
+#include <linux/binfmts.h>
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
@@ -276,6 +277,32 @@ TRACE_EVENT(sched_process_fork,
);
/*
+ * Tracepoint for exec:
+ */
+TRACE_EVENT(sched_process_exec,
+
+ TP_PROTO(struct task_struct *p, pid_t old_pid,
+ struct linux_binprm *bprm),
+
+ TP_ARGS(p, old_pid, bprm),
+
+ TP_STRUCT__entry(
+ __string( filename, bprm->filename )
+ __field( pid_t, pid )
+ __field( pid_t, old_pid )
+ ),
+
+ TP_fast_assign(
+ __assign_str(filename, bprm->filename);
+ __entry->pid = p->pid;
+ __entry->old_pid = old_pid;
+ ),
+
+ TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
+ __entry->pid, __entry->old_pid)
+);
+
+/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index 17df43464df0..39a8a430d90f 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -23,11 +23,23 @@
} \
} while (0)
+#ifndef TRACE_HEADER_MULTI_READ
+enum {
+ TRACE_SIGNAL_DELIVERED,
+ TRACE_SIGNAL_IGNORED,
+ TRACE_SIGNAL_ALREADY_PENDING,
+ TRACE_SIGNAL_OVERFLOW_FAIL,
+ TRACE_SIGNAL_LOSE_INFO,
+};
+#endif
+
/**
* signal_generate - called when a signal is generated
* @sig: signal number
* @info: pointer to struct siginfo
* @task: pointer to struct task_struct
+ * @group: shared or private
+ * @result: TRACE_SIGNAL_*
*
* Current process sends a 'sig' signal to 'task' process with
* 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
@@ -37,9 +49,10 @@
*/
TRACE_EVENT(signal_generate,
- TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
+ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
+ int group, int result),
- TP_ARGS(sig, info, task),
+ TP_ARGS(sig, info, task, group, result),
TP_STRUCT__entry(
__field( int, sig )
@@ -47,6 +60,8 @@ TRACE_EVENT(signal_generate,
__field( int, code )
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
+ __field( int, group )
+ __field( int, result )
),
TP_fast_assign(
@@ -54,11 +69,14 @@ TRACE_EVENT(signal_generate,
TP_STORE_SIGINFO(__entry, info);
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
__entry->pid = task->pid;
+ __entry->group = group;
+ __entry->result = result;
),
- TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
+ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
__entry->sig, __entry->errno, __entry->code,
- __entry->comm, __entry->pid)
+ __entry->comm, __entry->pid, __entry->group,
+ __entry->result)
);
/**
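signal_generate now records both the group flag and a TRACE_SIGNAL_* result code, which is what allows the two overflow-specific events deleted below to go away. A hedged sketch of how a send path might report its outcome (report_send() is a made-up wrapper; the real callers live in kernel/signal.c):

    #include <trace/events/signal.h>

    static void report_send(int sig, struct siginfo *info,
                            struct task_struct *t, int group, bool queued)
    {
            trace_signal_generate(sig, info, t, group,
                                  queued ? TRACE_SIGNAL_DELIVERED
                                         : TRACE_SIGNAL_OVERFLOW_FAIL);
    }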
@@ -101,65 +119,6 @@ TRACE_EVENT(signal_deliver,
__entry->sa_handler, __entry->sa_flags)
);
-DECLARE_EVENT_CLASS(signal_queue_overflow,
-
- TP_PROTO(int sig, int group, struct siginfo *info),
-
- TP_ARGS(sig, group, info),
-
- TP_STRUCT__entry(
- __field( int, sig )
- __field( int, group )
- __field( int, errno )
- __field( int, code )
- ),
-
- TP_fast_assign(
- __entry->sig = sig;
- __entry->group = group;
- TP_STORE_SIGINFO(__entry, info);
- ),
-
- TP_printk("sig=%d group=%d errno=%d code=%d",
- __entry->sig, __entry->group, __entry->errno, __entry->code)
-);
-
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
-
- TP_PROTO(int sig, int group, struct siginfo *info),
-
- TP_ARGS(sig, group, info)
-);
-
-/**
- * signal_lose_info - called when siginfo is lost
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo
- * queue is overflow.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of non-RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
-
- TP_PROTO(int sig, int group, struct siginfo *info),
-
- TP_ARGS(sig, group, info)
-);
-
#endif /* _TRACE_SIGNAL_H */
/* This part must be outside protection */