Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/bpf/Makefile | 3
-rw-r--r--  kernel/bpf/btf.c | 16
-rw-r--r--  kernel/bpf/cgroup.c | 27
-rw-r--r--  kernel/bpf/devmap.c | 339
-rw-r--r--  kernel/bpf/inode.c | 92
-rw-r--r--  kernel/bpf/syscall.c | 21
-rw-r--r--  kernel/bpf/sysfs_btf.c | 46
-rw-r--r--  kernel/bpf/verifier.c | 11
-rw-r--r--  kernel/bpf/xskmap.c | 133
-rw-r--r--  kernel/cgroup/cgroup-v1.c | 27
-rw-r--r--  kernel/cgroup/cgroup.c | 4
-rw-r--r--  kernel/debug/debug_core.c | 36
-rw-r--r--  kernel/debug/kdb/kdb_main.c | 2
-rw-r--r--  kernel/dma/Kconfig | 12
-rw-r--r--  kernel/dma/coherent.c | 13
-rw-r--r--  kernel/dma/mapping.c | 113
-rw-r--r--  kernel/dma/remap.c | 51
-rw-r--r--  kernel/dma/swiotlb.c | 3
-rw-r--r--  kernel/elfcore.c | 1
-rw-r--r--  kernel/events/uprobes.c | 81
-rw-r--r--  kernel/extable.c | 11
-rw-r--r--  kernel/fork.c | 17
-rw-r--r--  kernel/gcov/Kconfig | 2
-rw-r--r--  kernel/irq/pm.c | 20
-rw-r--r--  kernel/kexec_core.c | 2
-rw-r--r--  kernel/kprobes.c | 56
-rw-r--r--  kernel/livepatch/core.c | 1
-rw-r--r--  kernel/module.c | 76
-rw-r--r--  kernel/padata.c | 307
-rw-r--r--  kernel/panic.c | 42
-rw-r--r--  kernel/power/autosleep.c | 2
-rw-r--r--  kernel/power/main.c | 99
-rw-r--r--  kernel/power/qos.c | 48
-rw-r--r--  kernel/power/suspend.c | 65
-rw-r--r--  kernel/power/wakelock.c | 32
-rw-r--r--  kernel/printk/braille.c | 15
-rw-r--r--  kernel/printk/printk.c | 24
-rw-r--r--  kernel/resource.c | 49
-rw-r--r--  kernel/sched/core.c | 19
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 7
-rw-r--r--  kernel/sched/idle.c | 8
-rw-r--r--  kernel/sysctl.c | 6
-rw-r--r--  kernel/time/alarmtimer.c | 2
-rw-r--r--  kernel/trace/Kconfig | 3
-rw-r--r--  kernel/trace/fgraph.c | 4
-rw-r--r--  kernel/trace/ftrace.c | 6
-rw-r--r--  kernel/trace/trace.c | 14
-rw-r--r--  kernel/trace/trace.h | 1
-rw-r--r--  kernel/trace/trace_dynevent.c | 10
-rw-r--r--  kernel/trace/trace_dynevent.h | 7
-rw-r--r--  kernel/trace/trace_events_hist.c | 25
-rw-r--r--  kernel/trace/trace_kprobe.c | 269
-rw-r--r--  kernel/trace/trace_output.c | 6
-rw-r--r--  kernel/trace/trace_probe.c | 178
-rw-r--r--  kernel/trace/trace_probe.h | 68
-rw-r--r--  kernel/trace/trace_stack.c | 112
-rw-r--r--  kernel/trace/trace_uprobe.c | 300
-rw-r--r--  kernel/workqueue.c | 25
59 files changed, 2144 insertions, 827 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 48c5376d290a..25f9d83d1bbf 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -127,7 +127,7 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
$(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz
-cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@
+ cmd_genikh = $(BASH) $(srctree)/kernel/gen_kheaders.sh $@
$(obj)/kheaders_data.tar.xz: FORCE
$(call cmd,genikh)
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 29d781061cd5..e1d9adb212f9 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -22,3 +22,6 @@ obj-$(CONFIG_CGROUP_BPF) += cgroup.o
ifeq ($(CONFIG_INET),y)
obj-$(CONFIG_BPF_SYSCALL) += reuseport_array.o
endif
+ifeq ($(CONFIG_SYSFS),y)
+obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o
+endif
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 5fcc7a17eb5a..adb3adcebe3c 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -195,8 +195,8 @@
i < btf_type_vlen(struct_type); \
i++, member++)
-static DEFINE_IDR(btf_idr);
-static DEFINE_SPINLOCK(btf_idr_lock);
+DEFINE_IDR(btf_idr);
+DEFINE_SPINLOCK(btf_idr_lock);
struct btf {
void *data;
@@ -3376,6 +3376,15 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
}
+#ifdef CONFIG_PROC_FS
+static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
+{
+ const struct btf *btf = filp->private_data;
+
+ seq_printf(m, "btf_id:\t%u\n", btf->id);
+}
+#endif
+
static int btf_release(struct inode *inode, struct file *filp)
{
btf_put(filp->private_data);
@@ -3383,6 +3392,9 @@ static int btf_release(struct inode *inode, struct file *filp)
}
const struct file_operations btf_fops = {
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = bpf_btf_show_fdinfo,
+#endif
.release = btf_release,
};
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 0a00eaca6fae..ddd8addcdb5c 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -964,7 +964,6 @@ static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
return -ENOMEM;
ctx->optval_end = ctx->optval + max_optlen;
- ctx->optlen = max_optlen;
return 0;
}
@@ -984,7 +983,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
.level = *level,
.optname = *optname,
};
- int ret;
+ int ret, max_optlen;
/* Opportunistic check to see whether we have any BPF program
* attached to the hook so we don't waste time allocating
@@ -994,10 +993,18 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
return 0;
- ret = sockopt_alloc_buf(&ctx, *optlen);
+ /* Allocate a bit more than the initial user buffer for
+ * BPF program. The canonical use case is overriding
+ * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
+ */
+ max_optlen = max_t(int, 16, *optlen);
+
+ ret = sockopt_alloc_buf(&ctx, max_optlen);
if (ret)
return ret;
+ ctx.optlen = *optlen;
+
if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
ret = -EFAULT;
goto out;
@@ -1016,7 +1023,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
if (ctx.optlen == -1) {
/* optlen set to -1, bypass kernel */
ret = 1;
- } else if (ctx.optlen > *optlen || ctx.optlen < -1) {
+ } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
/* optlen is out of bounds */
ret = -EFAULT;
} else {
@@ -1063,6 +1070,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
if (ret)
return ret;
+ ctx.optlen = max_optlen;
+
if (!retval) {
/* If kernel getsockopt finished successfully,
* copy whatever was returned to the user back
@@ -1325,6 +1334,7 @@ static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
struct bpf_prog *prog, u32 *target_size)
{
struct bpf_insn *insn = insn_buf;
+ u32 read_size;
switch (si->off) {
case offsetof(struct bpf_sysctl, write):
@@ -1356,7 +1366,9 @@ static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
treg, si->dst_reg,
offsetof(struct bpf_sysctl_kern, ppos));
*insn++ = BPF_STX_MEM(
- BPF_SIZEOF(u32), treg, si->src_reg, 0);
+ BPF_SIZEOF(u32), treg, si->src_reg,
+ bpf_ctx_narrow_access_offset(
+ 0, sizeof(u32), sizeof(loff_t)));
*insn++ = BPF_LDX_MEM(
BPF_DW, treg, si->dst_reg,
offsetof(struct bpf_sysctl_kern, tmp_reg));
@@ -1365,8 +1377,11 @@ static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
si->dst_reg, si->src_reg,
offsetof(struct bpf_sysctl_kern, ppos));
+ read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
*insn++ = BPF_LDX_MEM(
- BPF_SIZE(si->code), si->dst_reg, si->dst_reg, 0);
+ BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
+ bpf_ctx_narrow_access_offset(
+ 0, read_size, sizeof(loff_t)));
}
*target_size = sizeof(u32);
break;
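
The max_t(int, 16, *optlen) sizing above exists so a BPF program can replace a short option value with a longer one; the comment names the canonical case of rewriting TCP_CONGESTION "nv" to "cubic". A minimal sketch of such a cgroup/setsockopt program is shown below. It is not part of this patch and assumes libbpf's SEC() conventions; the SOL_TCP/TCP_CONGESTION values are defined locally for illustration.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    #define SOL_TCP		6	/* normally from netinet/tcp.h */
    #define TCP_CONGESTION	13

    SEC("cgroup/setsockopt")
    int override_cc(struct bpf_sockopt *ctx)
    {
    	char *optval = ctx->optval;
    	char *optval_end = ctx->optval_end;
    	char cubic[] = "cubic";

    	if (ctx->level != SOL_TCP || ctx->optname != TCP_CONGESTION)
    		return 1;		/* let the kernel handle it unchanged */

    	/* The kernel allocated at least 16 bytes, so this bounds check can
    	 * succeed even when the user passed a shorter string. */
    	if (optval + sizeof(cubic) > optval_end)
    		return 0;		/* reject */

    	__builtin_memcpy(optval, cubic, sizeof(cubic));
    	ctx->optlen = sizeof(cubic);
    	return 1;
    }

    char _license[] SEC("license") = "GPL";

Because the program may legitimately grow the value up to that 16-byte floor, the out-of-bounds check later in the hunk compares ctx.optlen against max_optlen rather than the original *optlen.
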
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d83cf8ccc872..d27f3b60ff6d 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -37,6 +37,12 @@
* notifier hook walks the map we know that new dev references can not be
* added by the user because core infrastructure ensures dev_get_by_index()
* calls will fail at this point.
+ *
+ * The devmap_hash type is a map type which interprets keys as ifindexes and
+ * indexes these using a hashmap. This allows maps that use ifindex as key to be
+ * densely packed instead of having holes in the lookup array for unused
+ * ifindexes. The setup and packet enqueue/send code is shared between the two
+ * types of devmap; only the lookup and insertion is different.
*/
#include <linux/bpf.h>
#include <net/xdp.h>
@@ -59,10 +65,11 @@ struct xdp_bulk_queue {
struct bpf_dtab_netdev {
struct net_device *dev; /* must be first member, due to tracepoint */
+ struct hlist_node index_hlist;
struct bpf_dtab *dtab;
- unsigned int bit;
struct xdp_bulk_queue __percpu *bulkq;
struct rcu_head rcu;
+ unsigned int idx; /* keep track of map index for tracepoint */
};
struct bpf_dtab {
@@ -70,33 +77,45 @@ struct bpf_dtab {
struct bpf_dtab_netdev **netdev_map;
struct list_head __percpu *flush_list;
struct list_head list;
+
+ /* these are only used for DEVMAP_HASH type maps */
+ struct hlist_head *dev_index_head;
+ spinlock_t index_lock;
+ unsigned int items;
+ u32 n_buckets;
};
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
-static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
+static struct hlist_head *dev_map_create_hash(unsigned int entries)
+{
+ int i;
+ struct hlist_head *hash;
+
+ hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
+ if (hash != NULL)
+ for (i = 0; i < entries; i++)
+ INIT_HLIST_HEAD(&hash[i]);
+
+ return hash;
+}
+
+static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
- struct bpf_dtab *dtab;
int err, cpu;
u64 cost;
- if (!capable(CAP_NET_ADMIN))
- return ERR_PTR(-EPERM);
-
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Lookup returns a pointer straight to dev->ifindex, so make sure the
* verifier prevents writes from the BPF side
*/
attr->map_flags |= BPF_F_RDONLY_PROG;
- dtab = kzalloc(sizeof(*dtab), GFP_USER);
- if (!dtab)
- return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&dtab->map, attr);
@@ -104,12 +123,18 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
cost += sizeof(struct list_head) * num_possible_cpus();
+ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
+
+ if (!dtab->n_buckets) /* Overflow check */
+ return -EINVAL;
+ cost += sizeof(struct hlist_head) * dtab->n_buckets;
+ }
+
/* if map size is larger than memlock limit, reject it */
err = bpf_map_charge_init(&dtab->map.memory, cost);
if (err)
- goto free_dtab;
-
- err = -ENOMEM;
+ return -EINVAL;
dtab->flush_list = alloc_percpu(struct list_head);
if (!dtab->flush_list)
@@ -124,19 +149,48 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
if (!dtab->netdev_map)
goto free_percpu;
- spin_lock(&dev_map_lock);
- list_add_tail_rcu(&dtab->list, &dev_map_list);
- spin_unlock(&dev_map_lock);
+ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
+ if (!dtab->dev_index_head)
+ goto free_map_area;
- return &dtab->map;
+ spin_lock_init(&dtab->index_lock);
+ }
+ return 0;
+
+free_map_area:
+ bpf_map_area_free(dtab->netdev_map);
free_percpu:
free_percpu(dtab->flush_list);
free_charge:
bpf_map_charge_finish(&dtab->map.memory);
-free_dtab:
- kfree(dtab);
- return ERR_PTR(err);
+ return -ENOMEM;
+}
+
+static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_dtab *dtab;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ dtab = kzalloc(sizeof(*dtab), GFP_USER);
+ if (!dtab)
+ return ERR_PTR(-ENOMEM);
+
+ err = dev_map_init_map(dtab, attr);
+ if (err) {
+ kfree(dtab);
+ return ERR_PTR(err);
+ }
+
+ spin_lock(&dev_map_lock);
+ list_add_tail_rcu(&dtab->list, &dev_map_list);
+ spin_unlock(&dev_map_lock);
+
+ return &dtab->map;
}
static void dev_map_free(struct bpf_map *map)
@@ -188,6 +242,7 @@ static void dev_map_free(struct bpf_map *map)
free_percpu(dtab->flush_list);
bpf_map_area_free(dtab->netdev_map);
+ kfree(dtab->dev_index_head);
kfree(dtab);
}
@@ -208,6 +263,70 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
+static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
+ int idx)
+{
+ return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
+}
+
+struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ struct hlist_head *head = dev_map_index_hash(dtab, key);
+ struct bpf_dtab_netdev *dev;
+
+ hlist_for_each_entry_rcu(dev, head, index_hlist)
+ if (dev->idx == key)
+ return dev;
+
+ return NULL;
+}
+
+static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ u32 idx, *next = next_key;
+ struct bpf_dtab_netdev *dev, *next_dev;
+ struct hlist_head *head;
+ int i = 0;
+
+ if (!key)
+ goto find_first;
+
+ idx = *(u32 *)key;
+
+ dev = __dev_map_hash_lookup_elem(map, idx);
+ if (!dev)
+ goto find_first;
+
+ next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
+ struct bpf_dtab_netdev, index_hlist);
+
+ if (next_dev) {
+ *next = next_dev->idx;
+ return 0;
+ }
+
+ i = idx & (dtab->n_buckets - 1);
+ i++;
+
+ find_first:
+ for (; i < dtab->n_buckets; i++) {
+ head = dev_map_index_hash(dtab, i);
+
+ next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ struct bpf_dtab_netdev,
+ index_hlist);
+ if (next_dev) {
+ *next = next_dev->idx;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
bool in_napi_ctx)
{
@@ -235,7 +354,7 @@ static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
out:
bq->count = 0;
- trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
+ trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
sent, drops, bq->dev_rx, dev, err);
bq->dev_rx = NULL;
__list_del_clearprev(&bq->flush_node);
@@ -363,6 +482,15 @@ static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
return dev ? &dev->ifindex : NULL;
}
+static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
+ *(u32 *)key);
+ struct net_device *dev = obj ? obj->dev : NULL;
+
+ return dev ? &dev->ifindex : NULL;
+}
+
static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
if (dev->dev->netdev_ops->ndo_xdp_xmit) {
@@ -412,17 +540,74 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
return 0;
}
-static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
- u64 map_flags)
+static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
- struct net *net = current->nsproxy->net_ns;
+ struct bpf_dtab_netdev *old_dev;
+ int k = *(u32 *)key;
+ unsigned long flags;
+ int ret = -ENOENT;
+
+ spin_lock_irqsave(&dtab->index_lock, flags);
+
+ old_dev = __dev_map_hash_lookup_elem(map, k);
+ if (old_dev) {
+ dtab->items--;
+ hlist_del_init_rcu(&old_dev->index_hlist);
+ call_rcu(&old_dev->rcu, __dev_map_entry_free);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&dtab->index_lock, flags);
+
+ return ret;
+}
+
+static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
+ struct bpf_dtab *dtab,
+ u32 ifindex,
+ unsigned int idx)
+{
gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct bpf_dtab_netdev *dev;
+ struct xdp_bulk_queue *bq;
+ int cpu;
+
+ dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
+ sizeof(void *), gfp);
+ if (!dev->bulkq) {
+ kfree(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_possible_cpu(cpu) {
+ bq = per_cpu_ptr(dev->bulkq, cpu);
+ bq->obj = dev;
+ }
+
+ dev->dev = dev_get_by_index(net, ifindex);
+ if (!dev->dev) {
+ free_percpu(dev->bulkq);
+ kfree(dev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev->idx = idx;
+ dev->dtab = dtab;
+
+ return dev;
+}
+
+static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
+ void *key, void *value, u64 map_flags)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *dev, *old_dev;
u32 ifindex = *(u32 *)value;
- struct xdp_bulk_queue *bq;
u32 i = *(u32 *)key;
- int cpu;
if (unlikely(map_flags > BPF_EXIST))
return -EINVAL;
@@ -434,31 +619,9 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
if (!ifindex) {
dev = NULL;
} else {
- dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
- if (!dev)
- return -ENOMEM;
-
- dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
- sizeof(void *), gfp);
- if (!dev->bulkq) {
- kfree(dev);
- return -ENOMEM;
- }
-
- for_each_possible_cpu(cpu) {
- bq = per_cpu_ptr(dev->bulkq, cpu);
- bq->obj = dev;
- }
-
- dev->dev = dev_get_by_index(net, ifindex);
- if (!dev->dev) {
- free_percpu(dev->bulkq);
- kfree(dev);
- return -EINVAL;
- }
-
- dev->bit = i;
- dev->dtab = dtab;
+ dev = __dev_map_alloc_node(net, dtab, ifindex, i);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
}
/* Use call_rcu() here to ensure rcu critical sections have completed
@@ -472,6 +635,70 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
return 0;
}
+static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
+ u64 map_flags)
+{
+ return __dev_map_update_elem(current->nsproxy->net_ns,
+ map, key, value, map_flags);
+}
+
+static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
+ void *key, void *value, u64 map_flags)
+{
+ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ struct bpf_dtab_netdev *dev, *old_dev;
+ u32 ifindex = *(u32 *)value;
+ u32 idx = *(u32 *)key;
+ unsigned long flags;
+ int err = -EEXIST;
+
+ if (unlikely(map_flags > BPF_EXIST || !ifindex))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dtab->index_lock, flags);
+
+ old_dev = __dev_map_hash_lookup_elem(map, idx);
+ if (old_dev && (map_flags & BPF_NOEXIST))
+ goto out_err;
+
+ dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out_err;
+ }
+
+ if (old_dev) {
+ hlist_del_rcu(&old_dev->index_hlist);
+ } else {
+ if (dtab->items >= dtab->map.max_entries) {
+ spin_unlock_irqrestore(&dtab->index_lock, flags);
+ call_rcu(&dev->rcu, __dev_map_entry_free);
+ return -E2BIG;
+ }
+ dtab->items++;
+ }
+
+ hlist_add_head_rcu(&dev->index_hlist,
+ dev_map_index_hash(dtab, idx));
+ spin_unlock_irqrestore(&dtab->index_lock, flags);
+
+ if (old_dev)
+ call_rcu(&old_dev->rcu, __dev_map_entry_free);
+
+ return 0;
+
+out_err:
+ spin_unlock_irqrestore(&dtab->index_lock, flags);
+ return err;
+}
+
+static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
+ u64 map_flags)
+{
+ return __dev_map_hash_update_elem(current->nsproxy->net_ns,
+ map, key, value, map_flags);
+}
+
const struct bpf_map_ops dev_map_ops = {
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
@@ -482,6 +709,16 @@ const struct bpf_map_ops dev_map_ops = {
.map_check_btf = map_check_no_btf,
};
+const struct bpf_map_ops dev_map_hash_ops = {
+ .map_alloc = dev_map_alloc,
+ .map_free = dev_map_free,
+ .map_get_next_key = dev_map_hash_get_next_key,
+ .map_lookup_elem = dev_map_hash_lookup_elem,
+ .map_update_elem = dev_map_hash_update_elem,
+ .map_delete_elem = dev_map_hash_delete_elem,
+ .map_check_btf = map_check_no_btf,
+};
+
static int dev_map_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
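
The comment added at the top of devmap.c describes the new BPF_MAP_TYPE_DEVMAP_HASH as a devmap whose u32 keys (typically ifindexes) are hashed instead of being used as array indexes, so sparse ifindexes no longer waste slots. As an illustration only, not part of the patch, an XDP program keyed on the ingress ifindex might look like the sketch below; it assumes libbpf's BTF-style map definitions and that the new map type constant is available in the UAPI headers.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
    	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
    	__uint(max_entries, 64);
    	__type(key, __u32);	/* ifindex, may be sparse */
    	__type(value, __u32);	/* ifindex of the egress device */
    } tx_port SEC(".maps");

    SEC("xdp")
    int xdp_redirect_by_ifindex(struct xdp_md *ctx)
    {
    	/* Redirect via the entry stored under the receiving device's
    	 * ifindex; flags must be 0 for this helper. */
    	return bpf_redirect_map(&tx_port, ctx->ingress_ifindex, 0);
    }

    char _license[] SEC("license") = "GPL";
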
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index cc0d0cf114e3..a70f7209cda3 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -14,8 +14,9 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
-#include <linux/parser.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
@@ -583,58 +584,52 @@ static const struct super_operations bpf_super_ops = {
enum {
OPT_MODE,
- OPT_ERR,
};
-static const match_table_t bpf_mount_tokens = {
- { OPT_MODE, "mode=%o" },
- { OPT_ERR, NULL },
+static const struct fs_parameter_spec bpf_param_specs[] = {
+ fsparam_u32oct ("mode", OPT_MODE),
+ {}
+};
+
+static const struct fs_parameter_description bpf_fs_parameters = {
+ .name = "bpf",
+ .specs = bpf_param_specs,
};
struct bpf_mount_opts {
umode_t mode;
};
-static int bpf_parse_options(char *data, struct bpf_mount_opts *opts)
+static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- substring_t args[MAX_OPT_ARGS];
- int option, token;
- char *ptr;
+ struct bpf_mount_opts *opts = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
- opts->mode = S_IRWXUGO;
-
- while ((ptr = strsep(&data, ",")) != NULL) {
- if (!*ptr)
- continue;
-
- token = match_token(ptr, bpf_mount_tokens, args);
- switch (token) {
- case OPT_MODE:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
+ opt = fs_parse(fc, &bpf_fs_parameters, param, &result);
+ if (opt < 0)
/* We might like to report bad mount options here, but
* traditionally we've ignored all mount options, so we'd
* better continue to ignore non-existing options for bpf.
*/
- }
+ return opt == -ENOPARAM ? 0 : opt;
+
+ switch (opt) {
+ case OPT_MODE:
+ opts->mode = result.uint_32 & S_IALLUGO;
+ break;
}
return 0;
}
-static int bpf_fill_super(struct super_block *sb, void *data, int silent)
+static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr bpf_rfiles[] = { { "" } };
- struct bpf_mount_opts opts;
+ struct bpf_mount_opts *opts = fc->fs_private;
struct inode *inode;
int ret;
- ret = bpf_parse_options(data, &opts);
- if (ret)
- return ret;
-
ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
if (ret)
return ret;
@@ -644,21 +639,50 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
inode = sb->s_root->d_inode;
inode->i_op = &bpf_dir_iops;
inode->i_mode &= ~S_IALLUGO;
- inode->i_mode |= S_ISVTX | opts.mode;
+ inode->i_mode |= S_ISVTX | opts->mode;
return 0;
}
-static struct dentry *bpf_mount(struct file_system_type *type, int flags,
- const char *dev_name, void *data)
+static int bpf_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, bpf_fill_super);
+}
+
+static void bpf_free_fc(struct fs_context *fc)
{
- return mount_nodev(type, flags, data, bpf_fill_super);
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations bpf_context_ops = {
+ .free = bpf_free_fc,
+ .parse_param = bpf_parse_param,
+ .get_tree = bpf_get_tree,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int bpf_init_fs_context(struct fs_context *fc)
+{
+ struct bpf_mount_opts *opts;
+
+ opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ opts->mode = S_IRWXUGO;
+
+ fc->fs_private = opts;
+ fc->ops = &bpf_context_ops;
+ return 0;
}
static struct file_system_type bpf_fs_type = {
.owner = THIS_MODULE,
.name = "bpf",
- .mount = bpf_mount,
+ .init_fs_context = bpf_init_fs_context,
+ .parameters = &bpf_fs_parameters,
.kill_sb = kill_litter_super,
};
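
With the conversion to the new mount API, the only recognized option is still "mode", now parsed by bpf_parse_param() as an octal u32, and unknown options continue to be silently ignored. A small userspace sketch (not from the patch) that exercises the option through plain mount(2):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
    	/* Equivalent to: mount -t bpf -o mode=0700 bpf /sys/fs/bpf */
    	if (mount("bpf", "/sys/fs/bpf", "bpf", 0, "mode=0700")) {
    		perror("mount bpffs");
    		return 1;
    	}
    	return 0;
    }
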
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 272071e9112f..82eabd4e38ad 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -683,8 +683,8 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
}
/* map_idr_lock should have been held */
-static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
- bool uref)
+static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map,
+ bool uref)
{
int refold;
@@ -704,6 +704,16 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
return map;
}
+struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
+{
+ spin_lock_bh(&map_idr_lock);
+ map = __bpf_map_inc_not_zero(map, uref);
+ spin_unlock_bh(&map_idr_lock);
+
+ return map;
+}
+EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
+
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
return -ENOTSUPP;
@@ -1619,6 +1629,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
BPF_F_ANY_ALIGNMENT |
+ BPF_F_TEST_STATE_FREQ |
BPF_F_TEST_RND_HI32))
return -EINVAL;
@@ -2183,7 +2194,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
spin_lock_bh(&map_idr_lock);
map = idr_find(&map_idr, id);
if (map)
- map = bpf_map_inc_not_zero(map, true);
+ map = __bpf_map_inc_not_zero(map, true);
else
map = ERR_PTR(-ENOENT);
spin_unlock_bh(&map_idr_lock);
@@ -2880,6 +2891,10 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = bpf_obj_get_next_id(&attr, uattr,
&map_idr, &map_idr_lock);
break;
+ case BPF_BTF_GET_NEXT_ID:
+ err = bpf_obj_get_next_id(&attr, uattr,
+ &btf_idr, &btf_idr_lock);
+ break;
case BPF_PROG_GET_FD_BY_ID:
err = bpf_prog_get_fd_by_id(&attr);
break;
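
The new BPF_BTF_GET_NEXT_ID command reuses bpf_obj_get_next_id() against the now non-static btf_idr, so BTF objects can be enumerated the same way programs and maps already are. A rough userspace sketch, assuming the command and the start_id/next_id attr fields are exposed in the UAPI header:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    int main(void)
    {
    	union bpf_attr attr;
    	__u32 id = 0;

    	memset(&attr, 0, sizeof(attr));
    	for (;;) {
    		attr.start_id = id;
    		/* returns -1 with ENOENT once the last BTF id was seen */
    		if (syscall(__NR_bpf, BPF_BTF_GET_NEXT_ID, &attr, sizeof(attr)))
    			break;
    		id = attr.next_id;
    		printf("btf id: %u\n", id);
    	}
    	return 0;
    }
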
diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
new file mode 100644
index 000000000000..7ae5dddd1fe6
--- /dev/null
+++ b/kernel/bpf/sysfs_btf.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Provide kernel BTF information for introspection and use by eBPF tools.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+#include <linux/init.h>
+#include <linux/sysfs.h>
+
+/* See scripts/link-vmlinux.sh, gen_btf() func for details */
+extern char __weak _binary__btf_vmlinux_bin_start[];
+extern char __weak _binary__btf_vmlinux_bin_end[];
+
+static ssize_t
+btf_vmlinux_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t len)
+{
+ memcpy(buf, _binary__btf_vmlinux_bin_start + off, len);
+ return len;
+}
+
+static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = {
+ .attr = { .name = "vmlinux", .mode = 0444, },
+ .read = btf_vmlinux_read,
+};
+
+static struct kobject *btf_kobj;
+
+static int __init btf_vmlinux_init(void)
+{
+ if (!_binary__btf_vmlinux_bin_start)
+ return 0;
+
+ btf_kobj = kobject_create_and_add("btf", kernel_kobj);
+ if (!btf_kobj)
+ return -ENOMEM;
+
+ bin_attr_btf_vmlinux.size = _binary__btf_vmlinux_bin_end -
+ _binary__btf_vmlinux_bin_start;
+
+ return sysfs_create_bin_file(btf_kobj, &bin_attr_btf_vmlinux);
+}
+
+subsys_initcall(btf_vmlinux_init);
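
sysfs_btf.c exposes the raw BTF blob that scripts/link-vmlinux.sh embeds into vmlinux as a read-only binary attribute at /sys/kernel/btf/vmlinux, so any tool can read it like an ordinary file. A minimal sketch, not part of the patch:

    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/sys/kernel/btf/vmlinux", "rb");
    	char buf[4096];
    	size_t n, total = 0;

    	if (!f) {
    		perror("open /sys/kernel/btf/vmlinux");
    		return 1;
    	}
    	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
    		total += n;
    	fclose(f);
    	printf("raw vmlinux BTF: %zu bytes\n", total);
    	return 0;
    }
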
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c36a719fee6d..ffc3e53f5300 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3463,6 +3463,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
goto error;
break;
case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_HASH:
if (func_id != BPF_FUNC_redirect_map &&
func_id != BPF_FUNC_map_lookup_elem)
goto error;
@@ -3545,6 +3546,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
break;
case BPF_FUNC_redirect_map:
if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
+ map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
map->map_type != BPF_MAP_TYPE_CPUMAP &&
map->map_type != BPF_MAP_TYPE_XSKMAP)
goto error;
@@ -7226,7 +7228,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
struct bpf_verifier_state_list *sl, **pprev;
struct bpf_verifier_state *cur = env->cur_state, *new;
int i, j, err, states_cnt = 0;
- bool add_new_state = false;
+ bool add_new_state = env->test_state_freq ? true : false;
cur->last_insn_idx = env->prev_insn_idx;
if (!env->insn_aux_data[insn_idx].prune_point)
@@ -8622,8 +8624,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
if (is_narrower_load && size < target_size) {
- u8 shift = bpf_ctx_narrow_load_shift(off, size,
- size_default);
+ u8 shift = bpf_ctx_narrow_access_offset(
+ off, size, size_default) * 8;
if (ctx_field_size <= 4) {
if (shift)
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
@@ -9266,6 +9268,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
env->allow_ptr_leaks = is_priv;
+ if (is_priv)
+ env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
+
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 9bb96ace9fa1..942c662e2eed 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -13,8 +13,71 @@ struct xsk_map {
struct bpf_map map;
struct xdp_sock **xsk_map;
struct list_head __percpu *flush_list;
+ spinlock_t lock; /* Synchronize map updates */
};
+int xsk_map_inc(struct xsk_map *map)
+{
+ struct bpf_map *m = &map->map;
+
+ m = bpf_map_inc(m, false);
+ return PTR_ERR_OR_ZERO(m);
+}
+
+void xsk_map_put(struct xsk_map *map)
+{
+ bpf_map_put(&map->map);
+}
+
+static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
+ struct xdp_sock **map_entry)
+{
+ struct xsk_map_node *node;
+ int err;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
+ if (!node)
+ return NULL;
+
+ err = xsk_map_inc(map);
+ if (err) {
+ kfree(node);
+ return ERR_PTR(err);
+ }
+
+ node->map = map;
+ node->map_entry = map_entry;
+ return node;
+}
+
+static void xsk_map_node_free(struct xsk_map_node *node)
+{
+ xsk_map_put(node->map);
+ kfree(node);
+}
+
+static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
+{
+ spin_lock_bh(&xs->map_list_lock);
+ list_add_tail(&node->node, &xs->map_list);
+ spin_unlock_bh(&xs->map_list_lock);
+}
+
+static void xsk_map_sock_delete(struct xdp_sock *xs,
+ struct xdp_sock **map_entry)
+{
+ struct xsk_map_node *n, *tmp;
+
+ spin_lock_bh(&xs->map_list_lock);
+ list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
+ if (map_entry == n->map_entry) {
+ list_del(&n->node);
+ xsk_map_node_free(n);
+ }
+ }
+ spin_unlock_bh(&xs->map_list_lock);
+}
+
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
struct xsk_map *m;
@@ -34,6 +97,7 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&m->map, attr);
+ spin_lock_init(&m->lock);
cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
cost += sizeof(struct list_head) * num_possible_cpus();
@@ -71,21 +135,9 @@ free_m:
static void xsk_map_free(struct bpf_map *map)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
- int i;
bpf_clear_redirect_map(map);
synchronize_net();
-
- for (i = 0; i < map->max_entries; i++) {
- struct xdp_sock *xs;
-
- xs = m->xsk_map[i];
- if (!xs)
- continue;
-
- sock_put((struct sock *)xs);
- }
-
free_percpu(m->flush_list);
bpf_map_area_free(m->xsk_map);
kfree(m);
@@ -164,8 +216,9 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
u64 map_flags)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct xdp_sock *xs, *old_xs, **map_entry;
u32 i = *(u32 *)key, fd = *(u32 *)value;
- struct xdp_sock *xs, *old_xs;
+ struct xsk_map_node *node;
struct socket *sock;
int err;
@@ -173,8 +226,6 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
return -EINVAL;
if (unlikely(i >= m->map.max_entries))
return -E2BIG;
- if (unlikely(map_flags == BPF_NOEXIST))
- return -EEXIST;
sock = sockfd_lookup(fd, &err);
if (!sock)
@@ -192,32 +243,70 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
return -EOPNOTSUPP;
}
- sock_hold(sock->sk);
+ map_entry = &m->xsk_map[i];
+ node = xsk_map_node_alloc(m, map_entry);
+ if (IS_ERR(node)) {
+ sockfd_put(sock);
+ return PTR_ERR(node);
+ }
- old_xs = xchg(&m->xsk_map[i], xs);
+ spin_lock_bh(&m->lock);
+ old_xs = READ_ONCE(*map_entry);
+ if (old_xs == xs) {
+ err = 0;
+ goto out;
+ } else if (old_xs && map_flags == BPF_NOEXIST) {
+ err = -EEXIST;
+ goto out;
+ } else if (!old_xs && map_flags == BPF_EXIST) {
+ err = -ENOENT;
+ goto out;
+ }
+ xsk_map_sock_add(xs, node);
+ WRITE_ONCE(*map_entry, xs);
if (old_xs)
- sock_put((struct sock *)old_xs);
-
+ xsk_map_sock_delete(old_xs, map_entry);
+ spin_unlock_bh(&m->lock);
sockfd_put(sock);
return 0;
+
+out:
+ spin_unlock_bh(&m->lock);
+ sockfd_put(sock);
+ xsk_map_node_free(node);
+ return err;
}
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
struct xsk_map *m = container_of(map, struct xsk_map, map);
- struct xdp_sock *old_xs;
+ struct xdp_sock *old_xs, **map_entry;
int k = *(u32 *)key;
if (k >= map->max_entries)
return -EINVAL;
- old_xs = xchg(&m->xsk_map[k], NULL);
+ spin_lock_bh(&m->lock);
+ map_entry = &m->xsk_map[k];
+ old_xs = xchg(map_entry, NULL);
if (old_xs)
- sock_put((struct sock *)old_xs);
+ xsk_map_sock_delete(old_xs, map_entry);
+ spin_unlock_bh(&m->lock);
return 0;
}
+void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
+ struct xdp_sock **map_entry)
+{
+ spin_lock_bh(&map->lock);
+ if (READ_ONCE(*map_entry) == xs) {
+ WRITE_ONCE(*map_entry, NULL);
+ xsk_map_sock_delete(xs, map_entry);
+ }
+ spin_unlock_bh(&map->lock);
+}
+
const struct bpf_map_ops xsk_map_ops = {
.map_alloc = xsk_map_alloc,
.map_free = xsk_map_free,
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 88006be40ea3..7f83f4121d8d 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -194,25 +194,6 @@ struct cgroup_pidlist {
};
/*
- * The following two functions "fix" the issue where there are more pids
- * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
- * TODO: replace with a kernel-wide solution to this problem
- */
-#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
-static void *pidlist_allocate(int count)
-{
- if (PIDLIST_TOO_LARGE(count))
- return vmalloc(array_size(count, sizeof(pid_t)));
- else
- return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
-}
-
-static void pidlist_free(void *p)
-{
- kvfree(p);
-}
-
-/*
* Used to destroy all pidlists lingering waiting for destroy timer. None
* should be left afterwards.
*/
@@ -244,7 +225,7 @@ static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
*/
if (!delayed_work_pending(dwork)) {
list_del(&l->links);
- pidlist_free(l->list);
+ kvfree(l->list);
put_pid_ns(l->key.ns);
tofree = l;
}
@@ -365,7 +346,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
* show up until sometime later on.
*/
length = cgroup_task_count(cgrp);
- array = pidlist_allocate(length);
+ array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
if (!array)
return -ENOMEM;
/* now, populate the array */
@@ -390,12 +371,12 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
l = cgroup_pidlist_find_create(cgrp, type);
if (!l) {
- pidlist_free(array);
+ kvfree(array);
return -ENOMEM;
}
/* store array, freeing old if necessary */
- pidlist_free(l->list);
+ kvfree(l->list);
l->list = array;
l->length = length;
*lp = l;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index a7ce73a2c401..080561bb8a4b 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -488,7 +488,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
rcu_read_lock();
css = cgroup_css(cgrp, ss);
- if (!css || !css_tryget_online(css))
+ if (css && !css_tryget_online(css))
css = NULL;
rcu_read_unlock();
@@ -2894,7 +2894,7 @@ static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
do_each_subsys_mask(ss, ssid, ss_mask) {
if (printed)
seq_putc(seq, ' ');
- seq_printf(seq, "%s", ss->name);
+ seq_puts(seq, ss->name);
printed = true;
} while_each_subsys_mask();
if (printed)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 5cc608de6883..f76d6f77dd5e 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -787,11 +787,8 @@ out:
}
/*
- * GDB places a breakpoint at this function to know dynamically
- * loaded objects. It's not defined static so that only one instance with this
- * name exists in the kernel.
+ * GDB places a breakpoint at this function to know dynamically loaded objects.
*/
-
static int module_event(struct notifier_block *self, unsigned long val,
void *data)
{
@@ -896,30 +893,25 @@ static struct sysrq_key_op sysrq_dbg_op = {
};
#endif
-static int kgdb_panic_event(struct notifier_block *self,
- unsigned long val,
- void *data)
+void kgdb_panic(const char *msg)
{
+ if (!kgdb_io_module_registered)
+ return;
+
/*
- * Avoid entering the debugger if we were triggered due to a panic
- * We don't want to get stuck waiting for input from user in such case.
- * panic_timeout indicates the system should automatically
+ * We don't want to get stuck waiting for input from user if
+ * "panic_timeout" indicates the system should automatically
* reboot on panic.
*/
if (panic_timeout)
- return NOTIFY_DONE;
+ return;
if (dbg_kdb_mode)
- kdb_printf("PANIC: %s\n", (char *)data);
+ kdb_printf("PANIC: %s\n", msg);
+
kgdb_breakpoint();
- return NOTIFY_DONE;
}
-static struct notifier_block kgdb_panic_event_nb = {
- .notifier_call = kgdb_panic_event,
- .priority = INT_MAX,
-};
-
void __weak kgdb_arch_late(void)
{
}
@@ -968,8 +960,6 @@ static void kgdb_register_callbacks(void)
kgdb_arch_late();
register_module_notifier(&dbg_module_load_nb);
register_reboot_notifier(&dbg_reboot_notifier);
- atomic_notifier_chain_register(&panic_notifier_list,
- &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
register_sysrq_key('g', &sysrq_dbg_op);
#endif
@@ -983,16 +973,14 @@ static void kgdb_register_callbacks(void)
static void kgdb_unregister_callbacks(void)
{
/*
- * When this routine is called KGDB should unregister from the
- * panic handler and clean up, making sure it is not handling any
+ * When this routine is called KGDB should unregister from
+ * handlers and clean up, making sure it is not handling any
* break exceptions at the time.
*/
if (kgdb_io_module_registered) {
kgdb_io_module_registered = 0;
unregister_reboot_notifier(&dbg_reboot_notifier);
unregister_module_notifier(&dbg_module_load_nb);
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &kgdb_panic_event_nb);
kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
unregister_sysrq_key('g', &sysrq_dbg_op);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 9ecfa37c7fbf..4567fe998c30 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -830,7 +830,7 @@ static void parse_grep(const char *str)
cp++;
while (isspace(*cp))
cp++;
- if (strncmp(cp, "grep ", 5)) {
+ if (!str_has_prefix(cp, "grep ")) {
kdb_printf("invalid 'pipe', see grephelp\n");
return;
}
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 9decbba255fc..73c5c2b8e824 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -20,6 +20,15 @@ config ARCH_HAS_DMA_COHERENCE_H
config ARCH_HAS_DMA_SET_MASK
bool
+#
+# Select this option if the architecture needs special handling for
+# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
+# people think of when saying write combine, so very few platforms should

+# need to enable this.
+#
+config ARCH_HAS_DMA_WRITE_COMBINE
+ bool
+
config DMA_DECLARE_COHERENT
bool
@@ -45,9 +54,6 @@ config ARCH_HAS_DMA_PREP_COHERENT
config ARCH_HAS_DMA_COHERENT_TO_PFN
bool
-config ARCH_HAS_DMA_MMAP_PGPROT
- bool
-
config ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 29fd6590dc1e..545e3869b0e3 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -122,18 +122,6 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_release_coherent_memory(mem);
return ret;
}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
- struct dma_coherent_mem *mem = dev->dma_mem;
-
- if (!mem)
- return;
- dma_release_coherent_memory(mem);
- dev->dma_mem = NULL;
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle)
@@ -288,7 +276,6 @@ int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
-EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
size_t size, int *ret)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b0038ca3aa92..d9334f31a5af 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -136,17 +136,29 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
return ret;
}
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable. This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ * a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ * as we will try to flush the memory through a different alias to that
+ * actually being used (and the flushes are redundant.)
+ */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- if (!dma_is_direct(ops) && ops->get_sgtable)
- return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
+ if (dma_is_direct(ops))
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+ size, attrs);
+ if (!ops->get_sgtable)
+ return -ENXIO;
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
@@ -161,9 +173,11 @@ pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
(IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
(attrs & DMA_ATTR_NON_CONSISTENT)))
return prot;
- if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
- return arch_dma_mmap_pgprot(dev, prot, attrs);
- return pgprot_noncached(prot);
+#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
+ return pgprot_writecombine(prot);
+#endif
+ return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */
@@ -174,7 +188,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
-#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
+#ifdef CONFIG_MMU
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
@@ -205,8 +219,29 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
return -ENXIO;
-#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+#endif /* CONFIG_MMU */
+}
+
+/**
+ * dma_can_mmap - check if a given device supports dma_mmap_*
+ * @dev: device to check
+ *
+ * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
+ * map DMA allocations to userspace.
+ */
+bool dma_can_mmap(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_is_direct(ops)) {
+ return IS_ENABLED(CONFIG_MMU) &&
+ (dev_is_dma_coherent(dev) ||
+ IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN));
+ }
+
+ return ops->mmap != NULL;
}
+EXPORT_SYMBOL_GPL(dma_can_mmap);
/**
* dma_mmap_attrs - map a coherent DMA allocation into user space
@@ -227,31 +262,15 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- if (!dma_is_direct(ops) && ops->mmap)
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ if (dma_is_direct(ops))
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
+ attrs);
+ if (!ops->mmap)
+ return -ENXIO;
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
-static u64 dma_default_get_required_mask(struct device *dev)
-{
- u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
- u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
- u64 mask;
-
- if (!high_totalram) {
- /* convert to mask just covering totalram */
- low_totalram = (1 << (fls(low_totalram) - 1));
- low_totalram += low_totalram - 1;
- mask = low_totalram;
- } else {
- high_totalram = (1 << (fls(high_totalram) - 1));
- high_totalram += high_totalram - 1;
- mask = (((u64)high_totalram) << 32) + 0xffffffff;
- }
- return mask;
-}
-
u64 dma_get_required_mask(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -260,7 +279,16 @@ u64 dma_get_required_mask(struct device *dev)
return dma_direct_get_required_mask(dev);
if (ops->get_required_mask)
return ops->get_required_mask(dev);
- return dma_default_get_required_mask(dev);
+
+ /*
+ * We require every DMA ops implementation to at least support a 32-bit
+ * DMA mask (and use bounce buffering if that isn't supported in
+ * hardware). As the direct mapping code has its own routine to
+ * actually report an optimal mask we default to 32-bit here as that
+ * is the right thing for most IOMMUs, and at least not actively
+ * harmful in general.
+ */
+ return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
@@ -317,12 +345,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
EXPORT_SYMBOL(dma_free_attrs);
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
- if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
- dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -353,7 +375,6 @@ int dma_set_mask(struct device *dev, u64 mask)
return -EIO;
arch_dma_set_mask(dev, mask);
- dma_check_mask(dev, mask);
*dev->dma_mask = mask;
return 0;
}
@@ -371,7 +392,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
if (!dma_supported(dev, mask))
return -EIO;
- dma_check_mask(dev, mask);
dev->coherent_dma_mask = mask;
return 0;
}
@@ -405,3 +425,14 @@ size_t dma_max_mapping_size(struct device *dev)
return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
+
+unsigned long dma_get_merge_boundary(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops || !ops->get_merge_boundary)
+ return 0; /* can't merge */
+
+ return ops->get_merge_boundary(dev);
+}
+EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
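
dma_can_mmap() gives drivers a way to ask up front whether dma_mmap_*() can succeed for a device instead of discovering -ENXIO at mmap time. Below is a sketch of how a driver's mmap handler might use it; the foo_dev structure and its fields are made up for illustration.

    #include <linux/dma-mapping.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    struct foo_dev {			/* hypothetical driver state */
    	struct device *dev;
    	void *cpu_addr;			/* from dma_alloc_coherent() */
    	dma_addr_t dma_handle;
    	size_t size;
    };

    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	struct foo_dev *foo = file->private_data;

    	/* bail out early if dma_mmap_* cannot work for this device */
    	if (!dma_can_mmap(foo->dev))
    		return -ENXIO;

    	return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
    				 foo->dma_handle, foo->size);
    }
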
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index ffe78f0b2fe4..ca4e5d44b571 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -11,13 +11,21 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+struct page **dma_common_find_pages(void *cpu_addr)
+{
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (!area || area->flags != VM_DMA_COHERENT)
+ return NULL;
+ return area->pages;
+}
+
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
- size_t size, unsigned long vm_flags, pgprot_t prot,
- const void *caller)
+ size_t size, pgprot_t prot, const void *caller)
{
struct vm_struct *area;
- area = get_vm_area_caller(size, vm_flags, caller);
+ area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
if (!area)
return NULL;
@@ -34,12 +42,11 @@ static struct vm_struct *__dma_common_pages_remap(struct page **pages,
* Cannot be used in non-sleeping contexts
*/
void *dma_common_pages_remap(struct page **pages, size_t size,
- unsigned long vm_flags, pgprot_t prot,
- const void *caller)
+ pgprot_t prot, const void *caller)
{
struct vm_struct *area;
- area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+ area = __dma_common_pages_remap(pages, size, prot, caller);
if (!area)
return NULL;
@@ -53,7 +60,6 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
* Cannot be used in non-sleeping contexts
*/
void *dma_common_contiguous_remap(struct page *page, size_t size,
- unsigned long vm_flags,
pgprot_t prot, const void *caller)
{
int i;
@@ -67,7 +73,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
for (i = 0; i < (size >> PAGE_SHIFT); i++)
pages[i] = nth_page(page, i);
- area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+ area = __dma_common_pages_remap(pages, size, prot, caller);
kfree(pages);
@@ -79,11 +85,11 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
/*
* Unmaps a range previously mapped by dma_common_*_remap
*/
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+void dma_common_free_remap(void *cpu_addr, size_t size)
{
- struct vm_struct *area = find_vm_area(cpu_addr);
+ struct page **pages = dma_common_find_pages(cpu_addr);
- if (!area || (area->flags & vm_flags) != vm_flags) {
+ if (!pages) {
WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
return;
}
@@ -105,7 +111,16 @@ static int __init early_coherent_pool(char *p)
}
early_param("coherent_pool", early_coherent_pool);
-int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
+static gfp_t dma_atomic_pool_gfp(void)
+{
+ if (IS_ENABLED(CONFIG_ZONE_DMA))
+ return GFP_DMA;
+ if (IS_ENABLED(CONFIG_ZONE_DMA32))
+ return GFP_DMA32;
+ return GFP_KERNEL;
+}
+
+static int __init dma_atomic_pool_init(void)
{
unsigned int pool_size_order = get_order(atomic_pool_size);
unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
@@ -117,7 +132,7 @@ int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
page = dma_alloc_from_contiguous(NULL, nr_pages,
pool_size_order, false);
else
- page = alloc_pages(gfp, pool_size_order);
+ page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
if (!page)
goto out;
@@ -127,8 +142,9 @@ int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
if (!atomic_pool)
goto free_page;
- addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
- prot, __builtin_return_address(0));
+ addr = dma_common_contiguous_remap(page, atomic_pool_size,
+ pgprot_dmacoherent(PAGE_KERNEL),
+ __builtin_return_address(0));
if (!addr)
goto destroy_genpool;
@@ -143,7 +159,7 @@ int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
return 0;
remove_mapping:
- dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+ dma_common_free_remap(addr, atomic_pool_size);
destroy_genpool:
gen_pool_destroy(atomic_pool);
atomic_pool = NULL;
@@ -155,6 +171,7 @@ out:
atomic_pool_size / 1024);
return -ENOMEM;
}
+postcore_initcall(dma_atomic_pool_init);
bool dma_in_atomic_pool(void *start, size_t size)
{
@@ -217,7 +234,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
arch_dma_prep_coherent(page, size);
/* create a coherent mapping */
- ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
+ ret = dma_common_contiguous_remap(page, size,
dma_pgprot(dev, PAGE_KERNEL, attrs),
__builtin_return_address(0));
if (!ret) {
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 796a44f8ef5a..673a2cdb2656 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -463,8 +463,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
if (mem_encrypt_active())
- pr_warn_once("%s is active and system is using DMA bounce buffers\n",
- sme_active() ? "SME" : "SEV");
+ pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
if (mapping_size > alloc_size) {
dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index fc482c8e0bd8..57fb4dcff434 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -3,6 +3,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/binfmts.h>
+#include <linux/elfcore.h>
Elf_Half __weak elf_core_extra_phdrs(void)
{
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 84fa00497c49..94d38a39d72e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -26,6 +26,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
+#include <linux/khugepaged.h>
#include <linux/uprobes.h>
@@ -143,17 +144,19 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
*
* @vma: vma that holds the pte pointing to page
* @addr: address the old @page is mapped at
- * @page: the cowed page we are replacing by kpage
- * @kpage: the modified page we replace page by
+ * @old_page: the page we are replacing by new_page
+ * @new_page: the modified page we replace page by
*
- * Returns 0 on success, -EFAULT on failure.
+ * If @new_page is NULL, only unmap @old_page.
+ *
+ * Returns 0 on success, negative error code otherwise.
*/
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
struct page *old_page, struct page *new_page)
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = old_page,
+ .page = compound_head(old_page),
.vma = vma,
.address = addr,
};
@@ -164,12 +167,12 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
addr + PAGE_SIZE);
- VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
-
- err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
- false);
- if (err)
- return err;
+ if (new_page) {
+ err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+ &memcg, false);
+ if (err)
+ return err;
+ }
/* For try_to_free_swap() and munlock_vma_page() below */
lock_page(old_page);
@@ -177,15 +180,20 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_invalidate_range_start(&range);
err = -EAGAIN;
if (!page_vma_mapped_walk(&pvmw)) {
- mem_cgroup_cancel_charge(new_page, memcg, false);
+ if (new_page)
+ mem_cgroup_cancel_charge(new_page, memcg, false);
goto unlock;
}
VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
- get_page(new_page);
- page_add_new_anon_rmap(new_page, vma, addr, false);
- mem_cgroup_commit_charge(new_page, memcg, false, false);
- lru_cache_add_active_or_unevictable(new_page, vma);
+ if (new_page) {
+ get_page(new_page);
+ page_add_new_anon_rmap(new_page, vma, addr, false);
+ mem_cgroup_commit_charge(new_page, memcg, false, false);
+ lru_cache_add_active_or_unevictable(new_page, vma);
+ } else
+ /* no new page, just dec_mm_counter for old_page */
+ dec_mm_counter(mm, MM_ANONPAGES);
if (!PageAnon(old_page)) {
dec_mm_counter(mm, mm_counter_file(old_page));
@@ -194,8 +202,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
ptep_clear_flush_notify(vma, addr, pvmw.pte);
- set_pte_at_notify(mm, addr, pvmw.pte,
- mk_pte(new_page, vma->vm_page_prot));
+ if (new_page)
+ set_pte_at_notify(mm, addr, pvmw.pte,
+ mk_pte(new_page, vma->vm_page_prot));
page_remove_rmap(old_page, false);
if (!page_mapped(old_page))
@@ -464,6 +473,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
struct page *old_page, *new_page;
struct vm_area_struct *vma;
int ret, is_register, ref_ctr_updated = 0;
+ bool orig_page_huge = false;
is_register = is_swbp_insn(&opcode);
uprobe = container_of(auprobe, struct uprobe, arch);
@@ -471,7 +481,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
retry:
/* Read the page with vaddr into memory */
ret = get_user_pages_remote(NULL, mm, vaddr, 1,
- FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
+ FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
if (ret <= 0)
return ret;
@@ -488,6 +498,10 @@ retry:
ref_ctr_updated = 1;
}
+ ret = 0;
+ if (!is_register && !PageAnon(old_page))
+ goto put_old;
+
ret = anon_vma_prepare(vma);
if (ret)
goto put_old;
@@ -501,8 +515,33 @@ retry:
copy_highpage(new_page, old_page);
copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+ if (!is_register) {
+ struct page *orig_page;
+ pgoff_t index;
+
+ VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);
+
+ index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
+ orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
+ index);
+
+ if (orig_page) {
+ if (PageUptodate(orig_page) &&
+ pages_identical(new_page, orig_page)) {
+ /* let go new_page */
+ put_page(new_page);
+ new_page = NULL;
+
+ if (PageCompound(orig_page))
+ orig_page_huge = true;
+ }
+ put_page(orig_page);
+ }
+ }
+
ret = __replace_page(vma, vaddr, old_page, new_page);
- put_page(new_page);
+ if (new_page)
+ put_page(new_page);
put_old:
put_page(old_page);
@@ -513,6 +552,10 @@ put_old:
if (ret && is_register && ref_ctr_updated)
update_ref_ctr(uprobe, mm, -1);
+ /* try collapse pmd for compound page */
+ if (!ret && orig_page_huge)
+ collapse_pte_mapped_thp(mm, vaddr);
+
return ret;
}
diff --git a/kernel/extable.c b/kernel/extable.c
index e23cce6e6092..f6c9406eec7d 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -40,13 +40,20 @@ void __init sort_main_extable(void)
}
}
+/* Given an address, look for it in the kernel exception table */
+const
+struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
+{
+ return search_extable(__start___ex_table,
+ __stop___ex_table - __start___ex_table, addr);
+}
+
/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
const struct exception_table_entry *e;
- e = search_extable(__start___ex_table,
- __stop___ex_table - __start___ex_table, addr);
+ e = search_kernel_exception_table(addr);
if (!e)
e = search_module_extables(addr);
return e;
diff --git a/kernel/fork.c b/kernel/fork.c
index 53e780748fe3..60763c043aa3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -125,6 +125,15 @@ int nr_threads; /* The idle threads do not count.. */
static int max_threads; /* tunable limit on nr_threads */
+#define NAMED_ARRAY_INDEX(x) [x] = __stringify(x)
+
+static const char * const resident_page_types[] = {
+ NAMED_ARRAY_INDEX(MM_FILEPAGES),
+ NAMED_ARRAY_INDEX(MM_ANONPAGES),
+ NAMED_ARRAY_INDEX(MM_SWAPENTS),
+ NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
+};
+
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
@@ -645,12 +654,15 @@ static void check_mm(struct mm_struct *mm)
{
int i;
+ BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
+		"Please make sure 'resident_page_types[]' is updated as well");
+
for (i = 0; i < NR_MM_COUNTERS; i++) {
long x = atomic_long_read(&mm->rss_stat.count[i]);
if (unlikely(x))
- printk(KERN_ALERT "BUG: Bad rss-counter state "
- "mm:%p idx:%d val:%ld\n", mm, i, x);
+ pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
+ mm, resident_page_types[i], x);
}
if (mm_pgtables_bytes(mm))
@@ -1009,7 +1021,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_init_owner(mm, p);
RCU_INIT_POINTER(mm->exe_file, NULL);
mmu_notifier_mm_init(mm);
- hmm_mm_init(mm);
init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
mm->pmd_huge_pte = NULL;
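
The NAMED_ARRAY_INDEX/BUILD_BUG_ON_MSG pairing above generalizes beyond rss counters; a self-contained sketch with a hypothetical enum (names below are made up) shows how the designated initializers and the size check keep a name table in sync at build time:

#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/stringify.h>

enum demo_counter { DEMO_A, DEMO_B, NR_DEMO_COUNTERS };

#define NAMED_ARRAY_INDEX(x) [x] = __stringify(x)

static const char * const demo_counter_names[] = {
	NAMED_ARRAY_INDEX(DEMO_A),
	NAMED_ARRAY_INDEX(DEMO_B),
};

static void demo_check_names(void)
{
	/* Fails the build if the enum grows without a matching name. */
	BUILD_BUG_ON_MSG(ARRAY_SIZE(demo_counter_names) != NR_DEMO_COUNTERS,
			 "demo_counter_names[] is out of sync with enum demo_counter");
}
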
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 3941a9c48f83..060e8e726755 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -4,7 +4,7 @@ menu "GCOV-based kernel profiling"
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
- select CONSTRUCTORS if !UML
+ select CONSTRUCTORS
default n
---help---
This option enables gcov-based code profiling (e.g. for code coverage
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index d6961d3c6f9e..8f557fa1f4fe 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -177,6 +177,26 @@ static void resume_irqs(bool want_early)
}
/**
+ * rearm_wake_irq - rearm a wakeup interrupt line after signaling wakeup
+ * @irq: Interrupt to rearm
+ */
+void rearm_wake_irq(unsigned int irq)
+{
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+
+ if (!desc || !(desc->istate & IRQS_SUSPENDED) ||
+ !irqd_is_wakeup_set(&desc->irq_data))
+ return;
+
+ desc->istate &= ~IRQS_SUSPENDED;
+ irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
+ __enable_irq(desc);
+
+ irq_put_desc_busunlock(desc, flags);
+}
+
+/**
* irq_pm_syscore_ops - enable interrupt lines early
*
* Enable all interrupt lines with %IRQF_EARLY_RESUME set.
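
A hedged sketch of the intended caller of rearm_wake_irq(): code that has already reported a wakeup event and wants the still-suspended wake interrupt re-enabled so a later edge is not lost (the function name and the pm_system_wakeup() pairing are assumptions for illustration):

#include <linux/interrupt.h>
#include <linux/suspend.h>

static void demo_report_wake_event(unsigned int wake_irq)
{
	pm_system_wakeup();		/* report the wakeup to the PM core */
	rearm_wake_irq(wake_irq);	/* re-enable the suspended wake IRQ */
}
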
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index d5870723b8ad..15d70a90b50d 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -300,6 +300,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
struct page *pages;
+ if (fatal_signal_pending(current))
+ return NULL;
pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
if (pages) {
unsigned int count, i;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1b66ccbb744a..53534aa258a6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -962,8 +962,15 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
.func = kprobe_ftrace_handler,
+ .flags = FTRACE_OPS_FL_SAVE_REGS,
+};
+
+static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
+ .func = kprobe_ftrace_handler,
.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
+
+static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;
/* Must ensure p->addr is really on ftrace */
@@ -976,58 +983,75 @@ static int prepare_kprobe(struct kprobe *p)
}
/* Caller must lock kprobe_mutex */
-static int arm_kprobe_ftrace(struct kprobe *p)
+static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
+ int *cnt)
{
int ret = 0;
- ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
- (unsigned long)p->addr, 0, 0);
+ ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
if (ret) {
pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
p->addr, ret);
return ret;
}
- if (kprobe_ftrace_enabled == 0) {
- ret = register_ftrace_function(&kprobe_ftrace_ops);
+ if (*cnt == 0) {
+ ret = register_ftrace_function(ops);
if (ret) {
pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
goto err_ftrace;
}
}
- kprobe_ftrace_enabled++;
+ (*cnt)++;
return ret;
err_ftrace:
/*
- * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
- * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
- * empty filter_hash which would undesirably trace all functions.
+	 * At this point, since ops is not registered, we should be safe from
+	 * registering an empty filter.
*/
- ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+ ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
return ret;
}
+static int arm_kprobe_ftrace(struct kprobe *p)
+{
+ bool ipmodify = (p->post_handler != NULL);
+
+ return __arm_kprobe_ftrace(p,
+ ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
+ ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
+}
+
/* Caller must lock kprobe_mutex */
-static int disarm_kprobe_ftrace(struct kprobe *p)
+static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
+ int *cnt)
{
int ret = 0;
- if (kprobe_ftrace_enabled == 1) {
- ret = unregister_ftrace_function(&kprobe_ftrace_ops);
+ if (*cnt == 1) {
+ ret = unregister_ftrace_function(ops);
if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
return ret;
}
- kprobe_ftrace_enabled--;
+ (*cnt)--;
- ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
- (unsigned long)p->addr, 1, 0);
+ ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
p->addr, ret);
return ret;
}
+
+static int disarm_kprobe_ftrace(struct kprobe *p)
+{
+ bool ipmodify = (p->post_handler != NULL);
+
+ return __disarm_kprobe_ftrace(p,
+ ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
+ ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
+}
#else /* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p) arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p) (-ENODEV)
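
What the ops split means for users, as a sketch with placeholder handlers and a placeholder target symbol: a kprobe without a post_handler is now armed through the plain SAVE_REGS ops and no longer forces IPMODIFY on the traced function; only probes that supply a post_handler still go through kprobe_ipmodify_ops.

#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* continue with the probed instruction */
}

static void demo_post(struct kprobe *p, struct pt_regs *regs,
		      unsigned long flags)
{
}

static struct kprobe demo_kp_light = {
	.symbol_name	= "do_sys_open",	/* placeholder target */
	.pre_handler	= demo_pre,		/* no post_handler: non-IPMODIFY ops */
};

static struct kprobe demo_kp_full = {
	.symbol_name	= "do_sys_open",	/* placeholder target */
	.pre_handler	= demo_pre,
	.post_handler	= demo_post,		/* post_handler: IPMODIFY ops */
};

static int __init demo_register(void)
{
	return register_kprobe(&demo_kp_light) ?: register_kprobe(&demo_kp_full);
}
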
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c4ce08f43bd6..ab4a4606d19b 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1175,6 +1175,7 @@ err:
pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
patch->mod->name, obj->mod->name, obj->mod->name);
mod->klp_alive = false;
+ obj->mod = NULL;
klp_cleanup_module_patches_limited(mod, patch);
mutex_unlock(&klp_mutex);
diff --git a/kernel/module.c b/kernel/module.c
index 9ee93421269c..32873bcce738 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -544,12 +544,20 @@ static const char *kernel_symbol_name(const struct kernel_symbol *sym)
#endif
}
-static int cmp_name(const void *va, const void *vb)
+static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{
- const char *a;
- const struct kernel_symbol *b;
- a = va; b = vb;
- return strcmp(a, kernel_symbol_name(b));
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ if (!sym->namespace_offset)
+ return NULL;
+ return offset_to_ptr(&sym->namespace_offset);
+#else
+ return sym->namespace;
+#endif
+}
+
+static int cmp_name(const void *name, const void *sym)
+{
+ return strcmp(name, kernel_symbol_name(sym));
}
static bool find_exported_symbol_in_section(const struct symsearch *syms,
@@ -1379,6 +1387,41 @@ static inline int same_magic(const char *amagic, const char *bmagic,
}
#endif /* CONFIG_MODVERSIONS */
+static char *get_modinfo(const struct load_info *info, const char *tag);
+static char *get_next_modinfo(const struct load_info *info, const char *tag,
+ char *prev);
+
+static int verify_namespace_is_imported(const struct load_info *info,
+ const struct kernel_symbol *sym,
+ struct module *mod)
+{
+ const char *namespace;
+ char *imported_namespace;
+
+ namespace = kernel_symbol_namespace(sym);
+ if (namespace) {
+ imported_namespace = get_modinfo(info, "import_ns");
+ while (imported_namespace) {
+ if (strcmp(namespace, imported_namespace) == 0)
+ return 0;
+ imported_namespace = get_next_modinfo(
+ info, "import_ns", imported_namespace);
+ }
+#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
+ pr_warn(
+#else
+ pr_err(
+#endif
+ "%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
+ mod->name, kernel_symbol_name(sym), namespace);
+#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
+ return -EINVAL;
+#endif
+ }
+ return 0;
+}
+
+
/* Resolve a symbol for this module. I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
const struct load_info *info,
@@ -1407,6 +1450,12 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
goto getname;
}
+ err = verify_namespace_is_imported(info, sym, mod);
+ if (err) {
+ sym = ERR_PTR(err);
+ goto getname;
+ }
+
err = ref_module(mod, owner);
if (err) {
sym = ERR_PTR(err);
@@ -2481,7 +2530,8 @@ static char *next_string(char *string, unsigned long *secsize)
return string;
}
-static char *get_modinfo(struct load_info *info, const char *tag)
+static char *get_next_modinfo(const struct load_info *info, const char *tag,
+ char *prev)
{
char *p;
unsigned int taglen = strlen(tag);
@@ -2492,13 +2542,25 @@ static char *get_modinfo(struct load_info *info, const char *tag)
* get_modinfo() calls made before rewrite_section_headers()
* must use sh_offset, as sh_addr isn't set!
*/
- for (p = (char *)info->hdr + infosec->sh_offset; p; p = next_string(p, &size)) {
+ char *modinfo = (char *)info->hdr + infosec->sh_offset;
+
+ if (prev) {
+ size -= prev - modinfo;
+ modinfo = next_string(prev, &size);
+ }
+
+ for (p = modinfo; p; p = next_string(p, &size)) {
if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
return p + taglen + 1;
}
return NULL;
}
+static char *get_modinfo(const struct load_info *info, const char *tag)
+{
+ return get_next_modinfo(info, tag, NULL);
+}
+
static void setup_modinfo(struct module *mod, struct load_info *info)
{
struct module_attribute *attr;
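
For context, a hedged sketch of the exporter/importer pairing that verify_namespace_is_imported() enforces; the EXPORT_SYMBOL_NS/MODULE_IMPORT_NS spellings come from the same symbol-namespace series and the namespace itself is made up:

/* exporting module */
#include <linux/export.h>
#include <linux/module.h>

int demo_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS(demo_helper, DEMO_NS);

/* importing module: without this line the load now warns, or fails with
 * -EINVAL unless MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is set. */
MODULE_IMPORT_NS(DEMO_NS);
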
diff --git a/kernel/padata.c b/kernel/padata.c
index 15a8ad63f4ff..c3fec1413295 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -46,18 +46,13 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
return target_cpu;
}
-static int padata_cpu_hash(struct parallel_data *pd)
+static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
- unsigned int seq_nr;
- int cpu_index;
-
/*
* Hash the sequence numbers to the cpus by taking
* seq_nr mod. number of cpus in use.
*/
-
- seq_nr = atomic_inc_return(&pd->seq_nr);
- cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
+ int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
return padata_index_to_cpu(pd, cpu_index);
}
@@ -94,17 +89,19 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
*
* @pinst: padata instance
* @padata: object to be parallelized
- * @cb_cpu: cpu the serialization callback function will run on,
- * must be in the serial cpumask of padata(i.e. cpumask.cbcpu).
+ * @cb_cpu: pointer to the CPU that the serialization callback function should
+ * run on. If it's not in the serial cpumask of @pinst
+ *           (i.e. cpumask.cbcpu), this function selects a fallback CPU and,
+ *           if none is found, returns -EINVAL.
*
* The parallelization callback function will run with BHs off.
* Note: Every object which is parallelized by padata_do_parallel
* must be seen by padata_do_serial.
*/
int padata_do_parallel(struct padata_instance *pinst,
- struct padata_priv *padata, int cb_cpu)
+ struct padata_priv *padata, int *cb_cpu)
{
- int target_cpu, err;
+ int i, cpu, cpu_index, target_cpu, err;
struct padata_parallel_queue *queue;
struct parallel_data *pd;
@@ -116,8 +113,19 @@ int padata_do_parallel(struct padata_instance *pinst,
if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
goto out;
- if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
- goto out;
+ if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
+ if (!cpumask_weight(pd->cpumask.cbcpu))
+ goto out;
+
+ /* Select an alternate fallback CPU and notify the caller. */
+ cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
+
+ cpu = cpumask_first(pd->cpumask.cbcpu);
+ for (i = 0; i < cpu_index; i++)
+ cpu = cpumask_next(cpu, pd->cpumask.cbcpu);
+
+ *cb_cpu = cpu;
+ }
err = -EBUSY;
if ((pinst->flags & PADATA_RESET))
@@ -129,9 +137,10 @@ int padata_do_parallel(struct padata_instance *pinst,
err = 0;
atomic_inc(&pd->refcnt);
padata->pd = pd;
- padata->cb_cpu = cb_cpu;
+ padata->cb_cpu = *cb_cpu;
- target_cpu = padata_cpu_hash(pd);
+ padata->seq_nr = atomic_inc_return(&pd->seq_nr);
+ target_cpu = padata_cpu_hash(pd, padata->seq_nr);
padata->cpu = target_cpu;
queue = per_cpu_ptr(pd->pqueue, target_cpu);
@@ -139,7 +148,7 @@ int padata_do_parallel(struct padata_instance *pinst,
list_add_tail(&padata->list, &queue->parallel.list);
spin_unlock(&queue->parallel.lock);
- queue_work_on(target_cpu, pinst->wq, &queue->work);
+ queue_work(pinst->parallel_wq, &queue->work);
out:
rcu_read_unlock_bh();
@@ -149,63 +158,53 @@ out:
EXPORT_SYMBOL(padata_do_parallel);
/*
- * padata_get_next - Get the next object that needs serialization.
+ * padata_find_next - Find the next object that needs serialization.
*
* Return values are:
*
* A pointer to the control struct of the next object that needs
* serialization, if present in one of the percpu reorder queues.
*
- * -EINPROGRESS, if the next object that needs serialization will
+ * NULL, if the next object that needs serialization will
* be parallel processed by another cpu and is not yet present in
* the cpu's reorder queue.
- *
- * -ENODATA, if this cpu has to do the parallel processing for
- * the next object.
*/
-static struct padata_priv *padata_get_next(struct parallel_data *pd)
+static struct padata_priv *padata_find_next(struct parallel_data *pd,
+ bool remove_object)
{
- int cpu, num_cpus;
- unsigned int next_nr, next_index;
struct padata_parallel_queue *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
+ int cpu = pd->cpu;
- num_cpus = cpumask_weight(pd->cpumask.pcpu);
-
- /*
- * Calculate the percpu reorder queue and the sequence
- * number of the next object.
- */
- next_nr = pd->processed;
- next_index = next_nr % num_cpus;
- cpu = padata_index_to_cpu(pd, next_index);
next_queue = per_cpu_ptr(pd->pqueue, cpu);
-
reorder = &next_queue->reorder;
spin_lock(&reorder->lock);
- if (!list_empty(&reorder->list)) {
- padata = list_entry(reorder->list.next,
- struct padata_priv, list);
-
- list_del_init(&padata->list);
- atomic_dec(&pd->reorder_objects);
+ if (list_empty(&reorder->list)) {
+ spin_unlock(&reorder->lock);
+ return NULL;
+ }
- pd->processed++;
+ padata = list_entry(reorder->list.next, struct padata_priv, list);
+ /*
+ * Checks the rare case where two or more parallel jobs have hashed to
+ * the same CPU and one of the later ones finishes first.
+ */
+ if (padata->seq_nr != pd->processed) {
spin_unlock(&reorder->lock);
- goto out;
+ return NULL;
}
- spin_unlock(&reorder->lock);
- if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
- padata = ERR_PTR(-ENODATA);
- goto out;
+ if (remove_object) {
+ list_del_init(&padata->list);
+ atomic_dec(&pd->reorder_objects);
+ ++pd->processed;
+ pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
}
- padata = ERR_PTR(-EINPROGRESS);
-out:
+ spin_unlock(&reorder->lock);
return padata;
}
@@ -215,6 +214,7 @@ static void padata_reorder(struct parallel_data *pd)
struct padata_priv *padata;
struct padata_serial_queue *squeue;
struct padata_instance *pinst = pd->pinst;
+ struct padata_parallel_queue *next_queue;
/*
* We need to ensure that only one cpu can work on dequeueing of
@@ -230,27 +230,16 @@ static void padata_reorder(struct parallel_data *pd)
return;
while (1) {
- padata = padata_get_next(pd);
+ padata = padata_find_next(pd, true);
/*
* If the next object that needs serialization is parallel
	 * processed by another cpu and is still on its way to the
* cpu's reorder queue, nothing to do for now.
*/
- if (PTR_ERR(padata) == -EINPROGRESS)
+ if (!padata)
break;
- /*
- * This cpu has to do the parallel processing of the next
- * object. It's waiting in the cpu's parallelization queue,
- * so exit immediately.
- */
- if (PTR_ERR(padata) == -ENODATA) {
- del_timer(&pd->timer);
- spin_unlock_bh(&pd->lock);
- return;
- }
-
cb_cpu = padata->cb_cpu;
squeue = per_cpu_ptr(pd->squeue, cb_cpu);
@@ -258,77 +247,37 @@ static void padata_reorder(struct parallel_data *pd)
list_add_tail(&padata->list, &squeue->serial.list);
spin_unlock(&squeue->serial.lock);
- queue_work_on(cb_cpu, pinst->wq, &squeue->work);
+ queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
}
spin_unlock_bh(&pd->lock);
/*
* The next object that needs serialization might have arrived to
- * the reorder queues in the meantime, we will be called again
- * from the timer function if no one else cares for it.
+ * the reorder queues in the meantime.
*
- * Ensure reorder_objects is read after pd->lock is dropped so we see
- * an increment from another task in padata_do_serial. Pairs with
+ * Ensure reorder queue is read after pd->lock is dropped so we see
+ * new objects from another task in padata_do_serial. Pairs with
* smp_mb__after_atomic in padata_do_serial.
*/
smp_mb();
- if (atomic_read(&pd->reorder_objects)
- && !(pinst->flags & PADATA_RESET))
- mod_timer(&pd->timer, jiffies + HZ);
- else
- del_timer(&pd->timer);
- return;
+ next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
+ if (!list_empty(&next_queue->reorder.list) &&
+ padata_find_next(pd, false))
+ queue_work(pinst->serial_wq, &pd->reorder_work);
}
static void invoke_padata_reorder(struct work_struct *work)
{
- struct padata_parallel_queue *pqueue;
struct parallel_data *pd;
local_bh_disable();
- pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
- pd = pqueue->pd;
+ pd = container_of(work, struct parallel_data, reorder_work);
padata_reorder(pd);
local_bh_enable();
}
-static void padata_reorder_timer(struct timer_list *t)
-{
- struct parallel_data *pd = from_timer(pd, t, timer);
- unsigned int weight;
- int target_cpu, cpu;
-
- cpu = get_cpu();
-
- /* We don't lock pd here to not interfere with parallel processing
- * padata_reorder() calls on other CPUs. We just need any CPU out of
- * the cpumask.pcpu set. It would be nice if it's the right one but
- * it doesn't matter if we're off to the next one by using an outdated
- * pd->processed value.
- */
- weight = cpumask_weight(pd->cpumask.pcpu);
- target_cpu = padata_index_to_cpu(pd, pd->processed % weight);
-
- /* ensure to call the reorder callback on the correct CPU */
- if (cpu != target_cpu) {
- struct padata_parallel_queue *pqueue;
- struct padata_instance *pinst;
-
- /* The timer function is serialized wrt itself -- no locking
- * needed.
- */
- pinst = pd->pinst;
- pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
- queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
- } else {
- padata_reorder(pd);
- }
-
- put_cpu();
-}
-
static void padata_serial_worker(struct work_struct *serial_work)
{
struct padata_serial_queue *squeue;
@@ -367,47 +316,28 @@ static void padata_serial_worker(struct work_struct *serial_work)
*/
void padata_do_serial(struct padata_priv *padata)
{
- int cpu;
- struct padata_parallel_queue *pqueue;
- struct parallel_data *pd;
- int reorder_via_wq = 0;
-
- pd = padata->pd;
-
- cpu = get_cpu();
-
- /* We need to run on the same CPU padata_do_parallel(.., padata, ..)
- * was called on -- or, at least, enqueue the padata object into the
- * correct per-cpu queue.
- */
- if (cpu != padata->cpu) {
- reorder_via_wq = 1;
- cpu = padata->cpu;
- }
-
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
+ struct parallel_data *pd = padata->pd;
+ struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
+ padata->cpu);
+ struct padata_priv *cur;
spin_lock(&pqueue->reorder.lock);
+ /* Sort in ascending order of sequence number. */
+ list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
+ if (cur->seq_nr < padata->seq_nr)
+ break;
+ list_add(&padata->list, &cur->list);
atomic_inc(&pd->reorder_objects);
- list_add_tail(&padata->list, &pqueue->reorder.list);
spin_unlock(&pqueue->reorder.lock);
/*
- * Ensure the atomic_inc of reorder_objects above is ordered correctly
+ * Ensure the addition to the reorder list is ordered correctly
* with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
* in padata_reorder.
*/
smp_mb__after_atomic();
- put_cpu();
-
- /* If we're running on the wrong CPU, call padata_reorder() via a
- * kernel worker.
- */
- if (reorder_via_wq)
- queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
- else
- padata_reorder(pd);
+ padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
@@ -415,17 +345,36 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
const struct cpumask *pcpumask,
const struct cpumask *cbcpumask)
{
- if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
- return -ENOMEM;
+ struct workqueue_attrs *attrs;
+ int err = -ENOMEM;
+ if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
+ goto out;
cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
- if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
- free_cpumask_var(pd->cpumask.pcpu);
- return -ENOMEM;
- }
+ if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
+ goto free_pcpu_mask;
cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
+
+ attrs = alloc_workqueue_attrs();
+ if (!attrs)
+ goto free_cbcpu_mask;
+
+ /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
+ cpumask_copy(attrs->cpumask, pd->cpumask.pcpu);
+ err = apply_workqueue_attrs(pd->pinst->parallel_wq, attrs);
+ free_workqueue_attrs(attrs);
+ if (err < 0)
+ goto free_cbcpu_mask;
+
return 0;
+
+free_cbcpu_mask:
+ free_cpumask_var(pd->cpumask.cbcpu);
+free_pcpu_mask:
+ free_cpumask_var(pd->cpumask.pcpu);
+out:
+ return err;
}
static void __padata_list_init(struct padata_list *pd_list)
@@ -451,26 +400,15 @@ static void padata_init_squeues(struct parallel_data *pd)
/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
- int cpu_index, cpu;
+ int cpu;
struct padata_parallel_queue *pqueue;
- cpu_index = 0;
- for_each_possible_cpu(cpu) {
+ for_each_cpu(cpu, pd->cpumask.pcpu) {
pqueue = per_cpu_ptr(pd->pqueue, cpu);
- if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
- pqueue->cpu_index = -1;
- continue;
- }
-
- pqueue->pd = pd;
- pqueue->cpu_index = cpu_index;
- cpu_index++;
-
__padata_list_init(&pqueue->reorder);
__padata_list_init(&pqueue->parallel);
INIT_WORK(&pqueue->work, padata_parallel_worker);
- INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
atomic_set(&pqueue->num_obj, 0);
}
}
@@ -493,17 +431,19 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
pd->squeue = alloc_percpu(struct padata_serial_queue);
if (!pd->squeue)
goto err_free_pqueue;
+
+ pd->pinst = pinst;
if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
goto err_free_squeue;
padata_init_pqueues(pd);
padata_init_squeues(pd);
- timer_setup(&pd->timer, padata_reorder_timer, 0);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 0);
- pd->pinst = pinst;
spin_lock_init(&pd->lock);
+ pd->cpu = cpumask_first(pd->cpumask.pcpu);
+ INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
return pd;
@@ -538,8 +478,6 @@ static void padata_flush_queues(struct parallel_data *pd)
flush_work(&pqueue->work);
}
- del_timer_sync(&pd->timer);
-
if (atomic_read(&pd->reorder_objects))
padata_reorder(pd);
@@ -883,6 +821,8 @@ static void __padata_free(struct padata_instance *pinst)
padata_free_pd(pinst->pd);
free_cpumask_var(pinst->cpumask.pcpu);
free_cpumask_var(pinst->cpumask.cbcpu);
+ destroy_workqueue(pinst->serial_wq);
+ destroy_workqueue(pinst->parallel_wq);
kfree(pinst);
}
@@ -1016,13 +956,11 @@ static struct kobj_type padata_attr_type = {
* padata_alloc - allocate and initialize a padata instance and specify
* cpumasks for serial and parallel workers.
*
- * @wq: workqueue to use for the allocated padata instance
+ * @name: used to identify the instance
* @pcpumask: cpumask that will be used for padata parallelization
* @cbcpumask: cpumask that will be used for padata serialization
- *
- * Must be called from a cpus_read_lock() protected region
*/
-static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+static struct padata_instance *padata_alloc(const char *name,
const struct cpumask *pcpumask,
const struct cpumask *cbcpumask)
{
@@ -1033,11 +971,23 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
if (!pinst)
goto err;
- if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+ pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
+ name);
+ if (!pinst->parallel_wq)
goto err_free_inst;
+
+ get_online_cpus();
+
+ pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
+ WQ_CPU_INTENSIVE, 1, name);
+ if (!pinst->serial_wq)
+ goto err_put_cpus;
+
+ if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+ goto err_free_serial_wq;
if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
free_cpumask_var(pinst->cpumask.pcpu);
- goto err_free_inst;
+ goto err_free_serial_wq;
}
if (!padata_validate_cpumask(pinst, pcpumask) ||
!padata_validate_cpumask(pinst, cbcpumask))
@@ -1049,8 +999,6 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
rcu_assign_pointer(pinst->pd, pd);
- pinst->wq = wq;
-
cpumask_copy(pinst->cpumask.pcpu, pcpumask);
cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
@@ -1063,11 +1011,19 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
#ifdef CONFIG_HOTPLUG_CPU
cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
#endif
+
+ put_online_cpus();
+
return pinst;
err_free_masks:
free_cpumask_var(pinst->cpumask.pcpu);
free_cpumask_var(pinst->cpumask.cbcpu);
+err_free_serial_wq:
+ destroy_workqueue(pinst->serial_wq);
+err_put_cpus:
+ put_online_cpus();
+ destroy_workqueue(pinst->parallel_wq);
err_free_inst:
kfree(pinst);
err:
@@ -1079,14 +1035,11 @@ err:
* Use the cpu_possible_mask for serial and
* parallel workers.
*
- * @wq: workqueue to use for the allocated padata instance
- *
- * Must be called from a cpus_read_lock() protected region
+ * @name: used to identify the instance
*/
-struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
+struct padata_instance *padata_alloc_possible(const char *name)
{
- lockdep_assert_cpus_held();
- return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
+ return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
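
A hedged sketch of a caller adapted to the new padata_do_parallel() prototype; the instance, the embedded padata_priv, and the preferred-CPU choice are placeholders:

#include <linux/padata.h>
#include <linux/smp.h>

static int demo_submit(struct padata_instance *pinst,
		       struct padata_priv *padata)
{
	int cb_cpu = raw_smp_processor_id();	/* preferred callback CPU */

	/* cb_cpu is passed by reference: if it is not in cpumask.cbcpu,
	 * padata picks a fallback and writes it back here. */
	return padata_do_parallel(pinst, padata, &cb_cpu);
}
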
diff --git a/kernel/panic.c b/kernel/panic.c
index 057540b6eee9..47e8ebccc22b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -12,6 +12,7 @@
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
+#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
@@ -220,6 +221,13 @@ void panic(const char *fmt, ...)
#endif
/*
+ * If kgdb is enabled, give it a chance to run before we stop all
+ * the other CPUs or else we won't be able to debug processes left
+ * running on them.
+ */
+ kgdb_panic(buf);
+
+ /*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
* If we want to run this after calling panic_notifiers, pass
@@ -551,9 +559,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
{
disable_trace_on_warning();
- if (args)
- pr_warn(CUT_HERE);
-
if (file)
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
raw_smp_processor_id(), current->pid, file, line,
@@ -591,37 +596,26 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
add_taint(taint, LOCKDEP_STILL_OK);
}
-#ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+#ifndef __WARN_FLAGS
+void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ const char *fmt, ...)
{
struct warn_args args;
- args.fmt = fmt;
- va_start(args.args, fmt);
- __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
- &args);
- va_end(args.args);
-}
-EXPORT_SYMBOL(warn_slowpath_fmt);
+ pr_warn(CUT_HERE);
-void warn_slowpath_fmt_taint(const char *file, int line,
- unsigned taint, const char *fmt, ...)
-{
- struct warn_args args;
+ if (!fmt) {
+ __warn(file, line, __builtin_return_address(0), taint,
+ NULL, NULL);
+ return;
+ }
args.fmt = fmt;
va_start(args.args, fmt);
__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
va_end(args.args);
}
-EXPORT_SYMBOL(warn_slowpath_fmt_taint);
-
-void warn_slowpath_null(const char *file, int line)
-{
- pr_warn(CUT_HERE);
- __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
-}
-EXPORT_SYMBOL(warn_slowpath_null);
+EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index 41e83a779e19..9af5a50d3489 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -116,7 +116,7 @@ int pm_autosleep_set_state(suspend_state_t state)
int __init pm_autosleep_init(void)
{
- autosleep_ws = wakeup_source_register("autosleep");
+ autosleep_ws = wakeup_source_register(NULL, "autosleep");
if (!autosleep_ws)
return -ENOMEM;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index bdbd605c4215..e8710d179b35 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -254,7 +254,6 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
power_attr(pm_test);
#endif /* CONFIG_PM_SLEEP_DEBUG */
-#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
{
switch (step) {
@@ -275,6 +274,92 @@ static char *suspend_step_name(enum suspend_stat_step step)
}
}
+#define suspend_attr(_name) \
+static ssize_t _name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%d\n", suspend_stats._name); \
+} \
+static struct kobj_attribute _name = __ATTR_RO(_name)
+
+suspend_attr(success);
+suspend_attr(fail);
+suspend_attr(failed_freeze);
+suspend_attr(failed_prepare);
+suspend_attr(failed_suspend);
+suspend_attr(failed_suspend_late);
+suspend_attr(failed_suspend_noirq);
+suspend_attr(failed_resume);
+suspend_attr(failed_resume_early);
+suspend_attr(failed_resume_noirq);
+
+static ssize_t last_failed_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int index;
+ char *last_failed_dev = NULL;
+
+ index = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+ index %= REC_FAILED_NUM;
+ last_failed_dev = suspend_stats.failed_devs[index];
+
+ return sprintf(buf, "%s\n", last_failed_dev);
+}
+static struct kobj_attribute last_failed_dev = __ATTR_RO(last_failed_dev);
+
+static ssize_t last_failed_errno_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int index;
+ int last_failed_errno;
+
+ index = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
+ index %= REC_FAILED_NUM;
+ last_failed_errno = suspend_stats.errno[index];
+
+ return sprintf(buf, "%d\n", last_failed_errno);
+}
+static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);
+
+static ssize_t last_failed_step_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int index;
+ enum suspend_stat_step step;
+ char *last_failed_step = NULL;
+
+ index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
+ index %= REC_FAILED_NUM;
+ step = suspend_stats.failed_steps[index];
+ last_failed_step = suspend_step_name(step);
+
+ return sprintf(buf, "%s\n", last_failed_step);
+}
+static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);
+
+static struct attribute *suspend_attrs[] = {
+ &success.attr,
+ &fail.attr,
+ &failed_freeze.attr,
+ &failed_prepare.attr,
+ &failed_suspend.attr,
+ &failed_suspend_late.attr,
+ &failed_suspend_noirq.attr,
+ &failed_resume.attr,
+ &failed_resume_early.attr,
+ &failed_resume_noirq.attr,
+ &last_failed_dev.attr,
+ &last_failed_errno.attr,
+ &last_failed_step.attr,
+ NULL,
+};
+
+static struct attribute_group suspend_attr_group = {
+ .name = "suspend_stats",
+ .attrs = suspend_attrs,
+};
+
+#ifdef CONFIG_DEBUG_FS
static int suspend_stats_show(struct seq_file *s, void *unused)
{
int i, index, last_dev, last_errno, last_step;
@@ -495,7 +580,7 @@ static suspend_state_t decode_state(const char *buf, size_t n)
len = p ? p - buf : n;
/* Check hibernation first. */
- if (len == 4 && !strncmp(buf, "disk", len))
+ if (len == 4 && str_has_prefix(buf, "disk"))
return PM_SUSPEND_MAX;
#ifdef CONFIG_SUSPEND
@@ -794,6 +879,14 @@ static const struct attribute_group attr_group = {
.attrs = g,
};
+static const struct attribute_group *attr_groups[] = {
+ &attr_group,
+#ifdef CONFIG_PM_SLEEP
+ &suspend_attr_group,
+#endif
+ NULL,
+};
+
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);
@@ -815,7 +908,7 @@ static int __init pm_init(void)
power_kobj = kobject_create_and_add("power", NULL);
if (!power_kobj)
return -ENOMEM;
- error = sysfs_create_group(power_kobj, &attr_group);
+ error = sysfs_create_groups(power_kobj, attr_groups);
if (error)
return error;
pm_print_times_init();
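
Because suspend_attr_group has a .name, sysfs_create_groups() puts the new attributes in a suspend_stats/ subdirectory of the power kobject instead of alongside the existing files. A stripped-down sketch of the same pattern with made-up attributes:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_count_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);		/* placeholder value */
}
static struct kobj_attribute demo_count = __ATTR_RO(demo_count);

static struct attribute *demo_attrs[] = {
	&demo_count.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.name	= "demo_stats",		/* becomes a subdirectory */
	.attrs	= demo_attrs,
};
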
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 33e3febaba53..9568a2fe7c11 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -78,57 +78,9 @@ static struct pm_qos_object cpu_dma_pm_qos = {
.name = "cpu_dma_latency",
};
-static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
-static struct pm_qos_constraints network_lat_constraints = {
- .list = PLIST_HEAD_INIT(network_lat_constraints.list),
- .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
- .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
- .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
- .type = PM_QOS_MIN,
- .notifiers = &network_lat_notifier,
-};
-static struct pm_qos_object network_lat_pm_qos = {
- .constraints = &network_lat_constraints,
- .name = "network_latency",
-};
-
-
-static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
-static struct pm_qos_constraints network_tput_constraints = {
- .list = PLIST_HEAD_INIT(network_tput_constraints.list),
- .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
- .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
- .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
- .type = PM_QOS_MAX,
- .notifiers = &network_throughput_notifier,
-};
-static struct pm_qos_object network_throughput_pm_qos = {
- .constraints = &network_tput_constraints,
- .name = "network_throughput",
-};
-
-
-static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
-static struct pm_qos_constraints memory_bw_constraints = {
- .list = PLIST_HEAD_INIT(memory_bw_constraints.list),
- .target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
- .default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
- .no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
- .type = PM_QOS_SUM,
- .notifiers = &memory_bandwidth_notifier,
-};
-static struct pm_qos_object memory_bandwidth_pm_qos = {
- .constraints = &memory_bw_constraints,
- .name = "memory_bandwidth",
-};
-
-
static struct pm_qos_object *pm_qos_array[] = {
&null_pm_qos,
&cpu_dma_pm_qos,
- &network_lat_pm_qos,
- &network_throughput_pm_qos,
- &memory_bandwidth_pm_qos,
};
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c874a7026e24..f3b7239f1892 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -121,43 +121,25 @@ static void s2idle_loop(void)
{
pm_pr_dbg("suspend-to-idle\n");
+ /*
+ * Suspend-to-idle equals:
+ * frozen processes + suspended devices + idle processors.
+ * Thus s2idle_enter() should be called right after all devices have
+ * been suspended.
+ *
+ * Wakeups during the noirq suspend of devices may be spurious, so try
+ * to avoid them upfront.
+ */
for (;;) {
- int error;
-
- dpm_noirq_begin();
-
- /*
- * Suspend-to-idle equals
- * frozen processes + suspended devices + idle processors.
- * Thus s2idle_enter() should be called right after
- * all devices have been suspended.
- *
- * Wakeups during the noirq suspend of devices may be spurious,
- * so prevent them from terminating the loop right away.
- */
- error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
- if (!error)
- s2idle_enter();
- else if (error == -EBUSY && pm_wakeup_pending())
- error = 0;
-
- if (!error && s2idle_ops && s2idle_ops->wake)
+ if (s2idle_ops && s2idle_ops->wake)
s2idle_ops->wake();
- dpm_noirq_resume_devices(PMSG_RESUME);
-
- dpm_noirq_end();
-
- if (error)
- break;
-
- if (s2idle_ops && s2idle_ops->sync)
- s2idle_ops->sync();
-
if (pm_wakeup_pending())
break;
pm_wakeup_clear(false);
+
+ s2idle_enter();
}
pm_pr_dbg("resume from suspend-to-idle\n");
@@ -271,14 +253,21 @@ static int platform_suspend_prepare_late(suspend_state_t state)
static int platform_suspend_prepare_noirq(suspend_state_t state)
{
- return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare_late ?
- suspend_ops->prepare_late() : 0;
+ if (state == PM_SUSPEND_TO_IDLE)
+ return s2idle_ops && s2idle_ops->prepare_late ?
+ s2idle_ops->prepare_late() : 0;
+
+ return suspend_ops->prepare_late ? suspend_ops->prepare_late() : 0;
}
static void platform_resume_noirq(suspend_state_t state)
{
- if (state != PM_SUSPEND_TO_IDLE && suspend_ops->wake)
+ if (state == PM_SUSPEND_TO_IDLE) {
+ if (s2idle_ops && s2idle_ops->restore_early)
+ s2idle_ops->restore_early();
+ } else if (suspend_ops->wake) {
suspend_ops->wake();
+ }
}
static void platform_resume_early(suspend_state_t state)
@@ -415,11 +404,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
if (error)
goto Devices_early_resume;
- if (state == PM_SUSPEND_TO_IDLE && pm_test_level != TEST_PLATFORM) {
- s2idle_loop();
- goto Platform_early_resume;
- }
-
error = dpm_suspend_noirq(PMSG_SUSPEND);
if (error) {
pr_err("noirq suspend of devices failed\n");
@@ -432,6 +416,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
+ if (state == PM_SUSPEND_TO_IDLE) {
+ s2idle_loop();
+ goto Platform_wake;
+ }
+
error = suspend_disable_secondary_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 4210152e56f0..105df4dfc783 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(wakelocks_lock);
struct wakelock {
char *name;
struct rb_node node;
- struct wakeup_source ws;
+ struct wakeup_source *ws;
#ifdef CONFIG_PM_WAKELOCKS_GC
struct list_head lru;
#endif
@@ -46,7 +46,7 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
wl = rb_entry(node, struct wakelock, node);
- if (wl->ws.active == show_active)
+ if (wl->ws->active == show_active)
str += scnprintf(str, end - str, "%s ", wl->name);
}
if (str > buf)
@@ -112,16 +112,16 @@ static void __wakelocks_gc(struct work_struct *work)
u64 idle_time_ns;
bool active;
- spin_lock_irq(&wl->ws.lock);
- idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
- active = wl->ws.active;
- spin_unlock_irq(&wl->ws.lock);
+ spin_lock_irq(&wl->ws->lock);
+ idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws->last_time));
+ active = wl->ws->active;
+ spin_unlock_irq(&wl->ws->lock);
if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
break;
if (!active) {
- wakeup_source_remove(&wl->ws);
+ wakeup_source_unregister(wl->ws);
rb_erase(&wl->node, &wakelocks_tree);
list_del(&wl->lru);
kfree(wl->name);
@@ -187,9 +187,15 @@ static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
kfree(wl);
return ERR_PTR(-ENOMEM);
}
- wl->ws.name = wl->name;
- wl->ws.last_time = ktime_get();
- wakeup_source_add(&wl->ws);
+
+ wl->ws = wakeup_source_register(NULL, wl->name);
+ if (!wl->ws) {
+ kfree(wl->name);
+ kfree(wl);
+ return ERR_PTR(-ENOMEM);
+ }
+ wl->ws->last_time = ktime_get();
+
rb_link_node(&wl->node, parent, node);
rb_insert_color(&wl->node, &wakelocks_tree);
wakelocks_lru_add(wl);
@@ -233,9 +239,9 @@ int pm_wake_lock(const char *buf)
u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;
do_div(timeout_ms, NSEC_PER_MSEC);
- __pm_wakeup_event(&wl->ws, timeout_ms);
+ __pm_wakeup_event(wl->ws, timeout_ms);
} else {
- __pm_stay_awake(&wl->ws);
+ __pm_stay_awake(wl->ws);
}
wakelocks_lru_most_recent(wl);
@@ -271,7 +277,7 @@ int pm_wake_unlock(const char *buf)
ret = PTR_ERR(wl);
goto out;
}
- __pm_relax(&wl->ws);
+ __pm_relax(wl->ws);
wakelocks_lru_most_recent(wl);
wakelocks_gc();
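
The wakelock conversion above is one user of the dynamic wakeup-source API; a minimal sketch, assuming a made-up driver that only needs to hold the system awake for a while:

#include <linux/errno.h>
#include <linux/pm_wakeup.h>

static struct wakeup_source *demo_ws;

static int demo_start(void)
{
	demo_ws = wakeup_source_register(NULL, "demo");	/* no struct device */
	if (!demo_ws)
		return -ENOMEM;
	__pm_stay_awake(demo_ws);
	return 0;
}

static void demo_stop(void)
{
	__pm_relax(demo_ws);
	wakeup_source_unregister(demo_ws);	/* also frees the source */
}
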
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
index 1d21ebacfdb8..17a9591e54ff 100644
--- a/kernel/printk/braille.c
+++ b/kernel/printk/braille.c
@@ -11,11 +11,18 @@
int _braille_console_setup(char **str, char **brl_options)
{
- if (!strncmp(*str, "brl,", 4)) {
+ size_t len;
+
+ len = str_has_prefix(*str, "brl,");
+ if (len) {
*brl_options = "";
- *str += 4;
- } else if (!strncmp(*str, "brl=", 4)) {
- *brl_options = *str + 4;
+ *str += len;
+ return 0;
+ }
+
+ len = str_has_prefix(*str, "brl=");
+ if (len) {
+ *brl_options = *str + len;
*str = strchr(*brl_options, ',');
if (!*str) {
pr_err("need port name after brl=\n");
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 1888f6a3b694..ca65327a6de8 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -118,19 +118,29 @@ static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
static int __control_devkmsg(char *str)
{
+ size_t len;
+
if (!str)
return -EINVAL;
- if (!strncmp(str, "on", 2)) {
+ len = str_has_prefix(str, "on");
+ if (len) {
devkmsg_log = DEVKMSG_LOG_MASK_ON;
- return 2;
- } else if (!strncmp(str, "off", 3)) {
+ return len;
+ }
+
+ len = str_has_prefix(str, "off");
+ if (len) {
devkmsg_log = DEVKMSG_LOG_MASK_OFF;
- return 3;
- } else if (!strncmp(str, "ratelimit", 9)) {
+ return len;
+ }
+
+ len = str_has_prefix(str, "ratelimit");
+ if (len) {
devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
- return 9;
+ return len;
}
+
return -EINVAL;
}
@@ -3274,7 +3284,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
/* move first record forward until length fits into the buffer */
seq = dumper->cur_seq;
idx = dumper->cur_idx;
- while (l > size && seq < dumper->next_seq) {
+ while (l >= size && seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
l -= msg_print_text(msg, true, time, NULL, 0);
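
str_has_prefix() returns the length of the prefix on a match and 0 otherwise, which is what lets the hard-coded 2/3/9 return values above disappear; a small sketch with a hypothetical keyword:

#include <linux/errno.h>
#include <linux/string.h>

static int demo_parse_keyword(const char *str)
{
	size_t len;

	len = str_has_prefix(str, "ratelimit");
	if (len)
		return len;	/* number of characters consumed */

	return -EINVAL;
}
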
diff --git a/kernel/resource.c b/kernel/resource.c
index 7ea4306503c5..76036a41143b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -487,8 +487,8 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
while (start < end &&
!find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
false, &res)) {
- pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
- end_pfn = (res.end + 1) >> PAGE_SHIFT;
+ pfn = PFN_UP(res.start);
+ end_pfn = PFN_DOWN(res.end + 1);
if (end_pfn > pfn)
ret = (*func)(pfn, end_pfn - pfn, arg);
if (ret)
@@ -1644,19 +1644,8 @@ void resource_list_free(struct list_head *head)
EXPORT_SYMBOL(resource_list_free);
#ifdef CONFIG_DEVICE_PRIVATE
-/**
- * devm_request_free_mem_region - find free region for device private memory
- *
- * @dev: device struct to bind the resource to
- * @size: size in bytes of the device memory to add
- * @base: resource tree to look in
- *
- * This function tries to find an empty range of physical address big enough to
- * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE
- * memory, which in turn allocates struct pages.
- */
-struct resource *devm_request_free_mem_region(struct device *dev,
- struct resource *base, unsigned long size)
+static struct resource *__request_free_mem_region(struct device *dev,
+ struct resource *base, unsigned long size, const char *name)
{
resource_size_t end, addr;
struct resource *res;
@@ -1670,7 +1659,10 @@ struct resource *devm_request_free_mem_region(struct device *dev,
REGION_DISJOINT)
continue;
- res = devm_request_mem_region(dev, addr, size, dev_name(dev));
+ if (dev)
+ res = devm_request_mem_region(dev, addr, size, name);
+ else
+ res = request_mem_region(addr, size, name);
if (!res)
return ERR_PTR(-ENOMEM);
res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
@@ -1679,7 +1671,32 @@ struct resource *devm_request_free_mem_region(struct device *dev,
return ERR_PTR(-ERANGE);
}
+
+/**
+ * devm_request_free_mem_region - find free region for device private memory
+ *
+ * @dev: device struct to bind the resource to
+ * @size: size in bytes of the device memory to add
+ * @base: resource tree to look in
+ *
+ * This function tries to find an empty range of physical addresses big enough to
+ * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE
+ * memory, which in turn allocates struct pages.
+ */
+struct resource *devm_request_free_mem_region(struct device *dev,
+ struct resource *base, unsigned long size)
+{
+ return __request_free_mem_region(dev, base, size, dev_name(dev));
+}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
+
+struct resource *request_free_mem_region(struct resource *base,
+ unsigned long size, const char *name)
+{
+ return __request_free_mem_region(NULL, base, size, name);
+}
+EXPORT_SYMBOL_GPL(request_free_mem_region);
+
#endif /* CONFIG_DEVICE_PRIVATE */
static int __init strict_iomem(char *str)
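
A hedged sketch of the new device-less variant, for a caller that has no struct device to bind the region to; the region name and the choice of iomem_resource as the tree to search are placeholders:

#include <linux/ioport.h>

static struct resource *demo_reserve_private(unsigned long size)
{
	/* Same semantics as devm_request_free_mem_region(), minus the
	 * device-managed cleanup: the caller must release the region. */
	return request_free_mem_region(&iomem_resource, size, "demo-private");
}
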
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5e8387bdd09c..f9a1346a5fa9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3871,13 +3871,22 @@ static noinline void __schedule_bug(struct task_struct *prev)
/*
* Various schedule()-time debugging checks and statistics:
*/
-static inline void schedule_debug(struct task_struct *prev)
+static inline void schedule_debug(struct task_struct *prev, bool preempt)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
if (task_stack_end_corrupted(prev))
panic("corrupted stack end detected inside scheduler\n");
#endif
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ if (!preempt && prev->state && prev->non_block_count) {
+ printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
+ prev->comm, prev->pid, prev->non_block_count);
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+ }
+#endif
+
if (unlikely(in_atomic_preempt_off())) {
__schedule_bug(prev);
preempt_count_set(PREEMPT_DISABLED);
@@ -3989,7 +3998,7 @@ static void __sched notrace __schedule(bool preempt)
rq = cpu_rq(cpu);
prev = rq->curr;
- schedule_debug(prev);
+ schedule_debug(prev, preempt);
if (sched_feat(HRTICK))
hrtick_clear(rq);
@@ -6763,7 +6772,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
rcu_sleep_check();
if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
- !is_idle_task(current)) ||
+ !is_idle_task(current) && !current->non_block_count) ||
system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
oops_in_progress)
return;
@@ -6779,8 +6788,8 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
"BUG: sleeping function called from invalid context at %s:%d\n",
file, line);
printk(KERN_ERR
- "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
- in_atomic(), irqs_disabled(),
+ "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(), current->non_block_count,
current->pid, current->comm);
if (task_stack_end_corrupted(current))
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index fdce9cfaca05..86800b4d5453 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -117,6 +117,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
struct cpufreq_policy *policy = sg_policy->policy;
+ int cpu;
if (!sugov_update_next_freq(sg_policy, time, next_freq))
return;
@@ -126,7 +127,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
return;
policy->cur = next_freq;
- trace_cpu_frequency(next_freq, smp_processor_id());
+
+ if (trace_cpu_frequency_enabled()) {
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_frequency(next_freq, cpu);
+ }
}
static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8bfeb6395bdd..8dad5aa600ea 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -238,7 +238,6 @@ static void do_idle(void)
tick_nohz_idle_enter();
while (!need_resched()) {
- check_pgt_cache();
rmb();
local_irq_disable();
@@ -312,7 +311,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-void play_idle(unsigned long duration_ms)
+void play_idle(unsigned long duration_us)
{
struct idle_timer it;
@@ -324,7 +323,7 @@ void play_idle(unsigned long duration_ms)
WARN_ON_ONCE(current->nr_cpus_allowed != 1);
WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
- WARN_ON_ONCE(!duration_ms);
+ WARN_ON_ONCE(!duration_us);
rcu_sleep_check();
preempt_disable();
@@ -334,7 +333,8 @@ void play_idle(unsigned long duration_ms)
it.done = 0;
hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
it.timer.function = idle_inject_timer_fn;
- hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
+ hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_PINNED);
while (!READ_ONCE(it.done))
do_idle();
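
Callers of play_idle() now pass microseconds; a sketch, assuming it runs in a per-CPU kthread as the function's WARN_ON_ONCE checks require:

#include <linux/cpu.h>

static void demo_inject_idle(void)
{
	play_idle(5000);	/* 5000 us == 5 ms of forced idle */
}
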
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 078950d9605b..00fcea236eba 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -264,7 +264,8 @@ extern struct ctl_table epoll_table[];
extern struct ctl_table firmware_config_table[];
#endif
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
+ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
int sysctl_legacy_va_layout;
#endif
@@ -1573,7 +1574,8 @@ static struct ctl_table vm_table[] = {
.proc_handler = proc_dointvec,
.extra1 = SYSCTL_ZERO,
},
-#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
+ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
{
.procname = "legacy_va_layout",
.data = &sysctl_legacy_va_layout,
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 271ce6c12907..451f9d05ccfe 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -97,7 +97,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
if (!device_may_wakeup(rtc->dev.parent))
return -1;
- __ws = wakeup_source_register("alarmtimer");
+ __ws = wakeup_source_register(dev, "alarmtimer");
spin_lock_irqsave(&rtcdev_lock, flags);
if (!rtcdev) {
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 6a64d7772870..e08527f50d2a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -520,7 +520,8 @@ config BPF_EVENTS
bool
default y
help
- This allows the user to attach BPF programs to kprobe events.
+ This allows the user to attach BPF programs to kprobe, uprobe, and
+ tracepoint events.
config DYNAMIC_EVENTS
def_bool n
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 8dfd5021b933..7950a0356042 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -276,7 +276,7 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
int index = task->curr_ret_stack;
int i;
- if (ret != (unsigned long)return_to_handler)
+ if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
return ret;
if (index < 0)
@@ -294,7 +294,7 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
{
int task_idx;
- if (ret != (unsigned long)return_to_handler)
+ if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
return ret;
task_idx = task->curr_ret_stack;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 356b848c697a..62a50bf399d6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6036,11 +6036,7 @@ clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
{
struct ftrace_func_entry *entry;
- if (ftrace_hash_empty(hash))
- return;
-
- entry = __ftrace_lookup_ip(hash, func->ip);
-
+ entry = ftrace_lookup_ip(hash, func->ip);
/*
* Do not allow this rec to match again.
* Yeah, it may waste some memory, but will be removed
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 947ba433865f..252f79c435f8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1854,7 +1854,7 @@ int __init register_tracer(struct tracer *type)
return ret;
}
-void tracing_reset(struct trace_buffer *buf, int cpu)
+static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
{
struct ring_buffer *buffer = buf->buffer;
@@ -4251,7 +4251,7 @@ static int tracing_open(struct inode *inode, struct file *file)
if (cpu == RING_BUFFER_ALL_CPUS)
tracing_reset_online_cpus(trace_buf);
else
- tracing_reset(trace_buf, cpu);
+ tracing_reset_cpu(trace_buf, cpu);
}
if (file->f_mode & FMODE_READ) {
@@ -4815,15 +4815,15 @@ static const char readme_msg[] =
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_DYNAMIC_EVENTS
- " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
+ " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_KPROBE_EVENTS
- " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
+ " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
- " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
+ " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
@@ -4848,7 +4848,7 @@ static const char readme_msg[] =
#else
"\t $stack<index>, $stack, $retval, $comm,\n"
#endif
- "\t +|-[u]<offset>(<fetcharg>)\n"
+ "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
"\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
"\t <type>\\[<array-size>\\]\n"
@@ -6742,7 +6742,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
tracing_reset_online_cpus(&tr->max_buffer);
else
- tracing_reset(&tr->max_buffer, iter->cpu_file);
+ tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
}
break;
}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 005f08629b8b..26b0a08f3c7d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -677,7 +677,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
-void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index fa100ed3b4de..a41fed46c285 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -47,6 +47,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
return -EINVAL;
event++;
}
+ argc--; argv++;
p = strchr(event, '/');
if (p) {
@@ -61,10 +62,13 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
for_each_dyn_event_safe(pos, n) {
if (type && type != pos->ops)
continue;
- if (pos->ops->match(system, event, pos)) {
- ret = pos->ops->free(pos);
+ if (!pos->ops->match(system, event,
+ argc, (const char **)argv, pos))
+ continue;
+
+ ret = pos->ops->free(pos);
+ if (ret)
break;
- }
}
mutex_unlock(&event_mutex);
diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h
index 8c334064e4d6..46898138d2df 100644
--- a/kernel/trace/trace_dynevent.h
+++ b/kernel/trace/trace_dynevent.h
@@ -31,8 +31,9 @@ struct dyn_event;
* @is_busy: Check whether given event is busy so that it can not be deleted.
 *	Return true if it is busy, otherwise false.
 * @free: Delete the given event. Return 0 on success, otherwise an error.
- * @match: Check whether given event and system name match this event.
- * Return true if it matches, otherwides false.
+ * @match: Check whether the given event and system name match this event. The argc
+ *	and argv are used for an exact match. Return true if it matches, otherwise
+ *	false.
*
* Except for @create, these methods are called under holding event_mutex.
*/
@@ -43,7 +44,7 @@ struct dyn_event_operations {
bool (*is_busy)(struct dyn_event *ev);
int (*free)(struct dyn_event *ev);
bool (*match)(const char *system, const char *event,
- struct dyn_event *ev);
+ int argc, const char **argv, struct dyn_event *ev);
};
/* Register new dyn_event type -- must be called at first */
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index ca6b0dff60c5..9468bd8d44a2 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -13,6 +13,10 @@
#include <linux/rculist.h>
#include <linux/tracefs.h>
+/* for gfp flag names */
+#include <linux/trace_events.h>
+#include <trace/events/mmflags.h>
+
#include "tracing_map.h"
#include "trace.h"
#include "trace_dynevent.h"
@@ -374,7 +378,7 @@ static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
static bool synth_event_match(const char *system, const char *event,
- struct dyn_event *ev);
+ int argc, const char **argv, struct dyn_event *ev);
static struct dyn_event_operations synth_event_ops = {
.create = synth_event_create,
@@ -422,7 +426,7 @@ static bool synth_event_is_busy(struct dyn_event *ev)
}
static bool synth_event_match(const char *system, const char *event,
- struct dyn_event *ev)
+ int argc, const char **argv, struct dyn_event *ev)
{
struct synth_event *sev = to_synth_event(ev);
@@ -752,6 +756,8 @@ static int synth_field_size(char *type)
size = sizeof(unsigned long);
else if (strcmp(type, "pid_t") == 0)
size = sizeof(pid_t);
+ else if (strcmp(type, "gfp_t") == 0)
+ size = sizeof(gfp_t);
else if (synth_field_is_string(type))
size = synth_field_string_size(type);
@@ -792,6 +798,8 @@ static const char *synth_field_fmt(char *type)
fmt = "%lu";
else if (strcmp(type, "pid_t") == 0)
fmt = "%d";
+ else if (strcmp(type, "gfp_t") == 0)
+ fmt = "%x";
else if (synth_field_is_string(type))
fmt = "%s";
@@ -834,9 +842,20 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
i == se->n_fields - 1 ? "" : " ");
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
} else {
+ struct trace_print_flags __flags[] = {
+ __def_gfpflag_names, {-1, NULL} };
+
trace_seq_printf(s, print_fmt, se->fields[i]->name,
entry->fields[n_u64],
i == se->n_fields - 1 ? "" : " ");
+
+ if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
+ trace_seq_puts(s, " (");
+ trace_print_flags_seq(s, "|",
+ entry->fields[n_u64],
+ __flags);
+ trace_seq_putc(s, ')');
+ }
n_u64++;
}
}
@@ -2785,6 +2804,8 @@ static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
return NULL;
}
+ alias->var_ref_idx = var_ref->var_ref_idx;
+
return alias;
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 9d483ad9bb6c..402dc3ce88d3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -39,7 +39,7 @@ static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
- struct dyn_event *ev);
+ int argc, const char **argv, struct dyn_event *ev);
static struct dyn_event_operations trace_kprobe_ops = {
.create = trace_kprobe_create,
@@ -137,13 +137,36 @@ static bool trace_kprobe_is_busy(struct dyn_event *ev)
return trace_probe_is_enabled(&tk->tp);
}
+static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
+ int argc, const char **argv)
+{
+ char buf[MAX_ARGSTR_LEN + 1];
+
+ if (!argc)
+ return true;
+
+ if (!tk->symbol)
+ snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
+ else if (tk->rp.kp.offset)
+ snprintf(buf, sizeof(buf), "%s+%u",
+ trace_kprobe_symbol(tk), tk->rp.kp.offset);
+ else
+ snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
+ if (strcmp(buf, argv[0]))
+ return false;
+ argc--; argv++;
+
+ return trace_probe_match_command_args(&tk->tp, argc, argv);
+}
+
static bool trace_kprobe_match(const char *system, const char *event,
- struct dyn_event *ev)
+ int argc, const char **argv, struct dyn_event *ev)
{
struct trace_kprobe *tk = to_trace_kprobe(ev);
return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
- (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0);
+ (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
+ trace_kprobe_match_command_head(tk, argc, argv);
}
static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
@@ -180,20 +203,33 @@ unsigned long trace_kprobe_address(struct trace_kprobe *tk)
return addr;
}
+static nokprobe_inline struct trace_kprobe *
+trace_kprobe_primary_from_call(struct trace_event_call *call)
+{
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return NULL;
+
+ return container_of(tp, struct trace_kprobe, tp);
+}
+
bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
- struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+ struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
- return kprobe_on_func_entry(tk->rp.kp.addr,
+ return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
- tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
+ tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
}
bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
- struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+ struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
- return within_error_injection_list(trace_kprobe_address(tk));
+ return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
+ false;
}
static int register_kprobe_event(struct trace_kprobe *tk);
@@ -291,32 +327,68 @@ static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
return ret;
}
+static void __disable_trace_kprobe(struct trace_probe *tp)
+{
+ struct trace_probe *pos;
+ struct trace_kprobe *tk;
+
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tk = container_of(pos, struct trace_kprobe, tp);
+ if (!trace_kprobe_is_registered(tk))
+ continue;
+ if (trace_kprobe_is_return(tk))
+ disable_kretprobe(&tk->rp);
+ else
+ disable_kprobe(&tk->rp.kp);
+ }
+}
+
/*
* Enable trace_probe
* if the file is NULL, enable "perf" handler, or enable "trace" handler.
*/
-static int
-enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
+static int enable_trace_kprobe(struct trace_event_call *call,
+ struct trace_event_file *file)
{
- bool enabled = trace_probe_is_enabled(&tk->tp);
+ struct trace_probe *pos, *tp;
+ struct trace_kprobe *tk;
+ bool enabled;
int ret = 0;
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+ enabled = trace_probe_is_enabled(tp);
+
+ /* This also changes "enabled" state */
if (file) {
- ret = trace_probe_add_file(&tk->tp, file);
+ ret = trace_probe_add_file(tp, file);
if (ret)
return ret;
} else
- trace_probe_set_flag(&tk->tp, TP_FLAG_PROFILE);
+ trace_probe_set_flag(tp, TP_FLAG_PROFILE);
if (enabled)
return 0;
- ret = __enable_trace_kprobe(tk);
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tk = container_of(pos, struct trace_kprobe, tp);
+ if (trace_kprobe_has_gone(tk))
+ continue;
+ ret = __enable_trace_kprobe(tk);
+ if (ret)
+ break;
+ enabled = true;
+ }
+
if (ret) {
+ /* Failed to enable one of them. Roll back all */
+ if (enabled)
+ __disable_trace_kprobe(tp);
if (file)
- trace_probe_remove_file(&tk->tp, file);
+ trace_probe_remove_file(tp, file);
else
- trace_probe_clear_flag(&tk->tp, TP_FLAG_PROFILE);
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
}
return ret;
@@ -326,11 +398,14 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
* Disable trace_probe
* if the file is NULL, disable "perf" handler, or disable "trace" handler.
*/
-static int
-disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
+static int disable_trace_kprobe(struct trace_event_call *call,
+ struct trace_event_file *file)
{
- struct trace_probe *tp = &tk->tp;
- int ret = 0;
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
if (file) {
if (!trace_probe_get_file_link(tp, file))
@@ -341,12 +416,8 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
} else
trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
- if (!trace_probe_is_enabled(tp) && trace_kprobe_is_registered(tk)) {
- if (trace_kprobe_is_return(tk))
- disable_kretprobe(&tk->rp);
- else
- disable_kprobe(&tk->rp.kp);
- }
+ if (!trace_probe_is_enabled(tp))
+ __disable_trace_kprobe(tp);
out:
if (file)
@@ -358,7 +429,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
*/
trace_probe_remove_file(tp, file);
- return ret;
+ return 0;
}
#if defined(CONFIG_KPROBES_ON_FTRACE) && \
@@ -437,6 +508,10 @@ static void __unregister_trace_kprobe(struct trace_kprobe *tk)
/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
+ /* If other probes are on the event, just unregister kprobe */
+ if (trace_probe_has_sibling(&tk->tp))
+ goto unreg;
+
/* Enabled event can not be unregistered */
if (trace_probe_is_enabled(&tk->tp))
return -EBUSY;
@@ -445,12 +520,82 @@ static int unregister_trace_kprobe(struct trace_kprobe *tk)
if (unregister_kprobe_event(tk))
return -EBUSY;
+unreg:
__unregister_trace_kprobe(tk);
dyn_event_remove(&tk->devent);
+ trace_probe_unlink(&tk->tp);
return 0;
}
+static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
+ struct trace_kprobe *comp)
+{
+ struct trace_probe_event *tpe = orig->tp.event;
+ struct trace_probe *pos;
+ int i;
+
+ list_for_each_entry(pos, &tpe->probes, list) {
+ orig = container_of(pos, struct trace_kprobe, tp);
+ if (strcmp(trace_kprobe_symbol(orig),
+ trace_kprobe_symbol(comp)) ||
+ trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
+ continue;
+
+ /*
+ * trace_probe_compare_arg_type() ensured that nr_args and
+ * each argument's name and type are the same. Let's compare comm.
+ */
+ for (i = 0; i < orig->tp.nr_args; i++) {
+ if (strcmp(orig->tp.args[i].comm,
+ comp->tp.args[i].comm))
+ break;
+ }
+
+ if (i == orig->tp.nr_args)
+ return true;
+ }
+
+ return false;
+}
+
+static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
+{
+ int ret;
+
+ ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
+ if (ret) {
+ /* Note that the argument index starts at 2 */
+ trace_probe_log_set_index(ret + 1);
+ trace_probe_log_err(0, DIFF_ARG_TYPE);
+ return -EEXIST;
+ }
+ if (trace_kprobe_has_same_kprobe(to, tk)) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, SAME_PROBE);
+ return -EEXIST;
+ }
+
+ /* Append to existing event */
+ ret = trace_probe_append(&tk->tp, &to->tp);
+ if (ret)
+ return ret;
+
+ /* Register k*probe */
+ ret = __register_trace_kprobe(tk);
+ if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
+ pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
+ ret = 0;
+ }
+
+ if (ret)
+ trace_probe_unlink(&tk->tp);
+ else
+ dyn_event_add(&tk->devent);
+
+ return ret;
+}
+
/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
@@ -459,14 +604,17 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
mutex_lock(&event_mutex);
- /* Delete old (same name) event if exist */
old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
trace_probe_group_name(&tk->tp));
if (old_tk) {
- ret = unregister_trace_kprobe(old_tk);
- if (ret < 0)
- goto end;
- free_trace_kprobe(old_tk);
+ if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, DIFF_PROBE_TYPE);
+ ret = -EEXIST;
+ } else {
+ ret = append_trace_kprobe(tk, old_tk);
+ }
+ goto end;
}
/* Register new event */
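With this hunk, reusing an existing event name no longer deletes the old probe: if the probe type matches and the argument signature agrees, the new definition is appended as a sibling; otherwise registration fails with -EEXIST and one of the DIFF_PROBE_TYPE, DIFF_ARG_TYPE or SAME_PROBE errors added later in this patch. A hedged sketch in the style of the self-tests further down (event, symbol and argument names are illustrative):

static __init int example_append_semantics(void)
{
	int ret;

	/* First definition creates the "myevent" trace event */
	ret = trace_run_command("p:myevent vfs_read count=%dx",
				create_or_delete_trace_kprobe);
	if (ret)
		return ret;

	/* Same event name and argument signature: appended as a sibling probe */
	ret = trace_run_command("p:myevent vfs_write count=%dx",
				create_or_delete_trace_kprobe);
	if (ret)
		return ret;

	/* A return probe under the same event name is rejected (DIFF_PROBE_TYPE) */
	return trace_run_command("r:myevent vfs_read count=$retval",
				 create_or_delete_trace_kprobe);
}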
@@ -700,7 +848,7 @@ static int trace_kprobe_create(int argc, const char *argv[])
trace_probe_log_err(0, BAD_INSN_BNDRY);
else if (ret == -ENOENT)
trace_probe_log_err(0, BAD_PROBE_ADDR);
- else if (ret != -ENOMEM)
+ else if (ret != -ENOMEM && ret != -EEXIST)
trace_probe_log_err(0, FAIL_REG_PROBE);
goto error;
}
@@ -965,6 +1113,9 @@ retry:
case FETCH_OP_COMM:
val = (unsigned long)current->comm;
break;
+ case FETCH_OP_DATA:
+ val = (unsigned long)code->data;
+ break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
case FETCH_OP_ARG:
val = regs_get_kernel_argument(regs, code->param);
@@ -1089,7 +1240,10 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
struct trace_probe *tp;
field = (struct kprobe_trace_entry_head *)iter->ent;
- tp = container_of(event, struct trace_probe, call.event);
+ tp = trace_probe_primary_from_call(
+ container_of(event, struct trace_event_call, event));
+ if (WARN_ON_ONCE(!tp))
+ goto out;
trace_seq_printf(s, "%s: (", trace_probe_name(tp));
@@ -1116,7 +1270,10 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
struct trace_probe *tp;
field = (struct kretprobe_trace_entry_head *)iter->ent;
- tp = container_of(event, struct trace_probe, call.event);
+ tp = trace_probe_primary_from_call(
+ container_of(event, struct trace_event_call, event));
+ if (WARN_ON_ONCE(!tp))
+ goto out;
trace_seq_printf(s, "%s: (", trace_probe_name(tp));
@@ -1145,23 +1302,31 @@ static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
int ret;
struct kprobe_trace_entry_head field;
- struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(event_call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENOENT;
DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
- return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
+ return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
int ret;
struct kretprobe_trace_entry_head field;
- struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(event_call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENOENT;
DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
- return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
+ return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
#ifdef CONFIG_PERF_EVENTS
@@ -1289,20 +1454,19 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
static int kprobe_register(struct trace_event_call *event,
enum trace_reg type, void *data)
{
- struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
struct trace_event_file *file = data;
switch (type) {
case TRACE_REG_REGISTER:
- return enable_trace_kprobe(tk, file);
+ return enable_trace_kprobe(event, file);
case TRACE_REG_UNREGISTER:
- return disable_trace_kprobe(tk, file);
+ return disable_trace_kprobe(event, file);
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
- return enable_trace_kprobe(tk, NULL);
+ return enable_trace_kprobe(event, NULL);
case TRACE_REG_PERF_UNREGISTER:
- return disable_trace_kprobe(tk, NULL);
+ return disable_trace_kprobe(event, NULL);
case TRACE_REG_PERF_OPEN:
case TRACE_REG_PERF_CLOSE:
case TRACE_REG_PERF_ADD:
@@ -1369,7 +1533,6 @@ static inline void init_trace_event_call(struct trace_kprobe *tk)
call->flags = TRACE_EVENT_FL_KPROBE;
call->class->reg = kprobe_register;
- call->data = tk;
}
static int register_kprobe_event(struct trace_kprobe *tk)
@@ -1432,7 +1595,9 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
struct trace_kprobe *tk;
- tk = container_of(event_call, struct trace_kprobe, tp.call);
+ tk = trace_kprobe_primary_from_call(event_call);
+ if (unlikely(!tk))
+ return;
if (trace_probe_is_enabled(&tk->tp)) {
WARN_ON(1);
@@ -1577,7 +1742,8 @@ static __init int kprobe_trace_self_tests_init(void)
pr_warn("error on getting probe file.\n");
warn++;
} else
- enable_trace_kprobe(tk, file);
+ enable_trace_kprobe(
+ trace_probe_event_call(&tk->tp), file);
}
}
@@ -1598,7 +1764,8 @@ static __init int kprobe_trace_self_tests_init(void)
pr_warn("error on getting probe file.\n");
warn++;
} else
- enable_trace_kprobe(tk, file);
+ enable_trace_kprobe(
+ trace_probe_event_call(&tk->tp), file);
}
}
@@ -1631,7 +1798,8 @@ static __init int kprobe_trace_self_tests_init(void)
pr_warn("error on getting probe file.\n");
warn++;
} else
- disable_trace_kprobe(tk, file);
+ disable_trace_kprobe(
+ trace_probe_event_call(&tk->tp), file);
}
tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
@@ -1649,7 +1817,8 @@ static __init int kprobe_trace_self_tests_init(void)
pr_warn("error on getting probe file.\n");
warn++;
} else
- disable_trace_kprobe(tk, file);
+ disable_trace_kprobe(
+ trace_probe_event_call(&tk->tp), file);
}
ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index cab4a5398f1d..d54ce252b05a 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -219,10 +219,10 @@ trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
{
int i;
const char *ret = trace_seq_buffer_ptr(p);
+ const char *fmt = concatenate ? "%*phN" : "%*ph";
- for (i = 0; i < buf_len; i++)
- trace_seq_printf(p, "%s%2.2x", concatenate || i == 0 ? "" : " ",
- buf[i]);
+ for (i = 0; i < buf_len; i += 16)
+ trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
trace_seq_putc(p, 0);
return ret;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index fb6bfbc5bf86..baf58a3612c0 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -316,6 +316,29 @@ inval_var:
return -EINVAL;
}
+static int str_to_immediate(char *str, unsigned long *imm)
+{
+ if (isdigit(str[0]))
+ return kstrtoul(str, 0, imm);
+ else if (str[0] == '-')
+ return kstrtol(str, 0, (long *)imm);
+ else if (str[0] == '+')
+ return kstrtol(str + 1, 0, (long *)imm);
+ return -EINVAL;
+}
+
+static int __parse_imm_string(char *str, char **pbuf, int offs)
+{
+ size_t len = strlen(str);
+
+ if (str[len - 1] != '"') {
+ trace_probe_log_err(offs + len, IMMSTR_NO_CLOSE);
+ return -EINVAL;
+ }
+ *pbuf = kstrndup(str, len - 1, GFP_KERNEL);
+ return 0;
+}
+
/* Recursive argument parser */
static int
parse_probe_arg(char *arg, const struct fetch_type *type,
@@ -430,7 +453,8 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
ret = parse_probe_arg(arg, t2, &code, end, flags, offs);
if (ret)
break;
- if (code->op == FETCH_OP_COMM) {
+ if (code->op == FETCH_OP_COMM ||
+ code->op == FETCH_OP_DATA) {
trace_probe_log_err(offs, COMM_CANT_DEREF);
return -EINVAL;
}
@@ -444,6 +468,21 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
code->offset = offset;
}
break;
+ case '\\': /* Immediate value */
+ if (arg[1] == '"') { /* Immediate string */
+ ret = __parse_imm_string(arg + 2, &tmp, offs + 2);
+ if (ret)
+ break;
+ code->op = FETCH_OP_DATA;
+ code->data = tmp;
+ } else {
+ ret = str_to_immediate(arg + 1, &code->immediate);
+ if (ret)
+ trace_probe_log_err(offs + 1, BAD_IMM);
+ else
+ code->op = FETCH_OP_IMM;
+ }
+ break;
}
if (!ret && code->op == FETCH_OP_NOP) {
/* Parsed, but do not find fetch method */
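For reference, a hedged summary (as a comment, not part of the patch) of how the new '\' case above classifies a few argument strings:

/*
 * \1       -> FETCH_OP_IMM,  code->immediate = 1
 * \-2      -> FETCH_OP_IMM,  code->immediate = (unsigned long)-2
 * \0x10    -> FETCH_OP_IMM,  code->immediate = 16
 * \"foo"   -> FETCH_OP_DATA, code->data = kstrndup'ed copy of "foo"
 * \foo     -> -EINVAL, logged as BAD_IMM
 * A string that opens with \" but lacks the closing '"' is rejected
 * with IMMSTR_NO_CLOSE by __parse_imm_string().
 */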
@@ -542,8 +581,11 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
}
}
- /* Since $comm can not be dereferred, we can find $comm by strcmp */
- if (strcmp(arg, "$comm") == 0) {
+ /*
+ * Since $comm and immediate strings cannot be dereferenced,
+ * we can find those by strcmp.
+ */
+ if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {
/* The type of $comm must be "string", and not an array. */
if (parg->count || (t && strcmp(t, "string")))
return -EINVAL;
@@ -580,7 +622,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
if (!strcmp(parg->type->name, "string") ||
!strcmp(parg->type->name, "ustring")) {
if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF &&
- code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM) {
+ code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM &&
+ code->op != FETCH_OP_DATA) {
trace_probe_log_err(offset + (t ? (t - arg) : 0),
BAD_STRING);
ret = -EINVAL;
@@ -589,9 +632,10 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) ||
parg->count) {
/*
- * IMM and COMM is pointing actual address, those must
- * be kept, and if parg->count != 0, this is an array
- * of string pointers instead of string address itself.
+ * IMM, DATA and COMM point to an actual address; those
+ * must be kept, and if parg->count != 0, this is an
+ * array of string pointers instead of the string address
+ * itself.
*/
code++;
if (code->op != FETCH_OP_NOP) {
@@ -665,7 +709,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
fail:
if (ret) {
for (code = tmp; code < tmp + FETCH_INSN_MAX; code++)
- if (code->op == FETCH_NOP_SYMBOL)
+ if (code->op == FETCH_NOP_SYMBOL ||
+ code->op == FETCH_OP_DATA)
kfree(code->data);
}
kfree(tmp);
@@ -736,7 +781,8 @@ void traceprobe_free_probe_arg(struct probe_arg *arg)
struct fetch_insn *code = arg->code;
while (code && code->op != FETCH_OP_END) {
- if (code->op == FETCH_NOP_SYMBOL)
+ if (code->op == FETCH_NOP_SYMBOL ||
+ code->op == FETCH_OP_DATA)
kfree(code->data);
code++;
}
@@ -886,44 +932,85 @@ int traceprobe_define_arg_fields(struct trace_event_call *event_call,
return 0;
}
+static void trace_probe_event_free(struct trace_probe_event *tpe)
+{
+ kfree(tpe->class.system);
+ kfree(tpe->call.name);
+ kfree(tpe->call.print_fmt);
+ kfree(tpe);
+}
+
+int trace_probe_append(struct trace_probe *tp, struct trace_probe *to)
+{
+ if (trace_probe_has_sibling(tp))
+ return -EBUSY;
+
+ list_del_init(&tp->list);
+ trace_probe_event_free(tp->event);
+
+ tp->event = to->event;
+ list_add_tail(&tp->list, trace_probe_probe_list(to));
+
+ return 0;
+}
+
+void trace_probe_unlink(struct trace_probe *tp)
+{
+ list_del_init(&tp->list);
+ if (list_empty(trace_probe_probe_list(tp)))
+ trace_probe_event_free(tp->event);
+ tp->event = NULL;
+}
void trace_probe_cleanup(struct trace_probe *tp)
{
- struct trace_event_call *call = trace_probe_event_call(tp);
int i;
for (i = 0; i < tp->nr_args; i++)
traceprobe_free_probe_arg(&tp->args[i]);
- if (call->class)
- kfree(call->class->system);
- kfree(call->name);
- kfree(call->print_fmt);
+ if (tp->event)
+ trace_probe_unlink(tp);
}
int trace_probe_init(struct trace_probe *tp, const char *event,
const char *group)
{
- struct trace_event_call *call = trace_probe_event_call(tp);
+ struct trace_event_call *call;
+ int ret = 0;
if (!event || !group)
return -EINVAL;
- call->class = &tp->class;
- call->name = kstrdup(event, GFP_KERNEL);
- if (!call->name)
+ tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
+ if (!tp->event)
return -ENOMEM;
- tp->class.system = kstrdup(group, GFP_KERNEL);
- if (!tp->class.system) {
- kfree(call->name);
- call->name = NULL;
- return -ENOMEM;
+ INIT_LIST_HEAD(&tp->event->files);
+ INIT_LIST_HEAD(&tp->event->class.fields);
+ INIT_LIST_HEAD(&tp->event->probes);
+ INIT_LIST_HEAD(&tp->list);
+ list_add(&tp->event->probes, &tp->list);
+
+ call = trace_probe_event_call(tp);
+ call->class = &tp->event->class;
+ call->name = kstrdup(event, GFP_KERNEL);
+ if (!call->name) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ tp->event->class.system = kstrdup(group, GFP_KERNEL);
+ if (!tp->event->class.system) {
+ ret = -ENOMEM;
+ goto error;
}
- INIT_LIST_HEAD(&tp->files);
- INIT_LIST_HEAD(&tp->class.fields);
return 0;
+
+error:
+ trace_probe_cleanup(tp);
+ return ret;
}
int trace_probe_register_event_call(struct trace_probe *tp)
@@ -952,7 +1039,7 @@ int trace_probe_add_file(struct trace_probe *tp, struct trace_event_file *file)
link->file = file;
INIT_LIST_HEAD(&link->list);
- list_add_tail_rcu(&link->list, &tp->files);
+ list_add_tail_rcu(&link->list, &tp->event->files);
trace_probe_set_flag(tp, TP_FLAG_TRACE);
return 0;
}
@@ -983,8 +1070,45 @@ int trace_probe_remove_file(struct trace_probe *tp,
synchronize_rcu();
kfree(link);
- if (list_empty(&tp->files))
+ if (list_empty(&tp->event->files))
trace_probe_clear_flag(tp, TP_FLAG_TRACE);
return 0;
}
+
+/*
+ * Return the smallest index (starting from 1) at which the arguments differ.
+ * If all argument types and names are the same, return 0.
+ */
+int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b)
+{
+ int i;
+
+ for (i = 0; i < a->nr_args; i++) {
+ if ((b->nr_args <= i) ||
+ ((a->args[i].type != b->args[i].type) ||
+ (a->args[i].count != b->args[i].count) ||
+ strcmp(a->args[i].name, b->args[i].name)))
+ return i + 1;
+ }
+
+ return 0;
+}
+
+bool trace_probe_match_command_args(struct trace_probe *tp,
+ int argc, const char **argv)
+{
+ char buf[MAX_ARGSTR_LEN + 1];
+ int i;
+
+ if (tp->nr_args < argc)
+ return false;
+
+ for (i = 0; i < argc; i++) {
+ snprintf(buf, sizeof(buf), "%s=%s",
+ tp->args[i].name, tp->args[i].comm);
+ if (strcmp(buf, argv[i]))
+ return false;
+ }
+ return true;
+}
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index d1714820efe1..4ee703728aec 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -89,6 +89,7 @@ enum fetch_op {
FETCH_OP_COMM, /* Current comm */
FETCH_OP_ARG, /* Function argument : .param */
FETCH_OP_FOFFS, /* File offset: .immediate */
+ FETCH_OP_DATA, /* Allocated data: .data */
// Stage 2 (dereference) op
FETCH_OP_DEREF, /* Dereference: .offset */
FETCH_OP_UDEREF, /* User-space Dereference: .offset */
@@ -222,11 +223,18 @@ struct probe_arg {
const struct fetch_type *type; /* Type of this argument */
};
-struct trace_probe {
+/* Event call and class holder */
+struct trace_probe_event {
unsigned int flags; /* For TP_FLAG_* */
struct trace_event_class class;
struct trace_event_call call;
struct list_head files;
+ struct list_head probes;
+};
+
+struct trace_probe {
+ struct list_head list;
+ struct trace_probe_event *event;
ssize_t size; /* trace entry size */
unsigned int nr_args;
struct probe_arg args[];
@@ -240,19 +248,19 @@ struct event_file_link {
static inline bool trace_probe_test_flag(struct trace_probe *tp,
unsigned int flag)
{
- return !!(tp->flags & flag);
+ return !!(tp->event->flags & flag);
}
static inline void trace_probe_set_flag(struct trace_probe *tp,
unsigned int flag)
{
- tp->flags |= flag;
+ tp->event->flags |= flag;
}
static inline void trace_probe_clear_flag(struct trace_probe *tp,
unsigned int flag)
{
- tp->flags &= ~flag;
+ tp->event->flags &= ~flag;
}
static inline bool trace_probe_is_enabled(struct trace_probe *tp)
@@ -262,45 +270,76 @@ static inline bool trace_probe_is_enabled(struct trace_probe *tp)
static inline const char *trace_probe_name(struct trace_probe *tp)
{
- return trace_event_name(&tp->call);
+ return trace_event_name(&tp->event->call);
}
static inline const char *trace_probe_group_name(struct trace_probe *tp)
{
- return tp->call.class->system;
+ return tp->event->call.class->system;
}
static inline struct trace_event_call *
trace_probe_event_call(struct trace_probe *tp)
{
- return &tp->call;
+ return &tp->event->call;
+}
+
+static inline struct trace_probe_event *
+trace_probe_event_from_call(struct trace_event_call *event_call)
+{
+ return container_of(event_call, struct trace_probe_event, call);
+}
+
+static inline struct trace_probe *
+trace_probe_primary_from_call(struct trace_event_call *call)
+{
+ struct trace_probe_event *tpe = trace_probe_event_from_call(call);
+
+ return list_first_entry(&tpe->probes, struct trace_probe, list);
+}
+
+static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp)
+{
+ return &tp->event->probes;
+}
+
+static inline bool trace_probe_has_sibling(struct trace_probe *tp)
+{
+ struct list_head *list = trace_probe_probe_list(tp);
+
+ return !list_empty(list) && !list_is_singular(list);
}
static inline int trace_probe_unregister_event_call(struct trace_probe *tp)
{
/* tp->event is unregistered in trace_remove_event_call() */
- return trace_remove_event_call(&tp->call);
+ return trace_remove_event_call(&tp->event->call);
}
static inline bool trace_probe_has_single_file(struct trace_probe *tp)
{
- return !!list_is_singular(&tp->files);
+ return !!list_is_singular(&tp->event->files);
}
int trace_probe_init(struct trace_probe *tp, const char *event,
const char *group);
void trace_probe_cleanup(struct trace_probe *tp);
+int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
+void trace_probe_unlink(struct trace_probe *tp);
int trace_probe_register_event_call(struct trace_probe *tp);
int trace_probe_add_file(struct trace_probe *tp, struct trace_event_file *file);
int trace_probe_remove_file(struct trace_probe *tp,
struct trace_event_file *file);
struct event_file_link *trace_probe_get_file_link(struct trace_probe *tp,
struct trace_event_file *file);
+int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b);
+bool trace_probe_match_command_args(struct trace_probe *tp,
+ int argc, const char **argv);
#define trace_probe_for_each_link(pos, tp) \
- list_for_each_entry(pos, &(tp)->files, list)
+ list_for_each_entry(pos, &(tp)->event->files, list)
#define trace_probe_for_each_link_rcu(pos, tp) \
- list_for_each_entry_rcu(pos, &(tp)->files, list)
+ list_for_each_entry_rcu(pos, &(tp)->event->files, list)
/* Check the name is good for event/group/fields */
static inline bool is_good_name(const char *name)
@@ -370,6 +409,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(BAD_VAR, "Invalid $-variable specified"), \
C(BAD_REG_NAME, "Invalid register name"), \
C(BAD_MEM_ADDR, "Invalid memory address"), \
+ C(BAD_IMM, "Invalid immediate value"), \
+ C(IMMSTR_NO_CLOSE, "String is not closed with '\"'"), \
C(FILE_ON_KPROBE, "File offset is not available with kprobe"), \
C(BAD_FILE_OFFS, "Invalid file offset value"), \
C(SYM_ON_UPROBE, "Symbol is not available with uprobe"), \
@@ -393,7 +434,10 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(ARG_TOO_LONG, "Argument expression is too long"), \
C(NO_ARG_BODY, "No argument expression"), \
C(BAD_INSN_BNDRY, "Probe point is not an instruction boundary"),\
- C(FAIL_REG_PROBE, "Failed to register probe event"),
+ C(FAIL_REG_PROBE, "Failed to register probe event"),\
+ C(DIFF_PROBE_TYPE, "Probe type is different from existing probe"),\
+ C(DIFF_ARG_TYPE, "Argument type or name is different from existing probe"),\
+ C(SAME_PROBE, "There is already the exact same probe event"),
#undef C
#define C(a, b) TP_ERR_##a
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 5d16f73898db..ec9a34a97129 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -53,6 +53,104 @@ static void print_max_stack(void)
}
}
+/*
+ * The stack tracer looks for the maximum stack usage at each function call. It
+ * registers a callback with ftrace, and in that callback it examines the stack
+ * size. It determines the stack size from the variable passed in, which is the
+ * address of a local variable in the stack_trace_call() callback function.
+ * The stack size is the distance from the address of that local variable to the
+ * top of the current stack. If that size is smaller than the currently saved
+ * max stack size, nothing more is done.
+ *
+ * If the size of the stack is greater than the maximum recorded size, then the
+ * following algorithm takes place.
+ *
+ * For architectures (like x86) that store the function's return address before
+ * saving the function's local variables, the stack will look something like
+ * this:
+ *
+ * [ top of stack ]
+ * 0: sys call entry frame
+ * 10: return addr to entry code
+ * 11: start of sys_foo frame
+ * 20: return addr to sys_foo
+ * 21: start of kernel_func_bar frame
+ * 30: return addr to kernel_func_bar
+ * 31: [ do trace stack here ]
+ *
+ * save_stack_trace() is called, returning all the functions it finds in the
+ * current stack, which would be (from the bottom of the stack to the top):
+ *
+ * return addr to kernel_func_bar
+ * return addr to sys_foo
+ * return addr to entry code
+ *
+ * Now, to figure out how much stack each of these functions uses for its local
+ * variables, the stack is searched for these return-address values. When a
+ * match is found, the address is added to the stack_dump_trace[] array and its
+ * offset into the stack is saved in the stack_trace_index[] array. The above
+ * example would show:
+ *
+ * stack_dump_trace[] | stack_trace_index[]
+ * ------------------ + -------------------
+ * return addr to kernel_func_bar | 30
+ * return addr to sys_foo | 20
+ * return addr to entry | 10
+ *
+ * The print_max_stack() function above uses these values to print the size of
+ * each function's portion of the stack.
+ *
+ * for (i = 0; i < nr_entries; i++) {
+ * size = i == nr_entries - 1 ? stack_trace_index[i] :
+ * stack_trace_index[i] - stack_trace_index[i+1]
+ * print "%d %d %d %s\n", i, stack_trace_index[i], size, stack_dump_trace[i]);
+ * }
+ *
+ * The above shows
+ *
+ * depth size location
+ * ----- ---- --------
+ * 0 30 10 kernel_func_bar
+ * 1 20 10 sys_foo
+ * 2 10 10 entry code
+ *
+ * Now, for architectures that might save the return address after the function's
+ * local variables (saving the link register before calling nested functions),
+ * the stack will look a little different:
+ *
+ * [ top of stack ]
+ * 0: sys call entry frame
+ * 10: start of sys_foo_frame
+ * 19: return addr to entry code << lr saved before calling kernel_func_bar
+ * 20: start of kernel_func_bar frame
+ * 29: return addr to sys_foo_frame << lr saved before calling next function
+ * 30: [ do trace stack here ]
+ *
+ * Although the functions returned by save_stack_trace() may be the same, the
+ * placement in the stack will be different. Using the same algorithm as above
+ * would yield:
+ *
+ * stack_dump_trace[] | stack_trace_index[]
+ * ------------------ + -------------------
+ * return addr to kernel_func_bar | 30
+ * return addr to sys_foo | 29
+ * return addr to entry | 19
+ *
+ * Where the mapping is off by one:
+ *
+ * kernel_func_bar stack frame size is 29 - 19 not 30 - 29!
+ *
+ * To fix this, if the architecture defines ARCH_FTRACE_SHIFT_STACK_TRACER, the
+ * values in stack_trace_index[] are shifted by one and the number of
+ * stack trace entries is decremented by one.
+ *
+ * stack_dump_trace[] | stack_trace_index[]
+ * ------------------ + -------------------
+ * return addr to kernel_func_bar | 29
+ * return addr to sys_foo | 19
+ *
+ * Although the entry function is not displayed, the size reported for the
+ * first function (sys_foo) still includes the entry function's stack.
+ */
static void check_stack(unsigned long ip, unsigned long *stack)
{
unsigned long this_size, flags; unsigned long *p, *top, *start;
@@ -158,6 +256,20 @@ static void check_stack(unsigned long ip, unsigned long *stack)
i++;
}
+#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
+ /*
+ * Some archs will store the link register before calling
+ * nested functions. This means the saved return address
+ * comes after the local storage, and we need to shift
+ * for that.
+ */
+ if (x > 1) {
+ memmove(&stack_trace_index[0], &stack_trace_index[1],
+ sizeof(stack_trace_index[0]) * (x - 1));
+ x--;
+ }
+#endif
+
stack_trace_nr_entries = x;
if (task_stack_end_corrupted(current)) {
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 1ceedb9146b1..dd884341f5c5 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -44,7 +44,7 @@ static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
- struct dyn_event *ev);
+ int argc, const char **argv, struct dyn_event *ev);
static struct dyn_event_operations trace_uprobe_ops = {
.create = trace_uprobe_create,
@@ -248,6 +248,9 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
case FETCH_OP_COMM:
val = FETCH_TOKEN_COMM;
break;
+ case FETCH_OP_DATA:
+ val = (unsigned long)code->data;
+ break;
case FETCH_OP_FOFFS:
val = translate_user_vaddr(code->immediate);
break;
@@ -284,13 +287,54 @@ static bool trace_uprobe_is_busy(struct dyn_event *ev)
return trace_probe_is_enabled(&tu->tp);
}
+static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
+ int argc, const char **argv)
+{
+ char buf[MAX_ARGSTR_LEN + 1];
+ int len;
+
+ if (!argc)
+ return true;
+
+ len = strlen(tu->filename);
+ if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
+ return false;
+
+ if (tu->ref_ctr_offset == 0)
+ snprintf(buf, sizeof(buf), "0x%0*lx",
+ (int)(sizeof(void *) * 2), tu->offset);
+ else
+ snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
+ (int)(sizeof(void *) * 2), tu->offset,
+ tu->ref_ctr_offset);
+ if (strcmp(buf, &argv[0][len + 1]))
+ return false;
+
+ argc--; argv++;
+
+ return trace_probe_match_command_args(&tu->tp, argc, argv);
+}
+
static bool trace_uprobe_match(const char *system, const char *event,
- struct dyn_event *ev)
+ int argc, const char **argv, struct dyn_event *ev)
{
struct trace_uprobe *tu = to_trace_uprobe(ev);
return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
- (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0);
+ (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
+ trace_uprobe_match_command_head(tu, argc, argv);
+}
+
+static nokprobe_inline struct trace_uprobe *
+trace_uprobe_primary_from_call(struct trace_event_call *call)
+{
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return NULL;
+
+ return container_of(tp, struct trace_uprobe, tp);
}
/*
@@ -352,15 +396,76 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
int ret;
+ if (trace_probe_has_sibling(&tu->tp))
+ goto unreg;
+
ret = unregister_uprobe_event(tu);
if (ret)
return ret;
+unreg:
dyn_event_remove(&tu->devent);
+ trace_probe_unlink(&tu->tp);
free_trace_uprobe(tu);
return 0;
}
+static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
+ struct trace_uprobe *comp)
+{
+ struct trace_probe_event *tpe = orig->tp.event;
+ struct trace_probe *pos;
+ struct inode *comp_inode = d_real_inode(comp->path.dentry);
+ int i;
+
+ list_for_each_entry(pos, &tpe->probes, list) {
+ orig = container_of(pos, struct trace_uprobe, tp);
+ if (comp_inode != d_real_inode(orig->path.dentry) ||
+ comp->offset != orig->offset)
+ continue;
+
+ /*
+ * trace_probe_compare_arg_type() ensured that nr_args and
+ * each argument's name and type are the same. Let's compare comm.
+ */
+ for (i = 0; i < orig->tp.nr_args; i++) {
+ if (strcmp(orig->tp.args[i].comm,
+ comp->tp.args[i].comm))
+ break;
+ }
+
+ if (i == orig->tp.nr_args)
+ return true;
+ }
+
+ return false;
+}
+
+static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
+{
+ int ret;
+
+ ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
+ if (ret) {
+ /* Note that the argument index starts at 2 */
+ trace_probe_log_set_index(ret + 1);
+ trace_probe_log_err(0, DIFF_ARG_TYPE);
+ return -EEXIST;
+ }
+ if (trace_uprobe_has_same_uprobe(to, tu)) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, SAME_PROBE);
+ return -EEXIST;
+ }
+
+ /* Append to existing event */
+ ret = trace_probe_append(&tu->tp, &to->tp);
+ if (!ret)
+ dyn_event_add(&tu->devent);
+
+ return ret;
+}
+
/*
* Uprobe with multiple reference counter is not allowed. i.e.
* If inode and offset matches, reference counter offset *must*
@@ -370,25 +475,21 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu)
* as the new one does not conflict with any other existing
* ones.
*/
-static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
+static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
struct dyn_event *pos;
- struct trace_uprobe *tmp, *old = NULL;
+ struct trace_uprobe *tmp;
struct inode *new_inode = d_real_inode(new->path.dentry);
- old = find_probe_event(trace_probe_name(&new->tp),
- trace_probe_group_name(&new->tp));
-
for_each_trace_uprobe(tmp, pos) {
- if ((old ? old != tmp : true) &&
- new_inode == d_real_inode(tmp->path.dentry) &&
+ if (new_inode == d_real_inode(tmp->path.dentry) &&
new->offset == tmp->offset &&
new->ref_ctr_offset != tmp->ref_ctr_offset) {
pr_warn("Reference counter offset mismatch.");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
}
- return old;
+ return 0;
}
/* Register a trace_uprobe and probe_event */
@@ -399,18 +500,22 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
mutex_lock(&event_mutex);
- /* register as an event */
- old_tu = find_old_trace_uprobe(tu);
- if (IS_ERR(old_tu)) {
- ret = PTR_ERR(old_tu);
+ ret = validate_ref_ctr_offset(tu);
+ if (ret)
goto end;
- }
+ /* register as an event */
+ old_tu = find_probe_event(trace_probe_name(&tu->tp),
+ trace_probe_group_name(&tu->tp));
if (old_tu) {
- /* delete old event */
- ret = unregister_trace_uprobe(old_tu);
- if (ret)
- goto end;
+ if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
+ trace_probe_log_set_index(0);
+ trace_probe_log_err(0, DIFF_PROBE_TYPE);
+ ret = -EEXIST;
+ } else {
+ ret = append_trace_uprobe(tu, old_tu);
+ }
+ goto end;
}
ret = register_uprobe_event(tu);
@@ -897,7 +1002,10 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
u8 *data;
entry = (struct uprobe_trace_entry_head *)iter->ent;
- tu = container_of(event, struct trace_uprobe, tp.call.event);
+ tu = trace_uprobe_primary_from_call(
+ container_of(event, struct trace_event_call, event));
+ if (unlikely(!tu))
+ goto out;
if (is_ret_probe(tu)) {
trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
@@ -924,27 +1032,71 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
enum uprobe_filter_ctx ctx,
struct mm_struct *mm);
-static int
-probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
- filter_func_t filter)
+static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
- bool enabled = trace_probe_is_enabled(&tu->tp);
int ret;
+ tu->consumer.filter = filter;
+ tu->inode = d_real_inode(tu->path.dentry);
+
+ if (tu->ref_ctr_offset)
+ ret = uprobe_register_refctr(tu->inode, tu->offset,
+ tu->ref_ctr_offset, &tu->consumer);
+ else
+ ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
+
+ if (ret)
+ tu->inode = NULL;
+
+ return ret;
+}
+
+static void __probe_event_disable(struct trace_probe *tp)
+{
+ struct trace_probe *pos;
+ struct trace_uprobe *tu;
+
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tu = container_of(pos, struct trace_uprobe, tp);
+ if (!tu->inode)
+ continue;
+
+ WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+
+ uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
+ tu->inode = NULL;
+ }
+}
+
+static int probe_event_enable(struct trace_event_call *call,
+ struct trace_event_file *file, filter_func_t filter)
+{
+ struct trace_probe *pos, *tp;
+ struct trace_uprobe *tu;
+ bool enabled;
+ int ret;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+ enabled = trace_probe_is_enabled(tp);
+
+ /* This may also change "enabled" state */
if (file) {
- if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+ if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
return -EINTR;
- ret = trace_probe_add_file(&tu->tp, file);
+ ret = trace_probe_add_file(tp, file);
if (ret < 0)
return ret;
} else {
- if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+ if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
return -EINTR;
- trace_probe_set_flag(&tu->tp, TP_FLAG_PROFILE);
+ trace_probe_set_flag(tp, TP_FLAG_PROFILE);
}
+ tu = container_of(tp, struct trace_uprobe, tp);
WARN_ON(!uprobe_filter_is_empty(&tu->filter));
if (enabled)
@@ -954,18 +1106,15 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
if (ret)
goto err_flags;
- tu->consumer.filter = filter;
- tu->inode = d_real_inode(tu->path.dentry);
- if (tu->ref_ctr_offset) {
- ret = uprobe_register_refctr(tu->inode, tu->offset,
- tu->ref_ctr_offset, &tu->consumer);
- } else {
- ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tu = container_of(pos, struct trace_uprobe, tp);
+ ret = trace_uprobe_enable(tu, filter);
+ if (ret) {
+ __probe_event_disable(tp);
+ goto err_buffer;
+ }
}
- if (ret)
- goto err_buffer;
-
return 0;
err_buffer:
@@ -973,33 +1122,35 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
err_flags:
if (file)
- trace_probe_remove_file(&tu->tp, file);
+ trace_probe_remove_file(tp, file);
else
- trace_probe_clear_flag(&tu->tp, TP_FLAG_PROFILE);
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
return ret;
}
-static void
-probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
+static void probe_event_disable(struct trace_event_call *call,
+ struct trace_event_file *file)
{
- if (!trace_probe_is_enabled(&tu->tp))
+ struct trace_probe *tp;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return;
+
+ if (!trace_probe_is_enabled(tp))
return;
if (file) {
- if (trace_probe_remove_file(&tu->tp, file) < 0)
+ if (trace_probe_remove_file(tp, file) < 0)
return;
- if (trace_probe_is_enabled(&tu->tp))
+ if (trace_probe_is_enabled(tp))
return;
} else
- trace_probe_clear_flag(&tu->tp, TP_FLAG_PROFILE);
-
- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
-
- uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
- tu->inode = NULL;
+ trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
+ __probe_event_disable(tp);
uprobe_buffer_disable();
}
@@ -1007,7 +1158,11 @@ static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
int ret, size;
struct uprobe_trace_entry_head field;
- struct trace_uprobe *tu = event_call->data;
+ struct trace_uprobe *tu;
+
+ tu = trace_uprobe_primary_from_call(event_call);
+ if (unlikely(!tu))
+ return -ENODEV;
if (is_ret_probe(tu)) {
DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
@@ -1100,6 +1255,27 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
return err;
}
+static int uprobe_perf_multi_call(struct trace_event_call *call,
+ struct perf_event *event,
+ int (*op)(struct trace_uprobe *tu, struct perf_event *event))
+{
+ struct trace_probe *pos, *tp;
+ struct trace_uprobe *tu;
+ int ret = 0;
+
+ tp = trace_probe_primary_from_call(call);
+ if (WARN_ON_ONCE(!tp))
+ return -ENODEV;
+
+ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tu = container_of(pos, struct trace_uprobe, tp);
+ ret = op(tu, event);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
@@ -1213,30 +1389,29 @@ static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
void *data)
{
- struct trace_uprobe *tu = event->data;
struct trace_event_file *file = data;
switch (type) {
case TRACE_REG_REGISTER:
- return probe_event_enable(tu, file, NULL);
+ return probe_event_enable(event, file, NULL);
case TRACE_REG_UNREGISTER:
- probe_event_disable(tu, file);
+ probe_event_disable(event, file);
return 0;
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
- return probe_event_enable(tu, NULL, uprobe_perf_filter);
+ return probe_event_enable(event, NULL, uprobe_perf_filter);
case TRACE_REG_PERF_UNREGISTER:
- probe_event_disable(tu, NULL);
+ probe_event_disable(event, NULL);
return 0;
case TRACE_REG_PERF_OPEN:
- return uprobe_perf_open(tu, data);
+ return uprobe_perf_multi_call(event, data, uprobe_perf_open);
case TRACE_REG_PERF_CLOSE:
- return uprobe_perf_close(tu, data);
+ return uprobe_perf_multi_call(event, data, uprobe_perf_close);
#endif
default:
@@ -1330,7 +1505,6 @@ static inline void init_trace_event_call(struct trace_uprobe *tu)
call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
call->class->reg = trace_uprobe_register;
- call->data = tu;
}
static int register_uprobe_event(struct trace_uprobe *tu)
@@ -1399,7 +1573,7 @@ void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
struct trace_uprobe *tu;
- tu = container_of(event_call, struct trace_uprobe, tp.call);
+ tu = trace_uprobe_primary_from_call(event_call);
free_trace_uprobe(tu);
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 601d61150b65..bc2e09a8ea61 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3329,7 +3329,7 @@ EXPORT_SYMBOL_GPL(execute_in_process_context);
*
* Undo alloc_workqueue_attrs().
*/
-static void free_workqueue_attrs(struct workqueue_attrs *attrs)
+void free_workqueue_attrs(struct workqueue_attrs *attrs)
{
if (attrs) {
free_cpumask_var(attrs->cpumask);
@@ -3345,7 +3345,7 @@ static void free_workqueue_attrs(struct workqueue_attrs *attrs)
*
* Return: The allocated new workqueue_attr on success. %NULL on failure.
*/
-static struct workqueue_attrs *alloc_workqueue_attrs(void)
+struct workqueue_attrs *alloc_workqueue_attrs(void)
{
struct workqueue_attrs *attrs;
@@ -4030,16 +4030,20 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
*
* Performs GFP_KERNEL allocations.
*
+ * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
+ *
* Return: 0 on success and -errno on failure.
*/
-static int apply_workqueue_attrs(struct workqueue_struct *wq,
+int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
int ret;
- apply_wqattrs_lock();
+ lockdep_assert_cpus_held();
+
+ mutex_lock(&wq_pool_mutex);
ret = apply_workqueue_attrs_locked(wq, attrs);
- apply_wqattrs_unlock();
+ mutex_unlock(&wq_pool_mutex);
return ret;
}
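Because apply_workqueue_attrs() now only takes wq_pool_mutex and asserts that the caller already holds the CPU hotplug read lock (lockdep_assert_cpus_held()), external callers have to follow the same pattern alloc_and_link_pwqs() uses below. A minimal usage sketch (the attribute tweak is illustrative):

static int example_apply_attrs(struct workqueue_struct *unbound_wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;
	attrs->nice = -5;	/* illustrative attribute change */

	get_online_cpus();	/* caller provides hotplug read exclusion */
	ret = apply_workqueue_attrs(unbound_wq, attrs);
	put_online_cpus();

	free_workqueue_attrs(attrs);
	return ret;
}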
@@ -4152,16 +4156,21 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
mutex_unlock(&wq->mutex);
}
return 0;
- } else if (wq->flags & __WQ_ORDERED) {
+ }
+
+ get_online_cpus();
+ if (wq->flags & __WQ_ORDERED) {
ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
/* there should only be single pwq for ordering guarantee */
WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
"ordering guarantee broken for workqueue %s\n", wq->name);
- return ret;
} else {
- return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+ ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
}
+ put_online_cpus();
+
+ return ret;
}
static int wq_clamp_max_active(int max_active, unsigned int flags,