path: root/kernel/trace
author	Yonghong Song <yhs@fb.com>	2017-06-22 15:07:39 -0700
committer	David S. Miller <davem@davemloft.net>	2017-06-23 14:04:11 -0400
commit	239946314e57711d7da546b67964d0b387a3ee42 (patch)
tree	958d35fbbbc439b561832c75de22f5fdfa825f7c /kernel/trace
parent	net: stmmac: make some functions static (diff)
bpf: possibly avoid extra masking for narrower load in verifier
Commit 31fd85816dbe ("bpf: permits narrower load from bpf program context fields") permits narrower loads for certain ctx fields. That commit, however, generates a masking operation even when the prog-specific ctx conversion already produces a result of the narrower size. For example, for __sk_buff->protocol, the ctx conversion loads the data into the register with a 2-byte load, so a narrower 2-byte load should not generate masking. For __sk_buff->vlan_present, the conversion function sets the result to either 0 or 1, essentially a byte, so a narrower 2-byte or 1-byte load should not generate masking either.

To avoid the unnecessary masking, the prog-specific *_is_valid_access callbacks now pass converted_op_size back to the verifier, indicating the valid data width after the anticipated conversion. Based on this information, the verifier is able to skip the unnecessary masking. Since more information now flows back from the prog-specific *_is_valid_access checks, it is packed into one data structure for clarity.

Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
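For reference, the information returned by the prog-specific checks is packed into a structure along these lines (a sketch matching this patch series; the definition itself lands in include/linux/bpf.h, not in this file):

struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;	/* resulting register type of the access */
	int ctx_field_size;		/* full size of the underlying ctx field */
	int converted_op_size;		/* data width after the ctx conversion */
};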
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/bpf_trace.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
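On the verifier side, a narrower load then needs an AND-mask only when the requested size is smaller than what the ctx conversion actually produces. A minimal sketch of that decision, using a hypothetical helper name (the real logic sits inline in the verifier's ctx-access conversion, not in such a helper):

/* A load narrower than the converted operation leaves stale upper
 * bits in the register and must be masked; a load that matches the
 * converted width (e.g. a 2-byte read of __sk_buff->protocol, which
 * the conversion already performs as a 2-byte load) does not.
 */
static bool narrow_load_needs_mask(int size, int converted_op_size)
{
	return size < converted_op_size;
}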
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9d3ec8253131..97c46b440cd6 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -479,7 +479,7 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type, int *ctx_field_size)
+ struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(struct pt_regs))
return false;
@@ -562,7 +562,7 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type, int *ctx_field_size)
+ struct bpf_insn_access_aux *info)
{
if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
return false;
@@ -581,7 +581,7 @@ const struct bpf_verifier_ops tracepoint_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type, int *ctx_field_size)
+ struct bpf_insn_access_aux *info)
{
int sample_period_off;
@@ -595,12 +595,17 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
/* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */
sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
- *ctx_field_size = 8;
+ int allowed;
+
#ifdef __LITTLE_ENDIAN
- return (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
+ allowed = (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
#else
- return ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
+ allowed = ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
#endif
+ if (!allowed)
+ return false;
+ info->ctx_field_size = 8;
+ info->converted_op_size = 8;
} else {
if (size != sizeof(long))
return false;