author     David S. Miller <davem@davemloft.net>  2019-01-11 12:51:49 -0800
committer  David S. Miller <davem@davemloft.net>  2019-01-11 12:51:49 -0800
commit     e8b108b050e84b6d7497d2cd29fe7623d0a33ed6 (patch)
tree       3312df1c78318fd34accb112866e869cedf84635 /kernel
parent     net: ethernet: mediatek: fix warning in phy_start_aneg (diff)
parent     Merge branch 'bpf-fix-bitfield-printing' (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2019-01-11

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix TCP-BPF support for correctly setting the initial window via
   TCP_BPF_IW on an active TFO sender, from Yuchung.

2) Fix a panic in BPF's stack_map_get_build_id()'s ELF parsing on
   32 bit archs caused by page_address() returning NULL, from Song.

3) Fix BTF pretty print in kernel and bpftool when bitfield member
   offset is greater than 256. Also add test cases, from Yonghong.

4) Fix improper argument handling in xdp1 sample, from Ioana.

5) Install missing tcp_server.py and tcp_client.py files from BPF
   selftests, from Anders.

6) Add test_libbpf to gitignore in libbpf and BPF selftests, from
   Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/bpf/btf.c        12
-rw-r--r--   kernel/bpf/stackmap.c    3
2 files changed, 8 insertions, 7 deletions
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 715f9fcf4712..a2f53642592b 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1219,8 +1219,6 @@ static void btf_bitfield_seq_show(void *data, u8 bits_offset,
         u8 nr_copy_bits;
         u64 print_num;
 
-        data += BITS_ROUNDDOWN_BYTES(bits_offset);
-        bits_offset = BITS_PER_BYTE_MASKED(bits_offset);
         nr_copy_bits = nr_bits + bits_offset;
         nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
 
@@ -1255,7 +1253,9 @@ static void btf_int_bits_seq_show(const struct btf *btf,
          * BTF_INT_OFFSET() cannot exceed 64 bits.
          */
         total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
-        btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m);
+        data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+        bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+        btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
 }
 
 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
@@ -2001,12 +2001,12 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
 
                 member_offset = btf_member_bit_offset(t, member);
                 bitfield_size = btf_member_bitfield_size(t, member);
+                bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
+                bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
                 if (bitfield_size) {
-                        btf_bitfield_seq_show(data, member_offset,
+                        btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
                                               bitfield_size, m);
                 } else {
-                        bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
-                        bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
                         ops = btf_type_ops(member_type);
                         ops->seq_show(btf, member_type, member->type,
                                       data + bytes_offset, bits8_offset, m);
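
Why the btf.c change is needed: btf_bitfield_seq_show() takes its bit offset as a u8, so a bitfield member that starts 256 or more bits into a struct was already truncated by the time the function computed its own byte adjustment. With this patch the callers do the byte rounding on the full 32-bit offset and pass only the sub-byte remainder, which is always below 8 and therefore fits in a u8. Below is a minimal userspace sketch of that arithmetic; the macros are re-derived from their definitions in kernel/bpf/btf.c and the 260-bit member offset is just an illustrative value.

#include <stdint.h>
#include <stdio.h>

/* Re-derived from kernel/bpf/btf.c for illustration only. */
#define BITS_PER_BYTE                   8
#define BITS_PER_BYTE_MASKED(bits)      ((bits) & (BITS_PER_BYTE - 1))
#define BITS_ROUNDDOWN_BYTES(bits)      ((bits) >> 3)

int main(void)
{
        /* hypothetical bitfield member starting 260 bits into a big struct */
        uint32_t member_offset = 260;

        /*
         * Old call path: the full offset was passed as the u8 bits_offset
         * argument, so it arrived truncated to 260 % 256 == 4, and the byte
         * adjustment computed inside the function (4 >> 3 == 0) never
         * advanced the data pointer by the 32 bytes it actually needed.
         */
        uint8_t truncated = (uint8_t)member_offset;

        /* New call path: split on the full 32-bit value in the caller. */
        uint32_t bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);   /* 32 */
        uint8_t  bits8_offset = BITS_PER_BYTE_MASKED(member_offset);   /*  4 */

        printf("truncated=%u bytes_offset=%u bits8_offset=%u\n",
               (unsigned)truncated, (unsigned)bytes_offset,
               (unsigned)bits8_offset);
        return 0;
}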
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 90daf285de03..d9e2483669d0 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -260,7 +260,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
                 return -EFAULT; /* page not mapped */
 
         ret = -EINVAL;
-        page_addr = page_address(page);
+        page_addr = kmap_atomic(page);
         ehdr = (Elf32_Ehdr *)page_addr;
 
         /* compare magic x7f "ELF" */
@@ -276,6 +276,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
         else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
                 ret = stack_map_get_build_id_64(page_addr, build_id);
 out:
+        kunmap_atomic(page_addr);
         put_page(page);
         return ret;
 }
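
The stackmap.c change matters on 32-bit kernels with highmem: pages above the lowmem boundary have no permanent kernel mapping, so page_address() can return NULL and the subsequent ELF header dereference faults. kmap_atomic() always yields a usable temporary mapping and must be paired with kunmap_atomic(), which the patch adds at the out: label. The following is a hedged sketch of that pattern in kernel style; it is not the actual stack_map_get_build_id() code, and the helper name and check are purely illustrative.

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Illustrative helper, not from stackmap.c: peek at a page's first bytes. */
static int peek_elf_magic(struct page *page)
{
        void *page_addr;
        int ret = -EINVAL;

        /*
         * Temporary mapping; works for highmem pages where page_address()
         * would return NULL on 32-bit kernels.
         */
        page_addr = kmap_atomic(page);

        if (memcmp(page_addr, ELFMAG, SELFMAG) == 0)
                ret = 0;        /* page starts with the \x7fELF magic */

        /* Every kmap_atomic() must be paired with kunmap_atomic(). */
        kunmap_atomic(page_addr);

        return ret;
}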