Commit e8b108b0 authored by David S. Miller
Browse files


Daniel Borkmann says:

====================
pull-request: bpf 2019-01-11

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix TCP-BPF support for correctly setting the initial window
   via TCP_BPF_IW on an active TFO sender, from Yuchung.

2) Fix a panic in BPF's stack_map_get_build_id()'s ELF parsing on
   32 bit archs caused by page_address() returning NULL, from Song.

3) Fix BTF pretty print in kernel and bpftool when bitfield member
   offset is greater than 256. Also add test cases, from Yonghong.

4) Fix improper argument handling in xdp1 sample, from Ioana.

5) Install missing tcp_server.py and tcp_client.py files from
   BPF selftests, from Anders.

6) Add test_libbpf to gitignore in libbpf and BPF selftests,
   from Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents b19bce03 fb4129b9
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -1219,8 +1219,6 @@ static void btf_bitfield_seq_show(void *data, u8 bits_offset,
	u8 nr_copy_bits;
	u64 print_num;

	data += BITS_ROUNDDOWN_BYTES(bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(bits_offset);
	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

@@ -1255,7 +1253,9 @@ static void btf_int_bits_seq_show(const struct btf *btf,
	 * BTF_INT_OFFSET() cannot exceed 64 bits.
	 */
	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m);
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
}

static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
@@ -2001,12 +2001,12 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,

		member_offset = btf_member_bit_offset(t, member);
		bitfield_size = btf_member_bitfield_size(t, member);
		bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
		bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
		if (bitfield_size) {
			btf_bitfield_seq_show(data, member_offset,
			btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
					      bitfield_size, m);
		} else {
			bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
			bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
			ops = btf_type_ops(member_type);
			ops->seq_show(btf, member_type, member->type,
				      data + bytes_offset, bits8_offset, m);
+2 −1
Original line number Diff line number Diff line
@@ -260,7 +260,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
		return -EFAULT;	/* page not mapped */

	ret = -EINVAL;
	page_addr = page_address(page);
	page_addr = kmap_atomic(page);
	ehdr = (Elf32_Ehdr *)page_addr;

	/* compare magic x7f "ELF" */
@@ -276,6 +276,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = stack_map_get_build_id_64(page_addr, build_id);
out:
	kunmap_atomic(page_addr);
	put_page(page);
	return ret;
}
+1 −1
Original line number Diff line number Diff line
@@ -4203,7 +4203,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
			/* Only some options are supported */
			switch (optname) {
			case TCP_BPF_IW:
				if (val <= 0 || tp->data_segs_out > 0)
				if (val <= 0 || tp->data_segs_out > tp->syn_data)
					ret = -EINVAL;
				else
					tp->snd_cwnd = val;
+1 −1
Original line number Diff line number Diff line
@@ -103,7 +103,7 @@ int main(int argc, char **argv)
		return 1;
	}

	ifindex = if_nametoindex(argv[1]);
	ifindex = if_nametoindex(argv[optind]);
	if (!ifindex) {
		perror("if_nametoindex");
		return 1;
+7 −6
Original line number Diff line number Diff line
@@ -82,8 +82,6 @@ static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
	int bits_to_copy;
	__u64 print_num;

	data += BITS_ROUNDDOWN_BYTES(bit_offset);
	bit_offset = BITS_PER_BYTE_MASKED(bit_offset);
	bits_to_copy = bit_offset + nr_bits;
	bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);

@@ -118,7 +116,9 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
	 * BTF_INT_OFFSET() cannot exceed 64 bits.
	 */
	total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
	btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw,
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	btf_dumper_bitfield(nr_bits, bit_offset, data, jw,
			    is_plain_text);
}

@@ -216,11 +216,12 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
		}

		jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
		data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
		if (bitfield_size) {
			btf_dumper_bitfield(bitfield_size, bit_offset,
					    data, d->jw, d->is_plain_text);
			btf_dumper_bitfield(bitfield_size,
					    BITS_PER_BYTE_MASKED(bit_offset),
					    data_off, d->jw, d->is_plain_text);
		} else {
			data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
			ret = btf_dumper_do_type(d, m[i].type,
						 BITS_PER_BYTE_MASKED(bit_offset),
						 data_off);
Loading