Commit e7d4005d authored by David S. Miller
Browse files

Merge branch 'Introduce-sendpage_ok-to-detect-misused-sendpage-in-network-related-drivers'



Coly Li says:

====================
Introduce sendpage_ok() to detect misused sendpage in network related drivers

As Sagi Grimberg suggested, the original fix is refined into a more common
inline routine:
    static inline bool sendpage_ok(struct page *page)
    {
        return  (!PageSlab(page) && page_count(page) >= 1);
    }
If sendpage_ok() returns true, the page being checked can be handled by the
concrete zero-copy sendpage method in the network layer.

The v10 series has 7 patches, fixes a WARN_ONCE() usage from v9 series,
- The 1st patch in this series introduces sendpage_ok() in header file
  include/linux/net.h.
- The 2nd patch adds WARN_ONCE() for improper zero-copy send in
  kernel_sendpage().
- The 3rd patch fixes the page checking issue in nvme-over-tcp driver.
- The 4th patch adds page_count check by using sendpage_ok() in
  do_tcp_sendpages() as Eric Dumazet suggested.
- The 5th and 6th patches just replace existing open coded checks with
  the inline sendpage_ok() routine.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents f30e25a9 40efc4dc
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1553,7 +1553,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
	if (drbd_disable_sendpage || !sendpage_ok(page))
		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
+3 −4
Original line number Diff line number Diff line
@@ -913,12 +913,11 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
		else
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		/* can't zcopy slab pages */
		if (unlikely(PageSlab(page))) {
			ret = sock_no_sendpage(queue->sock, page, offset, len,
		if (sendpage_ok(page)) {
			ret = kernel_sendpage(queue->sock, page, offset, len,
					flags);
		} else {
			ret = kernel_sendpage(queue->sock, page, offset, len,
			ret = sock_no_sendpage(queue->sock, page, offset, len,
					flags);
		}
		if (ret <= 0)
+1 −1
Original line number Diff line number Diff line
@@ -128,7 +128,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
	 * coalescing neighboring slab objects into a single frag which
	 * triggers one of hardened usercopy checks.
	 */
	if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg)))
	if (!recv && sendpage_ok(sg_page(sg)))
		return;

	if (recv) {
+16 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <linux/rcupdate.h>
#include <linux/once.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sockptr.h>

#include <uapi/linux/net.h>
@@ -286,6 +287,21 @@ do { \
#define net_get_random_once_wait(buf, nbytes)			\
	get_random_once_wait((buf), (nbytes))

/*
 * E.g. XFS meta- & log-data is in slab pages, or bcache meta
 * data pages, or other high order pages allocated by
 * __get_free_pages() without __GFP_COMP, which have a page_count
 * of 0 and/or have PageSlab() set. We cannot use send_page for
 * those, as that does get_page(); put_page(); and would cause
 * either a VM_BUG directly, or __page_cache_release a page that
 * would actually still be referenced by someone, leading to some
 * obscure delayed Oops somewhere else.
 */
/* Return true if @page is safe to pass to a zero-copy sendpage
 * implementation: it is not a slab page and its refcount is at
 * least 1 (see the rationale in the comment block above). */
static inline bool sendpage_ok(struct page *page)
{
	return !PageSlab(page) && page_count(page) >= 1;
}

int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
		   size_t num, size_t len);
int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+1 −1
Original line number Diff line number Diff line
@@ -575,7 +575,7 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
	 * coalescing neighboring slab objects into a single frag which
	 * triggers one of hardened usercopy checks.
	 */
	if (page_count(page) >= 1 && !PageSlab(page))
	if (sendpage_ok(page))
		sendpage = sock->ops->sendpage;
	else
		sendpage = sock_no_sendpage;
Loading