If a program of type ``BPF_PROG_TYPE_SK_MSG`` is run on a ``msg`` it can only
parse data that the (``data``, ``data_end``) pointers have already consumed.
For ``sendmsg()`` hooks this is likely the first scatterlist element. But for
-calls relying on the ``sendpage`` handler (e.g., ``sendfile()``) this will be
-the range (**0**, **0**) because the data is shared with user space and by
-default the objective is to avoid allowing user space to modify data while (or
-after) BPF verdict is being decided. This helper can be used to pull in data
-and to set the start and end pointers to given values. Data will be copied if
+calls relying on ``MSG_SPLICE_PAGES`` (e.g., ``sendfile()``) this will be the
+range (**0**, **0**) because the data is shared with user space and by default
+the objective is to avoid allowing user space to modify data while (or after)
+BPF verdict is being decided. This helper can be used to pull in data and to
+set the start and end pointers to given values. Data will be copied if
necessary (i.e., if data was not linear and if start and end pointers do not
point to the same chunk).
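A minimal sketch of an ``sk_msg`` program using this helper; the 16-byte
header length, program name, and section name are illustrative::

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("sk_msg")
    int pull_and_parse(struct sk_msg_md *msg)
    {
        void *data, *data_end;

        /* For spliced data the initial window may be (0, 0); ask the
         * kernel to make bytes [0, 16) linear and readable. */
        if (bpf_msg_pull_data(msg, 0, 16, 0))
            return SK_DROP;

        data = msg->data;
        data_end = msg->data_end;
        if (data + 16 > data_end)       /* bounds check for the verifier */
            return SK_DROP;

        /* ... parse the 16-byte header at 'data' ... */
        return SK_PASS;
    }

    char _license[] SEC("license") = "GPL";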
int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
- ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
- loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
- ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
rps_sock_flow_table is a global flow table that contains the *desired* CPU
for flows: the CPU that is currently processing the flow in userspace.
Each table value is a CPU index that is updated during calls to recvmsg
-and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
-and tcp_splice_read()).
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg() and
+tcp_splice_read()).
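For reference, a condensed sketch of that recording step, modeled on the
kernel's rps_record_sock_flow() (simplified, not the verbatim
implementation):

    /* Store the current CPU for this flow, keyed by the flow hash.  The
     * upper hash bits guard against collisions; rps_cpu_mask covers the
     * low bits that hold the CPU number. */
    static inline void record_sock_flow(struct rps_sock_flow_table *table,
                                        u32 hash)
    {
        if (table && hash) {
            unsigned int index = hash & table->mask;
            u32 val = (hash & ~rps_cpu_mask) | raw_smp_processor_id();

            /* Only a hint: preemption may move the thread immediately. */
            if (READ_ONCE(table->ents[index]) != val)
                WRITE_ONCE(table->ents[index], val);
        }
    }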
When the scheduler moves a thread to a new CPU while it has outstanding
receive packets on the old CPU, packets may arrive out of order. To
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
EXPORT_SYMBOL_GPL(af_alg_sendmsg);
/**
- * af_alg_sendpage - sendpage system call handler
- * @sock: socket of connection to user space to write to
- * @page: data to send
- * @offset: offset into page to begin sending
- * @size: length of data
- * @flags: message send/receive flags
- *
- * This is a generic implementation of sendpage to fill ctx->tsgl_list.
- */
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct bio_vec bvec;
- struct msghdr msg = {
- .msg_flags = flags | MSG_SPLICE_PAGES,
- };
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
- return sock_sendmsg(sock, &msg);
-}
-EXPORT_SYMBOL_GPL(af_alg_sendpage);
-
-/**
* af_alg_free_resources - release resources required for crypto request
* @areq: Request holding the TX and RX SGL
*/
 * The following memory management concept is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
- * filled by user space with the data submitted via sendpage. Filling up
- * the TX SGL does not cause a crypto operation -- the data will only be
- * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
- * provide a buffer which is tracked with the RX SGL.
+ * filled by user space with the data submitted via sendmsg (maybe with
+ * MSG_SPLICE_PAGES). Filling up the TX SGL does not cause a crypto operation
+ * -- the data will only be tracked by the kernel. Upon receipt of one recvmsg
+ * call, the caller must provide a buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
.release = af_alg_release,
.sendmsg = aead_sendmsg,
- .sendpage = af_alg_sendpage,
.recvmsg = aead_recvmsg,
.poll = af_alg_poll,
};
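To make the TX/RX SGL model above concrete, a hedged userspace sketch:
sendmsg() only queues data into the TX SGL, and the cipher runs once
read()/recvmsg() supplies the RX buffer. The algorithm, key size, and error
handling are illustrative, not part of the patch:

    #include <linux/if_alg.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Encrypt len bytes (a multiple of 16 for ecb(aes)) via AF_ALG. */
    int encrypt_ecb_aes(const unsigned char key[16],
                        const unsigned char *in, unsigned char *out,
                        size_t len)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "skcipher",
            .salg_name   = "ecb(aes)",
        };
        char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
        struct iovec iov = { .iov_base = (void *)in, .iov_len = len };
        struct msghdr msg = {
            .msg_control    = cbuf,
            .msg_controllen = sizeof(cbuf),
            .msg_iov        = &iov,
            .msg_iovlen     = 1,
        };
        struct cmsghdr *cmsg;
        int tfm, req, ret = -1;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        if (tfm < 0)
            return -1;
        if (bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
            setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, 16) < 0)
            goto out_tfm;
        req = accept(tfm, NULL, 0);
        if (req < 0)
            goto out_tfm;

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type  = ALG_SET_OP;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
        *(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

        /* Fill the TX SGL; no crypto operation happens yet. */
        if (sendmsg(req, &msg, 0) == (ssize_t)len &&
            /* Providing the RX buffer triggers the cipher. */
            read(req, out, len) == (ssize_t)len)
            ret = 0;
        close(req);
    out_tfm:
        close(tfm);
        return ret;
    }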
return aead_sendmsg(sock, msg, size);
}
-static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- int err;
-
- err = aead_check_key(sock);
- if (err)
- return err;
-
- return af_alg_sendpage(sock, page, offset, size, flags);
-}
-
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
.release = af_alg_release,
.sendmsg = aead_sendmsg_nokey,
- .sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
.poll = af_alg_poll,
};
.bind = sock_no_bind,
.accept = sock_no_accept,
.sendmsg = sock_no_sendmsg,
- .sendpage = sock_no_sendpage,
.release = af_alg_release,
.recvmsg = rng_recvmsg,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
- .sendpage = sock_no_sendpage,
.release = af_alg_release,
.recvmsg = rng_test_recvmsg,
.release = af_alg_release,
.sendmsg = skcipher_sendmsg,
- .sendpage = af_alg_sendpage,
.recvmsg = skcipher_recvmsg,
.poll = af_alg_poll,
};
return skcipher_sendmsg(sock, msg, size);
}
-static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- int err;
-
- err = skcipher_check_key(sock);
- if (err)
- return err;
-
- return af_alg_sendpage(sock, page, offset, size, flags);
-}
-
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
.release = af_alg_release,
.sendmsg = skcipher_sendmsg_nokey,
- .sendpage = skcipher_sendpage_nokey,
.recvmsg = skcipher_recvmsg_nokey,
.poll = af_alg_poll,
};
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int flags, int *addr_len);
void chtls_splice_eof(struct socket *sock);
-int chtls_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt);
void chtls_tcp_push(struct sock *sk, int flags);
release_sock(sk);
}
-int chtls_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
-{
- struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
- struct bio_vec bvec;
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
- return chtls_sendmsg(sk, &msg, size);
-}
-
static void chtls_select_window(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
chtls_cpl_prot.shutdown = chtls_shutdown;
chtls_cpl_prot.sendmsg = chtls_sendmsg;
chtls_cpl_prot.splice_eof = chtls_splice_eof;
- chtls_cpl_prot.sendpage = chtls_sendpage;
chtls_cpl_prot.recvmsg = chtls_recvmsg;
chtls_cpl_prot.setsockopt = chtls_setsockopt;
chtls_cpl_prot.getsockopt = chtls_getsockopt;
/*
* Grab and keep cached pages associated with a file in the svc_rqst
- * so that they can be passed to the network sendmsg/sendpage routines
+ * so that they can be passed to the network sendmsg routines
* directly. They will be released after the sending has completed.
*
* Return values: Number of bytes consumed, or -EIO if there are no
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(void *data, int err);
__poll_t af_alg_poll(struct file *file, struct socket *sock,
size_t total_len, int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
- ssize_t (*sendpage) (struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
void (*splice_eof)(struct socket *sock);
sk_read_actor_t recv_actor);
/* This is different from read_sock(); it reads an entire skb at a time. */
int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
- int (*sendpage_locked)(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
size_t size);
int (*set_rcvlowat)(struct sock *sk, int val);
int flags);
int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
-int kernel_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
int inet_send_prepare(struct sock *sk);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
void inet_splice_eof(struct socket *sock);
-ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int inet_shutdown(struct socket *sock, int how);
size_t len);
int (*recvmsg)(struct sock *sk, struct msghdr *msg,
size_t len, int flags, int *addr_len);
- int (*sendpage)(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
void (*splice_eof)(struct socket *sock);
int (*bind)(struct sock *sk,
struct sockaddr *addr, int addr_len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
-ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
/*
* Functions to fill in entries in struct proto_ops when a protocol
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
-int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
- int flags);
-int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
.sendmsg = atalk_sendmsg,
.recvmsg = atalk_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct notifier_block ddp_notifier = {
.sendmsg = vcc_sendmsg,
.recvmsg = vcc_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
.sendmsg = vcc_sendmsg,
.recvmsg = vcc_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
.sendmsg = ax25_sendmsg,
.recvmsg = ax25_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
/*
.sendmsg = caif_seqpkt_sendmsg,
.recvmsg = caif_seqpkt_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static const struct proto_ops caif_stream_ops = {
.sendmsg = caif_stream_sendmsg,
.recvmsg = caif_stream_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
/* This function is called when a socket is finally destroyed. */
.sendmsg = bcm_sendmsg,
.recvmsg = bcm_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto bcm_proto __read_mostly = {
.sendmsg = isotp_sendmsg,
.recvmsg = isotp_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto isotp_proto __read_mostly = {
.sendmsg = j1939_sk_sendmsg,
.recvmsg = j1939_sk_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto j1939_proto __read_mostly = {
.sendmsg = raw_sendmsg,
.recvmsg = raw_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto raw_proto __read_mostly = {
}
}
-ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
-{
- ssize_t res;
- struct msghdr msg = {.msg_flags = flags};
- struct kvec iov;
- char *kaddr = kmap(page);
- iov.iov_base = kaddr + offset;
- iov.iov_len = size;
- res = kernel_sendmsg(sock, &msg, &iov, 1, size);
- kunmap(page);
- return res;
-}
-EXPORT_SYMBOL(sock_no_sendpage);
-
-ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
-{
- ssize_t res;
- struct msghdr msg = {.msg_flags = flags};
- struct kvec iov;
- char *kaddr = kmap(page);
-
- iov.iov_base = kaddr + offset;
- iov.iov_len = size;
- res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
- kunmap(page);
- return res;
-}
-EXPORT_SYMBOL(sock_no_sendpage_locked);
-
/*
* Default Socket Callbacks
*/
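Every sendpage implementation removed in this series is replaced by the same
pattern: wrap the page in a bio_vec, mark the message MSG_SPLICE_PAGES, and
hand it to sendmsg. A minimal sketch mirroring the removed helpers (the
function name is illustrative):

    #include <linux/bvec.h>
    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    static int send_page_via_sendmsg(struct socket *sock, struct page *page,
                                     int offset, size_t size, bool more)
    {
        struct bio_vec bvec;
        struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };

        if (more)
            msg.msg_flags |= MSG_MORE;  /* further data will follow */

        bvec_set_page(&bvec, page, size, offset);
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
        return sock_sendmsg(sock, &msg);
    }

Callers that cannot guarantee the page is safe to splice (see sendpage_ok())
should copy the data into the message instead of splicing it.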
{
seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
- "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
+ "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
proto->name,
proto->obj_size,
sock_prot_inuse_get(seq_file_net(seq), proto),
proto_method_implemented(proto->getsockopt),
proto_method_implemented(proto->sendmsg),
proto_method_implemented(proto->recvmsg),
- proto_method_implemented(proto->sendpage),
proto_method_implemented(proto->bind),
proto_method_implemented(proto->backlog_rcv),
proto_method_implemented(proto->hash),
"maxhdr",
"slab",
"module",
- "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
+ "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n");
else
proto_seq_printf(seq, list_entry(v, struct proto, node));
return 0;
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct inet_protosw dccp_v4_protosw = {
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
.sendmsg = ieee802154_sock_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
/* DGRAM Sockets (802.15.4 dataframes) */
.sendmsg = ieee802154_sock_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static void ieee802154_sock_destruct(struct sock *sk)
}
EXPORT_SYMBOL_GPL(inet_splice_eof);
-ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- const struct proto *prot;
-
- if (unlikely(inet_send_prepare(sk)))
- return -EAGAIN;
-
- /* IPV6_ADDRFORM can change sk->sk_prot under us. */
- prot = READ_ONCE(sk->sk_prot);
- if (prot->sendpage)
- return prot->sendpage(sk, page, offset, size, flags);
- return sock_no_sendpage(sock, page, offset, size, flags);
-}
-EXPORT_SYMBOL(inet_sendpage);
-
INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
size_t, int, int *));
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
.mmap = tcp_mmap,
#endif
.splice_eof = inet_splice_eof,
- .sendpage = inet_sendpage,
.splice_read = tcp_splice_read,
.read_sock = tcp_read_sock,
.read_skb = tcp_read_skb,
.sendmsg_locked = tcp_sendmsg_locked,
- .sendpage_locked = tcp_sendpage_locked,
.peek_len = tcp_peek_len,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = inet_splice_eof,
- .sendpage = inet_sendpage,
.set_peek_off = sk_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = inet_splice_eof,
- .sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
#endif
return mss_now;
}
-/* In some cases, both sendpage() and sendmsg() could have added
- * an skb to the write queue, but failed adding payload on it.
- * We need to remove it to consume less memory, but more
- * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
- * users.
+/* In some cases, sendmsg() could have added an skb to the write queue,
+ * but failed adding payload on it. We need to remove it to consume less
+ * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
+ * epoll() users.
*/
void tcp_remove_empty_skb(struct sock *sk)
{
return min(copy, sk->sk_forward_alloc);
}
-int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags)
-{
- struct bio_vec bvec;
- struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
- if (!(sk->sk_route_caps & NETIF_F_SG))
- return sock_no_sendpage_locked(sk, page, offset, size, flags);
-
- tcp_rate_check_app_limited(sk); /* is sending application-limited? */
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- return tcp_sendmsg_locked(sk, &msg, size);
-}
-EXPORT_SYMBOL_GPL(tcp_sendpage_locked);
-
-int tcp_sendpage(struct sock *sk, struct page *page, int offset,
- size_t size, int flags)
-{
- int ret;
-
- lock_sock(sk);
- ret = tcp_sendpage_locked(sk, page, offset, size, flags);
- release_sock(sk);
-
- return ret;
-}
-EXPORT_SYMBOL(tcp_sendpage);
-
void tcp_free_fastopen_req(struct tcp_sock *tp)
{
if (tp->fastopen_req) {
long timeo;
int flags;
- /* Don't let internal sendpage flags through */
+ /* Don't let internal flags through */
flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
flags |= MSG_NO_SHARED_FRAGS;
return copied ? copied : err;
}
-static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
- size_t size, int flags)
-{
- struct bio_vec bvec;
- struct msghdr msg = {
- .msg_flags = flags | MSG_SPLICE_PAGES,
- };
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- return tcp_bpf_sendmsg(sk, &msg, size);
-}
-
enum {
TCP_BPF_IPV4,
TCP_BPF_IPV6,
prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
- prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
prot[TCP_BPF_RX] = prot[TCP_BPF_BASE];
prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser;
* indeed valid assumptions.
*/
return ops->recvmsg == tcp_recvmsg &&
- ops->sendmsg == tcp_sendmsg &&
- ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
+ ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP;
}
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
.recvmsg = tcp_recvmsg,
.sendmsg = tcp_sendmsg,
.splice_eof = tcp_splice_eof,
- .sendpage = tcp_sendpage,
.backlog_rcv = tcp_v4_do_rcv,
.release_cb = tcp_release_cb,
.hash = inet_hash,
}
EXPORT_SYMBOL_GPL(udp_splice_eof);
-int udp_sendpage(struct sock *sk, struct page *page, int offset,
- size_t size, int flags)
-{
- struct bio_vec bvec;
- struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES };
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
- return udp_sendmsg(sk, &msg, size);
-}
-
#define UDP_SKB_IS_STATELESS 0x80000000
/* all head states (dst, sk, nf conntrack) except skb extensions are
.sendmsg = udp_sendmsg,
.recvmsg = udp_recvmsg,
.splice_eof = udp_splice_eof,
- .sendpage = udp_sendpage,
.release_cb = ip4_datagram_release_cb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
int *addr_len);
-int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
- int flags);
void udp_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS
.getsockopt = udp_getsockopt,
.sendmsg = udp_sendmsg,
.recvmsg = udp_recvmsg,
- .sendpage = udp_sendpage,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v4_rehash,
.mmap = tcp_mmap,
#endif
.splice_eof = inet_splice_eof,
- .sendpage = inet_sendpage,
.sendmsg_locked = tcp_sendmsg_locked,
- .sendpage_locked = tcp_sendpage_locked,
.splice_read = tcp_splice_read,
.read_sock = tcp_read_sock,
.read_skb = tcp_read_skb,
.recvmsg = inet6_recvmsg, /* retpoline's sake */
.read_skb = udp_read_skb,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.set_peek_off = sk_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
.sendmsg = inet_sendmsg, /* ok */
.recvmsg = sock_common_recvmsg, /* ok */
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
.recvmsg = tcp_recvmsg,
.sendmsg = tcp_sendmsg,
.splice_eof = tcp_splice_eof,
- .sendpage = tcp_sendpage,
.backlog_rcv = tcp_v6_do_rcv,
.release_cb = tcp_release_cb,
.hash = inet6_hash,
release_sock(sk);
}
-static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-
-{
- struct bio_vec bvec;
- struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- if (flags & MSG_OOB)
- return -EOPNOTSUPP;
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
- return kcm_sendmsg(sock, &msg, size);
-}
-
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
.recvmsg = kcm_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = kcm_splice_eof,
- .sendpage = kcm_sendpage,
};
static const struct proto_ops kcm_seqpacket_ops = {
.recvmsg = kcm_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = kcm_splice_eof,
- .sendpage = kcm_sendpage,
.splice_read = kcm_splice_read,
};
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
/* Now the operations that really occur. */
.release = pfkey_release,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct inet_protosw l2tp_ip_protosw = {
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
.sendmsg = llc_ui_sendmsg,
.recvmsg = llc_ui_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static const char llc_proc_err_msg[] __initconst =
.sendmsg = mctp_sendmsg,
.recvmsg = mctp_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_ioctl = mctp_compat_ioctl,
#endif
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = inet_sendpage,
};
static struct inet_protosw mptcp_protosw = {
.sendmsg = inet6_sendmsg,
.recvmsg = inet6_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
.sendmsg = netlink_sendmsg,
.recvmsg = netlink_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static const struct net_proto_family netlink_family_ops = {
.sendmsg = nr_sendmsg,
.recvmsg = nr_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct notifier_block nr_dev_notifier = {
.sendmsg = packet_sendmsg_spkt,
.recvmsg = packet_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static const struct proto_ops packet_ops = {
.sendmsg = packet_sendmsg,
.recvmsg = packet_recvmsg,
.mmap = packet_mmap,
- .sendpage = sock_no_sendpage,
};
static const struct net_proto_family packet_family_ops = {
.sendmsg = pn_socket_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
const struct proto_ops phonet_stream_ops = {
.sendmsg = pn_socket_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);
.shutdown = sock_no_shutdown,
.release = qrtr_release,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto qrtr_proto = {
.sendmsg = rds_sendmsg,
.recvmsg = rds_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static void rds_sock_destruct(struct sock *sk)
.sendmsg = rose_sendmsg,
.recvmsg = rose_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct notifier_block rose_dev_notifier = {
.sendmsg = rxrpc_sendmsg,
.recvmsg = rxrpc_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct proto rxrpc_proto = {
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
/* Registration with AF_INET family. */
EXPORT_SYMBOL(kernel_getpeername);
/**
- * kernel_sendpage - send a &page through a socket (kernel space)
- * @sock: socket
- * @page: page
- * @offset: page offset
- * @size: total size in bytes
- * @flags: flags (MSG_DONTWAIT, ...)
- *
- * Returns the total amount sent in bytes or an error.
- */
-
-int kernel_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags)
-{
- if (sock->ops->sendpage) {
- /* Warn in case the improper page to zero-copy send */
- WARN_ONCE(!sendpage_ok(page), "improper page for zero-copy send");
- return sock->ops->sendpage(sock, page, offset, size, flags);
- }
- return sock_no_sendpage(sock, page, offset, size, flags);
-}
-EXPORT_SYMBOL(kernel_sendpage);
-
-/**
- * kernel_sendpage_locked - send a &page through the locked sock (kernel space)
- * @sk: sock
- * @page: page
- * @offset: page offset
- * @size: total size in bytes
- * @flags: flags (MSG_DONTWAIT, ...)
- *
- * Returns the total amount sent in bytes or an error.
- * Caller must hold @sk.
- */
-
-int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags)
-{
- struct socket *sock = sk->sk_socket;
-
- if (sock->ops->sendpage_locked)
- return sock->ops->sendpage_locked(sk, page, offset, size,
- flags);
-
- return sock_no_sendpage_locked(sk, page, offset, size, flags);
-}
-EXPORT_SYMBOL(kernel_sendpage_locked);
-
-/**
* kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
* @sock: socket
* @how: connection part
.sendmsg = tipc_sendmsg,
.recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage
};
static const struct proto_ops packet_ops = {
.sendmsg = tipc_send_packet,
.recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage
};
static const struct proto_ops stream_ops = {
.sendmsg = tipc_sendstream,
.recvmsg = tipc_recvstream,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage
};
static const struct net_proto_family tipc_family_ops = {
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
void tls_sw_splice_eof(struct socket *sock);
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-int tls_sw_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
void tls_device_splice_eof(struct socket *sock);
-int tls_device_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
mutex_unlock(&tls_ctx->tx_lock);
}
-int tls_device_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
-{
- struct bio_vec bvec;
- struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- if (flags & MSG_OOB)
- return -EOPNOTSUPP;
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
- return tls_device_sendmsg(sk, &msg, size);
-}
-
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn)
{
ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
ops[TLS_SW ][TLS_BASE].splice_eof = tls_sw_splice_eof;
- ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
#ifdef CONFIG_TLS_DEVICE
ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
- ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
- ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
- ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
#endif
#ifdef CONFIG_TLS_TOE
ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof;
- prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;
prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
prot[TLS_HW][TLS_BASE].splice_eof = tls_device_splice_eof;
- prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;
prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
prot[TLS_HW][TLS_SW].splice_eof = tls_device_splice_eof;
- prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;
prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];
mutex_unlock(&tls_ctx->tx_lock);
}
-int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
-{
- struct bio_vec bvec;
- struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
- if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
- MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
- MSG_NO_SHARED_FRAGS))
- return -EOPNOTSUPP;
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
- return tls_sw_sendmsg_locked(sk, &msg, size);
-}
-
-int tls_sw_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
-{
- struct bio_vec bvec;
- struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
-
- if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
- MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
- return -EOPNOTSUPP;
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
- return tls_sw_sendmsg(sk, &msg, size);
-}
-
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
bool released)
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
-static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
- size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
struct pipe_inode_info *, size_t size,
unsigned int flags);
.recvmsg = unix_stream_recvmsg,
.read_skb = unix_stream_read_skb,
.mmap = sock_no_mmap,
- .sendpage = unix_stream_sendpage,
.splice_read = unix_stream_splice_read,
.set_peek_off = unix_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
.read_skb = unix_read_skb,
.recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.set_peek_off = unix_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
};
.sendmsg = unix_seqpacket_sendmsg,
.recvmsg = unix_seqpacket_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.set_peek_off = unix_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
};
return sent ? : err;
}
-static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
- int offset, size_t size, int flags)
-{
- struct bio_vec bvec;
- struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES };
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- msg.msg_flags |= MSG_MORE;
-
- bvec_set_page(&bvec, page, size, offset);
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
- return unix_stream_sendmsg(socket, &msg, size);
-}
-
static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
.sendmsg = vsock_dgram_sendmsg,
.recvmsg = vsock_dgram_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.read_skb = vsock_read_skb,
};
.sendmsg = vsock_connectible_sendmsg,
.recvmsg = vsock_connectible_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.set_rcvlowat = vsock_set_rcvlowat,
.read_skb = vsock_read_skb,
};
.sendmsg = vsock_connectible_sendmsg,
.recvmsg = vsock_connectible_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
.read_skb = vsock_read_skb,
};
.sendmsg = x25_sendmsg,
.recvmsg = x25_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
};
static struct packet_type x25_packet_type __read_mostly = {
.sendmsg = xsk_sendmsg,
.recvmsg = xsk_recvmsg,
.mmap = xsk_mmap,
- .sendpage = sock_no_sendpage,
};
static void xsk_destruct(struct sock *sk)