From: Paolo Abeni <pabeni@redhat.com>
To: mptcp@lists.linux.dev
Subject: Re: [PATCH mptcp-next] mptcp: drop tx skb cache
Date: Fri, 21 May 2021 18:19:14 +0200 [thread overview]
Message-ID: <52d5aa83a8c304e4346b5a43cc5cd8858c6db1bd.camel@redhat.com> (raw)
In-Reply-To: <f3cf8531c9215fb77194e332a1aae5e954dbd2ea.1621592920.git.pabeni@redhat.com>
On Fri, 2021-05-21 at 12:30 +0200, Paolo Abeni wrote:
> The mentioned cache was introduced to reduce the number of skb
> allocations in atomic context, but the required complexity is
> excessive.
>
> This change removes the mentioned cache.
>
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
> formally submitting this one. I kept a VM running self-tests for ~24h
> with no issues
> ---
> net/mptcp/protocol.c | 89 ++------------------------------------------
> 1 file changed, 4 insertions(+), 85 deletions(-)
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index 446acfb85493..1114a914d845 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -903,22 +903,14 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
> df->data_seq + df->data_len == msk->write_seq;
> }
>
> -static int mptcp_wmem_with_overhead(struct sock *sk, int size)
> +static int mptcp_wmem_with_overhead(int size)
> {
> - struct mptcp_sock *msk = mptcp_sk(sk);
> - int ret, skbs;
> -
> - ret = size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
> - skbs = (msk->tx_pending_data + size) / msk->size_goal_cache;
> - if (skbs < msk->skb_tx_cache.qlen)
> - return ret;
> -
> - return ret + (skbs - msk->skb_tx_cache.qlen) * SKB_TRUESIZE(MAX_TCP_HEADER);
> + return size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
> }
>
> static void __mptcp_wmem_reserve(struct sock *sk, int size)
> {
> - int amount = mptcp_wmem_with_overhead(sk, size);
> + int amount = mptcp_wmem_with_overhead(size);
> struct mptcp_sock *msk = mptcp_sk(sk);
>
> WARN_ON_ONCE(msk->wmem_reserved);
> @@ -1213,49 +1205,8 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
> return NULL;
> }
>
> -static bool mptcp_tx_cache_refill(struct sock *sk, int size,
> - struct sk_buff_head *skbs, int *total_ts)
> -{
> - struct mptcp_sock *msk = mptcp_sk(sk);
> - struct sk_buff *skb;
> - int space_needed;
> -
> - if (unlikely(tcp_under_memory_pressure(sk))) {
> - mptcp_mem_reclaim_partial(sk);
> -
> - /* under pressure pre-allocate at most a single skb */
> - if (msk->skb_tx_cache.qlen)
> - return true;
> - space_needed = msk->size_goal_cache;
> - } else {
> - space_needed = msk->tx_pending_data + size -
> - msk->skb_tx_cache.qlen * msk->size_goal_cache;
> - }
> -
> - while (space_needed > 0) {
> - skb = __mptcp_do_alloc_tx_skb(sk, sk->sk_allocation);
> - if (unlikely(!skb)) {
> - /* under memory pressure, try to pass the caller a
> - * single skb to allow forward progress
> - */
> - while (skbs->qlen > 1) {
> - skb = __skb_dequeue_tail(skbs);
> - *total_ts -= skb->truesize;
> - __kfree_skb(skb);
> - }
> - return skbs->qlen > 0;
> - }
> -
> - *total_ts += skb->truesize;
> - __skb_queue_tail(skbs, skb);
> - space_needed -= msk->size_goal_cache;
> - }
> - return true;
> -}
> -
> static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
> {
> - struct mptcp_sock *msk = mptcp_sk(sk);
> struct sk_buff *skb;
>
> if (ssk->sk_tx_skb_cache) {
> @@ -1266,22 +1217,6 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
> return true;
> }
>
> - skb = skb_peek(&msk->skb_tx_cache);
> - if (skb) {
> - if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
> - skb = __skb_dequeue(&msk->skb_tx_cache);
> - if (WARN_ON_ONCE(!skb))
> - return false;
> -
> - mptcp_wmem_uncharge(sk, skb->truesize);
> - ssk->sk_tx_skb_cache = skb;
> - return true;
> - }
> -
> - /* over memory limit, no point to try to allocate a new skb */
> - return false;
> - }
> -
> skb = __mptcp_do_alloc_tx_skb(sk, gfp);
> if (!skb)
> return false;
> @@ -1297,7 +1232,6 @@ static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
> static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
> {
> return !ssk->sk_tx_skb_cache &&
> - !skb_peek(&mptcp_sk(sk)->skb_tx_cache) &&
> tcp_under_memory_pressure(sk);
> }
>
> @@ -1340,7 +1274,6 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
> /* compute send limit */
> info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
> avail_size = info->size_goal;
> - msk->size_goal_cache = info->size_goal;
> skb = tcp_write_queue_tail(ssk);
> if (skb) {
> /* Limit the write to the size available in the
> @@ -1689,7 +1622,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
> while (msg_data_left(msg)) {
> int total_ts, frag_truesize = 0;
> struct mptcp_data_frag *dfrag;
> - struct sk_buff_head skbs;
> bool dfrag_collapsed;
> size_t psize, offset;
>
> @@ -1722,16 +1654,10 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
> psize = pfrag->size - offset;
> psize = min_t(size_t, psize, msg_data_left(msg));
> total_ts = psize + frag_truesize;
> - __skb_queue_head_init(&skbs);
> - if (!mptcp_tx_cache_refill(sk, psize, &skbs, &total_ts))
> - goto wait_for_memory;
>
> - if (!mptcp_wmem_alloc(sk, total_ts)) {
> - __skb_queue_purge(&skbs);
> + if (!mptcp_wmem_alloc(sk, total_ts))
> goto wait_for_memory;
> - }
>
> - skb_queue_splice_tail(&skbs, &msk->skb_tx_cache);
> if (copy_page_from_iter(dfrag->page, offset, psize,
> &msg->msg_iter) != psize) {
> mptcp_wmem_uncharge(sk, psize + frag_truesize);
> @@ -2460,13 +2386,11 @@ static int __mptcp_init_sock(struct sock *sk)
> INIT_LIST_HEAD(&msk->rtx_queue);
> INIT_WORK(&msk->work, mptcp_worker);
> __skb_queue_head_init(&msk->receive_queue);
> - __skb_queue_head_init(&msk->skb_tx_cache);
whoops, I forgot to remove the 'skb_tx_cache' field from the msk.
v2 is coming...
/P
prev parent reply other threads:[~2021-05-21 16:19 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-05-21 10:30 [PATCH mptcp-next] mptcp: drop tx skb cache Paolo Abeni
2021-05-21 16:19 ` Paolo Abeni [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=52d5aa83a8c304e4346b5a43cc5cd8858c6db1bd.camel@redhat.com \
--to=pabeni@redhat.com \
--cc=mptcp@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).