From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1759493Ab2EJNsi (ORCPT );
	Thu, 10 May 2012 09:48:38 -0400
Received: from cantor2.suse.de ([195.135.220.15]:37235 "EHLO mx2.suse.de"
	rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
	id S932311Ab2EJNpY (ORCPT );
	Thu, 10 May 2012 09:45:24 -0400
From: Mel Gorman
To: Andrew Morton
Cc: Linux-MM ,
	Linux-Netdev ,
	LKML ,
	David Miller ,
	Neil Brown ,
	Peter Zijlstra ,
	Mike Christie ,
	Eric B Munson ,
	Mel Gorman
Subject: [PATCH 08/17] net: Introduce sk_allocation() to allow addition of
	GFP flags depending on the individual socket
Date: Thu, 10 May 2012 14:45:01 +0100
Message-Id: <1336657510-24378-9-git-send-email-mgorman@suse.de>
X-Mailer: git-send-email 1.7.9.2
In-Reply-To: <1336657510-24378-1-git-send-email-mgorman@suse.de>
References: <1336657510-24378-1-git-send-email-mgorman@suse.de>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

Introduce sk_allocation(), which allows sock-specific flags to be
injected into each sock-related allocation. It is only used on
allocation paths that may be required for writing pages back to
network storage.

Signed-off-by: Peter Zijlstra
Signed-off-by: Mel Gorman
---
 include/net/sock.h    |    5 +++++
 net/ipv4/tcp.c        |    3 ++-
 net/ipv4/tcp_output.c |   16 +++++++++-------
 net/ipv6/tcp_ipv6.c   |    8 +++++---
 4 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 188532e..bbf2f71 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -644,6 +644,11 @@ static inline int sock_flag(struct sock *sk, enum sock_flags flag)
 	return test_bit(flag, &sk->sk_flags);
 }
 
+static inline gfp_t sk_allocation(struct sock *sk, gfp_t gfp_mask)
+{
+	return gfp_mask;
+}
+
 static inline void sk_acceptq_removed(struct sock *sk)
 {
 	sk->sk_ack_backlog--;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8bb6ade..0027282 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -698,7 +698,8 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 	/* The TCP header must be at least 32-bit aligned. */
 	size = ALIGN(size, 4);
 
-	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+	skb = alloc_skb_fclone(size + sk->sk_prot->max_header,
+			       sk_allocation(sk, gfp));
 	if (skb) {
 		if (sk_wmem_schedule(sk, skb->truesize)) {
 			skb_reserve(skb, sk->sk_prot->max_header);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7ac6423..838bd37 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2346,7 +2346,7 @@ void tcp_send_fin(struct sock *sk)
 		/* Socket is locked, keep trying until memory is available. */
 		for (;;) {
 			skb = alloc_skb_fclone(MAX_TCP_HEADER,
-					       sk->sk_allocation);
+					       sk_allocation(sk, sk->sk_allocation));
 			if (skb)
 				break;
 			yield();
@@ -2372,7 +2372,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	struct sk_buff *skb;
 
 	/* NOTE: No TCP options attached and we never retransmit this. */
-	skb = alloc_skb(MAX_TCP_HEADER, priority);
+	skb = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, priority));
 	if (!skb) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
 		return;
@@ -2445,7 +2445,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 
 	if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
 		s_data_desired = cvp->s_data_desired;
-	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
+	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1,
+			   sk_allocation(sk, GFP_ATOMIC));
 	if (skb == NULL)
 		return NULL;
 
@@ -2635,7 +2636,8 @@ int tcp_connect(struct sock *sk)
 
 	tcp_connect_init(sk);
 
-	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
+	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15,
+				sk_allocation(sk, sk->sk_allocation));
 	if (unlikely(buff == NULL))
 		return -ENOBUFS;
 
@@ -2741,7 +2743,7 @@ void tcp_send_ack(struct sock *sk)
 	 * tcp_transmit_skb() will set the ownership to this
 	 * sock.
 	 */
-	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+	buff = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, GFP_ATOMIC));
 	if (buff == NULL) {
 		inet_csk_schedule_ack(sk);
 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
@@ -2756,7 +2758,7 @@ void tcp_send_ack(struct sock *sk)
 
 	/* Send it off, this clears delayed acks for us. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
-	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
+	tcp_transmit_skb(sk, buff, 0, sk_allocation(sk, GFP_ATOMIC));
 }
 
 /* This routine sends a packet with an out of date sequence
@@ -2776,7 +2778,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 	struct sk_buff *skb;
 
 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
-	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+	skb = alloc_skb(MAX_TCP_HEADER, sk_allocation(sk, GFP_ATOMIC));
 	if (skb == NULL)
 		return -1;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 98256cf..c16f08e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1352,7 +1352,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	/* Clone pktoptions received with SYN */
 	newnp->pktoptions = NULL;
 	if (treq->pktopts != NULL) {
-		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
+		newnp->pktoptions = skb_clone(treq->pktopts,
+					      sk_allocation(sk, GFP_ATOMIC));
 		kfree_skb(treq->pktopts);
 		treq->pktopts = NULL;
 		if (newnp->pktoptions)
@@ -1405,7 +1406,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		 * memory, then we end up not copying the key
 		 * across. Shucks.
 		 */
 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
-			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
+			       AF_INET6, key->key, key->keylen,
+			       sk_allocation(sk, GFP_ATOMIC));
 	}
 #endif
 
@@ -1500,7 +1502,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	   --ANK (980728)
 	 */
 	if (np->rxopt.all)
-		opt_skb = skb_clone(skb, GFP_ATOMIC);
+		opt_skb = skb_clone(skb, sk_allocation(sk, GFP_ATOMIC));
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		sock_rps_save_rxhash(sk, skb);
-- 
1.7.9.2
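
A note on the stub: sk_allocation() is deliberately a pass-through at this
point in the series, so that every allocation on these writeback-critical
paths funnels through a single hook. As a minimal sketch of how a later
patch could fill the hook in, assuming the __GFP_MEMALLOC / SOCK_MEMALLOC
reserve machinery this series builds toward (the body below is
illustrative, not the actual follow-up change):

static inline gfp_t sk_allocation(struct sock *sk, gfp_t gfp_mask)
{
	/* Illustrative only: a socket doing memory-reserve I/O
	 * (e.g. writing pages back to network storage) would carry
	 * __GFP_MEMALLOC in sk->sk_allocation, letting allocations
	 * on its paths dip into the emergency reserves.
	 */
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}

With something along those lines, a socket flagged for reserve access
(via a helper such as sk_set_memalloc()) gets __GFP_MEMALLOC added on
these paths, while ordinary sockets see their GFP flags pass through
unchanged, which is why every call site can be converted up front.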