author     Eric Dumazet <edumazet@google.com>                 2012-10-05 06:23:55 +0000
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2012-10-28 10:56:16 -0700
commit     541321132ac28c92394b1d57361448d2c6dd5faf
tree       a7d17762841432532967d94479a435834f72f435 /include
parent     e9ad2fc50a33c322bf343661851b1f179598b072
net: remove skb recycling
[ Upstream commits acb600def2110b1310466c0e485c0d26299898ae and
  66eef59f22275002f621ff9d951886b513d011b3. ]

Over time, the skb recycling infrastructure got little interest and many
bugs. Generic rx path skb allocation now uses page fragments for
efficient GRO / TCP coalescing, and recycling a tx skb for the rx path
is not worth the pain.

The last identified bug is that fat skbs can be recycled, which can end
up using high-order pages after a few iterations.

With help from Maxime Bizon, who pointed out that commit 87151b8689d
(net: allow pskb_expand_head() to get maximum tailroom) introduced this
regression for recycled skbs.

Instead of fixing this bug, let's remove skb recycling.

Drivers wanting really hot skbs should use build_skb() anyway, to
allocate/populate the sk_buff right before netif_receive_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
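For reference, the pattern the last paragraph recommends looks roughly
like the sketch below. This is a minimal, hypothetical rx handler, not
code from this patch: mydrv_rx(), RX_BUF_SIZE and pkt_len are
illustrative names, and a real driver would post the fragment to the
hardware and DMA the frame into it before wrapping it here.

    /*
     * Hypothetical sketch of the build_skb() pattern the commit message
     * recommends; mydrv_rx(), RX_BUF_SIZE and pkt_len are illustrative.
     */
    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/mm.h>

    #define RX_BUF_SIZE 1536	/* assumed per-packet buffer size */

    static void mydrv_rx(struct net_device *dev, unsigned int pkt_len)
    {
    	unsigned int buf_len = SKB_DATA_ALIGN(NET_SKB_PAD + RX_BUF_SIZE) +
    			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    	struct sk_buff *skb;
    	void *data;

    	/* Page-fragment backed buffer; in a real driver the hardware
    	 * would have DMAed the frame to data + NET_SKB_PAD by now. */
    	data = netdev_alloc_frag(buf_len);
    	if (!data)
    		return;

    	/* build_skb() only allocates the struct sk_buff itself and
    	 * points its head at the already-populated buffer. */
    	skb = build_skb(data, buf_len);
    	if (!skb) {
    		put_page(virt_to_head_page(data));
    		return;
    	}

    	skb_reserve(skb, NET_SKB_PAD);	/* headroom for the stack */
    	skb_put(skb, pkt_len);		/* mark the received bytes */
    	skb->protocol = eth_type_trans(skb, dev);
    	netif_receive_skb(skb);
    }

Because the sk_buff is allocated last, right before
netif_receive_skb(), the metadata stays cache-hot without keeping a
pool of recycled tx skbs around.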
Diffstat (limited to 'include')
-rw-r--r--  include/linux/skbuff.h | 24
1 file changed, 0 insertions(+), 24 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7632c87da2c9..f3165d26c163 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -589,9 +589,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
-extern void skb_recycle(struct sk_buff *skb);
-extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
-
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
@@ -2642,27 +2639,6 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
-static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
-{
-	if (irqs_disabled())
-		return false;
-
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-		return false;
-
-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return false;
-
-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_offset(skb) < skb_size)
-		return false;
-
-	if (skb_shared(skb) || skb_cloned(skb))
-		return false;
-
-	return true;
-}
-
 /**
  * skb_head_is_locked - Determine if the skb->head is locked down
  * @skb: skb to check