author    Eric Dumazet <edumazet@google.com>  2012-10-05 06:23:55 +0000
committer Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>  2012-12-11 14:54:22 -0200
commit    5e68bdb7c1a87fb7021baebc03ff951321cc6c87 (patch)
tree      d295662eb3876110de29a42f3733df9c15d35ae6 /include/linux
parent    d7cc34776d55031f8571116757a9206966ab3f47 (diff)
net: remove skb recycling
commit acb600def2110b1310466c0e485c0d26299898ae upstream.
commit 66eef59f22275002f621ff9d951886b513d011b3 upstream.

Over time, the skb recycling infrastructure got little interest and many
bugs. Generic rx path skb allocation now uses page fragments for
efficient GRO / TCP coalescing, and recycling a tx skb for the rx path
is not worth the pain.

The last identified bug is that fat skbs can be recycled and end up
using high order pages after a few iterations.

With help from Maxime Bizon, who pointed out that commit 87151b8689d
(net: allow pskb_expand_head() to get maximum tailroom) introduced this
regression for recycled skbs.

Instead of fixing this bug, let's remove skb recycling.

Drivers wanting really hot skbs should use build_skb() anyway, to
allocate/populate the sk_buff right before netif_receive_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
[ herton: adjust context/backport for 3.5 ]
Signed-off-by: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
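For context, a hedged sketch (not part of this patch) of the build_skb()
pattern the commit message recommends: the NIC DMAs into a pre-sized
buffer, and the driver wraps that buffer in a freshly built sk_buff
immediately before handing it to netif_receive_skb(). mydrv_rx_one() and
MYDRV_RX_BUF_SIZE are hypothetical placeholders; build_skb(),
skb_reserve(), skb_put(), eth_type_trans() and netif_receive_skb() are
the real kernel APIs.

/*
 * Hedged sketch, not part of this patch: a driver rx handler built
 * around build_skb().  mydrv_rx_one() and MYDRV_RX_BUF_SIZE are
 * hypothetical; the buffer must be sized so build_skb() finds room
 * for struct skb_shared_info at its end.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define MYDRV_RX_BUF_SIZE \
	(SKB_DATA_ALIGN(NET_SKB_PAD + 1536) + \
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

static void mydrv_rx_one(struct net_device *dev, void *buf, unsigned int len)
{
	struct sk_buff *skb;

	/* Wrap the buffer the NIC already DMA'd into; no copy, and the
	 * sk_buff metadata is allocated only now, while it is cache hot. */
	skb = build_skb(buf, MYDRV_RX_BUF_SIZE);
	if (unlikely(!skb)) {
		/* a real driver would also free or reuse buf here */
		dev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, NET_SKB_PAD);	/* headroom for the stack */
	skb_put(skb, len);		/* frame bytes written by the NIC */
	skb->protocol = eth_type_trans(skb, dev);

	netif_receive_skb(skb);
}

Compared with recycling a tx skb, only the data buffer is ever reused;
the sk_buff itself is always freshly initialized, so none of the checks
that skb_is_recycleable() below had to perform (shared, cloned,
nonlinear, zerocopy, sufficient tailroom) are needed.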
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/skbuff.h  24
1 file changed, 0 insertions, 24 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 642cb7355df3..ce7365780ec0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -579,9 +579,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
 }
 
-extern void skb_recycle(struct sk_buff *skb);
-extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
-
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
@@ -2550,27 +2547,6 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
-static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
-{
- if (irqs_disabled())
- return false;
-
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
- return false;
-
- if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
- return false;
-
- skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
- if (skb_end_offset(skb) < skb_size)
- return false;
-
- if (skb_shared(skb) || skb_cloned(skb))
- return false;
-
- return true;
-}
-
/**
* skb_head_is_locked - Determine if the skb->head is locked down
* @skb: skb to check