-rw-r--r--  Documentation/networking/ip-sysctl.txt  |  4
-rw-r--r--  MAINTAINERS                              |  1
-rw-r--r--  drivers/net/bonding/bond_3ad.c           |  3
-rw-r--r--  drivers/net/bonding/bond_alb.c           |  3
-rw-r--r--  drivers/net/bonding/bond_main.c          | 13
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c           |  3
-rw-r--r--  drivers/net/ibmveth.c                    |  4
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c       | 56
-rw-r--r--  drivers/net/phy/dp83640.c                |  4
-rw-r--r--  drivers/net/xen-netback/interface.c      |  4
-rw-r--r--  include/linux/ptp_classify.h             | 13
-rw-r--r--  net/can/bcm.c                            | 53
-rw-r--r--  net/ipv6/tcp_ipv6.c                      |  3
-rw-r--r--  net/packet/af_packet.c                   |  5
-rw-r--r--  net/rds/iw_rdma.c                        | 13
15 files changed, 100 insertions, 82 deletions
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 81546990f41..ca5cdcd0f0e 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1042,7 +1042,7 @@ conf/interface/*:
The functional behaviour for certain settings is different
depending on whether local forwarding is enabled or not.
-accept_ra - BOOLEAN
+accept_ra - INTEGER
Accept Router Advertisements; autoconfigure using them.
Possible values are:
@@ -1106,7 +1106,7 @@ dad_transmits - INTEGER
The amount of Duplicate Address Detection probes to send.
Default: 1
-forwarding - BOOLEAN
+forwarding - INTEGER
Configure interface-specific Host/Router behaviour.
Note: It is recommended to have the same setting on all
diff --git a/MAINTAINERS b/MAINTAINERS
index ae8820e173a..ace8f9c81b9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6374,7 +6374,6 @@ S: Supported
F: arch/arm/mach-tegra
TEHUTI ETHERNET DRIVER
-M: Alexander Indenbaum <baum@tehutinetworks.net>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a047eb973e3..47b928ed08f 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
+ if (!bond->kill_timers)
+ queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7f8b20a34ee..d4fbd2e6261 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
+ if (!bond->kill_timers)
+ queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 43f2ea54108..6d79b78cfc7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
read_lock(&bond->lock);
+ if (bond->kill_timers)
+ goto out;
+
/* rejoin all groups on bond device */
__bond_resend_igmp_join_requests(bond->dev);
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
__bond_resend_igmp_join_requests(vlan_dev);
}
- if (--bond->igmp_retrans > 0)
+ if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+out:
read_unlock(&bond->lock);
}
@@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work)
}
re_arm:
- if (bond->params.miimon)
+ if (bond->params.miimon && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->mii_work,
msecs_to_jiffies(bond->params.miimon));
out:
@@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
}
re_arm:
- if (bond->params.arp_interval)
+ if (bond->params.arp_interval && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
@@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond_ab_arp_probe(bond);
re_arm:
- if (bond->params.arp_interval)
+ if (bond->params.arp_interval && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
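
The three bonding hunks above apply one pattern: a self-rearming delayed work item checks a kill flag before requeueing itself, so that teardown (which sets kill_timers and then cancels the work) cannot race with a handler that is already past the cancel. A minimal sketch of that pattern; struct monitor, monitor_fn and monitor_stop are illustrative names, not bonding's own:

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct monitor {
	struct workqueue_struct *wq;
	struct delayed_work work;
	rwlock_t lock;
	int kill_timers;		/* set at teardown, read by the handler */
};

static void monitor_fn(struct work_struct *work)
{
	struct monitor *m = container_of(work, struct monitor, work.work);

	read_lock(&m->lock);
	if (m->kill_timers)
		goto out;		/* teardown in progress: do not rearm */

	/* ... periodic monitoring work ... */

	if (!m->kill_timers)
		queue_delayed_work(m->wq, &m->work, HZ);
out:
	read_unlock(&m->lock);
}

static void monitor_stop(struct monitor *m)
{
	write_lock_bh(&m->lock);
	m->kill_timers = 1;		/* a handler already running sees this */
	write_unlock_bh(&m->lock);
	cancel_delayed_work_sync(&m->work);
}
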
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c9957b7f17b..b4efa292fd6 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev,
setup_debugfs(adapter);
}
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ pdev->needs_freset = 1;
+
if (is_offload(adapter))
attach_ulds(adapter);
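
The cxgb4 change is a single flag: the driver marks the device as needing a fundamental (link) reset rather than a hot reset when EEH error recovery runs on powerpc. A sketch of where such a flag sits in a probe path; everything except pdev->needs_freset is illustrative boilerplate:

static int __devinit example_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err = pci_enable_device(pdev);
	if (err)
		return err;

	/* EEH recovery will now use a fundamental reset instead of a
	 * hot reset for this device */
	pdev->needs_freset = 1;

	return 0;
}
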
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 8dd5fccef72..d393f1e764e 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)
netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
netdev->irq, rc);
do {
- rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
goto err_out;
}
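
The ibmveth fix is a clobbered error code: the cleanup loop inside the failure path was reusing rc, the very value the function needs to report, as the scratch variable for the hypervisor call. A condensed sketch of the bug class, with hypothetical do_setup()/do_cleanup() helpers:

#include <linux/errno.h>

extern int do_setup(void);
extern int do_cleanup(void);

static int example_open(void)
{
	int rc, cleanup_rc;

	rc = do_setup();
	if (rc) {
		/* Reusing 'rc' for the cleanup call would both corrupt
		 * the value returned to the caller and mis-evaluate the
		 * retry condition below. */
		do {
			cleanup_rc = do_cleanup();
		} while (cleanup_rc == -EBUSY);
		return rc;	/* the original failure, not the cleanup result */
	}
	return 0;
}
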
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 567ff10889b..b8b4ba27b0e 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1199,6 +1199,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
&hw->reg->INT_EN);
pch_gbe_stop_receive(adapter);
+ int_st |= ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
}
if (int_st & PCH_GBE_INT_RX_DMA_ERR)
adapter->stats.intr_rx_dma_err_count++;
@@ -1218,14 +1220,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
/* Set Pause packet */
pch_gbe_mac_set_pause_packet(hw);
}
- if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
- == 0) {
- return IRQ_HANDLED;
- }
}
/* When request status is Receive interruption */
- if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+ if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+ (adapter->rx_stop_flag == true)) {
if (likely(napi_schedule_prep(&adapter->napi))) {
/* Enable only Rx Descriptor empty */
atomic_inc(&adapter->irq_sem);
@@ -1385,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
struct sk_buff *skb;
unsigned int i;
unsigned int cleaned_count = 0;
- bool cleaned = false;
+ bool cleaned = true;
pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
@@ -1396,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
- cleaned = true;
buffer_info = &tx_ring->buffer_info[i];
skb = buffer_info->skb;
@@ -1439,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
/* weight of a sort for tx, to avoid endless transmit cleanup */
- if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+ if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+ cleaned = false;
break;
+ }
}
pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
cleaned_count);
@@ -2168,7 +2168,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
struct pch_gbe_adapter *adapter =
container_of(napi, struct pch_gbe_adapter, napi);
- struct net_device *netdev = adapter->netdev;
int work_done = 0;
bool poll_end_flag = false;
bool cleaned = false;
@@ -2176,33 +2175,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
pr_debug("budget : %d\n", budget);
- /* Keep link state information with original netdev */
- if (!netif_carrier_ok(netdev)) {
+ pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
+
+ if (!cleaned)
+ work_done = budget;
+ /* If no Tx and not enough Rx work done,
+ * exit the polling mode
+ */
+ if (work_done < budget)
poll_end_flag = true;
- } else {
- pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+
+ if (poll_end_flag) {
+ napi_complete(napi);
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ }
+ pch_gbe_irq_enable(adapter);
+ } else
if (adapter->rx_stop_flag) {
adapter->rx_stop_flag = false;
pch_gbe_start_receive(&adapter->hw);
int_en = ioread32(&adapter->hw.reg->INT_EN);
iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
- &adapter->hw.reg->INT_EN);
+ &adapter->hw.reg->INT_EN);
}
- cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
-
- if (cleaned)
- work_done = budget;
- /* If no Tx and not enough Rx work done,
- * exit the polling mode
- */
- if ((work_done < budget) || !netif_running(netdev))
- poll_end_flag = true;
- }
-
- if (poll_end_flag) {
- napi_complete(napi);
- pch_gbe_irq_enable(adapter);
- }
pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
poll_end_flag, work_done, budget);
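
The pch_gbe rewrite above converges on the canonical NAPI poll shape: always run rx and tx cleanup, claim the whole budget while a tx backlog remains so the softirq polls again, and only complete NAPI and re-enable interrupts once under budget. A minimal sketch with an illustrative adapter layout and helper names:

#include <linux/netdevice.h>

struct example_adapter {
	struct napi_struct napi;
	/* rings, registers, ... */
};

static void example_clean_rx(struct example_adapter *ad,
			     int *work_done, int budget);
static bool example_clean_tx(struct example_adapter *ad);
static void example_irq_enable(struct example_adapter *ad);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *ad =
		container_of(napi, struct example_adapter, napi);
	int work_done = 0;

	example_clean_rx(ad, &work_done, budget);

	/* tx not fully cleaned: claim the full budget so NAPI keeps
	 * polling instead of completing with work outstanding */
	if (!example_clean_tx(ad))
		work_done = budget;

	if (work_done < budget) {
		napi_complete(napi);
		example_irq_enable(ad);	/* only after napi_complete() */
	}
	return work_done;
}
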
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index cb6e0b486b1..edd7304773e 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
prune_rx_ts(dp83640);
if (list_empty(&dp83640->rxpool)) {
- pr_warning("dp83640: rx timestamp pool is empty\n");
+ pr_debug("dp83640: rx timestamp pool is empty\n");
goto out;
}
rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640,
skb = skb_dequeue(&dp83640->tx_queue);
if (!skb) {
- pr_warning("dp83640: have timestamp but tx_queue empty\n");
+ pr_debug("dp83640: have timestamp but tx_queue empty\n");
return;
}
ns = phy2txts(phy_txts);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 0ca86f9ec4e..182562952c7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
xenvif_get(vif);
rtnl_lock();
- if (netif_running(vif->dev))
- xenvif_up(vif);
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
netif_carrier_on(vif->dev);
+ if (netif_running(vif->dev))
+ xenvif_up(vif);
rtnl_unlock();
return 0;
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index e07e2742a86..1dc420ba213 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -51,6 +51,7 @@
#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
#define PTP_EV_PORT 319
+#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
#define OFF_ETYPE 12
#define OFF_IHL 14
@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len)
{OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \
{OP_RETA, 0, 0, 0 }, /* */ \
/*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \
-/*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \
+/*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \
{OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \
- {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \
+ {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \
+ {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
+ {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \
{OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \
{OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
{OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \
{OP_RETA, 0, 0, 0 }, /* */ \
-/*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \
+/*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \
+ {OP_LDB, 0, 0, ETH_HLEN }, /* */ \
+ {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \
+ {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \
{OP_LDH, 0, 0, ETH_HLEN }, /* */ \
{OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \
{OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \
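
The new OP_LDB/OP_AND/OP_JEQ triplets load the first byte of the PTP header and test PTP_GEN_BIT, so general messages now classify as PTP_CLASS_NONE on the L2 paths, matching what the UDP paths already achieve by matching only the event port (319). A sketch of how a driver consumes this classifier, following the usual pattern of the era (compare dp83640); is_ptp_event() is an illustrative wrapper:

#include <linux/ptp_classify.h>
#include <linux/filter.h>
#include <linux/skbuff.h>

static struct sock_filter ptp_filter[] = { PTP_FILTER };

static int example_init(void)
{
	/* validate the program and resolve jump offsets, once at load */
	return ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter));
}

static bool is_ptp_event(struct sk_buff *skb)
{
	unsigned int type = sk_run_filter(skb, ptp_filter);

	return type != PTP_CLASS_NONE;	/* general messages return NONE */
}
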
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d6c8ae5b2e6..c84963d2dee 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
}
}
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+ if (op->kt_ival1.tv64 && op->count)
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival1),
+ HRTIMER_MODE_ABS);
+ else if (op->kt_ival2.tv64)
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival2),
+ HRTIMER_MODE_ABS);
+}
+
static void bcm_tx_timeout_tsklet(unsigned long data)
{
struct bcm_op *op = (struct bcm_op *)data;
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
bcm_send_to_user(op, &msg_head, NULL, 0);
}
- }
-
- if (op->kt_ival1.tv64 && (op->count > 0)) {
-
- /* send (next) frame */
bcm_can_tx(op);
- hrtimer_start(&op->timer,
- ktime_add(ktime_get(), op->kt_ival1),
- HRTIMER_MODE_ABS);
- } else {
- if (op->kt_ival2.tv64) {
+ } else if (op->kt_ival2.tv64)
+ bcm_can_tx(op);
- /* send (next) frame */
- bcm_can_tx(op);
- hrtimer_start(&op->timer,
- ktime_add(ktime_get(), op->kt_ival2),
- HRTIMER_MODE_ABS);
- }
- }
+ bcm_tx_start_timer(op);
}
/*
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
hrtimer_cancel(&op->timer);
}
- if ((op->flags & STARTTIMER) &&
- ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
-
+ if (op->flags & STARTTIMER) {
+ hrtimer_cancel(&op->timer);
/* spec: send can_frame when starting timer */
op->flags |= TX_ANNOUNCE;
-
- if (op->kt_ival1.tv64 && (op->count > 0)) {
- /* op->count-- is done in bcm_tx_timeout_handler */
- hrtimer_start(&op->timer, op->kt_ival1,
- HRTIMER_MODE_REL);
- } else
- hrtimer_start(&op->timer, op->kt_ival2,
- HRTIMER_MODE_REL);
}
- if (op->flags & TX_ANNOUNCE)
+ if (op->flags & TX_ANNOUNCE) {
bcm_can_tx(op);
+ if (op->count)
+ op->count--;
+ }
+
+ if (op->flags & STARTTIMER)
+ bcm_tx_start_timer(op);
return msg_head->nframes * CFSIZ + MHSIZ;
}
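
bcm_tx_start_timer() factors the two-phase cadence out of the timeout handler and the setup path: while count transmissions remain, rearm at ival1; afterwards, rearm at ival2; if both intervals are zero the op is one-shot and the timer stays idle. The same logic in isolation, with an illustrative struct cycle (only the hrtimer/ktime calls are real API):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct cycle {
	struct hrtimer timer;
	ktime_t ival1, ival2;
	unsigned long count;	/* remaining fast-phase shots */
};

static void cycle_arm(struct cycle *c)
{
	if (c->ival1.tv64 && c->count)
		hrtimer_start(&c->timer,
			      ktime_add(ktime_get(), c->ival1),
			      HRTIMER_MODE_ABS);
	else if (c->ival2.tv64)
		hrtimer_start(&c->timer,
			      ktime_add(ktime_get(), c->ival2),
			      HRTIMER_MODE_ABS);
	/* both intervals zero: one-shot mode, timer stays idle */
}
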
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3c9fa618b69..79cc6469508 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1383,6 +1383,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
@@ -1447,6 +1449,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
+ newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
/* Clone RX bits */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c698cec0a44..fabb4fafa28 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
return 0;
drop_n_acct:
- po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+ spin_lock(&sk->sk_receive_queue.lock);
+ po->stats.tp_drops++;
+ atomic_inc(&sk->sk_drops);
+ spin_unlock(&sk->sk_receive_queue.lock);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
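
tp_drops is a plain counter whose reader, the PACKET_STATISTICS getsockopt, copies and resets it under sk_receive_queue.lock; bumping it through an unrelated atomic therefore raced with that read-and-reset. For reference, a condensed sketch of the reader side, an approximation of packet_getsockopt() of this era rather than a verbatim quote:

#include <linux/if_packet.h>
#include <net/sock.h>

/* copy out and zero the counters under the same lock packet_rcv()
 * now takes on its drop path */
static void example_read_stats(struct sock *sk, struct packet_sock *po,
			       struct tpacket_stats *st)
{
	spin_lock_bh(&sk->sk_receive_queue.lock);
	*st = po->stats;
	memset(&po->stats, 0, sizeof(*st));
	spin_unlock_bh(&sk->sk_receive_queue.lock);
}
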
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 8b77edbab27..4e1de171866 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
struct list_head *unmap_list,
- struct list_head *kill_list);
+ struct list_head *kill_list,
+ int *unpinned);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
LIST_HEAD(unmap_list);
LIST_HEAD(kill_list);
unsigned long flags;
- unsigned int nfreed = 0, ncleaned = 0, free_goal;
+ unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
int ret = 0;
rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
* will be destroyed by the unmap function.
*/
if (!list_empty(&unmap_list)) {
- ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
+ ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
+ &kill_list, &unpinned);
/* If we've been asked to destroy all MRs, move those
* that were simply cleaned to the kill list */
if (free_all)
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
spin_unlock_irqrestore(&pool->list_lock, flags);
}
+ atomic_sub(unpinned, &pool->free_pinned);
atomic_sub(ncleaned, &pool->dirty_count);
atomic_sub(nfreed, &pool->item_count);
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
struct list_head *unmap_list,
- struct list_head *kill_list)
+ struct list_head *kill_list,
+ int *unpinned)
{
struct rds_iw_mapping *mapping, *next;
unsigned int ncleaned = 0;
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
spin_lock_irqsave(&pool->list_lock, flags);
list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
+ *unpinned += mapping->m_sg.len;
list_move(&mapping->m_list, &laundered);
ncleaned++;
}