author		Mark Brown <broonie@kernel.org>	2018-09-25 09:16:48 -0700
committer	Mark Brown <broonie@kernel.org>	2018-09-25 09:16:48 -0700
commit		8b665c82acaa5a92671c7937521b2adc909bf40d (patch)
tree		bf090b90bdaa8e4f40ae44107247a83f850c6bb0 /include/linux
parent		e5a0a3460305fa0be3bbddfe4c536bdf871528bf (diff)
parent		917fa4e672939669f74f1c879b3aae6239f6719b (diff)
Merge tag 'v4.14.71-rt44' into linux-linaro-lsk-v4.14-rt
Linux 4.14.71-rt44
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/mm_types.h	2
-rw-r--r--	include/linux/mm_types_task.h	2
-rw-r--r--	include/linux/rhashtable.h	8
-rw-r--r--	include/linux/skbuff.h	50
-rw-r--r--	include/linux/tpm.h	2
-rw-r--r--	include/linux/vm_event_item.h	1
-rw-r--r--	include/linux/vmacache.h	5
7 files changed, 44 insertions, 26 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5a716df466a0..63317710311e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -355,7 +355,7 @@ struct kioctx_table;
struct mm_struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5fe87687664c..d7016dcb245e 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -32,7 +32,7 @@
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
struct vmacache {
- u32 seqnum;
+ u64 seqnum;
struct vm_area_struct *vmas[VMACACHE_SIZE];
};
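Both vmacache hunks widen the sequence number from 32 to 64 bits, which makes counter wraparound a practical non-issue and lets the explicit overflow handling in vmacache_invalidate() go away (see the vmacache.h hunk below). A minimal sketch of the validity check this seqnum presumably guards, assuming the comparison in mm/vmacache.c looks roughly like this (the helper name is illustrative):

static inline bool vmacache_seqnum_current(struct task_struct *tsk,
					   struct mm_struct *mm)
{
	/* The per-thread cache is only trusted while its snapshot of the
	 * mm-wide sequence number is current; every invalidation bumps
	 * mm->vmacache_seqnum and so implicitly drops all cached VMAs.
	 * With a u64 the counter cannot realistically wrap.
	 */
	return tsk->vmacache.seqnum == mm->vmacache_seqnum;
}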
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 7fd514f36e74..a4be6388a980 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -152,25 +152,25 @@ struct rhashtable_params {
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
- * @nelems: Number of elements in table
* @key_len: Key length for hashfn
- * @p: Configuration parameters
* @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
* @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- atomic_t nelems;
unsigned int key_len;
- struct rhashtable_params p;
unsigned int max_elems;
+ struct rhashtable_params p;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
+ atomic_t nelems;
};
/**
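The rhashtable hunk only reorders fields: the frequently written nelems counter moves to the tail of the struct and the read-mostly configuration (p, max_elems) moves up, presumably so that insert/delete traffic does not dirty the cache line that lookups read (tbl, key_len). A quick, purely illustrative way to inspect the resulting layout:

	/* Illustrative only: print where the hot fields land. */
	pr_info("rhashtable: tbl@%zu key_len@%zu nelems@%zu size=%zu\n",
		offsetof(struct rhashtable, tbl),
		offsetof(struct rhashtable, key_len),
		offsetof(struct rhashtable, nelems),
		sizeof(struct rhashtable));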
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 795b2d513874..07576a062ac0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -664,21 +664,26 @@ struct sk_buff {
			struct sk_buff		*prev;

			union {
-				ktime_t		tstamp;
-				u64		skb_mstamp;
+				struct net_device	*dev;
+				/* Some protocols might use this space to store information,
+				 * while device pointer would be NULL.
+				 * UDP receive path is one user.
+				 */
+				unsigned long		dev_scratch;
			};
		};
-		struct rb_node	rbnode; /* used in netem & tcp stack */
+		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
+		struct list_head	list;
	};
-	struct sock		*sk;

	union {
-		struct net_device	*dev;
-		/* Some protocols might use this space to store information,
-		 * while device pointer would be NULL.
-		 * UDP receive path is one user.
-		 */
-		unsigned long		dev_scratch;
+		struct sock		*sk;
+		int			ip_defrag_offset;
+	};
+
+	union {
+		ktime_t		tstamp;
+		u64		skb_mstamp;
	};
/*
* This is the control buffer. It is free to use for every
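The sk_buff reorganization groups next/prev and dev/dev_scratch with rbnode and a new list_head, so a buffer can sit on a plain list, an sk_buff_head, or an rb tree interchangeably, and it overlays ip_defrag_offset on the sk pointer: while a fragment waits in a reassembly tree it belongs to no socket, so that slot can record the fragment's offset instead. A hedged sketch of the overlay (illustrative, not the actual reassembly code):

/* Illustrative only: stash the fragment offset in the slot normally used
 * for the owning socket while the skb waits in a reassembly rb tree.
 */
static void frag_store_offset_sketch(struct sk_buff *skb, int frag_offset)
{
	skb_orphan(skb);			/* skb->sk must be unused ... */
	skb->ip_defrag_offset = frag_offset;	/* ... so the union can hold this */
}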
@@ -2587,7 +2592,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);
void *netdev_alloc_frag(unsigned int fragsz);
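skb_rbtree_purge() now returns a value, presumably the summed truesize of the freed buffers, so a caller that purges a whole reassembly or out-of-order tree can hand the memory back to accounting in one step. A hedged sketch of such a caller; the tree root and the accounting helper are named only for illustration:

	unsigned int sum_truesize;

	sum_truesize = skb_rbtree_purge(&q->rb_fragments);	/* placeholder tree root */
	frag_mem_sub(q, sum_truesize);				/* hypothetical accounting helper */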
@@ -3141,6 +3146,7 @@ static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
return skb->data;
}
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
@@ -3154,9 +3160,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
if (likely(len >= skb->len))
return 0;
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = CHECKSUM_NONE;
- return __pskb_trim(skb, len);
+ return pskb_trim_rcsum_slow(skb, len);
}
static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
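The inline fast path is unchanged for packets that do not shrink; everything else now goes through an out-of-line pskb_trim_rcsum_slow(). A sketch of what that slow path presumably does, assuming the goal is to keep CHECKSUM_COMPLETE usable by subtracting the checksum of the trimmed tail instead of downgrading to CHECKSUM_NONE (the real implementation lives in net/core/skbuff.c):

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		int delta = skb->len - len;

		/* Remove the contribution of the bytes being cut off so
		 * skb->csum still covers exactly the remaining data.
		 */
		skb->csum = csum_block_sub(skb->csum,
					   skb_checksum(skb, len, delta, 0),
					   len);
	}
	return __pskb_trim(skb, len);
}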
@@ -3176,6 +3180,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root) rb_to_skb(rb_last(root))
+#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -3190,6 +3200,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
for (; skb != (struct sk_buff *)(queue); \
skb = skb->next)
+#define skb_rbtree_walk(skb, root) \
+ for (skb = skb_rb_first(root); skb != NULL; \
+ skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb) \
+ for (; skb != NULL; \
+ skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp) \
+ for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
+ skb = tmp)
+
#define skb_queue_walk_from_safe(queue, skb, tmp) \
for (tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
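The new helpers mirror the existing skb_queue_walk() family for rb-tree-backed queues. A small illustrative use of them; the tree root name is a placeholder:

	struct rb_root *root = &some_rb_backed_queue;	/* placeholder */
	struct sk_buff *skb, *tmp;

	/* In-order walk, oldest to newest. */
	skb_rbtree_walk(skb, root)
		pr_debug("queued skb, len=%u\n", skb->len);

	/* Walk that tolerates freeing the current entry. */
	skb = skb_rb_first(root);
	skb_rbtree_walk_from_safe(skb, tmp) {
		rb_erase(&skb->rbnode, root);
		kfree_skb(skb);
	}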
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 2a6c3d96b31f..7f7b29f86c59 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -48,6 +48,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
bool (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
+ int (*go_idle)(struct tpm_chip *chip);
+ int (*cmd_ready)(struct tpm_chip *chip);
int (*request_locality)(struct tpm_chip *chip, int loc);
int (*relinquish_locality)(struct tpm_chip *chip, int loc);
void (*clk_enable)(struct tpm_chip *chip, bool value);
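The two new optional callbacks let a driver give the TPM a power hint around each command (the CRB interface is the obvious user). A hedged sketch of how the core presumably brackets a transmit with them; the surrounding code is illustrative only:

	/* Illustrative only: both callbacks are optional. */
	if (chip->ops->cmd_ready)
		chip->ops->cmd_ready(chip);	/* bring the TPM out of idle */

	/* ... send the command and wait for the response here ... */

	if (chip->ops->go_idle)
		chip->ops->go_idle(chip);	/* allow it to power down again */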
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 5c7f010676a7..47a3441cf4c4 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
- VMACACHE_FULL_FLUSHES,
#endif
#ifdef CONFIG_SWAP
SWAP_RA,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index a5b3aa8d281f..a09b28f76460 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
}
-extern void vmacache_flush_all(struct mm_struct *mm);
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
unsigned long addr);
@@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
static inline void vmacache_invalidate(struct mm_struct *mm)
{
mm->vmacache_seqnum++;
-
- /* deal with overflows */
- if (unlikely(mm->vmacache_seqnum == 0))
- vmacache_flush_all(mm);
}
#endif /* __LINUX_VMACACHE_H */
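With a 64-bit sequence number the overflow branch removed above is dead weight; rough, illustrative arithmetic:

	/* Back-of-the-envelope numbers behind dropping the wraparound handling:
	 *   u32: wraps after 2^32 ≈ 4.3e9 invalidations, reachable on a busy machine,
	 *        hence the old vmacache_flush_all() escape hatch.
	 *   u64: even at 1e9 invalidations per second, 2^64 / 1e9 ≈ 1.8e10 seconds,
	 *        roughly 585 years, before the counter wraps.
	 */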