path: root/datapath/linux-2.6/compat-2.6
Diffstat (limited to 'datapath/linux-2.6/compat-2.6')
-rw-r--r--  datapath/linux-2.6/compat-2.6/compat26.h | 37
-rw-r--r--  datapath/linux-2.6/compat-2.6/genetlink-brcompat.c | 20
-rw-r--r--  datapath/linux-2.6/compat-2.6/genetlink-openvswitch.c | 22
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/asm-generic/bug.h | 19
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/cpumask.h | 11
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/dmi.h | 114
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/err.h | 21
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/icmp.h | 13
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/if_arp.h | 15
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/ip.h | 18
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/ipv6.h | 13
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/jiffies.h | 26
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/kernel.h | 9
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/lockdep.h | 450
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/log2.h | 17
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/mutex.h | 59
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/netdevice.h | 35
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/netfilter_bridge.h | 24
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/netfilter_ipv4.h | 19
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/netlink.h | 24
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/percpu.h | 10
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/random.h | 17
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/rculist.h | 12
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/rtnetlink.h | 29
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/skbuff.h | 170
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/tcp.h | 18
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/timer.h | 96
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/types.h | 14
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/udp.h | 13
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/linux/workqueue.h | 42
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/net/checksum.h | 16
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/net/genetlink.h | 123
-rw-r--r--  datapath/linux-2.6/compat-2.6/include/net/netlink.h | 22
-rw-r--r--  datapath/linux-2.6/compat-2.6/random32.c | 144
-rw-r--r--  datapath/linux-2.6/compat-2.6/veth.c | 537
35 files changed, 2229 insertions(+), 0 deletions(-)
diff --git a/datapath/linux-2.6/compat-2.6/compat26.h b/datapath/linux-2.6/compat-2.6/compat26.h
new file mode 100644
index 00000000..61448d63
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/compat26.h
@@ -0,0 +1,37 @@
+#ifndef __COMPAT26_H
+#define __COMPAT26_H 1
+
+#include <linux/version.h>
+
+#if defined(CONFIG_PREEMPT) && LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
+#error "CONFIG_PREEMPT is broken with 2.6.x before 2.6.21--see commit 4498121ca3, \"[NET]: Handle disabled preemption in gfp_any()\""
+#endif
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
+/*----------------------------------------------------------------------------
+ * In 2.6.24, a network namespace argument became required for
+ * dev_get_by_name() and the related lookup functions wrapped below. */
+
+#define dev_get_by_name(net, name) \
+ dev_get_by_name((name))
+
+#define dev_get_by_index(net, ifindex) \
+ dev_get_by_index((ifindex))
+
+#define __dev_get_by_name(net, name) \
+ __dev_get_by_name((name))
+
+#define __dev_get_by_index(net, ifindex) \
+ __dev_get_by_index((ifindex))
+
+#endif /* linux kernel <= 2.6.23 */
+
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)
+/*----------------------------------------------------------------------------
+ * In 2.6.23, the last argument was dropped from kmem_cache_create. */
+#define kmem_cache_create(n, s, a, f, c) \
+ kmem_cache_create((n), (s), (a), (f), (c), NULL)
+
+#endif /* linux kernel <= 2.6.22 */
+
+#endif /* compat26.h */
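
A hedged usage sketch (the function and variable names are hypothetical): code written against the 2.6.24-style API compiles unchanged on kernels up to 2.6.23, because the macros above textually drop the namespace argument, and since the preprocessor never re-expands a macro's own name, the expansion resolves to the real one-argument function.

    /* Sketch: works on both sides of the 2.6.24 API change. */
    static int port_exists(const char *name)
    {
            struct net_device *dev = dev_get_by_name(&init_net, name);

            if (!dev)
                    return 0;
            dev_put(dev);           /* dev_get_by_name() took a reference */
            return 1;
    }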
diff --git a/datapath/linux-2.6/compat-2.6/genetlink-brcompat.c b/datapath/linux-2.6/compat-2.6/genetlink-brcompat.c
new file mode 100644
index 00000000..c43b3ce4
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/genetlink-brcompat.c
@@ -0,0 +1,20 @@
+#include "net/genetlink.h"
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+
+/* We fix grp->id to 32 so that it doesn't collide with any of the multicast
+ * groups selected by openvswitch_mod, which uses groups 16 through 31.
+ * Collision isn't fatal--multicast listeners should check that the family is
+ * the one that they want and discard others--but it wastes time and memory to
+ * receive unwanted messages. */
+int genl_register_mc_group(struct genl_family *family,
+ struct genl_multicast_group *grp)
+{
+ grp->id = 32;
+ grp->family = family;
+
+ return 0;
+}
+
+#endif /* kernel < 2.6.23 */
diff --git a/datapath/linux-2.6/compat-2.6/genetlink-openvswitch.c b/datapath/linux-2.6/compat-2.6/genetlink-openvswitch.c
new file mode 100644
index 00000000..9e09215f
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/genetlink-openvswitch.c
@@ -0,0 +1,22 @@
+#include "net/genetlink.h"
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+
+/* We use multicast groups 16 through 31 to avoid colliding with the multicast
+ * group selected by brcompat_mod, which uses group 32.  Collision isn't
+ * fatal--multicast listeners should check that the family is the one that they
+ * want and discard others--but it wastes time and memory to receive unwanted
+ * messages. */
+int genl_register_mc_group(struct genl_family *family,
+ struct genl_multicast_group *grp)
+{
+ /* This code is called single-threaded. */
+ static unsigned int next_id = 0;
+ grp->id = next_id++ % 16 + 16;
+ grp->family = family;
+
+ return 0;
+}
+
+#endif /* kernel < 2.6.23 */
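
For illustration only (the family and group names are hypothetical), a caller uses the same registration call on every kernel; on pre-2.6.23 kernels the shim above hands out ids 16..31 round-robin instead of asking the genetlink core:

    static struct genl_multicast_group my_mc_group = {
            .name = "my_mcgrp",             /* hypothetical group name */
    };

    static int my_register_group(struct genl_family *family)
    {
            /* assumes family was already registered with genl_register_family() */
            return genl_register_mc_group(family, &my_mc_group);
    }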
diff --git a/datapath/linux-2.6/compat-2.6/include/asm-generic/bug.h b/datapath/linux-2.6/compat-2.6/include/asm-generic/bug.h
new file mode 100644
index 00000000..1d9b3140
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/asm-generic/bug.h
@@ -0,0 +1,19 @@
+#ifndef __ASM_GENERIC_BUG_WRAPPER_H
+#define __ASM_GENERIC_BUG_WRAPPER_H
+
+#include_next <asm-generic/bug.h>
+
+#ifndef WARN_ON_ONCE
+#define WARN_ON_ONCE(condition) ({ \
+ static int __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once) && !__warned) { \
+ WARN_ON(1); \
+ __warned = 1; \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#endif
+
+#endif
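
Because the statement expression above evaluates to the condition, a caller can branch on WARN_ON_ONCE() and still get at most one backtrace; a minimal hypothetical sketch:

    static int handle_frame(struct sk_buff *skb)
    {
            if (WARN_ON_ONCE(skb->len == 0)) {
                    kfree_skb(skb);         /* warned on first occurrence only */
                    return -EINVAL;
            }
            /* ... normal processing ... */
            return 0;
    }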
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/cpumask.h b/datapath/linux-2.6/compat-2.6/include/linux/cpumask.h
new file mode 100644
index 00000000..48c73aa8
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/cpumask.h
@@ -0,0 +1,11 @@
+#ifndef __LINUX_CPUMASK_WRAPPER_H
+#define __LINUX_CPUMASK_WRAPPER_H
+
+#include_next <linux/cpumask.h>
+
+/* for_each_cpu was renamed for_each_possible_cpu in 2.6.18. */
+#ifndef for_each_possible_cpu
+#define for_each_possible_cpu for_each_cpu
+#endif
+
+#endif /* linux/cpumask.h wrapper */
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/dmi.h b/datapath/linux-2.6/compat-2.6/include/linux/dmi.h
new file mode 100644
index 00000000..52916fec
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/dmi.h
@@ -0,0 +1,114 @@
+#ifndef __LINUX_DMI_WRAPPER_H
+#define __LINUX_DMI_WRAPPER_H 1
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+
+#include_next <linux/dmi.h>
+
+#else /* linux kernel < 2.6.23 */
+
+#ifndef __DMI_H__
+#define __DMI_H__
+
+#include <linux/list.h>
+
+enum dmi_field {
+ DMI_NONE,
+ DMI_BIOS_VENDOR,
+ DMI_BIOS_VERSION,
+ DMI_BIOS_DATE,
+ DMI_SYS_VENDOR,
+ DMI_PRODUCT_NAME,
+ DMI_PRODUCT_VERSION,
+ DMI_PRODUCT_SERIAL,
+ DMI_PRODUCT_UUID,
+ DMI_BOARD_VENDOR,
+ DMI_BOARD_NAME,
+ DMI_BOARD_VERSION,
+ DMI_BOARD_SERIAL,
+ DMI_BOARD_ASSET_TAG,
+ DMI_CHASSIS_VENDOR,
+ DMI_CHASSIS_TYPE,
+ DMI_CHASSIS_VERSION,
+ DMI_CHASSIS_SERIAL,
+ DMI_CHASSIS_ASSET_TAG,
+ DMI_STRING_MAX,
+};
+
+enum dmi_device_type {
+ DMI_DEV_TYPE_ANY = 0,
+ DMI_DEV_TYPE_OTHER,
+ DMI_DEV_TYPE_UNKNOWN,
+ DMI_DEV_TYPE_VIDEO,
+ DMI_DEV_TYPE_SCSI,
+ DMI_DEV_TYPE_ETHERNET,
+ DMI_DEV_TYPE_TOKENRING,
+ DMI_DEV_TYPE_SOUND,
+ DMI_DEV_TYPE_IPMI = -1,
+ DMI_DEV_TYPE_OEM_STRING = -2
+};
+
+struct dmi_header {
+ u8 type;
+ u8 length;
+ u16 handle;
+};
+
+/*
+ * DMI callbacks for problem boards
+ */
+struct dmi_strmatch {
+ u8 slot;
+ char *substr;
+};
+
+struct dmi_system_id {
+ int (*callback)(struct dmi_system_id *);
+ const char *ident;
+ struct dmi_strmatch matches[4];
+ void *driver_data;
+};
+
+#define DMI_MATCH(a, b) { a, b }
+
+struct dmi_device {
+ struct list_head list;
+ int type;
+ const char *name;
+ void *device_data; /* Type specific data */
+};
+
+/* No CONFIG_DMI before 2.6.16 */
+#if defined(CONFIG_DMI) || defined(CONFIG_X86_32)
+
+extern int dmi_check_system(struct dmi_system_id *list);
+extern char * dmi_get_system_info(int field);
+extern struct dmi_device * dmi_find_device(int type, const char *name,
+ struct dmi_device *from);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+extern void dmi_scan_machine(void);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+extern int dmi_get_year(int field);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+extern int dmi_name_in_vendors(char *str);
+#endif
+
+#else
+
+static inline int dmi_check_system(struct dmi_system_id *list) { return 0; }
+static inline char * dmi_get_system_info(int field) { return NULL; }
+static inline struct dmi_device * dmi_find_device(int type, const char *name,
+ struct dmi_device *from) { return NULL; }
+static inline int dmi_get_year(int year) { return 0; }
+static inline int dmi_name_in_vendors(char *s) { return 0; }
+
+#endif
+
+#endif /* __DMI_H__ */
+
+#endif /* linux kernel < 2.6.23 */
+
+#endif
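
A sketch of the intended use, with an entirely hypothetical quirk table: dmi_check_system() walks the table and invokes the callback for each board whose DMI strings match.

    static int apply_quirk(struct dmi_system_id *id)
    {
            printk(KERN_INFO "applying quirk for %s\n", id->ident);
            return 0;
    }

    static struct dmi_system_id quirk_table[] = {
            {
                    .callback = apply_quirk,
                    .ident = "Example Vendor Board",        /* hypothetical */
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
                    },
            },
            { }     /* terminating entry */
    };
    /* ... if (dmi_check_system(quirk_table)) ... */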
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/err.h b/datapath/linux-2.6/compat-2.6/include/linux/err.h
new file mode 100644
index 00000000..50faf2a1
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/err.h
@@ -0,0 +1,21 @@
+#ifndef __LINUX_ERR_WRAPPER_H
+#define __LINUX_ERR_WRAPPER_H 1
+
+#include_next <linux/err.h>
+
+#ifndef HAVE_ERR_CAST
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void *ERR_CAST(const void *ptr)
+{
+ /* cast away the const */
+ return (void *) ptr;
+}
+#endif /* HAVE_ERR_CAST */
+
+#endif
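
A hypothetical sketch of the idiom ERR_CAST() supports: propagating an ERR_PTR-encoded errno across pointer types without a bare, unexplained cast (find_dev(), dev_to_vport(), and struct vport are made up for illustration):

    static struct vport *vport_from_name(const char *name)
    {
            struct net_device *dev = find_dev(name);        /* hypothetical */

            if (IS_ERR(dev))
                    return ERR_CAST(dev);   /* same errno, new pointer type */
            return dev_to_vport(dev);       /* hypothetical */
    }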
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/icmp.h b/datapath/linux-2.6/compat-2.6/include/linux/icmp.h
new file mode 100644
index 00000000..89b354e4
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/icmp.h
@@ -0,0 +1,13 @@
+#ifndef __LINUX_ICMP_WRAPPER_H
+#define __LINUX_ICMP_WRAPPER_H 1
+
+#include_next <linux/icmp.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
+{
+ return (struct icmphdr *)skb_transport_header(skb);
+}
+#endif
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/if_arp.h b/datapath/linux-2.6/compat-2.6/include/linux/if_arp.h
new file mode 100644
index 00000000..e48d6ba0
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/if_arp.h
@@ -0,0 +1,15 @@
+#ifndef __LINUX_IF_ARP_WRAPPER_H
+#define __LINUX_IF_ARP_WRAPPER_H 1
+
+#include_next <linux/if_arp.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+#include <linux/skbuff.h>
+
+static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
+{
+ return (struct arphdr *)skb_network_header(skb);
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/ip.h b/datapath/linux-2.6/compat-2.6/include/linux/ip.h
new file mode 100644
index 00000000..36765396
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/ip.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_IP_WRAPPER_H
+#define __LINUX_IP_WRAPPER_H 1
+
+#include_next <linux/ip.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
+{
+ return (struct iphdr *)skb_network_header(skb);
+}
+
+static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
+{
+ return ip_hdr(skb)->ihl * 4;
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/ipv6.h b/datapath/linux-2.6/compat-2.6/include/linux/ipv6.h
new file mode 100644
index 00000000..25a5431a
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/ipv6.h
@@ -0,0 +1,13 @@
+#ifndef __LINUX_IPV6_WRAPPER_H
+#define __LINUX_IPV6_WRAPPER_H 1
+
+#include_next <linux/ipv6.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
+{
+ return (struct ipv6hdr *)skb_network_header(skb);
+}
+#endif
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/jiffies.h b/datapath/linux-2.6/compat-2.6/include/linux/jiffies.h
new file mode 100644
index 00000000..3286e634
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/jiffies.h
@@ -0,0 +1,26 @@
+#ifndef __LINUX_JIFFIES_WRAPPER_H
+#define __LINUX_JIFFIES_WRAPPER_H 1
+
+#include_next <linux/jiffies.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+
+/* Same as the time_after() family in <linux/jiffies.h>, but using
+ * platform-independent 64-bit types.  These must be used when working
+ * with jiffies_64 (i.e. the return value of get_jiffies_64()). */
+#define time_after64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)(b) - (__s64)(a) < 0))
+#define time_before64(a,b) time_after64(b,a)
+
+#define time_after_eq64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)(a) - (__s64)(b) >= 0))
+#define time_before_eq64(a,b) time_after_eq64(b,a)
+
+#endif /* linux kernel < 2.6.19 */
+
+#endif
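
A minimal sketch of their use with get_jiffies_64(), whose 64-bit result stays monotonic across the 32-bit jiffies wraparound:

    /* Sketch: expire an entry once its 64-bit deadline has passed. */
    static int deadline_passed(u64 deadline)
    {
            return time_after_eq64(get_jiffies_64(), deadline);
    }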
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/kernel.h b/datapath/linux-2.6/compat-2.6/include/linux/kernel.h
new file mode 100644
index 00000000..9459155d
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/kernel.h
@@ -0,0 +1,9 @@
+#ifndef __KERNEL_H_WRAPPER
+#define __KERNEL_H_WRAPPER 1
+
+#include_next <linux/kernel.h>
+#ifndef HAVE_LOG2_H
+#include <linux/log2.h>
+#endif
+
+#endif /* linux/kernel.h */
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/lockdep.h b/datapath/linux-2.6/compat-2.6/include/linux/lockdep.h
new file mode 100644
index 00000000..1c839423
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/lockdep.h
@@ -0,0 +1,450 @@
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * see Documentation/lockdep-design.txt for more details.
+ */
+#ifndef __LINUX_LOCKDEP_WRAPPER_H
+#define __LINUX_LOCKDEP_WRAPPER_H
+
+#include_next <linux/lockdep.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+
+struct task_struct;
+struct lockdep_map;
+
+#ifdef CONFIG_LOCKDEP
+
+#include <linux/linkage.h>
+#include <linux/list.h>
+#include <linux/debug_locks.h>
+#include <linux/stacktrace.h>
+
+/*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit
+{
+ LOCK_USED = 0,
+ LOCK_USED_IN_HARDIRQ,
+ LOCK_USED_IN_SOFTIRQ,
+ LOCK_ENABLED_SOFTIRQS,
+ LOCK_ENABLED_HARDIRQS,
+ LOCK_USED_IN_HARDIRQ_READ,
+ LOCK_USED_IN_SOFTIRQ_READ,
+ LOCK_ENABLED_SOFTIRQS_READ,
+ LOCK_ENABLED_HARDIRQS_READ,
+ LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define LOCKF_USED (1 << LOCK_USED)
+#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
+#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
+#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
+
+#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
+#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
+#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
+
+#define LOCKF_ENABLED_IRQS_READ \
+ (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+ (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+/*
+ * Lock-classes are keyed via unique addresses, by embedding the
+ * lockclass-key into the kernel (or module) .data section. (For
+ * static locks we use the lock address itself as the key.)
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+struct lock_class_key {
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+};
+
+/*
+ * The lock-class itself:
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct list_head hash_entry;
+
+ /*
+ * global list of all lock-classes:
+ */
+ struct list_head lock_entry;
+
+ struct lockdep_subclass_key *key;
+ unsigned int subclass;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ struct stack_trace usage_traces[LOCK_USAGE_STATES];
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ unsigned int version;
+
+ /*
+ * Statistics counter:
+ */
+ unsigned long ops;
+
+ const char *name;
+ int name_version;
+
+#ifdef CONFIG_LOCK_STAT
+ unsigned long contention_point[4];
+#endif
+};
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+ s64 min;
+ s64 max;
+ s64 total;
+ unsigned long nr;
+};
+
+enum bounce_type {
+ bounce_acquired_write,
+ bounce_acquired_read,
+ bounce_contended_write,
+ bounce_contended_read,
+ nr_bounce_types,
+
+ bounce_acquired = bounce_acquired_write,
+ bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+ unsigned long contention_point[4];
+ struct lock_time read_waittime;
+ struct lock_time write_waittime;
+ struct lock_time read_holdtime;
+ struct lock_time write_holdtime;
+ unsigned long bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache;
+ const char *name;
+#ifdef CONFIG_LOCK_STAT
+ int cpu;
+#endif
+};
+
+/*
+ * Every lock has a list of other locks that were taken after it.
+ * We only grow the list, never remove from it:
+ */
+struct lock_list {
+ struct list_head entry;
+ struct lock_class *class;
+ struct stack_trace trace;
+ int distance;
+};
+
+/*
+ * We record lock dependency chains, so that we can cache them:
+ */
+struct lock_chain {
+ struct list_head entry;
+ u64 chain_key;
+};
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ struct lock_class *class;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+
+#ifdef CONFIG_LOCK_STAT
+ u64 waittime_stamp;
+ u64 holdtime_stamp;
+#endif
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest ontop of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also keep do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ int irq_context;
+ int trylock;
+ int read;
+ int check;
+ int hardirqs_off;
+};
+
+/*
+ * Initialization, self-test and debugging-output methods:
+ */
+extern void lockdep_init(void);
+extern void lockdep_info(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+
+extern void lockdep_off(void);
+extern void lockdep_on(void);
+
+/*
+ * These methods are used by specific locking variants (spinlocks,
+ * rwlocks, mutexes and rwsems) to pass init/acquire/release events
+ * to lockdep:
+ */
+
+extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass);
+
+/*
+ * Reinitialize a lock key - for cases where there is special locking or
+ * special initialization of locks so that the validator gets the scope
+ * of dependencies wrong: they are either too broad (they need a class-split)
+ * or they are too narrow (they suffer from a false class-split):
+ */
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map(&(lock)->dep_map, #key, key, 0)
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map(&(lock)->dep_map, name, key, 0)
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ lockdep_init_map(&(lock)->dep_map, #key, key, sub)
+#define lockdep_set_subclass(lock, sub) \
+ lockdep_init_map(&(lock)->dep_map, #lock, \
+ (lock)->dep_map.key, sub)
+
+/*
+ * Acquire a lock.
+ *
+ * Values for "read":
+ *
+ * 0: exclusive (write) acquire
+ * 1: read-acquire (no recursion allowed)
+ * 2: read-acquire with same-instance recursion allowed
+ *
+ * Values for check:
+ *
+ * 0: disabled
+ * 1: simple checks (freeing, held-at-exit-time, etc.)
+ * 2: full validation
+ */
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check, unsigned long ip);
+
+extern void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip);
+
+# define INIT_LOCKDEP .lockdep_recursion = 0,
+
+#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
+
+#else /* !LOCKDEP */
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
+# define lock_acquire(l, s, t, r, c, i) do { } while (0)
+# define lock_release(l, n, i) do { } while (0)
+# define lockdep_init() do { } while (0)
+# define lockdep_info() do { } while (0)
+# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
+# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
+# define lockdep_set_class_and_name(lock, key, name) \
+ do { (void)(key); } while (0)
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ do { (void)(key); } while (0)
+#define lockdep_set_subclass(lock, sub) do { } while (0)
+
+# define INIT_LOCKDEP
+# define lockdep_reset() do { debug_locks = 1; } while (0)
+# define lockdep_free_key_range(start, size) do { } while (0)
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+
+#define lockdep_depth(tsk) (0)
+
+#endif /* !LOCKDEP */
+
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock);
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+do { \
+ if (!try(_lock)) { \
+ lock_contended(&(_lock)->dep_map, _RET_IP_); \
+ lock(_lock); \
+ } \
+ lock_acquired(&(_lock)->dep_map); \
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+ lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+extern void early_init_irq_lock_class(void);
+#else
+static inline void early_init_irq_lock_class(void)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void early_boot_irqs_off(void);
+extern void early_boot_irqs_on(void);
+extern void print_irqtrace_events(struct task_struct *curr);
+#else
+static inline void early_boot_irqs_off(void)
+{
+}
+static inline void early_boot_irqs_on(void)
+{
+}
+static inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+/*
+ * For trivial one-depth nesting of a lock-class, the following
+ * global define can be used. (Subsystems with multiple levels
+ * of nesting should define their own lock-nesting subclasses.)
+ */
+#define SINGLE_DEPTH_NESTING 1
+
+/*
+ * Map the dependency ops to NOP or to real lockdep ops, depending
+ * on the per lock-class debug mode:
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# else
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define spin_release(l, n, i) lock_release(l, n, i)
+#else
+# define spin_acquire(l, s, t, i) do { } while (0)
+# define spin_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)
+# else
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i)
+# endif
+# define rwlock_release(l, n, i) lock_release(l, n, i)
+#else
+# define rwlock_acquire(l, s, t, i) do { } while (0)
+# define rwlock_acquire_read(l, s, t, i) do { } while (0)
+# define rwlock_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# else
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define mutex_release(l, n, i) lock_release(l, n, i)
+#else
+# define mutex_acquire(l, s, t, i) do { } while (0)
+# define mutex_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i)
+# else
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i)
+# endif
+# define rwsem_release(l, n, i) lock_release(l, n, i)
+#else
+# define rwsem_acquire(l, s, t, i) do { } while (0)
+# define rwsem_acquire_read(l, s, t, i) do { } while (0)
+# define rwsem_release(l, n, i) do { } while (0)
+#endif
+
+#endif /* linux kernel < 2.6.18 */
+
+#endif /* __LINUX_LOCKDEP_WRAPPER_H */
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/log2.h b/datapath/linux-2.6/compat-2.6/include/linux/log2.h
new file mode 100644
index 00000000..69abae5e
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/log2.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_LOG2_WRAPPER
+#define __LINUX_LOG2_WRAPPER
+
+#ifdef HAVE_LOG2_H
+#include_next <linux/log2.h>
+#else
+/* This is very stripped down because log2.h has far too many dependencies. */
+
+extern __attribute__((const, noreturn))
+int ____ilog2_NaN(void);
+
+#define ilog2(n) ((n) == 4 ? 2 : \
+ (n) == 8 ? 3 : \
+ ____ilog2_NaN())
+#endif
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/mutex.h b/datapath/linux-2.6/compat-2.6/include/linux/mutex.h
new file mode 100644
index 00000000..93dfa3b2
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/mutex.h
@@ -0,0 +1,59 @@
+#ifndef __LINUX_MUTEX_WRAPPER_H
+#define __LINUX_MUTEX_WRAPPER_H
+
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+
+#include <asm/semaphore.h>
+
+struct mutex {
+ struct semaphore sema;
+};
+
+#define mutex_init(mutex) init_MUTEX(&(mutex)->sema)
+#define mutex_destroy(mutex) do { } while (0)
+
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name,1)
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = { __MUTEX_INITIALIZER(mutexname.sema) }
+
+/*
+ * See kernel/mutex.c for detailed documentation of these APIs.
+ * Also see Documentation/mutex-design.txt.
+ */
+static inline void mutex_lock(struct mutex *lock)
+{
+ down(&lock->sema);
+}
+
+static inline int mutex_lock_interruptible(struct mutex *lock)
+{
+ return down_interruptible(&lock->sema);
+}
+
+#define mutex_lock_nested(lock, subclass) mutex_lock(lock)
+#define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+
+/*
+ * NOTE: mutex_trylock() follows the spin_trylock() convention,
+ * not the down_trylock() convention!
+ */
+static inline int mutex_trylock(struct mutex *lock)
+{
+ return !down_trylock(&lock->sema);
+}
+
+static inline void mutex_unlock(struct mutex *lock)
+{
+ up(&lock->sema);
+}
+#else
+
+#include_next <linux/mutex.h>
+
+#endif /* linux version < 2.6.16 */
+
+#endif /* __LINUX_MUTEX_WRAPPER_H */
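
Callers can therefore use the 2.6.16 mutex API throughout; on older kernels it quietly degrades to the semaphore shown above. A minimal sketch (the lock name is hypothetical):

    static DEFINE_MUTEX(dp_mutex);

    static int do_config(void)
    {
            if (mutex_lock_interruptible(&dp_mutex))
                    return -ERESTARTSYS;
            /* ... critical section ... */
            mutex_unlock(&dp_mutex);
            return 0;
    }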
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/netdevice.h b/datapath/linux-2.6/compat-2.6/include/linux/netdevice.h
new file mode 100644
index 00000000..32e1735d
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/netdevice.h
@@ -0,0 +1,35 @@
+#ifndef __LINUX_NETDEVICE_WRAPPER_H
+#define __LINUX_NETDEVICE_WRAPPER_H 1
+
+#include_next <linux/netdevice.h>
+
+#include <linux/version.h>
+
+struct net;
+
+#ifndef to_net_dev
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+static inline
+struct net *dev_net(const struct net_device *dev)
+{
+ return NULL;
+}
+#endif /* linux kernel < 2.6.26 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+#define proc_net init_net.proc_net
+#endif
+
+#ifndef for_each_netdev
+/* Linux before 2.6.22 didn't have for_each_netdev at all. */
+#define for_each_netdev(net, d) for (d = dev_base; d; d = d->next)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+/* Linux 2.6.24 added a network namespace pointer to the macro. */
+#undef for_each_netdev
+#define for_each_netdev(net,d) list_for_each_entry(d, &dev_base_head, dev_list)
+#endif
+
+
+
+#endif
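
With the definitions above, one loop body works on every supported kernel; a sketch, holding the RTNL so the device list cannot change under the walk:

    static int count_netdevs(void)
    {
            struct net_device *d;
            int n = 0;

            rtnl_lock();
            for_each_netdev(&init_net, d)   /* first argument is discarded pre-2.6.24 */
                    n++;
            rtnl_unlock();
            return n;
    }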
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/netfilter_bridge.h b/datapath/linux-2.6/compat-2.6/include/linux/netfilter_bridge.h
new file mode 100644
index 00000000..1c8183c8
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/netfilter_bridge.h
@@ -0,0 +1,24 @@
+#ifndef __LINUX_NETFILTER_BRIDGE_WRAPPER_H
+#define __LINUX_NETFILTER_BRIDGE_WRAPPER_H
+
+#include_next <linux/netfilter_bridge.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+
+#include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
+
+static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_8021Q):
+ return VLAN_HLEN;
+ default:
+ return 0;
+ }
+}
+
+#endif /* linux version < 2.6.22 */
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/netfilter_ipv4.h b/datapath/linux-2.6/compat-2.6/include/linux/netfilter_ipv4.h
new file mode 100644
index 00000000..ed8a5d94
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/netfilter_ipv4.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_NETFILTER_IPV4_WRAPPER_H
+#define __LINUX_NETFILTER_IPV4_WRAPPER_H 1
+
+#include_next <linux/netfilter_ipv4.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+#ifdef __KERNEL__
+
+#define NF_INET_PRE_ROUTING NF_IP_PRE_ROUTING
+#define NF_INET_POST_ROUTING NF_IP_POST_ROUTING
+#define NF_INET_FORWARD NF_IP_FORWARD
+
+#endif /* __KERNEL__ */
+
+#endif /* linux kernel < 2.6.25 */
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/netlink.h b/datapath/linux-2.6/compat-2.6/include/linux/netlink.h
new file mode 100644
index 00000000..c5f83bd0
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/netlink.h
@@ -0,0 +1,24 @@
+#ifndef __LINUX_NETLINK_WRAPPER_H
+#define __LINUX_NETLINK_WRAPPER_H 1
+
+#include <linux/skbuff.h>
+#include_next <linux/netlink.h>
+#include <net/netlink.h>
+
+#include <linux/version.h>
+
+#ifndef NLMSG_DEFAULT_SIZE
+#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+#define nlmsg_new(s, f) nlmsg_new_proper((s), (f))
+static inline struct sk_buff *nlmsg_new_proper(int size, gfp_t flags)
+{
+ return alloc_skb(size, flags);
+}
+
+#endif /* linux kernel < 2.6.19 */
+
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/percpu.h b/datapath/linux-2.6/compat-2.6/include/linux/percpu.h
new file mode 100644
index 00000000..0f68bb25
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/percpu.h
@@ -0,0 +1,10 @@
+#ifndef __LINUX_PERCPU_H_WRAPPER
+#define __LINUX_PERCPU_H_WRAPPER 1
+
+#include_next <linux/percpu.h>
+
+#ifndef percpu_ptr
+#define percpu_ptr per_cpu_ptr
+#endif
+
+#endif /* linux/percpu.h wrapper */
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/random.h b/datapath/linux-2.6/compat-2.6/include/linux/random.h
new file mode 100644
index 00000000..4e4932c9
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/random.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_RANDOM_WRAPPER_H
+#define __LINUX_RANDOM_WRAPPER_H 1
+
+#include_next <linux/random.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+
+#ifdef __KERNEL__
+u32 random32(void);
+void srandom32(u32 seed);
+#endif /* __KERNEL__ */
+
+#endif /* linux kernel < 2.6.19 */
+
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/rculist.h b/datapath/linux-2.6/compat-2.6/include/linux/rculist.h
new file mode 100644
index 00000000..4164c0e9
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/rculist.h
@@ -0,0 +1,12 @@
+#ifndef __LINUX_RCULIST_WRAPPER_H
+#define __LINUX_RCULIST_WRAPPER_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include_next <linux/rculist.h>
+#else
+/* Prior to 2.6.26, the contents of rculist.h were part of list.h. */
+#include <linux/list.h>
+#endif
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/rtnetlink.h b/datapath/linux-2.6/compat-2.6/include/linux/rtnetlink.h
new file mode 100644
index 00000000..8bc51560
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/rtnetlink.h
@@ -0,0 +1,29 @@
+#ifndef __RTNETLINK_WRAPPER_H
+#define __RTNETLINK_WRAPPER_H 1
+
+#include_next <linux/rtnetlink.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+static inline int rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+ u32 group, struct nlmsghdr *nlh, gfp_t flags)
+{
+ BUG_ON(nlh); /* not implemented */
+ if (group) {
+ /* errors reported via destination sk->sk_err */
+ nlmsg_multicast(rtnl, skb, 0, group);
+ }
+ return 0;
+}
+
+static inline void rtnl_set_sk_err(struct net *net, u32 group, int error)
+{
+ netlink_set_err(rtnl, 0, group, error);
+}
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+#define rtnl_notify(skb, net, pid, group, nlh, flags) \
+ ((void) (net), rtnl_notify(skb, pid, group, nlh, flags))
+#define rtnl_set_sk_err(net, group, error) \
+ ((void) (net), rtnl_set_sk_err(group, error))
+#endif /* linux kernel < 2.6.25 */
+
+#endif /* linux/rtnetlink.h wrapper */
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/skbuff.h b/datapath/linux-2.6/compat-2.6/include/linux/skbuff.h
new file mode 100644
index 00000000..666ef850
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/skbuff.h
@@ -0,0 +1,170 @@
+#ifndef __LINUX_SKBUFF_WRAPPER_H
+#define __LINUX_SKBUFF_WRAPPER_H 1
+
+#include_next <linux/skbuff.h>
+
+#include <linux/version.h>
+
+#ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
+static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
+ const int offset, void *to,
+ const unsigned int len)
+{
+ memcpy(to, skb->data + offset, len);
+}
+
+static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
+ const int offset,
+ const void *from,
+ const unsigned int len)
+{
+ memcpy(skb->data + offset, from, len);
+}
+
+#endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
+
+/*
+ * The networking layer reserves some headroom in skb data (via
+ * dev_alloc_skb). This is used to avoid having to reallocate skb data when
+ * the header has to grow. In the default case, if the header has to grow
+ * 16 bytes or less we avoid the reallocation.
+ *
+ * Unfortunately this headroom changes the DMA alignment of the resulting
+ * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
+ * on some architectures. An architecture can override this value,
+ * perhaps setting it to a cacheline in size (since that will maintain
+ * cacheline alignment of the DMA). It must be a power of 2.
+ *
+ * Various parts of the networking layer expect at least 16 bytes of
+ * headroom, so you should not reduce this.
+ */
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+ int cloned)
+{
+ int delta = 0;
+
+ if (headroom < NET_SKB_PAD)
+ headroom = NET_SKB_PAD;
+ if (headroom > skb_headroom(skb))
+ delta = headroom - skb_headroom(skb);
+
+ if (delta || cloned)
+ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+ GFP_ATOMIC);
+ return 0;
+}
+
+static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+ return __skb_cow(skb, headroom, skb_header_cloned(skb));
+}
+#endif /* linux < 2.6.23 */
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+/* Emulate Linux 2.6.17 and later behavior, in which kfree_skb silently ignores
+ * null pointer arguments. */
+#define kfree_skb(skb) kfree_skb_maybe_null(skb)
+static inline void kfree_skb_maybe_null(struct sk_buff *skb)
+{
+ if (likely(skb != NULL))
+ (kfree_skb)(skb);
+}
+#endif
+
+
+#ifndef CHECKSUM_PARTIAL
+/* Note that CHECKSUM_PARTIAL is not implemented, but this allows us to at
+ * least test against it: see update_csum() in forward.c. */
+#define CHECKSUM_PARTIAL 3
+#endif
+#ifndef CHECKSUM_COMPLETE
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifdef HAVE_MAC_RAW
+#define mac_header mac.raw
+#define network_header nh.raw
+#endif
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+{
+ return skb->h.raw;
+}
+
+static inline void skb_reset_transport_header(struct sk_buff *skb)
+{
+ skb->h.raw = skb->data;
+}
+
+static inline void skb_set_transport_header(struct sk_buff *skb,
+ const int offset)
+{
+ skb->h.raw = skb->data + offset;
+}
+
+static inline unsigned char *skb_network_header(const struct sk_buff *skb)
+{
+ return skb->nh.raw;
+}
+
+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
+{
+ skb->nh.raw = skb->data + offset;
+}
+
+static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
+{
+ return skb->mac.raw;
+}
+
+static inline void skb_reset_mac_header(struct sk_buff *skb)
+{
+	skb->mac.raw = skb->data;
+}
+
+static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+{
+ skb->mac.raw = skb->data + offset;
+}
+
+static inline int skb_transport_offset(const struct sk_buff *skb)
+{
+ return skb_transport_header(skb) - skb->data;
+}
+
+static inline int skb_network_offset(const struct sk_buff *skb)
+{
+ return skb_network_header(skb) - skb->data;
+}
+
+static inline void skb_copy_to_linear_data(struct sk_buff *skb,
+ const void *from,
+ const unsigned int len)
+{
+ memcpy(skb->data, from, len);
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+#warning "TSO/UFO not supported on kernels earlier than 2.6.18"
+
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+ int features)
+{
+ return NULL;
+}
+#endif /* before 2.6.18 */
+
+#endif
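
A sketch of the accessor style these shims enable, assuming a linear IPv4 frame whose network header starts at a known offset; the same source builds whether skb still has the old nh/h/mac unions or the 2.6.22-era header offsets:

    static void mark_headers(struct sk_buff *skb, int network_off)
    {
            skb_reset_mac_header(skb);
            skb_set_network_header(skb, network_off);
            /* the transport header follows the IPv4 header */
            skb_set_transport_header(skb, network_off + ip_hdrlen(skb));
    }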
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/tcp.h b/datapath/linux-2.6/compat-2.6/include/linux/tcp.h
new file mode 100644
index 00000000..6fad1933
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/tcp.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_TCP_WRAPPER_H
+#define __LINUX_TCP_WRAPPER_H 1
+
+#include_next <linux/tcp.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+ return (struct tcphdr *)skb_transport_header(skb);
+}
+
+static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+{
+ return tcp_hdr(skb)->doff * 4;
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/timer.h b/datapath/linux-2.6/compat-2.6/include/linux/timer.h
new file mode 100644
index 00000000..6c3a9b0f
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/timer.h
@@ -0,0 +1,96 @@
+#ifndef __LINUX_TIMER_WRAPPER_H
+#define __LINUX_TIMER_WRAPPER_H 1
+
+#include_next <linux/timer.h>
+
+#include <linux/version.h>
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(X,Y) ( 0 )
+#endif
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
+ (!defined(RHEL_RELEASE_CODE) || \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,1))))
+
+extern unsigned long volatile jiffies;
+
+/**
+ * __round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * __round_jiffies() rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The exact rounding is skewed for each processor to avoid all
+ * processors firing at the exact same time, which could lead
+ * to lock contention or spurious cache line bouncing.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+static inline unsigned long __round_jiffies(unsigned long j, int cpu)
+{
+ int rem;
+ unsigned long original = j;
+
+ /*
+ * We don't want all cpus firing their timers at once hitting the
+ * same lock or cachelines, so we skew each extra cpu with an extra
+ * 3 jiffies. This 3 jiffies came originally from the mm/ code which
+ * already did this.
+ * The skew is done by adding 3*cpunr, then round, then subtract this
+ * extra offset again.
+ */
+ j += cpu * 3;
+
+ rem = j % HZ;
+
+ /*
+ * If the target jiffie is just after a whole second (which can happen
+ * due to delays of the timer irq, long irq off times etc etc) then
+ * we should round down to the whole second, not up. Use 1/4th second
+ * as cutoff for this rounding as an extreme upper bound for this.
+ */
+ if (rem < HZ/4) /* round down */
+ j = j - rem;
+ else /* round up */
+ j = j - rem + HZ;
+
+ /* now that we have rounded, subtract the extra skew again */
+ j -= cpu * 3;
+
+ if (j <= jiffies) /* rounding ate our timeout entirely; */
+ return original;
+ return j;
+}
+
+
+/**
+ * round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ *
+ * round_jiffies() rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+static inline unsigned long round_jiffies(unsigned long j)
+{
+	return __round_jiffies(j, 0);	/* FIXME */
+}
+
+#endif /* linux kernel < 2.6.20 */
+
+#endif
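
The intended use is coalescing periodic timers on whole-second boundaries; a minimal sketch:

    static void rearm_maintenance_timer(struct timer_list *t)
    {
            /* fires roughly once a second, aligned with other rounded timers */
            mod_timer(t, round_jiffies(jiffies + HZ));
    }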
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/types.h b/datapath/linux-2.6/compat-2.6/include/linux/types.h
new file mode 100644
index 00000000..c1f375eb
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/types.h
@@ -0,0 +1,14 @@
+#ifndef __LINUX_TYPES_WRAPPER_H
+#define __LINUX_TYPES_WRAPPER_H 1
+
+#include_next <linux/types.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+
+#endif /* linux kernel < 2.6.20 */
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/udp.h b/datapath/linux-2.6/compat-2.6/include/linux/udp.h
new file mode 100644
index 00000000..6fe4721b
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/udp.h
@@ -0,0 +1,13 @@
+#ifndef __LINUX_UDP_WRAPPER_H
+#define __LINUX_UDP_WRAPPER_H 1
+
+#include_next <linux/udp.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
+{
+ return (struct udphdr *)skb_transport_header(skb);
+}
+#endif /* HAVE_SKBUFF_HEADER_HELPERS */
+
+#endif
diff --git a/datapath/linux-2.6/compat-2.6/include/linux/workqueue.h b/datapath/linux-2.6/compat-2.6/include/linux/workqueue.h
new file mode 100644
index 00000000..1ac3b6ec
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/linux/workqueue.h
@@ -0,0 +1,42 @@
+#ifndef __LINUX_WORKQUEUE_WRAPPER_H
+#define __LINUX_WORKQUEUE_WRAPPER_H 1
+
+#include_next <linux/workqueue.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+
+#ifdef __KERNEL__
+/*
+ * initialize a work-struct's func and data pointers:
+ */
+#undef PREPARE_WORK
+#define PREPARE_WORK(_work, _func) \
+ do { \
+ (_work)->func = (void(*)(void*)) _func; \
+ (_work)->data = _work; \
+ } while (0)
+
+/*
+ * initialize all of a work-struct:
+ */
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+ do { \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->pending = 0; \
+ PREPARE_WORK((_work), (_func)); \
+ init_timer(&(_work)->timer); \
+ } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* linux kernel < 2.6.20 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+/* There is no equivalent to cancel_work_sync() so just flush all
+ * pending work. */
+#define cancel_work_sync(_work) flush_scheduled_work()
+#endif
+
+#endif
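
The redefined INIT_WORK() above arranges for pre-2.6.20 kernels to pass the work_struct itself as the handler's argument, so a handler written in the 2.6.20 style runs unmodified on both; a hypothetical sketch:

    struct my_task {
            struct work_struct work;
            int arg;                        /* hypothetical payload */
    };

    static void my_task_fn(struct work_struct *work)
    {
            struct my_task *t = container_of(work, struct my_task, work);

            printk(KERN_DEBUG "task arg %d\n", t->arg);
    }

    static void my_task_start(struct my_task *t)
    {
            INIT_WORK(&t->work, my_task_fn);
            schedule_work(&t->work);
    }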
diff --git a/datapath/linux-2.6/compat-2.6/include/net/checksum.h b/datapath/linux-2.6/compat-2.6/include/net/checksum.h
new file mode 100644
index 00000000..c64c6bd0
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/net/checksum.h
@@ -0,0 +1,16 @@
+#ifndef __NET_CHECKSUM_WRAPPER_H
+#define __NET_CHECKSUM_WRAPPER_H 1
+
+#include_next <net/checksum.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+
+static inline __wsum csum_unfold(__sum16 n)
+{
+ return (__force __wsum)n;
+}
+
+#endif /* linux kernel < 2.6.20 */
+
+#endif /* checksum.h */
diff --git a/datapath/linux-2.6/compat-2.6/include/net/genetlink.h b/datapath/linux-2.6/compat-2.6/include/net/genetlink.h
new file mode 100644
index 00000000..57a47316
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/net/genetlink.h
@@ -0,0 +1,123 @@
+#ifndef __NET_GENERIC_NETLINK_WRAPPER_H
+#define __NET_GENERIC_NETLINK_WRAPPER_H 1
+
+
+#include <linux/netlink.h>
+#include_next <net/genetlink.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+
+#include <linux/genetlink.h>
+
+/*----------------------------------------------------------------------------
+ * In 2.6.23, registering of multicast groups was added.  Our compatibility
+ * layer just supports registering a single group, since that's all we
+ * need.
+ */
+
+/**
+ * struct genl_multicast_group - generic netlink multicast group
+ * @name: name of the multicast group, names are per-family
+ * @id: multicast group ID, assigned by the core, to use with
+ * genlmsg_multicast().
+ * @list: list entry for linking
+ * @family: pointer to family, need not be set before registering
+ */
+struct genl_multicast_group
+{
+ struct genl_family *family; /* private */
+ struct list_head list; /* private */
+ char name[GENL_NAMSIZ];
+ u32 id;
+};
+
+int genl_register_mc_group(struct genl_family *family,
+ struct genl_multicast_group *grp);
+#endif /* linux kernel < 2.6.23 */
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/**
+ * genlmsg_msg_size - length of genetlink message not including padding
+ * @payload: length of message payload
+ */
+static inline int genlmsg_msg_size(int payload)
+{
+ return GENL_HDRLEN + payload;
+}
+
+/**
+ * genlmsg_total_size - length of genetlink message including padding
+ * @payload: length of message payload
+ */
+static inline int genlmsg_total_size(int payload)
+{
+ return NLMSG_ALIGN(genlmsg_msg_size(payload));
+}
+
+#define genlmsg_multicast(s, p, g, f) \
+ genlmsg_multicast_flags((s), (p), (g), (f))
+
+static inline int genlmsg_multicast_flags(struct sk_buff *skb, u32 pid,
+ unsigned int group, gfp_t flags)
+{
+ int err;
+
+ NETLINK_CB(skb).dst_group = group;
+
+ err = netlink_broadcast(genl_sock, skb, pid, group, flags);
+ if (err > 0)
+ err = 0;
+
+ return err;
+}
+#endif /* linux kernel < 2.6.19 */
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+
+#define genlmsg_put(skb, p, seq, fam, flg, c) \
+ genlmsg_put((skb), (p), (seq), (fam)->id, (fam)->hdrsize, \
+ (flg), (c), (fam)->version)
+
+/**
+ * genlmsg_put_reply - Add generic netlink header to a reply message
+ * @skb: socket buffer holding the message
+ * @info: receiver info
+ * @family: generic netlink family
+ * @flags: netlink message flags
+ * @cmd: generic netlink command
+ *
+ * Returns pointer to user specific header
+ */
+static inline void *genlmsg_put_reply(struct sk_buff *skb,
+ struct genl_info *info, struct genl_family *family,
+ int flags, u8 cmd)
+{
+ return genlmsg_put(skb, info->snd_pid, info->snd_seq, family,
+ flags, cmd);
+}
+
+/**
+ * genlmsg_reply - reply to a request
+ * @skb: netlink message to be sent back
+ * @info: receiver information
+ */
+static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
+{
+ return genlmsg_unicast(skb, info->snd_pid);
+}
+
+/**
+ * genlmsg_new - Allocate a new generic netlink message
+ * @payload: size of the message payload
+ * @flags: the type of memory to allocate.
+ */
+static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
+{
+ return nlmsg_new(genlmsg_total_size(payload), flags);
+}
+#endif /* linux kernel < 2.6.20 */
+
+#endif /* genetlink.h */
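
A sketch of a command handler built on these shims; the family, command number, and attribute writing are hypothetical and error handling is abbreviated:

    static int my_cmd_doit(struct sk_buff *skb, struct genl_info *info)
    {
            struct sk_buff *reply;
            void *hdr;

            reply = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
            if (!reply)
                    return -ENOMEM;
            hdr = genlmsg_put_reply(reply, info, &my_family, 0, MY_CMD_REPLY);
            if (!hdr) {
                    kfree_skb(reply);
                    return -EMSGSIZE;
            }
            /* ... nla_put() attributes here ... */
            genlmsg_end(reply, hdr);
            return genlmsg_reply(reply, info);
    }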
diff --git a/datapath/linux-2.6/compat-2.6/include/net/netlink.h b/datapath/linux-2.6/compat-2.6/include/net/netlink.h
new file mode 100644
index 00000000..e0d594d7
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/include/net/netlink.h
@@ -0,0 +1,22 @@
+#ifndef __NET_NETLINK_WRAPPER_H
+#define __NET_NETLINK_WRAPPER_H 1
+
+#include_next <net/netlink.h>
+
+#ifndef HAVE_NLA_NUL_STRING
+#define NLA_NUL_STRING NLA_STRING
+
+static inline int VERIFY_NUL_STRING(struct nlattr *attr)
+{
+ return (!attr || (nla_len(attr)
+ && memchr(nla_data(attr), '\0', nla_len(attr)))
+ ? 0 : EINVAL);
+}
+#else
+static inline int VERIFY_NUL_STRING(struct nlattr *attr)
+{
+ return 0;
+}
+#endif /* !HAVE_NLA_NUL_STRING */
+
+#endif /* net/netlink.h */
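
Note that the shim returns a positive EINVAL; a hypothetical validation helper therefore negates it before handing the result back up the netlink path:

    static int check_name_attr(struct nlattr *attrs[])
    {
            int err = VERIFY_NUL_STRING(attrs[MY_ATTR_NAME]);  /* hypothetical attr */

            return err ? -err : 0;  /* shim returns positive EINVAL */
    }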
diff --git a/datapath/linux-2.6/compat-2.6/random32.c b/datapath/linux-2.6/compat-2.6/random32.c
new file mode 100644
index 00000000..b0dd2a32
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/random32.c
@@ -0,0 +1,144 @@
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+
+/*
+ This is a maximally equidistributed combined Tausworthe generator
+ based on code from GNU Scientific Library 1.5 (30 Jun 2004)
+
+ x_n = (s1_n ^ s2_n ^ s3_n)
+
+ s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
+ s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
+ s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
+
+ The period of this generator is about 2^88.
+
+ From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
+ Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
+
+ This is available on the net from L'Ecuyer's home page,
+
+ http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+ ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
+
+ There is an erratum in the paper "Tables of Maximally
+ Equidistributed Combined LFSR Generators", Mathematics of
+ Computation, 68, 225 (1999), 261--269:
+ http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
+
+ ... the k_j most significant bits of z_j must be non-
+ zero, for each j. (Note: this restriction also applies to the
+ computer code given in [4], but was mistakenly not mentioned in
+ that paper.)
+
+ This affects the seeding procedure by imposing the requirement
+ s1 > 1, s2 > 7, s3 > 15.
+
+*/
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+
+#include "compat26.h"
+
+struct rnd_state {
+ u32 s1, s2, s3;
+};
+
+static struct rnd_state net_rand_state[NR_CPUS];
+
+static u32 __random32(struct rnd_state *state)
+{
+#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
+
+ state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
+ state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
+ state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
+
+ return (state->s1 ^ state->s2 ^ state->s3);
+}
+
+static void __set_random32(struct rnd_state *state, unsigned long s)
+{
+ if (s == 0)
+ s = 1; /* default seed is 1 */
+
+#define LCG(n) (69069 * n)
+ state->s1 = LCG(s);
+ state->s2 = LCG(state->s1);
+ state->s3 = LCG(state->s2);
+
+ /* "warm it up" */
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+}
+
+/**
+ * random32 - pseudo random number generator
+ *
+ * A 32 bit pseudo-random number is generated using a fast
+ * algorithm suitable for simulation. This algorithm is NOT
+ * considered safe for cryptographic use.
+ */
+u32 random32(void)
+{
+ return __random32(&net_rand_state[smp_processor_id()]);
+}
+
+/**
+ * srandom32 - add entropy to pseudo random number generator
+ * @entropy: seed value
+ *
+ * Add some additional seeding to the random32() pool.
+ * Note: this pool is per cpu so it only affects current CPU.
+ */
+void srandom32(u32 entropy)
+{
+ struct rnd_state *state = &net_rand_state[smp_processor_id()];
+ __set_random32(state, state->s1 ^ entropy);
+}
+
+static int __init random32_reseed(void);
+
+/*
+ * Generate some initially weak seeding values to allow
+ * the random32() engine to start.
+ */
+int __init random32_init(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ struct rnd_state *state = &net_rand_state[i];
+ __set_random32(state, i + jiffies);
+ }
+ random32_reseed();
+ return 0;
+}
+
+/*
+ * Generate better values once the random number generator
+ * is fully initialized.
+ */
+static int __init random32_reseed(void)
+{
+ int i;
+ unsigned long seed;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ struct rnd_state *state = &net_rand_state[i];
+
+ get_random_bytes(&seed, sizeof(seed));
+ __set_random32(state, seed);
+ }
+ return 0;
+}
+
+#endif /* kernel < 2.6.19 */
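
Matching the "NOT considered safe for cryptographic use" caveat above, a typical use is cheap load spreading; a minimal sketch:

    static unsigned int pick_bucket(unsigned int n_buckets)
    {
            /* modulo bias is acceptable for load spreading */
            return random32() % n_buckets;
    }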
diff --git a/datapath/linux-2.6/compat-2.6/veth.c b/datapath/linux-2.6/compat-2.6/veth.c
new file mode 100644
index 00000000..3cda3365
--- /dev/null
+++ b/datapath/linux-2.6/compat-2.6/veth.c
@@ -0,0 +1,537 @@
+/* veth driver port to Linux 2.6.18 */
+
+/*
+ * drivers/net/veth.c
+ *
+ * Copyright (C) 2007, 2009 OpenVZ http://openvz.org, SWsoft Inc
+ *
+ * Author: Pavel Emelianov <xemul@openvz.org>
+ * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
+ *
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+
+#include <net/dst.h>
+#include <net/xfrm.h>
+
+#define DRV_NAME "veth"
+#define DRV_VERSION "1.0"
+
+struct veth_net_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long tx_dropped;
+};
+
+struct veth_priv {
+ struct net_device *peer;
+ struct net_device *dev;
+ struct list_head list;
+ struct veth_net_stats *stats;
+ unsigned ip_summed;
+ struct net_device_stats dev_stats;
+};
+
+static LIST_HEAD(veth_list);
+
+/*
+ * ethtool interface
+ */
+
+static struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+ { "peer_ifindex" },
+};
+
+static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = 0;
+ cmd->advertising = 0;
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_TP;
+ cmd->phy_address = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->maxtxpkt = 0;
+ cmd->maxrxpkt = 0;
+ return 0;
+}
+
+static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->fw_version, "N/A");
+}
+
+static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ break;
+ }
+}
+
+static void veth_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ data[0] = priv->peer->ifindex;
+}
+
+static u32 veth_get_rx_csum(struct net_device *dev)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ return priv->ip_summed == CHECKSUM_UNNECESSARY;
+}
+
+static int veth_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ priv->ip_summed = data ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+ return 0;
+}
+
+static u32 veth_get_tx_csum(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_NO_CSUM) != 0;
+}
+
+static int veth_set_tx_csum(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_NO_CSUM;
+ else
+ dev->features &= ~NETIF_F_NO_CSUM;
+ return 0;
+}
+
+static struct ethtool_ops veth_ethtool_ops = {
+ .get_settings = veth_get_settings,
+ .get_drvinfo = veth_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_rx_csum = veth_get_rx_csum,
+ .set_rx_csum = veth_set_rx_csum,
+ .get_tx_csum = veth_get_tx_csum,
+ .set_tx_csum = veth_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_strings = veth_get_strings,
+ .get_ethtool_stats = veth_get_ethtool_stats,
+};
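+
+/*
+ * With these ops wired up, userspace ethtool can query the pairing, e.g.
+ * (hypothetical session; the ifindex value depends on the system):
+ *
+ *	$ ethtool -S veth0
+ *	NIC statistics:
+ *	     peer_ifindex: 5
+ */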
+
+/*
+ * xmit
+ */
+
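+/*
+ * Transmit on one device of a pair: the skb is re-targeted at the peer
+ * device and fed back into the stack via netif_rx().  Per-CPU counters
+ * are used because NETIF_F_LLTX (set in veth_setup() below) lets the
+ * core call this routine concurrently on different CPUs.
+ */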
+static int veth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device *rcv = NULL;
+ struct veth_priv *priv, *rcv_priv;
+ struct veth_net_stats *stats;
+ int length, cpu;
+
+ skb_orphan(skb);
+
+ priv = netdev_priv(dev);
+ rcv = priv->peer;
+ rcv_priv = netdev_priv(rcv);
+
+ cpu = smp_processor_id();
+ stats = per_cpu_ptr(priv->stats, cpu);
+
+ if (!(rcv->flags & IFF_UP))
+ goto outf;
+
+ skb->dev = rcv;
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, rcv);
+ if (dev->features & NETIF_F_NO_CSUM)
+ skb->ip_summed = rcv_priv->ip_summed;
+
+ dst_release(skb->dst);
+ skb->dst = NULL;
+ secpath_reset(skb);
+ nf_reset(skb);
+
+ length = skb->len;
+
+ stats->tx_bytes += length;
+ stats->tx_packets++;
+
+ stats = per_cpu_ptr(rcv_priv->stats, cpu);
+ stats->rx_bytes += length;
+ stats->rx_packets++;
+
+ netif_rx(skb);
+ return 0;
+
+outf:
+ kfree_skb(skb);
+ stats->tx_dropped++;
+ return 0;
+}
+
+/*
+ * general routines
+ */
+
+static struct net_device_stats *veth_get_stats(struct net_device *dev)
+{
+ struct veth_priv *priv;
+ struct net_device_stats *dev_stats;
+ int cpu;
+ struct veth_net_stats *stats;
+
+ priv = netdev_priv(dev);
+ dev_stats = &priv->dev_stats;
+
+ dev_stats->rx_packets = 0;
+ dev_stats->tx_packets = 0;
+ dev_stats->rx_bytes = 0;
+ dev_stats->tx_bytes = 0;
+ dev_stats->tx_dropped = 0;
+
+ for_each_online_cpu(cpu) {
+ stats = per_cpu_ptr(priv->stats, cpu);
+
+ dev_stats->rx_packets += stats->rx_packets;
+ dev_stats->tx_packets += stats->tx_packets;
+ dev_stats->rx_bytes += stats->rx_bytes;
+ dev_stats->tx_bytes += stats->tx_bytes;
+ dev_stats->tx_dropped += stats->tx_dropped;
+ }
+
+ return dev_stats;
+}
+
+static int veth_open(struct net_device *dev)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ if (priv->peer == NULL)
+ return -ENOTCONN;
+
+ if (priv->peer->flags & IFF_UP) {
+ netif_carrier_on(dev);
+ netif_carrier_on(priv->peer);
+ }
+ return 0;
+}
+
+static int veth_dev_init(struct net_device *dev)
+{
+ struct veth_net_stats *stats;
+ struct veth_priv *priv;
+
+ stats = alloc_percpu(struct veth_net_stats);
+ if (stats == NULL)
+ return -ENOMEM;
+
+ priv = netdev_priv(dev);
+ priv->stats = stats;
+ return 0;
+}
+
+static void veth_dev_free(struct net_device *dev)
+{
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ free_percpu(priv->stats);
+ free_netdev(dev);
+}
+
+static void veth_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->hard_start_xmit = veth_xmit;
+ dev->get_stats = veth_get_stats;
+ dev->open = veth_open;
+ dev->ethtool_ops = &veth_ethtool_ops;
+ dev->features |= NETIF_F_LLTX;
+ dev->init = veth_dev_init;
+ dev->destructor = veth_dev_free;
+}
+
+static void veth_change_state(struct net_device *dev)
+{
+ struct net_device *peer;
+ struct veth_priv *priv;
+
+ priv = netdev_priv(dev);
+ peer = priv->peer;
+
+ if (netif_carrier_ok(peer)) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ }
+}
+
+static int veth_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+
+ if (dev->open != veth_open)
+ goto out;
+
+ switch (event) {
+ case NETDEV_CHANGE:
+ veth_change_state(dev);
+ break;
+ }
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block veth_notifier_block __read_mostly = {
+ .notifier_call = veth_device_event,
+};
+
+/*
+ * pair creation and deletion
+ *
+ * (In the upstream driver these are netlink operations; in this 2.6.18
+ * port they are driven by the sysfs interface below.)
+ */
+
+static int veth_newlink(const char *devname, const char *peername)
+{
+ int err;
+ const char *names[2];
+ struct net_device *devs[2];
+ int i;
+
+ names[0] = devname;
+ names[1] = peername;
+ devs[0] = devs[1] = NULL;
+
+ for (i = 0; i < 2; i++) {
+ struct net_device *dev;
+
+ err = -ENOMEM;
+ devs[i] = alloc_netdev(sizeof(struct veth_priv),
+ names[i], veth_setup);
+ if (!devs[i]) {
+ goto err;
+ }
+
+ dev = devs[i];
+
+ if (strchr(dev->name, '%')) {
+ err = dev_alloc_name(dev, dev->name);
+ if (err < 0)
+ goto err;
+ }
+ random_ether_addr(dev->dev_addr);
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ goto err;
+
+ netif_carrier_off(dev);
+ }
+
+ /*
+ * tie the devices together
+ */
+
+ for (i = 0; i < 2; i++) {
+ struct veth_priv *priv = netdev_priv(devs[i]);
+ priv->dev = devs[i];
+ priv->peer = devs[!i];
+ if (!i)
+ list_add(&priv->list, &veth_list);
+ else
+ INIT_LIST_HEAD(&priv->list);
+ }
+ return 0;
+
+err:
+ for (i = 0; i < 2; i++) {
+ if (devs[i]) {
+ if (devs[i]->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdevice(devs[i]);
+ else
+ free_netdev(devs[i]);
+ }
+ }
+ return err;
+}
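+
+/*
+ * If a requested name contains a '%' template, dev_alloc_name() above
+ * resolves it, so (hypothetically) asking for "veth%d" yields the first
+ * free veth<N> name at registration time.
+ */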
+
+static void veth_dellink(struct net_device *dev)
+{
+ struct veth_priv *priv;
+ struct net_device *peer;
+
+ priv = netdev_priv(dev);
+ peer = priv->peer;
+
+ if (!list_empty(&priv->list))
+ list_del(&priv->list);
+
+ priv = netdev_priv(peer);
+ if (!list_empty(&priv->list))
+ list_del(&priv->list);
+
+ unregister_netdevice(dev);
+ unregister_netdevice(peer);
+}
+
+/*
+ * sysfs
+ */
+
+/*
+ * "show" function for the veth_pairs attribute.
+ * The class parameter is ignored.
+ */
+static ssize_t veth_show_veth_pairs(struct class *cls, char *buffer)
+{
+ int res = 0;
+ struct veth_priv *priv;
+
+ list_for_each_entry(priv, &veth_list, list) {
+ if (res > (PAGE_SIZE - (IFNAMSIZ * 2 + 1))) {
+ /* not enough space for another interface name; back up
+ * far enough that "++more++" is guaranteed to fit */
+ if ((PAGE_SIZE - res) < 10)
+ res = PAGE_SIZE - 10;
+ res += sprintf(buffer + res, "++more++");
+ break;
+ }
+ res += sprintf(buffer + res, "%s,%s ",
+ priv->dev->name, priv->peer->name);
+ }
+ res += sprintf(buffer + res, "\n");
+ return res;
+}
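+
+/*
+ * Reading the attribute thus yields one "dev,peer" entry per pair, e.g.
+ * (hypothetical output):
+ *
+ *	$ cat /sys/class/net/veth_pairs
+ *	veth0,veth1
+ */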
+
+/*
+ * "store" function for the veth_pairs attribute. This is what
+ * creates and deletes veth pairs.
+ *
+ * The class parameter is ignored.
+ *
+ */
+static ssize_t veth_store_veth_pairs(struct class *cls, const char *buffer,
+ size_t count)
+{
+ int c = *buffer++;
+ int retval;
+
+ if (c == '+') {
+ char devname[IFNAMSIZ + 1] = "";
+ char peername[IFNAMSIZ + 1] = "";
+ char *comma = strchr(buffer, ',');
+
+ if (!comma)
+ goto err_no_cmd;
+ /* bound by sizeof - 1 so strncat() always has room for
+ * the terminating null byte */
+ strncat(devname, buffer,
+ min_t(int, sizeof devname - 1, comma - buffer));
+ strncat(peername, comma + 1,
+ min_t(int, sizeof peername - 1, strcspn(comma + 1, "\n")));
+ if (!dev_valid_name(devname) || !dev_valid_name(peername))
+ goto err_no_cmd;
+ rtnl_lock();
+ retval = veth_newlink(devname, peername);
+ rtnl_unlock();
+ return retval ? retval : count;
+ } else if (c == '-') {
+ char devname[IFNAMSIZ + 1] = "";
+ struct net_device *dev;
+
+ /* trim the trailing newline that echo(1) appends */
+ strncat(devname, buffer,
+ min_t(int, sizeof devname - 1, strcspn(buffer, "\n")));
+
+ rtnl_lock();
+ dev = dev_get_by_name(devname);
+ if (!dev)
+ retval = -ENODEV;
+ else if (dev->init != veth_dev_init) {
+ dev_put(dev);
+ retval = -EINVAL;
+ } else {
+ /* drop the lookup reference before unregistering;
+ * RTNL keeps the device alive meanwhile */
+ dev_put(dev);
+ veth_dellink(dev);
+ retval = count;
+ }
+ rtnl_unlock();
+
+ return retval;
+ }
+
+err_no_cmd:
+ printk(KERN_ERR DRV_NAME ": no command found in veth_pairs. Use +ifname,peername or -ifname.\n");
+ return -EPERM;
+}
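+
+/*
+ * Example (illustrative): creating and then destroying a pair through
+ * the class attribute defined below:
+ *
+ *	# echo '+veth0,veth1' > /sys/class/net/veth_pairs
+ *	# echo '-veth0'       > /sys/class/net/veth_pairs
+ */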
+
+/* class attribute for veth_pairs file. This ends up in /sys/class/net */
+static CLASS_ATTR(veth_pairs, S_IWUSR | S_IRUGO,
+ veth_show_veth_pairs, veth_store_veth_pairs);
+
+static struct class *netdev_class;
+
+/*
+ * Initialize sysfs. This sets up the veth_pairs file in
+ * /sys/class/net.
+ */
+int veth_create_sysfs(void)
+{
+ struct net_device *dev = dev_get_by_name("lo");
+ if (!dev)
+ return -ESRCH;
+ netdev_class = dev->class_dev.class;
+ dev_put(dev); /* drop the reference taken by dev_get_by_name() */
+ if (!netdev_class)
+ return -ENODEV;
+
+ return class_create_file(netdev_class, &class_attr_veth_pairs);
+}
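+
+/*
+ * Design note: 2.6.18 exports no direct handle to the netdev sysfs class,
+ * so veth_create_sysfs() fishes the class pointer out of the loopback
+ * device, which is assumed to always exist.
+ */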
+
+/*
+ * Remove /sys/class/net/veth_pairs.
+ */
+void veth_destroy_sysfs(void)
+{
+ class_remove_file(netdev_class, &class_attr_veth_pairs);
+}
+
+/*
+ * init/fini
+ */
+
+static __init int veth_init(void)
+{
+ int retval = veth_create_sysfs();
+ if (retval)
+ return retval;
+ register_netdevice_notifier(&veth_notifier_block);
+ return 0;
+}
+
+static __exit void veth_exit(void)
+{
+ unregister_netdevice_notifier(&veth_notifier_block);
+ veth_destroy_sysfs(); /* remove /sys/class/net/veth_pairs */
+}
+
+module_init(veth_init);
+module_exit(veth_exit);
+
+MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
+MODULE_LICENSE("GPL v2");