--with-l26-source has been renamed --with-linux-source. The old
names will be removed after the next release, so please update
your scripts.
+ - The "-2.6" suffix has been dropped from the datapath/linux-2.6 and
+ datapath/linux-2.6/compat-2.6 directories.
- Feature removals:
- Dropped support for "tun_id_from_cookie" OpenFlow extension.
(Use the extensible match extensions instead.)
when invoking the configure script. For example, to build for MIPS
with Linux:
- % ./configure --with-linux=/path/to/linux-2.6 KARCH=mips
+ % ./configure --with-linux=/path/to/linux KARCH=mips
The configure script accepts a number of other options and honors
additional environment variables. For a full list, invoke
6. If you built kernel modules, you may load them with "insmod", e.g.:
- % insmod datapath/linux-2.6/openvswitch_mod.ko
+ % insmod datapath/linux/openvswitch_mod.ko
You may need to specify a full path to insmod, e.g. /sbin/insmod.
To verify that the modules have been loaded, run "/sbin/lsmod" and
2. Load the brcompat kernel module (which was built in step 1), e.g.:
- % insmod datapath/linux-2.6/brcompat_mod.ko
+ % insmod datapath/linux/brcompat_mod.ko
(openvswitch_mod.ko should already have been loaded.)
dnl
dnl Defines NAME to 1 in kcompat.h.
AC_DEFUN([OVS_DEFINE], [
- echo '#define $1 1' >> datapath/linux-2.6/kcompat.h.new
+ echo '#define $1 1' >> datapath/linux/kcompat.h.new
])
AC_DEFUN([OVS_CHECK_LOG2_H], [
dnl Runs various Autoconf checks on the Linux 2.6 kernel source in
dnl the directory in $KBUILD.
AC_DEFUN([OVS_CHECK_LINUX_COMPAT], [
- rm -f datapath/linux-2.6/kcompat.h.new
- mkdir -p datapath/linux-2.6
- : > datapath/linux-2.6/kcompat.h.new
+ rm -f datapath/linux/kcompat.h.new
+ mkdir -p datapath/linux
+ : > datapath/linux/kcompat.h.new
OVS_GREP_IFELSE([$KSRC/arch/x86/include/asm/checksum_32.h], [src_err,],
[OVS_DEFINE([HAVE_CSUM_COPY_DBG])])
OVS_CHECK_LOG2_H
- if cmp -s datapath/linux-2.6/kcompat.h.new \
- datapath/linux-2.6/kcompat.h >/dev/null 2>&1; then
- rm datapath/linux-2.6/kcompat.h.new
+ if cmp -s datapath/linux/kcompat.h.new \
+ datapath/linux/kcompat.h >/dev/null 2>&1; then
+ rm datapath/linux/kcompat.h.new
else
- mv datapath/linux-2.6/kcompat.h.new datapath/linux-2.6/kcompat.h
+ mv datapath/linux/kcompat.h.new datapath/linux/kcompat.h
fi
])
AC_CONFIG_FILES([Makefile
datapath/Makefile
-datapath/linux-2.6/Kbuild
-datapath/linux-2.6/Makefile
-datapath/linux-2.6/Makefile.main
+datapath/linux/Kbuild
+datapath/linux/Makefile
+datapath/linux/Makefile.main
tests/atlocal])
dnl This makes sure that include/openflow gets created in the build directory.
SUBDIRS =
if LINUX_ENABLED
-SUBDIRS += linux-2.6
+SUBDIRS += linux
endif
EXTRA_DIST = $(dist_headers) $(dist_sources)
AUTOMAKE_OPTIONS = -Wno-portability
include Modules.mk
-include linux-2.6/Modules.mk
+include linux/Modules.mk
# The following is based on commands for the Automake "distdir" target.
distfiles: Makefile
+++ /dev/null
-/Kbuild
-/Makefile
-/Makefile.main
-/Module.markers
-/actions.c
-/addrconf_core-openvswitch.c
-/brc_sysfs_dp.c
-/brc_sysfs_if.c
-/brcompat.c
-/checksum.c
-/dev-openvswitch.c
-/dp_sysfs_dp.c
-/dp_sysfs_if.c
-/datapath.c
-/dp_dev.c
-/dp_notify.c
-/flow.c
-/genetlink-brcompat.c
-/genetlink-openvswitch.c
-/ip_output-openvswitch.c
-/kcompat.h
-/kmemdup.c
-/linux-2.6
-/loop_counter.c
-/modules.order
-/netdevice.c
-/random32.c
-/skbuff-openvswitch.c
-/table.c
-/time.c
-/tmp
-/tunnel.c
-/vlan.c
-/vport-capwap.c
-/vport-generic.c
-/vport-gre.c
-/vport-internal_dev.c
-/vport-netdev.c
-/vport-patch.c
-/vport.c
+++ /dev/null
-# -*- makefile -*-
-export builddir = @abs_builddir@
-export srcdir = @abs_srcdir@
-export top_srcdir = @abs_top_srcdir@
-export VERSION = @VERSION@
-export BUILDNR = @BUILDNR@
-
-include $(srcdir)/../Modules.mk
-include $(srcdir)/Modules.mk
-
-EXTRA_CFLAGS := -DVERSION=\"$(VERSION)\"
-EXTRA_CFLAGS += -I$(srcdir)/..
-EXTRA_CFLAGS += -I$(builddir)/..
-ifeq '$(BUILDNR)' '0'
-EXTRA_CFLAGS += -DBUILDNR=\"\"
-else
-EXTRA_CFLAGS += -DBUILDNR=\"+build$(BUILDNR)\"
-endif
-EXTRA_CFLAGS += -g
-EXTRA_CFLAGS += -include $(builddir)/kcompat.h
-
-# These include directories have to go before -I$(KSRC)/include.
-# NOSTDINC_FLAGS just happens to be a variable that goes in the
-# right place, even though it's conceptually incorrect.
-NOSTDINC_FLAGS += -I$(top_srcdir)/include -I$(srcdir)/compat-2.6 -I$(srcdir)/compat-2.6/include
-
-obj-m := $(patsubst %,%_mod.o,$(build_modules))
-
-define module_template
-$(1)_mod-y = $$(notdir $$(patsubst %.c,%.o,$($(1)_sources)))
-endef
-
-$(foreach module,$(build_modules),$(eval $(call module_template,$(module))))
+++ /dev/null
-ifeq ($(KERNELRELEASE),)
-# We're being called directly by running make in this directory.
-include Makefile.main
-else
-# We're being included by the Linux kernel build system
-include Kbuild
-endif
-
-
+++ /dev/null
-# -*- makefile -*-
-export builddir = @abs_builddir@
-export srcdir = @abs_srcdir@
-export top_srcdir = @abs_top_srcdir@
-export KSRC = @KBUILD@
-export VERSION = @VERSION@
-
-include $(srcdir)/../Modules.mk
-include $(srcdir)/Modules.mk
-
-default: $(build_links)
-
-$(foreach s,$(sort $(foreach m,$(build_modules),$($(m)_sources))), \
- $(eval $(notdir $(s)): ; ln -s $(srcdir)/../$(s) $@))
-
-distclean: clean
- rm -f kcompat.h
-distdir: clean
-install:
-all: default
-check: all
-clean:
- rm -f *.o *.ko *_mod.* Module.symvers *.cmd kcompat.h.new
- for d in $(build_links); do if test -h $$d; then rm $$d; fi; done
-
-ifneq ($(KSRC),)
-
-ifeq (/lib/modules/$(shell uname -r)/source, $(KSRC))
- KOBJ := /lib/modules/$(shell uname -r)/build
-else
- KOBJ := $(KSRC)
-endif
-
-VERSION_FILE := $(KOBJ)/include/linux/version.h
-ifeq (,$(wildcard $(VERSION_FILE)))
- $(error Linux kernel source not configured - missing version.h)
-endif
-
-CONFIG_FILE := $(KSRC)/include/generated/autoconf.h
-ifeq (,$(wildcard $(CONFIG_FILE)))
- CONFIG_FILE := $(KSRC)/include/linux/autoconf.h
- ifeq (,$(wildcard $(CONFIG_FILE)))
- $(error Linux kernel source not configured - missing autoconf.h)
- endif
-endif
-
-default:
- $(MAKE) -C $(KSRC) M=$(builddir) modules
-
-modules_install:
- $(MAKE) -C $(KSRC) M=$(builddir) modules_install
-endif
-
-# Much of the kernel build system in this file is derived from Intel's
-# e1000 distribution, with the following license:
-
-################################################################################
-#
-# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2007, 2009 Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# The full GNU General Public License is included in this distribution in
-# the file called "COPYING".
-#
-# Contact Information:
-# Linux NICS <linux.nics@intel.com>
-# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-#
-################################################################################
+++ /dev/null
-openvswitch_sources += \
- linux-2.6/compat-2.6/addrconf_core-openvswitch.c \
- linux-2.6/compat-2.6/dev-openvswitch.c \
- linux-2.6/compat-2.6/genetlink-openvswitch.c \
- linux-2.6/compat-2.6/ip_output-openvswitch.c \
- linux-2.6/compat-2.6/kmemdup.c \
- linux-2.6/compat-2.6/netdevice.c \
- linux-2.6/compat-2.6/skbuff-openvswitch.c \
- linux-2.6/compat-2.6/time.c
-openvswitch_headers += \
- linux-2.6/compat-2.6/include/asm-generic/bug.h \
- linux-2.6/compat-2.6/include/linux/bottom_half.h \
- linux-2.6/compat-2.6/include/linux/compiler.h \
- linux-2.6/compat-2.6/include/linux/compiler-gcc.h \
- linux-2.6/compat-2.6/include/linux/cpumask.h \
- linux-2.6/compat-2.6/include/linux/dmi.h \
- linux-2.6/compat-2.6/include/linux/err.h \
- linux-2.6/compat-2.6/include/linux/genetlink.h \
- linux-2.6/compat-2.6/include/linux/icmp.h \
- linux-2.6/compat-2.6/include/linux/icmpv6.h \
- linux-2.6/compat-2.6/include/linux/if.h \
- linux-2.6/compat-2.6/include/linux/if_arp.h \
- linux-2.6/compat-2.6/include/linux/if_ether.h \
- linux-2.6/compat-2.6/include/linux/if_vlan.h \
- linux-2.6/compat-2.6/include/linux/in.h \
- linux-2.6/compat-2.6/include/linux/inetdevice.h \
- linux-2.6/compat-2.6/include/linux/ip.h \
- linux-2.6/compat-2.6/include/linux/ipv6.h \
- linux-2.6/compat-2.6/include/linux/jiffies.h \
- linux-2.6/compat-2.6/include/linux/kernel.h \
- linux-2.6/compat-2.6/include/linux/kobject.h \
- linux-2.6/compat-2.6/include/linux/lockdep.h \
- linux-2.6/compat-2.6/include/linux/log2.h \
- linux-2.6/compat-2.6/include/linux/mutex.h \
- linux-2.6/compat-2.6/include/linux/netdevice.h \
- linux-2.6/compat-2.6/include/linux/netfilter_bridge.h \
- linux-2.6/compat-2.6/include/linux/netfilter_ipv4.h \
- linux-2.6/compat-2.6/include/linux/netlink.h \
- linux-2.6/compat-2.6/include/linux/rculist.h \
- linux-2.6/compat-2.6/include/linux/rcupdate.h \
- linux-2.6/compat-2.6/include/linux/rtnetlink.h \
- linux-2.6/compat-2.6/include/linux/skbuff.h \
- linux-2.6/compat-2.6/include/linux/slab.h \
- linux-2.6/compat-2.6/include/linux/stddef.h \
- linux-2.6/compat-2.6/include/linux/tcp.h \
- linux-2.6/compat-2.6/include/linux/timer.h \
- linux-2.6/compat-2.6/include/linux/types.h \
- linux-2.6/compat-2.6/include/linux/udp.h \
- linux-2.6/compat-2.6/include/linux/workqueue.h \
- linux-2.6/compat-2.6/include/net/checksum.h \
- linux-2.6/compat-2.6/include/net/dst.h \
- linux-2.6/compat-2.6/include/net/genetlink.h \
- linux-2.6/compat-2.6/include/net/ip.h \
- linux-2.6/compat-2.6/include/net/net_namespace.h \
- linux-2.6/compat-2.6/include/net/netlink.h \
- linux-2.6/compat-2.6/include/net/protocol.h \
- linux-2.6/compat-2.6/include/net/route.h \
- linux-2.6/compat-2.6/genetlink.inc
-
-both_modules += brcompat
-brcompat_sources = linux-2.6/compat-2.6/genetlink-brcompat.c brcompat.c
-brcompat_headers =
+++ /dev/null
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
-
-/*
- * IPv6 library code, needed by static components when full IPv6 support is
- * not configured or static.
- */
-
-#include <net/ipv6.h>
-
-#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
-
-static inline unsigned ipv6_addr_scope2type(unsigned scope)
-{
- switch(scope) {
- case IPV6_ADDR_SCOPE_NODELOCAL:
- return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
- IPV6_ADDR_LOOPBACK);
- case IPV6_ADDR_SCOPE_LINKLOCAL:
- return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL) |
- IPV6_ADDR_LINKLOCAL);
- case IPV6_ADDR_SCOPE_SITELOCAL:
- return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL) |
- IPV6_ADDR_SITELOCAL);
- }
- return IPV6_ADDR_SCOPE_TYPE(scope);
-}
-
-int __ipv6_addr_type(const struct in6_addr *addr)
-{
- __be32 st;
-
- st = addr->s6_addr32[0];
-
- /* Consider all addresses with the first three bits different of
- 000 and 111 as unicasts.
- */
- if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&
- (st & htonl(0xE0000000)) != htonl(0xE0000000))
- return (IPV6_ADDR_UNICAST |
- IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL));
-
- if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {
- /* multicast */
- /* addr-select 3.1 */
- return (IPV6_ADDR_MULTICAST |
- ipv6_addr_scope2type(IPV6_ADDR_MC_SCOPE(addr)));
- }
-
- if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))
- return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST |
- IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.1 */
- if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))
- return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST |
- IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL)); /* addr-select 3.1 */
- if ((st & htonl(0xFE000000)) == htonl(0xFC000000))
- return (IPV6_ADDR_UNICAST |
- IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* RFC 4193 */
-
- if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) {
- if (addr->s6_addr32[2] == 0) {
- if (addr->s6_addr32[3] == 0)
- return IPV6_ADDR_ANY;
-
- if (addr->s6_addr32[3] == htonl(0x00000001))
- return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST |
- IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.4 */
-
- return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST |
- IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
- }
-
- if (addr->s6_addr32[2] == htonl(0x0000ffff))
- return (IPV6_ADDR_MAPPED |
- IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
- }
-
- return (IPV6_ADDR_RESERVED |
- IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */
-}
-
-#endif /* kernel < 2.6.21 */
+++ /dev/null
-#ifndef HAVE_DEV_DISABLE_LRO
-
-#include <linux/netdevice.h>
-
-#ifdef NETIF_F_LRO
-#include <linux/ethtool.h>
-
-/**
- * dev_disable_lro - disable Large Receive Offload on a device
- * @dev: device
- *
- * Disable Large Receive Offload (LRO) on a net device. Must be
- * called under RTNL. This is needed if received packets may be
- * forwarded to another interface.
- */
-void dev_disable_lro(struct net_device *dev)
-{
- if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
- dev->ethtool_ops->set_flags) {
- u32 flags = dev->ethtool_ops->get_flags(dev);
- if (flags & ETH_FLAG_LRO) {
- flags &= ~ETH_FLAG_LRO;
- dev->ethtool_ops->set_flags(dev, flags);
- }
- }
- WARN_ON(dev->features & NETIF_F_LRO);
-}
-#else
-void dev_disable_lro(struct net_device *dev) { }
-#endif /* NETIF_F_LRO */
-
-#endif /* HAVE_DEV_DISABLE_LRO */
+++ /dev/null
-/* We fix grp->id to 32 so that it doesn't collide with any of the multicast
- * groups selected by openvswitch_mod, which uses groups 16 through 31.
- * Collision isn't fatal--multicast listeners should check that the family is
- * the one that they want and discard others--but it wastes time and memory to
- * receive unwanted messages. */
-
-#define GENL_FIRST_MCGROUP 32
-#define GENL_LAST_MCGROUP 32
-
-#include "genetlink.inc"
+++ /dev/null
-#define GENL_FIRST_MCGROUP 16
-#define GENL_LAST_MCGROUP 31
-
-#include "genetlink.inc"
+++ /dev/null
-/* -*- c -*- */
-
-#include <net/genetlink.h>
-#include <linux/version.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-#include <linux/mutex.h>
-
-static DEFINE_MUTEX(mc_group_mutex);
-
-int genl_register_mc_group(struct genl_family *family,
- struct genl_multicast_group *grp)
-{
- static int next_group = GENL_FIRST_MCGROUP;
-
- mutex_lock(&mc_group_mutex);
- grp->id = next_group;
- grp->family = family;
-
- if (++next_group > GENL_LAST_MCGROUP)
- next_group = GENL_FIRST_MCGROUP;
- mutex_unlock(&mc_group_mutex);
-
- return 0;
-}
-#endif /* kernel < 2.6.23 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
-/**
- * genl_register_family_with_ops - register a generic netlink family
- * @family: generic netlink family
- * @ops: operations to be registered
- * @n_ops: number of elements to register
- *
- * Registers the specified family and operations from the specified table.
- * Only one family may be registered with the same family name or identifier.
- *
- * The family id may equal GENL_ID_GENERATE causing an unique id to
- * be automatically generated and assigned.
- *
- * Either a doit or dumpit callback must be specified for every registered
- * operation or the function will fail. Only one operation structure per
- * command identifier may be registered.
- *
- * See include/net/genetlink.h for more documenation on the operations
- * structure.
- *
- * This is equivalent to calling genl_register_family() followed by
- * genl_register_ops() for every operation entry in the table taking
- * care to unregister the family on error path.
- *
- * Return 0 on success or a negative error code.
- */
-int genl_register_family_with_ops(struct genl_family *family,
- struct genl_ops *ops, size_t n_ops)
-{
- int err, i;
-
- err = genl_register_family(family);
- if (err)
- return err;
-
- for (i = 0; i < n_ops; ++i, ++ops) {
- err = genl_register_ops(family, ops);
- if (err)
- goto err_out;
- }
- return 0;
-err_out:
- genl_unregister_family(family);
- return err;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-/**
- * nlmsg_notify - send a notification netlink message
- * @sk: netlink socket to use
- * @skb: notification message
- * @pid: destination netlink pid for reports or 0
- * @group: destination multicast group or 0
- * @report: 1 to report back, 0 to disable
- * @flags: allocation flags
- */
-int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
- unsigned int group, int report, gfp_t flags)
-{
- int err = 0;
-
- if (group) {
- int exclude_pid = 0;
-
- if (report) {
- atomic_inc(&skb->users);
- exclude_pid = pid;
- }
-
- /* errors reported via destination sk->sk_err, but propagate
- * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
- err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
- }
-
- if (report) {
- int err2;
-
- err2 = nlmsg_unicast(sk, skb, pid);
- if (!err || err == -ESRCH)
- err = err2;
- }
-
- return err;
-}
-#endif
-
-/* This is analogous to rtnl_notify() but uses genl_sock instead of rtnl.
- *
- * This is not (yet) in any upstream kernel. */
-void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
- struct nlmsghdr *nlh, gfp_t flags)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
- struct sock *sk = net->genl_sock;
-#else
- struct sock *sk = genl_sock;
-#endif
- int report = 0;
-
- if (nlh)
- report = nlmsg_report(nlh);
-
- nlmsg_notify(sk, skb, pid, group, report, flags);
-}
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
-/* This function wasn't exported before 2.6.30. Lose! */
-void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
-{
-}
-#endif
+++ /dev/null
-#ifndef __ASM_GENERIC_BUG_WRAPPER_H
-#define __ASM_GENERIC_BUG_WRAPPER_H
-
-#include_next <asm-generic/bug.h>
-
-#ifndef WARN_ON_ONCE
-#define WARN_ON_ONCE(condition) ({ \
- static int __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once) && !__warned) { \
- WARN_ON(1); \
- __warned = 1; \
- } \
- unlikely(__ret_warn_once); \
-})
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_BH_WRAPPER_H
-#define __LINUX_BH_WRAPPER_H 1
-
-#include_next <linux/bottom_half.h>
-
-/* This is not, strictly speaking, compatibility code in the sense that it is
- * not needed by older kernels. However, it is used on kernels with the
- * realtime patchset applied to create an environment more similar to what we
- * would see on normal kernels.
- */
-
-#ifdef CONFIG_PREEMPT_HARDIRQS
-#undef local_bh_disable
-#define local_bh_disable preempt_disable
-#undef local_bh_enable
-#define local_bh_enable preempt_enable
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#include_next <linux/compiler-gcc.h>
-
-#ifndef __packed
-#define __packed __attribute__((packed))
-#endif
+++ /dev/null
-#ifndef __LINUX_COMPILER_WRAPPER_H
-#define __LINUX_COMPILER_WRAPPER_H 1
-
-#include_next <linux/compiler.h>
-
-#ifndef __percpu
-#define __percpu
-#endif
-
-#ifndef __rcu
-#define __rcu
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_CPUMASK_WRAPPER_H
-#define __LINUX_CPUMASK_WRAPPER_H
-
-#include_next <linux/cpumask.h>
-
-/* for_each_cpu was renamed for_each_possible_cpu in 2.6.18. */
-#ifndef for_each_possible_cpu
-#define for_each_possible_cpu for_each_cpu
-#endif
-
-#endif /* linux/cpumask.h wrapper */
+++ /dev/null
-#ifndef __LINUX_DMI_WRAPPER_H
-#define __LINUX_DMI_WRAPPER_H 1
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-
-#include_next <linux/dmi.h>
-
-#else /* linux version >= 2.6.23 */
-
-#ifndef __DMI_H__
-#define __DMI_H__
-
-#include <linux/list.h>
-
-enum dmi_field {
- DMI_NONE,
- DMI_BIOS_VENDOR,
- DMI_BIOS_VERSION,
- DMI_BIOS_DATE,
- DMI_SYS_VENDOR,
- DMI_PRODUCT_NAME,
- DMI_PRODUCT_VERSION,
- DMI_PRODUCT_SERIAL,
- DMI_PRODUCT_UUID,
- DMI_BOARD_VENDOR,
- DMI_BOARD_NAME,
- DMI_BOARD_VERSION,
- DMI_BOARD_SERIAL,
- DMI_BOARD_ASSET_TAG,
- DMI_CHASSIS_VENDOR,
- DMI_CHASSIS_TYPE,
- DMI_CHASSIS_VERSION,
- DMI_CHASSIS_SERIAL,
- DMI_CHASSIS_ASSET_TAG,
- DMI_STRING_MAX,
-};
-
-enum dmi_device_type {
- DMI_DEV_TYPE_ANY = 0,
- DMI_DEV_TYPE_OTHER,
- DMI_DEV_TYPE_UNKNOWN,
- DMI_DEV_TYPE_VIDEO,
- DMI_DEV_TYPE_SCSI,
- DMI_DEV_TYPE_ETHERNET,
- DMI_DEV_TYPE_TOKENRING,
- DMI_DEV_TYPE_SOUND,
- DMI_DEV_TYPE_IPMI = -1,
- DMI_DEV_TYPE_OEM_STRING = -2
-};
-
-struct dmi_header {
- u8 type;
- u8 length;
- u16 handle;
-};
-
-/*
- * DMI callbacks for problem boards
- */
-struct dmi_strmatch {
- u8 slot;
- char *substr;
-};
-
-struct dmi_system_id {
- int (*callback)(struct dmi_system_id *);
- const char *ident;
- struct dmi_strmatch matches[4];
- void *driver_data;
-};
-
-#define DMI_MATCH(a, b) { a, b }
-
-struct dmi_device {
- struct list_head list;
- int type;
- const char *name;
- void *device_data; /* Type specific data */
-};
-
-/* No CONFIG_DMI before 2.6.16 */
-#if defined(CONFIG_DMI) || defined(CONFIG_X86_32)
-
-extern int dmi_check_system(struct dmi_system_id *list);
-extern char * dmi_get_system_info(int field);
-extern struct dmi_device * dmi_find_device(int type, const char *name,
- struct dmi_device *from);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-extern void dmi_scan_machine(void);
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-extern int dmi_get_year(int field);
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
-extern int dmi_name_in_vendors(char *str);
-#endif
-
-#else
-
-static inline int dmi_check_system(struct dmi_system_id *list) { return 0; }
-static inline char * dmi_get_system_info(int field) { return NULL; }
-static inline struct dmi_device * dmi_find_device(int type, const char *name,
- struct dmi_device *from) { return NULL; }
-static inline int dmi_get_year(int year) { return 0; }
-static inline int dmi_name_in_vendors(char *s) { return 0; }
-
-#endif
-
-#endif /* __DMI_H__ */
-
-#endif /* linux kernel < 2.6.22 */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_ERR_WRAPPER_H
-#define __LINUX_ERR_WRAPPER_H 1
-
-#include_next <linux/err.h>
-
-#ifndef HAVE_ERR_CAST
-/**
- * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
- * @ptr: The pointer to cast.
- *
- * Explicitly cast an error-valued pointer to another pointer type in such a
- * way as to make it clear that's what's going on.
- */
-static inline void *ERR_CAST(const void *ptr)
-{
- /* cast away the const */
- return (void *) ptr;
-}
-#endif /* HAVE_ERR_CAST */
-
-#endif
+++ /dev/null
-#ifndef __GENETLINK_WRAPPER_H
-#define __GENETLINK_WRAPPER_H 1
-
-#include_next <linux/genetlink.h>
-
-#ifdef CONFIG_PROVE_LOCKING
-/* No version of the kernel has this function, but our locking scheme depends
- * on genl_mutex so for clarity we use it where appropriate. */
-static inline int lockdep_genl_is_held(void)
-{
- return 1;
-}
-#endif
-
-#endif /* linux/genetlink.h wrapper */
+++ /dev/null
-#ifndef __LINUX_ICMP_WRAPPER_H
-#define __LINUX_ICMP_WRAPPER_H 1
-
-#include_next <linux/icmp.h>
-
-#ifndef HAVE_SKBUFF_HEADER_HELPERS
-static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
-{
- return (struct icmphdr *)skb_transport_header(skb);
-}
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_ICMPV6_WRAPPER_H
-#define __LINUX_ICMPV6_WRAPPER_H 1
-
-#include_next <linux/icmpv6.h>
-
-#ifndef HAVE_ICMP6_HDR
-static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
-{
- return (struct icmp6hdr *)skb_transport_header(skb);
-}
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_IF_WRAPPER_H
-#define __LINUX_IF_WRAPPER_H 1
-
-#include_next <linux/if.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
-
-#define IFF_XMIT_DST_RELEASE 0
-
-#endif /* linux kernel < 2.6.31 */
-
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,36)
-#define IFF_OVS_DATAPATH IFF_BRIDGE_PORT
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
-#define IFF_OVS_DATAPATH 0 /* no-op flag */
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_IF_ARP_WRAPPER_H
-#define __LINUX_IF_ARP_WRAPPER_H 1
-
-#include_next <linux/if_arp.h>
-
-#ifndef HAVE_SKBUFF_HEADER_HELPERS
-#include <linux/skbuff.h>
-
-static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
-{
- return (struct arphdr *)skb_network_header(skb);
-}
-#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_IF_ETHER_WRAPPER_H
-#define __LINUX_IF_ETHER_WRAPPER_H 1
-
-#include_next <linux/if_ether.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-
-#define ETH_P_TEB 0x6558 /* Trans Ether Bridging */
-
-#endif /* linux kernel < 2.6.28 */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_IF_VLAN_WRAPPER_H
-#define __LINUX_IF_VLAN_WRAPPER_H 1
-
-#include_next <linux/if_vlan.h>
-#include <linux/skbuff.h>
-
-/*
- * The behavior of __vlan_put_tag() has changed over time:
- *
- * - In 2.6.26 and earlier, it adjusted both MAC and network header
- * pointers. (The latter didn't make any sense.)
- *
- * - In 2.6.27 and 2.6.28, it did not adjust any header pointers at all.
- *
- * - In 2.6.29 and later, it adjusts the MAC header pointer only.
- *
- * This is the version from 2.6.33. We unconditionally substitute this version
- * to avoid the need to guess whether the version in the kernel tree is
- * acceptable.
- */
-#define __vlan_put_tag rpl_vlan_put_tag
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
-{
- struct vlan_ethhdr *veth;
-
- if (skb_cow_head(skb, VLAN_HLEN) < 0) {
- kfree_skb(skb);
- return NULL;
- }
- veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
-
- /* Move the mac addresses to the beginning of the new header. */
- memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
- skb->mac_header -= VLAN_HLEN;
-
- /* first, the ethernet type */
- veth->h_vlan_proto = htons(ETH_P_8021Q);
-
- /* now, the TCI */
- veth->h_vlan_TCI = htons(vlan_tci);
-
- skb->protocol = htons(ETH_P_8021Q);
-
- return skb;
-}
-
-
-/* All of these were introduced in a single commit preceding 2.6.33, so
- * presumably all of them or none of them are present. */
-#ifndef VLAN_PRIO_MASK
-#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
-#define VLAN_PRIO_SHIFT 13
-#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
-#define VLAN_TAG_PRESENT VLAN_CFI_MASK
-#endif
-
-#endif /* linux/if_vlan.h wrapper */
+++ /dev/null
-#ifndef __LINUX_IN_WRAPPER_H
-#define __LINUX_IN_WRAPPER_H 1
-
-#include_next <linux/in.h>
-
-#ifndef HAVE_IPV4_IS_MULTICAST
-
-static inline bool ipv4_is_loopback(__be32 addr)
-{
- return (addr & htonl(0xff000000)) == htonl(0x7f000000);
-}
-
-static inline bool ipv4_is_multicast(__be32 addr)
-{
- return (addr & htonl(0xf0000000)) == htonl(0xe0000000);
-}
-
-static inline bool ipv4_is_local_multicast(__be32 addr)
-{
- return (addr & htonl(0xffffff00)) == htonl(0xe0000000);
-}
-
-static inline bool ipv4_is_lbcast(__be32 addr)
-{
- /* limited broadcast */
- return addr == htonl(INADDR_BROADCAST);
-}
-
-static inline bool ipv4_is_zeronet(__be32 addr)
-{
- return (addr & htonl(0xff000000)) == htonl(0x00000000);
-}
-
-#endif /* !HAVE_IPV4_IS_MULTICAST */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_INETDEVICE_WRAPPER_H
-#define __LINUX_INETDEVICE_WRAPPER_H 1
-
-#include_next <linux/inetdevice.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-
-#define inetdev_by_index(net, ifindex) \
- inetdev_by_index((ifindex))
-
-#endif /* linux kernel < 2.6.25 */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_IP_WRAPPER_H
-#define __LINUX_IP_WRAPPER_H 1
-
-#include_next <linux/ip.h>
-
-#ifndef HAVE_SKBUFF_HEADER_HELPERS
-#include <linux/skbuff.h>
-static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
-{
- return (struct iphdr *)skb_network_header(skb);
-}
-
-static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
-{
- return ip_hdr(skb)->ihl * 4;
-}
-#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_IPV6_WRAPPER_H
-#define __LINUX_IPV6_WRAPPER_H 1
-
-#include_next <linux/ipv6.h>
-
-#ifndef HAVE_SKBUFF_HEADER_HELPERS
-static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
-{
- return (struct ipv6hdr *)skb_network_header(skb);
-}
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_JIFFIES_WRAPPER_H
-#define __LINUX_JIFFIES_WRAPPER_H 1
-
-#include_next <linux/jiffies.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-
-/* Same as above, but does so with platform independent 64bit types.
- * These must be used when utilizing jiffies_64 (i.e. return value of
- * get_jiffies_64() */
-#define time_after64(a,b) \
- (typecheck(__u64, a) && \
- typecheck(__u64, b) && \
- ((__s64)(b) - (__s64)(a) < 0))
-#define time_before64(a,b) time_after64(b,a)
-
-#define time_after_eq64(a,b) \
- (typecheck(__u64, a) && \
- typecheck(__u64, b) && \
- ((__s64)(a) - (__s64)(b) >= 0))
-#define time_before_eq64(a,b) time_after_eq64(b,a)
-
-#endif /* linux kernel < 2.6.19 */
-
-#endif
+++ /dev/null
-#ifndef __KERNEL_H_WRAPPER
-#define __KERNEL_H_WRAPPER 1
-
-#include_next <linux/kernel.h>
-#ifndef HAVE_LOG2_H
-#include <linux/log2.h>
-#endif
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-#undef pr_emerg
-#define pr_emerg(fmt, ...) \
- printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#undef pr_alert
-#define pr_alert(fmt, ...) \
- printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#undef pr_crit
-#define pr_crit(fmt, ...) \
- printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#undef pr_err
-#define pr_err(fmt, ...) \
- printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#undef pr_warning
-#define pr_warning(fmt, ...) \
- printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#undef pr_notice
-#define pr_notice(fmt, ...) \
- printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#undef pr_info
-#define pr_info(fmt, ...) \
- printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-#undef pr_cont
-#define pr_cont(fmt, ...) \
- printk(KERN_CONT fmt, ##__VA_ARGS__)
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
-#define pr_warn pr_warning
-#endif
-
-#ifndef BUILD_BUG_ON_NOT_POWER_OF_2
-/* Force a compilation error if a constant expression is not a power of 2 */
-#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
- BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
-#endif
-
-#if defined(CONFIG_PREEMPT) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
-#error "CONFIG_PREEMPT is broken before 2.6.21--see commit 4498121ca3, \"[NET]: Handle disabled preemption in gfp_any()\""
-#endif
-
-#ifndef USHRT_MAX
-#define USHRT_MAX ((u16)(~0U))
-#define SHRT_MAX ((s16)(USHRT_MAX>>1))
-#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
-#endif
-
-#ifndef DIV_ROUND_UP
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#endif
-
-#endif /* linux/kernel.h */
+++ /dev/null
-#ifndef __LINUX_KOBJECT_WRAPPER_H
-#define __LINUX_KOBJECT_WRAPPER_H 1
-
-#include_next <linux/kobject.h>
-
-#include <linux/version.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-#define kobject_init(kobj, ktype) rpl_kobject_init(kobj, ktype)
-static inline void rpl_kobject_init(struct kobject *kobj, struct kobj_type *ktype)
-{
- kobj->ktype = ktype;
- (kobject_init)(kobj);
-}
-
-#define kobject_add(kobj, parent, name) rpl_kobject_add(kobj, parent, name)
-static inline int rpl_kobject_add(struct kobject *kobj,
- struct kobject *parent,
- const char *name)
-{
- int err = kobject_set_name(kobj, "%s", name);
- if (err)
- return err;
- kobj->parent = parent;
- return (kobject_add)(kobj);
-}
-#endif
-
-
-#endif /* linux/kobject.h wrapper */
+++ /dev/null
-/*
- * Runtime locking correctness validator
- *
- * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
- *
- * see Documentation/lockdep-design.txt for more details.
- */
-#ifndef __LINUX_LOCKDEP_WRAPPER_H
-#define __LINUX_LOCKDEP_WRAPPER_H
-
-#include_next <linux/lockdep.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-
-struct task_struct;
-struct lockdep_map;
-
-#ifdef CONFIG_LOCKDEP
-
-#include <linux/linkage.h>
-#include <linux/list.h>
-#include <linux/debug_locks.h>
-#include <linux/stacktrace.h>
-
-/*
- * Lock-class usage-state bits:
- */
-enum lock_usage_bit
-{
- LOCK_USED = 0,
- LOCK_USED_IN_HARDIRQ,
- LOCK_USED_IN_SOFTIRQ,
- LOCK_ENABLED_SOFTIRQS,
- LOCK_ENABLED_HARDIRQS,
- LOCK_USED_IN_HARDIRQ_READ,
- LOCK_USED_IN_SOFTIRQ_READ,
- LOCK_ENABLED_SOFTIRQS_READ,
- LOCK_ENABLED_HARDIRQS_READ,
- LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define LOCKF_USED (1 << LOCK_USED)
-#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
-#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
-#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
-#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
-
-#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
-#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
-#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
-#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
-
-#define LOCKF_ENABLED_IRQS_READ \
- (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
-#define LOCKF_USED_IN_IRQ_READ \
- (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
-
-#define MAX_LOCKDEP_SUBCLASSES 8UL
-
-/*
- * Lock-classes are keyed via unique addresses, by embedding the
- * lockclass-key into the kernel (or module) .data section. (For
- * static locks we use the lock address itself as the key.)
- */
-struct lockdep_subclass_key {
- char __one_byte;
-} __attribute__ ((__packed__));
-
-struct lock_class_key {
- struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
-};
-
-/*
- * The lock-class itself:
- */
-struct lock_class {
- /*
- * class-hash:
- */
- struct list_head hash_entry;
-
- /*
- * global list of all lock-classes:
- */
- struct list_head lock_entry;
-
- struct lockdep_subclass_key *key;
- unsigned int subclass;
-
- /*
- * IRQ/softirq usage tracking bits:
- */
- unsigned long usage_mask;
- struct stack_trace usage_traces[LOCK_USAGE_STATES];
-
- /*
- * These fields represent a directed graph of lock dependencies,
- * to every node we attach a list of "forward" and a list of
- * "backward" graph nodes.
- */
- struct list_head locks_after, locks_before;
-
- /*
- * Generation counter, when doing certain classes of graph walking,
- * to ensure that we check one node only once:
- */
- unsigned int version;
-
- /*
- * Statistics counter:
- */
- unsigned long ops;
-
- const char *name;
- int name_version;
-
-#ifdef CONFIG_LOCK_STAT
- unsigned long contention_point[4];
-#endif
-};
-
-#ifdef CONFIG_LOCK_STAT
-struct lock_time {
- s64 min;
- s64 max;
- s64 total;
- unsigned long nr;
-};
-
-enum bounce_type {
- bounce_acquired_write,
- bounce_acquired_read,
- bounce_contended_write,
- bounce_contended_read,
- nr_bounce_types,
-
- bounce_acquired = bounce_acquired_write,
- bounce_contended = bounce_contended_write,
-};
-
-struct lock_class_stats {
- unsigned long contention_point[4];
- struct lock_time read_waittime;
- struct lock_time write_waittime;
- struct lock_time read_holdtime;
- struct lock_time write_holdtime;
- unsigned long bounces[nr_bounce_types];
-};
-
-struct lock_class_stats lock_stats(struct lock_class *class);
-void clear_lock_stats(struct lock_class *class);
-#endif
-
-/*
- * Map the lock object (the lock instance) to the lock-class object.
- * This is embedded into specific lock instances:
- */
-struct lockdep_map {
- struct lock_class_key *key;
- struct lock_class *class_cache;
- const char *name;
-#ifdef CONFIG_LOCK_STAT
- int cpu;
-#endif
-};
-
-/*
- * Every lock has a list of other locks that were taken after it.
- * We only grow the list, never remove from it:
- */
-struct lock_list {
- struct list_head entry;
- struct lock_class *class;
- struct stack_trace trace;
- int distance;
-};
-
-/*
- * We record lock dependency chains, so that we can cache them:
- */
-struct lock_chain {
- struct list_head entry;
- u64 chain_key;
-};
-
-struct held_lock {
- /*
- * One-way hash of the dependency chain up to this point. We
- * hash the hashes step by step as the dependency chain grows.
- *
- * We use it for dependency-caching and we skip detection
- * passes and dependency-updates if there is a cache-hit, so
- * it is absolutely critical for 100% coverage of the validator
- * to have a unique key value for every unique dependency path
- * that can occur in the system, to make a unique hash value
- * as likely as possible - hence the 64-bit width.
- *
- * The task struct holds the current hash value (initialized
- * with zero), here we store the previous hash value:
- */
- u64 prev_chain_key;
- struct lock_class *class;
- unsigned long acquire_ip;
- struct lockdep_map *instance;
-
-#ifdef CONFIG_LOCK_STAT
- u64 waittime_stamp;
- u64 holdtime_stamp;
-#endif
- /*
- * The lock-stack is unified in that the lock chains of interrupt
- * contexts nest ontop of process context chains, but we 'separate'
- * the hashes by starting with 0 if we cross into an interrupt
- * context, and we also keep do not add cross-context lock
- * dependencies - the lock usage graph walking covers that area
- * anyway, and we'd just unnecessarily increase the number of
- * dependencies otherwise. [Note: hardirq and softirq contexts
- * are separated from each other too.]
- *
- * The following field is used to detect when we cross into an
- * interrupt context:
- */
- int irq_context;
- int trylock;
- int read;
- int check;
- int hardirqs_off;
-};
-
-/*
- * Initialization, self-test and debugging-output methods:
- */
-extern void lockdep_init(void);
-extern void lockdep_info(void);
-extern void lockdep_reset(void);
-extern void lockdep_reset_lock(struct lockdep_map *lock);
-extern void lockdep_free_key_range(void *start, unsigned long size);
-
-extern void lockdep_off(void);
-extern void lockdep_on(void);
-
-/*
- * These methods are used by specific locking variants (spinlocks,
- * rwlocks, mutexes and rwsems) to pass init/acquire/release events
- * to lockdep:
- */
-
-extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass);
-
-/*
- * Reinitialize a lock key - for cases where there is special locking or
- * special initialization of locks so that the validator gets the scope
- * of dependencies wrong: they are either too broad (they need a class-split)
- * or they are too narrow (they suffer from a false class-split):
- */
-#define lockdep_set_class(lock, key) \
- lockdep_init_map(&(lock)->dep_map, #key, key, 0)
-#define lockdep_set_class_and_name(lock, key, name) \
- lockdep_init_map(&(lock)->dep_map, name, key, 0)
-#define lockdep_set_class_and_subclass(lock, key, sub) \
- lockdep_init_map(&(lock)->dep_map, #key, key, sub)
-#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map(&(lock)->dep_map, #lock, \
- (lock)->dep_map.key, sub)
-
-/*
- * Acquire a lock.
- *
- * Values for "read":
- *
- * 0: exclusive (write) acquire
- * 1: read-acquire (no recursion allowed)
- * 2: read-acquire with same-instance recursion allowed
- *
- * Values for check:
- *
- * 0: disabled
- * 1: simple checks (freeing, held-at-exit-time, etc.)
- * 2: full validation
- */
-extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check, unsigned long ip);
-
-extern void lock_release(struct lockdep_map *lock, int nested,
- unsigned long ip);
-
-# define INIT_LOCKDEP .lockdep_recursion = 0,
-
-#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
-
-#else /* !LOCKDEP */
-
-static inline void lockdep_off(void)
-{
-}
-
-static inline void lockdep_on(void)
-{
-}
-
-# define lock_acquire(l, s, t, r, c, i) do { } while (0)
-# define lock_release(l, n, i) do { } while (0)
-# define lockdep_init() do { } while (0)
-# define lockdep_info() do { } while (0)
-# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
-# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
-# define lockdep_set_class_and_name(lock, key, name) \
- do { (void)(key); } while (0)
-#define lockdep_set_class_and_subclass(lock, key, sub) \
- do { (void)(key); } while (0)
-#define lockdep_set_subclass(lock, sub) do { } while (0)
-
-# define INIT_LOCKDEP
-# define lockdep_reset() do { debug_locks = 1; } while (0)
-# define lockdep_free_key_range(start, size) do { } while (0)
-/*
- * The class key takes no space if lockdep is disabled:
- */
-struct lock_class_key { };
-
-#define lockdep_depth(tsk) (0)
-
-#endif /* !LOCKDEP */
-
-#ifdef CONFIG_LOCK_STAT
-
-extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
-extern void lock_acquired(struct lockdep_map *lock);
-
-#define LOCK_CONTENDED(_lock, try, lock) \
-do { \
- if (!try(_lock)) { \
- lock_contended(&(_lock)->dep_map, _RET_IP_); \
- lock(_lock); \
- } \
- lock_acquired(&(_lock)->dep_map); \
-} while (0)
-
-#else /* CONFIG_LOCK_STAT */
-
-#define lock_contended(lockdep_map, ip) do {} while (0)
-#define lock_acquired(lockdep_map) do {} while (0)
-
-#define LOCK_CONTENDED(_lock, try, lock) \
- lock(_lock)
-
-#endif /* CONFIG_LOCK_STAT */
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
-extern void early_init_irq_lock_class(void);
-#else
-static inline void early_init_irq_lock_class(void)
-{
-}
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-extern void early_boot_irqs_off(void);
-extern void early_boot_irqs_on(void);
-extern void print_irqtrace_events(struct task_struct *curr);
-#else
-static inline void early_boot_irqs_off(void)
-{
-}
-static inline void early_boot_irqs_on(void)
-{
-}
-static inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
-#endif
-
-/*
- * For trivial one-depth nesting of a lock-class, the following
- * global define can be used. (Subsystems with multiple levels
- * of nesting should define their own lock-nesting subclasses.)
- */
-#define SINGLE_DEPTH_NESTING 1
-
-/*
- * Map the dependency ops to NOP or to real lockdep ops, depending
- * on the per lock-class debug mode:
- */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
-# else
-# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
-# endif
-# define spin_release(l, n, i) lock_release(l, n, i)
-#else
-# define spin_acquire(l, s, t, i) do { } while (0)
-# define spin_release(l, n, i) do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
-# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)
-# else
-# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
-# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i)
-# endif
-# define rwlock_release(l, n, i) lock_release(l, n, i)
-#else
-# define rwlock_acquire(l, s, t, i) do { } while (0)
-# define rwlock_acquire_read(l, s, t, i) do { } while (0)
-# define rwlock_release(l, n, i) do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
-# else
-# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
-# endif
-# define mutex_release(l, n, i) lock_release(l, n, i)
-#else
-# define mutex_acquire(l, s, t, i) do { } while (0)
-# define mutex_release(l, n, i) do { } while (0)
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
-# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i)
-# else
-# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
-# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i)
-# endif
-# define rwsem_release(l, n, i) lock_release(l, n, i)
-#else
-# define rwsem_acquire(l, s, t, i) do { } while (0)
-# define rwsem_acquire_read(l, s, t, i) do { } while (0)
-# define rwsem_release(l, n, i) do { } while (0)
-#endif
-
-#endif /* linux kernel < 2.6.18 */
-
-#endif /* __LINUX_LOCKDEP_WRAPPER_H */
+++ /dev/null
-#ifndef __LINUX_LOG2_WRAPPER
-#define __LINUX_LOG2_WRAPPER
-
-#ifdef HAVE_LOG2_H
-#include_next <linux/log2.h>
-#else
-/* This is very stripped down because log2.h has far too many dependencies. */
-
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-#define ilog2(n) ((n) == 4 ? 2 : \
- (n) == 8 ? 3 : \
- ____ilog2_NaN())
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_MUTEX_WRAPPER_H
-#define __LINUX_MUTEX_WRAPPER_H
-
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-
-#include <asm/semaphore.h>
-
-struct mutex {
- struct semaphore sema;
-};
-
-#define mutex_init(mutex) init_MUTEX(&(mutex)->sema)
-#define mutex_destroy(mutex) do { } while (0)
-
-#define __MUTEX_INITIALIZER(name) \
- __SEMAPHORE_INITIALIZER(name,1)
-
-#define DEFINE_MUTEX(mutexname) \
- struct mutex mutexname = { __MUTEX_INITIALIZER(mutexname.sema) }
-
-/*
- * See kernel/mutex.c for detailed documentation of these APIs.
- * Also see Documentation/mutex-design.txt.
- */
-static inline void mutex_lock(struct mutex *lock)
-{
- down(&lock->sema);
-}
-
-static inline int mutex_lock_interruptible(struct mutex *lock)
-{
- return down_interruptible(&lock->sema);
-}
-
-#define mutex_lock_nested(lock, subclass) mutex_lock(lock)
-#define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
-
-/*
- * NOTE: mutex_trylock() follows the spin_trylock() convention,
- * not the down_trylock() convention!
- */
-static inline int mutex_trylock(struct mutex *lock)
-{
- return !down_trylock(&lock->sema);
-}
-
-static inline void mutex_unlock(struct mutex *lock)
-{
- up(&lock->sema);
-}
-#else
-
-#include_next <linux/mutex.h>
-
-#endif /* linux version < 2.6.16 */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_NETDEVICE_WRAPPER_H
-#define __LINUX_NETDEVICE_WRAPPER_H 1
-
-#include_next <linux/netdevice.h>
-
-struct net;
-
-#include <linux/version.h>
-/* Before 2.6.21, struct net_device has a "struct class_device" member named
- * class_dev. Beginning with 2.6.21, struct net_device instead has a "struct
- * device" member named dev. Otherwise the usage of these members is pretty
- * much the same. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
-#define NETDEV_DEV_MEMBER class_dev
-#else
-#define NETDEV_DEV_MEMBER dev
-#endif
-
-#ifndef to_net_dev
-#define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
-static inline
-struct net *dev_net(const struct net_device *dev)
-{
-#ifdef CONFIG_NET_NS
- return dev->nd_net;
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
- return &init_net;
-#else
- return NULL;
-#endif
-}
-
-static inline
-void dev_net_set(struct net_device *dev, const struct net *net)
-{
-#ifdef CONFIG_NET_NS
- dev->nd_dev = net;
-#endif
-}
-#endif /* linux kernel < 2.6.26 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
-#define NETIF_F_NETNS_LOCAL 0
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
-#define proc_net init_net.proc_net
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
-typedef int netdev_tx_t;
-#endif
-
-#ifndef for_each_netdev
-/* Linux before 2.6.22 didn't have for_each_netdev at all. */
-#define for_each_netdev(net, d) for (d = dev_base; d; d = d->next)
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
-/* Linux 2.6.24 added a network namespace pointer to the macro. */
-#undef for_each_netdev
-#define for_each_netdev(net,d) list_for_each_entry(d, &dev_base_head, dev_list)
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
-extern void unregister_netdevice_queue(struct net_device *dev,
- struct list_head *head);
-extern void unregister_netdevice_many(struct list_head *head);
-#endif
-
-#ifndef HAVE_DEV_DISABLE_LRO
-extern void dev_disable_lro(struct net_device *dev);
-#endif
-
-/* Linux 2.6.28 introduced dev_get_stats():
- * const struct net_device_stats *dev_get_stats(struct net_device *dev);
- *
- * Linux 2.6.36 changed dev_get_stats() to:
- * struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
- * struct rtnl_link_stats64 *storage);
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
-#define dev_get_stats(dev, storage) rpl_dev_get_stats(dev, storage)
-struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *storage);
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-#define skb_checksum_help(skb) skb_checksum_help((skb), 0)
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
-static inline int netdev_rx_handler_register(struct net_device *dev,
- void *rx_handler,
- void *rx_handler_data)
-{
- if (dev->br_port)
- return -EBUSY;
- rcu_assign_pointer(dev->br_port, rx_handler_data);
- return 0;
-}
-static inline void netdev_rx_handler_unregister(struct net_device *dev)
-{
- rcu_assign_pointer(dev->br_port, NULL);
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-#undef SET_ETHTOOL_OPS
-#define SET_ETHTOOL_OPS(netdev, ops) \
- ( (netdev)->ethtool_ops = (struct ethtool_ops *)(ops) )
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
-#define dev_get_by_name(net, name) dev_get_by_name(name)
-#define dev_get_by_index(net, ifindex) dev_get_by_index(ifindex)
-#define __dev_get_by_name(net, name) __dev_get_by_name(name)
-#define __dev_get_by_index(net, ifindex) __dev_get_by_index(ifindex)
-#define dev_get_by_index_rcu(net, ifindex) dev_get_by_index_rcu(ifindex)
-#endif
-
-#ifndef HAVE_DEV_GET_BY_INDEX_RCU
-static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
-{
- struct net_device *dev;
-
- read_lock(&dev_base_lock);
- dev = __dev_get_by_index(net, ifindex);
- read_unlock(&dev_base_lock);
-
- return dev;
-}
-#endif
-
-#ifndef NETIF_F_FSO
-#define NETIF_F_FSO 0
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_NETFILTER_BRIDGE_WRAPPER_H
-#define __LINUX_NETFILTER_BRIDGE_WRAPPER_H
-
-#include_next <linux/netfilter_bridge.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-
-#include <linux/if_vlan.h>
-#include <linux/if_pppox.h>
-
-static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
-{
- switch (skb->protocol) {
- case __constant_htons(ETH_P_8021Q):
- return VLAN_HLEN;
- default:
- return 0;
- }
-}
-
-#endif /* linux version < 2.6.22 */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_NETFILTER_IPV4_WRAPPER_H
-#define __LINUX_NETFILTER_IPV4_WRAPPER_H 1
-
-#include_next <linux/netfilter_ipv4.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-
-#ifdef __KERNEL__
-
-#define NF_INET_PRE_ROUTING NF_IP_PRE_ROUTING
-#define NF_INET_POST_ROUTING NF_IP_POST_ROUTING
-#define NF_INET_FORWARD NF_IP_FORWARD
-
-#endif /* __KERNEL__ */
-
-#endif /* linux kernel < 2.6.25 */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_NETLINK_WRAPPER_H
-#define __LINUX_NETLINK_WRAPPER_H 1
-
-#include <linux/skbuff.h>
-#include_next <linux/netlink.h>
-
-#ifndef NLA_TYPE_MASK
-#define NLA_F_NESTED (1 << 15)
-#define NLA_F_NET_BYTEORDER (1 << 14)
-#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)
-#endif
-
-#include <net/netlink.h>
-#include <linux/version.h>
-
-#ifndef NLMSG_DEFAULT_SIZE
-#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-#define nlmsg_new(s, f) nlmsg_new_proper((s), (f))
-static inline struct sk_buff *nlmsg_new_proper(int size, gfp_t flags)
-{
- return alloc_skb(size, flags);
-}
-#endif /* linux kernel < 2.6.19 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
-{
- return (struct nlmsghdr *)skb->data;
-}
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_RCULIST_WRAPPER_H
-#define __LINUX_RCULIST_WRAPPER_H
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
-#include_next <linux/rculist.h>
-#else
-/* Prior to 2.6.26, the contents of rculist.h were part of list.h. */
-#include <linux/list.h>
-#endif
-
-#endif
+++ /dev/null
-#ifndef __RCUPDATE_WRAPPER_H
-#define __RCUPDATE_WRAPPER_H 1
-
-#include_next <linux/rcupdate.h>
-
-#ifndef rcu_dereference_check
-#define rcu_dereference_check(p, c) rcu_dereference(p)
-#endif
-
-#ifndef rcu_dereference_protected
-#define rcu_dereference_protected(p, c) (p)
-#endif
-
-#ifndef HAVE_RCU_READ_LOCK_HELD
-static inline int rcu_read_lock_held(void)
-{
- return 1;
-}
-#endif
-
-#endif /* linux/rcupdate.h wrapper */
+++ /dev/null
-#ifndef __RTNETLINK_WRAPPER_H
-#define __RTNETLINK_WRAPPER_H 1
-
-#include_next <linux/rtnetlink.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-static inline void rtnl_notify(struct sk_buff *skb, u32 pid, u32 group,
- struct nlmsghdr *nlh, gfp_t flags)
-{
- BUG_ON(nlh != NULL); /* not implemented */
- if (group) {
- /* errors reported via destination sk->sk_err */
- nlmsg_multicast(rtnl, skb, 0, group, flags);
- }
-}
-
-static inline void rtnl_set_sk_err(u32 group, int error)
-{
- netlink_set_err(rtnl, 0, group, error);
-}
-#endif
-
-/* No 'net' parameter in these versions. */
-#define rtnl_notify(skb, net, pid, group, nlh, flags) \
- ((void) rtnl_notify(skb, pid, group, nlh, flags))
-#define rtnl_set_sk_err(net, group, error) \
- (rtnl_set_sk_err(group, error))
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
-/* Make the return type effectively 'void' to match Linux 2.6.30+. */
-#define rtnl_notify(skb, net, pid, group, nlh, flags) \
- ((void) rtnl_notify(skb, net, pid, group, nlh, flags))
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
-static inline int rtnl_is_locked(void)
-{
- if (unlikely(rtnl_trylock())) {
- rtnl_unlock();
- return 0;
- }
-
- return 1;
-}
-
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
-#ifdef CONFIG_PROVE_LOCKING
-static inline int lockdep_rtnl_is_held(void)
-{
- return 1;
-}
-#endif
-#endif
-
-#ifndef rcu_dereference_rtnl
-/**
- * rcu_dereference_rtnl - rcu_dereference with debug checking
- * @p: The pointer to read, prior to dereferencing
- *
- * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
- * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference()
- */
-#define rcu_dereference_rtnl(p) \
- rcu_dereference_check(p, rcu_read_lock_held() || \
- lockdep_rtnl_is_held())
-#endif
-
-#ifndef rtnl_dereference
-/**
- * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
- * @p: The pointer to read, prior to dereferencing
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
- * caller holds RTNL.
- */
-#define rtnl_dereference(p) \
- rcu_dereference_protected(p, lockdep_rtnl_is_held())
-#endif
-
-#endif /* linux/rtnetlink.h wrapper */
+++ /dev/null
-#ifndef __LINUX_SKBUFF_WRAPPER_H
-#define __LINUX_SKBUFF_WRAPPER_H 1
-
-#include_next <linux/skbuff.h>
-
-#include <linux/version.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
-/* In version 2.6.24 the return type of skb_headroom() changed from 'int' to
- * 'unsigned int'. We use skb_headroom() as one arm of a min(a,b) invocation
- * in make_writable() in actions.c, so we need the correct type. */
-#define skb_headroom rpl_skb_headroom
-static inline unsigned int rpl_skb_headroom(const struct sk_buff *skb)
-{
- return skb->data - skb->head;
-}
-#endif
-
-#ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
-static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
- const int offset, void *to,
- const unsigned int len)
-{
- memcpy(to, skb->data + offset, len);
-}
-
-static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
- const int offset,
- const void *from,
- const unsigned int len)
-{
- memcpy(skb->data + offset, from, len);
-}
-
-#endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
-
-/*
- * The networking layer reserves some headroom in skb data (via
- * dev_alloc_skb). This is used to avoid having to reallocate skb data when
- * the header has to grow. In the default case, if the header has to grow
- * 16 bytes or less we avoid the reallocation.
- *
- * Unfortunately this headroom changes the DMA alignment of the resulting
- * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
- * on some architectures. An architecture can override this value,
- * perhaps setting it to a cacheline in size (since that will maintain
- * cacheline alignment of the DMA). It must be a power of 2.
- *
- * Various parts of the networking layer expect at least 16 bytes of
- * headroom, you should not reduce this.
- */
-#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 16
-#endif
-
-#ifndef HAVE_SKB_COW_HEAD
-static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
- int cloned)
-{
- int delta = 0;
-
- if (headroom < NET_SKB_PAD)
- headroom = NET_SKB_PAD;
- if (headroom > skb_headroom(skb))
- delta = headroom - skb_headroom(skb);
-
- if (delta || cloned)
- return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
- GFP_ATOMIC);
- return 0;
-}
-
-static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
-{
- return __skb_cow(skb, headroom, skb_header_cloned(skb));
-}
-#endif /* !HAVE_SKB_COW_HEAD */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-static inline int skb_clone_writable(struct sk_buff *skb, int len)
-{
- return false;
-}
-#endif
-
-#ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
-static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
-{
- return (struct dst_entry *)skb->dst;
-}
-
-static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
-{
- skb->dst = dst;
-}
-
-static inline struct rtable *skb_rtable(const struct sk_buff *skb)
-{
- return (struct rtable *)skb->dst;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-/* Emulate Linux 2.6.17 and later behavior, in which kfree_skb silently ignores
- * null pointer arguments. */
-#define kfree_skb(skb) kfree_skb_maybe_null(skb)
-static inline void kfree_skb_maybe_null(struct sk_buff *skb)
-{
- if (likely(skb != NULL))
- (kfree_skb)(skb);
-}
-#endif
-
-
-#ifndef CHECKSUM_PARTIAL
-#define CHECKSUM_PARTIAL CHECKSUM_HW
-#endif
-#ifndef CHECKSUM_COMPLETE
-#define CHECKSUM_COMPLETE CHECKSUM_HW
-#endif
-
-#ifdef HAVE_MAC_RAW
-#define mac_header mac.raw
-#define network_header nh.raw
-#define transport_header h.raw
-#endif
-
-#ifndef HAVE_SKBUFF_HEADER_HELPERS
-static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
-{
- return skb->h.raw;
-}
-
-static inline void skb_reset_transport_header(struct sk_buff *skb)
-{
- skb->h.raw = skb->data;
-}
-
-static inline void skb_set_transport_header(struct sk_buff *skb,
- const int offset)
-{
- skb->h.raw = skb->data + offset;
-}
-
-static inline unsigned char *skb_network_header(const struct sk_buff *skb)
-{
- return skb->nh.raw;
-}
-
-static inline void skb_reset_network_header(struct sk_buff *skb)
-{
- skb->nh.raw = skb->data;
-}
-
-static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
-{
- skb->nh.raw = skb->data + offset;
-}
-
-static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
-{
- return skb->mac.raw;
-}
-
-static inline void skb_reset_mac_header(struct sk_buff *skb)
-{
- skb->mac_header = skb->data;
-}
-
-static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
-{
- skb->mac.raw = skb->data + offset;
-}
-
-static inline int skb_transport_offset(const struct sk_buff *skb)
-{
- return skb_transport_header(skb) - skb->data;
-}
-
-static inline int skb_network_offset(const struct sk_buff *skb)
-{
- return skb_network_header(skb) - skb->data;
-}
-
-static inline void skb_copy_to_linear_data(struct sk_buff *skb,
- const void *from,
- const unsigned int len)
-{
- memcpy(skb->data, from, len);
-}
-#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-#warning "TSO/UFO not supported on kernels earlier than 2.6.18"
-
-static inline int skb_is_gso(const struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
- int features)
-{
- return NULL;
-}
-#endif /* before 2.6.18 */
-
-#ifndef HAVE_SKB_WARN_LRO
-#ifndef NETIF_F_LRO
-static inline bool skb_warn_if_lro(const struct sk_buff *skb)
-{
- return false;
-}
-#else
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
-
-static inline bool skb_warn_if_lro(const struct sk_buff *skb)
-{
- /* LRO sets gso_size but not gso_type, whereas if GSO is really
- * wanted then gso_type will be set. */
- struct skb_shared_info *shinfo = skb_shinfo(skb);
- if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
- __skb_warn_lro_forwarding(skb);
- return true;
- }
- return false;
-}
-#endif /* NETIF_F_LRO */
-#endif /* HAVE_SKB_WARN_LRO */
-
-#ifndef HAVE_CONSUME_SKB
-#define consume_skb kfree_skb
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_SLAB_WRAPPER_H
-#define __LINUX_SLAB_WRAPPER_H 1
-
-#include_next <linux/slab.h>
-
-#ifndef HAVE_KMEMDUP
-extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-#define kmem_cache_create(n, s, a, f, c) kmem_cache_create(n, s, a, f, c, NULL)
-#endif
-
-#endif
+++ /dev/null
-#ifndef __LINUX_STDDEF_WRAPPER_H
-#define __LINUX_STDDEF_WRAPPER_H 1
-
-#include_next <linux/stddef.h>
-
-#ifdef __KERNEL__
-
-#ifndef HAVE_BOOL_TYPE
-enum {
- false = 0,
- true = 1
-};
-#endif /* !HAVE_BOOL_TYPE */
-
-#endif /* __KERNEL__ */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_TCP_WRAPPER_H
-#define __LINUX_TCP_WRAPPER_H 1
-
-#include_next <linux/tcp.h>
-
-#ifndef HAVE_SKBUFF_HEADER_HELPERS
-static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
-{
- return (struct tcphdr *)skb_transport_header(skb);
-}
-
-static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
-{
- return tcp_hdr(skb)->doff * 4;
-}
-#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_TIMER_WRAPPER_H
-#define __LINUX_TIMER_WRAPPER_H 1
-
-#include_next <linux/timer.h>
-
-#include <linux/version.h>
-
-#ifndef RHEL_RELEASE_VERSION
-#define RHEL_RELEASE_VERSION(X,Y) ( 0 )
-#endif
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
- (!defined(RHEL_RELEASE_CODE) || \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,1))))
-
-extern unsigned long volatile jiffies;
-
-/**
- * __round_jiffies - function to round jiffies to a full second
- * @j: the time in (absolute) jiffies that should be rounded
- * @cpu: the processor number on which the timeout will happen
- *
- * __round_jiffies() rounds an absolute time in the future (in jiffies)
- * up or down to (approximately) full seconds. This is useful for timers
- * for which the exact time they fire does not matter too much, as long as
- * they fire approximately every X seconds.
- *
- * By rounding these timers to whole seconds, all such timers will fire
- * at the same time, rather than at various times spread out. The goal
- * of this is to have the CPU wake up less, which saves power.
- *
- * The exact rounding is skewed for each processor to avoid all
- * processors firing at the exact same time, which could lead
- * to lock contention or spurious cache line bouncing.
- *
- * The return value is the rounded version of the @j parameter.
- */
-static inline unsigned long __round_jiffies(unsigned long j, int cpu)
-{
- int rem;
- unsigned long original = j;
-
- /*
- * We don't want all cpus firing their timers at once hitting the
- * same lock or cachelines, so we skew each extra cpu with an extra
- * 3 jiffies. This 3 jiffies came originally from the mm/ code which
- * already did this.
- * The skew is done by adding 3*cpunr, then round, then subtract this
- * extra offset again.
- */
- j += cpu * 3;
-
- rem = j % HZ;
-
- /*
- * If the target jiffie is just after a whole second (which can happen
- * due to delays of the timer irq, long irq off times etc etc) then
- * we should round down to the whole second, not up. Use 1/4th second
- * as cutoff for this rounding as an extreme upper bound for this.
- */
- if (rem < HZ/4) /* round down */
- j = j - rem;
- else /* round up */
- j = j - rem + HZ;
-
- /* now that we have rounded, subtract the extra skew again */
- j -= cpu * 3;
-
- if (j <= jiffies) /* rounding ate our timeout entirely; */
- return original;
- return j;
-}
-
-
-/**
- * round_jiffies - function to round jiffies to a full second
- * @j: the time in (absolute) jiffies that should be rounded
- *
- * round_jiffies() rounds an absolute time in the future (in jiffies)
- * up or down to (approximately) full seconds. This is useful for timers
- * for which the exact time they fire does not matter too much, as long as
- * they fire approximately every X seconds.
- *
- * By rounding these timers to whole seconds, all such timers will fire
- * at the same time, rather than at various times spread out. The goal
- * of this is to have the CPU wake up less, which saves power.
- *
- * The return value is the rounded version of the @j parameter.
- */
-static inline unsigned long round_jiffies(unsigned long j)
-{
- return __round_jiffies(j, 0); // FIXME
-}
-
-#endif /* linux kernel < 2.6.20 */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_TYPES_WRAPPER_H
-#define __LINUX_TYPES_WRAPPER_H 1
-
-#include_next <linux/types.h>
-
-#ifndef HAVE_CSUM_TYPES
-typedef __u16 __bitwise __sum16;
-typedef __u32 __bitwise __wsum;
-#endif
-
-#ifndef HAVE_BOOL_TYPE
-typedef _Bool bool;
-#endif /* !HAVE_BOOL_TYPE */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_UDP_WRAPPER_H
-#define __LINUX_UDP_WRAPPER_H 1
-
-#include_next <linux/udp.h>
-
-#ifndef HAVE_SKBUFF_HEADER_HELPERS
-static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
-{
- return (struct udphdr *)skb_transport_header(skb);
-}
-#endif /* HAVE_SKBUFF_HEADER_HELPERS */
-
-#endif
+++ /dev/null
-#ifndef __LINUX_WORKQUEUE_WRAPPER_H
-#define __LINUX_WORKQUEUE_WRAPPER_H 1
-
-#include_next <linux/workqueue.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-
-/* Older kernels have an implementation of work queues with some very bad
- * characteristics when trying to cancel work (potential deadlocks, use after
- * free, etc. Here we directly use timers instead for delayed work. It's not
- * optimal but it is better than the alternative. Note that work queues
- * normally run in process context but this will cause them to operate in
- * softirq context.
- */
-
-#include <linux/timer.h>
-
-#undef DECLARE_DELAYED_WORK
-#define DECLARE_DELAYED_WORK(n, f) \
- struct timer_list n = TIMER_INITIALIZER((void (*)(unsigned long))f, 0, 0)
-
-#define schedule_delayed_work rpl_schedule_delayed_work
-static inline int schedule_delayed_work(struct timer_list *timer, unsigned long delay)
-{
- if (timer_pending(timer))
- return 0;
-
- mod_timer(timer, jiffies + delay);
- return 1;
-}
-
-#define cancel_delayed_work_sync rpl_cancel_delayed_work_sync
-static inline int cancel_delayed_work_sync(struct timer_list *timer)
-{
- return del_timer_sync(timer);
-}
-
-#endif /* kernel version < 2.6.23 */
-
-#endif
+++ /dev/null
-#ifndef __NET_CHECKSUM_WRAPPER_H
-#define __NET_CHECKSUM_WRAPPER_H 1
-
-#include_next <net/checksum.h>
-
-#ifndef HAVE_CSUM_UNFOLD
-static inline __wsum csum_unfold(__sum16 n)
-{
- return (__force __wsum)n;
-}
-#endif /* !HAVE_CSUM_UNFOLD */
-
-/* Workaround for debugging included in certain versions of XenServer. It only
- * applies to 32-bit x86.
- */
-#if defined(HAVE_CSUM_COPY_DBG) && defined(CONFIG_X86_32)
-#define csum_and_copy_to_user(src, dst, len, sum, err_ptr) \
- csum_and_copy_to_user(src, dst, len, sum, NULL, err_ptr)
-#endif
-
-#ifndef HAVE_CSUM_REPLACE4
-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
-{
- __be32 diff[] = { ~from, to };
-
- *sum = csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum_unfold(*sum)));
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-#define inet_proto_csum_replace2(sum, skb, from, to, pseudohdr) \
- inet_proto_csum_replace4(sum, skb, (__force __be32)(from), \
- (__force __be32)(to), pseudohdr)
-#endif
-
-#endif /* checksum.h */
+++ /dev/null
-#ifndef __NET_DST_WRAPPER_H
-#define __NET_DST_WRAPPER_H 1
-
-#include_next <net/dst.h>
-
-#ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
-
-static inline void skb_dst_drop(struct sk_buff *skb)
-{
- if (skb->dst)
- dst_release(skb_dst(skb));
- skb->dst = NULL;
-}
-
-#endif
-
-#endif
+++ /dev/null
-#ifndef __NET_GENERIC_NETLINK_WRAPPER_H
-#define __NET_GENERIC_NETLINK_WRAPPER_H 1
-
-#include <linux/version.h>
-#include <linux/netlink.h>
-#include <net/net_namespace.h>
-
-/* Very special super-nasty workaround here:
- *
- * Before 2.6.19, nlmsg_multicast() lacked a 'flags' parameter. We work
- * around that in our <net/netlink.h> replacement, so that nlmsg_multicast
- * is a macro that expands to rpl_nlmsg_multicast, which in turn has the
- * 'flags' parameter.
- *
- * However, also before 2.6.19, <net/genetlink.h> contains an inline definition
- * of genlmsg_multicast() that, of course, calls it without the 'flags'
- * parameter. This causes a build failure.
- *
- * This works around the problem by temporarily renaming both nlmsg_multicast
- * and genlmsg_multicast with a "busted_" prefix. (Nothing actually defines
- * busted_nlmsg_multicast(), so if anything actually tries to call it, then
- * we'll get a link error.)
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-#undef nlmsg_multicast
-#define nlmsg_multicast busted_nlmsg_multicast
-#define genlmsg_multicast busted_genlmsg_multicast
-extern int busted_nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
- u32 pid, unsigned int group);
-#endif /* linux kernel < v2.6.19 */
-
-#include_next <net/genetlink.h>
-
-/* Drop the "busted_" prefix described above. */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-#undef nlmsg_multicast
-#undef genlmsg_multicast
-#define nlmsg_multicast rpl_nlmsg_multicast
-#endif /* linux kernel < v2.6.19 */
-
-#include <net/net_namespace.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-
-#include <linux/genetlink.h>
-
-/**
- * struct genl_multicast_group - generic netlink multicast group
- * @name: name of the multicast group, names are per-family
- * @id: multicast group ID, assigned by the core, to use with
- * genlmsg_multicast().
- * @list: list entry for linking
- * @family: pointer to family, need not be set before registering
- */
-struct genl_multicast_group
-{
- struct genl_family *family; /* private */
- struct list_head list; /* private */
- char name[GENL_NAMSIZ];
- u32 id;
-};
-
-int genl_register_mc_group(struct genl_family *family,
- struct genl_multicast_group *grp);
-#endif /* linux kernel < 2.6.23 */
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-/**
- * genlmsg_msg_size - length of genetlink message not including padding
- * @payload: length of message payload
- */
-static inline int genlmsg_msg_size(int payload)
-{
- return GENL_HDRLEN + payload;
-}
-
-/**
- * genlmsg_total_size - length of genetlink message including padding
- * @payload: length of message payload
- */
-static inline int genlmsg_total_size(int payload)
-{
- return NLMSG_ALIGN(genlmsg_msg_size(payload));
-}
-
-#define genlmsg_multicast(s, p, g, f) \
- genlmsg_multicast_flags((s), (p), (g), (f))
-
-static inline int genlmsg_multicast_flags(struct sk_buff *skb, u32 pid,
- unsigned int group, gfp_t flags)
-{
- int err;
-
- NETLINK_CB(skb).dst_group = group;
-
- err = netlink_broadcast(genl_sock, skb, pid, group, flags);
- if (err > 0)
- err = 0;
-
- return err;
-}
-#endif /* linux kernel < 2.6.19 */
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-
-#define genlmsg_put(skb, p, seq, fam, flg, c) \
- genlmsg_put((skb), (p), (seq), (fam)->id, (fam)->hdrsize, \
- (flg), (c), (fam)->version)
-
-/**
- * genlmsg_put_reply - Add generic netlink header to a reply message
- * @skb: socket buffer holding the message
- * @info: receiver info
- * @family: generic netlink family
- * @flags: netlink message flags
- * @cmd: generic netlink command
- *
- * Returns pointer to user specific header
- */
-static inline void *genlmsg_put_reply(struct sk_buff *skb,
- struct genl_info *info, struct genl_family *family,
- int flags, u8 cmd)
-{
- return genlmsg_put(skb, info->snd_pid, info->snd_seq, family,
- flags, cmd);
-}
-
-/**
- * genlmsg_reply - reply to a request
- * @skb: netlink message to be sent back
- * @info: receiver information
- */
-static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
-{
- return genlmsg_unicast(skb, info->snd_pid);
-}
-
-/**
- * genlmsg_new - Allocate a new generic netlink message
- * @payload: size of the message payload
- * @flags: the type of memory to allocate.
- */
-static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
-{
- return nlmsg_new(genlmsg_total_size(payload), flags);
-}
-#endif /* linux kernel < 2.6.20 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
-int genl_register_family_with_ops(struct genl_family *family,
- struct genl_ops *ops, size_t n_ops);
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
-#define genl_notify(skb, net, pid, group, nlh, flags) \
- genl_notify(skb, pid, group, nlh, flags)
-#endif
-extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
- u32 group, struct nlmsghdr *nlh, gfp_t flags);
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) && \
- LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
-static inline struct net *genl_info_net(struct genl_info *info)
-{
- return &init_net;
-}
-#endif
-
-#endif /* genetlink.h */
+++ /dev/null
-#ifndef __NET_IP_WRAPPER_H
-#define __NET_IP_WRAPPER_H 1
-
-#include_next <net/ip.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-
-extern int __ip_local_out(struct sk_buff *skb);
-extern int ip_local_out(struct sk_buff *skb);
-
-#endif /* linux kernel < 2.6.25 */
-
-#endif
+++ /dev/null
-#ifndef __NET_NET_NAMESPACE_WRAPPER_H
-#define __NET_NET_NAMESPACE_WRAPPER_H 1
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
-/* <net/net_namespace.h> exists, go ahead and include it. */
-#include_next <net/net_namespace.h>
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
-#define INIT_NET_GENL_SOCK init_net.genl_sock
-#else
-#define INIT_NET_GENL_SOCK genl_sock
-#endif
-
-#endif /* net/net_namespace.h wrapper */
+++ /dev/null
-#ifndef __NET_NETLINK_WRAPPER_H
-#define __NET_NETLINK_WRAPPER_H 1
-
-#include <linux/version.h>
-#include_next <net/netlink.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
-/* Before v2.6.29, a NLA_NESTED attribute, if it was present, was not allowed
- * to be empty. However, OVS depends on the ability to accept empty
- * attributes. For example, a present but empty ODP_FLOW_ATTR_ACTIONS on
- * ODP_FLOW_CMD_SET replaces the existing set of actions by an empty "drop"
- * action, whereas a missing ODP_FLOW_ATTR_ACTIONS leaves the existing
- * actions, if any, unchanged.
- *
- * NLA_NESTED is different from NLA_UNSPEC in only two ways:
- *
- * - If the size of the nested attributes is zero, no further size checks
- * are performed.
- *
- * - If the size of the nested attributes is not zero and no length
- * parameter is specified the minimum size of nested attributes is
- * NLA_HDRLEN.
- *
- * nla_parse_nested() validates that there is at least enough space for
- * NLA_HDRLEN, so neither of these conditions are important, and we might
- * as well use NLA_UNSPEC with old kernels.
- */
-#undef NLA_NESTED
-#define NLA_NESTED NLA_UNSPEC
-#endif
-
-#ifndef NLA_PUT_BE16
-#define NLA_PUT_BE16(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be16, attrtype, value)
-#endif /* !NLA_PUT_BE16 */
-
-#ifndef NLA_PUT_BE32
-#define NLA_PUT_BE32(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be32, attrtype, value)
-#endif /* !NLA_PUT_BE32 */
-
-#ifndef NLA_PUT_BE64
-#define NLA_PUT_BE64(skb, attrtype, value) \
- NLA_PUT_TYPE(skb, __be64, attrtype, value)
-#endif /* !NLA_PUT_BE64 */
-
-#ifndef HAVE_NLA_GET_BE16
-/**
- * nla_get_be16 - return payload of __be16 attribute
- * @nla: __be16 netlink attribute
- */
-static inline __be16 nla_get_be16(const struct nlattr *nla)
-{
- return *(__be16 *) nla_data(nla);
-}
-#endif /* !HAVE_NLA_GET_BE16 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-/**
- * nla_get_be32 - return payload of __be32 attribute
- * @nla: __be32 netlink attribute
- */
-static inline __be32 nla_get_be32(const struct nlattr *nla)
-{
- return *(__be32 *) nla_data(nla);
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
-/* These functions' nlattr source arguments weren't "const" before 2.6.29, so
- * cast their arguments to the non-"const" versions. Using macros for this
- * isn't exactly a brilliant idea, but it seems less error-prone than copying
- * the definitions of all umpteen functions. */
-#define nla_get_u64(nla) (nla_get_u64) ((struct nlattr *) (nla))
-#define nla_get_u32(nla) (nla_get_u32) ((struct nlattr *) (nla))
-#define nla_get_u16(nla) (nla_get_u16) ((struct nlattr *) (nla))
-#define nla_get_u8(nla) (nla_get_u8) ((struct nlattr *) (nla))
-/* nla_get_be64 is handled separately below. */
-#define nla_get_be32(nla) (nla_get_be32) ((struct nlattr *) (nla))
-#define nla_get_be16(nla) (nla_get_be16) ((struct nlattr *) (nla))
-#define nla_get_be8(nla) (nla_get_be8) ((struct nlattr *) (nla))
-#define nla_get_flag(nla) (nla_get_flag) ((struct nlattr *) (nla))
-#define nla_get_msecs(nla) (nla_get_msecs)((struct nlattr *) (nla))
-#define nla_memcpy(dst, src, count) \
- (nla_memcpy)(dst, (struct nlattr *)(src), count)
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
-/* This function was introduced in 2.6.31, but initially it performed an
- * unaligned access, so we replace it up to 2.6.34 where it was fixed. */
-#define nla_get_be64 rpl_nla_get_be64
-static inline __be64 nla_get_be64(const struct nlattr *nla)
-{
- __be64 tmp;
-
- /* The additional cast is necessary because */
- nla_memcpy(&tmp, (struct nlattr *) nla, sizeof(tmp));
-
- return tmp;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
-/**
- * nla_type - attribute type
- * @nla: netlink attribute
- */
-static inline int nla_type(const struct nlattr *nla)
-{
- return nla->nla_type & NLA_TYPE_MASK;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-#define nla_parse_nested(tb, maxtype, nla, policy) \
- nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), (struct nla_policy *)(policy))
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
-#define nla_parse_nested(tb, maxtype, nla, policy) \
- nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), policy)
-#endif
-
-#ifndef nla_for_each_nested
-#define nla_for_each_nested(pos, nla, rem) \
- nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
-#endif
-
-#ifndef HAVE_NLA_FIND_NESTED
-static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype)
-{
- return nla_find(nla_data(nla), nla_len(nla), attrtype);
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-/**
- * nlmsg_report - need to report back to application?
- * @nlh: netlink message header
- *
- * Returns 1 if a report back to the application is requested.
- */
-static inline int nlmsg_report(const struct nlmsghdr *nlh)
-{
- return !!(nlh->nlmsg_flags & NLM_F_ECHO);
-}
-
-extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb,
- u32 pid, unsigned int group, int report,
- gfp_t flags);
-#endif /* linux kernel < 2.6.19 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-/* Before 2.6.19 the 'flags' parameter was missing, so replace it. We have to
- * #include <net/genetlink.h> first because the 2.6.18 version of that header
- * has an inline call to nlmsg_multicast() without, of course, any 'flags'
- * argument. */
-#define nlmsg_multicast rpl_nlmsg_multicast
-static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
- u32 pid, unsigned int group, gfp_t flags)
-{
- int err;
-
- NETLINK_CB(skb).dst_group = group;
-
- err = netlink_broadcast(sk, skb, pid, group, flags);
- if (err > 0)
- err = 0;
-
- return err;
-}
-#endif /* linux kernel < 2.6.19 */
-
-#endif /* net/netlink.h */
+++ /dev/null
-#ifndef __NET_PROTOCOL_WRAPPER_H
-#define __NET_PROTOCOL_WRAPPER_H 1
-
-#include_next <net/protocol.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
-#define inet_add_protocol(prot, num) inet_add_protocol((struct net_protocol *)(prot), num)
-#define inet_del_protocol(prot, num) inet_del_protocol((struct net_protocol *)(prot), num)
-#endif
-
-#endif
+++ /dev/null
-#ifndef __NET_ROUTE_WRAPPER_H
-#define __NET_ROUTE_WRAPPER_H 1
-
-#include_next <net/route.h>
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-
-#define ip_route_output_key(net, rp, flp) \
- ip_route_output_key((rp), (flp))
-
-#endif /* linux kernel < 2.6.25 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
-static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
-{
- return dst_metric(dst, RTAX_HOPLIMIT);
-}
-#endif
-
-#endif
+++ /dev/null
-#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-
-#include <linux/netfilter_ipv4.h>
-#include <net/ip.h>
-
-int __ip_local_out(struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = htons(skb->len);
- ip_send_check(iph);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
- return nf_hook(PF_INET, NF_IP_LOCAL_OUT, &skb, NULL, skb->dst->dev,
- dst_output);
-#else
- return nf_hook(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dst->dev,
- dst_output);
-#endif /* kernel < 2.6.24 */
-}
-
-int ip_local_out(struct sk_buff *skb)
-{
- int err;
-
- err = __ip_local_out(skb);
- if (likely(err == 1))
- err = dst_output(skb);
-
- return err;
-}
-
-#endif /* kernel < 2.6.25 */
+++ /dev/null
-#ifndef HAVE_KMEMDUP
-
-#include <linux/slab.h>
-#include <linux/string.h>
-
-/**
- * kmemdup - duplicate region of memory
- *
- * @src: memory region to duplicate
- * @len: memory region length
- * @gfp: GFP mask to use
- */
-void *kmemdup(const void *src, size_t len, gfp_t gfp)
-{
- void *p;
-
- p = kmalloc(len, gfp);
- if (p)
- memcpy(p, src, len);
- return p;
-}
-#endif
+++ /dev/null
-#include <linux/if_link.h>
-#include <linux/netdevice.h>
-
-/* Linux 2.6.28 introduced dev_get_stats():
- * const struct net_device_stats *dev_get_stats(struct net_device *dev);
- *
- * Linux 2.6.36 changed dev_get_stats() to:
- * struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
- * struct rtnl_link_stats64 *storage);
- */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
-struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *storage)
-{
- const struct net_device_stats *stats;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
- stats = dev->get_stats(dev);
-#else /* 2.6.28 < kernel version < 2.6.36 */
- stats = (dev_get_stats)(dev);
-#endif /* 2.6.28 < kernel version < 2.6.36 */
-
- storage->rx_packets = stats->rx_packets;
- storage->tx_packets = stats->tx_packets;
- storage->rx_bytes = stats->rx_bytes;
- storage->tx_bytes = stats->tx_bytes;
- storage->rx_errors = stats->rx_errors;
- storage->tx_errors = stats->tx_errors;
- storage->rx_dropped = stats->rx_dropped;
- storage->tx_dropped = stats->tx_dropped;
- storage->multicast = stats->multicast;
- storage->collisions = stats->collisions;
- storage->rx_length_errors = stats->rx_length_errors;
- storage->rx_over_errors = stats->rx_over_errors;
- storage->rx_crc_errors = stats->rx_crc_errors;
- storage->rx_frame_errors = stats->rx_frame_errors;
- storage->rx_fifo_errors = stats->rx_fifo_errors;
- storage->rx_missed_errors = stats->rx_missed_errors;
- storage->tx_aborted_errors = stats->tx_aborted_errors;
- storage->tx_carrier_errors = stats->tx_carrier_errors;
- storage->tx_fifo_errors = stats->tx_fifo_errors;
- storage->tx_heartbeat_errors = stats->tx_heartbeat_errors;
- storage->tx_window_errors = stats->tx_window_errors;
- storage->rx_compressed = stats->rx_compressed;
- storage->tx_compressed = stats->tx_compressed;
-
- return storage;
-}
-#endif /* kernel version < 2.6.36 */
+++ /dev/null
-#if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/netdevice.h>
-
-void __skb_warn_lro_forwarding(const struct sk_buff *skb)
-{
- if (net_ratelimit())
- pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
- skb->dev->name);
-}
-
-#endif
+++ /dev/null
-#include <linux/time.h>
-
-#include <linux/version.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
-
-/* "set_normalized_timespec" is defined but not exported in kernels
- * before 2.6.26. */
-
-/**
- * set_normalized_timespec - set timespec sec and nsec parts and normalize
- *
- * @ts: pointer to timespec variable to be set
- * @sec: seconds to set
- * @nsec: nanoseconds to set
- *
- * Set seconds and nanoseconds field of a timespec variable and
- * normalize to the timespec storage format
- *
- * Note: The tv_nsec part is always in the range of
- * 0 <= tv_nsec < NSEC_PER_SEC
- * For negative values only the tv_sec field is negative !
- */
-void set_normalized_timespec(struct timespec *ts,
- time_t sec, long nsec)
-{
- while (nsec >= NSEC_PER_SEC) {
- nsec -= NSEC_PER_SEC;
- ++sec;
- }
- while (nsec < 0) {
- nsec += NSEC_PER_SEC;
- --sec;
- }
- ts->tv_sec = sec;
- ts->tv_nsec = nsec;
-}
-
-#endif /* linux kernel < 2.6.26 */
--- /dev/null
+/Kbuild
+/Makefile
+/Makefile.main
+/Module.markers
+/actions.c
+/addrconf_core-openvswitch.c
+/brc_sysfs_dp.c
+/brc_sysfs_if.c
+/brcompat.c
+/checksum.c
+/dev-openvswitch.c
+/dp_sysfs_dp.c
+/dp_sysfs_if.c
+/datapath.c
+/dp_dev.c
+/dp_notify.c
+/flow.c
+/genetlink-brcompat.c
+/genetlink-openvswitch.c
+/ip_output-openvswitch.c
+/kcompat.h
+/kmemdup.c
+/loop_counter.c
+/modules.order
+/netdevice.c
+/random32.c
+/skbuff-openvswitch.c
+/table.c
+/time.c
+/tmp
+/tunnel.c
+/vlan.c
+/vport-capwap.c
+/vport-generic.c
+/vport-gre.c
+/vport-internal_dev.c
+/vport-netdev.c
+/vport-patch.c
+/vport.c
--- /dev/null
+# -*- makefile -*-
+export builddir = @abs_builddir@
+export srcdir = @abs_srcdir@
+export top_srcdir = @abs_top_srcdir@
+export VERSION = @VERSION@
+export BUILDNR = @BUILDNR@
+
+include $(srcdir)/../Modules.mk
+include $(srcdir)/Modules.mk
+
+EXTRA_CFLAGS := -DVERSION=\"$(VERSION)\"
+EXTRA_CFLAGS += -I$(srcdir)/..
+EXTRA_CFLAGS += -I$(builddir)/..
+ifeq '$(BUILDNR)' '0'
+EXTRA_CFLAGS += -DBUILDNR=\"\"
+else
+EXTRA_CFLAGS += -DBUILDNR=\"+build$(BUILDNR)\"
+endif
+EXTRA_CFLAGS += -g
+EXTRA_CFLAGS += -include $(builddir)/kcompat.h
+
+# These include directories have to go before -I$(KSRC)/include.
+# NOSTDINC_FLAGS just happens to be a variable that goes in the
+# right place, even though it's conceptually incorrect.
+NOSTDINC_FLAGS += -I$(top_srcdir)/include -I$(srcdir)/compat -I$(srcdir)/compat/include
+
+obj-m := $(patsubst %,%_mod.o,$(build_modules))
+
+define module_template
+$(1)_mod-y = $$(notdir $$(patsubst %.c,%.o,$($(1)_sources)))
+endef
+
+$(foreach module,$(build_modules),$(eval $(call module_template,$(module))))
--- /dev/null
+ifeq ($(KERNELRELEASE),)
+# We're being called directly by running make in this directory.
+include Makefile.main
+else
+# We're being included by the Linux kernel build system
+include Kbuild
+endif
+
+
--- /dev/null
+# -*- makefile -*-
+export builddir = @abs_builddir@
+export srcdir = @abs_srcdir@
+export top_srcdir = @abs_top_srcdir@
+export KSRC = @KBUILD@
+export VERSION = @VERSION@
+
+include $(srcdir)/../Modules.mk
+include $(srcdir)/Modules.mk
+
+default: $(build_links)
+
+$(foreach s,$(sort $(foreach m,$(build_modules),$($(m)_sources))), \
+ $(eval $(notdir $(s)): ; ln -s $(srcdir)/../$(s) $@))
+
+distclean: clean
+ rm -f kcompat.h
+distdir: clean
+install:
+all: default
+check: all
+clean:
+ rm -f *.o *.ko *_mod.* Module.symvers *.cmd kcompat.h.new
+ for d in $(build_links); do if test -h $$d; then rm $$d; fi; done
+
+ifneq ($(KSRC),)
+
+ifeq (/lib/modules/$(shell uname -r)/source, $(KSRC))
+ KOBJ := /lib/modules/$(shell uname -r)/build
+else
+ KOBJ := $(KSRC)
+endif
+
+VERSION_FILE := $(KOBJ)/include/linux/version.h
+ifeq (,$(wildcard $(VERSION_FILE)))
+ $(error Linux kernel source not configured - missing version.h)
+endif
+
+CONFIG_FILE := $(KSRC)/include/generated/autoconf.h
+ifeq (,$(wildcard $(CONFIG_FILE)))
+ CONFIG_FILE := $(KSRC)/include/linux/autoconf.h
+ ifeq (,$(wildcard $(CONFIG_FILE)))
+ $(error Linux kernel source not configured - missing autoconf.h)
+ endif
+endif
+
+default:
+ $(MAKE) -C $(KSRC) M=$(builddir) modules
+
+modules_install:
+ $(MAKE) -C $(KSRC) M=$(builddir) modules_install
+endif
+
+# Much of the kernel build system in this file is derived from Intel's
+# e1000 distribution, with the following license:
+
+################################################################################
+#
+# Intel PRO/1000 Linux driver
+# Copyright(c) 1999 - 2007, 2009 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
--- /dev/null
+openvswitch_sources += \
+ linux/compat/addrconf_core-openvswitch.c \
+ linux/compat/dev-openvswitch.c \
+ linux/compat/genetlink-openvswitch.c \
+ linux/compat/ip_output-openvswitch.c \
+ linux/compat/kmemdup.c \
+ linux/compat/netdevice.c \
+ linux/compat/skbuff-openvswitch.c \
+ linux/compat/time.c
+openvswitch_headers += \
+ linux/compat/include/asm-generic/bug.h \
+ linux/compat/include/linux/bottom_half.h \
+ linux/compat/include/linux/compiler.h \
+ linux/compat/include/linux/compiler-gcc.h \
+ linux/compat/include/linux/cpumask.h \
+ linux/compat/include/linux/dmi.h \
+ linux/compat/include/linux/err.h \
+ linux/compat/include/linux/genetlink.h \
+ linux/compat/include/linux/icmp.h \
+ linux/compat/include/linux/icmpv6.h \
+ linux/compat/include/linux/if.h \
+ linux/compat/include/linux/if_arp.h \
+ linux/compat/include/linux/if_ether.h \
+ linux/compat/include/linux/if_vlan.h \
+ linux/compat/include/linux/in.h \
+ linux/compat/include/linux/inetdevice.h \
+ linux/compat/include/linux/ip.h \
+ linux/compat/include/linux/ipv6.h \
+ linux/compat/include/linux/jiffies.h \
+ linux/compat/include/linux/kernel.h \
+ linux/compat/include/linux/kobject.h \
+ linux/compat/include/linux/lockdep.h \
+ linux/compat/include/linux/log2.h \
+ linux/compat/include/linux/mutex.h \
+ linux/compat/include/linux/netdevice.h \
+ linux/compat/include/linux/netfilter_bridge.h \
+ linux/compat/include/linux/netfilter_ipv4.h \
+ linux/compat/include/linux/netlink.h \
+ linux/compat/include/linux/rculist.h \
+ linux/compat/include/linux/rcupdate.h \
+ linux/compat/include/linux/rtnetlink.h \
+ linux/compat/include/linux/skbuff.h \
+ linux/compat/include/linux/slab.h \
+ linux/compat/include/linux/stddef.h \
+ linux/compat/include/linux/tcp.h \
+ linux/compat/include/linux/timer.h \
+ linux/compat/include/linux/types.h \
+ linux/compat/include/linux/udp.h \
+ linux/compat/include/linux/workqueue.h \
+ linux/compat/include/net/checksum.h \
+ linux/compat/include/net/dst.h \
+ linux/compat/include/net/genetlink.h \
+ linux/compat/include/net/ip.h \
+ linux/compat/include/net/net_namespace.h \
+ linux/compat/include/net/netlink.h \
+ linux/compat/include/net/protocol.h \
+ linux/compat/include/net/route.h \
+ linux/compat/genetlink.inc
+
+both_modules += brcompat
+brcompat_sources = linux/compat/genetlink-brcompat.c brcompat.c
+brcompat_headers =
--- /dev/null
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
+
+/*
+ * IPv6 library code, needed by static components when full IPv6 support is
+ * not configured or static.
+ */
+
+#include <net/ipv6.h>
+
+#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
+
+static inline unsigned ipv6_addr_scope2type(unsigned scope)
+{
+ switch(scope) {
+ case IPV6_ADDR_SCOPE_NODELOCAL:
+ return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
+ IPV6_ADDR_LOOPBACK);
+ case IPV6_ADDR_SCOPE_LINKLOCAL:
+ return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL) |
+ IPV6_ADDR_LINKLOCAL);
+ case IPV6_ADDR_SCOPE_SITELOCAL:
+ return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL) |
+ IPV6_ADDR_SITELOCAL);
+ }
+ return IPV6_ADDR_SCOPE_TYPE(scope);
+}
+
+int __ipv6_addr_type(const struct in6_addr *addr)
+{
+ __be32 st;
+
+ st = addr->s6_addr32[0];
+
+ /* Consider all addresses with the first three bits different of
+ 000 and 111 as unicasts.
+ */
+ if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&
+ (st & htonl(0xE0000000)) != htonl(0xE0000000))
+ return (IPV6_ADDR_UNICAST |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL));
+
+ if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {
+ /* multicast */
+ /* addr-select 3.1 */
+ return (IPV6_ADDR_MULTICAST |
+ ipv6_addr_scope2type(IPV6_ADDR_MC_SCOPE(addr)));
+ }
+
+ if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))
+ return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.1 */
+ if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))
+ return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL)); /* addr-select 3.1 */
+ if ((st & htonl(0xFE000000)) == htonl(0xFC000000))
+ return (IPV6_ADDR_UNICAST |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* RFC 4193 */
+
+ if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) {
+ if (addr->s6_addr32[2] == 0) {
+ if (addr->s6_addr32[3] == 0)
+ return IPV6_ADDR_ANY;
+
+ if (addr->s6_addr32[3] == htonl(0x00000001))
+ return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.4 */
+
+ return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
+ }
+
+ if (addr->s6_addr32[2] == htonl(0x0000ffff))
+ return (IPV6_ADDR_MAPPED |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
+ }
+
+ return (IPV6_ADDR_RESERVED |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */
+}
+
+#endif /* kernel < 2.6.21 */
--- /dev/null
+#ifndef HAVE_DEV_DISABLE_LRO
+
+#include <linux/netdevice.h>
+
+#ifdef NETIF_F_LRO
+#include <linux/ethtool.h>
+
+/**
+ * dev_disable_lro - disable Large Receive Offload on a device
+ * @dev: device
+ *
+ * Disable Large Receive Offload (LRO) on a net device. Must be
+ * called under RTNL. This is needed if received packets may be
+ * forwarded to another interface.
+ */
+void dev_disable_lro(struct net_device *dev)
+{
+	/* Clear the LRO flag through the device's ethtool operations, if the
+	 * driver exposes both the getter and the setter. */
+	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
+	    dev->ethtool_ops->set_flags) {
+		u32 flags = dev->ethtool_ops->get_flags(dev);
+		if (flags & ETH_FLAG_LRO) {
+			flags &= ~ETH_FLAG_LRO;
+			dev->ethtool_ops->set_flags(dev, flags);
+		}
+	}
+	/* Complain if LRO is still advertised in the feature flags, e.g.
+	 * because the driver offers no way to toggle it via ethtool. */
+	WARN_ON(dev->features & NETIF_F_LRO);
+}
+#else
+/* Headers define no LRO feature bit, so there is nothing to turn off;
+ * keep the symbol so callers need no conditional compilation. */
+void dev_disable_lro(struct net_device *dev) { }
+#endif /* NETIF_F_LRO */
+
+#endif /* HAVE_DEV_DISABLE_LRO */
--- /dev/null
+/* We fix grp->id to 32 so that it doesn't collide with any of the multicast
+ * groups selected by openvswitch_mod, which uses groups 16 through 31.
+ * Collision isn't fatal--multicast listeners should check that the family is
+ * the one that they want and discard others--but it wastes time and memory to
+ * receive unwanted messages. */
+
+#define GENL_FIRST_MCGROUP 32
+#define GENL_LAST_MCGROUP 32
+
+#include "genetlink.inc"
--- /dev/null
+/* openvswitch_mod uses multicast groups 16 through 31.  (Its companion
+ * brcompat_mod pins its single group at 32 to stay clear of this range.) */
+#define GENL_FIRST_MCGROUP 16
+#define GENL_LAST_MCGROUP 31
+
+#include "genetlink.inc"
--- /dev/null
+/* -*- c -*- */
+
+#include <net/genetlink.h>
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+#include <linux/mutex.h>
+
+/* Serializes access to next_group below. */
+static DEFINE_MUTEX(mc_group_mutex);
+
+/* Compatibility replacement for genl_register_mc_group(), which kernels
+ * before 2.6.23 lack.  Assigns @grp an id from the compile-time range
+ * [GENL_FIRST_MCGROUP, GENL_LAST_MCGROUP], cycling round-robin.  Never
+ * fails; if more groups are registered than the range holds, ids are
+ * reused, so listeners must tolerate multicast-group collisions. */
+int genl_register_mc_group(struct genl_family *family,
+			   struct genl_multicast_group *grp)
+{
+	/* Next id to hand out; persists across calls. */
+	static int next_group = GENL_FIRST_MCGROUP;
+
+	mutex_lock(&mc_group_mutex);
+	grp->id = next_group;
+	grp->family = family;
+
+	/* Wrap around once the last id in the range has been used. */
+	if (++next_group > GENL_LAST_MCGROUP)
+		next_group = GENL_FIRST_MCGROUP;
+	mutex_unlock(&mc_group_mutex);
+
+	return 0;
+}
+#endif /* kernel < 2.6.23 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
+/**
+ * genl_register_family_with_ops - register a generic netlink family
+ * @family: generic netlink family
+ * @ops: operations to be registered
+ * @n_ops: number of elements to register
+ *
+ * Registers the specified family and operations from the specified table.
+ * Only one family may be registered with the same family name or identifier.
+ *
+ * The family id may equal GENL_ID_GENERATE causing an unique id to
+ * be automatically generated and assigned.
+ *
+ * Either a doit or dumpit callback must be specified for every registered
+ * operation or the function will fail. Only one operation structure per
+ * command identifier may be registered.
+ *
+ * See include/net/genetlink.h for more documenation on the operations
+ * structure.
+ *
+ * This is equivalent to calling genl_register_family() followed by
+ * genl_register_ops() for every operation entry in the table taking
+ * care to unregister the family on error path.
+ *
+ * Return 0 on success or a negative error code.
+ */
+int genl_register_family_with_ops(struct genl_family *family,
+	struct genl_ops *ops, size_t n_ops)
+{
+	int err, i;
+
+	err = genl_register_family(family);
+	if (err)
+		return err;
+
+	/* Register each op in turn; bail out on the first failure. */
+	for (i = 0; i < n_ops; ++i, ++ops) {
+		err = genl_register_ops(family, ops);
+		if (err)
+			goto err_out;
+	}
+	return 0;
+err_out:
+	/* Roll back the family registration on failure, per the kernel-doc
+	 * above. */
+	genl_unregister_family(family);
+	return err;
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/**
+ * nlmsg_notify - send a notification netlink message
+ * @sk: netlink socket to use
+ * @skb: notification message
+ * @pid: destination netlink pid for reports or 0
+ * @group: destination multicast group or 0
+ * @report: 1 to report back, 0 to disable
+ * @flags: allocation flags
+ *
+ * Backport of the helper introduced in 2.6.19.  Returns 0 or a negative
+ * errno from the multicast or unicast delivery.
+ */
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
+		 unsigned int group, int report, gfp_t flags)
+{
+	int err = 0;
+
+	if (group) {
+		int exclude_pid = 0;
+
+		if (report) {
+			/* The skb is consumed by both the multicast below
+			 * and the unicast report, so take an extra
+			 * reference, and do not echo the message back to
+			 * the requester via the multicast path. */
+			atomic_inc(&skb->users);
+			exclude_pid = pid;
+		}
+
+		/* errors reported via destination sk->sk_err, but propagate
+		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
+		err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
+	}
+
+	if (report) {
+		int err2;
+
+		/* -ESRCH from the multicast (no listeners) is not fatal;
+		 * prefer the unicast result in that case. */
+		err2 = nlmsg_unicast(sk, skb, pid);
+		if (!err || err == -ESRCH)
+			err = err2;
+	}
+
+	return err;
+}
+#endif
+
+/* This is analogous to rtnl_notify() but uses genl_sock instead of rtnl.
+ *
+ * This is not (yet) in any upstream kernel.
+ *
+ * @skb: notification message
+ * @net: network namespace (used on 2.6.32+, where genl_sock is per-netns)
+ * @pid: destination netlink pid for reports or 0
+ * @group: destination multicast group or 0
+ * @nlh: netlink header of the request being reported on, or NULL
+ * @flags: allocation flags */
+void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+		 struct nlmsghdr *nlh, gfp_t flags)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+	struct sock *sk = net->genl_sock;
+#else
+	struct sock *sk = genl_sock;
+#endif
+	int report = 0;
+
+	/* Echo the notification back to the requester only if the request
+	 * asked for a report (see nlmsg_report()). */
+	if (nlh)
+		report = nlmsg_report(nlh);
+
+	nlmsg_notify(sk, skb, pid, group, report, flags);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
+/* This function wasn't exported before 2.6.30. Lose! */
+/* Do-nothing stub: on older kernels broadcast delivery errors are simply
+ * not propagated to listeners. */
+void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
+{
+}
+#endif
--- /dev/null
+#ifndef __ASM_GENERIC_BUG_WRAPPER_H
+#define __ASM_GENERIC_BUG_WRAPPER_H
+
+#include_next <asm-generic/bug.h>
+
+#ifndef WARN_ON_ONCE
+/* Backport of WARN_ON_ONCE for kernels whose <asm-generic/bug.h> lacks it:
+ * like WARN_ON(), but fires at most once per call site (the static flag
+ * gives each macro expansion its own state).  Evaluates to the condition's
+ * truth value, like the upstream macro. */
+#define WARN_ON_ONCE(condition) ({ \
+	static int __warned; \
+	int __ret_warn_once = !!(condition); \
+ \
+	if (unlikely(__ret_warn_once) && !__warned) { \
+		WARN_ON(1); \
+		__warned = 1; \
+	} \
+	unlikely(__ret_warn_once); \
+})
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_BH_WRAPPER_H
+#define __LINUX_BH_WRAPPER_H 1
+
+#include_next <linux/bottom_half.h>
+
+/* This is not, strictly speaking, compatibility code in the sense that it is
+ * not needed by older kernels. However, it is used on kernels with the
+ * realtime patchset applied to create an environment more similar to what we
+ * would see on normal kernels.
+ */
+
+#ifdef CONFIG_PREEMPT_HARDIRQS
+#undef local_bh_disable
+#define local_bh_disable preempt_disable
+#undef local_bh_enable
+#define local_bh_enable preempt_enable
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#include_next <linux/compiler-gcc.h>
+
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
--- /dev/null
+#ifndef __LINUX_COMPILER_WRAPPER_H
+#define __LINUX_COMPILER_WRAPPER_H 1
+
+#include_next <linux/compiler.h>
+
+#ifndef __percpu
+#define __percpu
+#endif
+
+#ifndef __rcu
+#define __rcu
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_CPUMASK_WRAPPER_H
+#define __LINUX_CPUMASK_WRAPPER_H
+
+#include_next <linux/cpumask.h>
+
+/* for_each_cpu was renamed for_each_possible_cpu in 2.6.18. */
+#ifndef for_each_possible_cpu
+#define for_each_possible_cpu for_each_cpu
+#endif
+
+#endif /* linux/cpumask.h wrapper */
--- /dev/null
+#ifndef __LINUX_DMI_WRAPPER_H
+#define __LINUX_DMI_WRAPPER_H 1
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+
+#include_next <linux/dmi.h>
+
+#else /* linux version >= 2.6.23 */
+
+#ifndef __DMI_H__
+#define __DMI_H__
+
+#include <linux/list.h>
+
+enum dmi_field {
+ DMI_NONE,
+ DMI_BIOS_VENDOR,
+ DMI_BIOS_VERSION,
+ DMI_BIOS_DATE,
+ DMI_SYS_VENDOR,
+ DMI_PRODUCT_NAME,
+ DMI_PRODUCT_VERSION,
+ DMI_PRODUCT_SERIAL,
+ DMI_PRODUCT_UUID,
+ DMI_BOARD_VENDOR,
+ DMI_BOARD_NAME,
+ DMI_BOARD_VERSION,
+ DMI_BOARD_SERIAL,
+ DMI_BOARD_ASSET_TAG,
+ DMI_CHASSIS_VENDOR,
+ DMI_CHASSIS_TYPE,
+ DMI_CHASSIS_VERSION,
+ DMI_CHASSIS_SERIAL,
+ DMI_CHASSIS_ASSET_TAG,
+ DMI_STRING_MAX,
+};
+
+enum dmi_device_type {
+ DMI_DEV_TYPE_ANY = 0,
+ DMI_DEV_TYPE_OTHER,
+ DMI_DEV_TYPE_UNKNOWN,
+ DMI_DEV_TYPE_VIDEO,
+ DMI_DEV_TYPE_SCSI,
+ DMI_DEV_TYPE_ETHERNET,
+ DMI_DEV_TYPE_TOKENRING,
+ DMI_DEV_TYPE_SOUND,
+ DMI_DEV_TYPE_IPMI = -1,
+ DMI_DEV_TYPE_OEM_STRING = -2
+};
+
+struct dmi_header {
+ u8 type;
+ u8 length;
+ u16 handle;
+};
+
+/*
+ * DMI callbacks for problem boards
+ */
+struct dmi_strmatch {
+ u8 slot;
+ char *substr;
+};
+
+struct dmi_system_id {
+ int (*callback)(struct dmi_system_id *);
+ const char *ident;
+ struct dmi_strmatch matches[4];
+ void *driver_data;
+};
+
+#define DMI_MATCH(a, b) { a, b }
+
+struct dmi_device {
+ struct list_head list;
+ int type;
+ const char *name;
+ void *device_data; /* Type specific data */
+};
+
+/* No CONFIG_DMI before 2.6.16 */
+#if defined(CONFIG_DMI) || defined(CONFIG_X86_32)
+
+extern int dmi_check_system(struct dmi_system_id *list);
+extern char * dmi_get_system_info(int field);
+extern struct dmi_device * dmi_find_device(int type, const char *name,
+ struct dmi_device *from);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+extern void dmi_scan_machine(void);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+extern int dmi_get_year(int field);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+extern int dmi_name_in_vendors(char *str);
+#endif
+
+#else
+
+static inline int dmi_check_system(struct dmi_system_id *list) { return 0; }
+static inline char * dmi_get_system_info(int field) { return NULL; }
+static inline struct dmi_device * dmi_find_device(int type, const char *name,
+ struct dmi_device *from) { return NULL; }
+static inline int dmi_get_year(int year) { return 0; }
+static inline int dmi_name_in_vendors(char *s) { return 0; }
+
+#endif
+
+#endif /* __DMI_H__ */
+
+#endif /* linux kernel < 2.6.23 */
+
+#endif
--- /dev/null
+#ifndef __LINUX_ERR_WRAPPER_H
+#define __LINUX_ERR_WRAPPER_H 1
+
+#include_next <linux/err.h>
+
+#ifndef HAVE_ERR_CAST
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ *
+ * (Backport for kernels whose <linux/err.h> lacks ERR_CAST.)
+ */
+static inline void *ERR_CAST(const void *ptr)
+{
+	/* cast away the const */
+	return (void *) ptr;
+}
+#endif /* HAVE_ERR_CAST */
+
+#endif
--- /dev/null
+#ifndef __GENETLINK_WRAPPER_H
+#define __GENETLINK_WRAPPER_H 1
+
+#include_next <linux/genetlink.h>
+
+#ifdef CONFIG_PROVE_LOCKING
+/* No version of the kernel has this function, but our locking scheme depends
+ * on genl_mutex so for clarity we use it where appropriate.
+ *
+ * Always claims the lock is held; only compiled under CONFIG_PROVE_LOCKING,
+ * where it satisfies lockdep-style held-lock assertions. */
+static inline int lockdep_genl_is_held(void)
+{
+	return 1;
+}
+#endif
+
+#endif /* linux/genetlink.h wrapper */
--- /dev/null
+#ifndef __LINUX_ICMP_WRAPPER_H
+#define __LINUX_ICMP_WRAPPER_H 1
+
+#include_next <linux/icmp.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+/* Backport of icmp_hdr() for kernels without the skb header helpers:
+ * the ICMP header at the skb's transport-header offset. */
+static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
+{
+	return (struct icmphdr *)skb_transport_header(skb);
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_ICMPV6_WRAPPER_H
+#define __LINUX_ICMPV6_WRAPPER_H 1
+
+#include_next <linux/icmpv6.h>
+
+#ifndef HAVE_ICMP6_HDR
+/* Backport of icmp6_hdr() for kernels that lack it: the ICMPv6 header at
+ * the skb's transport-header offset. */
+static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+{
+	return (struct icmp6hdr *)skb_transport_header(skb);
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_IF_WRAPPER_H
+#define __LINUX_IF_WRAPPER_H 1
+
+#include_next <linux/if.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
+
+#define IFF_XMIT_DST_RELEASE 0
+
+#endif /* linux kernel < 2.6.31 */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,36)
+#define IFF_OVS_DATAPATH IFF_BRIDGE_PORT
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+#define IFF_OVS_DATAPATH 0 /* no-op flag */
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_IF_ARP_WRAPPER_H
+#define __LINUX_IF_ARP_WRAPPER_H 1
+
+#include_next <linux/if_arp.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+#include <linux/skbuff.h>
+
+/* Backport of arp_hdr() for kernels without the skb header helpers:
+ * the ARP header at the skb's network-header offset. */
+static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
+{
+	return (struct arphdr *)skb_network_header(skb);
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#endif
--- /dev/null
+#ifndef __LINUX_IF_ETHER_WRAPPER_H
+#define __LINUX_IF_ETHER_WRAPPER_H 1
+
+#include_next <linux/if_ether.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+
+#define ETH_P_TEB 0x6558 /* Trans Ether Bridging */
+
+#endif /* linux kernel < 2.6.28 */
+
+#endif
--- /dev/null
+#ifndef __LINUX_IF_VLAN_WRAPPER_H
+#define __LINUX_IF_VLAN_WRAPPER_H 1
+
+#include_next <linux/if_vlan.h>
+#include <linux/skbuff.h>
+
+/*
+ * The behavior of __vlan_put_tag() has changed over time:
+ *
+ * - In 2.6.26 and earlier, it adjusted both MAC and network header
+ * pointers. (The latter didn't make any sense.)
+ *
+ * - In 2.6.27 and 2.6.28, it did not adjust any header pointers at all.
+ *
+ * - In 2.6.29 and later, it adjusts the MAC header pointer only.
+ *
+ * This is the version from 2.6.33. We unconditionally substitute this version
+ * to avoid the need to guess whether the version in the kernel tree is
+ * acceptable.
+ */
+#define __vlan_put_tag rpl_vlan_put_tag
+/* Push an 802.1Q tag carrying @vlan_tci onto @skb.  Returns @skb on
+ * success; if headroom cannot be made, the skb is freed and NULL is
+ * returned, so the caller must not touch @skb afterwards. */
+static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
+{
+	struct vlan_ethhdr *veth;
+
+	/* Ensure writable headroom for the 4-byte VLAN header. */
+	if (skb_cow_head(skb, VLAN_HLEN) < 0) {
+		kfree_skb(skb);
+		return NULL;
+	}
+	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+
+	/* Move the mac addresses to the beginning of the new header. */
+	memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
+	/* Keep the MAC header pointer in sync (2.6.33 semantics; see the
+	 * comment above this function). */
+	skb->mac_header -= VLAN_HLEN;
+
+	/* first, the ethernet type */
+	veth->h_vlan_proto = htons(ETH_P_8021Q);
+
+	/* now, the TCI */
+	veth->h_vlan_TCI = htons(vlan_tci);
+
+	skb->protocol = htons(ETH_P_8021Q);
+
+	return skb;
+}
+
+
+/* All of these were introduced in a single commit preceding 2.6.33, so
+ * presumably all of them or none of them are present. */
+#ifndef VLAN_PRIO_MASK
+#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
+#define VLAN_PRIO_SHIFT 13
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
+#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#endif
+
+#endif /* linux/if_vlan.h wrapper */
--- /dev/null
+#ifndef __LINUX_IN_WRAPPER_H
+#define __LINUX_IN_WRAPPER_H 1
+
+#include_next <linux/in.h>
+
+#ifndef HAVE_IPV4_IS_MULTICAST
+
+/* Backports of the kernel's IPv4 address-classification helpers for
+ * kernels that lack them.  All take addresses in network byte order. */
+
+/* 127.0.0.0/8: loopback. */
+static inline bool ipv4_is_loopback(__be32 addr)
+{
+	return (addr & htonl(0xff000000)) == htonl(0x7f000000);
+}
+
+/* 224.0.0.0/4: multicast (class D). */
+static inline bool ipv4_is_multicast(__be32 addr)
+{
+	return (addr & htonl(0xf0000000)) == htonl(0xe0000000);
+}
+
+/* 224.0.0.0/24: link-local multicast. */
+static inline bool ipv4_is_local_multicast(__be32 addr)
+{
+	return (addr & htonl(0xffffff00)) == htonl(0xe0000000);
+}
+
+/* 255.255.255.255 only. */
+static inline bool ipv4_is_lbcast(__be32 addr)
+{
+	/* limited broadcast */
+	return addr == htonl(INADDR_BROADCAST);
+}
+
+/* 0.0.0.0/8: "this network". */
+static inline bool ipv4_is_zeronet(__be32 addr)
+{
+	return (addr & htonl(0xff000000)) == htonl(0x00000000);
+}
+
+#endif /* !HAVE_IPV4_IS_MULTICAST */
+
+#endif
--- /dev/null
+#ifndef __LINUX_INETDEVICE_WRAPPER_H
+#define __LINUX_INETDEVICE_WRAPPER_H 1
+
+#include_next <linux/inetdevice.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+/* Before 2.6.25, inetdev_by_index() took no network-namespace argument;
+ * this macro drops the @net parameter.  (A function-like macro may refer
+ * to its own name without recursing.) */
+#define inetdev_by_index(net, ifindex) \
+	inetdev_by_index((ifindex))
+
+#endif /* linux kernel < 2.6.25 */
+
+#endif
--- /dev/null
+#ifndef __LINUX_IP_WRAPPER_H
+#define __LINUX_IP_WRAPPER_H 1
+
+#include_next <linux/ip.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+#include <linux/skbuff.h>
+/* Backport of ip_hdr(): the IPv4 header at the skb's network-header
+ * offset. */
+static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
+{
+	return (struct iphdr *)skb_network_header(skb);
+}
+
+/* Backport of ip_hdrlen(): IPv4 header length in bytes (ihl counts
+ * 32-bit words). */
+static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
+{
+	return ip_hdr(skb)->ihl * 4;
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#endif
--- /dev/null
+#ifndef __LINUX_IPV6_WRAPPER_H
+#define __LINUX_IPV6_WRAPPER_H 1
+
+#include_next <linux/ipv6.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+/* Backport of ipv6_hdr(): the IPv6 header at the skb's network-header
+ * offset. */
+static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
+{
+	return (struct ipv6hdr *)skb_network_header(skb);
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_JIFFIES_WRAPPER_H
+#define __LINUX_JIFFIES_WRAPPER_H 1
+
+#include_next <linux/jiffies.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+
+/* 64-bit counterparts of time_after() and friends, using platform-
+ * independent 64-bit types.  These must be used when comparing values
+ * returned by get_jiffies_64() (i.e. jiffies_64). */
+#define time_after64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)(b) - (__s64)(a) < 0))
+#define time_before64(a,b) time_after64(b,a)
+
+#define time_after_eq64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)(a) - (__s64)(b) >= 0))
+#define time_before_eq64(a,b) time_after_eq64(b,a)
+
+#endif /* linux kernel < 2.6.19 */
+
+#endif
--- /dev/null
+#ifndef __KERNEL_H_WRAPPER
+#define __KERNEL_H_WRAPPER 1
+
+#include_next <linux/kernel.h>
+#ifndef HAVE_LOG2_H
+#include <linux/log2.h>
+#endif
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+#undef pr_emerg
+#define pr_emerg(fmt, ...) \
+ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#undef pr_alert
+#define pr_alert(fmt, ...) \
+ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#undef pr_crit
+#define pr_crit(fmt, ...) \
+ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#undef pr_err
+#define pr_err(fmt, ...) \
+ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#undef pr_warning
+#define pr_warning(fmt, ...) \
+ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#undef pr_notice
+#define pr_notice(fmt, ...) \
+ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#undef pr_info
+#define pr_info(fmt, ...) \
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#undef pr_cont
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#define pr_warn pr_warning
+#endif
+
+#ifndef BUILD_BUG_ON_NOT_POWER_OF_2
+/* Force a compilation error if a constant expression is not a power of 2 */
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
+#endif
+
+#if defined(CONFIG_PREEMPT) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
+#error "CONFIG_PREEMPT is broken before 2.6.21--see commit 4498121ca3, \"[NET]: Handle disabled preemption in gfp_any()\""
+#endif
+
+#ifndef USHRT_MAX
+#define USHRT_MAX ((u16)(~0U))
+#define SHRT_MAX ((s16)(USHRT_MAX>>1))
+#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
+#endif
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+
+#endif /* linux/kernel.h */
--- /dev/null
+#ifndef __LINUX_KOBJECT_WRAPPER_H
+#define __LINUX_KOBJECT_WRAPPER_H 1
+
+#include_next <linux/kobject.h>
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+#define kobject_init(kobj, ktype) rpl_kobject_init(kobj, ktype)
+/* Emulate the 2.6.25 two-argument kobject_init() on older kernels: store
+ * the ktype by hand, then call the old one-argument initializer.  The
+ * parentheses around (kobject_init) suppress expansion of the macro
+ * defined just above. */
+static inline void rpl_kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+{
+	kobj->ktype = ktype;
+	(kobject_init)(kobj);
+}
+
+#define kobject_add(kobj, parent, name) rpl_kobject_add(kobj, parent, name)
+/* Emulate the 2.6.25 kobject_add(kobj, parent, name): set the name and
+ * parent by hand, then call the old one-argument kobject_add().  The
+ * parenthesized (kobject_add) again dodges the macro above. */
+static inline int rpl_kobject_add(struct kobject *kobj,
+				  struct kobject *parent,
+				  const char *name)
+{
+	int err = kobject_set_name(kobj, "%s", name);
+	if (err)
+		return err;
+	kobj->parent = parent;
+	return (kobject_add)(kobj);
+}
+#endif
+
+
+#endif /* linux/kobject.h wrapper */
--- /dev/null
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * see Documentation/lockdep-design.txt for more details.
+ */
+#ifndef __LINUX_LOCKDEP_WRAPPER_H
+#define __LINUX_LOCKDEP_WRAPPER_H
+
+#include_next <linux/lockdep.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+
+struct task_struct;
+struct lockdep_map;
+
+#ifdef CONFIG_LOCKDEP
+
+#include <linux/linkage.h>
+#include <linux/list.h>
+#include <linux/debug_locks.h>
+#include <linux/stacktrace.h>
+
+/*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit
+{
+ LOCK_USED = 0,
+ LOCK_USED_IN_HARDIRQ,
+ LOCK_USED_IN_SOFTIRQ,
+ LOCK_ENABLED_SOFTIRQS,
+ LOCK_ENABLED_HARDIRQS,
+ LOCK_USED_IN_HARDIRQ_READ,
+ LOCK_USED_IN_SOFTIRQ_READ,
+ LOCK_ENABLED_SOFTIRQS_READ,
+ LOCK_ENABLED_HARDIRQS_READ,
+ LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define LOCKF_USED (1 << LOCK_USED)
+#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
+#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
+#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
+
+#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
+#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
+#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
+
+#define LOCKF_ENABLED_IRQS_READ \
+ (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+ (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+/*
+ * Lock-classes are keyed via unique addresses, by embedding the
+ * lockclass-key into the kernel (or module) .data section. (For
+ * static locks we use the lock address itself as the key.)
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+struct lock_class_key {
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+};
+
+/*
+ * The lock-class itself:
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct list_head hash_entry;
+
+ /*
+ * global list of all lock-classes:
+ */
+ struct list_head lock_entry;
+
+ struct lockdep_subclass_key *key;
+ unsigned int subclass;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ struct stack_trace usage_traces[LOCK_USAGE_STATES];
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ unsigned int version;
+
+ /*
+ * Statistics counter:
+ */
+ unsigned long ops;
+
+ const char *name;
+ int name_version;
+
+#ifdef CONFIG_LOCK_STAT
+ unsigned long contention_point[4];
+#endif
+};
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+ s64 min;
+ s64 max;
+ s64 total;
+ unsigned long nr;
+};
+
+enum bounce_type {
+ bounce_acquired_write,
+ bounce_acquired_read,
+ bounce_contended_write,
+ bounce_contended_read,
+ nr_bounce_types,
+
+ bounce_acquired = bounce_acquired_write,
+ bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+ unsigned long contention_point[4];
+ struct lock_time read_waittime;
+ struct lock_time write_waittime;
+ struct lock_time read_holdtime;
+ struct lock_time write_holdtime;
+ unsigned long bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache;
+ const char *name;
+#ifdef CONFIG_LOCK_STAT
+ int cpu;
+#endif
+};
+
+/*
+ * Every lock has a list of other locks that were taken after it.
+ * We only grow the list, never remove from it:
+ */
+struct lock_list {
+ struct list_head entry;
+ struct lock_class *class;
+ struct stack_trace trace;
+ int distance;
+};
+
+/*
+ * We record lock dependency chains, so that we can cache them:
+ */
+struct lock_chain {
+ struct list_head entry;
+ u64 chain_key;
+};
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ struct lock_class *class;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+
+#ifdef CONFIG_LOCK_STAT
+ u64 waittime_stamp;
+ u64 holdtime_stamp;
+#endif
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest ontop of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also keep do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ int irq_context;
+ int trylock;
+ int read;
+ int check;
+ int hardirqs_off;
+};
+
+/*
+ * Initialization, self-test and debugging-output methods:
+ */
+extern void lockdep_init(void);
+extern void lockdep_info(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+
+extern void lockdep_off(void);
+extern void lockdep_on(void);
+
+/*
+ * These methods are used by specific locking variants (spinlocks,
+ * rwlocks, mutexes and rwsems) to pass init/acquire/release events
+ * to lockdep:
+ */
+
+extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass);
+
+/*
+ * Reinitialize a lock key - for cases where there is special locking or
+ * special initialization of locks so that the validator gets the scope
+ * of dependencies wrong: they are either too broad (they need a class-split)
+ * or they are too narrow (they suffer from a false class-split):
+ */
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map(&(lock)->dep_map, #key, key, 0)
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map(&(lock)->dep_map, name, key, 0)
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ lockdep_init_map(&(lock)->dep_map, #key, key, sub)
+#define lockdep_set_subclass(lock, sub) \
+ lockdep_init_map(&(lock)->dep_map, #lock, \
+ (lock)->dep_map.key, sub)
+
+/*
+ * Acquire a lock.
+ *
+ * Values for "read":
+ *
+ * 0: exclusive (write) acquire
+ * 1: read-acquire (no recursion allowed)
+ * 2: read-acquire with same-instance recursion allowed
+ *
+ * Values for check:
+ *
+ * 0: disabled
+ * 1: simple checks (freeing, held-at-exit-time, etc.)
+ * 2: full validation
+ */
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check, unsigned long ip);
+
+extern void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip);
+
+# define INIT_LOCKDEP .lockdep_recursion = 0,
+
+#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
+
+#else /* !LOCKDEP */
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
+# define lock_acquire(l, s, t, r, c, i) do { } while (0)
+# define lock_release(l, n, i) do { } while (0)
+# define lockdep_init() do { } while (0)
+# define lockdep_info() do { } while (0)
+# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
+# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
+# define lockdep_set_class_and_name(lock, key, name) \
+ do { (void)(key); } while (0)
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ do { (void)(key); } while (0)
+#define lockdep_set_subclass(lock, sub) do { } while (0)
+
+# define INIT_LOCKDEP
+# define lockdep_reset() do { debug_locks = 1; } while (0)
+# define lockdep_free_key_range(start, size) do { } while (0)
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+
+#define lockdep_depth(tsk) (0)
+
+#endif /* !LOCKDEP */
+
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock);
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+do { \
+ if (!try(_lock)) { \
+ lock_contended(&(_lock)->dep_map, _RET_IP_); \
+ lock(_lock); \
+ } \
+ lock_acquired(&(_lock)->dep_map); \
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+ lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+extern void early_init_irq_lock_class(void);
+#else
+static inline void early_init_irq_lock_class(void)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void early_boot_irqs_off(void);
+extern void early_boot_irqs_on(void);
+extern void print_irqtrace_events(struct task_struct *curr);
+#else
+static inline void early_boot_irqs_off(void)
+{
+}
+static inline void early_boot_irqs_on(void)
+{
+}
+static inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+/*
+ * For trivial one-depth nesting of a lock-class, the following
+ * global define can be used. (Subsystems with multiple levels
+ * of nesting should define their own lock-nesting subclasses.)
+ */
+#define SINGLE_DEPTH_NESTING 1
+
+/*
+ * Map the dependency ops to NOP or to real lockdep ops, depending
+ * on the per lock-class debug mode:
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# else
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define spin_release(l, n, i) lock_release(l, n, i)
+#else
+# define spin_acquire(l, s, t, i) do { } while (0)
+# define spin_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)
+# else
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i)
+# endif
+# define rwlock_release(l, n, i) lock_release(l, n, i)
+#else
+# define rwlock_acquire(l, s, t, i) do { } while (0)
+# define rwlock_acquire_read(l, s, t, i) do { } while (0)
+# define rwlock_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# else
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define mutex_release(l, n, i) lock_release(l, n, i)
+#else
+# define mutex_acquire(l, s, t, i) do { } while (0)
+# define mutex_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i)
+# else
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i)
+# endif
+# define rwsem_release(l, n, i) lock_release(l, n, i)
+#else
+# define rwsem_acquire(l, s, t, i) do { } while (0)
+# define rwsem_acquire_read(l, s, t, i) do { } while (0)
+# define rwsem_release(l, n, i) do { } while (0)
+#endif
+
+#endif /* linux kernel < 2.6.18 */
+
+#endif /* __LINUX_LOCKDEP_WRAPPER_H */
--- /dev/null
+#ifndef __LINUX_LOG2_WRAPPER
+#define __LINUX_LOG2_WRAPPER
+
+#ifdef HAVE_LOG2_H
+#include_next <linux/log2.h>
+#else
+/* This is very stripped down because log2.h has far too many dependencies. */
+
+extern __attribute__((const, noreturn))
+int ____ilog2_NaN(void);
+
+#define ilog2(n) ((n) == 4 ? 2 : \
+ (n) == 8 ? 3 : \
+ ____ilog2_NaN())
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_MUTEX_WRAPPER_H
+#define __LINUX_MUTEX_WRAPPER_H
+
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+
+/* Mutexes were introduced in Linux 2.6.16; emulate them on older kernels
+ * with a binary semaphore. */
+#include <asm/semaphore.h>
+
+struct mutex {
+	struct semaphore sema;
+};
+
+#define mutex_init(mutex) init_MUTEX(&(mutex)->sema)
+#define mutex_destroy(mutex) do { } while (0)
+
+#define __MUTEX_INITIALIZER(name) \
+	__SEMAPHORE_INITIALIZER(name,1)
+
+#define DEFINE_MUTEX(mutexname) \
+	struct mutex mutexname = { __MUTEX_INITIALIZER(mutexname.sema) }
+
+/*
+ * See kernel/mutex.c for detailed documentation of these APIs.
+ * Also see Documentation/mutex-design.txt.
+ */
+static inline void mutex_lock(struct mutex *lock)
+{
+	down(&lock->sema);
+}
+
+static inline int mutex_lock_interruptible(struct mutex *lock)
+{
+	return down_interruptible(&lock->sema);
+}
+
+/* Lockdep subclass annotations are not supported by this emulation; fall
+ * back to the plain locking functions. */
+#define mutex_lock_nested(lock, subclass) mutex_lock(lock)
+#define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+
+/*
+ * NOTE: mutex_trylock() follows the spin_trylock() convention,
+ * not the down_trylock() convention!
+ */
+static inline int mutex_trylock(struct mutex *lock)
+{
+	return !down_trylock(&lock->sema);
+}
+
+static inline void mutex_unlock(struct mutex *lock)
+{
+	up(&lock->sema);
+}
+#else
+
+#include_next <linux/mutex.h>
+
+#endif /* linux version < 2.6.16 */
+
+#endif
--- /dev/null
+#ifndef __LINUX_NETDEVICE_WRAPPER_H
+#define __LINUX_NETDEVICE_WRAPPER_H 1
+
+#include_next <linux/netdevice.h>
+
+struct net;
+
+#include <linux/version.h>
+/* Before 2.6.21, struct net_device has a "struct class_device" member named
+ * class_dev. Beginning with 2.6.21, struct net_device instead has a "struct
+ * device" member named dev. Otherwise the usage of these members is pretty
+ * much the same. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
+#define NETDEV_DEV_MEMBER class_dev
+#else
+#define NETDEV_DEV_MEMBER dev
+#endif
+
+#ifndef to_net_dev
+#define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+/* dev_net()/dev_net_set() appeared in 2.6.26; emulate them here. */
+static inline
+struct net *dev_net(const struct net_device *dev)
+{
+#ifdef CONFIG_NET_NS
+	return dev->nd_net;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
+	return &init_net;
+#else
+	return NULL;
+#endif
+}
+
+static inline
+void dev_net_set(struct net_device *dev, const struct net *net)
+{
+#ifdef CONFIG_NET_NS
+	/* Bug fix: the member read by dev_net() above is "nd_net"; "nd_dev"
+	 * is not a struct net_device member, so this could not build with
+	 * CONFIG_NET_NS enabled.  The cast drops the const qualifier that
+	 * this wrapper's signature carries. */
+	dev->nd_net = (struct net *) net;
+#endif
+}
+#endif /* linux kernel < 2.6.26 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+#define NETIF_F_NETNS_LOCAL 0
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+/* 2.6.24 moved the proc_net directory into the per-namespace struct net. */
+#define proc_net init_net.proc_net
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
+/* netdev_tx_t was introduced in 2.6.32; older ndo_start_xmit returns int. */
+typedef int netdev_tx_t;
+#endif
+
+#ifndef for_each_netdev
+/* Linux before 2.6.22 didn't have for_each_netdev at all. */
+#define for_each_netdev(net, d) for (d = dev_base; d; d = d->next)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+/* Linux 2.6.24 added a network namespace pointer to the macro. */
+#undef for_each_netdev
+#define for_each_netdev(net,d) list_for_each_entry(d, &dev_base_head, dev_list)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
+extern void unregister_netdevice_queue(struct net_device *dev,
+					struct list_head *head);
+extern void unregister_netdevice_many(struct list_head *head);
+#endif
+
+#ifndef HAVE_DEV_DISABLE_LRO
+extern void dev_disable_lro(struct net_device *dev);
+#endif
+
+/* Linux 2.6.28 introduced dev_get_stats():
+ * const struct net_device_stats *dev_get_stats(struct net_device *dev);
+ *
+ * Linux 2.6.36 changed dev_get_stats() to:
+ * struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ * struct rtnl_link_stats64 *storage);
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+#define dev_get_stats(dev, storage) rpl_dev_get_stats(dev, storage)
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+					struct rtnl_link_stats64 *storage);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/* 2.6.19 dropped skb_checksum_help()'s second argument; supply 0 for it.
+ * (The macro does not expand recursively, so this calls the function.) */
+#define skb_checksum_help(skb) skb_checksum_help((skb), 0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+/* Emulation of the 2.6.36 rx_handler API on top of the bridge hook.
+ * NOTE(review): the rx_handler argument itself is ignored; only
+ * rx_handler_data is stored (in dev->br_port). */
+static inline int netdev_rx_handler_register(struct net_device *dev,
+					     void *rx_handler,
+					     void *rx_handler_data)
+{
+	if (dev->br_port)
+		return -EBUSY;
+	rcu_assign_pointer(dev->br_port, rx_handler_data);
+	return 0;
+}
+static inline void netdev_rx_handler_unregister(struct net_device *dev)
+{
+	rcu_assign_pointer(dev->br_port, NULL);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+#undef SET_ETHTOOL_OPS
+#define SET_ETHTOOL_OPS(netdev, ops) \
+	( (netdev)->ethtool_ops = (struct ethtool_ops *)(ops) )
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+/* Strip the 'net' argument for kernels whose device lookup functions
+ * predate network namespaces. */
+#define dev_get_by_name(net, name) dev_get_by_name(name)
+#define dev_get_by_index(net, ifindex) dev_get_by_index(ifindex)
+#define __dev_get_by_name(net, name) __dev_get_by_name(name)
+#define __dev_get_by_index(net, ifindex) __dev_get_by_index(ifindex)
+#define dev_get_by_index_rcu(net, ifindex) dev_get_by_index_rcu(ifindex)
+#endif
+
+#ifndef HAVE_DEV_GET_BY_INDEX_RCU
+/* Fallback using dev_base_lock instead of RCU.  Note: unlike the real
+ * dev_get_by_index_rcu(), this takes no reference on the device. */
+static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+
+	read_lock(&dev_base_lock);
+	dev = __dev_get_by_index(net, ifindex);
+	read_unlock(&dev_base_lock);
+
+	return dev;
+}
+#endif
+
+#ifndef NETIF_F_FSO
+#define NETIF_F_FSO 0
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_NETFILTER_BRIDGE_WRAPPER_H
+#define __LINUX_NETFILTER_BRIDGE_WRAPPER_H
+
+#include_next <linux/netfilter_bridge.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+
+#include <linux/if_vlan.h>
+#include <linux/if_pppox.h>
+
+/* Added upstream in 2.6.22.  NOTE(review): this version only accounts for a
+ * VLAN encapsulation header; the <linux/if_pppox.h> include suggests the
+ * upstream original also handled PPPoE -- confirm the omission is
+ * intentional. */
+static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
+{
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_8021Q):
+		return VLAN_HLEN;
+	default:
+		return 0;
+	}
+}
+
+#endif /* linux version < 2.6.22 */
+
+#endif
--- /dev/null
+#ifndef __LINUX_NETFILTER_IPV4_WRAPPER_H
+#define __LINUX_NETFILTER_IPV4_WRAPPER_H 1
+
+#include_next <linux/netfilter_ipv4.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+#ifdef __KERNEL__
+
+/* 2.6.25 renamed the IPv4 hook numbers to protocol-neutral NF_INET_* names;
+ * map the new names onto the old NF_IP_* values. */
+#define NF_INET_PRE_ROUTING NF_IP_PRE_ROUTING
+#define NF_INET_POST_ROUTING NF_IP_POST_ROUTING
+#define NF_INET_FORWARD NF_IP_FORWARD
+
+#endif /* __KERNEL__ */
+
+#endif /* linux kernel < 2.6.25 */
+
+#endif
--- /dev/null
+#ifndef __LINUX_NETLINK_WRAPPER_H
+#define __LINUX_NETLINK_WRAPPER_H 1
+
+#include <linux/skbuff.h>
+#include_next <linux/netlink.h>
+
+#ifndef NLA_TYPE_MASK
+/* Attribute flag bits that accompany NLA_TYPE_MASK on newer kernels. */
+#define NLA_F_NESTED (1 << 15)
+#define NLA_F_NET_BYTEORDER (1 << 14)
+#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)
+#endif
+
+#include <net/netlink.h>
+#include <linux/version.h>
+
+#ifndef NLMSG_DEFAULT_SIZE
+#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/* Replacement for pre-2.6.19 nlmsg_new(): allocate the skb directly.
+ * NOTE(review): presumably this works around a size-accounting difference
+ * in the old nlmsg_new() -- confirm against the 2.6.18 implementation. */
+#define nlmsg_new(s, f) nlmsg_new_proper((s), (f))
+static inline struct sk_buff *nlmsg_new_proper(int size, gfp_t flags)
+{
+	return alloc_skb(size, flags);
+}
+#endif /* linux kernel < 2.6.19 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+/* nlmsg_hdr() was added in 2.6.22. */
+static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
+{
+	return (struct nlmsghdr *)skb->data;
+}
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_RCULIST_WRAPPER_H
+#define __LINUX_RCULIST_WRAPPER_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include_next <linux/rculist.h>
+#else
+/* Prior to 2.6.26, the contents of rculist.h were part of list.h. */
+#include <linux/list.h>
+#endif
+
+#endif
--- /dev/null
+#ifndef __RCUPDATE_WRAPPER_H
+#define __RCUPDATE_WRAPPER_H 1
+
+#include_next <linux/rcupdate.h>
+
+/* Lockdep-checked RCU accessors fall back to their unchecked equivalents
+ * when the kernel does not provide them. */
+#ifndef rcu_dereference_check
+#define rcu_dereference_check(p, c) rcu_dereference(p)
+#endif
+
+#ifndef rcu_dereference_protected
+#define rcu_dereference_protected(p, c) (p)
+#endif
+
+#ifndef HAVE_RCU_READ_LOCK_HELD
+/* Without lockdep we cannot tell; always claim the lock is held so that
+ * debug checks built on this never fire spuriously. */
+static inline int rcu_read_lock_held(void)
+{
+	return 1;
+}
+#endif
+
+#endif /* linux/rcupdate.h wrapper */
--- /dev/null
+#ifndef __RTNETLINK_WRAPPER_H
+#define __RTNETLINK_WRAPPER_H 1
+
+#include_next <linux/rtnetlink.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/* Minimal rtnl_notify()/rtnl_set_sk_err() for kernels that lack them. */
+static inline void rtnl_notify(struct sk_buff *skb, u32 pid, u32 group,
+			       struct nlmsghdr *nlh, gfp_t flags)
+{
+	BUG_ON(nlh != NULL); /* not implemented */
+	if (group) {
+		/* errors reported via destination sk->sk_err */
+		nlmsg_multicast(rtnl, skb, 0, group, flags);
+	}
+}
+
+static inline void rtnl_set_sk_err(u32 group, int error)
+{
+	netlink_set_err(rtnl, 0, group, error);
+}
+#endif
+
+/* No 'net' parameter in these versions.  (A macro never expands
+ * recursively, so the expansion below calls the function or the inline
+ * helper of the same name defined above.) */
+#define rtnl_notify(skb, net, pid, group, nlh, flags) \
+	((void) rtnl_notify(skb, pid, group, nlh, flags))
+#define rtnl_set_sk_err(net, group, error) \
+	(rtnl_set_sk_err(group, error))
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
+/* Make the return type effectively 'void' to match Linux 2.6.30+. */
+#define rtnl_notify(skb, net, pid, group, nlh, flags) \
+	((void) rtnl_notify(skb, net, pid, group, nlh, flags))
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+/* rtnl_is_locked() appeared in 2.6.26; approximate it: if the trylock
+ * succeeds the lock was not held by anyone. */
+static inline int rtnl_is_locked(void)
+{
+	if (unlikely(rtnl_trylock())) {
+		rtnl_unlock();
+		return 0;
+	}
+
+	return 1;
+}
+
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
+#ifdef CONFIG_PROVE_LOCKING
+/* Always claim RTNL is held, so lockdep-based debug checks do not fire. */
+static inline int lockdep_rtnl_is_held(void)
+{
+	return 1;
+}
+#endif
+#endif
+
+#ifndef rcu_dereference_rtnl
+/**
+ * rcu_dereference_rtnl - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
+ * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference()
+ */
+#define rcu_dereference_rtnl(p) \
+	rcu_dereference_check(p, rcu_read_lock_held() || \
+			      lockdep_rtnl_is_held())
+#endif
+
+#ifndef rtnl_dereference
+/**
+ * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * caller holds RTNL.
+ */
+#define rtnl_dereference(p) \
+	rcu_dereference_protected(p, lockdep_rtnl_is_held())
+#endif
+
+#endif /* linux/rtnetlink.h wrapper */
--- /dev/null
+#ifndef __LINUX_SKBUFF_WRAPPER_H
+#define __LINUX_SKBUFF_WRAPPER_H 1
+
+#include_next <linux/skbuff.h>
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+/* In version 2.6.24 the return type of skb_headroom() changed from 'int' to
+ * 'unsigned int'. We use skb_headroom() as one arm of a min(a,b) invocation
+ * in make_writable() in actions.c, so we need the correct type. */
+#define skb_headroom rpl_skb_headroom
+static inline unsigned int rpl_skb_headroom(const struct sk_buff *skb)
+{
+	return skb->data - skb->head;
+}
+#endif
+
+#ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
+static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
+						    const int offset, void *to,
+						    const unsigned int len)
+{
+	memcpy(to, skb->data + offset, len);
+}
+
+static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
+						  const int offset,
+						  const void *from,
+						  const unsigned int len)
+{
+	memcpy(skb->data + offset, from, len);
+}
+
+#endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
+
+/*
+ * The networking layer reserves some headroom in skb data (via
+ * dev_alloc_skb). This is used to avoid having to reallocate skb data when
+ * the header has to grow. In the default case, if the header has to grow
+ * 16 bytes or less we avoid the reallocation.
+ *
+ * Unfortunately this headroom changes the DMA alignment of the resulting
+ * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
+ * on some architectures. An architecture can override this value,
+ * perhaps setting it to a cacheline in size (since that will maintain
+ * cacheline alignment of the DMA). It must be a power of 2.
+ *
+ * Various parts of the networking layer expect at least 16 bytes of
+ * headroom, you should not reduce this.
+ */
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+
+#ifndef HAVE_SKB_COW_HEAD
+static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+			    int cloned)
+{
+	int delta = 0;
+
+	if (headroom < NET_SKB_PAD)
+		headroom = NET_SKB_PAD;
+	if (headroom > skb_headroom(skb))
+		delta = headroom - skb_headroom(skb);
+
+	if (delta || cloned)
+		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+					GFP_ATOMIC);
+	return 0;
+}
+
+static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+	return __skb_cow(skb, headroom, skb_header_cloned(skb));
+}
+#endif /* !HAVE_SKB_COW_HEAD */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+/* skb_clone_writable() appeared in 2.6.23; conservatively report "not
+ * writable" so callers always fall back to copying. */
+static inline int skb_clone_writable(struct sk_buff *skb, int len)
+{
+	return false;
+}
+#endif
+
+#ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
+/* 2.6.31-style dst accessors on top of the plain skb->dst pointer. */
+static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
+{
+	return (struct dst_entry *)skb->dst;
+}
+
+static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
+{
+	skb->dst = dst;
+}
+
+static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+{
+	return (struct rtable *)skb->dst;
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+/* Emulate Linux 2.6.17 and later behavior, in which kfree_skb silently ignores
+ * null pointer arguments. */
+#define kfree_skb(skb) kfree_skb_maybe_null(skb)
+static inline void kfree_skb_maybe_null(struct sk_buff *skb)
+{
+	if (likely(skb != NULL))
+		(kfree_skb)(skb); /* parens suppress the macro above */
+}
+#endif
+
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#endif
+#ifndef CHECKSUM_COMPLETE
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifdef HAVE_MAC_RAW
+/* Map the 2.6.22+ header field names onto the old union members. */
+#define mac_header mac.raw
+#define network_header nh.raw
+#define transport_header h.raw
+#endif
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+/* The 2.6.22 header accessor helpers, implemented in terms of the old
+ * h/nh/mac unions. */
+static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+{
+	return skb->h.raw;
+}
+
+static inline void skb_reset_transport_header(struct sk_buff *skb)
+{
+	skb->h.raw = skb->data;
+}
+
+static inline void skb_set_transport_header(struct sk_buff *skb,
+					    const int offset)
+{
+	skb->h.raw = skb->data + offset;
+}
+
+static inline unsigned char *skb_network_header(const struct sk_buff *skb)
+{
+	return skb->nh.raw;
+}
+
+static inline void skb_reset_network_header(struct sk_buff *skb)
+{
+	skb->nh.raw = skb->data;
+}
+
+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
+{
+	skb->nh.raw = skb->data + offset;
+}
+
+static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
+{
+	return skb->mac.raw;
+}
+
+static inline void skb_reset_mac_header(struct sk_buff *skb)
+{
+	/* Bug fix: every other helper in this !HAVE_SKBUFF_HEADER_HELPERS
+	 * block uses the pre-2.6.22 union member "mac.raw"; "mac_header"
+	 * only exists on kernels that already provide these helpers (or via
+	 * the HAVE_MAC_RAW alias, which need not be in effect here). */
+	skb->mac.raw = skb->data;
+}
+
+static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+{
+	skb->mac.raw = skb->data + offset;
+}
+
+static inline int skb_transport_offset(const struct sk_buff *skb)
+{
+	return skb_transport_header(skb) - skb->data;
+}
+
+static inline int skb_network_offset(const struct sk_buff *skb)
+{
+	return skb_network_header(skb) - skb->data;
+}
+
+static inline void skb_copy_to_linear_data(struct sk_buff *skb,
+					   const void *from,
+					   const unsigned int len)
+{
+	memcpy(skb->data, from, len);
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+#warning "TSO/UFO not supported on kernels earlier than 2.6.18"
+
+/* Without GSO support, treat every skb as non-GSO. */
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+	return 0;
+}
+
+static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+					      int features)
+{
+	return NULL;
+}
+#endif /* before 2.6.18 */
+
+#ifndef HAVE_SKB_WARN_LRO
+#ifndef NETIF_F_LRO
+/* Kernel has no LRO support at all, so an LRO skb cannot occur. */
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+{
+	return false;
+}
+#else
+extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+{
+	/* LRO sets gso_size but not gso_type, whereas if GSO is really
+	 * wanted then gso_type will be set. */
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+		__skb_warn_lro_forwarding(skb);
+		return true;
+	}
+	return false;
+}
+#endif /* NETIF_F_LRO */
+#endif /* HAVE_SKB_WARN_LRO */
+
+#ifndef HAVE_CONSUME_SKB
+#define consume_skb kfree_skb
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_SLAB_WRAPPER_H
+#define __LINUX_SLAB_WRAPPER_H 1
+
+#include_next <linux/slab.h>
+
+#ifndef HAVE_KMEMDUP
+extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+/* Pre-2.6.23 kmem_cache_create() took an extra destructor argument;
+ * supply NULL for it. */
+#define kmem_cache_create(n, s, a, f, c) kmem_cache_create(n, s, a, f, c, NULL)
+#endif
+
+#endif
--- /dev/null
+#ifndef __LINUX_STDDEF_WRAPPER_H
+#define __LINUX_STDDEF_WRAPPER_H 1
+
+#include_next <linux/stddef.h>
+
+#ifdef __KERNEL__
+
+#ifndef HAVE_BOOL_TYPE
+/* Values for the "bool" typedef supplied by the <linux/types.h> wrapper. */
+enum {
+	false = 0,
+	true = 1
+};
+#endif /* !HAVE_BOOL_TYPE */
+
+#endif /* __KERNEL__ */
+
+#endif
--- /dev/null
+#ifndef __LINUX_TCP_WRAPPER_H
+#define __LINUX_TCP_WRAPPER_H 1
+
+#include_next <linux/tcp.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+	return (struct tcphdr *)skb_transport_header(skb);
+}
+
+/* TCP header length in bytes: doff counts 32-bit words. */
+static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+{
+	return tcp_hdr(skb)->doff * 4;
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#endif
--- /dev/null
+#ifndef __LINUX_TIMER_WRAPPER_H
+#define __LINUX_TIMER_WRAPPER_H 1
+
+#include_next <linux/timer.h>
+
+#include <linux/version.h>
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(X,Y) ( 0 )
+#endif
+/* round_jiffies() appeared in 2.6.20 (and was backported to RHEL 5.1). */
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
+     (!defined(RHEL_RELEASE_CODE) || \
+      (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,1))))
+
+extern unsigned long volatile jiffies;
+
+/**
+ * __round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * __round_jiffies() rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The exact rounding is skewed for each processor to avoid all
+ * processors firing at the exact same time, which could lead
+ * to lock contention or spurious cache line bouncing.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+static inline unsigned long __round_jiffies(unsigned long j, int cpu)
+{
+	int rem;
+	unsigned long original = j;
+
+	/*
+	 * We don't want all cpus firing their timers at once hitting the
+	 * same lock or cachelines, so we skew each extra cpu with an extra
+	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
+	 * already did this.
+	 * The skew is done by adding 3*cpunr, then round, then subtract this
+	 * extra offset again.
+	 */
+	j += cpu * 3;
+
+	rem = j % HZ;
+
+	/*
+	 * If the target jiffie is just after a whole second (which can happen
+	 * due to delays of the timer irq, long irq off times etc etc) then
+	 * we should round down to the whole second, not up. Use 1/4th second
+	 * as cutoff for this rounding as an extreme upper bound for this.
+	 */
+	if (rem < HZ/4) /* round down */
+		j = j - rem;
+	else /* round up */
+		j = j - rem + HZ;
+
+	/* now that we have rounded, subtract the extra skew again */
+	j -= cpu * 3;
+
+	if (j <= jiffies) /* rounding ate our timeout entirely; */
+		return original;
+	return j;
+}
+
+
+/**
+ * round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ *
+ * round_jiffies() rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+static inline unsigned long round_jiffies(unsigned long j)
+{
+	return __round_jiffies(j, 0); /* FIXME: 0 disables the per-CPU skew;
+				       * upstream passes the current CPU. */
+}
+
+#endif /* linux kernel < 2.6.20 */
+
+#endif
--- /dev/null
+#ifndef __LINUX_TYPES_WRAPPER_H
+#define __LINUX_TYPES_WRAPPER_H 1
+
+#include_next <linux/types.h>
+
+#ifndef HAVE_CSUM_TYPES
+/* Checksum types (with sparse __bitwise annotations) for older kernels. */
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#ifndef HAVE_BOOL_TYPE
+typedef _Bool bool;
+#endif /* !HAVE_BOOL_TYPE */
+
+#endif
--- /dev/null
+#ifndef __LINUX_UDP_WRAPPER_H
+#define __LINUX_UDP_WRAPPER_H 1
+
+#include_next <linux/udp.h>
+
+#ifndef HAVE_SKBUFF_HEADER_HELPERS
+static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
+{
+	return (struct udphdr *)skb_transport_header(skb);
+}
+#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+
+#endif
--- /dev/null
+#ifndef __LINUX_WORKQUEUE_WRAPPER_H
+#define __LINUX_WORKQUEUE_WRAPPER_H 1
+
+#include_next <linux/workqueue.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+
+/* Older kernels have an implementation of work queues with some very bad
+ * characteristics when trying to cancel work (potential deadlocks, use after
+ * free, etc. Here we directly use timers instead for delayed work. It's not
+ * optimal but it is better than the alternative. Note that work queues
+ * normally run in process context but this will cause them to operate in
+ * softirq context.
+ */
+
+#include <linux/timer.h>
+
+#undef DECLARE_DELAYED_WORK
+#define DECLARE_DELAYED_WORK(n, f) \
+	struct timer_list n = TIMER_INITIALIZER((void (*)(unsigned long))f, 0, 0)
+
+#define schedule_delayed_work rpl_schedule_delayed_work
+/* Returns 1 if newly scheduled, 0 if the timer was already pending. */
+static inline int schedule_delayed_work(struct timer_list *timer, unsigned long delay)
+{
+	if (timer_pending(timer))
+		return 0;
+
+	mod_timer(timer, jiffies + delay);
+	return 1;
+}
+
+#define cancel_delayed_work_sync rpl_cancel_delayed_work_sync
+static inline int cancel_delayed_work_sync(struct timer_list *timer)
+{
+	return del_timer_sync(timer);
+}
+
+#endif /* kernel version < 2.6.23 */
+
+#endif
--- /dev/null
+#ifndef __NET_CHECKSUM_WRAPPER_H
+#define __NET_CHECKSUM_WRAPPER_H 1
+
+#include_next <net/checksum.h>
+
+#ifndef HAVE_CSUM_UNFOLD
+static inline __wsum csum_unfold(__sum16 n)
+{
+	return (__force __wsum)n;
+}
+#endif /* !HAVE_CSUM_UNFOLD */
+
+/* Workaround for debugging included in certain versions of XenServer. It only
+ * applies to 32-bit x86.
+ */
+#if defined(HAVE_CSUM_COPY_DBG) && defined(CONFIG_X86_32)
+/* Macros do not expand recursively, so this forwards to the real function,
+ * supplying NULL for the extra debug argument. */
+#define csum_and_copy_to_user(src, dst, len, sum, err_ptr) \
+	csum_and_copy_to_user(src, dst, len, sum, NULL, err_ptr)
+#endif
+
+#ifndef HAVE_CSUM_REPLACE4
+static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+{
+	__be32 diff[] = { ~from, to };
+
+	*sum = csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum_unfold(*sum)));
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+/* inet_proto_csum_replace2() appeared in 2.6.25; implement it in terms of
+ * the 4-byte variant. */
+#define inet_proto_csum_replace2(sum, skb, from, to, pseudohdr) \
+	inet_proto_csum_replace4(sum, skb, (__force __be32)(from), \
+				 (__force __be32)(to), pseudohdr)
+#endif
+
+#endif /* checksum.h */
--- /dev/null
+#ifndef __NET_DST_WRAPPER_H
+#define __NET_DST_WRAPPER_H 1
+
+#include_next <net/dst.h>
+
+#ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
+
+/* skb_dst_drop() for kernels where skb->dst is a bare pointer: release the
+ * reference, if any, and clear the field. */
+static inline void skb_dst_drop(struct sk_buff *skb)
+{
+	if (skb->dst)
+		dst_release(skb_dst(skb));
+	skb->dst = NULL;
+}
+
+#endif
+
+#endif
--- /dev/null
+#ifndef __NET_GENERIC_NETLINK_WRAPPER_H
+#define __NET_GENERIC_NETLINK_WRAPPER_H 1
+
+#include <linux/version.h>
+#include <linux/netlink.h>
+#include <net/net_namespace.h>
+
+/* Very special super-nasty workaround here:
+ *
+ * Before 2.6.19, nlmsg_multicast() lacked a 'flags' parameter. We work
+ * around that in our <net/netlink.h> replacement, so that nlmsg_multicast
+ * is a macro that expands to rpl_nlmsg_multicast, which in turn has the
+ * 'flags' parameter.
+ *
+ * However, also before 2.6.19, <net/genetlink.h> contains an inline definition
+ * of genlmsg_multicast() that, of course, calls it without the 'flags'
+ * parameter. This causes a build failure.
+ *
+ * This works around the problem by temporarily renaming both nlmsg_multicast
+ * and genlmsg_multicast with a "busted_" prefix. (Nothing actually defines
+ * busted_nlmsg_multicast(), so if anything actually tries to call it, then
+ * we'll get a link error.)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+#undef nlmsg_multicast
+#define nlmsg_multicast busted_nlmsg_multicast
+#define genlmsg_multicast busted_genlmsg_multicast
+extern int busted_nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
+				  u32 pid, unsigned int group);
+#endif /* linux kernel < v2.6.19 */
+
+#include_next <net/genetlink.h>
+
+/* Drop the "busted_" prefix described above. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+#undef nlmsg_multicast
+#undef genlmsg_multicast
+#define nlmsg_multicast rpl_nlmsg_multicast
+#endif /* linux kernel < v2.6.19 */
+
+#include <net/net_namespace.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+
+#include <linux/genetlink.h>
+
+/**
+ * struct genl_multicast_group - generic netlink multicast group
+ * @name: name of the multicast group, names are per-family
+ * @id: multicast group ID, assigned by the core, to use with
+ *      genlmsg_multicast().
+ * @list: list entry for linking
+ * @family: pointer to family, need not be set before registering
+ */
+struct genl_multicast_group
+{
+	struct genl_family *family; /* private */
+	struct list_head list; /* private */
+	char name[GENL_NAMSIZ];
+	u32 id;
+};
+
+int genl_register_mc_group(struct genl_family *family,
+			   struct genl_multicast_group *grp);
+#endif /* linux kernel < 2.6.23 */
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/**
+ * genlmsg_msg_size - length of genetlink message not including padding
+ * @payload: length of message payload
+ */
+static inline int genlmsg_msg_size(int payload)
+{
+	return GENL_HDRLEN + payload;
+}
+
+/**
+ * genlmsg_total_size - length of genetlink message including padding
+ * @payload: length of message payload
+ */
+static inline int genlmsg_total_size(int payload)
+{
+	return NLMSG_ALIGN(genlmsg_msg_size(payload));
+}
+
+#define genlmsg_multicast(s, p, g, f) \
+	genlmsg_multicast_flags((s), (p), (g), (f))
+
+static inline int genlmsg_multicast_flags(struct sk_buff *skb, u32 pid,
+					  unsigned int group, gfp_t flags)
+{
+	int err;
+
+	NETLINK_CB(skb).dst_group = group;
+
+	/* netlink_broadcast() returns a positive count of skipped sockets,
+	 * which callers of genlmsg_multicast() do not expect; normalize. */
+	err = netlink_broadcast(genl_sock, skb, pid, group, flags);
+	if (err > 0)
+		err = 0;
+
+	return err;
+}
+#endif /* linux kernel < 2.6.19 */
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+
+/* Pre-2.6.20 genlmsg_put() took the family's fields individually rather
+ * than a struct genl_family pointer. */
+#define genlmsg_put(skb, p, seq, fam, flg, c) \
+	genlmsg_put((skb), (p), (seq), (fam)->id, (fam)->hdrsize, \
+		    (flg), (c), (fam)->version)
+
+/**
+ * genlmsg_put_reply - Add generic netlink header to a reply message
+ * @skb: socket buffer holding the message
+ * @info: receiver info
+ * @family: generic netlink family
+ * @flags: netlink message flags
+ * @cmd: generic netlink command
+ *
+ * Returns pointer to user specific header
+ */
+static inline void *genlmsg_put_reply(struct sk_buff *skb,
+				      struct genl_info *info, struct genl_family *family,
+				      int flags, u8 cmd)
+{
+	return genlmsg_put(skb, info->snd_pid, info->snd_seq, family,
+			   flags, cmd);
+}
+
+/**
+ * genlmsg_reply - reply to a request
+ * @skb: netlink message to be sent back
+ * @info: receiver information
+ */
+static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
+{
+	return genlmsg_unicast(skb, info->snd_pid);
+}
+
+/**
+ * genlmsg_new - Allocate a new generic netlink message
+ * @payload: size of the message payload
+ * @flags: the type of memory to allocate.
+ */
+static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags)
+{
+	return nlmsg_new(genlmsg_total_size(payload), flags);
+}
+#endif /* linux kernel < 2.6.20 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
+int genl_register_family_with_ops(struct genl_family *family,
+				  struct genl_ops *ops, size_t n_ops);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+/* Strip the 'net' argument on kernels that predate network namespaces. */
+#define genl_notify(skb, net, pid, group, nlh, flags) \
+	genl_notify(skb, pid, group, nlh, flags)
+#endif
+extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+			u32 group, struct nlmsghdr *nlh, gfp_t flags);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) && \
+    LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
+/* Single-namespace era: all generic netlink traffic lives in init_net. */
+static inline struct net *genl_info_net(struct genl_info *info)
+{
+	return &init_net;
+}
+#endif
+
+#endif /* genetlink.h */
--- /dev/null
+#ifndef __NET_IP_WRAPPER_H
+#define __NET_IP_WRAPPER_H 1
+
+#include_next <net/ip.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+/* ip_local_out()/__ip_local_out() appeared in 2.6.25; compat versions are
+ * provided out of line. */
+extern int __ip_local_out(struct sk_buff *skb);
+extern int ip_local_out(struct sk_buff *skb);
+
+#endif /* linux kernel < 2.6.25 */
+
+#endif
--- /dev/null
+#ifndef __NET_NET_NAMESPACE_WRAPPER_H
+#define __NET_NET_NAMESPACE_WRAPPER_H 1
+
+/* NOTE(review): this header tests LINUX_VERSION_CODE without including
+ * <linux/version.h> itself; it relies on the includer having done so. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+/* <net/net_namespace.h> exists, go ahead and include it. */
+#include_next <net/net_namespace.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+/* 2.6.32 moved the generic netlink socket into struct net. */
+#define INIT_NET_GENL_SOCK init_net.genl_sock
+#else
+#define INIT_NET_GENL_SOCK genl_sock
+#endif
+
+#endif /* net/net_namespace.h wrapper */
--- /dev/null
+#ifndef __NET_NETLINK_WRAPPER_H
+#define __NET_NETLINK_WRAPPER_H 1
+
+#include <linux/version.h>
+#include_next <net/netlink.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
+/* Before v2.6.29, a NLA_NESTED attribute, if it was present, was not allowed
+ * to be empty. However, OVS depends on the ability to accept empty
+ * attributes. For example, a present but empty ODP_FLOW_ATTR_ACTIONS on
+ * ODP_FLOW_CMD_SET replaces the existing set of actions by an empty "drop"
+ * action, whereas a missing ODP_FLOW_ATTR_ACTIONS leaves the existing
+ * actions, if any, unchanged.
+ *
+ * NLA_NESTED is different from NLA_UNSPEC in only two ways:
+ *
+ * - If the size of the nested attributes is zero, no further size checks
+ * are performed.
+ *
+ * - If the size of the nested attributes is not zero and no length
+ * parameter is specified the minimum size of nested attributes is
+ * NLA_HDRLEN.
+ *
+ * nla_parse_nested() validates that there is at least enough space for
+ * NLA_HDRLEN, so neither of these conditions are important, and we might
+ * as well use NLA_UNSPEC with old kernels.
+ */
+#undef NLA_NESTED
+#define NLA_NESTED NLA_UNSPEC
+#endif
+
+#ifndef NLA_PUT_BE16
+#define NLA_PUT_BE16(skb, attrtype, value) \
+ NLA_PUT_TYPE(skb, __be16, attrtype, value)
+#endif /* !NLA_PUT_BE16 */
+
+#ifndef NLA_PUT_BE32
+#define NLA_PUT_BE32(skb, attrtype, value) \
+ NLA_PUT_TYPE(skb, __be32, attrtype, value)
+#endif /* !NLA_PUT_BE32 */
+
+#ifndef NLA_PUT_BE64
+#define NLA_PUT_BE64(skb, attrtype, value) \
+ NLA_PUT_TYPE(skb, __be64, attrtype, value)
+#endif /* !NLA_PUT_BE64 */
+
+#ifndef HAVE_NLA_GET_BE16
+/**
+ * nla_get_be16 - return payload of __be16 attribute
+ * @nla: __be16 netlink attribute
+ */
+static inline __be16 nla_get_be16(const struct nlattr *nla)
+{
+ return *(__be16 *) nla_data(nla);
+}
+#endif /* !HAVE_NLA_GET_BE16 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/**
+ * nla_get_be32 - return payload of __be32 attribute
+ * @nla: __be32 netlink attribute
+ */
+static inline __be32 nla_get_be32(const struct nlattr *nla)
+{
+ return *(__be32 *) nla_data(nla);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
+/* These functions' nlattr source arguments weren't "const" before 2.6.29, so
+ * cast their arguments to the non-"const" versions. Using macros for this
+ * isn't exactly a brilliant idea, but it seems less error-prone than copying
+ * the definitions of all umpteen functions.
+ *
+ * Parenthesizing the function name on the right-hand side, e.g.
+ * "(nla_get_u64)", prevents the function-like macro from expanding
+ * recursively, so the call reaches the kernel's real inline function. */
+#define nla_get_u64(nla) (nla_get_u64) ((struct nlattr *) (nla))
+#define nla_get_u32(nla) (nla_get_u32) ((struct nlattr *) (nla))
+#define nla_get_u16(nla) (nla_get_u16) ((struct nlattr *) (nla))
+#define nla_get_u8(nla) (nla_get_u8) ((struct nlattr *) (nla))
+/* nla_get_be64 is handled separately below. */
+#define nla_get_be32(nla) (nla_get_be32) ((struct nlattr *) (nla))
+#define nla_get_be16(nla) (nla_get_be16) ((struct nlattr *) (nla))
+#define nla_get_be8(nla) (nla_get_be8) ((struct nlattr *) (nla))
+#define nla_get_flag(nla) (nla_get_flag) ((struct nlattr *) (nla))
+#define nla_get_msecs(nla) (nla_get_msecs)((struct nlattr *) (nla))
+#define nla_memcpy(dst, src, count) \
+ (nla_memcpy)(dst, (struct nlattr *)(src), count)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
+/* This function was introduced in 2.6.31, but initially it performed an
+ * unaligned access, so we replace it up to 2.6.34 where it was fixed. */
+#define nla_get_be64 rpl_nla_get_be64
+static inline __be64 nla_get_be64(const struct nlattr *nla)
+{
+ __be64 tmp;
+
+ /* Cast away "const": nla_memcpy() took a non-const source before 2.6.29. */
+ nla_memcpy(&tmp, (struct nlattr *) nla, sizeof(tmp));
+
+ return tmp;
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+/**
+ * nla_type - attribute type
+ * @nla: netlink attribute
+ *
+ * Returns the attribute type, masked with NLA_TYPE_MASK.
+ * Fallback for kernels before 2.6.24.
+ */
+static inline int nla_type(const struct nlattr *nla)
+{
+ return nla->nla_type & NLA_TYPE_MASK;
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+/* Before 2.6.22 nla_parse_nested() took non-const 'nla' and 'policy'
+ * arguments; from 2.6.22 up to 2.6.29 only 'nla' was still non-const.
+ * Cast accordingly so const-correct callers compile on old kernels. */
+#define nla_parse_nested(tb, maxtype, nla, policy) \
+ nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), (struct nla_policy *)(policy))
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
+#define nla_parse_nested(tb, maxtype, nla, policy) \
+ nla_parse_nested(tb, maxtype, (struct nlattr *)(nla), policy)
+#endif
+
+#ifndef nla_for_each_nested
+/* Iterate over the attributes nested inside attribute 'nla'. */
+#define nla_for_each_nested(pos, nla, rem) \
+ nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem)
+#endif
+
+#ifndef HAVE_NLA_FIND_NESTED
+/* Find the first attribute of type 'attrtype' nested inside 'nla',
+ * or NULL if there is none. */
+static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype)
+{
+ return nla_find(nla_data(nla), nla_len(nla), attrtype);
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/**
+ * nlmsg_report - need to report back to application?
+ * @nlh: netlink message header
+ *
+ * Returns 1 if a report back to the application is requested
+ * (i.e. NLM_F_ECHO is set in the message flags).
+ */
+static inline int nlmsg_report(const struct nlmsghdr *nlh)
+{
+ return !!(nlh->nlmsg_flags & NLM_F_ECHO);
+}
+
+/* Compat replacement for nlmsg_notify(); defined out of line elsewhere
+ * in the compat code. */
+extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb,
+   u32 pid, unsigned int group, int report,
+   gfp_t flags);
+#endif /* linux kernel < 2.6.19 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+/* Before 2.6.19 the 'flags' parameter was missing, so replace it. We have to
+ * #include <net/genetlink.h> first because the 2.6.18 version of that header
+ * has an inline call to nlmsg_multicast() without, of course, any 'flags'
+ * argument. */
+#define nlmsg_multicast rpl_nlmsg_multicast
+static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
+      u32 pid, unsigned int group, gfp_t flags)
+{
+ int err;
+
+ NETLINK_CB(skb).dst_group = group;
+
+ err = netlink_broadcast(sk, skb, pid, group, flags);
+ /* Normalize any positive (delivered) result to plain success. */
+ if (err > 0)
+  err = 0;
+
+ return err;
+}
+#endif /* linux kernel < 2.6.19 */
+
+#endif /* net/netlink.h */
--- /dev/null
+#ifndef __NET_PROTOCOL_WRAPPER_H
+#define __NET_PROTOCOL_WRAPPER_H 1
+
+#include_next <net/protocol.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
+/* Before 2.6.32 inet_{add,del}_protocol() took a non-const protocol
+ * pointer, so cast away "const" for const-correct callers. */
+#define inet_add_protocol(prot, num) inet_add_protocol((struct net_protocol *)(prot), num)
+#define inet_del_protocol(prot, num) inet_del_protocol((struct net_protocol *)(prot), num)
+#endif
+
+#endif
--- /dev/null
+#ifndef __NET_ROUTE_WRAPPER_H
+#define __NET_ROUTE_WRAPPER_H 1
+
+#include_next <net/route.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+/* Before 2.6.25 ip_route_output_key() had no network-namespace argument;
+ * drop 'net' when calling the older kernel function. */
+#define ip_route_output_key(net, rp, flp) \
+ ip_route_output_key((rp), (flp))
+
+#endif /* linux kernel < 2.6.25 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
+/* Fallback ip4_dst_hoplimit(): read the hop limit from the route metrics.
+ * NOTE(review): this returns the raw metric (possibly 0), with no fallback
+ * to the sysctl default TTL — confirm callers handle a zero result. */
+static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
+{
+ return dst_metric(dst, RTAX_HOPLIMIT);
+}
+#endif
--- /dev/null
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+#include <linux/netfilter_ipv4.h>
+#include <net/ip.h>
+
+/* Compat copy of __ip_local_out() for kernels before 2.6.25: fill in the
+ * IP header's total length and checksum, then run the LOCAL_OUT netfilter
+ * hook with dst_output() as the continuation. */
+int __ip_local_out(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ iph->tot_len = htons(skb->len);
+ ip_send_check(iph);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ /* Before 2.6.24 nf_hook() took a pointer to the skb pointer. */
+ return nf_hook(PF_INET, NF_IP_LOCAL_OUT, &skb, NULL, skb->dst->dev,
+        dst_output);
+#else
+ return nf_hook(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dst->dev,
+        dst_output);
+#endif /* kernel < 2.6.24 */
+}
+
+/* Compat copy of ip_local_out(): run __ip_local_out() and, if the netfilter
+ * hook let the packet pass (return value 1), hand it to dst_output(). */
+int ip_local_out(struct sk_buff *skb)
+{
+ int err;
+
+ err = __ip_local_out(skb);
+ if (likely(err == 1))
+  err = dst_output(skb);
+
+ return err;
+}
+
+#endif /* kernel < 2.6.25 */
--- /dev/null
+#ifndef HAVE_KMEMDUP
+
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/**
+ * kmemdup - duplicate region of memory
+ *
+ * @src: memory region to duplicate
+ * @len: memory region length
+ * @gfp: GFP mask to use
+ *
+ * Returns a newly kmalloc()ed copy of @src, or %NULL if the allocation
+ * fails. The caller is responsible for freeing the result with kfree().
+ */
+void *kmemdup(const void *src, size_t len, gfp_t gfp)
+{
+ void *p;
+
+ p = kmalloc(len, gfp);
+ if (p)
+  memcpy(p, src, len);
+ return p;
+}
+#endif
--- /dev/null
+#include <linux/if_link.h>
+#include <linux/netdevice.h>
+
+/* Linux 2.6.28 introduced dev_get_stats():
+ * const struct net_device_stats *dev_get_stats(struct net_device *dev);
+ *
+ * Linux 2.6.36 changed dev_get_stats() to:
+ * struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ * struct rtnl_link_stats64 *storage);
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+/* Compat dev_get_stats() with the 2.6.36 signature: fetch the device's
+ * net_device_stats via the pre-2.6.36 interface and widen each field into
+ * the caller-supplied rtnl_link_stats64. Returns 'storage'. */
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+     struct rtnl_link_stats64 *storage)
+{
+ const struct net_device_stats *stats;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
+ stats = dev->get_stats(dev);
+#else /* 2.6.28 < kernel version < 2.6.36 */
+ /* Parenthesized call bypasses the compat macro so the kernel's own
+  * one-argument dev_get_stats() is reached — presumed; the macro is
+  * defined elsewhere in the compat headers. */
+ stats = (dev_get_stats)(dev);
+#endif /* 2.6.28 < kernel version < 2.6.36 */
+
+ storage->rx_packets = stats->rx_packets;
+ storage->tx_packets = stats->tx_packets;
+ storage->rx_bytes = stats->rx_bytes;
+ storage->tx_bytes = stats->tx_bytes;
+ storage->rx_errors = stats->rx_errors;
+ storage->tx_errors = stats->tx_errors;
+ storage->rx_dropped = stats->rx_dropped;
+ storage->tx_dropped = stats->tx_dropped;
+ storage->multicast = stats->multicast;
+ storage->collisions = stats->collisions;
+ storage->rx_length_errors = stats->rx_length_errors;
+ storage->rx_over_errors = stats->rx_over_errors;
+ storage->rx_crc_errors = stats->rx_crc_errors;
+ storage->rx_frame_errors = stats->rx_frame_errors;
+ storage->rx_fifo_errors = stats->rx_fifo_errors;
+ storage->rx_missed_errors = stats->rx_missed_errors;
+ storage->tx_aborted_errors = stats->tx_aborted_errors;
+ storage->tx_carrier_errors = stats->tx_carrier_errors;
+ storage->tx_fifo_errors = stats->tx_fifo_errors;
+ storage->tx_heartbeat_errors = stats->tx_heartbeat_errors;
+ storage->tx_window_errors = stats->tx_window_errors;
+ storage->rx_compressed = stats->rx_compressed;
+ storage->tx_compressed = stats->tx_compressed;
+
+ return storage;
+}
+#endif /* kernel version < 2.6.36 */
--- /dev/null
+#if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)
+
+/* Prefix messages from this file with the module name. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+
+/* Compat __skb_warn_lro_forwarding(): rate-limited warning, naming the
+ * ingress device, that LRO-aggregated packets cannot be forwarded. */
+void __skb_warn_lro_forwarding(const struct sk_buff *skb)
+{
+ if (net_ratelimit())
+ pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
+ skb->dev->name);
+}
+
+#endif
--- /dev/null
+#include <linux/time.h>
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
+
+/* "set_normalized_timespec" is defined but not exported in kernels
+ * before 2.6.26, so modules need this local copy. */
+
+/**
+ * set_normalized_timespec - set timespec sec and nsec parts and normalize
+ *
+ * @ts: pointer to timespec variable to be set
+ * @sec: seconds to set
+ * @nsec: nanoseconds to set
+ *
+ * Set seconds and nanoseconds field of a timespec variable and
+ * normalize to the timespec storage format
+ *
+ * Note: The tv_nsec part is always in the range of
+ * 0 <= tv_nsec < NSEC_PER_SEC
+ * For negative values only the tv_sec field is negative !
+ */
+void set_normalized_timespec(struct timespec *ts,
+   time_t sec, long nsec)
+{
+ /* Bring nsec into [0, NSEC_PER_SEC) one whole second at a time. */
+ while (nsec >= NSEC_PER_SEC) {
+  nsec -= NSEC_PER_SEC;
+  ++sec;
+ }
+ while (nsec < 0) {
+  nsec += NSEC_PER_SEC;
+  --sec;
+ }
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+#endif /* linux kernel < 2.6.26 */
dh_clean -k
tar xzf openvswitch.tar.gz
cd openvswitch && ./configure --with-linux=$(KSRC) $(DATAPATH_CONFIGURE_OPTS) --with-build-number=$(BUILD_NUMBER)
- cd openvswitch && $(MAKE) -C datapath/linux-2.6
+ cd openvswitch && $(MAKE) -C datapath/linux
install -d -m755 $(DSTDIR)
- install -m644 openvswitch/datapath/linux-2.6/*_mod.ko $(DSTDIR)/
+ install -m644 openvswitch/datapath/linux/*_mod.ko $(DSTDIR)/
dh_installmodules
dh_installdocs
dh_installchangelogs
KSRC=%{_usrsrc}/kernels/%{kversion}${kvariant:+-$kvariant}-%{_target_cpu}
cd _kmod_build_$kvariant
../openvswitch-%{version}/configure --with-linux="$KSRC"
- %{__make} -C datapath/linux-2.6 %{?_smp_mflags}
+ %{__make} -C datapath/linux %{?_smp_mflags}
cd ..
done
export INSTALL_MOD_DIR=extra/%{kmod_name}
for kvariant in %{kvariants} ; do
KSRC=%{_usrsrc}/kernels/%{kversion}${kvariant:+-$kvariant}-%{_target_cpu}
- %{__make} -C "${KSRC}" modules_install M=$PWD/_kmod_build_$kvariant/datapath/linux-2.6
+ %{__make} -C "${KSRC}" modules_install M=$PWD/_kmod_build_$kvariant/datapath/linux
done
%{__install} -d %{buildroot}%{_sysconfdir}/depmod.d/
%{__install} kmod-%{kmod_name}.conf %{buildroot}%{_sysconfdir}/depmod.d/
for flavor in %flavors_to_build; do
mkdir _$flavor
(cd _$flavor && ../configure --with-linux="%{kernel_source $flavor}")
- %{__make} -C _$flavor/datapath/linux-2.6 %{?_smp_mflags}
+ %{__make} -C _$flavor/datapath/linux %{?_smp_mflags}
done
%install
export INSTALL_MOD_DIR=extra/%{name}
for flavor in %flavors_to_build ; do
make -C %{kernel_source $flavor} modules_install \
- M=$PWD/_$flavor/datapath/linux-2.6
+ M=$PWD/_$flavor/datapath/linux
done
%clean
$RPM_BUILD_ROOT/usr/lib/xsconsole/plugins-base/XSFeatureVSwitch.py
install -d -m 755 $RPM_BUILD_ROOT/lib/modules/%{xen_version}/extra/openvswitch
-find datapath/linux-2.6 -name *.ko -exec install -m 755 \{\} $RPM_BUILD_ROOT/lib/modules/%{xen_version}/extra/openvswitch \;
+find datapath/linux -name *.ko -exec install -m 755 \{\} $RPM_BUILD_ROOT/lib/modules/%{xen_version}/extra/openvswitch \;
install xenserver/uuid.py $RPM_BUILD_ROOT/usr/share/openvswitch/python
# Get rid of stuff we don't want to make RPM happy.