add newer, fixed IMQ patch for 2.6.25 and 2.6.26

SVN-Revision: 11433
Branch: master
Author: Imre Kaloz
Commit: 02a4a8019e (parent 46e3535d08)

Changed files:
  1. target/linux/generic-2.6/patches-2.6.25/150-netfilter_imq.patch (207 changed lines)
  2. target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch (217 changed lines)

target/linux/generic-2.6/patches-2.6.25/150-netfilter_imq.patch:

@@ -1,6 +1,6 @@
 --- /dev/null
 +++ b/drivers/net/imq.c
-@@ -0,0 +1,410 @@
+@@ -0,0 +1,464 @@
 +/*
 + * Pseudo-driver for the intermediate queue device.
 + *
@@ -24,8 +24,8 @@
 + * of IMQ again: http://www.linuximq.net
 + *
 + *
-+ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7 including
-+ * the following changes:
++ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
++ * including the following changes:
 + *
 + * - Correction of ipv6 support "+"s issue (Hasso Tepper)
 + * - Correction of imq_init_devs() issue that resulted in
@@ -49,6 +49,12 @@
 + * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
 + * I didn't forget anybody). I apologize again for my lack of time.
 + *
++ *
++ * 2008/06/07 - Changed imq.c to use qdisc_run() instead of
++ * qdisc_restart() and moved qdisc_run() to tasklet to avoid
++ * recursive locking. (Jussi Kivilinna)
++ *
++ *
 + * More info at: http://www.linuximq.net/ (Andre Correa)
 + */
 +
@@ -61,14 +67,17 @@
 +#include <linux/if_arp.h>
 +#include <linux/netfilter.h>
 +#include <linux/netfilter_ipv4.h>
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 + #include <linux/netfilter_ipv6.h>
 +#endif
 +#include <linux/imq.h>
 +#include <net/pkt_sched.h>
 +#include <net/netfilter/nf_queue.h>
 +
-+extern int qdisc_restart1(struct net_device *dev);
++struct imq_private {
++ struct tasklet_struct tasklet;
++ int tasklet_pending;
++};
 +
 +static nf_hookfn imq_nf_hook;
 +
@@ -96,7 +105,7 @@
 +#endif
 +};
 +
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 +static struct nf_hook_ops imq_ingress_ipv6 = {
 + .hook = imq_nf_hook,
 + .owner = THIS_MODULE,
@@ -132,29 +141,27 @@
 +
 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
 +{
-+ return (struct net_device_stats *)dev->priv;
++ return &dev->stats;
 +}
 +
 +/* called for packets kfree'd in qdiscs at places other than enqueue */
 +static void imq_skb_destructor(struct sk_buff *skb)
 +{
-+ struct nf_queue_entry *info = skb->nf_queue_entry;
++ struct nf_queue_entry *entry = skb->nf_queue_entry;
 +
-+ if (info) {
-+ if (info->indev)
-+ dev_put(info->indev);
-+ if (info->outdev)
-+ dev_put(info->outdev);
-+ kfree(info);
++ if (entry) {
++ if (entry->indev)
++ dev_put(entry->indev);
++ if (entry->outdev)
++ dev_put(entry->outdev);
++ kfree(entry);
 + }
 +}
 +
 +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
-+ struct net_device_stats *stats = (struct net_device_stats*) dev->priv;
-+
-+ stats->tx_bytes += skb->len;
-+ stats->tx_packets++;
++ dev->stats.tx_bytes += skb->len;
++ dev->stats.tx_packets++;
 +
 + skb->imq_flags = 0;
 + skb->destructor = NULL;
@@ -164,58 +171,56 @@
 + return 0;
 +}
 +
-+static int imq_nf_queue(struct nf_queue_entry *info, unsigned queue_num)
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
 +{
 + struct net_device *dev;
-+ struct net_device_stats *stats;
++ struct imq_private *priv;
 + struct sk_buff *skb2 = NULL;
 + struct Qdisc *q;
-+ unsigned int index = info->skb->imq_flags&IMQ_F_IFMASK;
++ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK;
 + int ret = -1;
 +
 + if (index > numdevs)
 + return -1;
 +
 + dev = imq_devs + index;
++ priv = netdev_priv(dev);
 + if (!(dev->flags & IFF_UP)) {
-+ info->skb->imq_flags = 0;
-+ nf_reinject(info, NF_ACCEPT);
++ entry->skb->imq_flags = 0;
++ nf_reinject(entry, NF_ACCEPT);
 + return 0;
 + }
 + dev->last_rx = jiffies;
 +
-+ if (info->skb->destructor) {
-+ skb2 = info->skb;
-+ info->skb = skb_clone(info->skb, GFP_ATOMIC);
-+ if (!info->skb)
++ if (entry->skb->destructor) {
++ skb2 = entry->skb;
++ entry->skb = skb_clone(entry->skb, GFP_ATOMIC);
++ if (!entry->skb)
 + return -1;
 + }
-+ info->skb->nf_queue_entry = info;
++ entry->skb->nf_queue_entry = entry;
 +
-+ stats = (struct net_device_stats *)dev->priv;
-+ stats->rx_bytes+= info->skb->len;
-+ stats->rx_packets++;
++ dev->stats.rx_bytes += entry->skb->len;
++ dev->stats.rx_packets++;
 +
 + spin_lock_bh(&dev->queue_lock);
 + q = dev->qdisc;
 + if (q->enqueue) {
-+ q->enqueue(skb_get(info->skb), q);
-+ if (skb_shared(info->skb)) {
-+ info->skb->destructor = imq_skb_destructor;
-+ kfree_skb(info->skb);
++ q->enqueue(skb_get(entry->skb), q);
++ if (skb_shared(entry->skb)) {
++ entry->skb->destructor = imq_skb_destructor;
++ kfree_skb(entry->skb);
 + ret = 0;
 + }
 + }
-+ if (spin_is_locked(&dev->_xmit_lock))
-+ netif_schedule(dev);
-+ else
-+ while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
-+ /* NOTHING */;
 +
 + spin_unlock_bh(&dev->queue_lock);
 +
++ if (!test_and_set_bit(1, &priv->tasklet_pending))
++ tasklet_schedule(&priv->tasklet);
++
 + if (skb2)
-+ kfree_skb(ret ? info->skb : skb2);
++ kfree_skb(ret ? entry->skb : skb2);
 +
 + return ret;
 +}
@@ -225,6 +230,18 @@
 + .outfn = imq_nf_queue,
 +};
 +
++static void qdisc_run_tasklet(unsigned long arg)
++{
++ struct net_device *dev = (struct net_device *)arg;
++ struct imq_private *priv = netdev_priv(dev);
++
++ spin_lock(&dev->queue_lock);
++ qdisc_run(dev);
++ spin_unlock(&dev->queue_lock);
++
++ clear_bit(1, &priv->tasklet_pending);
++}
++
 +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
 + const struct net_device *indev,
 + const struct net_device *outdev,
@@ -236,30 +253,59 @@
 + return NF_ACCEPT;
 +}
 +
++static int imq_close(struct net_device *dev)
++{
++ struct imq_private *priv = netdev_priv(dev);
++
++ tasklet_kill(&priv->tasklet);
++ netif_stop_queue(dev);
++
++ return 0;
++}
++
++static int imq_open(struct net_device *dev)
++{
++ struct imq_private *priv = netdev_priv(dev);
++
++ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev);
++ netif_start_queue(dev);
++
++ return 0;
++}
 +
 +static int __init imq_init_hooks(void)
 +{
 + int err;
 +
 + err = nf_register_queue_handler(PF_INET, &nfqh);
-+ if (err > 0)
++ if (err)
 + goto err1;
-+ if ((err = nf_register_hook(&imq_ingress_ipv4)))
++
++ err = nf_register_hook(&imq_ingress_ipv4);
++ if (err)
 + goto err2;
-+ if ((err = nf_register_hook(&imq_egress_ipv4)))
++
++ err = nf_register_hook(&imq_egress_ipv4);
++ if (err)
 + goto err3;
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-+ if ((err = nf_register_queue_handler(PF_INET6, &nfqh)))
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ err = nf_register_queue_handler(PF_INET6, &nfqh);
++ if (err)
 + goto err4;
-+ if ((err = nf_register_hook(&imq_ingress_ipv6)))
++
++ err = nf_register_hook(&imq_ingress_ipv6);
++ if (err)
 + goto err5;
-+ if ((err = nf_register_hook(&imq_egress_ipv6)))
++
++ err = nf_register_hook(&imq_egress_ipv6);
++ if (err)
 + goto err6;
 +#endif
 +
 + return 0;
 +
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 +err6:
 + nf_unregister_hook(&imq_ingress_ipv6);
 +err5:
@@ -277,7 +323,7 @@
 +
 +static void __exit imq_unhook(void)
 +{
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 + nf_unregister_hook(&imq_ingress_ipv6);
 + nf_unregister_hook(&imq_egress_ipv6);
 + nf_unregister_queue_handler(PF_INET6, &nfqh);
@@ -290,14 +336,17 @@
 +static int __init imq_dev_init(struct net_device *dev)
 +{
 + dev->hard_start_xmit = imq_dev_xmit;
++ dev->open = imq_open;
++ dev->get_stats = imq_get_stats;
++ dev->stop = imq_close;
 + dev->type = ARPHRD_VOID;
 + dev->mtu = 16000;
 + dev->tx_queue_len = 11000;
 + dev->flags = IFF_NOARP;
-+ dev->priv = kzalloc(sizeof(struct net_device_stats), GFP_KERNEL);
++
++ dev->priv = kzalloc(sizeof(struct imq_private), GFP_KERNEL);
 + if (dev->priv == NULL)
 + return -ENOMEM;
-+ dev->get_stats = imq_get_stats;
 +
 + return 0;
 +}
@@ -310,8 +359,7 @@
 +static int __init imq_init_devs(struct net *net)
 +{
 + struct net_device *dev;
-+ int i,j;
-+ j = numdevs;
++ int i, j;
 +
 + if (!numdevs || numdevs > IMQ_MAX_DEVS) {
 + printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
@@ -324,9 +372,9 @@
 + return -ENOMEM;
 +
 + /* we start counting at zero */
-+ numdevs--;
++ j = numdevs - 1;
 +
-+ for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
++ for (i = 0, dev = imq_devs; i <= j; i++, dev++) {
 + strcpy(dev->name, "imq%d");
 + dev->init = imq_dev_init;
 + dev->uninit = imq_dev_uninit;
@@ -335,7 +383,7 @@
 + if (register_netdev(dev) < 0)
 + goto err_register;
 + }
-+ printk(KERN_INFO "IMQ starting with %u devices...\n", j);
++ printk(KERN_INFO "IMQ starting with %u devices...\n", numdevs);
 + return 0;
 +
 +err_register:
@@ -360,11 +408,14 @@
 +{
 + int err;
 +
-+ if ((err = imq_init_devs(net))) {
++ err = imq_init_devs(net);
++ if (err) {
 + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
 + return err;
 + }
-+ if ((err = imq_init_hooks())) {
++
++ err = imq_init_hooks();
++ if (err) {
 + printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
 + imq_cleanup_devs();
 + return err;
@@ -394,23 +445,26 @@
 +}
 +
 +static struct pernet_operations __net_initdata imq_net_ops = {
 + .init = imq_init_module,
 + .exit = imq_exit_module,
 +};
 +
 +static int __init imq_init(void)
 +{
 + return register_pernet_device(&imq_net_ops);
 +}
 +
 +module_init(imq_init);
-+//module_exit(imq_cleanup_module);
++/*module_exit(imq_cleanup_module);*/
 +
 +module_param(numdevs, int, 0);
-+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
++MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
++ "be created)");
 +MODULE_AUTHOR("http://www.linuximq.net");
-+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
++ "http://www.linuximq.net/ for more information.");
 +MODULE_LICENSE("GPL");
++
 --- a/drivers/net/Kconfig
 +++ b/drivers/net/Kconfig
 @@ -117,6 +117,129 @@
@@ -716,7 +770,7 @@
 +config IP_NF_TARGET_IMQ
 + tristate "IMQ target support"
-+ depends on IP_NF_MANGLE && IMQ
++ depends on IP_NF_MANGLE
 + help
 + This option adds a `IMQ' target which is used to specify if and
 + to which IMQ device packets should get enqueued/dequeued.
@@ -818,7 +872,7 @@
 +config IP6_NF_TARGET_IMQ
 + tristate "IMQ target support"
-+ depends on IP6_NF_MANGLE && IMQ
++ depends on IP6_NF_MANGLE
 + help
 + This option adds a `IMQ' target which is used to specify if and
 + to which imq device packets should get enqueued/dequeued.
@@ -840,16 +894,11 @@
 --- a/net/sched/sch_generic.c
 +++ b/net/sched/sch_generic.c
-@@ -182,6 +182,12 @@
+@@ -203,6 +203,7 @@
+ return ret;
+ clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
 }
-+int qdisc_restart1(struct net_device *dev)
-+{
-+ return qdisc_restart(dev);
-+}
-+EXPORT_SYMBOL(qdisc_restart1);
-+
- void __qdisc_run(struct net_device *dev)
++EXPORT_SYMBOL(__qdisc_run);
+ static void dev_watchdog(unsigned long arg)
 {
- unsigned long start_time = jiffies;
target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch:

@@ -1,6 +1,6 @@
 --- /dev/null
 +++ b/drivers/net/imq.c
-@@ -0,0 +1,410 @@
+@@ -0,0 +1,464 @@
 +/*
 + * Pseudo-driver for the intermediate queue device.
 + *
@@ -24,8 +24,8 @@
 + * of IMQ again: http://www.linuximq.net
 + *
 + *
-+ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7 including
-+ * the following changes:
++ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
++ * including the following changes:
 + *
 + * - Correction of ipv6 support "+"s issue (Hasso Tepper)
 + * - Correction of imq_init_devs() issue that resulted in
@@ -49,6 +49,12 @@
 + * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
 + * I didn't forget anybody). I apologize again for my lack of time.
 + *
++ *
++ * 2008/06/07 - Changed imq.c to use qdisc_run() instead of
++ * qdisc_restart() and moved qdisc_run() to tasklet to avoid
++ * recursive locking. (Jussi Kivilinna)
++ *
++ *
 + * More info at: http://www.linuximq.net/ (Andre Correa)
 + */
 +
@@ -61,14 +67,17 @@
 +#include <linux/if_arp.h>
 +#include <linux/netfilter.h>
 +#include <linux/netfilter_ipv4.h>
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 + #include <linux/netfilter_ipv6.h>
 +#endif
 +#include <linux/imq.h>
 +#include <net/pkt_sched.h>
 +#include <net/netfilter/nf_queue.h>
 +
-+extern int qdisc_restart1(struct net_device *dev);
++struct imq_private {
++ struct tasklet_struct tasklet;
++ int tasklet_pending;
++};
 +
 +static nf_hookfn imq_nf_hook;
 +
@@ -96,7 +105,7 @@
 +#endif
 +};
 +
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 +static struct nf_hook_ops imq_ingress_ipv6 = {
 + .hook = imq_nf_hook,
 + .owner = THIS_MODULE,
@@ -132,29 +141,27 @@
 +
 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
 +{
-+ return (struct net_device_stats *)dev->priv;
++ return &dev->stats;
 +}
 +
 +/* called for packets kfree'd in qdiscs at places other than enqueue */
 +static void imq_skb_destructor(struct sk_buff *skb)
 +{
-+ struct nf_queue_entry *info = skb->nf_queue_entry;
++ struct nf_queue_entry *entry = skb->nf_queue_entry;
 +
-+ if (info) {
-+ if (info->indev)
-+ dev_put(info->indev);
-+ if (info->outdev)
-+ dev_put(info->outdev);
-+ kfree(info);
++ if (entry) {
++ if (entry->indev)
++ dev_put(entry->indev);
++ if (entry->outdev)
++ dev_put(entry->outdev);
++ kfree(entry);
 + }
 +}
 +
 +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
-+ struct net_device_stats *stats = (struct net_device_stats*) dev->priv;
-+
-+ stats->tx_bytes += skb->len;
-+ stats->tx_packets++;
++ dev->stats.tx_bytes += skb->len;
++ dev->stats.tx_packets++;
 +
 + skb->imq_flags = 0;
 + skb->destructor = NULL;
@@ -164,58 +171,56 @@
 + return 0;
 +}
 +
-+static int imq_nf_queue(struct nf_queue_entry *info, unsigned queue_num)
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
 +{
 + struct net_device *dev;
-+ struct net_device_stats *stats;
++ struct imq_private *priv;
 + struct sk_buff *skb2 = NULL;
 + struct Qdisc *q;
-+ unsigned int index = info->skb->imq_flags&IMQ_F_IFMASK;
++ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK;
 + int ret = -1;
 +
 + if (index > numdevs)
 + return -1;
 +
 + dev = imq_devs + index;
++ priv = netdev_priv(dev);
 + if (!(dev->flags & IFF_UP)) {
-+ info->skb->imq_flags = 0;
-+ nf_reinject(info, NF_ACCEPT);
++ entry->skb->imq_flags = 0;
++ nf_reinject(entry, NF_ACCEPT);
 + return 0;
 + }
 + dev->last_rx = jiffies;
 +
-+ if (info->skb->destructor) {
-+ skb2 = info->skb;
-+ info->skb = skb_clone(info->skb, GFP_ATOMIC);
-+ if (!info->skb)
++ if (entry->skb->destructor) {
++ skb2 = entry->skb;
++ entry->skb = skb_clone(entry->skb, GFP_ATOMIC);
++ if (!entry->skb)
 + return -1;
 + }
-+ info->skb->nf_queue_entry = info;
++ entry->skb->nf_queue_entry = entry;
 +
-+ stats = (struct net_device_stats *)dev->priv;
-+ stats->rx_bytes+= info->skb->len;
-+ stats->rx_packets++;
++ dev->stats.rx_bytes += entry->skb->len;
++ dev->stats.rx_packets++;
 +
 + spin_lock_bh(&dev->queue_lock);
 + q = dev->qdisc;
 + if (q->enqueue) {
-+ q->enqueue(skb_get(info->skb), q);
-+ if (skb_shared(info->skb)) {
-+ info->skb->destructor = imq_skb_destructor;
-+ kfree_skb(info->skb);
++ q->enqueue(skb_get(entry->skb), q);
++ if (skb_shared(entry->skb)) {
++ entry->skb->destructor = imq_skb_destructor;
++ kfree_skb(entry->skb);
 + ret = 0;
 + }
 + }
-+ if (spin_is_locked(&dev->_xmit_lock))
-+ netif_schedule(dev);
-+ else
-+ while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
-+ /* NOTHING */;
 +
 + spin_unlock_bh(&dev->queue_lock);
 +
++ if (!test_and_set_bit(1, &priv->tasklet_pending))
++ tasklet_schedule(&priv->tasklet);
++
 + if (skb2)
-+ kfree_skb(ret ? info->skb : skb2);
++ kfree_skb(ret ? entry->skb : skb2);
 +
 + return ret;
 +}
@@ -225,6 +230,18 @@
 + .outfn = imq_nf_queue,
 +};
 +
++static void qdisc_run_tasklet(unsigned long arg)
++{
++ struct net_device *dev = (struct net_device *)arg;
++ struct imq_private *priv = netdev_priv(dev);
++
++ spin_lock(&dev->queue_lock);
++ qdisc_run(dev);
++ spin_unlock(&dev->queue_lock);
++
++ clear_bit(1, &priv->tasklet_pending);
++}
++
 +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
 + const struct net_device *indev,
 + const struct net_device *outdev,
@@ -236,30 +253,59 @@
 + return NF_ACCEPT;
 +}
 +
++static int imq_close(struct net_device *dev)
++{
++ struct imq_private *priv = netdev_priv(dev);
++
++ tasklet_kill(&priv->tasklet);
++ netif_stop_queue(dev);
++
++ return 0;
++}
++
++static int imq_open(struct net_device *dev)
++{
++ struct imq_private *priv = netdev_priv(dev);
++
++ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev);
++ netif_start_queue(dev);
++
++ return 0;
++}
 +
 +static int __init imq_init_hooks(void)
 +{
 + int err;
 +
 + err = nf_register_queue_handler(PF_INET, &nfqh);
-+ if (err > 0)
++ if (err)
 + goto err1;
-+ if ((err = nf_register_hook(&imq_ingress_ipv4)))
++
++ err = nf_register_hook(&imq_ingress_ipv4);
++ if (err)
 + goto err2;
-+ if ((err = nf_register_hook(&imq_egress_ipv4)))
++
++ err = nf_register_hook(&imq_egress_ipv4);
++ if (err)
 + goto err3;
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-+ if ((err = nf_register_queue_handler(PF_INET6, &nfqh)))
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ err = nf_register_queue_handler(PF_INET6, &nfqh);
++ if (err)
 + goto err4;
-+ if ((err = nf_register_hook(&imq_ingress_ipv6)))
++
++ err = nf_register_hook(&imq_ingress_ipv6);
++ if (err)
 + goto err5;
-+ if ((err = nf_register_hook(&imq_egress_ipv6)))
++
++ err = nf_register_hook(&imq_egress_ipv6);
++ if (err)
 + goto err6;
 +#endif
 +
 + return 0;
 +
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 +err6:
 + nf_unregister_hook(&imq_ingress_ipv6);
 +err5:
@@ -277,7 +323,7 @@
 +
 +static void __exit imq_unhook(void)
 +{
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 + nf_unregister_hook(&imq_ingress_ipv6);
 + nf_unregister_hook(&imq_egress_ipv6);
 + nf_unregister_queue_handler(PF_INET6, &nfqh);
@@ -290,14 +336,17 @@
 +static int __init imq_dev_init(struct net_device *dev)
 +{
 + dev->hard_start_xmit = imq_dev_xmit;
++ dev->open = imq_open;
++ dev->get_stats = imq_get_stats;
++ dev->stop = imq_close;
 + dev->type = ARPHRD_VOID;
 + dev->mtu = 16000;
 + dev->tx_queue_len = 11000;
 + dev->flags = IFF_NOARP;
-+ dev->priv = kzalloc(sizeof(struct net_device_stats), GFP_KERNEL);
++
++ dev->priv = kzalloc(sizeof(struct imq_private), GFP_KERNEL);
 + if (dev->priv == NULL)
 + return -ENOMEM;
-+ dev->get_stats = imq_get_stats;
 +
 + return 0;
 +}
@@ -310,8 +359,7 @@
 +static int __init imq_init_devs(struct net *net)
 +{
 + struct net_device *dev;
-+ int i,j;
-+ j = numdevs;
++ int i, j;
 +
 + if (!numdevs || numdevs > IMQ_MAX_DEVS) {
 + printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
@@ -324,9 +372,9 @@
 + return -ENOMEM;
 +
 + /* we start counting at zero */
-+ numdevs--;
++ j = numdevs - 1;
 +
-+ for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
++ for (i = 0, dev = imq_devs; i <= j; i++, dev++) {
 + strcpy(dev->name, "imq%d");
 + dev->init = imq_dev_init;
 + dev->uninit = imq_dev_uninit;
@@ -335,7 +383,7 @@
 + if (register_netdev(dev) < 0)
 + goto err_register;
 + }
-+ printk(KERN_INFO "IMQ starting with %u devices...\n", j);
++ printk(KERN_INFO "IMQ starting with %u devices...\n", numdevs);
 + return 0;
 +
 +err_register:
@@ -360,11 +408,14 @@
 +{
 + int err;
 +
-+ if ((err = imq_init_devs(net))) {
++ err = imq_init_devs(net);
++ if (err) {
 + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
 + return err;
 + }
-+ if ((err = imq_init_hooks())) {
++
++ err = imq_init_hooks();
++ if (err) {
 + printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
 + imq_cleanup_devs();
 + return err;
@@ -394,23 +445,26 @@
 +}
 +
 +static struct pernet_operations __net_initdata imq_net_ops = {
 + .init = imq_init_module,
 + .exit = imq_exit_module,
 +};
 +
 +static int __init imq_init(void)
 +{
 + return register_pernet_device(&imq_net_ops);
 +}
 +
 +module_init(imq_init);
-+//module_exit(imq_cleanup_module);
++/*module_exit(imq_cleanup_module);*/
 +
 +module_param(numdevs, int, 0);
-+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
++MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
++ "be created)");
 +MODULE_AUTHOR("http://www.linuximq.net");
-+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
++ "http://www.linuximq.net/ for more information.");
 +MODULE_LICENSE("GPL");
++
 --- a/drivers/net/Kconfig
 +++ b/drivers/net/Kconfig
 @@ -117,6 +117,129 @@
@@ -545,7 +599,7 @@
 select CRC32
 --- a/drivers/net/Makefile
 +++ b/drivers/net/Makefile
-@@ -142,6 +142,7 @@
+@@ -143,6 +143,7 @@
 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
 obj-$(CONFIG_DUMMY) += dummy.o
@@ -589,7 +643,7 @@
 +#endif /* _IP6T_IMQ_H */
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
-@@ -300,6 +300,10 @@
+@@ -296,6 +296,10 @@
 struct nf_conntrack *nfct;
 struct sk_buff *nfct_reasm;
 #endif
@@ -600,7 +654,7 @@
 #ifdef CONFIG_BRIDGE_NETFILTER
 struct nf_bridge_info *nf_bridge;
 #endif
-@@ -1633,6 +1637,10 @@
+@@ -1736,6 +1740,10 @@
 dst->nfct_reasm = src->nfct_reasm;
 nf_conntrack_get_reasm(src->nfct_reasm);
 #endif
@@ -623,7 +677,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/stat.h>
-@@ -1545,7 +1548,11 @@
+@@ -1537,7 +1540,11 @@
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 if (likely(!skb->next)) {
@@ -716,7 +770,7 @@
 +config IP_NF_TARGET_IMQ
 + tristate "IMQ target support"
-+ depends on IP_NF_MANGLE && IMQ
++ depends on IP_NF_MANGLE
 + help
 + This option adds a `IMQ' target which is used to specify if and
 + to which IMQ device packets should get enqueued/dequeued.
@@ -730,7 +784,7 @@
 depends on IP_NF_FILTER
 --- a/net/ipv4/netfilter/Makefile
 +++ b/net/ipv4/netfilter/Makefile
-@@ -58,6 +58,7 @@
+@@ -55,6 +55,7 @@
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
 obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
 obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
@@ -818,7 +872,7 @@
 +config IP6_NF_TARGET_IMQ
 + tristate "IMQ target support"
-+ depends on IP6_NF_MANGLE && IMQ
++ depends on IP6_NF_MANGLE
 + help
 + This option adds a `IMQ' target which is used to specify if and
 + to which imq device packets should get enqueued/dequeued.
@@ -840,16 +894,11 @@
 --- a/net/sched/sch_generic.c
 +++ b/net/sched/sch_generic.c
-@@ -182,6 +182,12 @@
+@@ -203,6 +203,7 @@
+ return ret;
+ clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
 }
-+int qdisc_restart1(struct net_device *dev)
-+{
-+ return qdisc_restart(dev);
-+}
-+EXPORT_SYMBOL(qdisc_restart1);
-+
- void __qdisc_run(struct net_device *dev)
++EXPORT_SYMBOL(__qdisc_run);
+ static void dev_watchdog(unsigned long arg)
 {
- unsigned long start_time = jiffies;