Skip to content

Instantly share code, notes, and snippets.

@shangjiyu
Last active August 29, 2015 14:24
Show Gist options
  • Save shangjiyu/6a15ec7dc84e3059d776 to your computer and use it in GitHub Desktop.
Save shangjiyu/6a15ec7dc84e3059d776 to your computer and use it in GitHub Desktop.
add_gargoyle_netfilter&itpables_modules.patch
Index: target/linux/generic/config-3.18
===================================================================
--- target/linux/generic/config-3.18 (revision 46316)
+++ target/linux/generic/config-3.18 (working copy)
@@ -4633,6 +4633,11 @@
# CONFIG_ZLIB_DEFLATE is not set
# CONFIG_ZLIB_INFLATE is not set
# CONFIG_ZNET is not set
+CONFIG_IMQ_NUM_DEVS=2
+# CONFIG_IMQ_BEHAVIOR_AA is not set
+# CONFIG_IMQ_BEHAVIOR_AB is not set
+CONFIG_IMQ_BEHAVIOR_BA=y
+# CONFIG_IMQ_BEHAVIOR_BB is not set
# CONFIG_ZPOOL is not set
CONFIG_ZONE_DMA=y
CONFIG_ZONE_DMA_FLAG=1
@@ -4639,3 +4644,7 @@
# CONFIG_ZRAM is not set
# CONFIG_ZRAM_LZ4_COMPRESS is not set
# CONFIG_ZSMALLOC is not set
+CONFIG_IP_NF_MATCH_WEBURL=m
+CONFIG_IP_NF_MATCH_WEBMON=m
+CONFIG_IP_NF_MATCH_TIMERANGE=m
+CONFIG_IP_NF_MATCH_BANDWIDTH=m
Index: target/linux/generic/patches-3.18/690-imq.patch
===================================================================
--- target/linux/generic/patches-3.18/690-imq.patch (revision 0)
+++ target/linux/generic/patches-3.18/690-imq.patch (working copy)
@@ -0,0 +1,1785 @@
+net: add Intermediate Queueing Device (imq)
+
+From: Jussi Kivilinna <jussi.kivilinna@iki.fi>
+
+This patch is for kernel version 3.12.4+.
+
+See: http://linuximq.net/
+
+Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
+---
+ drivers/net/Kconfig | 119 ++++
+ drivers/net/Makefile | 1
+ drivers/net/imq.c | 1007 +++++++++++++++++++++++++++++++
+ include/linux/imq.h | 13
+ include/linux/netfilter/xt_IMQ.h | 9
+ include/linux/netfilter_ipv4/ipt_IMQ.h | 10
+ include/linux/netfilter_ipv6/ip6t_IMQ.h | 10
+ include/linux/skbuff.h | 22 +
+ include/net/netfilter/nf_queue.h | 6
+ include/uapi/linux/netfilter.h | 3
+ net/core/dev.c | 8
+ net/core/skbuff.c | 112 +++
+ net/ipv6/ip6_output.c | 10
+ net/netfilter/Kconfig | 12
+ net/netfilter/Makefile | 1
+ net/netfilter/core.c | 6
+ net/netfilter/nf_internals.h | 2
+ net/netfilter/nf_queue.c | 36 +
+ net/netfilter/xt_IMQ.c | 72 ++
+ 19 files changed, 1449 insertions(+), 10 deletions(-)
+ create mode 100644 drivers/net/imq.c
+ create mode 100644 include/linux/imq.h
+ create mode 100644 include/linux/netfilter/xt_IMQ.h
+ create mode 100644 include/linux/netfilter_ipv4/ipt_IMQ.h
+ create mode 100644 include/linux/netfilter_ipv6/ip6t_IMQ.h
+ create mode 100644 net/netfilter/xt_IMQ.c
+
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index b45b240..5a20da0 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -203,6 +203,125 @@ config RIONET_RX_SIZE
+ depends on RIONET
+ default "128"
+
++config IMQ
++ tristate "IMQ (intermediate queueing device) support"
++ depends on NETDEVICES && NETFILTER
++ ---help---
++ The IMQ device(s) is used as placeholder for QoS queueing
++ disciplines. Every packet entering/leaving the IP stack can be
++ directed through the IMQ device where it's enqueued/dequeued to the
++ attached qdisc. This allows you to treat network devices as classes
++ and distribute bandwidth among them. Iptables is used to specify
++ through which IMQ device, if any, packets travel.
++
++ More information at: http://www.linuximq.net/
++
++ To compile this driver as a module, choose M here: the module
++ will be called imq. If unsure, say N.
++
++choice
++ prompt "IMQ behavior (PRE/POSTROUTING)"
++ depends on IMQ
++ default IMQ_BEHAVIOR_AB
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ IMQ can work in any of the following ways:
++
++ PREROUTING | POSTROUTING
++ -----------------|-------------------
++ #1 After NAT | After NAT
++ #2 After NAT | Before NAT
++ #3 Before NAT | After NAT
++ #4 Before NAT | Before NAT
++
++ The default behavior is to hook before NAT on PREROUTING
++ and after NAT on POSTROUTING (#3).
++
++ These settings are especially useful when trying to use IMQ
++ to shape NATed clients.
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_AA
++ bool "IMQ AA"
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ Choosing this option will make IMQ hook like this:
++
++ PREROUTING: After NAT
++ POSTROUTING: After NAT
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_AB
++ bool "IMQ AB"
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ Choosing this option will make IMQ hook like this:
++
++ PREROUTING: After NAT
++ POSTROUTING: Before NAT
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_BA
++ bool "IMQ BA"
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ Choosing this option will make IMQ hook like this:
++
++ PREROUTING: Before NAT
++ POSTROUTING: After NAT
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_BB
++ bool "IMQ BB"
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ Choosing this option will make IMQ hook like this:
++
++ PREROUTING: Before NAT
++ POSTROUTING: Before NAT
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++endchoice
++
++config IMQ_NUM_DEVS
++ int "Number of IMQ devices"
++ range 2 16
++ depends on IMQ
++ default "16"
++ help
++ This setting defines how many IMQ devices will be created.
++
++ The default value is 16.
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
+ config TUN
+ tristate "Universal TUN/TAP device driver support"
+ select CRC32
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index 3fef8a8..12dafc0 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -9,6 +9,7 @@ obj-$(CONFIG_BONDING) += bonding/
+ obj-$(CONFIG_DUMMY) += dummy.o
+ obj-$(CONFIG_EQUALIZER) += eql.o
+ obj-$(CONFIG_IFB) += ifb.o
++obj-$(CONFIG_IMQ) += imq.o
+ obj-$(CONFIG_MACVLAN) += macvlan.o
+ obj-$(CONFIG_MACVTAP) += macvtap.o
+ obj-$(CONFIG_MII) += mii.o
+diff --git a/drivers/net/imq.c b/drivers/net/imq.c
+new file mode 100644
+index 0000000..801bc8c
+--- /dev/null
++++ b/drivers/net/imq.c
+@@ -0,0 +1,1012 @@
++/*
++ * Pseudo-driver for the intermediate queue device.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Authors: Patrick McHardy, <kaber@trash.net>
++ *
++ * The first version was written by Martin Devera, <devik@cdi.cz>
++ *
++ * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
++ * - Update patch to 2.4.21
++ * Sebastian Strollo <sstrollo@nortelnetworks.com>
++ * - Fix "Dead-loop on netdevice imq"-issue
++ * Marcel Sebek <sebek64@post.cz>
++ * - Update to 2.6.2-rc1
++ *
++ * After some time of inactivity there is a group taking care
++ * of IMQ again: http://www.linuximq.net
++ *
++ *
++ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
++ * including the following changes:
++ *
++ * - Correction of ipv6 support "+"s issue (Hasso Tepper)
++ * - Correction of imq_init_devs() issue that resulted in
++ * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
++ * - Addition of functionality to choose number of IMQ devices
++ * during kernel config (Andre Correa)
++ * - Addition of functionality to choose how IMQ hooks on
++ * PRE and POSTROUTING (after or before NAT) (Andre Correa)
++ * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
++ *
++ *
++ * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
++ * released with almost no problems. 2.6.14-x was released
++ * with some important changes: nfcache was removed; After
++ * some weeks of trouble we figured out that some IMQ fields
++ * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
++ * These functions are correctly patched by this new patch version.
++ *
++ * Thanks for all who helped to figure out all the problems with
++ * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
++ * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
++ * I didn't forget anybody). I apologize again for my lack of time.
++ *
++ *
++ * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
++ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
++ * recursive locking. New initialization routines to fix 'rmmod' not
++ * working anymore. Used code from ifb.c. (Jussi Kivilinna)
++ *
++ * 2008/08/06 - 2.6.26 - (JK)
++ * - Replaced tasklet with 'netif_schedule()'.
++ * - Cleaned up and added comments for imq_nf_queue().
++ *
++ * 2009/04/12
++ * - Add skb_save_cb/skb_restore_cb helper functions for backuping
++ * control buffer. This is needed because qdisc-layer on kernels
++ * 2.6.27 and newer overwrite control buffer. (Jussi Kivilinna)
++ * - Add better locking for IMQ device. Hopefully this will solve
++ * SMP issues. (Jussi Kivilinna)
++ * - Port to 2.6.27
++ * - Port to 2.6.28
++ * - Port to 2.6.29 + fix rmmod not working
++ *
++ * 2009/04/20 - (Jussi Kivilinna)
++ * - Use netdevice feature flags to avoid extra packet handling
++ * by core networking layer and possibly increase performance.
++ *
++ * 2009/09/26 - (Jussi Kivilinna)
++ * - Add imq_nf_reinject_lockless to fix deadlock with
++ * imq_nf_queue/imq_nf_reinject.
++ *
++ * 2009/12/08 - (Jussi Kivilinna)
++ * - Port to 2.6.32
++ * - Add check for skb->nf_queue_entry==NULL in imq_dev_xmit()
++ * - Also add better error checking for skb->nf_queue_entry usage
++ *
++ * 2010/02/25 - (Jussi Kivilinna)
++ * - Port to 2.6.33
++ *
++ * 2010/08/15 - (Jussi Kivilinna)
++ * - Port to 2.6.35
++ * - Simplify hook registration by using nf_register_hooks.
++ * - nf_reinject doesn't need spinlock around it, therefore remove
++ * imq_nf_reinject function. Other nf_reinject users protect
++ * their own data with spinlock. With IMQ however all data is
++ * needed is stored per skbuff, so no locking is needed.
++ * - Changed IMQ to use 'separate' NF_IMQ_QUEUE instead of
++ * NF_QUEUE, this allows working coexistance of IMQ and other
++ * NF_QUEUE users.
++ * - Make IMQ multi-queue. Number of IMQ device queues can be
++ * increased with 'numqueues' module parameters. Default number
++ * of queues is 1, in other words by default IMQ works as
++ * single-queue device. Multi-queue selection is based on
++ * IFB multi-queue patch by Changli Gao <xiaosuo@gmail.com>.
++ *
++ * 2011/03/18 - (Jussi Kivilinna)
++ * - Port to 2.6.38
++ *
++ * 2011/07/12 - (syoder89@gmail.com)
++ * - Crash fix that happens when the receiving interface has more
++ * than one queue (add missing skb_set_queue_mapping in
++ * imq_select_queue).
++ *
++ * 2011/07/26 - (Jussi Kivilinna)
++ * - Add queue mapping checks for packets exiting IMQ.
++ * - Port to 3.0
++ *
++ * 2011/08/16 - (Jussi Kivilinna)
++ * - Clear IFF_TX_SKB_SHARING flag that was added for linux 3.0.2
++ *
++ * 2011/11/03 - Germano Michel <germanomichel@gmail.com>
++ * - Fix IMQ for net namespaces
++ *
++ * 2011/11/04 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ * - Port to 3.1
++ * - Clean-up, move 'get imq device pointer by imqX name' to
++ * separate function from imq_nf_queue().
++ *
++ * 2012/01/05 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ * - Port to 3.2
++ *
++ * 2012/03/19 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ * - Port to 3.3
++ *
++ * 2012/12/12 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ * - Port to 3.7
++ * - Fix checkpatch.pl warnings
++ *
++ * 2013/09/10 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ * - Fixed GSO handling for 3.10, see imq_nf_queue() for comments.
++ * - Don't copy skb->cb_next when copying or cloning skbuffs.
++ *
++ * 2013/09/16 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ * - Port to 3.11
++ *
++ * 2013/11/12 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ * - Port to 3.12
++ *
++ * Also, many thanks to pablo Sebastian Greco for making the initial
++ * patch and to those who helped the testing.
++ *
++ * More info at: http://www.linuximq.net/ (Andre Correa)
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/moduleparam.h>
++#include <linux/list.h>
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_arp.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_ipv4.h>
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ #include <linux/netfilter_ipv6.h>
++#endif
++#include <linux/imq.h>
++#include <net/pkt_sched.h>
++#include <net/netfilter/nf_queue.h>
++#include <net/sock.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <linux/if_vlan.h>
++#include <linux/if_pppox.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num);
++
++static unsigned int imq_nf_hook(const struct nf_hook_ops *ops,
++ struct sk_buff *pskb,
++ const struct net_device *indev,
++ const struct net_device *outdev,
++ int (*okfn)(struct sk_buff *));
++
++static struct nf_hook_ops imq_ops[] = {
++ {
++ /* imq_ingress_ipv4 */
++ .hook = imq_nf_hook,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_INET_PRE_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++ .priority = NF_IP_PRI_MANGLE + 1,
++#else
++ .priority = NF_IP_PRI_NAT_DST + 1,
++#endif
++ },
++ {
++ /* imq_egress_ipv4 */
++ .hook = imq_nf_hook,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_INET_POST_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
++ .priority = NF_IP_PRI_LAST,
++#else
++ .priority = NF_IP_PRI_NAT_SRC - 1,
++#endif
++ },
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ {
++ /* imq_ingress_ipv6 */
++ .hook = imq_nf_hook,
++ .owner = THIS_MODULE,
++ .pf = PF_INET6,
++ .hooknum = NF_INET_PRE_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++ .priority = NF_IP6_PRI_MANGLE + 1,
++#else
++ .priority = NF_IP6_PRI_NAT_DST + 1,
++#endif
++ },
++ {
++ /* imq_egress_ipv6 */
++ .hook = imq_nf_hook,
++ .owner = THIS_MODULE,
++ .pf = PF_INET6,
++ .hooknum = NF_INET_POST_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
++ .priority = NF_IP6_PRI_LAST,
++#else
++ .priority = NF_IP6_PRI_NAT_SRC - 1,
++#endif
++ },
++#endif
++};
++
++#if defined(CONFIG_IMQ_NUM_DEVS)
++static int numdevs = CONFIG_IMQ_NUM_DEVS;
++#else
++static int numdevs = IMQ_MAX_DEVS;
++#endif
++
++static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
++
++#define IMQ_MAX_QUEUES 32
++static int numqueues = 1;
++static u32 imq_hashrnd;
++
++static inline __be16 pppoe_proto(const struct sk_buff *skb)
++{
++ return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
++ sizeof(struct pppoe_hdr)));
++}
++
++static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
++{
++ unsigned int pull_len;
++ u16 protocol = skb->protocol;
++ u32 addr1, addr2;
++ u32 hash, ihl = 0;
++ union {
++ u16 in16[2];
++ u32 in32;
++ } ports;
++ u8 ip_proto;
++
++ pull_len = 0;
++
++recheck:
++ switch (protocol) {
++ case htons(ETH_P_8021Q): {
++ if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
++ goto other;
++
++ pull_len += VLAN_HLEN;
++ skb->network_header += VLAN_HLEN;
++
++ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
++ goto recheck;
++ }
++
++ case htons(ETH_P_PPP_SES): {
++ if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
++ goto other;
++
++ pull_len += PPPOE_SES_HLEN;
++ skb->network_header += PPPOE_SES_HLEN;
++
++ protocol = pppoe_proto(skb);
++ goto recheck;
++ }
++
++ case htons(ETH_P_IP): {
++ const struct iphdr *iph = ip_hdr(skb);
++
++ if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
++ goto other;
++
++ addr1 = iph->daddr;
++ addr2 = iph->saddr;
++
++ ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
++ iph->protocol : 0;
++ ihl = ip_hdrlen(skb);
++
++ break;
++ }
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ case htons(ETH_P_IPV6): {
++ const struct ipv6hdr *iph = ipv6_hdr(skb);
++ __be16 fo = 0;
++
++ if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
++ goto other;
++
++ addr1 = iph->daddr.s6_addr32[3];
++ addr2 = iph->saddr.s6_addr32[3];
++ ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
++ &fo);
++ if (unlikely(ihl < 0))
++ goto other;
++
++ break;
++ }
++#endif
++ default:
++other:
++ if (pull_len != 0) {
++ skb_push(skb, pull_len);
++ skb->network_header -= pull_len;
++ }
++
++ return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
++ }
++
++ if (addr1 > addr2)
++ swap(addr1, addr2);
++
++ switch (ip_proto) {
++ case IPPROTO_TCP:
++ case IPPROTO_UDP:
++ case IPPROTO_DCCP:
++ case IPPROTO_ESP:
++ case IPPROTO_AH:
++ case IPPROTO_SCTP:
++ case IPPROTO_UDPLITE: {
++ if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
++ if (ports.in16[0] > ports.in16[1])
++ swap(ports.in16[0], ports.in16[1]);
++ break;
++ }
++ /* fall-through */
++ }
++ default:
++ ports.in32 = 0;
++ break;
++ }
++
++ if (pull_len != 0) {
++ skb_push(skb, pull_len);
++ skb->network_header -= pull_len;
++ }
++
++ hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
++
++ return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
++}
++
++static inline bool sk_tx_queue_recorded(struct sock *sk)
++{
++ return (sk_tx_queue_get(sk) >= 0);
++}
++
++static struct netdev_queue *imq_select_queue(struct net_device *dev,
++ struct sk_buff *skb)
++{
++ u16 queue_index = 0;
++ u32 hash;
++
++ if (likely(dev->real_num_tx_queues == 1))
++ goto out;
++
++ /* IMQ can be receiving ingress or egress packets. */
++
++ /* Check first for if rx_queue is set */
++ if (skb_rx_queue_recorded(skb)) {
++ queue_index = skb_get_rx_queue(skb);
++ goto out;
++ }
++
++ /* Check if socket has tx_queue set */
++ if (sk_tx_queue_recorded(skb->sk)) {
++ queue_index = sk_tx_queue_get(skb->sk);
++ goto out;
++ }
++
++ /* Try use socket hash */
++ if (skb->sk && skb->sk->sk_hash) {
++ hash = skb->sk->sk_hash;
++ queue_index =
++ (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
++ goto out;
++ }
++
++ /* Generate hash from packet data */
++ queue_index = imq_hash(dev, skb);
++
++out:
++ if (unlikely(queue_index >= dev->real_num_tx_queues))
++ queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
++
++ skb_set_queue_mapping(skb, queue_index);
++ return netdev_get_tx_queue(dev, queue_index);
++}
++
++static struct net_device_stats *imq_get_stats(struct net_device *dev)
++{
++ return &dev->stats;
++}
++
++/* called for packets kfree'd in qdiscs at places other than enqueue */
++static void imq_skb_destructor(struct sk_buff *skb)
++{
++ struct nf_queue_entry *entry = skb->nf_queue_entry;
++
++ skb->nf_queue_entry = NULL;
++
++ if (entry) {
++ nf_queue_entry_release_refs(entry);
++ kfree(entry);
++ }
++
++ skb_restore_cb(skb); /* kfree backup */
++}
++
++static void imq_done_check_queue_mapping(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ unsigned int queue_index;
++
++ /* Don't let queue_mapping be left too large after exiting IMQ */
++ if (likely(skb->dev != dev && skb->dev != NULL)) {
++ queue_index = skb_get_queue_mapping(skb);
++ if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
++ queue_index = (u16)((u32)queue_index %
++ skb->dev->real_num_tx_queues);
++ skb_set_queue_mapping(skb, queue_index);
++ }
++ } else {
++ /* skb->dev was IMQ device itself or NULL, be on safe side and
++ * just clear queue mapping.
++ */
++ skb_set_queue_mapping(skb, 0);
++ }
++}
++
++static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct nf_queue_entry *entry = skb->nf_queue_entry;
++
++ skb->nf_queue_entry = NULL;
++ dev->trans_start = jiffies;
++
++ dev->stats.tx_bytes += skb->len;
++ dev->stats.tx_packets++;
++
++ if (unlikely(entry == NULL)) {
++ /* We don't know what is going on here.. packet is queued for
++ * imq device, but (probably) not by us.
++ *
++ * If this packet was not send here by imq_nf_queue(), then
++ * skb_save_cb() was not used and skb_free() should not show:
++ * WARNING: IMQ: kfree_skb: skb->cb_next:..
++ * and/or
++ * WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
++ *
++ * However if this message is shown, then IMQ is somehow broken
++ * and you should report this to linuximq.net.
++ */
++
++ /* imq_dev_xmit is black hole that eats all packets, report that
++ * we eat this packet happily and increase dropped counters.
++ */
++
++ dev->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++ }
++
++ skb_restore_cb(skb); /* restore skb->cb */
++
++ skb->imq_flags = 0;
++ skb->destructor = NULL;
++
++ imq_done_check_queue_mapping(skb, dev);
++
++ nf_reinject(entry, NF_ACCEPT);
++
++ return NETDEV_TX_OK;
++}
++
++static struct net_device *get_imq_device_by_index(int index)
++{
++ struct net_device *dev = NULL;
++ struct net *net;
++ char buf[8];
++
++ /* get device by name and cache result */
++ snprintf(buf, sizeof(buf), "imq%d", index);
++
++ /* Search device from all namespaces. */
++ for_each_net(net) {
++ dev = dev_get_by_name(net, buf);
++ if (dev)
++ break;
++ }
++
++ if (WARN_ON_ONCE(dev == NULL)) {
++ /* IMQ device not found. Exotic config? */
++ return ERR_PTR(-ENODEV);
++ }
++
++ imq_devs_cache[index] = dev;
++ dev_put(dev);
++
++ return dev;
++}
++
++static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
++{
++ struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
++ if (entry) {
++ if (nf_queue_entry_get_refs(entry))
++ return entry;
++ kfree(entry);
++ }
++ return NULL;
++}
++
++#ifdef CONFIG_BRIDGE_NETFILTER
++/* When called from bridge netfilter, skb->data must point to MAC header
++ * before calling skb_gso_segment(). Else, original MAC header is lost
++ * and segmented skbs will be sent to wrong destination.
++ */
++static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
++{
++ if (skb->nf_bridge)
++ __skb_push(skb, skb->network_header - skb->mac_header);
++}
++
++static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
++{
++ if (skb->nf_bridge)
++ __skb_pull(skb, skb->network_header - skb->mac_header);
++}
++#else
++#define nf_bridge_adjust_skb_data(s) do {} while (0)
++#define nf_bridge_adjust_segmented_data(s) do {} while (0)
++#endif
++
++static void free_entry(struct nf_queue_entry *entry)
++{
++ nf_queue_entry_release_refs(entry);
++ kfree(entry);
++}
++
++static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
++
++static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
++ struct net_device *dev, struct sk_buff *skb)
++{
++ int ret = -ENOMEM;
++ struct nf_queue_entry *entry_seg;
++
++ nf_bridge_adjust_segmented_data(skb);
++
++ if (skb->next == NULL) { /* last packet, no need to copy entry */
++ struct sk_buff *gso_skb = entry->skb;
++ entry->skb = skb;
++ ret = __imq_nf_queue(entry, dev);
++ if (ret)
++ entry->skb = gso_skb;
++ return ret;
++ }
++
++ skb->next = NULL;
++
++ entry_seg = nf_queue_entry_dup(entry);
++ if (entry_seg) {
++ entry_seg->skb = skb;
++ ret = __imq_nf_queue(entry_seg, dev);
++ if (ret)
++ free_entry(entry_seg);
++ }
++ return ret;
++}
++
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
++{
++ struct sk_buff *skb, *segs;
++ struct net_device *dev;
++ unsigned int queued;
++ int index, retval, err;
++
++ index = entry->skb->imq_flags & IMQ_F_IFMASK;
++ if (unlikely(index > numdevs - 1)) {
++ if (net_ratelimit())
++ pr_warn("IMQ: invalid device specified, highest is %u\n",
++ numdevs - 1);
++ retval = -EINVAL;
++ goto out_no_dev;
++ }
++
++ /* check for imq device by index from cache */
++ dev = imq_devs_cache[index];
++ if (unlikely(!dev)) {
++ dev = get_imq_device_by_index(index);
++ if (IS_ERR(dev)) {
++ retval = PTR_ERR(dev);
++ goto out_no_dev;
++ }
++ }
++
++ if (unlikely(!(dev->flags & IFF_UP))) {
++ entry->skb->imq_flags = 0;
++ retval = -ECANCELED;
++ goto out_no_dev;
++ }
++
++ if (!skb_is_gso(entry->skb))
++ return __imq_nf_queue(entry, dev);
++
++ /* Since 3.10.x, GSO handling moved here as result of upstream commit
++ * a5fedd43d5f6c94c71053a66e4c3d2e35f1731a2 (netfilter: move
++ * skb_gso_segment into nfnetlink_queue module).
++ *
++ * Following code replicates the gso handling from
++ * 'net/netfilter/nfnetlink_queue_core.c':nfqnl_enqueue_packet().
++ */
++
++ skb = entry->skb;
++
++ switch (entry->pf) {
++ case NFPROTO_IPV4:
++ skb->protocol = htons(ETH_P_IP);
++ break;
++ case NFPROTO_IPV6:
++ skb->protocol = htons(ETH_P_IPV6);
++ break;
++ }
++
++ nf_bridge_adjust_skb_data(skb);
++ segs = skb_gso_segment(skb, 0);
++ /* Does not use PTR_ERR to limit the number of error codes that can be
++ * returned by nf_queue. For instance, callers rely on -ECANCELED to
++ * mean 'ignore this hook'.
++ */
++ err = -ENOBUFS;
++ if (IS_ERR(segs))
++ goto out_err;
++ queued = 0;
++ err = 0;
++ do {
++ struct sk_buff *nskb = segs->next;
++ if (nskb && nskb->next)
++ nskb->cb_next = NULL;
++ if (err == 0)
++ err = __imq_nf_queue_gso(entry, dev, segs);
++ if (err == 0)
++ queued++;
++ else
++ kfree_skb(segs);
++ segs = nskb;
++ } while (segs);
++
++ if (queued) {
++ if (err) /* some segments are already queued */
++ free_entry(entry);
++ kfree_skb(skb);
++ return 0;
++ }
++
++out_err:
++ nf_bridge_adjust_segmented_data(skb);
++ retval = err;
++out_no_dev:
++ return retval;
++}
++
++static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev)
++{
++ struct sk_buff *skb_orig, *skb, *skb_shared;
++ struct Qdisc *q;
++ struct netdev_queue *txq;
++ spinlock_t *root_lock;
++ int users;
++ int retval = -EINVAL;
++ unsigned int orig_queue_index;
++
++ dev->last_rx = jiffies;
++
++ skb = entry->skb;
++ skb_orig = NULL;
++
++ /* skb has owner? => make clone */
++ if (unlikely(skb->destructor)) {
++ skb_orig = skb;
++ skb = skb_clone(skb, GFP_ATOMIC);
++ if (unlikely(!skb)) {
++ retval = -ENOMEM;
++ goto out;
++ }
++ skb->cb_next = NULL;
++ entry->skb = skb;
++ }
++
++ skb->nf_queue_entry = entry;
++
++ dev->stats.rx_bytes += skb->len;
++ dev->stats.rx_packets++;
++
++ if (!skb->dev) {
++ /* skb->dev == NULL causes problems, try to find the cause. */
++ if (net_ratelimit()) {
++ dev_warn(&dev->dev,
++ "received packet with skb->dev == NULL\n");
++ dump_stack();
++ }
++
++ skb->dev = dev;
++ }
++
++ /* Disables softirqs for lock below */
++ rcu_read_lock_bh();
++
++ /* Multi-queue selection */
++ orig_queue_index = skb_get_queue_mapping(skb);
++ txq = imq_select_queue(dev, skb);
++
++ q = rcu_dereference(txq->qdisc);
++ if (unlikely(!q->enqueue))
++ goto packet_not_eaten_by_imq_dev;
++
++ root_lock = qdisc_lock(q);
++ spin_lock(root_lock);
++
++ users = atomic_read(&skb->users);
++
++ skb_shared = skb_get(skb); /* increase reference count by one */
++
++ /* backup skb->cb, as qdisc layer will overwrite it */
++ skb_save_cb(skb_shared);
++ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
++
++ if (likely(atomic_read(&skb_shared->users) == users + 1)) {
++ kfree_skb(skb_shared); /* decrease reference count by one */
++
++ skb->destructor = &imq_skb_destructor;
++
++ /* cloned? */
++ if (unlikely(skb_orig))
++ kfree_skb(skb_orig); /* free original */
++
++ spin_unlock(root_lock);
++ rcu_read_unlock_bh();
++
++ /* schedule qdisc dequeue */
++ __netif_schedule(q);
++
++ retval = 0;
++ goto out;
++ } else {
++ skb_restore_cb(skb_shared); /* restore skb->cb */
++ skb->nf_queue_entry = NULL;
++ /*
++ * qdisc dropped the packet and decreased the reference count of
++ * the skb, so we don't want to free it again here, as that would
++ * actually destroy the skb.
++ */
++ spin_unlock(root_lock);
++ goto packet_not_eaten_by_imq_dev;
++ }
++
++packet_not_eaten_by_imq_dev:
++ skb_set_queue_mapping(skb, orig_queue_index);
++ rcu_read_unlock_bh();
++
++ /* cloned? restore original */
++ if (unlikely(skb_orig)) {
++ kfree_skb(skb);
++ entry->skb = skb_orig;
++ }
++ retval = -1;
++out:
++ return retval;
++}
++
++static unsigned int imq_nf_hook(const struct nf_hook_ops *ops,
++ struct sk_buff *pskb,
++ const struct net_device *indev,
++ const struct net_device *outdev,
++ int (*okfn)(struct sk_buff *))
++{
++ return (pskb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
++}
++
++static int imq_close(struct net_device *dev)
++{
++ netif_stop_queue(dev);
++ return 0;
++}
++
++static int imq_open(struct net_device *dev)
++{
++ netif_start_queue(dev);
++ return 0;
++}
++
++static const struct net_device_ops imq_netdev_ops = {
++ .ndo_open = imq_open,
++ .ndo_stop = imq_close,
++ .ndo_start_xmit = imq_dev_xmit,
++ .ndo_get_stats = imq_get_stats,
++};
++
++static void imq_setup(struct net_device *dev)
++{
++ dev->netdev_ops = &imq_netdev_ops;
++ dev->type = ARPHRD_VOID;
++ dev->mtu = 16000; /* too small? */
++ dev->tx_queue_len = 11000; /* too big? */
++ dev->flags = IFF_NOARP;
++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
++ NETIF_F_GSO | NETIF_F_HW_CSUM |
++ NETIF_F_HIGHDMA;
++ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE |
++ IFF_TX_SKB_SHARING);
++}
++
++static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
++{
++ int ret = 0;
++
++ if (tb[IFLA_ADDRESS]) {
++ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
++ ret = -EINVAL;
++ goto end;
++ }
++ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
++ ret = -EADDRNOTAVAIL;
++ goto end;
++ }
++ }
++ return 0;
++end:
++ pr_warn("IMQ: imq_validate failed (%d)\n", ret);
++ return ret;
++}
++
++static struct rtnl_link_ops imq_link_ops __read_mostly = {
++ .kind = "imq",
++ .priv_size = 0,
++ .setup = imq_setup,
++ .validate = imq_validate,
++};
++
++static const struct nf_queue_handler imq_nfqh = {
++ .outfn = imq_nf_queue,
++};
++
++static int __init imq_init_hooks(void)
++{
++ int ret;
++
++ nf_register_queue_imq_handler(&imq_nfqh);
++
++ ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
++ if (ret < 0)
++ nf_unregister_queue_imq_handler();
++
++ return ret;
++}
++
++static int __init imq_init_one(int index)
++{
++ struct net_device *dev;
++ int ret;
++
++ dev = alloc_netdev_mq(0, "imq%d", NET_NAME_UNKNOWN, imq_setup, numqueues);
++ if (!dev)
++ return -ENOMEM;
++
++ ret = dev_alloc_name(dev, dev->name);
++ if (ret < 0)
++ goto fail;
++
++ dev->rtnl_link_ops = &imq_link_ops;
++ ret = register_netdevice(dev);
++ if (ret < 0)
++ goto fail;
++
++ return 0;
++fail:
++ free_netdev(dev);
++ return ret;
++}
++
++static int __init imq_init_devs(void)
++{
++ int err, i;
++
++ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
++ pr_err("IMQ: numdevs has to be betweed 1 and %u\n",
++ IMQ_MAX_DEVS);
++ return -EINVAL;
++ }
++
++ if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
++ pr_err("IMQ: numqueues has to be betweed 1 and %u\n",
++ IMQ_MAX_QUEUES);
++ return -EINVAL;
++ }
++
++ get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
++
++ rtnl_lock();
++ err = __rtnl_link_register(&imq_link_ops);
++
++ for (i = 0; i < numdevs && !err; i++)
++ err = imq_init_one(i);
++
++ if (err) {
++ __rtnl_link_unregister(&imq_link_ops);
++ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++ }
++ rtnl_unlock();
++
++ return err;
++}
++
++static int __init imq_init_module(void)
++{
++ int err;
++
++#if defined(CONFIG_IMQ_NUM_DEVS)
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
++#endif
++
++ err = imq_init_devs();
++ if (err) {
++ pr_err("IMQ: Error trying imq_init_devs(net)\n");
++ return err;
++ }
++
++ err = imq_init_hooks();
++ if (err) {
++ pr_err("IMQ: Error trying imq_init_hooks()\n");
++ rtnl_link_unregister(&imq_link_ops);
++ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++ return err;
++ }
++
++ pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d)\n",
++ numdevs, numqueues);
++
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++ pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
++#else
++ pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
++#endif
++#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++ pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
++#else
++ pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
++#endif
++
++ return 0;
++}
++
++static void __exit imq_unhook(void)
++{
++ nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
++ nf_unregister_queue_imq_handler();
++}
++
++static void __exit imq_cleanup_devs(void)
++{
++ rtnl_link_unregister(&imq_link_ops);
++ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++}
++
++static void __exit imq_exit_module(void)
++{
++ imq_unhook();
++ imq_cleanup_devs();
++ pr_info("IMQ driver unloaded successfully.\n");
++}
++
++module_init(imq_init_module);
++module_exit(imq_exit_module);
++
++module_param(numdevs, int, 0);
++module_param(numqueues, int, 0);
++MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
++MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("imq");
++
+diff --git a/include/linux/imq.h b/include/linux/imq.h
+new file mode 100644
+index 0000000..1babb09
+--- /dev/null
++++ b/include/linux/imq.h
+@@ -0,0 +1,13 @@
++#ifndef _IMQ_H
++#define _IMQ_H
++
++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
++#define IMQ_F_BITS 5
++
++#define IMQ_F_IFMASK 0x0f
++#define IMQ_F_ENQUEUE 0x10
++
++#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
++
++#endif /* _IMQ_H */
++
+diff --git a/include/linux/netfilter/xt_IMQ.h b/include/linux/netfilter/xt_IMQ.h
+new file mode 100644
+index 0000000..9b07230
+--- /dev/null
++++ b/include/linux/netfilter/xt_IMQ.h
+@@ -0,0 +1,9 @@
++#ifndef _XT_IMQ_H
++#define _XT_IMQ_H
++
++struct xt_imq_info {
++ unsigned int todev; /* target imq device */
++};
++
++#endif /* _XT_IMQ_H */
++
+diff --git a/include/linux/netfilter_ipv4/ipt_IMQ.h b/include/linux/netfilter_ipv4/ipt_IMQ.h
+new file mode 100644
+index 0000000..7af320f
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
+@@ -0,0 +1,10 @@
++#ifndef _IPT_IMQ_H
++#define _IPT_IMQ_H
++
++/* Backwards compatibility for old userspace */
++#include <linux/netfilter/xt_IMQ.h>
++
++#define ipt_imq_info xt_imq_info
++
++#endif /* _IPT_IMQ_H */
++
+diff --git a/include/linux/netfilter_ipv6/ip6t_IMQ.h b/include/linux/netfilter_ipv6/ip6t_IMQ.h
+new file mode 100644
+index 0000000..198ac01
+--- /dev/null
++++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
+@@ -0,0 +1,10 @@
++#ifndef _IP6T_IMQ_H
++#define _IP6T_IMQ_H
++
++/* Backwards compatibility for old userspace */
++#include <linux/netfilter/xt_IMQ.h>
++
++#define ip6t_imq_info xt_imq_info
++
++#endif /* _IP6T_IMQ_H */
++
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f66f346..d699b19 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -33,6 +33,9 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/netdev_features.h>
+ #include <net/flow_keys.h>
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#include <linux/imq.h>
++#endif
+
+ /* Don't change this without changing skb_csum_unnecessary! */
+ #define CHECKSUM_NONE 0
+@@ -418,6 +421,9 @@ struct sk_buff {
+ * first. This is owned by whoever has the skb queued ATM.
+ */
+ char cb[48] __aligned(8);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ void *cb_next;
++#endif
+
+ unsigned long _skb_refdst;
+ #ifdef CONFIG_XFRM
+@@ -453,6 +459,9 @@ struct sk_buff {
+ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct nf_conntrack *nfct;
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ struct nf_queue_entry *nf_queue_entry;
++#endif
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct nf_bridge_info *nf_bridge;
+ #endif
+@@ -490,6 +499,9 @@ struct sk_buff {
+ __u16 tc_verd; /* traffic control verdict */
+ #endif
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ __u8 imq_flags:IMQ_F_BITS;
++#endif
+
+ union {
+ __wsum csum;
+@@ -625,6 +637,12 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+ return (struct rtable *)skb_dst(skb);
+ }
+
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++extern int skb_save_cb(struct sk_buff *skb);
++extern int skb_restore_cb(struct sk_buff *skb);
++#endif
++
+ void kfree_skb(struct sk_buff *skb);
+ void kfree_skb_list(struct sk_buff *segs);
+ void skb_tx_error(struct sk_buff *skb);
+@@ -2435,6 +2453,10 @@ static inline void nf_reset(struct sk_buff *skb)
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = NULL;
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ skb->imq_flags = 0;
++ skb->nf_queue_entry = NULL;
++#endif
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nf_bridge_put(skb->nf_bridge);
+ skb->nf_bridge = NULL;
+@@ -2635,6 +2653,10 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+ if (copy)
+ dst->nfctinfo = src->nfctinfo;
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ dst->imq_flags = src->imq_flags;
++ dst->nf_queue_entry = src->nf_queue_entry;
++#endif
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ dst->nf_bridge = src->nf_bridge;
+ nf_bridge_get(src->nf_bridge);
+diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
+index aaba4bb..f6e92a4 100644
+--- a/include/net/netfilter/nf_queue.h
++++ b/include/net/netfilter/nf_queue.h
+@@ -29,6 +29,12 @@ struct nf_queue_handler {
+ void nf_register_queue_handler(const struct nf_queue_handler *qh);
+ void nf_unregister_queue_handler(void);
+ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
++extern void nf_unregister_queue_imq_handler(void);
++#endif
+
+ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+ void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
+index f7dc0eb..58c46a9 100644
+--- a/include/uapi/linux/netfilter.h
++++ b/include/uapi/linux/netfilter.h
+@@ -13,7 +13,8 @@
+ #define NF_QUEUE 3
+ #define NF_REPEAT 4
+ #define NF_STOP 5
+-#define NF_MAX_VERDICT NF_STOP
++#define NF_IMQ_QUEUE 6
++#define NF_MAX_VERDICT NF_IMQ_QUEUE
+
+ /* we overload the higher bits for encoding auxiliary data such as the queue
+ * number or errno values. Not nice, but better than additional function
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3d13874..9842f21 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -131,6 +131,9 @@
+ #include <linux/vmalloc.h>
+ #include <linux/if_macvlan.h>
+ #include <linux/errqueue.h>
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#include <linux/imq.h>
++#endif
+
+ #include "net-sysfs.h"
+
+@@ -2595,7 +2598,12 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
+ unsigned int len;
+ int rc;
+
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ if (!list_empty(&ptype_all) &&
++ !(skb->imq_flags & IMQ_F_ENQUEUE))
++#else
+ if (!list_empty(&ptype_all))
++#endif
+ dev_queue_xmit_nit(skb, dev);
+
+ len = skb->len;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c28c7fe..a5f1888 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -73,6 +73,84 @@
+
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
++
++/* Control buffer save/restore for IMQ devices */
++struct skb_cb_table {
++ char cb[48] __aligned(8);
++ void *cb_next;
++ atomic_t refcnt;
++};
++
++static DEFINE_SPINLOCK(skb_cb_store_lock);
++
++int skb_save_cb(struct sk_buff *skb)
++{
++ struct skb_cb_table *next;
++
++ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
++ if (!next)
++ return -ENOMEM;
++
++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
++
++ memcpy(next->cb, skb->cb, sizeof(skb->cb));
++ next->cb_next = skb->cb_next;
++
++ atomic_set(&next->refcnt, 1);
++
++ skb->cb_next = next;
++ return 0;
++}
++EXPORT_SYMBOL(skb_save_cb);
++
++int skb_restore_cb(struct sk_buff *skb)
++{
++ struct skb_cb_table *next;
++
++ if (!skb->cb_next)
++ return 0;
++
++ next = skb->cb_next;
++
++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
++
++ memcpy(skb->cb, next->cb, sizeof(skb->cb));
++ skb->cb_next = next->cb_next;
++
++ spin_lock(&skb_cb_store_lock);
++
++ if (atomic_dec_and_test(&next->refcnt))
++ kmem_cache_free(skbuff_cb_store_cache, next);
++
++ spin_unlock(&skb_cb_store_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(skb_restore_cb);
++
++static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
++{
++ struct skb_cb_table *next;
++ struct sk_buff *old;
++
++ if (!__old->cb_next) {
++ new->cb_next = NULL;
++ return;
++ }
++
++ spin_lock(&skb_cb_store_lock);
++
++ old = (struct sk_buff *)__old;
++
++ next = old->cb_next;
++ atomic_inc(&next->refcnt);
++ new->cb_next = next;
++
++ spin_unlock(&skb_cb_store_lock);
++}
++#endif
+
+ /**
+ * skb_panic - private function for out-of-line support
+@@ -577,6 +656,28 @@ static void skb_release_head_state(struct sk_buff *skb)
+ WARN_ON(in_irq());
+ skb->destructor(skb);
+ }
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ /*
++ * This should not happen. When it does, avoid memleak by restoring
++ * the chain of cb-backups.
++ */
++ while (skb->cb_next != NULL) {
++ if (net_ratelimit())
++ pr_warn("IMQ: kfree_skb: skb->cb_next: %p\n",
++ skb->cb_next);
++
++ skb_restore_cb(skb);
++ }
++ /*
++ * This should not happen either, nf_queue_entry is nullified in
++ * imq_dev_xmit(). If we have non-NULL nf_queue_entry then we are
++ * leaking entry pointers, maybe memory. We don't know if this is
++ * pointer to already freed memory, or should this be freed.
++ * If this happens we need to add refcounting, etc for nf_queue_entry.
++ */
++ if (skb->nf_queue_entry && net_ratelimit())
++ pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
++#endif
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_conntrack_put(skb->nfct);
+ #endif
+@@ -709,6 +810,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+ #ifdef CONFIG_XFRM
+ new->sp = secpath_get(old->sp);
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ new->cb_next = NULL;
++ /*skb_copy_stored_cb(new, old);*/
++#endif
+ __nf_copy(new, old, false);
+
+ /* Note : this field could be in headers_start/headers_end section
+@@ -3112,6 +3217,13 @@ void __init skb_init(void)
+ 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ NULL);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
++ sizeof(struct skb_cb_table),
++ 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++ NULL);
++#endif
+ }
+
+ /**
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index b6fa35e..08dcfef 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -64,9 +64,6 @@ static int ip6_finish_output2(struct sk_buff *skb)
+ struct in6_addr *nexthop;
+ int ret;
+
+- skb->protocol = htons(ETH_P_IPV6);
+- skb->dev = dev;
+-
+ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+@@ -143,6 +140,13 @@ int ip6_output(struct sk_buff *skb)
+ return 0;
+ }
+
++ /*
++ * IMQ-patch: moved setting skb->dev and skb->protocol from
++ * ip6_finish_output2 to fix crashing at netif_skb_features().
++ */
++ skb->protocol = htons(ETH_P_IPV6);
++ skb->dev = dev;
++
+ return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
+ ip6_finish_output,
+ !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
+index 6e839b6..45ac31c 100644
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -630,6 +630,18 @@ config NETFILTER_XT_TARGET_LOG
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config NETFILTER_XT_TARGET_IMQ
++ tristate '"IMQ" target support'
++ depends on NETFILTER_XTABLES
++ depends on IP_NF_MANGLE || IP6_NF_MANGLE
++ select IMQ
++ default m if NETFILTER_ADVANCED=n
++ help
++ This option adds a `IMQ' target which is used to specify if and
++ to which imq device packets should get enqueued/dequeued.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+ config NETFILTER_XT_TARGET_MARK
+ tristate '"MARK" target support'
+ depends on NETFILTER_ADVANCED
+diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
+index c3a0a12..9647f06 100644
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -82,6 +82,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 593b16e..740cd69 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -191,9 +191,11 @@ next_hook:
+ ret = NF_DROP_GETERR(verdict);
+ if (ret == 0)
+ ret = -EPERM;
+- } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
++ } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE ||
++ (verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
+ int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+- verdict >> NF_VERDICT_QBITS);
++ verdict >> NF_VERDICT_QBITS,
++ verdict & NF_VERDICT_MASK);
+ if (err < 0) {
+ if (err == -ECANCELED)
+ goto next_hook;
+diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
+index 3deec99..c1a1397 100644
+--- a/net/netfilter/nf_internals.h
++++ b/net/netfilter/nf_internals.h
+@@ -29,7 +29,7 @@ extern int nf_queue(struct sk_buff *skb,
+ int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
+ unsigned int hook, struct net_device *indev,
+ struct net_device *outdev, int (*okfn)(struct sk_buff *),
+- unsigned int queuenum);
++ unsigned int queuenum, unsigned int queuetype);
+ int __init netfilter_queue_init(void);
+
+ /* nf_log.c */
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index 5d24b1f..28317dc 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -27,6 +27,23 @@
+ */
+ static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
+
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
++
++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
++{
++ rcu_assign_pointer(queue_imq_handler, qh);
++}
++EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
++
++void nf_unregister_queue_imq_handler(void)
++{
++ RCU_INIT_POINTER(queue_imq_handler, NULL);
++ synchronize_rcu();
++}
++EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
++#endif
++
+ /* return EBUSY when somebody else is registered, return EEXIST if the
+ * same handler is registered, return 0 in case of success. */
+ void nf_register_queue_handler(const struct nf_queue_handler *qh)
+@@ -105,7 +122,8 @@ int nf_queue(struct sk_buff *skb,
+ struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sk_buff *),
+- unsigned int queuenum)
++ unsigned int queuenum,
++ unsigned int queuetype)
+ {
+ int status = -ENOENT;
+ struct nf_queue_entry *entry = NULL;
+@@ -115,7 +133,17 @@ int nf_queue(struct sk_buff *skb,
+ /* QUEUE == DROP if no one is waiting, to be safe. */
+ rcu_read_lock();
+
+- qh = rcu_dereference(queue_handler);
++ if (queuetype == NF_IMQ_QUEUE) {
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ qh = rcu_dereference(queue_imq_handler);
++#else
++ BUG();
++ goto err_unlock;
++#endif
++ } else {
++ qh = rcu_dereference(queue_handler);
++ }
++
+ if (!qh) {
+ status = -ESRCH;
+ goto err_unlock;
+@@ -205,9 +233,11 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+ local_bh_enable();
+ break;
+ case NF_QUEUE:
++ case NF_IMQ_QUEUE:
+ err = nf_queue(skb, elem, entry->pf, entry->hook,
+ entry->indev, entry->outdev, entry->okfn,
+- verdict >> NF_VERDICT_QBITS);
++ verdict >> NF_VERDICT_QBITS,
++ verdict & NF_VERDICT_MASK);
+ if (err < 0) {
+ if (err == -ECANCELED)
+ goto next_hook;
+diff --git a/net/netfilter/xt_IMQ.c b/net/netfilter/xt_IMQ.c
+new file mode 100644
+index 0000000..1c3cd66
+--- /dev/null
++++ b/net/netfilter/xt_IMQ.c
+@@ -0,0 +1,72 @@
++/*
++ * This target marks packets to be enqueued to an imq device
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_IMQ.h>
++#include <linux/imq.h>
++
++static unsigned int imq_target(struct sk_buff *pskb,
++ const struct xt_action_param *par)
++{
++ const struct xt_imq_info *mr = par->targinfo;
++
++ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
++
++ return XT_CONTINUE;
++}
++
++static int imq_checkentry(const struct xt_tgchk_param *par)
++{
++ struct xt_imq_info *mr = par->targinfo;
++
++ if (mr->todev > IMQ_MAX_DEVS - 1) {
++ pr_warn("IMQ: invalid device specified, highest is %u\n",
++ IMQ_MAX_DEVS - 1);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static struct xt_target xt_imq_reg[] __read_mostly = {
++ {
++ .name = "IMQ",
++ .family = AF_INET,
++ .checkentry = imq_checkentry,
++ .target = imq_target,
++ .targetsize = sizeof(struct xt_imq_info),
++ .table = "mangle",
++ .me = THIS_MODULE
++ },
++ {
++ .name = "IMQ",
++ .family = AF_INET6,
++ .checkentry = imq_checkentry,
++ .target = imq_target,
++ .targetsize = sizeof(struct xt_imq_info),
++ .table = "mangle",
++ .me = THIS_MODULE
++ },
++};
++
++static int __init imq_init(void)
++{
++ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
++}
++
++static void __exit imq_fini(void)
++{
++ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
++}
++
++module_init(imq_init);
++module_exit(imq_fini);
++
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_IMQ");
++MODULE_ALIAS("ip6t_IMQ");
++
Index: target/linux/generic/patches-3.18/linux_650-custom_netfilter_match_modules.patch
===================================================================
--- target/linux/generic/patches-3.18/linux_650-custom_netfilter_match_modules.patch (revision 0)
+++ target/linux/generic/patches-3.18/linux_650-custom_netfilter_match_modules.patch (working copy)
@@ -0,0 +1,9252 @@
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_bandwidth.c 2015-06-19 03:02:55.381669455 +0800
+@@ -0,0 +1,2501 @@
++/* bandwidth -- An iptables extension for bandwidth monitoring/control
++ * Can be used to efficiently monitor bandwidth and/or implement bandwidth quotas
++ * Can be queried using the iptbwctl userspace library
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009-2011 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <asm/uaccess.h>
++
++#include <linux/time.h>
++
++#include <linux/semaphore.h>
++
++
++#include "bandwidth_deps/tree_map.h"
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_bandwidth.h>
++
++
++#include <linux/ip.h>
++#include <linux/netfilter/x_tables.h>
++
++
++/* #define BANDWIDTH_DEBUG 1 */
++
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Bishop");
++MODULE_DESCRIPTION("Match bandwidth used, designed for use with Gargoyle web interface (www.gargoyle-router.com)");
++
++/*
++ * WARNING: accessing the sys_tz variable takes FOREVER, and kills performance
++ * keep a local variable that gets updated from the extern variable
++ */
++extern struct timezone sys_tz;
++static int local_minutes_west;
++static int local_seconds_west;
++static time_t last_local_mw_update;
++
++
++static spinlock_t bandwidth_lock = __SPIN_LOCK_UNLOCKED(bandwidth_lock);
++DEFINE_SEMAPHORE(userspace_lock);
++
++static string_map* id_map = NULL;
++
++
++typedef struct info_and_maps_struct
++{
++ struct ipt_bandwidth_info* info;
++ long_map* ip_map;
++ long_map* ip_history_map;
++}info_and_maps;
++
++typedef struct history_struct
++{
++ time_t first_start;
++ time_t first_end;
++ time_t last_end; /* also beginning of current time frame */
++ uint32_t max_nodes;
++ uint32_t num_nodes;
++ uint32_t non_zero_nodes;
++ uint32_t current_index;
++ uint64_t* history_data;
++} bw_history;
++
++
++
++static unsigned char set_in_progress = 0;
++static char set_id[BANDWIDTH_MAX_ID_LENGTH] = "";
++
++/*
++ * function prototypes
++ *
++ * (prototypes only provided for
++ * functions not part of iptables API)
++ *
++*/
++
++
++static void adjust_ip_for_backwards_time_shift(unsigned long key, void* value);
++static void adjust_id_for_backwards_time_shift(char* key, void* value);
++static void check_for_backwards_time_shift(time_t now);
++
++
++static void shift_timezone_of_ip(unsigned long key, void* value);
++static void shift_timezone_of_id(char* key, void* value);
++static void check_for_timezone_shift(time_t now, int already_locked);
++
++
++
++static bw_history* initialize_history(uint32_t max_nodes);
++static unsigned char update_history(bw_history* history, time_t interval_start, time_t interval_end, struct ipt_bandwidth_info* info);
++
++
++
++static void do_reset(unsigned long key, void* value);
++static void set_bandwidth_to_zero(unsigned long key, void* value);
++static void handle_interval_reset(info_and_maps* iam, time_t now);
++
++static uint64_t pow64(uint64_t base, uint64_t pow);
++static uint64_t get_bw_record_max(void); /* called by init to set global variable */
++
++static inline int is_leap(unsigned int y);
++static time_t get_next_reset_time(struct ipt_bandwidth_info *info, time_t now, time_t previous_reset);
++static time_t get_nominal_previous_reset_time(struct ipt_bandwidth_info *info, time_t current_next_reset);
++
++static uint64_t* initialize_map_entries_for_ip(info_and_maps* iam, unsigned long ip, uint64_t initial_bandwidth);
++
++
++
++
++static time_t backwards_check = 0;
++static time_t backwards_adjust_current_time = 0;
++static time_t backwards_adjust_info_previous_reset = 0;
++static time_t backwards_adjust_ips_zeroed = 0;
++static info_and_maps* backwards_adjust_iam = NULL;
++
++/*
++static char print_out_buf[25000];
++static void print_to_buf(char* outdat);
++static void reset_buf(void);
++static void do_print_buf(void);
++
++static void print_to_buf(char* outdat)
++{
++ int buf_len = strlen(print_out_buf);
++ sprintf(print_out_buf+buf_len, "\t%s\n", outdat);
++}
++static void reset_buf(void)
++{
++ print_out_buf[0] = '\n';
++ print_out_buf[1] = '\0';
++}
++static void do_print_buf(void)
++{
++ char* start = print_out_buf;
++ char* next = strchr(start, '\n');
++ while(next != NULL)
++ {
++ *next = '\0';
++ printk("%s\n", start);
++ start = next+1;
++ next = strchr(start, '\n');
++ }
++ printk("%s\n", start);
++
++ reset_buf();
++}
++*/
++
++static void adjust_ip_for_backwards_time_shift(unsigned long key, void* value)
++{
++ bw_history* old_history = (bw_history*)value;
++
++ if(old_history->num_nodes == 1)
++ {
++ if(backwards_adjust_info_previous_reset > backwards_adjust_current_time)
++ {
++ if(backwards_adjust_ips_zeroed == 0)
++ {
++ apply_to_every_long_map_value(backwards_adjust_iam->ip_map, set_bandwidth_to_zero);
++ backwards_adjust_iam->info->next_reset = get_next_reset_time(backwards_adjust_iam->info, backwards_adjust_current_time, backwards_adjust_current_time);
++ backwards_adjust_iam->info->previous_reset = backwards_adjust_current_time;
++ backwards_adjust_iam->info->current_bandwidth = 0;
++ backwards_adjust_ips_zeroed = 1;
++ }
++ }
++ return;
++ }
++ else if(old_history->last_end < backwards_adjust_current_time)
++ {
++ return;
++ }
++ else
++ {
++
++ /*
++ * reconstruct new history without newest nodes, to represent data as it was
++ * last time the current time was set to the interval to which we just jumped back
++ */
++ uint32_t next_old_index;
++ time_t old_next_start = old_history->first_start == 0 ? backwards_adjust_info_previous_reset : old_history->first_start; /* first time point in old history */
++ bw_history* new_history = initialize_history(old_history->max_nodes);
++ if(new_history == NULL)
++ {
++ printk("ipt_bandwidth: warning, kmalloc failure!\n");
++ return;
++ }
++
++
++
++ /* oldest index in old history -- we iterate forward through old history using this index */
++ next_old_index = old_history->num_nodes == old_history->max_nodes ? (old_history->current_index+1) % old_history->max_nodes : 0;
++
++
++ /* if first time point is after current time, just completely re-initialize history, otherwise set first time point to old first time point */
++ (new_history->history_data)[ new_history->current_index ] = old_next_start < backwards_adjust_current_time ? (old_history->history_data)[next_old_index] : 0;
++ backwards_adjust_iam->info->previous_reset = old_next_start < backwards_adjust_current_time ? old_next_start : backwards_adjust_current_time;
++
++
++ /* iterate through old history, rebuilding in new history*/
++ while( old_next_start < backwards_adjust_current_time )
++ {
++ time_t old_next_end = get_next_reset_time(backwards_adjust_iam->info, old_next_start, old_next_start); /* 2nd param = last reset, 3rd param = current time */
++ if( old_next_end < backwards_adjust_current_time)
++ {
++ update_history(new_history, old_next_start, old_next_end, backwards_adjust_iam->info);
++ next_old_index++;
++ (new_history->history_data)[ new_history->current_index ] = (old_history->history_data)[next_old_index];
++ }
++ backwards_adjust_iam->info->previous_reset = old_next_start; /*update previous_reset variable in bw_info as we iterate */
++ old_next_start = old_next_end;
++ }
++
++ /* update next_reset variable from previous_reset variable which we've already set */
++ backwards_adjust_iam->info->next_reset = get_next_reset_time(backwards_adjust_iam->info, backwards_adjust_iam->info->previous_reset, backwards_adjust_iam->info->previous_reset);
++
++
++
++ /* set old_history to be new_history */
++ kfree(old_history->history_data);
++ old_history->history_data = new_history->history_data;
++ old_history->first_start = new_history->first_start;
++ old_history->first_end = new_history->first_end;
++ old_history->last_end = new_history->last_end;
++ old_history->num_nodes = new_history->num_nodes;
++ old_history->non_zero_nodes = new_history->non_zero_nodes;
++ old_history->current_index = new_history->current_index;
++ set_long_map_element(backwards_adjust_iam->ip_map, key, (void*)(old_history->history_data + old_history->current_index) );
++ if(key == 0)
++ {
++ backwards_adjust_iam->info->combined_bw = (uint64_t*)(old_history->history_data + old_history->current_index);
++ }
++
++ /*
++ * free new history (which was just temporary)
++ * note that we don't need to free history_data from new_history
++ * we freed the history_data from old history, and set that to the history_data from new_history
++ * so, this cleanup has already been handled
++ */
++ kfree(new_history);
++
++ }
++}
++static void adjust_id_for_backwards_time_shift(char* key, void* value)
++{
++ info_and_maps* iam = (info_and_maps*)value;
++ if(iam == NULL)
++ {
++ return;
++ }
++ if(iam->info == NULL)
++ {
++ return;
++ }
++
++ backwards_adjust_iam = iam;
++ if( (iam->info->reset_is_constant_interval == 0 && iam->info->reset_interval == BANDWIDTH_NEVER) || iam->info->cmp == BANDWIDTH_CHECK )
++ {
++ return;
++ }
++ if(iam->ip_history_map != NULL)
++ {
++ backwards_adjust_info_previous_reset = iam->info->previous_reset;
++ backwards_adjust_ips_zeroed = 0;
++ apply_to_every_long_map_value(iam->ip_history_map, adjust_ip_for_backwards_time_shift);
++ }
++ else
++ {
++ time_t next_reset_after_adjustment = get_next_reset_time(iam->info, backwards_adjust_current_time, backwards_adjust_current_time);
++ if(next_reset_after_adjustment < iam->info->next_reset)
++ {
++ iam->info->previous_reset = backwards_adjust_current_time;
++ iam->info->next_reset = next_reset_after_adjustment;
++ }
++ }
++ backwards_adjust_iam = NULL;
++}
++static void check_for_backwards_time_shift(time_t now)
++{
++ spin_lock_bh(&bandwidth_lock);
++ if(now < backwards_check && backwards_check != 0)
++ {
++ printk("ipt_bandwidth: backwards time shift detected, adjusting\n");
++
++ /* adjust */
++ down(&userspace_lock);
++
++ /* This function is always called with absolute time, not time adjusted for timezone. Correct that before adjusting. */
++ backwards_adjust_current_time = now - local_seconds_west;
++ apply_to_every_string_map_value(id_map, adjust_id_for_backwards_time_shift);
++ up(&userspace_lock);
++ }
++ backwards_check = now;
++ spin_unlock_bh(&bandwidth_lock);
++}
++
++
++
++/* scratch state consumed by the shift_timezone_* iterator callbacks below */
++static int old_minutes_west;
++static time_t shift_timezone_current_time;
++static time_t shift_timezone_info_previous_reset;
++static info_and_maps* shift_timezone_iam = NULL;
++/*
++ * long_map iterator callback: re-anchor one ip's history ring after the
++ * local timezone offset changed.  Reads shift_timezone_iam and
++ * shift_timezone_current_time, which shift_timezone_of_id sets up first.
++ */
++static void shift_timezone_of_ip(unsigned long key, void* value)
++{
++	#ifdef BANDWIDTH_DEBUG
++	unsigned long* ip = &key;
++	printk("shifting ip = %u.%u.%u.%u\n", *((char*)ip), *(((char*)ip)+1), *(((char*)ip)+2), *(((char*)ip)+3) );
++	#endif
++
++
++	bw_history* history = (bw_history*)value;
++	int32_t timezone_adj = (old_minutes_west-local_minutes_west)*60; /* seconds to add to stored times */
++	#ifdef BANDWIDTH_DEBUG
++	printk(" before jump:\n");
++	printk(" current time = %ld\n", shift_timezone_current_time);
++	printk(" first_start = %ld\n", history->first_start);
++	printk(" first_end = %ld\n", history->first_end);
++	printk(" last_end = %ld\n", history->last_end);
++	printk("\n");
++	#endif
++
++	/* given time after shift, calculate next and previous reset times */
++	time_t next_reset = get_next_reset_time(shift_timezone_iam->info, shift_timezone_current_time, 0);
++	time_t previous_reset = get_nominal_previous_reset_time(shift_timezone_iam->info, next_reset);
++	shift_timezone_iam->info->next_reset = next_reset;
++
++	/*if we're resetting on a constant interval, we can just adjust -- no need to worry about relationship to constant boundaries, e.g. end of day */
++	if(shift_timezone_iam->info->reset_is_constant_interval)
++	{
++		shift_timezone_iam->info->previous_reset = previous_reset;
++		if(history->num_nodes > 1)
++		{
++			history->first_start = history->first_start + timezone_adj;
++			history->first_end = history->first_end + timezone_adj;
++			history->last_end = history->last_end + timezone_adj;
++		}
++	}
++	else
++	{
++
++
++		/* next reset will be the newly computed next_reset. */
++		int node_index=history->num_nodes - 1;
++		if(node_index > 0)
++		{
++			/* based on new, shifted time, iterate back over all nodes in history */
++			shift_timezone_iam->info->previous_reset = previous_reset ;
++			history->last_end = previous_reset;
++
++			while(node_index > 1)
++			{
++				previous_reset = get_nominal_previous_reset_time(shift_timezone_iam->info, previous_reset);
++				node_index--;
++			}
++			history->first_end = previous_reset;
++
++			previous_reset = get_nominal_previous_reset_time(shift_timezone_iam->info, previous_reset);
++			/* take the later of the recomputed boundary and the simple tz-adjusted start */
++			history->first_start = previous_reset > history->first_start + timezone_adj ? previous_reset : history->first_start + timezone_adj;
++		}
++		else
++		{
++			/*
++			 * history hasn't really been initialized -- there's only one, current time point.
++			 * we only know what's in the current accumulator in info. Just adjust previous reset time and make sure it's valid
++			 */
++			shift_timezone_iam->info->previous_reset = previous_reset > shift_timezone_info_previous_reset + timezone_adj ? previous_reset : shift_timezone_info_previous_reset + timezone_adj;
++		}
++	}
++
++
++	#ifdef BANDWIDTH_DEBUG
++	printk("\n");
++	printk(" after jump:\n");
++	printk(" first_start = %ld\n", history->first_start);
++	printk(" first_end = %ld\n", history->first_end);
++	printk(" last_end = %ld\n", history->last_end);
++	printk("\n\n");
++	#endif
++
++}
++/*
++ * string_map iterator callback: adjust one id's reset times (and, via
++ * shift_timezone_of_ip, each ip's history) after a timezone change.
++ * Ids that never reset, or are pure CHECK rules, are skipped.
++ * NOTE(review): the early return after shift_timezone_iam is assigned
++ * leaves that global pointing at this iam until the next call -- appears
++ * harmless since it is only read under the same locks, but worth confirming.
++ */
++static void shift_timezone_of_id(char* key, void* value)
++{
++	info_and_maps* iam = (info_and_maps*)value;
++	int history_found = 0;
++	if(iam == NULL)
++	{
++		return;
++	}
++	if(iam->info == NULL)
++	{
++		return;
++	}
++
++	#ifdef BANDWIDTH_DEBUG
++	printk("shifting id %s\n", key);
++	#endif
++
++	shift_timezone_iam = iam;
++	if( (iam->info->reset_is_constant_interval == 0 && iam->info->reset_interval == BANDWIDTH_NEVER) || iam->info->cmp == BANDWIDTH_CHECK )
++	{
++		return;
++	}
++
++	if(iam->ip_history_map != NULL)
++	{
++		if(iam->ip_history_map->num_elements > 0)
++		{
++			history_found = 1;
++			shift_timezone_info_previous_reset = iam->info->previous_reset;
++			apply_to_every_long_map_value(iam->ip_history_map, shift_timezone_of_ip);
++		}
++	}
++	if(history_found == 0)
++	{
++		/* no per-ip histories: just slide this id's reset window by the tz delta */
++		iam->info->previous_reset = iam->info->previous_reset + ((old_minutes_west - local_minutes_west )*60);
++		if(iam->info->previous_reset > shift_timezone_current_time)
++		{
++			iam->info->next_reset = get_next_reset_time(iam->info, shift_timezone_current_time, shift_timezone_current_time);
++			iam->info->previous_reset = shift_timezone_current_time;
++		}
++		else
++		{
++			/* walk next_reset forward until it passes the (shifted) current time */
++			iam->info->next_reset = get_next_reset_time(iam->info, shift_timezone_current_time, iam->info->previous_reset);
++			while (iam->info->next_reset < shift_timezone_current_time)
++			{
++				iam->info->previous_reset = iam->info->next_reset;
++				iam->info->next_reset = get_next_reset_time(iam->info, iam->info->previous_reset, iam->info->previous_reset);
++			}
++		}
++	}
++	shift_timezone_iam = NULL;
++}
++
++/*
++ * Compare sys_tz against the cached local offset and, if it changed,
++ * shift every id's stored reset times into the new local time frame.
++ * 'now' is raw UTC seconds.  When already_locked is nonzero the caller
++ * already holds bandwidth_lock and userspace_lock, so neither is taken.
++ */
++static void check_for_timezone_shift(time_t now, int already_locked)
++{
++
++	if(already_locked == 0) { spin_lock_bh(&bandwidth_lock); }
++	if(now != last_local_mw_update ) /* make sure nothing changed while waiting for lock */
++	{
++		local_minutes_west = sys_tz.tz_minuteswest;
++		local_seconds_west = 60*local_minutes_west;
++		last_local_mw_update = now;
++		if(local_seconds_west > last_local_mw_update)
++		{
++			/* we can't let adjusted time be < 0 -- pretend timezone is still UTC */
++			local_minutes_west = 0;
++			local_seconds_west = 0;
++		}
++
++		if(local_minutes_west != old_minutes_west)
++		{
++			int adj_minutes = old_minutes_west-local_minutes_west;
++			adj_minutes = adj_minutes < 0 ? adj_minutes*-1 : adj_minutes; /* absolute value, for the log message only */
++
++			if(already_locked == 0) { down(&userspace_lock); }
++
++			printk("ipt_bandwidth: timezone shift of %d minutes detected, adjusting\n", adj_minutes);
++			printk(" old minutes west=%d, new minutes west=%d\n", old_minutes_west, local_minutes_west);
++
++			/* this function is always called with absolute time, not time adjusted for timezone. Correct that before adjusting */
++			shift_timezone_current_time = now - local_seconds_west;
++			apply_to_every_string_map_value(id_map, shift_timezone_of_id);
++
++			old_minutes_west = local_minutes_west;
++
++
++			if(already_locked == 0) { up(&userspace_lock); }
++		}
++	}
++	if(already_locked == 0) { spin_unlock_bh(&bandwidth_lock); }
++}
++
++
++
++/*
++ * Allocate and zero a bandwidth history ring able to hold max_nodes saved
++ * intervals plus one slot for the current interval.  Returns NULL on
++ * allocation failure (all callers tolerate NULL).
++ */
++static bw_history* initialize_history(uint32_t max_nodes)
++{
++	bw_history* new_history = (bw_history*)kmalloc(sizeof(bw_history), GFP_ATOMIC);
++	if(new_history != NULL)
++	{
++		new_history->history_data = (uint64_t*)kmalloc((1+max_nodes)*sizeof(uint64_t), GFP_ATOMIC); /*number to save +1 for current */
++		if(new_history->history_data == NULL) /* deal with malloc failure */
++		{
++			kfree(new_history);
++			new_history = NULL;
++		}
++		else
++		{
++			new_history->first_start = 0;
++			new_history->first_end = 0;
++			new_history->last_end = 0;
++			new_history->max_nodes = max_nodes+1; /*number to save +1 for current */
++			new_history->num_nodes = 1;
++			new_history->non_zero_nodes = 0; /* counts non_zero nodes other than current, so initialize to 0 */
++			new_history->current_index = 0;
++			/* BUGFIX: zero ALL allocated slots -- the buffer holds 1+max_nodes entries, but only max_nodes were being cleared, leaving the last slot uninitialized */
++			memset(new_history->history_data, 0, (1+max_nodes)*sizeof(uint64_t));
++		}
++	}
++	return new_history; /* in case of malloc failure new_history will be NULL, this should be safe */
++}
++
++/* returns 1 if there are non-zero nodes in history, 0 if history is empty (all zero) */
++/*
++ * Roll the history ring forward one interval: account the slot being
++ * closed out for [interval_start, interval_end], then open a fresh zeroed
++ * slot as the new current accumulator.
++ */
++static unsigned char update_history(bw_history* history, time_t interval_start, time_t interval_end, struct ipt_bandwidth_info* info)
++{
++	unsigned char history_is_nonzero = 0;
++	if(history != NULL) /* should never be null, but let's be sure */
++	{
++
++		/* adjust number of non-zero nodes */
++		if(history->num_nodes == history->max_nodes)
++		{
++			/* ring is full: the oldest slot is about to be overwritten, drop its contribution */
++			uint32_t first_index = (history->current_index+1) % history->max_nodes;
++			if( (history->history_data)[first_index] > 0)
++			{
++				history->non_zero_nodes = history->non_zero_nodes -1;
++			}
++		}
++		if( (history->history_data)[history->current_index] > 0 )
++		{
++			history->non_zero_nodes = history->non_zero_nodes + 1;
++		}
++		history_is_nonzero = history->non_zero_nodes > 0 ? 1 : 0;
++
++
++		/* update interval start/end */
++		if(history->first_start == 0)
++		{
++			history->first_start = interval_start;
++			history->first_end = interval_end;
++		}
++		if(history->num_nodes >= history->max_nodes)
++		{
++			/* ring full: advance the window describing the oldest surviving node */
++			history->first_start = history->first_end;
++			history->first_end = get_next_reset_time(info, history->first_start, history->first_start);
++		}
++		history->last_end = interval_end;
++
++
++		history->num_nodes = history->num_nodes < history->max_nodes ? history->num_nodes+1 : history->max_nodes;
++		history->current_index = (history->current_index+1) % history->max_nodes;
++		(history->history_data)[history->current_index] = 0;
++
++		#ifdef BANDWIDTH_DEBUG
++		printk("after update history->num_nodes = %d\n", history->num_nodes);
++		printk("after update history->current_index = %d\n", history->current_index);
++		#endif
++	}
++	return history_is_nonzero;
++}
++
++
++/* scratch state consumed by the do_reset iterator callback below */
++static struct ipt_bandwidth_info* do_reset_info = NULL;
++static long_map* do_reset_ip_map = NULL;
++static long_map* do_reset_delete_ips = NULL;
++static time_t do_reset_interval_start = 0;
++static time_t do_reset_interval_end = 0;
++/*
++ * long_map iterator callback: roll one ip's history forward one interval.
++ * Re-points the live counter (in do_reset_ip_map) at the ring's new
++ * current slot, or queues the ip for deletion when its history is all zero.
++ */
++static void do_reset(unsigned long key, void* value)
++{
++	bw_history* history = (bw_history*)value;
++	if(history != NULL && do_reset_info != NULL) /* should never be null.. but let's be sure */
++	{
++		unsigned char history_contains_data = update_history(history, do_reset_interval_start, do_reset_interval_end, do_reset_info);
++		if(history_contains_data == 0 || do_reset_ip_map == NULL)
++		{
++			//schedule data for ip to be deleted (can't delete history while we're traversing history tree data structure!)
++			if(do_reset_delete_ips != NULL) /* should never be null.. but let's be sure */
++			{
++				set_long_map_element(do_reset_delete_ips, key, (void*)(history->history_data + history->current_index));
++			}
++		}
++		else
++		{
++			set_long_map_element(do_reset_ip_map, key, (void*)(history->history_data + history->current_index) );
++		}
++	}
++}
++
++/* maps consumed by the clear_ips iterator callback below */
++long_map* clear_ip_map = NULL;
++long_map* clear_ip_history_map = NULL;
++/* long_map iterator callback: remove one ip's live counter and free its history ring */
++static void clear_ips(unsigned long key, void* value)
++{
++	if(clear_ip_history_map != NULL && clear_ip_map != NULL)
++	{
++		bw_history* history;
++
++		#ifdef BANDWIDTH_DEBUG
++		unsigned long* ip = &key;
++		printk("clearing ip = %u.%u.%u.%u\n", *((char*)ip), *(((char*)ip)+1), *(((char*)ip)+2), *(((char*)ip)+3) );
++		#endif
++
++		remove_long_map_element(clear_ip_map, key);
++		history = (bw_history*)remove_long_map_element(clear_ip_history_map, key);
++		if(history != NULL)
++		{
++			kfree(history->history_data);
++			kfree(history);
++		}
++	}
++}
++
++/* long_map iterator callback: zero a single per-ip bandwidth counter */
++static void set_bandwidth_to_zero(unsigned long key, void* value)
++{
++	*((uint64_t*)value) = 0;
++}
++
++
++long_map* reset_histories_ip_map = NULL;
++/*
++ * long_map iterator callback: wipe a history ring back to a single zeroed
++ * current slot and re-point the live counter map entry at it.
++ * NOTE(review): non_zero_nodes is set to 1 here while initialize_history
++ * starts it at 0 ("other than current") -- confirm which convention is intended.
++ */
++static void reset_histories(unsigned long key, void* value)
++{
++	bw_history* bh = (bw_history*)value;
++	bh->first_start = 0;
++	bh->first_end = 0;
++	bh->last_end = 0;
++	bh->num_nodes = 1;
++	bh->non_zero_nodes = 1;
++	bh->current_index = 0;
++	(bh->history_data)[0] = 0;
++	if(reset_histories_ip_map != NULL)
++	{
++		set_long_map_element(reset_histories_ip_map, key, bh->history_data);
++	}
++}
++
++
++/*
++ * Perform all interval resets that have elapsed up to 'now' for one id.
++ * With no saved intervals this just zeroes every counter and advances
++ * next_reset; with history it rolls each ip's ring forward once per
++ * elapsed interval (bounded by num_intervals_to_save), pruning ips whose
++ * histories end up all zero.  Called under bandwidth_lock; 'now' is
++ * already adjusted for the local timezone.
++ */
++static void handle_interval_reset(info_and_maps* iam, time_t now)
++{
++	struct ipt_bandwidth_info* info;
++
++	#ifdef BANDWIDTH_DEBUG
++	printk("now, handling interval reset\n");
++	#endif
++	if(iam == NULL)
++	{
++		#ifdef BANDWIDTH_DEBUG
++		printk("error: doing reset, iam is null \n");
++		#endif
++		return;
++	}
++	if(iam->ip_map == NULL)
++	{
++		#ifdef BANDWIDTH_DEBUG
++		printk("error: doing reset, ip_map is null\n");
++		#endif
++		return;
++	}
++	if(iam->info == NULL)
++	{
++		#ifdef BANDWIDTH_DEBUG
++		printk("error: doing reset, info is null\n");
++		#endif
++
++		return;
++	}
++
++	info = iam->info;
++	if(info->num_intervals_to_save == 0)
++	{
++		#ifdef BANDWIDTH_DEBUG
++		printk("doing reset for case where no intervals are saved\n");
++		#endif
++
++		/* nothing is archived: just advance next_reset past 'now' and zero all counters */
++		if(info->next_reset <= now)
++		{
++			info->next_reset = get_next_reset_time(info, info->previous_reset, info->previous_reset);
++			if(info->next_reset <= now)
++			{
++				info->next_reset = get_next_reset_time(info, now, info->previous_reset);
++			}
++		}
++		apply_to_every_long_map_value(iam->ip_map, set_bandwidth_to_zero);
++	}
++	else
++	{
++		unsigned long num_updates;
++		#ifdef BANDWIDTH_DEBUG
++		printk("doing reset for case where at least one interval is saved\n");
++		#endif
++
++
++		if(iam->ip_history_map == NULL)
++		{
++			#ifdef BANDWIDTH_DEBUG
++			printk("error: doing reset, history_map is null when num_intervals_to_save > 0\n");
++			#endif
++			return;
++		}
++
++		do_reset_info = info;
++		do_reset_ip_map = iam->ip_map;
++		clear_ip_map = iam->ip_map;
++		clear_ip_history_map = iam->ip_history_map;
++
++
++		/*
++		 * at most update as many times as we have intervals to save -- prevents
++		 * ridiculously long loop if interval length is 2 seconds and time was
++		 * reset to 5 years in the future
++		 */
++		num_updates = 0;
++		while(info->next_reset <= now && num_updates < info->num_intervals_to_save)
++		{
++			do_reset_delete_ips = initialize_long_map();
++			/*
++			 * don't check for malloc failure here -- we
++			 * include tests for whether do_reset_delete_ips
++			 * is null below (reset should still be able to proceed)
++			 */
++
++			do_reset_interval_start = info->previous_reset;
++			do_reset_interval_end = info->next_reset;
++
++			apply_to_every_long_map_value(iam->ip_history_map, do_reset);
++
++
++			info->previous_reset = info->next_reset;
++			info->next_reset = get_next_reset_time(info, info->previous_reset, info->previous_reset);
++
++			/* free all data for ips whose entire histories contain only zeros to conserve space */
++			if(do_reset_delete_ips != NULL)
++			{
++				unsigned long num_destroyed;
++
++				/* only clear ips if this is the last iteration of this update */
++				if(info->next_reset >= now)
++				{
++					/*
++					 * no need to reset iam->info->combined_bw if it gets deleted here.
++					 * below, at end of function it will get set to NULL if it gets wiped
++					 */
++
++					apply_to_every_long_map_value(do_reset_delete_ips, clear_ips);
++				}
++
++				/* but clear do_reset_delete_ips no matter what, values are just pointers to history data so we can ignore them */
++				destroy_long_map(do_reset_delete_ips, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++				do_reset_delete_ips = NULL;
++			}
++			num_updates++;
++		}
++		do_reset_info = NULL;
++		do_reset_ip_map = NULL;
++		clear_ip_map = NULL;
++		clear_ip_history_map = NULL;
++
++		do_reset_interval_start = 0;
++		do_reset_interval_end = 0;
++
++		/*
++		 * test if we've cycled past all existing data -- if so wipe all existing histories
++		 * and set previous reset time to now, and compute next reset time from
++		 * current time
++		 */
++		if(info->next_reset <= now)
++		{
++			reset_histories_ip_map = iam->ip_map;
++			apply_to_every_long_map_value(iam->ip_history_map, reset_histories);
++			reset_histories_ip_map = NULL;
++
++			info->previous_reset = now;
++			info->next_reset = get_next_reset_time(info, now, info->previous_reset);
++		}
++	}
++	/* re-resolve the cached combined (ip 0) counter -- it may have been freed or moved above */
++	info->combined_bw = (uint64_t*)get_long_map_element(iam->ip_map, 0);
++	info->current_bandwidth = 0;
++}
++
++/*
++ * set max bandwidth to be max possible using 63 of the
++ * 64 bits in our record. In some systems uint64_t is treated
++ * like signed, so to prevent errors, use only 63 bits
++ */
++/* integer exponentiation (base^pow) by simple recursion; pow is small (<= 62) at the call sites below */
++static uint64_t pow64(uint64_t base, uint64_t pow)
++{
++	uint64_t val = 1;
++	if(pow > 0)
++	{
++		val = base*pow64(base, pow-1);
++	}
++	return val;
++}
++static uint64_t get_bw_record_max(void) /* called by init to set global variable */
++{
++	return (pow64(2,62)) + (pow64(2,62)-1); /* = 2^63 - 1, computed without ever forming 2^63 itself */
++}
++static uint64_t bandwidth_record_max; /* cached result of get_bw_record_max() */
++
++
++/*
++ * Saturating add used for all counter updates: add 'add' to 'original',
++ * clamping at bandwidth_record_max; when is_check is nonzero the value is
++ * returned unchanged (read-only check).  NOTE(review): the trailing
++ * semicolon is part of the macro body and gets expanded at every use site.
++ */
++#define ADD_UP_TO_MAX(original,add,is_check) (bandwidth_record_max - original > add && is_check== 0) ? original+add : (is_check ? original : bandwidth_record_max);
++
++
++/*
++ * Shamelessly yoinked from xt_time.c
++ * "That is so amazingly amazing, I think I'd like to steal it."
++ * -- Zaphod Beeblebrox
++ */
++
++/* cumulative days before the start of each month, common (non-leap) years */
++static const u_int16_t days_since_year[] = {
++	0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334,
++};
++
++/* cumulative days before the start of each month, leap years */
++static const u_int16_t days_since_leapyear[] = {
++	0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335,
++};
++
++/*
++ * Since time progresses forward, it is best to organize this array in reverse,
++ * to minimize lookup time. These are days since epoch since start of each year,
++ * going back to 1970
++ */
++#define DSE_FIRST 2039
++static const u_int16_t days_since_epoch_for_each_year_start[] = {
++	/* 2039 - 2030 */
++	25202, 24837, 24472, 24106, 23741, 23376, 23011, 22645, 22280, 21915,
++	/* 2029 - 2020 */
++	21550, 21184, 20819, 20454, 20089, 19723, 19358, 18993, 18628, 18262,
++	/* 2019 - 2010 */
++	17897, 17532, 17167, 16801, 16436, 16071, 15706, 15340, 14975, 14610,
++	/* 2009 - 2000 */
++	14245, 13879, 13514, 13149, 12784, 12418, 12053, 11688, 11323, 10957,
++	/* 1999 - 1990 */
++	10592, 10227, 9862, 9496, 9131, 8766, 8401, 8035, 7670, 7305,
++	/* 1989 - 1980 */
++	6940, 6574, 6209, 5844, 5479, 5113, 4748, 4383, 4018, 3652,
++	/* 1979 - 1970 */
++	3287, 2922, 2557, 2191, 1826, 1461, 1096, 730, 365, 0,
++};
++
++/* standard Gregorian leap-year rule */
++static inline int is_leap(unsigned int y)
++{
++	return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
++}
++
++/* end of code yoinked from xt_time */
++
++
++/*
++ * Step backwards from a known reset instant (current_next_reset) to the
++ * reset instant immediately before it.  Constant intervals are a simple
++ * subtraction; calendar intervals probe progressively earlier instants in
++ * half-interval steps until get_next_reset_time lands strictly before
++ * current_next_reset.
++ */
++static time_t get_nominal_previous_reset_time(struct ipt_bandwidth_info *info, time_t current_next_reset)
++{
++	time_t previous_reset = current_next_reset;
++	if(info->reset_is_constant_interval == 0)
++	{
++		/* skip backwards in halves of interval after next, until we land strictly before current_next_reset */
++		time_t next = get_next_reset_time(info, current_next_reset, 0);
++		time_t half_interval = (next-current_next_reset)/2;
++		time_t half_count, tmp;
++		half_interval = half_interval == 0 ? 1 : half_interval; /* must be at least one second, otherwise we loop forever*/
++
++		half_count = 1;
++		tmp = get_next_reset_time(info, (current_next_reset-(half_count*half_interval)),0);
++		while(previous_reset >= current_next_reset)
++		{
++			previous_reset = tmp;
++			half_count++;
++			tmp = get_next_reset_time(info, (current_next_reset-(half_count*half_interval)),0);
++		}
++	}
++	else
++	{
++		previous_reset = current_next_reset - info->reset_interval;
++	}
++	return previous_reset;
++}
++
++
++/*
++ * Compute the first reset instant strictly after 'now'.  Calendar
++ * intervals (minute/hour/day/week/month) align to the calendar boundary
++ * plus the optional reset_time offset; constant intervals extend forward
++ * from reset_time or previous_reset in whole reset_interval steps.
++ * NOTE(review): in the month branch, the month==11 rollover reads
++ * days_since_epoch_for_each_year_start[year_index-1]; if year_index were 0
++ * (year DSE_FIRST, 2039) this would underflow -- confirm unreachable.
++ */
++static time_t get_next_reset_time(struct ipt_bandwidth_info *info, time_t now, time_t previous_reset)
++{
++	//first calculate when next reset would be if reset_time is 0 (which it may be)
++	time_t next_reset = 0;
++	if(info->reset_is_constant_interval == 0)
++	{
++		if(info->reset_interval == BANDWIDTH_MINUTE)
++		{
++			next_reset = ( (long)(now/60) + 1)*60;
++			if(info->reset_time > 0)
++			{
++				time_t alt_reset = next_reset + info->reset_time - 60;
++				next_reset = alt_reset > now ? alt_reset : next_reset+info->reset_time;
++			}
++		}
++		else if(info->reset_interval == BANDWIDTH_HOUR)
++		{
++			next_reset = ( (long)(now/(60*60)) + 1)*60*60;
++			if(info->reset_time > 0)
++			{
++				time_t alt_reset = next_reset + info->reset_time - (60*60);
++				next_reset = alt_reset > now ? alt_reset : next_reset+info->reset_time;
++			}
++		}
++		else if(info->reset_interval == BANDWIDTH_DAY)
++		{
++			next_reset = ( (long)(now/(60*60*24)) + 1)*60*60*24;
++			if(info->reset_time > 0)
++			{
++				time_t alt_reset = next_reset + info->reset_time - (60*60*24);
++				next_reset = alt_reset > now ? alt_reset : next_reset+info->reset_time;
++			}
++		}
++		else if(info->reset_interval == BANDWIDTH_WEEK)
++		{
++			long days_since_epoch = now/(60*60*24);
++			long current_weekday = (4 + days_since_epoch ) % 7 ; /* 1 Jan 1970 (epoch) was a Thursday = weekday 4 */
++			next_reset = (days_since_epoch + (7-current_weekday) )*(60*60*24);
++			if(info->reset_time > 0)
++			{
++				time_t alt_reset = next_reset + info->reset_time - (60*60*24*7);
++				next_reset = alt_reset > now ? alt_reset : next_reset+info->reset_time;
++			}
++		}
++		else if(info->reset_interval == BANDWIDTH_MONTH)
++		{
++			/* yeah, most of this is yoinked from xt_time too */
++			int year;
++			int year_index;
++			int year_day;
++			int month;
++			long days_since_epoch = now/(60*60*24);
++			uint16_t* month_start_days;
++			time_t alt_reset;
++
++			for (year_index = 0, year = DSE_FIRST; days_since_epoch_for_each_year_start[year_index] > days_since_epoch; year_index++)
++			{
++				year--;
++			}
++			year_day = days_since_epoch - days_since_epoch_for_each_year_start[year_index];
++			if (is_leap(year))
++			{
++				month_start_days = (u_int16_t*)days_since_leapyear;
++			}
++			else
++			{
++				month_start_days = (u_int16_t*)days_since_year;
++			}
++			for (month = 11 ; month > 0 && month_start_days[month] > year_day; month--){}
++
++			/* end majority of yoinkage */
++
++			alt_reset = (days_since_epoch_for_each_year_start[year_index] + month_start_days[month])*(60*60*24) + info->reset_time;
++			if(alt_reset > now)
++			{
++				next_reset = alt_reset;
++			}
++			else if(month == 11)
++			{
++				/* December: roll over to January of the following year (array is in reverse order, hence -1) */
++				next_reset = days_since_epoch_for_each_year_start[year_index-1]*(60*60*24) + info->reset_time;
++			}
++			else
++			{
++				next_reset = (days_since_epoch_for_each_year_start[year_index] + month_start_days[month+1])*(60*60*24) + info->reset_time;
++			}
++		}
++	}
++	else
++	{
++		if(info->reset_time > 0 && previous_reset > 0 && previous_reset <= now)
++		{
++			unsigned long adj_reset_time = info->reset_time;
++			unsigned long tz_secs = 60 * local_minutes_west;
++			if(adj_reset_time < tz_secs)
++			{
++				unsigned long interval_multiple = 1+(tz_secs/info->reset_interval);
++				adj_reset_time = adj_reset_time + (interval_multiple*info->reset_interval);
++			}
++			adj_reset_time = adj_reset_time - tz_secs;
++			/* NOTE(review): adj_reset_time is computed above but never used below -- confirm whether the arithmetic that follows should use it instead of info->reset_time */
++
++			if(info->reset_time > now)
++			{
++				unsigned long whole_intervals = ((info->reset_time - now)/info->reset_interval) + 1; /* add one to make sure integer gets rounded UP (since we're subtracting) */
++				next_reset = info->reset_time - (whole_intervals*info->reset_interval);
++				while(next_reset <= now)
++				{
++					next_reset = next_reset + info->reset_interval;
++				}
++
++			}
++			else /* info->reset_time <= now */
++			{
++				unsigned long whole_intervals = (now-info->reset_time)/info->reset_interval; /* integer gets rounded down */
++				next_reset = info->reset_time + (whole_intervals*info->reset_interval);
++				while(next_reset <= now)
++				{
++					next_reset = next_reset + info->reset_interval;
++				}
++			}
++		}
++		else if(previous_reset > 0)
++		{
++			next_reset = previous_reset;
++			if(next_reset <= now) /* check just to be sure, if this is not true VERY BAD THINGS will happen */
++			{
++				unsigned long whole_intervals = (now-next_reset)/info->reset_interval; /* integer gets rounded down */
++				next_reset = next_reset + (whole_intervals*info->reset_interval);
++				while(next_reset <= now)
++				{
++					next_reset = next_reset + info->reset_interval;
++				}
++			}
++		}
++		else
++		{
++			next_reset = now + info->reset_interval;
++		}
++	}
++
++	return next_reset;
++}
++
++
++
++/*
++ * Create (or replace) the counter map entry for 'ip' under the given id
++ * (iam), seeded with initial_bandwidth.  When intervals are being saved
++ * the counter is the current slot of a freshly allocated history ring;
++ * otherwise it is a standalone kmalloc'd uint64_t.  ip==0 is the special
++ * "combined" counter and is cached in info->combined_bw.  Returns a
++ * pointer to the live counter, or NULL on kmalloc failure (callers
++ * tolerate NULL).
++ */
++static uint64_t* initialize_map_entries_for_ip(info_and_maps* iam, unsigned long ip, uint64_t initial_bandwidth)
++{
++	uint64_t* new_bw = NULL; /* declared before any statement so the function stays valid C89 even with BANDWIDTH_DEBUG enabled */
++
++	#ifdef BANDWIDTH_DEBUG
++	printk("initializing entry for ip, bw=%lld\n", initial_bandwidth);
++	#endif
++
++	#ifdef BANDWIDTH_DEBUG
++	if(iam == NULL){ printk("error in initialization: iam is null!\n"); }
++	#endif
++
++
++	if(iam != NULL) /* should never happen, but let's be certain */
++	{
++		struct ipt_bandwidth_info *info = iam->info;
++		long_map* ip_map = iam->ip_map;
++		long_map* ip_history_map = iam->ip_history_map;
++
++		#ifdef BANDWIDTH_DEBUG
++		if(info == NULL){ printk("error in initialization: info is null!\n"); }
++		if(ip_map == NULL){ printk("error in initialization: ip_map is null!\n"); }
++		#endif
++
++
++		if(info != NULL && ip_map != NULL) /* again... should never happen but let's be sure */
++		{
++			if(info->num_intervals_to_save == 0 || ip_history_map == NULL)
++			{
++				#ifdef BANDWIDTH_DEBUG
++				printk(" initializing entry for ip without history\n");
++				#endif
++				new_bw = (uint64_t*)kmalloc(sizeof(uint64_t), GFP_ATOMIC);
++			}
++			else
++			{
++				#ifdef BANDWIDTH_DEBUG
++				printk(" initializing entry for ip with history\n");
++				#endif
++
++				bw_history *new_history = initialize_history(info->num_intervals_to_save);
++				if(new_history != NULL) /* check for kmalloc failure */
++				{
++					bw_history* old_history;
++					#ifdef BANDWIDTH_DEBUG
++					printk(" malloc succeeded, new history is non-null\n");
++					#endif
++
++					/* the live counter is the ring's current slot */
++					new_bw = (uint64_t*)(new_history->history_data + new_history->current_index);
++					old_history = set_long_map_element(ip_history_map, ip, (void*)new_history);
++					if(old_history != NULL)
++					{
++						#ifdef BANDWIDTH_DEBUG
++						printk(" after initialization old_history not null! (something is FUBAR)\n");
++						#endif
++						kfree(old_history->history_data);
++						kfree(old_history);
++					}
++				}
++			}
++			if(new_bw != NULL) /* check for kmalloc failure */
++			{
++				uint64_t* old_bw;
++				*new_bw = initial_bandwidth;
++				old_bw = set_long_map_element(ip_map, ip, (void*)new_bw );
++
++				/* only free old_bw if num_intervals_to_save is zero -- otherwise it already got freed above when we wiped the old history */
++				if(old_bw != NULL && info->num_intervals_to_save == 0)
++				{
++					kfree(old_bw); /* BUGFIX: was free() -- kernel heap memory allocated with kmalloc must be released with kfree */
++				}
++
++				if(ip == 0)
++				{
++					info->combined_bw = new_bw;
++				}
++
++				#ifdef BANDWIDTH_DEBUG
++				uint64_t *test = (uint64_t*)get_long_map_element(ip_map, ip);
++				if(test == NULL)
++				{
++					printk(" after initialization bw is null!\n");
++				}
++				else
++				{
++					printk(" after initialization bw is %lld\n", *new_bw);
++					printk(" after initialization test is %lld\n", *test);
++				}
++				#endif
++			}
++		}
++	}
++
++	return new_bw;
++}
++
++
++/*
++ * Main xt match hook.  Accounts skb->len into the per-id counters
++ * (combined and/or per-ip depending on info->type), after first handling
++ * any pending timezone/backwards time shifts and interval resets, then
++ * evaluates the configured comparison (GT/LT) against bandwidth_cutoff.
++ * BANDWIDTH_CHECK entries read (without incrementing) another id's counters.
++ */
++static bool match(const struct sk_buff *skb, struct xt_action_param *par)
++{
++
++	struct ipt_bandwidth_info *info = ((const struct ipt_bandwidth_info*)(par->matchinfo))->non_const_self;
++
++	time_t now;
++	int match_found;
++
++
++	unsigned char is_check = info->cmp == BANDWIDTH_CHECK ? 1 : 0;
++	unsigned char do_src_dst_swap = 0;
++	info_and_maps* iam = NULL;
++	long_map* ip_map = NULL;
++
++	uint64_t* bws[2] = {NULL, NULL}; /* counters touched for this packet (up to two for LOCAL/REMOTE types) */
++
++	/* if we're currently setting this id, ignore new data until set is complete */
++	if(set_in_progress == 1)
++	{
++		if(strcmp(info->id, set_id) == 0)
++		{
++			return 0;
++		}
++	}
++
++
++
++
++	/*
++	 * BEFORE we lock, check for timezone shift
++	 * this will almost always be be very,very quick,
++	 * but in the event there IS a shift this
++	 * function will lock both kernel update spinlock
++	 * and userspace i/o semaphore, and do a lot of
++	 * number crunching so we shouldn't
++	 * already be locked.
++	 */
++	now = get_seconds();
++
++
++	if(now != last_local_mw_update )
++	{
++		check_for_timezone_shift(now, 0);
++		check_for_backwards_time_shift(now);
++	}
++	now = now - local_seconds_west; /* Adjust for local timezone */
++
++	spin_lock_bh(&bandwidth_lock);
++
++	if(is_check)
++	{
++		/* a CHECK rule operates on the counters of the id it references, not its own */
++		info_and_maps* check_iam;
++		do_src_dst_swap = info->check_type == BANDWIDTH_CHECK_SWAP ? 1 : 0;
++		check_iam = (info_and_maps*)get_string_map_element_with_hashed_key(id_map, info->hashed_id);
++		if(check_iam == NULL)
++		{
++			spin_unlock_bh(&bandwidth_lock);
++			return 0;
++		}
++		info = check_iam->info;
++	}
++
++
++
++
++	if(info->reset_interval != BANDWIDTH_NEVER)
++	{
++		if(info->next_reset < now)
++		{
++			//do reset
++			//iam = (info_and_maps*)get_string_map_element_with_hashed_key(id_map, info->hashed_id);
++			iam = (info_and_maps*)info->iam;
++			if(iam != NULL) /* should never be null, but let's be sure */
++			{
++				handle_interval_reset(iam, now);
++				ip_map = iam->ip_map;
++			}
++			else
++			{
++				/* even in case of malloc failure or weird error we can update these params */
++				info->current_bandwidth = 0;
++				info->next_reset = get_next_reset_time(info, now, info->previous_reset);
++			}
++		}
++	}
++
++	if(info->type == BANDWIDTH_COMBINED)
++	{
++		if(iam == NULL)
++		{
++			//iam = (info_and_maps*)get_string_map_element_with_hashed_key(id_map, info->hashed_id);
++			iam = (info_and_maps*)info->iam;
++			if(iam != NULL)
++			{
++				ip_map = iam->ip_map;
++			}
++		}
++		if(ip_map != NULL) /* if this ip_map != NULL iam can never be NULL, so we don't need to check this */
++		{
++
++			if(info->combined_bw == NULL)
++			{
++				bws[0] = initialize_map_entries_for_ip(iam, 0, skb->len);
++			}
++			else
++			{
++				bws[0] = info->combined_bw;
++				*(bws[0]) = ADD_UP_TO_MAX(*(bws[0]), (uint64_t)skb->len, is_check);
++			}
++		}
++		else
++		{
++			#ifdef BANDWIDTH_DEBUG
++			printk("error: ip_map is null in match!\n");
++			#endif
++		}
++		info->current_bandwidth = ADD_UP_TO_MAX(info->current_bandwidth, (uint64_t)skb->len, is_check);
++	}
++	else
++	{
++		uint32_t bw_ip, bw_ip_index;
++		uint32_t bw_ips[2] = {0, 0};
++		struct iphdr* iph = (struct iphdr*)(skb_network_header(skb));
++		if(info->type == BANDWIDTH_INDIVIDUAL_SRC)
++		{
++			//src ip
++			bw_ips[0] = iph->saddr;
++			if(do_src_dst_swap)
++			{
++				bw_ips[0] = iph->daddr;
++			}
++		}
++		else if (info->type == BANDWIDTH_INDIVIDUAL_DST)
++		{
++			//dst ip
++			bw_ips[0] = iph->daddr;
++			if(do_src_dst_swap)
++			{
++				bw_ips[0] = iph->saddr;
++			}
++		}
++		else if(info->type == BANDWIDTH_INDIVIDUAL_LOCAL || info->type == BANDWIDTH_INDIVIDUAL_REMOTE)
++		{
++			//remote or local ip -- need to test both src && dst
++			uint32_t src_ip = iph->saddr;
++			uint32_t dst_ip = iph->daddr;
++			if(info->type == BANDWIDTH_INDIVIDUAL_LOCAL)
++			{
++				bw_ips[0] = ((info->local_subnet_mask & src_ip) == info->local_subnet) ? src_ip : 0;
++				bw_ips[1] = ((info->local_subnet_mask & dst_ip) == info->local_subnet) ? dst_ip : 0;
++			}
++			else if(info->type == BANDWIDTH_INDIVIDUAL_REMOTE)
++			{
++				bw_ips[0] = ((info->local_subnet_mask & src_ip) != info->local_subnet ) ? src_ip : 0;
++				bw_ips[1] = ((info->local_subnet_mask & dst_ip) != info->local_subnet ) ? dst_ip : 0;
++			}
++		}
++
++		if(ip_map == NULL)
++		{
++			//iam = (info_and_maps*)get_string_map_element_with_hashed_key(id_map, info->hashed_id);
++			iam = (info_and_maps*)info->iam;
++			if(iam != NULL)
++			{
++				ip_map = iam->ip_map;
++			}
++		}
++		if(!is_check && info->cmp == BANDWIDTH_MONITOR)
++		{
++			/* monitoring: also maintain the special combined (ip 0) counter */
++			uint64_t* combined_oldval = info->combined_bw;
++			if(combined_oldval == NULL)
++			{
++				combined_oldval = initialize_map_entries_for_ip(iam, 0, (uint64_t)skb->len);
++			}
++			else
++			{
++				*combined_oldval = ADD_UP_TO_MAX(*combined_oldval, (uint64_t)skb->len, is_check);
++			}
++		}
++		bw_ip_index = bw_ips[0] == 0 ? 1 : 0;
++		bw_ip = bw_ips[bw_ip_index];
++		if(bw_ip != 0 && ip_map != NULL)
++		{
++			uint64_t* oldval = get_long_map_element(ip_map, (unsigned long)bw_ip);
++			if(oldval == NULL)
++			{
++				if(!is_check)
++				{
++					/* may return NULL on malloc failure but that's ok */
++					oldval = initialize_map_entries_for_ip(iam, (unsigned long)bw_ip, (uint64_t)skb->len);
++				}
++			}
++			else
++			{
++				*oldval = ADD_UP_TO_MAX(*oldval, (uint64_t)skb->len, is_check);
++			}
++
++			/* this is fine, setting bws[bw_ip_index] to NULL on check for undefined value or kmalloc failure won't crash anything */
++			bws[bw_ip_index] = oldval;
++		}
++
++	}
++
++
++	/* evaluate the configured comparison against whichever counters were touched */
++	match_found = 0;
++	if(info->cmp != BANDWIDTH_MONITOR)
++	{
++		if(info->cmp == BANDWIDTH_GT)
++		{
++			match_found = bws[0] != NULL ? ( *(bws[0]) > info->bandwidth_cutoff ? 1 : match_found ) : match_found;
++			match_found = bws[1] != NULL ? ( *(bws[1]) > info->bandwidth_cutoff ? 1 : match_found ) : match_found;
++			match_found = info->current_bandwidth > info->bandwidth_cutoff ? 1 : match_found;
++		}
++		else if(info->cmp == BANDWIDTH_LT)
++		{
++			match_found = bws[0] != NULL ? ( *(bws[0]) < info->bandwidth_cutoff ? 1 : match_found ) : match_found;
++			match_found = bws[1] != NULL ? ( *(bws[1]) < info->bandwidth_cutoff ? 1 : match_found ) : match_found;
++			match_found = info->current_bandwidth < info->bandwidth_cutoff ? 1 : match_found;
++		}
++	}
++
++
++	spin_unlock_bh(&bandwidth_lock);
++
++
++
++
++
++	return match_found;
++}
++
++
++
++
++
++
++
++
++
++
++/**********************
++ * Get functions
++ *********************/
++
++#define MAX_IP_STR_LENGTH 16
++
++/* status codes used while serializing usage data for userspace "get" requests */
++#define ERROR_NONE 0
++#define ERROR_NO_ID 1
++#define ERROR_BUFFER_TOO_SHORT 2
++#define ERROR_NO_HISTORY 3
++#define ERROR_UNKNOWN 4
++/* parsed form of a userspace get request (filled in by parse_get_request) */
++typedef struct get_req_struct
++{
++	uint32_t ip;                           /* ip whose counters are requested -- presumably 0 means "all"; confirm against userspace tool */
++	uint32_t next_ip_index;                /* resume position for dumps spanning multiple reads -- TODO confirm */
++	unsigned char return_history;          /* nonzero => dump full history blocks, not just current totals */
++	char id[BANDWIDTH_MAX_ID_LENGTH];      /* id of the bandwidth rule being queried */
++} get_request;
++
++/* iteration state for dumping every ip across successive reads */
++static unsigned long* output_ip_list = NULL;
++static unsigned long output_ip_list_length = 0;
++
++/* forward declarations for the get-request helpers defined below */
++static char add_ip_block( uint32_t ip,
++ unsigned char full_history_requested,
++ info_and_maps* iam,
++ unsigned char* output_buffer,
++ uint32_t* current_output_index,
++ uint32_t buffer_length
++ );
++static void parse_get_request(unsigned char* request_buffer, get_request* parsed_request);
++static int handle_get_failure(int ret_value, int unlock_user_sem, int unlock_bandwidth_spin, unsigned char error_code, unsigned char* out_buffer, unsigned char* free_buffer );
++
++
++/*
++ * returns whether we succeeded in adding ip block, 0= success,
++ * otherwise error code of problem that we found
++ */
++static char add_ip_block( uint32_t ip,
++ unsigned char full_history_requested,
++ info_and_maps* iam,
++ unsigned char* output_buffer,
++ uint32_t* current_output_index,
++ uint32_t output_buffer_length
++ )
++{
++ #ifdef BANDWIDTH_DEBUG
++ uint32_t *ipp = &ip;
++ printk("doing output for ip = %u.%u.%u.%u\n", *((unsigned char*)ipp), *(((unsigned char*)ipp)+1), *(((unsigned char*)ipp)+2), *(((unsigned char*)ipp)+3) );
++ #endif
++
++ if(full_history_requested)
++ {
++ bw_history* history = NULL;
++ if(iam->info->num_intervals_to_save > 0 && iam->ip_history_map != NULL)
++ {
++ history = (bw_history*)get_long_map_element(iam->ip_history_map, ip);
++ }
++ if(history == NULL)
++ {
++ #ifdef BANDWIDTH_DEBUG
++ printk(" no history map for ip, dumping latest value in history format\n" );
++ #endif
++
++
++ uint32_t block_length = (2*4) + (3*8);
++ uint64_t *bw;
++
++ if(*current_output_index + block_length > output_buffer_length)
++ {
++ return ERROR_BUFFER_TOO_SHORT;
++ }
++ *( (uint32_t*)(output_buffer + *current_output_index) ) = ip;
++ *current_output_index = *current_output_index + 4;
++
++ *( (uint32_t*)(output_buffer + *current_output_index) ) = 1;
++ *current_output_index = *current_output_index + 4;
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = (uint64_t)iam->info->previous_reset + (60 * local_minutes_west);
++ *current_output_index = *current_output_index + 8;
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = (uint64_t)iam->info->previous_reset + (60 * local_minutes_west);
++ *current_output_index = *current_output_index + 8;
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = (uint64_t)iam->info->previous_reset + (60 * local_minutes_west);
++ *current_output_index = *current_output_index + 8;
++
++ bw = (uint64_t*)get_long_map_element(iam->ip_map, ip);
++ if(bw == NULL)
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = 0;
++ }
++ else
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = *bw;
++ }
++ *current_output_index = *current_output_index + 8;
++
++ }
++ else
++ {
++ uint32_t block_length = (2*4) + (3*8) + (8*history->num_nodes);
++ uint64_t last_reset;
++ uint32_t node_num;
++ uint32_t next_index;
++
++ if(*current_output_index + block_length > output_buffer_length)
++ {
++ return ERROR_BUFFER_TOO_SHORT;
++ }
++
++ *( (uint32_t*)(output_buffer + *current_output_index) ) = ip;
++ *current_output_index = *current_output_index + 4;
++
++ *( (uint32_t*)(output_buffer + *current_output_index) )= history->num_nodes;
++ *current_output_index = *current_output_index + 4;
++
++
++
++ /* need to return times in regular UTC not the UTC - minutes west, which is useful for processing */
++ last_reset = (uint64_t)iam->info->previous_reset + (60 * local_minutes_west);
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = history->first_start > 0 ? (uint64_t)history->first_start + (60 * local_minutes_west) : last_reset;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" dumping first start = %lld\n", *( (uint64_t*)(output_buffer + *current_output_index) ) );
++ #endif
++ *current_output_index = *current_output_index + 8;
++
++
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = history->first_end > 0 ? (uint64_t)history->first_end + (60 * local_minutes_west) : last_reset;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" dumping first end = %lld\n", *( (uint64_t*)(output_buffer + *current_output_index) ) );
++ #endif
++ *current_output_index = *current_output_index + 8;
++
++
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = history->last_end > 0 ? (uint64_t)history->last_end + (60 * local_minutes_west) : last_reset;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" dumping last end = %lld\n", *( (uint64_t*)(output_buffer + *current_output_index) ) );
++ #endif
++ *current_output_index = *current_output_index + 8;
++
++
++
++ node_num = 0;
++ next_index = history->num_nodes == history->max_nodes ? history->current_index+1 : 0;
++ next_index = next_index >= history->max_nodes ? 0 : next_index;
++ for(node_num=0; node_num < history->num_nodes; node_num++)
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = (history->history_data)[ next_index ];
++ *current_output_index = *current_output_index + 8;
++ next_index = (next_index + 1) % history->max_nodes;
++ }
++ }
++ }
++ else
++ {
++ uint64_t *bw;
++ if(*current_output_index + 8 > output_buffer_length)
++ {
++ return ERROR_BUFFER_TOO_SHORT;
++ }
++
++ *( (uint32_t*)(output_buffer + *current_output_index) ) = ip;
++ *current_output_index = *current_output_index + 4;
++
++
++ bw = (uint64_t*)get_long_map_element(iam->ip_map, ip);
++ if(bw == NULL)
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = 0;
++ }
++ else
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = *bw;
++ }
++ *current_output_index = *current_output_index + 8;
++ }
++ return ERROR_NONE;
++}
++
++
++
++/*
++ * convenience method for cleaning crap up after failed malloc or other
++ * error that we can't recover from in get function
++ */
++static int handle_get_failure(int ret_value, int unlock_user_sem, int unlock_bandwidth_spin, unsigned char error_code, unsigned char* out_buffer, unsigned char* free_buffer )
++{
++ copy_to_user(out_buffer, &error_code, 1);
++ if( free_buffer != NULL ) { kfree(free_buffer); }
++ if(unlock_bandwidth_spin) { spin_unlock_bh(&bandwidth_lock); }
++ if(unlock_user_sem) { up(&userspace_lock); }
++ return ret_value;
++}
++
++/*
++ * request structure:
++ * bytes 1-4 is ip (uint32_t)
++ * bytes 5-8 is the next ip index (uint32_t)
++ * byte 9 is whether to return full history or just current usage (unsigned char)
++ * bytes 10:10+MAX_ID_LENGTH are the id (a string)
++ */
++static void parse_get_request(unsigned char* request_buffer, get_request* parsed_request)
++{
++ uint32_t* ip = (uint32_t*)(request_buffer+0);
++ uint32_t* next_ip_index = (uint32_t*)(request_buffer+4);
++ unsigned char* return_history = (unsigned char*)(request_buffer+8);
++
++
++
++ parsed_request->ip = *ip;
++ parsed_request->next_ip_index = *next_ip_index;
++ parsed_request->return_history = *return_history;
++ memcpy(parsed_request->id, request_buffer+9, BANDWIDTH_MAX_ID_LENGTH);
++ (parsed_request->id)[BANDWIDTH_MAX_ID_LENGTH-1] = '\0'; /* make sure id is null terminated no matter what */
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("ip = %u.%u.%u.%u\n", *((char*)ip), *(((char*)ip)+1), *(((char*)ip)+2), *(((char*)ip)+3) );
++ printk("next ip index = %d\n", *next_ip_index);
++ printk("return_history = %d\n", *return_history);
++ #endif
++}
++
++
++static int ipt_bandwidth_get_ctl(struct sock *sk, int cmd, void *user, int *len)
++{
++ /* check for timezone shift & adjust if necessary */
++ char* buffer;
++ get_request query;
++ info_and_maps* iam;
++
++ unsigned char* error;
++ uint32_t* total_ips;
++ uint32_t* start_index;
++ uint32_t* num_ips_in_response;
++ uint64_t* reset_interval;
++ uint64_t* reset_time;
++ unsigned char* reset_is_constant_interval;
++ uint32_t current_output_index;
++ time_t now = get_seconds();
++ check_for_timezone_shift(now, 0);
++ check_for_backwards_time_shift(now);
++ now = now - local_seconds_west; /* Adjust for local timezone */
++
++
++ down(&userspace_lock);
++
++
++ /* first check that query buffer is big enough to hold the info needed to parse the query */
++ if(*len < BANDWIDTH_MAX_ID_LENGTH + 9)
++ {
++
++ return handle_get_failure(0, 1, 0, ERROR_BUFFER_TOO_SHORT, user, NULL);
++ }
++
++
++
++ /* copy the query from userspace to kernel space & parse */
++ buffer = kmalloc(*len, GFP_ATOMIC);
++ if(buffer == NULL) /* check for malloc failure */
++ {
++ return handle_get_failure(0, 1, 0, ERROR_UNKNOWN, user, NULL);
++ }
++ copy_from_user(buffer, user, *len);
++ parse_get_request(buffer, &query);
++
++
++
++
++
++
++ /*
++ * retrieve data for this id and verify all variables are properly defined, just to be sure
++ * this is a kernel module -- it pays to be paranoid!
++ */
++ spin_lock_bh(&bandwidth_lock);
++
++ iam = (info_and_maps*)get_string_map_element(id_map, query.id);
++
++ if(iam == NULL)
++ {
++ return handle_get_failure(0, 1, 1, ERROR_NO_ID, user, buffer);
++ }
++ if(iam->info == NULL || iam->ip_map == NULL)
++ {
++ return handle_get_failure(0, 1, 1, ERROR_NO_ID, user, buffer);
++ }
++ if(iam->info->num_intervals_to_save > 0 && iam->ip_history_map == NULL)
++ {
++ return handle_get_failure(0, 1, 1, ERROR_NO_ID, user, buffer);
++ }
++
++ /* allocate ip list if this is first query */
++ if(query.next_ip_index == 0 && query.ip == 0)
++ {
++ if(output_ip_list != NULL)
++ {
++ kfree(output_ip_list);
++ }
++ if(iam->info->type == BANDWIDTH_COMBINED)
++ {
++ output_ip_list_length = 1;
++ output_ip_list = (unsigned long*)kmalloc(sizeof(unsigned long), GFP_ATOMIC);
++ if(output_ip_list != NULL) { *output_ip_list = 0; }
++ }
++ else
++ {
++ output_ip_list = get_sorted_long_map_keys(iam->ip_map, &output_ip_list_length);
++ }
++
++ if(output_ip_list == NULL)
++ {
++ return handle_get_failure(0, 1, 1, ERROR_UNKNOWN, user, buffer);
++ }
++ }
++
++ /* if this is not first query do a sanity check -- make sure it's within bounds of allocated ip list */
++ if(query.next_ip_index > 0 && (output_ip_list == NULL || query.next_ip_index > output_ip_list_length))
++ {
++ return handle_get_failure(0, 1, 1, ERROR_UNKNOWN, user, buffer);
++ }
++
++
++
++
++ /*
++ // values only reset when a packet hits a rule, so
++ // reset may have expired without data being reset.
++ // So, test if we need to reset values to zero
++ */
++ if(iam->info->reset_interval != BANDWIDTH_NEVER)
++ {
++ if(iam->info->next_reset < now)
++ {
++ //do reset
++ handle_interval_reset(iam, now);
++ }
++ }
++
++
++
++ /* compute response & store it in buffer
++ *
++ * format of response:
++ * byte 1 : error code (0 for ok)
++ * bytes 2-5 : total_num_ips found in query (further gets may be necessary to retrieve them)
++ * bytes 6-9 : start_index, index (in a list of total_num_ips) of first ip in response
++ * bytes 10-13 : num_ips_in_response, number of ips in this response
++ * bytes 14-21 : reset_interval (helps deal with DST shifts in userspace)
++ * bytes 22-29 : reset_time (helps deal with DST shifts in userspace)
++ * byte 30 : reset_is_constant_interval (helps deal with DST shifts in userspace)
++ * remaining bytes contain blocks of ip data
++ * format is dependent on whether history was queried
++ *
++ * if history was NOT queried we have
++ * bytes 1-4 : ip
++ * bytes 5-12 : bandwidth
++ *
++ * if history WAS queried we have
++ * (note we are using 64 bit integers for time here
++ * even though time_t is 32 bits on most 32 bit systems
++ * just to be on the safe side)
++ * bytes 1-4 : ip
++ * bytes 5-8 : history_length number of history values (including current)
++ * bytes 9-16 : first start
++ * bytes 17-24 : first end
++ * bytes 25-32 : recent end
++ * 33 onward : list of 64 bit integers of length history_length
++ *
++ */
++ error = buffer;
++ total_ips = (uint32_t*)(buffer+1);
++ start_index = (uint32_t*)(buffer+5);
++ num_ips_in_response = (uint32_t*)(buffer+9);
++ reset_interval = (uint64_t*)(buffer+13);
++ reset_time = (uint64_t*)(buffer+21);
++ reset_is_constant_interval = (char*)(buffer+29);
++
++ *reset_interval = (uint64_t)iam->info->reset_interval;
++ *reset_time = (uint64_t)iam->info->reset_time;
++ *reset_is_constant_interval = iam->info->reset_is_constant_interval;
++
++ current_output_index = 30;
++ if(query.ip != 0)
++ {
++ *error = add_ip_block( query.ip,
++ query.return_history,
++ iam,
++ buffer,
++ &current_output_index,
++ *len
++ );
++
++ *total_ips = *error == 0;
++ *start_index = 0;
++ *num_ips_in_response = *error == 0 ? 1 : 0;
++ }
++ else
++ {
++ uint32_t next_index = query.next_ip_index;
++ *error = ERROR_NONE;
++ *total_ips = output_ip_list_length;
++ *start_index = next_index;
++ *num_ips_in_response = 0;
++ while(*error == ERROR_NONE && next_index < output_ip_list_length)
++ {
++ uint32_t next_ip = output_ip_list[next_index];
++ *error = add_ip_block( next_ip,
++ query.return_history,
++ iam,
++ buffer,
++ &current_output_index,
++ *len
++ );
++ if(*error == ERROR_NONE)
++ {
++ *num_ips_in_response = *num_ips_in_response + 1;
++ next_index++;
++ }
++ }
++ if(*error == ERROR_BUFFER_TOO_SHORT && *num_ips_in_response > 0)
++ {
++ *error = ERROR_NONE;
++ }
++ if(next_index == output_ip_list_length)
++ {
++ kfree(output_ip_list);
++ output_ip_list = NULL;
++ output_ip_list_length = 0;
++ }
++ }
++
++ spin_unlock_bh(&bandwidth_lock);
++
++ copy_to_user(user, buffer, *len);
++ kfree(buffer);
++
++
++
++ up(&userspace_lock);
++
++
++ return 0;
++}
++
++
++
++
++
++/********************
++ * Set functions
++ ********************/
++
++typedef struct set_header_struct
++{
++ uint32_t total_ips;
++ uint32_t next_ip_index;
++ uint32_t num_ips_in_buffer;
++ unsigned char history_included;
++ unsigned char zero_unset_ips;
++ time_t last_backup;
++ char id[BANDWIDTH_MAX_ID_LENGTH];
++} set_header;
++
++static int handle_set_failure(int ret_value, int unlock_user_sem, int unlock_bandwidth_spin, unsigned char* free_buffer );
++static void parse_set_header(unsigned char* input_buffer, set_header* header);
++static void set_single_ip_data(unsigned char history_included, info_and_maps* iam, unsigned char* buffer, uint32_t* buffer_index, time_t now);
++
++static int handle_set_failure(int ret_value, int unlock_user_sem, int unlock_bandwidth_spin, unsigned char* free_buffer )
++{
++ if( free_buffer != NULL ) { kfree(free_buffer); }
++ set_in_progress = 0;
++ if(unlock_bandwidth_spin) { spin_unlock_bh(&bandwidth_lock); }
++ if(unlock_user_sem) { up(&userspace_lock); }
++ return ret_value;
++}
++
++static void parse_set_header(unsigned char* input_buffer, set_header* header)
++{
++ /*
++ * set header structure:
++ * bytes 1-4 : total_ips being set in this and subsequent requests
++ * bytes 5-8 : next_ip_index, first ip being set in this set command
++ * bytes 9-12 : num_ips_in_buffer, the number of ips in this set request
++ * byte 13 : history_included (whether history data is included, or just current data)
++ * byte 14 : zero_unset_ips, whether to zero all ips not included in this and subsequent requests
++ * bytes 15-22 : last_backup time (64 bit)
++ * bytes 23-23+BANDWIDTH_MAX_ID_LENGTH : id
++ * bytes 23+ : ip data
++ */
++
++ uint32_t* total_ips = (uint32_t*)(input_buffer+0);
++ uint32_t* next_ip_index = (uint32_t*)(input_buffer+4);
++ uint32_t* num_ips_in_buffer = (uint32_t*)(input_buffer+8);
++ unsigned char* history_included = (unsigned char*)(input_buffer+12);
++ unsigned char* zero_unset_ips = (unsigned char*)(input_buffer+13);
++ uint64_t* last_backup = (uint64_t*)(input_buffer+14);
++
++
++ header->total_ips = *total_ips;
++ header->next_ip_index = *next_ip_index;
++ header->num_ips_in_buffer = *num_ips_in_buffer;
++ header->history_included = *history_included;
++ header->zero_unset_ips = *zero_unset_ips;
++ header->last_backup = (time_t)*last_backup;
++ memcpy(header->id, input_buffer+22, BANDWIDTH_MAX_ID_LENGTH);
++ (header->id)[BANDWIDTH_MAX_ID_LENGTH-1] = '\0'; /* make sure id is null terminated no matter what */
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("parsed set header:\n");
++ printk(" total_ips = %d\n", header->total_ips);
++ printk(" next_ip_index = %d\n", header->next_ip_index);
++ printk(" num_ips_in_buffer = %d\n", header->num_ips_in_buffer);
++ printk(" zero_unset_ips = %d\n", header->zero_unset_ips);
++ printk(" last_backup = %ld\n", header->last_backup);
++ printk(" id = %s\n", header->id);
++ #endif
++}
++static void set_single_ip_data(unsigned char history_included, info_and_maps* iam, unsigned char* buffer, uint32_t* buffer_index, time_t now)
++{
++ /*
++ * note that times stored within the module are adjusted so they are equal to seconds
++ * since unix epoch that corresponds to the UTC wall-clock time (timezone offset 0)
++ * that is equal to the wall-clock time in the current time-zone. Incoming values must
++ * be adjusted similarly
++ */
++ uint32_t ip = *( (uint32_t*)(buffer + *buffer_index) );
++
++ #ifdef BANDWIDTH_DEBUG
++ uint32_t* ipp = &ip;
++ printk("doing set for ip = %u.%u.%u.%u\n", *((unsigned char*)ipp), *(((unsigned char*)ipp)+1), *(((unsigned char*)ipp)+2), *(((unsigned char*)ipp)+3) );
++ printk("ip index = %d\n", *buffer_index);
++ #endif
++
++ if(history_included)
++ {
++ uint32_t num_history_nodes = *( (uint32_t*)(buffer + *buffer_index+4));
++ if(iam->info->num_intervals_to_save > 0 && iam->ip_history_map != NULL)
++ {
++ time_t first_start = (time_t) *( (uint64_t*)(buffer + *buffer_index+8));
++ /* time_t first_end = (time_t) *( (uint64_t*)(buffer + *buffer_index+16)); //not used */
++ /* time_t last_end = (time_t) *( (uint64_t*)(buffer + *buffer_index+24)); //not used */
++ time_t next_start;
++ time_t next_end;
++ uint32_t node_index;
++ uint32_t zero_count;
++ bw_history* history;
++
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("setting history with first start = %ld, now = %ld\n", first_start, now);
++ #endif
++
++
++ *buffer_index = *buffer_index + (2*4) + (3*8);
++
++ /* adjust for timezone */
++ next_start = first_start - (60 * local_minutes_west);
++ next_end = get_next_reset_time(iam->info, next_start, next_start);
++ node_index=0;
++ zero_count=0;
++ history = NULL;
++ while(next_start < now)
++ {
++ uint64_t next_bw = 0;
++ if(node_index < num_history_nodes)
++ {
++ next_bw = *( (uint64_t*)(buffer + *buffer_index));
++ *buffer_index = *buffer_index + 8;
++ }
++ zero_count = next_bw == 0 ? zero_count+1 : 0;
++
++ if(node_index == 0 || history == NULL)
++ {
++ initialize_map_entries_for_ip(iam, ip, next_bw);
++ history = get_long_map_element(iam->ip_history_map, (unsigned long)ip);
++ }
++ else if(next_end < now) /* if this is most recent node, don't do update since last node is current bandwidth */
++ {
++ update_history(history, next_start, next_end, iam->info);
++ (history->history_data)[ history->current_index ] = next_bw;
++ if(zero_count < history->max_nodes +2)
++ {
++ next_start = next_end;
++ next_end = get_next_reset_time(iam->info, next_start, next_start);
++ }
++ else
++ {
++ /* do history reset */
++ history->first_start = 0;
++ history->first_end = 0;
++ history->last_end = 0;
++ history->num_nodes = 1;
++ history->non_zero_nodes = 1;
++ history->current_index = 0;
++ (history->history_data)[0] = 0;
++
++ next_start = now;
++ next_end = get_next_reset_time(iam->info, now, next_start);
++ }
++ }
++ else /* if this is most recent node, we still need to exit loop*/
++ {
++ break;
++ }
++ node_index++;
++ }
++ while(node_index < num_history_nodes)
++ {
++ *buffer_index = *buffer_index + 8;
++ node_index++;
++ }
++ if(history != NULL)
++ {
++ set_long_map_element(iam->ip_map, ip, (history->history_data + history->current_index) );
++ iam->info->previous_reset = next_start;
++ iam->info->next_reset = next_end;
++ if(ip == 0)
++ {
++ iam->info->current_bandwidth = (history->history_data)[history->current_index];
++ }
++ }
++ }
++ else
++ {
++ uint64_t bw;
++ *buffer_index = *buffer_index + (2*4) + (3*8) + ((num_history_nodes-1)*8);
++ bw = *( (uint64_t*)(buffer + *buffer_index));
++ initialize_map_entries_for_ip(iam, ip, bw); /* automatically frees existing values if they exist */
++ *buffer_index = *buffer_index + 8;
++ if(ip == 0)
++ {
++ iam->info->current_bandwidth = bw;
++ }
++ }
++
++ }
++ else
++ {
++ uint64_t bw = *( (uint64_t*)(buffer + *buffer_index+4) );
++ #ifdef BANDWIDTH_DEBUG
++ printk(" setting bw to %lld\n", bw );
++ #endif
++
++
++ initialize_map_entries_for_ip(iam, ip, bw); /* automatically frees existing values if they exist */
++ *buffer_index = *buffer_index + 12;
++
++ if(ip == 0)
++ {
++ iam->info->current_bandwidth = bw;
++ }
++ }
++
++
++}
++
++static int ipt_bandwidth_set_ctl(struct sock *sk, int cmd, void *user, u_int32_t len)
++{
++ /* check for timezone shift & adjust if necessary */
++ char* buffer;
++ set_header header;
++ info_and_maps* iam;
++ uint32_t buffer_index;
++ uint32_t next_ip_index;
++ time_t now = get_seconds();
++ check_for_timezone_shift(now, 0);
++ check_for_backwards_time_shift(now);
++ now = now - local_seconds_west; /* Adjust for local timezone */
++
++
++ /* just return right away if user buffer is too short to contain even the header */
++ if(len < (3*4) + 2 + 8 + BANDWIDTH_MAX_ID_LENGTH)
++ {
++ #ifdef BANDWIDTH_DEBUG
++ printk("set error: buffer not large enough!\n");
++ #endif
++ return 0;
++ }
++
++ down(&userspace_lock);
++ set_in_progress = 1;
++
++ buffer = kmalloc(len, GFP_ATOMIC);
++ if(buffer == NULL) /* check for malloc failure */
++ {
++ return handle_set_failure(0, 1, 0, NULL);
++ }
++ copy_from_user(buffer, user, len);
++ parse_set_header(buffer, &header);
++
++
++
++
++ /*
++ * retrieve data for this id and verify all variables are properly defined, just to be sure
++ * this is a kernel module -- it pays to be paranoid!
++ */
++ spin_lock_bh(&bandwidth_lock);
++
++
++ iam = (info_and_maps*)get_string_map_element(id_map, header.id);
++ if(iam == NULL)
++ {
++ return handle_set_failure(0, 1, 1, buffer);
++ }
++ if(iam->info == NULL || iam->ip_map == NULL)
++ {
++ return handle_set_failure(0, 1, 1, buffer);
++ }
++ if(iam->info->num_intervals_to_save > 0 && iam->ip_history_map == NULL)
++ {
++ return handle_set_failure(0, 1, 1, buffer);
++ }
++
++ /*
++ * during set unconditionally set combined_bw to NULL
++ * if combined data (ip=0) exists after set exits cleanly, we will restore it
++ */
++ iam->info->combined_bw = NULL;
++
++ //if zero_unset_ips == 1 && next_ip_index == 0
++ //then clear data for all ips for this id
++ if(header.zero_unset_ips && header.next_ip_index == 0)
++ {
++ //clear data
++ if(iam->info->num_intervals_to_save > 0)
++ {
++ while(iam->ip_map->num_elements > 0)
++ {
++ unsigned long key;
++ remove_smallest_long_map_element(iam->ip_map, &key);
++ /* ignore return value -- it's actually malloced in history, not here */
++ }
++ while(iam->ip_history_map->num_elements > 0)
++ {
++ unsigned long key;
++ bw_history* history = remove_smallest_long_map_element(iam->ip_history_map, &key);
++ kfree(history->history_data);
++ kfree(history);
++ }
++ }
++ else
++ {
++ while(iam->ip_map->num_elements > 0)
++ {
++ unsigned long key;
++ uint64_t *bw = remove_smallest_long_map_element(iam->ip_map, &key);
++ kfree(bw);
++ }
++ }
++ }
++
++ /*
++ * last_backup parameter is only relevant for case where we are not setting history
++ * and when we don't have a constant interval length or a specified reset_time (since in this case start time gets reset when rule is inserted and there is therefore no constant end)
++ * If num_intervals_to_save =0 and is_constant_interval=0, check it. If it's nonzero (0=ignore) and invalid, return.
++ */
++ if(header.last_backup > 0 && iam->info->num_intervals_to_save == 0 && (iam->info->reset_is_constant_interval == 0 || iam->info->reset_time != 0) )
++ {
++ time_t adjusted_last_backup_time = header.last_backup - (60 * local_minutes_west);
++ time_t next_reset_of_last_backup = get_next_reset_time(iam->info, adjusted_last_backup_time, adjusted_last_backup_time);
++ if(next_reset_of_last_backup != iam->info->next_reset)
++ {
++ return handle_set_failure(0, 1, 1, buffer);
++ }
++ }
++
++
++ /*
++ * iterate over each ip block in buffer,
++ * loading data into necessary kernel-space data structures
++ */
++ buffer_index = (3*4) + 1 + 1 + 8 + BANDWIDTH_MAX_ID_LENGTH;
++ next_ip_index = header.next_ip_index;
++
++ while(next_ip_index < header.num_ips_in_buffer)
++ {
++ set_single_ip_data(header.history_included, iam, buffer, &buffer_index, now);
++ next_ip_index++;
++ }
++
++ if (next_ip_index == header.total_ips)
++ {
++ set_in_progress = 0;
++ }
++
++ /* set combined_bw */
++ iam->info->combined_bw = (uint64_t*)get_long_map_element(iam->ip_map, 0);
++
++ kfree(buffer);
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++}
++static int checkentry(const struct xt_mtchk_param *par)
++{
++
++
++ struct ipt_bandwidth_info *info = (struct ipt_bandwidth_info*)(par->matchinfo);
++
++
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("checkentry called\n");
++ #endif
++
++
++
++
++
++ if(info->ref_count == NULL) /* first instance, we're inserting rule */
++ {
++ struct ipt_bandwidth_info *master_info = (struct ipt_bandwidth_info*)kmalloc(sizeof(struct ipt_bandwidth_info), GFP_ATOMIC);
++ info->ref_count = (unsigned long*)kmalloc(sizeof(unsigned long), GFP_ATOMIC);
++
++ if(info->ref_count == NULL) /* deal with kmalloc failure */
++ {
++ printk("ipt_bandwidth: kmalloc failure in checkentry!\n");
++ return 0;
++ }
++ *(info->ref_count) = 1;
++ info->non_const_self = master_info;
++ info->hashed_id = sdbm_string_hash(info->id);
++ info->iam = NULL;
++ info->combined_bw = NULL;
++
++ memcpy(master_info->id, info->id, BANDWIDTH_MAX_ID_LENGTH);
++ master_info->type = info->type;
++ master_info->check_type = info->check_type;
++ master_info->local_subnet = info->local_subnet;
++ master_info->local_subnet_mask = info->local_subnet_mask;
++ master_info->cmp = info->cmp;
++ master_info->reset_is_constant_interval = info->reset_is_constant_interval;
++ master_info->reset_interval = info->reset_interval;
++ master_info->reset_time = info->reset_time;
++ master_info->bandwidth_cutoff = info->bandwidth_cutoff;
++ master_info->current_bandwidth = info->current_bandwidth;
++ master_info->next_reset = info->next_reset;
++ master_info->previous_reset = info->previous_reset;
++ master_info->last_backup_time = info->last_backup_time;
++ master_info->num_intervals_to_save = info->num_intervals_to_save;
++
++ master_info->hashed_id = info->hashed_id;
++ master_info->iam = info->iam;
++ master_info->combined_bw = info->combined_bw;
++ master_info->non_const_self = info->non_const_self;
++ master_info->ref_count = info->ref_count;
++
++ #ifdef BANDWIDTH_DEBUG
++ printk(" after increment, ref count = %ld\n", *(info->ref_count) );
++ #endif
++
++ if(info->cmp != BANDWIDTH_CHECK)
++ {
++ info_and_maps *iam;
++
++ down(&userspace_lock);
++ spin_lock_bh(&bandwidth_lock);
++
++
++
++ iam = (info_and_maps*)get_string_map_element(id_map, info->id);
++ if(iam != NULL)
++ {
++ printk("ipt_bandwidth: error, \"%s\" is a duplicate id\n", info->id);
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++ }
++
++ if(info->reset_interval != BANDWIDTH_NEVER)
++ {
++ time_t now = get_seconds();
++ if(now != last_local_mw_update )
++ {
++ check_for_timezone_shift(now, 1);
++ }
++
++
++ now = now - (60 * local_minutes_west); /* Adjust for local timezone */
++ info->previous_reset = now;
++ master_info->previous_reset = now;
++ if(info->next_reset == 0)
++ {
++ info->next_reset = get_next_reset_time(info, now, now);
++ master_info->next_reset = info->next_reset;
++ /*
++ * if we specify last backup time, check that next reset is consistent,
++ * otherwise reset current_bandwidth to 0
++ *
++ * only applies to combined type -- otherwise we need to handle setting bandwidth
++ * through userspace library
++ */
++ if(info->last_backup_time != 0 && info->type == BANDWIDTH_COMBINED)
++ {
++ time_t adjusted_last_backup_time = info->last_backup_time - (60 * local_minutes_west);
++ time_t next_reset_of_last_backup = get_next_reset_time(info, adjusted_last_backup_time, adjusted_last_backup_time);
++ if(next_reset_of_last_backup != info->next_reset)
++ {
++ info->current_bandwidth = 0;
++ master_info->current_bandwidth = 0;
++ }
++ info->last_backup_time = 0;
++ master_info->last_backup_time = 0;
++ }
++ }
++ }
++
++ iam = (info_and_maps*)kmalloc( sizeof(info_and_maps), GFP_ATOMIC);
++ if(iam == NULL) /* handle kmalloc failure */
++ {
++ printk("ipt_bandwidth: kmalloc failure in checkentry!\n");
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++ }
++ iam->ip_map = initialize_long_map();
++ if(iam->ip_map == NULL) /* handle kmalloc failure */
++ {
++ printk("ipt_bandwidth: kmalloc failure in checkentry!\n");
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++ }
++ iam->ip_history_map = NULL;
++ if(info->num_intervals_to_save > 0)
++ {
++ iam->ip_history_map = initialize_long_map();
++ if(iam->ip_history_map == NULL) /* handle kmalloc failure */
++ {
++ printk("ipt_bandwidth: kmalloc failure in checkentry!\n");
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++ }
++ }
++
++
++ iam->info = master_info;
++ set_string_map_element(id_map, info->id, iam);
++
++ info->iam = (void*)iam;
++ master_info->iam = (void*)iam;
++
++
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ }
++ }
++
++ else
++ {
++ /* info->non_const_self = info; */
++
++
++ *(info->ref_count) = *(info->ref_count) + 1;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" after increment, ref count = %ld\n", *(info->ref_count) );
++ #endif
++
++
++ /*
++ if(info->cmp != BANDWIDTH_CHECK)
++ {
++ info_and_maps* iam;
++ down(&userspace_lock);
++ spin_lock_bh(&bandwidth_lock);
++ iam = (info_and_maps*)get_string_map_element(id_map, info->id);
++ if(iam != NULL)
++ {
++ iam->info = info;
++ }
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ }
++ */
++ }
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("checkentry complete\n");
++ #endif
++ return 0;
++}
++
++static void destroy(const struct xt_mtdtor_param *par)
++{
++
++ struct ipt_bandwidth_info *info = (struct ipt_bandwidth_info*)(par->matchinfo);
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("destroy called\n");
++ #endif
++
++ *(info->ref_count) = *(info->ref_count) - 1;
++
++ #ifdef BANDWIDTH_DEBUG
++ printk(" after decrement refcount = %ld\n", *(info->ref_count));
++ #endif
++
++ if(*(info->ref_count) == 0)
++ {
++ info_and_maps* iam;
++ down(&userspace_lock);
++ spin_lock_bh(&bandwidth_lock);
++
++ info->combined_bw = NULL;
++ iam = (info_and_maps*)remove_string_map_element(id_map, info->id);
++ if(iam != NULL && info->cmp != BANDWIDTH_CHECK)
++ {
++ unsigned long num_destroyed;
++ if(iam->ip_map != NULL && iam->ip_history_map != NULL)
++ {
++ unsigned long history_index = 0;
++ bw_history** histories_to_free;
++
++ destroy_long_map(iam->ip_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++
++ histories_to_free = (bw_history**)destroy_long_map(iam->ip_history_map, DESTROY_MODE_RETURN_VALUES, &num_destroyed);
++
++ /* num_destroyed will be 0 if histories_to_free is null after malloc failure, so this is safe */
++ for(history_index = 0; history_index < num_destroyed; history_index++)
++ {
++ bw_history* h = histories_to_free[history_index];
++ if(h != NULL)
++ {
++ kfree(h->history_data);
++ kfree(h);
++ }
++ }
++
++ }
++ else if(iam->ip_map != NULL)
++ {
++ destroy_long_map(iam->ip_map, DESTROY_MODE_FREE_VALUES, &num_destroyed);
++ }
++ kfree(iam);
++ /* info portion of iam gets taken care of automatically */
++ }
++ kfree(info->ref_count);
++ kfree(info->non_const_self);
++
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ }
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("destroy complete\n");
++ #endif
++}
++
++static struct nf_sockopt_ops ipt_bandwidth_sockopts =
++{
++ .pf = PF_INET,
++ .set_optmin = BANDWIDTH_SET,
++ .set_optmax = BANDWIDTH_SET+1,
++ .set = ipt_bandwidth_set_ctl,
++ .get_optmin = BANDWIDTH_GET,
++ .get_optmax = BANDWIDTH_GET+1,
++ .get = ipt_bandwidth_get_ctl
++};
++
++
++static struct xt_match bandwidth_match __read_mostly =
++{
++ .name = "bandwidth",
++ .match = &match,
++ .family = AF_INET,
++ .matchsize = sizeof(struct ipt_bandwidth_info),
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ /* Register setsockopt */
++ if (nf_register_sockopt(&ipt_bandwidth_sockopts) < 0)
++ {
++ printk("ipt_bandwidth: Can't register sockopts. Aborting\n");
++ }
++ bandwidth_record_max = get_bw_record_max();
++ local_minutes_west = old_minutes_west = sys_tz.tz_minuteswest;
++ local_seconds_west = local_minutes_west*60;
++ last_local_mw_update = get_seconds();
++ if(local_seconds_west > last_local_mw_update)
++ {
++ /* we can't let adjusted time be < 0 -- pretend timezone is still UTC */
++ local_minutes_west = 0;
++ local_seconds_west = 0;
++ }
++
++ id_map = initialize_string_map(0);
++ if(id_map == NULL) /* deal with kmalloc failure */
++ {
++ printk("id map is null, returning -1\n");
++ return -1;
++ }
++
++
++ return xt_register_match(&bandwidth_match);
++}
++
++static void __exit fini(void)
++{
++ down(&userspace_lock);
++ spin_lock_bh(&bandwidth_lock);
++ if(id_map != NULL)
++ {
++ unsigned long num_returned;
++ info_and_maps **iams = (info_and_maps**)destroy_string_map(id_map, DESTROY_MODE_RETURN_VALUES, &num_returned);
++ int iam_index;
++ for(iam_index=0; iam_index < num_returned; iam_index++)
++ {
++ info_and_maps* iam = iams[iam_index];
++ long_map* ip_map = iam->ip_map;
++ unsigned long num_destroyed;
++ destroy_long_map(ip_map, DESTROY_MODE_FREE_VALUES, &num_destroyed);
++ kfree(iam);
++ /* info portion of iam gets taken care of automatically */
++ }
++ }
++ nf_unregister_sockopt(&ipt_bandwidth_sockopts);
++ xt_unregister_match(&bandwidth_match);
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++
++}
++
++module_init(init);
++module_exit(fini);
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_bandwidth.mod.c 2015-06-19 03:02:55.381669455 +0800
+@@ -0,0 +1,20 @@
++#include <linux/module.h>
++#include <linux/vermagic.h>
++#include <linux/compiler.h>
++
++MODULE_INFO(vermagic, VERMAGIC_STRING);
++
++struct module __this_module
++__attribute__((section(".gnu.linkonce.this_module"))) = {
++ .name = KBUILD_MODNAME,
++ .init = init_module,
++#ifdef CONFIG_MODULE_UNLOAD
++ .exit = cleanup_module,
++#endif
++};
++
++static const char __module_depends[]
++__attribute_used__
++__attribute__((section(".modinfo"))) =
++"depends=";
++
+--- linux.orig/net/ipv4/netfilter/Kconfig 2015-06-15 00:19:31.000000000 +0800
++++ linux.new/net/ipv4/netfilter/Kconfig 2015-06-19 03:02:55.441666949 +0800
+@@ -389,5 +389,25 @@
+
+ endif # IP_NF_ARPTABLES
+
++config IP_NF_MATCH_WEBURL
++ tristate "weburl match support"
++ depends on IP_NF_IPTABLES
++ help
++ This option enables weburl match support.
++config IP_NF_MATCH_WEBMON
++ tristate "webmon match support"
++ depends on IP_NF_IPTABLES
++ help
++ This option enables webmon match support.
++config IP_NF_MATCH_TIMERANGE
++ tristate "timerange match support"
++ depends on IP_NF_IPTABLES
++ help
++ This option enables timerange match support.
++config IP_NF_MATCH_BANDWIDTH
++ tristate "bandwidth match support"
++ depends on IP_NF_IPTABLES
++ help
++ This option enables bandwidth match support.
+ endmenu
+
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_timerange.c 2015-06-19 03:02:55.285673465 +0800
+@@ -0,0 +1,142 @@
++/* timerange -- An iptables extension to match multiple timeranges within a week
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <net/sock.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/time.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_timerange.h>
++
++#include <linux/ktime.h>
++
++
++#include <linux/ip.h>
++
++#include <linux/netfilter/x_tables.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Bishop");
++MODULE_DESCRIPTION("Match time ranges, designed for use with Gargoyle web interface (www.gargoyle-router.com)");
++
++
++extern struct timezone sys_tz;
++
++
++static bool match(const struct sk_buff *skb, const struct xt_action_param *par)
++{
++ const struct ipt_timerange_info *info = (const struct ipt_timerange_info*)(par->matchinfo);
++
++
++ time_t stamp_time;
++ int weekday;
++ int seconds_since_midnight;
++ int test_index;
++ int match_found;
++
++ struct timeval test_time;
++
++ do_gettimeofday(&test_time);
++ stamp_time = test_time.tv_sec;
++ stamp_time = stamp_time - (60 * sys_tz.tz_minuteswest); /* Adjust for local timezone */
++ seconds_since_midnight = stamp_time % 86400; /* 86400 seconds per day */
++ weekday = (4 + (stamp_time/86400)) % 7; /* 1970-01-01 (time=0) was a Thursday (4). */
++
++ /*
++ printk("time=%d, since midnight = %d, day=%d, minuteswest=%d\n", stamp_time, seconds_since_midnight, weekday, sys_tz.tz_minuteswest);
++ */
++
++ match_found = 0;
++ if(info->type == HOURS)
++ {
++ for(test_index=0; info->ranges[test_index] != -1 && match_found == 0 && seconds_since_midnight >= info->ranges[test_index]; test_index=test_index+2)
++ {
++ match_found = seconds_since_midnight >= info->ranges[test_index] && seconds_since_midnight <= info->ranges[test_index+1] ? 1 : match_found;
++ }
++ }
++ else if(info->type == WEEKDAYS)
++ {
++ match_found = info->days[weekday];
++ }
++ else if(info->type == DAYS_HOURS)
++ {
++ match_found = info->days[weekday];
++ if(match_found == 1)
++ {
++ match_found = 0;
++ for(test_index=0; info->ranges[test_index] != -1 && match_found == 0 && seconds_since_midnight >= info->ranges[test_index]; test_index=test_index+2)
++ {
++ match_found = seconds_since_midnight >= info->ranges[test_index] && seconds_since_midnight <= info->ranges[test_index+1] ? 1 : match_found;
++ }
++ }
++ }
++ else if(info->type == WEEKLY_RANGE)
++ {
++ time_t seconds_since_sunday_midnight = seconds_since_midnight + (weekday*86400);
++ for(test_index=0; info->ranges[test_index] != -1 && match_found == 0 && seconds_since_sunday_midnight >= info->ranges[test_index]; test_index=test_index+2)
++ {
++ match_found = seconds_since_sunday_midnight >= info->ranges[test_index] && seconds_since_sunday_midnight <= info->ranges[test_index+1] ? 1 : match_found;
++ }
++
++ }
++
++ match_found = info->invert == 0 ? match_found : !match_found;
++ return match_found;
++}
++
++
++static int checkentry(const struct xt_mtchk_param *par)
++{
++ return 0;
++}
++
++
++static struct xt_match timerange_match __read_mostly =
++{
++ .name = "timerange",
++ .match = &match,
++ .family = AF_INET,
++ .matchsize = sizeof(struct ipt_timerange_info),
++ .checkentry = &checkentry,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ return xt_register_match(&timerange_match);
++}
++
++static void __exit fini(void)
++{
++ xt_unregister_match(&timerange_match);
++}
++
++module_init(init);
++module_exit(fini);
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_weburl.mod.c 2015-06-19 03:02:54.725696856 +0800
+@@ -0,0 +1,20 @@
++#include <linux/module.h>
++#include <linux/vermagic.h>
++#include <linux/compiler.h>
++
++MODULE_INFO(vermagic, VERMAGIC_STRING);
++
++struct module __this_module
++__attribute__((section(".gnu.linkonce.this_module"))) = {
++ .name = KBUILD_MODNAME,
++ .init = init_module,
++#ifdef CONFIG_MODULE_UNLOAD
++ .exit = cleanup_module,
++#endif
++};
++
++static const char __module_depends[]
++__attribute_used__
++__attribute__((section(".modinfo"))) =
++"depends=";
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_timerange.mod.c 2015-06-19 03:02:55.285673465 +0800
+@@ -0,0 +1,20 @@
++#include <linux/module.h>
++#include <linux/vermagic.h>
++#include <linux/compiler.h>
++
++MODULE_INFO(vermagic, VERMAGIC_STRING);
++
++struct module __this_module
++__attribute__((section(".gnu.linkonce.this_module"))) = {
++ .name = KBUILD_MODNAME,
++ .init = init_module,
++#ifdef CONFIG_MODULE_UNLOAD
++ .exit = cleanup_module,
++#endif
++};
++
++static const char __module_depends[]
++__attribute_used__
++__attribute__((section(".modinfo"))) =
++"depends=";
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/tree_map.h 2015-06-19 03:02:54.737696355 +0800
+@@ -0,0 +1,1084 @@
++/*
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This work 'as-is' we provide.
++ * No warranty, express or implied.
++ * We've done our best,
++ * to debug and test.
++ * Liability for damages denied.
++ *
++ * Permission is granted hereby,
++ * to copy, share, and modify.
++ * Use as is fit,
++ * free or for profit.
++ * On this notice these rights rely.
++ *
++ *
++ *
++ * Note that unlike other portions of Gargoyle this code
++ * does not fall under the GPL, but rather under the whimsical
++ * 'Poetic License' above.
++ *
++ * Basically, this library contains a bunch of utilities
++ * that I find useful. I'm sure other libraries exist
++ * that are just as good or better, but I like these tools
++ * because I personally wrote them, so I know their quirks.
++ * (i.e. I know where the bodies are buried). I want to
++ * make sure that I can re-use these utilities for whatever
++ * code I may want to write in the future be it
++ * proprietary or open-source, so I've put them under
++ * a very, very permissive license.
++ *
++ * If you find this code useful, use it. If not, don't.
++ * I really don't care.
++ *
++ */
++
++
++#if __KERNEL__
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++ #define free(foo) kfree(foo)
++ #define printf(format,args...) printk(format,##args)
++
++ /* kernel strdup */
++ static inline char *kernel_strdup(const char *str);
++ static inline char *kernel_strdup(const char *str)
++ {
++ char *tmp;
++ long int s;
++ s=strlen(str) + 1;
++ tmp = kmalloc(s, GFP_ATOMIC);
++ if (tmp != NULL)
++ {
++ memcpy(tmp, str, s);
++ }
++ return tmp;
++ }
++ #define strdup kernel_strdup
++
++#endif
++
++
++
++/* tree_map structs / prototypes */
++typedef struct long_tree_map_node
++{
++ unsigned long key;
++ void* value;
++
++ signed char balance;
++ struct long_tree_map_node* left;
++ struct long_tree_map_node* right;
++} long_map_node;
++
++typedef struct
++{
++ long_map_node* root;
++ unsigned long num_elements;
++
++}long_map;
++
++typedef struct
++{
++ long_map lm;
++ unsigned char store_keys;
++ unsigned long num_elements;
++
++}string_map;
++
++
++
++/* long map functions */
++long_map* initialize_long_map(void);
++void* get_long_map_element(long_map* map, unsigned long key);
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* set_long_map_element(long_map* map, unsigned long key, void* value);
++void* remove_long_map_element(long_map* map, unsigned long key);
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned);
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned);
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value));
++
++/* string map functions */
++string_map* initialize_string_map(unsigned char store_keys);
++void* get_string_map_element(string_map* map, const char* key);
++void* set_string_map_element(string_map* map, const char* key, void* value);
++void* remove_string_map_element(string_map* map, const char* key);
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned);
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned);
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value));
++
++
++/*
++ * three different ways to deal with values when data structure is destroyed
++ */
++#define DESTROY_MODE_RETURN_VALUES 20
++#define DESTROY_MODE_FREE_VALUES 21
++#define DESTROY_MODE_IGNORE_VALUES 22
++
++
++/*
++ * for convenience & backwards compatibility alias _string_map_ functions to
++ * _map_ functions since string map is used more often than long map
++ */
++#define initialize_map initialize_string_map
++#define set_map_element set_string_map_element
++#define get_map_element get_string_map_element
++#define remove_map_element remove_string_map_element
++#define get_map_keys get_string_map_keys
++#define get_map_values get_string_map_values
++#define destroy_map destroy_string_map
++
++
++/* internal utility structures/ functions */
++typedef struct stack_node_struct
++{
++ long_map_node** node_ptr;
++ signed char direction;
++ struct stack_node_struct* previous;
++} stack_node;
++
++static void free_stack(stack_node* stack);
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed);
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value));
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value));
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth);
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth);
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op);
++static void rotate_right (long_map_node** parent);
++static void rotate_left (long_map_node** parent);
++
++/* internal for string map */
++typedef struct
++{
++ char* key;
++ void* value;
++} string_map_key_value;
++static unsigned long sdbm_string_hash(const char *key);
++
++
++
++
++/***************************************************
++ * For testing only
++ ***************************************************/
++/*
++void print_list(stack_node *l);
++
++void print_list(stack_node *l)
++{
++ if(l != NULL)
++ {
++ printf(" list key = %ld, dir=%d, \n", (*(l->node_ptr))->key, l->direction);
++ print_list(l->previous);
++ }
++}
++*/
++/******************************************************
++ * End testing Code
++ *******************************************************/
++
++
++
++
++/***************************************************
++ * string_map function definitions
++ ***************************************************/
++
++string_map* initialize_string_map(unsigned char store_keys)
++{
++ string_map* map = (string_map*)malloc(sizeof(string_map));
++ if(map != NULL)
++ {
++ map->store_keys = store_keys;
++ map->lm.root = NULL;
++ map->lm.num_elements = 0;
++ map->num_elements = map->lm.num_elements;
++ }
++ return map;
++}
++
++void* get_string_map_element(string_map* map, const char* key)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = get_long_map_element( &(map->lm), hashed_key);
++ if(return_value != NULL && map->store_keys)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++void* set_string_map_element(string_map* map, const char* key, void* value)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = NULL;
++ if(map->store_keys)
++ {
++ string_map_key_value* kv = (string_map_key_value*)malloc(sizeof(string_map_key_value));
++ if(kv == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ kv->key = strdup(key);
++ if(kv->key == NULL) /* deal with malloc failure */
++ {
++ free(kv);
++ return NULL;
++ }
++ kv->value = value;
++ return_value = set_long_map_element( &(map->lm), hashed_key, kv);
++ if(return_value != NULL)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ free(r->key);
++ free(r);
++ }
++ }
++ else
++ {
++ return_value = set_long_map_element( &(map->lm), hashed_key, value);
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++void* remove_string_map_element(string_map* map, const char* key)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = remove_long_map_element( &(map->lm), hashed_key);
++
++ if(return_value != NULL && map->store_keys)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ free(r->key);
++ free(r);
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned)
++{
++ char** str_keys;
++ str_keys = (char**)malloc((map->num_elements+1)*sizeof(char*));
++ if(str_keys == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ str_keys[0] = NULL;
++ *num_keys_returned = 0;
++ if(map->store_keys && map->num_elements > 0)
++ {
++ unsigned long list_length;
++ void** long_values = get_sorted_long_map_values( &(map->lm), &list_length);
++ unsigned long key_index;
++ /*list_length will be 0 on malloc failure in get_sorted_long_map_values, so this code shouldn't seg fault if that happens */
++ for(key_index = 0; key_index < list_length; key_index++)
++ {
++ str_keys[key_index] = strdup( ((string_map_key_value*)(long_values[key_index]))->key);
++ if(str_keys[key_index] == NULL) /* deal with malloc failure */
++ {
++ //just return the incomplete list (hey, it's null terminated...)
++ free(long_values);
++ return str_keys;
++ }
++ *num_keys_returned = *num_keys_returned + 1;
++ }
++ str_keys[list_length] = NULL;
++ free(long_values);
++ }
++ return str_keys;
++}
++
++
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned)
++{
++ void** values = NULL;
++ if(map != NULL)
++ {
++ values = get_sorted_long_map_values ( &(map->lm), num_values_returned );
++ }
++ return values;
++}
++
++
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = NULL;
++ if(map != NULL)
++ {
++ if(map->store_keys)
++ {
++ void** kvs = destroy_long_map_values( &(map->lm), DESTROY_MODE_RETURN_VALUES, num_destroyed );
++ unsigned long kv_index = 0;
++ for(kv_index=0; kv_index < *num_destroyed; kv_index++)
++ {
++ string_map_key_value* kv = (string_map_key_value*)kvs[kv_index];
++ void* value = kv->value;
++
++ free(kv->key);
++ free(kv);
++ if(destruction_type == DESTROY_MODE_FREE_VALUES)
++ {
++ free(value);
++ }
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ kvs[kv_index] = value;
++ }
++ }
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values = kvs;
++ }
++ else
++ {
++ free(kvs);
++ }
++ }
++ else
++ {
++ return_values = destroy_long_map_values( &(map->lm), destruction_type, num_destroyed );
++ }
++ free(map);
++ }
++ return return_values;
++}
++
++
++
++
++/***************************************************
++ * long_map function definitions
++ ***************************************************/
++
++long_map* initialize_long_map(void)
++{
++ long_map* map = (long_map*)malloc(sizeof(long_map));
++ if(map != NULL) /* test for malloc failure */
++ {
++ map->root = NULL;
++ map->num_elements = 0;
++ }
++ return map;
++}
++
++void* get_long_map_element(long_map* map, unsigned long key)
++{
++ void* value = NULL;
++
++ if(map->root != NULL)
++ {
++ long_map_node* parent_node = map->root;
++ long_map_node* next_node;
++ while( key != parent_node->key && (next_node = (long_map_node *)(key < parent_node->key ? parent_node->left : parent_node->right)) != NULL)
++ {
++ parent_node = next_node;
++ }
++ if(parent_node->key == key)
++ {
++ value = parent_node->value;
++ }
++ }
++ return value;
++}
++
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key)
++{
++ void* value = NULL;
++ if(map->root != NULL)
++ {
++ long_map_node* next_node = map->root;
++ while( next_node->left != NULL)
++ {
++ next_node = next_node->left;
++ }
++ value = next_node->value;
++ *smallest_key = next_node->key;
++ }
++ return value;
++}
++
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key)
++{
++ void* value = NULL;
++ if(map->root != NULL)
++ {
++ long_map_node* next_node = map->root;
++ while( next_node->right != NULL)
++ {
++ next_node = next_node->right;
++ }
++ value = next_node->value;
++ *largest_key = next_node->key;
++ }
++ return value;
++}
++
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key)
++{
++ get_smallest_long_map_element(map, smallest_key);
++ return remove_long_map_element(map, *smallest_key);
++}
++
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key)
++{
++ get_largest_long_map_element(map, largest_key);
++ return remove_long_map_element(map, *largest_key);
++}
++
++
++/* if replacement performed, returns replaced value, otherwise null */
++void* set_long_map_element(long_map* map, unsigned long key, void* value)
++{
++ stack_node* parent_list = NULL;
++ void* old_value = NULL;
++ int old_value_found = 0;
++
++ long_map_node* parent_node;
++ long_map_node* next_node;
++ stack_node* next_parent;
++ stack_node* previous_parent;
++ signed char new_balance;
++
++
++ long_map_node* new_node = (long_map_node*)malloc(sizeof(long_map_node));
++ if(new_node == NULL)
++ {
++ return NULL;
++ }
++ new_node->value = value;
++ new_node->key = key;
++ new_node->left = NULL;
++ new_node->right = NULL;
++ new_node->balance = 0;
++
++
++
++ if(map->root == NULL)
++ {
++ map->root = new_node;
++ }
++ else
++ {
++ parent_node = map->root;
++
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ free(new_node);
++ return NULL; /* won't insert but won't seg fault */
++ }
++ next_parent->node_ptr = &(map->root);
++ next_parent->previous = parent_list;
++ parent_list = next_parent;
++
++ while( key != parent_node->key && (next_node = (key < parent_node->key ? parent_node->left : parent_node->right) ) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ free(new_node);
++ return NULL;
++ }
++ next_parent->node_ptr = key < parent_node->key ? &(parent_node->left) : &(parent_node->right);
++ next_parent->previous = parent_list;
++ next_parent->previous->direction = key < parent_node->key ? -1 : 1;
++ parent_list = next_parent;
++
++ parent_node = next_node;
++ }
++
++
++ if(key == parent_node->key)
++ {
++ old_value = parent_node->value;
++ old_value_found = 1;
++ parent_node->value = value;
++ free(new_node);
++ /* we merely replaced a node, no need to rebalance */
++ }
++ else
++ {
++ if(key < parent_node->key)
++ {
++ parent_node->left = (void*)new_node;
++ parent_list->direction = -1;
++ }
++ else
++ {
++ parent_node->right = (void*)new_node;
++ parent_list->direction = 1;
++ }
++
++
++ /* we inserted a node, rebalance */
++ previous_parent = parent_list;
++ new_balance = 1; /* initial value is not used, but must not be 0 for initial loop condition */
++
++
++ while(previous_parent != NULL && new_balance != 0)
++ {
++ new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, 1);
++ previous_parent = previous_parent->previous;
++ }
++ }
++ }
++
++ free_stack(parent_list);
++
++ if(old_value_found == 0)
++ {
++ map->num_elements = map->num_elements + 1;
++ }
++
++ return old_value;
++}
++
++
++void* remove_long_map_element(long_map* map, unsigned long key)
++{
++
++ void* value = NULL;
++
++ long_map_node* root_node = map->root;
++ stack_node* parent_list = NULL;
++
++
++ long_map_node* remove_parent;
++ long_map_node* remove_node;
++ long_map_node* next_node;
++
++ long_map_node* replacement;
++ long_map_node* replacement_parent;
++ long_map_node* replacement_next;
++
++ stack_node* next_parent;
++ stack_node* previous_parent;
++ stack_node* replacement_stack_node;
++
++
++ signed char new_balance;
++
++
++
++ if(root_node != NULL)
++ {
++ remove_parent = root_node;
++ remove_node = key < remove_parent->key ? remove_parent->left : remove_parent->right;
++
++ if(remove_node != NULL && key != remove_parent->key)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ next_parent->node_ptr = &(map->root);
++ next_parent->previous = parent_list;
++ parent_list = next_parent;
++ while( key != remove_node->key && (next_node = (key < remove_node->key ? remove_node->left : remove_node->right)) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++ next_parent->node_ptr = key < remove_parent->key ? &(remove_parent->left) : &(remove_parent->right);
++ next_parent->previous = parent_list;
++ next_parent->previous->direction = key < remove_parent->key ? -1 : 1;
++ parent_list = next_parent;
++
++
++ remove_parent = remove_node;
++ remove_node = next_node;
++ }
++ parent_list->direction = key < remove_parent-> key ? -1 : 1;
++ }
++ else
++ {
++ remove_node = remove_parent;
++ }
++
++
++ if(key == remove_node->key)
++ {
++
++ /* find replacement for node we are deleting */
++ if( remove_node->right == NULL )
++ {
++ replacement = remove_node->left;
++ }
++ else if( remove_node->right->left == NULL)
++ {
++
++ replacement = remove_node->right;
++ replacement->left = remove_node->left;
++ replacement->balance = remove_node->balance;
++
++ /* put pointer to replacement node into list for balance update */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = 1; /* replacement is from right */
++ if(remove_node == remove_parent) /* special case for root node */
++ {
++ replacement_stack_node->node_ptr = &(map->root);
++ }
++ else
++ {
++ replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++ }
++ parent_list = replacement_stack_node;
++
++ }
++ else
++ {
++ /* put pointer to replacement node into list for balance update */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = 1; /* we always look for replacement on right */
++ if(remove_node == remove_parent) /* special case for root node */
++ {
++ replacement_stack_node->node_ptr = &(map->root);
++ }
++ else
++ {
++ replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++ }
++
++ parent_list = replacement_stack_node;
++
++
++ /*
++ * put pointer to replacement node->right into list for balance update
++ * this node will have to be updated with the proper pointer
++ * after we have identified the replacement
++ */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = -1; /* we always look for replacement to left of this node */
++ parent_list = replacement_stack_node;
++
++ /* find smallest node on right (large) side of tree */
++ replacement_parent = remove_node->right;
++ replacement = replacement_parent->left;
++
++ while((replacement_next = replacement->left) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ next_parent->node_ptr = &(replacement_parent->left);
++ next_parent->previous = parent_list;
++ next_parent->direction = -1; /* we always go left */
++ parent_list = next_parent;
++
++ replacement_parent = replacement;
++ replacement = replacement_next;
++
++ }
++
++ replacement_parent->left = replacement->right;
++
++ replacement->left = remove_node->left;
++ replacement->right = remove_node->right;
++ replacement->balance = remove_node->balance;
++ replacement_stack_node->node_ptr = &(replacement->right);
++ }
++
++ /* insert replacement at proper location in tree */
++ if(remove_node == remove_parent)
++ {
++ map->root = replacement;
++ }
++ else
++ {
++ remove_parent->left = remove_node == remove_parent->left ? replacement : remove_parent->left;
++ remove_parent->right = remove_node == remove_parent->right ? replacement : remove_parent->right;
++ }
++
++
++ /* rebalance tree */
++ previous_parent = parent_list;
++ new_balance = 0;
++ while(previous_parent != NULL && new_balance == 0)
++ {
++ new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, -1);
++ previous_parent = previous_parent->previous;
++ }
++
++
++
++
++ /*
++ * since we found a value to remove, decrease number of elements in map
++ * set return value to the deleted node's value and free the node
++ */
++ map->num_elements = map->num_elements - 1;
++ value = remove_node->value;
++ free(remove_node);
++ }
++ }
++
++ free_stack(parent_list);
++
++ return value;
++}
++
++
++/* note: returned keys are dynamically allocated, you need to free them! */
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned)
++{
++ unsigned long* key_list = (unsigned long*)malloc((map->num_elements)*sizeof(unsigned long));
++ unsigned long next_key_index;
++ if(key_list == NULL)
++ {
++ *num_keys_returned = 0;
++ return NULL;
++ }
++ next_key_index = 0;
++ get_sorted_node_keys(map->root, key_list, &next_key_index, 0);
++
++ *num_keys_returned = map->num_elements;
++
++ return key_list;
++}
++
++
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned)
++{
++ void** value_list = (void**)malloc((map->num_elements+1)*sizeof(void*));
++ unsigned long next_value_index;
++
++ if(value_list == NULL)
++ {
++ *num_values_returned = 0;
++ return NULL;
++ }
++ next_value_index = 0;
++ get_sorted_node_values(map->root, value_list, &next_value_index, 0);
++	value_list[map->num_elements] = NULL; /* since we're dealing with pointers, make the list NULL-terminated */
++
++ *num_values_returned = map->num_elements;
++ return value_list;
++
++}
++
++
++
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = destroy_long_map_values(map, destruction_type, num_destroyed);
++ free(map);
++ return return_values;
++}
++
++
++
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value))
++{
++ apply_to_every_long_map_node(map->root, apply_func);
++}
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value))
++{
++ apply_to_every_string_map_node( (map->lm).root, map->store_keys, apply_func);
++}
++
++
++/***************************************************
++ * internal utility function definitions
++ ***************************************************/
++static void free_stack(stack_node* stack) /* frees every node in a traversal stack (stack nodes only, not their payloads) */
++{
++	while(stack != NULL)
++	{
++		stack_node* prev_node = stack;
++		stack = prev_node->previous;
++		free(prev_node);
++	}
++
++}
++
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed) /* empties map; returns malloc'd NULL-terminated value array only for RETURN_VALUES mode */
++{
++	void** return_values = NULL;
++	unsigned long return_index = 0;
++
++	*num_destroyed = 0;
++
++	if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++	{
++		return_values = (void**)malloc((map->num_elements+1)*sizeof(void*)); /* +1 for NULL terminator */
++		if(return_values == NULL) /* deal with malloc failure */
++		{
++			destruction_type = DESTROY_MODE_IGNORE_VALUES; /* could cause memory leak, but there's no other way to be sure we won't seg fault */
++		}
++		else
++		{
++			return_values[map->num_elements] = NULL;
++		}
++	}
++	while(map->num_elements > 0) /* repeatedly removing the smallest key yields values in ascending key order */
++	{
++		unsigned long smallest_key;
++		void* removed_value = remove_smallest_long_map_element(map, &smallest_key);
++		if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++		{
++			return_values[return_index] = removed_value;
++		}
++		if(destruction_type == DESTROY_MODE_FREE_VALUES)
++		{
++			free(removed_value);
++		}
++		return_index++;
++		*num_destroyed = *num_destroyed + 1;
++	}
++	return return_values;
++}
++
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value)) /* recursive in-order (ascending key) visit */
++{
++	if(node != NULL)
++	{
++		apply_to_every_long_map_node(node->left, apply_func);
++
++		apply_func(node->key, node->value);
++
++		apply_to_every_long_map_node(node->right, apply_func);
++	}
++}
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value)) /* has_key => node values are key/value wrapper structs */
++{
++	if(node != NULL)
++	{
++		apply_to_every_string_map_node(node->left, has_key, apply_func);
++
++		if(has_key)
++		{
++			string_map_key_value* kv = (string_map_key_value*)(node->value); /* unwrap stored original key string */
++			apply_func(kv->key, kv->value);
++		}
++		else
++		{
++			apply_func(NULL, node->value); /* key text wasn't stored; only the hashed key exists */
++		}
++		apply_to_every_string_map_node(node->right, has_key, apply_func);
++	}
++}
++
++
++
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth) /* in-order key collection; depth is only passed down, never read */
++{
++	if(node != NULL)
++	{
++		get_sorted_node_keys(node->left, key_list, next_key_index, depth+1);
++
++		key_list[ *next_key_index ] = node->key;
++		(*next_key_index)++;
++
++		get_sorted_node_keys(node->right, key_list, next_key_index, depth+1);
++	}
++}
++
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth) /* in-order value collection; depth is only passed down, never read */
++{
++	if(node != NULL)
++	{
++		get_sorted_node_values(node->left, value_list, next_value_index, depth+1);
++
++		value_list[ *next_value_index ] = node->value;
++		(*next_value_index)++;
++
++		get_sorted_node_values(node->right, value_list, next_value_index, depth+1);
++	}
++}
++
++
++
++/*
++ * direction = -1 indicates left subtree updated, direction = 1 for right subtree
++ * update_op = -1 indicates delete node, update_op = 1 for insert node
++ */
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op) /* AVL rebalance of *n after one subtree changed; returns the new balance factor of *n */
++{
++	/*
++	printf( "original: key = %ld, balance = %d, update_op=%d, direction=%d\n", (*n)->key, (*n)->balance, update_op, direction);
++	*/
++
++	(*n)->balance = (*n)->balance + (update_op*direction); /* e.g. insert(+1) into left(-1) subtree shifts balance by -1 */
++
++	if( (*n)->balance < -1) /* left-heavy beyond the AVL limit */
++	{
++		if((*n)->left->balance < 0) /* left-left case: single right rotation */
++		{
++			rotate_right(n);
++			(*n)->right->balance = 0;
++			(*n)->balance = 0;
++		}
++		else if((*n)->left->balance == 0) /* single right rotation; child balanced, so new root leans right */
++		{
++			rotate_right(n);
++			(*n)->right->balance = -1;
++			(*n)->balance = 1;
++		}
++		else if((*n)->left->balance > 0) /* left-right case: double rotation */
++		{
++			rotate_left( &((*n)->left) );
++			rotate_right(n);
++			/*
++			if( (*n)->balance < 0 )
++			{
++				(*n)->left->balance = 0;
++				(*n)->right->balance = 1;
++			}
++			else if( (*n)->balance == 0 )
++			{
++				(*n)->left->balance = 0;
++				(*n)->right->balance = 0;
++			}
++			else if( (*n)->balance > 0 )
++			{
++				(*n)->left->balance = -1;
++				(*n)->right->balance = 0;
++			}
++			*/
++			(*n)->left->balance = (*n)->balance > 0 ? -1 : 0; /* condensed form of the commented-out case analysis above */
++			(*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++			(*n)->balance = 0;
++		}
++	}
++	if( (*n)->balance > 1) /* right-heavy: mirror image of the cases above */
++	{
++		if((*n)->right->balance > 0) /* right-right case: single left rotation */
++		{
++			rotate_left(n);
++			(*n)->left->balance = 0;
++			(*n)->balance = 0;
++		}
++		else if ((*n)->right->balance == 0) /* single left rotation; child balanced, so new root leans left */
++		{
++			rotate_left(n);
++			(*n)->left->balance = 1;
++			(*n)->balance = -1;
++		}
++		else if((*n)->right->balance < 0) /* right-left case: double rotation */
++		{
++			rotate_right( &((*n)->right) );
++			rotate_left(n);
++			/*
++			if( (*n)->balance < 0 )
++			{
++				(*n)->left->balance = 0;
++				(*n)->right->balance = 1;
++			}
++			else if( (*n)->balance == 0 )
++			{
++				(*n)->left->balance = 0;
++				(*n)->right->balance = 0;
++			}
++			else if( (*n)->balance > 0 )
++			{
++				(*n)->left->balance = -1;
++				(*n)->right->balance = 0;
++			}
++			*/
++			(*n)->left->balance = (*n)->balance > 0 ? -1 : 0; /* condensed form of the commented-out case analysis above */
++			(*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++			(*n)->balance = 0;
++		}
++	}
++
++	/*
++	printf( "key = %ld, balance = %d\n", (*n)->key, (*n)->balance);
++	*/
++
++	return (*n)->balance;
++}
++
++
++static void rotate_right (long_map_node** parent) /* standard right rotation about *parent; balance factors are fixed up by the caller (rebalance) */
++{
++	long_map_node* old_parent = *parent;
++	long_map_node* pivot = old_parent->left;
++	old_parent->left = pivot->right; /* pivot's right subtree becomes old parent's left */
++	pivot->right = old_parent;
++
++	*parent = pivot; /* pivot replaces old parent in the tree */
++}
++
++static void rotate_left (long_map_node** parent) /* mirror image of rotate_right */
++{
++	long_map_node* old_parent = *parent;
++	long_map_node* pivot = old_parent->right;
++	old_parent->right = pivot->left;
++	pivot->left = old_parent;
++
++	*parent = pivot;
++}
++
++
++
++/***************************************************************************
++ * This algorithm was created for the sdbm database library (a public-domain
++ * reimplementation of ndbm) and seems to work relatively well in
++ * scrambling bits
++ *
++ *
++ * This code was derived from code found at:
++ * http://www.cse.yorku.ca/~oz/hash.html
++ ***************************************************************************/
++static unsigned long sdbm_string_hash(const char *key) /* sdbm hash of a NUL-terminated string (see attribution comment above) */
++{
++	unsigned long hashed_key = 0;
++
++	int index = 0;
++	unsigned int nextch;
++	while(key[index] != '\0')
++	{
++		nextch = key[index];
++		hashed_key = nextch + (hashed_key << 6) + (hashed_key << 16) - hashed_key; /* equivalent to hashed_key*65599 + nextch */
++		index++;
++	}
++	return hashed_key;
++}
++
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/regexp.h 2015-06-19 03:02:54.745696021 +0800
+@@ -0,0 +1,41 @@
++/*
++ * Definitions etc. for regexp(3) routines.
++ *
++ * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof],
++ * not the System V one.
++ */
++
++#ifndef REGEXP_H
++#define REGEXP_H
++
++
++/*
++http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
++which contains a version of this library, says:
++
++ *
++ * NSUBEXP must be at least 10, and no greater than 117 or the parser
++ * will not work properly.
++ *
++
++However, it looks rather like this library is limited to 10. If you think
++otherwise, let us know.
++*/
++
++#define NSUBEXP 10
++typedef struct regexp {
++ char *startp[NSUBEXP];
++ char *endp[NSUBEXP];
++ char regstart; /* Internal use only. */
++ char reganch; /* Internal use only. */
++ char *regmust; /* Internal use only. */
++ int regmlen; /* Internal use only. */
++ char program[1]; /* Unwarranted chumminess with compiler. */
++} regexp;
++
++regexp * regcomp(char *exp, int *patternsize);
++int regexec(regexp *prog, char *string);
++void regsub(regexp *prog, char *source, char *dest);
++void regerror(char *s);
++
++#endif
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/regmagic.h 2015-06-19 03:02:54.749695854 +0800
+@@ -0,0 +1,5 @@
++/*
++ * The first byte of the regexp internal "program" is actually this magic
++ * number; the start node begins in the second byte.
++ */
++#define MAGIC 0234
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/regsub.c 2015-06-19 03:02:54.749695854 +0800
+@@ -0,0 +1,95 @@
++/*
++ * regsub
++ * @(#)regsub.c 1.3 of 2 April 86
++ *
++ * Copyright (c) 1986 by University of Toronto.
++ * Written by Henry Spencer. Not derived from licensed software.
++ *
++ * Permission is granted to anyone to use this software for any
++ * purpose on any computer system, and to redistribute it freely,
++ * subject to the following restrictions:
++ *
++ * 1. The author is not responsible for the consequences of use of
++ * this software, no matter how awful, even if they arise
++ * from defects in it.
++ *
++ * 2. The origin of this software must not be misrepresented, either
++ * by explicit claim or by omission.
++ *
++ * 3. Altered versions must be plainly marked as such, and must not
++ * be misrepresented as being the original software.
++ *
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ */
++#include "regexp.h"
++#include "regmagic.h"
++#include <linux/string.h>
++
++
++#ifndef CHARBITS
++#define UCHARAT(p) ((int)*(unsigned char *)(p))
++#else
++#define UCHARAT(p) ((int)*(p)&CHARBITS)
++#endif
++
++#if 0
++//void regerror(char * s)
++//{
++// printk("regexp(3): %s", s);
++// /* NOTREACHED */
++//}
++#endif
++
++/*
++ - regsub - perform substitutions after a regexp match
++ */
++void
++regsub(regexp * prog, char * source, char * dest) /* expand source into dest, substituting & and \0-\9 from prog's groups (as filled by a prior regexec); dest must be large enough - no bounds checking */
++{
++	register char *src;
++	register char *dst;
++	register char c;
++	register int no;
++	register int len;
++
++	/* Not necessary and gcc doesn't like it -MLS */
++	/*extern char *strncpy();*/
++
++	if (prog == NULL || source == NULL || dest == NULL) {
++		regerror("NULL parm to regsub");
++		return;
++	}
++	if (UCHARAT(prog->program) != MAGIC) { /* first program byte must be the magic number (see regmagic.h) */
++		regerror("damaged regexp fed to regsub");
++		return;
++	}
++
++	src = source;
++	dst = dest;
++	while ((c = *src++) != '\0') {
++		if (c == '&') /* '&' inserts the whole match (group 0) */
++			no = 0;
++		else if (c == '\\' && '0' <= *src && *src <= '9')
++			no = *src++ - '0'; /* \n inserts capture group n */
++		else
++			no = -1;
++
++		if (no < 0) { /* Ordinary character. */
++			if (c == '\\' && (*src == '\\' || *src == '&'))
++				c = *src++; /* collapse escaped backslash or ampersand */
++			*dst++ = c;
++		} else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
++			len = prog->endp[no] - prog->startp[no];
++			(void) strncpy(dst, prog->startp[no], len);
++			dst += len;
++			if (len != 0 && *(dst-1) == '\0') { /* strncpy hit NUL. */
++				regerror("damaged match string");
++				return;
++			}
++		}
++	}
++	*dst++ = '\0';
++}
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/regexp.c 2015-06-19 03:02:54.749695854 +0800
+@@ -0,0 +1,1197 @@
++/*
++ * regcomp and regexec -- regsub and regerror are elsewhere
++ * @(#)regexp.c 1.3 of 18 April 87
++ *
++ * Copyright (c) 1986 by University of Toronto.
++ * Written by Henry Spencer. Not derived from licensed software.
++ *
++ * Permission is granted to anyone to use this software for any
++ * purpose on any computer system, and to redistribute it freely,
++ * subject to the following restrictions:
++ *
++ * 1. The author is not responsible for the consequences of use of
++ * this software, no matter how awful, even if they arise
++ * from defects in it.
++ *
++ * 2. The origin of this software must not be misrepresented, either
++ * by explicit claim or by omission.
++ *
++ * 3. Altered versions must be plainly marked as such, and must not
++ * be misrepresented as being the original software.
++ *
++ * Beware that some of this code is subtly aware of the way operator
++ * precedence is structured in regular expressions. Serious changes in
++ * regular-expression syntax might require a total rethink.
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ * Modified slightly by Matthew Strait to use more modern C.
++ */
++
++#include "regexp.h"
++#include "regmagic.h"
++
++/* added by ethan and matt. Lets it work in both kernel and user space.
++(So iptables can use it, for instance.) Yea, it goes both ways... */
++#ifdef __KERNEL__ /* #ifdef, not #if: __KERNEL__ comes from -D__KERNEL__; #if warns under -Wundef in userspace builds */
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++#else
++ #define printk(format,args...) printf(format,##args)
++#endif
++
++/* report a regexp error; unlike classic V8 regerror() this returns to the caller */
++void regerror(char * s)
++{
++	printk("<3>Regexp: %s\n", s);
++}
++
++/*
++ * The "internal use only" fields in regexp.h are present to pass info from
++ * compile to execute that permits the execute phase to run lots faster on
++ * simple cases. They are:
++ *
++ * regstart char that must begin a match; '\0' if none obvious
++ * reganch is the match anchored (at beginning-of-line only)?
++ * regmust string (pointer into program) that match must include, or NULL
++ * regmlen length of regmust string
++ *
++ * Regstart and reganch permit very fast decisions on suitable starting points
++ * for a match, cutting down the work a lot. Regmust permits fast rejection
++ * of lines that cannot possibly match. The regmust tests are costly enough
++ * that regcomp() supplies a regmust only if the r.e. contains something
++ * potentially expensive (at present, the only such thing detected is * or +
++ * at the start of the r.e., which can involve a lot of backup). Regmlen is
++ * supplied because the test in regexec() needs it and regcomp() is computing
++ * it anyway.
++ */
++
++/*
++ * Structure for regexp "program". This is essentially a linear encoding
++ * of a nondeterministic finite-state machine (aka syntax charts or
++ * "railroad normal form" in parsing technology). Each node is an opcode
++ * plus a "next" pointer, possibly plus an operand. "Next" pointers of
++ * all nodes except BRANCH implement concatenation; a "next" pointer with
++ * a BRANCH on both ends of it is connecting two alternatives. (Here we
++ * have one of the subtle syntax dependencies: an individual BRANCH (as
++ * opposed to a collection of them) is never concatenated with anything
++ * because of operator precedence.) The operand of some types of node is
++ * a literal string; for others, it is a node leading into a sub-FSM. In
++ * particular, the operand of a BRANCH node is the first node of the branch.
++ * (NB this is *not* a tree structure: the tail of the branch connects
++ * to the thing following the set of BRANCHes.) The opcodes are:
++ */
++
++/* definition number opnd? meaning */
++#define END 0 /* no End of program. */
++#define BOL 1 /* no Match "" at beginning of line. */
++#define EOL 2 /* no Match "" at end of line. */
++#define ANY 3 /* no Match any one character. */
++#define ANYOF 4 /* str Match any character in this string. */
++#define ANYBUT 5 /* str Match any character not in this string. */
++#define BRANCH 6 /* node Match this alternative, or the next... */
++#define BACK 7 /* no Match "", "next" ptr points backward. */
++#define EXACTLY 8 /* str Match this string. */
++#define NOTHING 9 /* no Match empty string. */
++#define STAR 10 /* node Match this (simple) thing 0 or more times. */
++#define PLUS 11 /* node Match this (simple) thing 1 or more times. */
++#define OPEN 20 /* no Mark this point in input as start of #n. */
++ /* OPEN+1 is number 1, etc. */
++#define CLOSE 30 /* no Analogous to OPEN. */
++
++/*
++ * Opcode notes:
++ *
++ * BRANCH The set of branches constituting a single choice are hooked
++ * together with their "next" pointers, since precedence prevents
++ * anything being concatenated to any individual branch. The
++ * "next" pointer of the last BRANCH in a choice points to the
++ * thing following the whole choice. This is also where the
++ * final "next" pointer of each individual branch points; each
++ * branch starts with the operand node of a BRANCH node.
++ *
++ * BACK Normal "next" pointers all implicitly point forward; BACK
++ * exists to make loop structures possible.
++ *
++ * STAR,PLUS '?', and complex '*' and '+', are implemented as circular
++ * BRANCH structures using BACK. Simple cases (one character
++ * per match) are implemented with STAR and PLUS for speed
++ * and to minimize recursive plunges.
++ *
++ * OPEN,CLOSE ...are numbered at compile time.
++ */
++
++/*
++ * A node is one char of opcode followed by two chars of "next" pointer.
++ * "Next" pointers are stored as two 8-bit pieces, high order first. The
++ * value is a positive offset from the opcode of the node containing it.
++ * An operand, if any, simply follows the node. (Note that much of the
++ * code generation knows about this implicit relationship.)
++ *
++ * Using two bytes for the "next" pointer is vast overkill for most things,
++ * but allows patterns to get big without disasters.
++ */
++#define OP(p) (*(p))
++#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
++#define OPERAND(p) ((p) + 3)
++
++/*
++ * See regmagic.h for one further detail of program structure.
++ */
++
++
++/*
++ * Utility definitions.
++ */
++#ifndef CHARBITS
++#define UCHARAT(p) ((int)*(unsigned char *)(p))
++#else
++#define UCHARAT(p) ((int)*(p)&CHARBITS)
++#endif
++
++#define FAIL(m) { regerror(m); return(NULL); }
++#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?')
++#define META "^$.[()|?+*\\"
++
++/*
++ * Flags to be passed up and down.
++ */
++#define HASWIDTH 01 /* Known never to match null string. */
++#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */
++#define SPSTART 04 /* Starts with * or +. */
++#define WORST 0 /* Worst case. */
++
++/*
++ * Global work variables for regcomp().
++ */
++struct match_globals {
++char *reginput; /* String-input pointer. */
++char *regbol; /* Beginning of input, for ^ check. */
++char **regstartp; /* Pointer to startp array. */
++char **regendp; /* Ditto for endp. */
++char *regparse; /* Input-scan pointer. */
++int regnpar; /* () count. */
++char regdummy;
++char *regcode; /* Code-emit pointer; &regdummy = don't. */
++long regsize; /* Code size. */
++};
++
++/*
++ * Forward declarations for regcomp()'s friends.
++ */
++#ifndef STATIC
++#define STATIC static
++#endif
++STATIC char *reg(struct match_globals *g, int paren,int *flagp);
++STATIC char *regbranch(struct match_globals *g, int *flagp);
++STATIC char *regpiece(struct match_globals *g, int *flagp);
++STATIC char *regatom(struct match_globals *g, int *flagp);
++STATIC char *regnode(struct match_globals *g, char op);
++STATIC char *regnext(struct match_globals *g, char *p);
++STATIC void regc(struct match_globals *g, char b);
++STATIC void reginsert(struct match_globals *g, char op, char *opnd);
++STATIC void regtail(struct match_globals *g, char *p, char *val);
++STATIC void regoptail(struct match_globals *g, char *p, char *val);
++
++
++__kernel_size_t my_strcspn(const char *s1,const char *s2) /* like strcspn(3): length of the initial segment of s1 containing no character from s2; local copy kept so this file builds both in and out of the kernel */
++{
++	char *scan1;
++	char *scan2;
++	int count;
++
++	count = 0;
++	for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
++		for (scan2 = (char *)s2; *scan2 != '\0';) /* ++ moved down. */
++			if (*scan1 == *scan2++)
++				return(count);
++		count++;
++	}
++	return(count);
++}
++
++/*
++ - regcomp - compile a regular expression into internal code
++ *
++ * We can't allocate space until we know how big the compiled form will be,
++ * but we can't compile it (and thus know how big it is) until we've got a
++ * place to put the code. So we cheat: we compile it twice, once with code
++ * generation turned off and size counting turned on, and once "for real".
++ * This also means that we don't allocate space until we are sure that the
++ * thing really will compile successfully, and we never have to move the
++ * code and thus invalidate pointers into it. (Note that it has to be in
++ * one piece because free() must be able to free it all.)
++ *
++ * Beware that the optimization-preparation code in here knows about some
++ * of the structure of the compiled regexp.
++ */
++regexp *
++regcomp(char *exp,int *patternsize) /* compile exp to a malloc'd program; writes the total allocation size to *patternsize; NULL on error */
++{
++	register regexp *r;
++	register char *scan;
++	register char *longest;
++	register int len;
++	int flags;
++	struct match_globals g;
++
++	/* commented out by ethan
++	extern char *malloc();
++	*/
++
++	if (exp == NULL)
++		FAIL("NULL argument");
++
++	/* First pass: determine size, legality. */
++	g.regparse = exp;
++	g.regnpar = 1;
++	g.regsize = 0L;
++	g.regcode = &g.regdummy; /* dummy sink: regc() only counts bytes on this pass */
++	regc(&g, MAGIC);
++	if (reg(&g, 0, &flags) == NULL)
++		return(NULL);
++
++	/* Small enough for pointer-storage convention? */
++	if (g.regsize >= 32767L)	/* Probably could be 65535L. */
++		FAIL("regexp too big");
++
++	/* Allocate space. */
++	*patternsize=sizeof(regexp) + (unsigned)g.regsize; /* callers need the exact size to free/copy the pattern */
++	r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
++	if (r == NULL)
++		FAIL("out of space");
++
++	/* Second pass: emit code. */
++	g.regparse = exp;
++	g.regnpar = 1;
++	g.regcode = r->program;
++	regc(&g, MAGIC);
++	if (reg(&g, 0, &flags) == NULL) /* NOTE(review): leaks r on this path; pass 1 succeeded so it "can't happen", but consider freeing r */
++		return(NULL);
++
++	/* Dig out information for optimizations. */
++	r->regstart = '\0';	/* Worst-case defaults. */
++	r->reganch = 0;
++	r->regmust = NULL;
++	r->regmlen = 0;
++	scan = r->program+1;	/* First BRANCH. */
++	if (OP(regnext(&g, scan)) == END) { /* Only one top-level choice. */
++		scan = OPERAND(scan);
++
++		/* Starting-point info. */
++		if (OP(scan) == EXACTLY)
++			r->regstart = *OPERAND(scan);
++		else if (OP(scan) == BOL)
++			r->reganch++;
++
++		/*
++		 * If there's something expensive in the r.e., find the
++		 * longest literal string that must appear and make it the
++		 * regmust. Resolve ties in favor of later strings, since
++		 * the regstart check works with the beginning of the r.e.
++		 * and avoiding duplication strengthens checking. Not a
++		 * strong reason, but sufficient in the absence of others.
++		 */
++		if (flags&SPSTART) {
++			longest = NULL;
++			len = 0;
++			for (; scan != NULL; scan = regnext(&g, scan))
++				if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
++					longest = OPERAND(scan);
++					len = strlen(OPERAND(scan));
++				}
++			r->regmust = longest;
++			r->regmlen = len;
++		}
++	}
++
++	return(r);
++}
++
++/*
++ - reg - regular expression, i.e. main body or parenthesized thing
++ *
++ * Caller must absorb opening parenthesis.
++ *
++ * Combining parenthesis handling with the base level of regular expression
++ * is a trifle forced, but the need to tie the tails of the branches to what
++ * follows makes it hard to avoid.
++ */
++static char *
++reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
++{
++ register char *ret;
++ register char *br;
++ register char *ender;
++ register int parno = 0; /* 0 makes gcc happy */
++ int flags;
++
++ *flagp = HASWIDTH; /* Tentatively. */
++
++ /* Make an OPEN node, if parenthesized. */
++ if (paren) {
++ if (g->regnpar >= NSUBEXP)
++ FAIL("too many ()");
++ parno = g->regnpar;
++ g->regnpar++;
++ ret = regnode(g, OPEN+parno);
++ } else
++ ret = NULL;
++
++ /* Pick up the branches, linking them together. */
++ br = regbranch(g, &flags);
++ if (br == NULL)
++ return(NULL);
++ if (ret != NULL)
++ regtail(g, ret, br); /* OPEN -> first. */
++ else
++ ret = br;
++ if (!(flags&HASWIDTH))
++ *flagp &= ~HASWIDTH;
++ *flagp |= flags&SPSTART;
++ while (*g->regparse == '|') {
++ g->regparse++;
++ br = regbranch(g, &flags);
++ if (br == NULL)
++ return(NULL);
++ regtail(g, ret, br); /* BRANCH -> BRANCH. */
++ if (!(flags&HASWIDTH))
++ *flagp &= ~HASWIDTH;
++ *flagp |= flags&SPSTART;
++ }
++
++ /* Make a closing node, and hook it on the end. */
++ ender = regnode(g, (paren) ? CLOSE+parno : END);
++ regtail(g, ret, ender);
++
++ /* Hook the tails of the branches to the closing node. */
++ for (br = ret; br != NULL; br = regnext(g, br))
++ regoptail(g, br, ender);
++
++ /* Check for proper termination. */
++ if (paren && *g->regparse++ != ')') {
++ FAIL("unmatched ()");
++ } else if (!paren && *g->regparse != '\0') {
++ if (*g->regparse == ')') {
++ FAIL("unmatched ()");
++ } else
++ FAIL("junk on end"); /* "Can't happen". */
++ /* NOTREACHED */
++ }
++
++ return(ret);
++}
++
++/*
++ - regbranch - one alternative of an | operator
++ *
++ * Implements the concatenation operator.
++ */
++static char *
++regbranch(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ register char *chain;
++ register char *latest;
++ int flags;
++
++ *flagp = WORST; /* Tentatively. */
++
++ ret = regnode(g, BRANCH);
++ chain = NULL;
++ while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
++ latest = regpiece(g, &flags);
++ if (latest == NULL)
++ return(NULL);
++ *flagp |= flags&HASWIDTH;
++ if (chain == NULL) /* First piece. */
++ *flagp |= flags&SPSTART;
++ else
++ regtail(g, chain, latest);
++ chain = latest;
++ }
++ if (chain == NULL) /* Loop ran zero times. */
++ (void) regnode(g, NOTHING);
++
++ return(ret);
++}
++
++/*
++ - regpiece - something followed by possible [*+?]
++ *
++ * Note that the branching code sequences used for ? and the general cases
++ * of * and + are somewhat optimized: they use the same NOTHING node as
++ * both the endmarker for their branch list and the body of the last branch.
++ * It might seem that this node could be dispensed with entirely, but the
++ * endmarker role is not redundant.
++ */
++static char *
++regpiece(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ register char op;
++ register char *next;
++ int flags;
++
++ ret = regatom(g, &flags);
++ if (ret == NULL)
++ return(NULL);
++
++ op = *g->regparse;
++ if (!ISMULT(op)) {
++ *flagp = flags;
++ return(ret);
++ }
++
++ if (!(flags&HASWIDTH) && op != '?')
++ FAIL("*+ operand could be empty");
++ *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
++
++ if (op == '*' && (flags&SIMPLE))
++ reginsert(g, STAR, ret);
++ else if (op == '*') {
++ /* Emit x* as (x&|), where & means "self". */
++ reginsert(g, BRANCH, ret); /* Either x */
++ regoptail(g, ret, regnode(g, BACK)); /* and loop */
++ regoptail(g, ret, ret); /* back */
++ regtail(g, ret, regnode(g, BRANCH)); /* or */
++ regtail(g, ret, regnode(g, NOTHING)); /* null. */
++ } else if (op == '+' && (flags&SIMPLE))
++ reginsert(g, PLUS, ret);
++ else if (op == '+') {
++ /* Emit x+ as x(&|), where & means "self". */
++ next = regnode(g, BRANCH); /* Either */
++ regtail(g, ret, next);
++ regtail(g, regnode(g, BACK), ret); /* loop back */
++ regtail(g, next, regnode(g, BRANCH)); /* or */
++ regtail(g, ret, regnode(g, NOTHING)); /* null. */
++ } else if (op == '?') {
++ /* Emit x? as (x|) */
++ reginsert(g, BRANCH, ret); /* Either x */
++ regtail(g, ret, regnode(g, BRANCH)); /* or */
++ next = regnode(g, NOTHING); /* null. */
++ regtail(g, ret, next);
++ regoptail(g, ret, next);
++ }
++ g->regparse++;
++ if (ISMULT(*g->regparse))
++ FAIL("nested *?+");
++
++ return(ret);
++}
++
++/*
++ - regatom - the lowest level
++ *
++ * Optimization: gobbles an entire sequence of ordinary characters so that
++ * it can turn them into a single node, which is smaller to store and
++ * faster to run. Backslashed characters are exceptions, each becoming a
++ * separate node; the code is simpler that way and it's not worth fixing.
++ */
++static char *
++regatom(struct match_globals *g, int *flagp)
++{
++ register char *ret;
++ int flags;
++
++ *flagp = WORST; /* Tentatively. */
++
++ switch (*g->regparse++) {
++ case '^':
++ ret = regnode(g, BOL);
++ break;
++ case '$':
++ ret = regnode(g, EOL);
++ break;
++ case '.':
++ ret = regnode(g, ANY);
++ *flagp |= HASWIDTH|SIMPLE;
++ break;
++ case '[': {
++ register int class;
++ register int classend;
++
++ if (*g->regparse == '^') { /* Complement of range. */
++ ret = regnode(g, ANYBUT);
++ g->regparse++;
++ } else
++ ret = regnode(g, ANYOF);
++ if (*g->regparse == ']' || *g->regparse == '-')
++ regc(g, *g->regparse++);
++ while (*g->regparse != '\0' && *g->regparse != ']') {
++ if (*g->regparse == '-') {
++ g->regparse++;
++ if (*g->regparse == ']' || *g->regparse == '\0')
++ regc(g, '-');
++ else {
++ class = UCHARAT(g->regparse-2)+1;
++ classend = UCHARAT(g->regparse);
++ if (class > classend+1)
++ FAIL("invalid [] range");
++ for (; class <= classend; class++)
++ regc(g, class);
++ g->regparse++;
++ }
++ } else
++ regc(g, *g->regparse++);
++ }
++ regc(g, '\0');
++ if (*g->regparse != ']')
++ FAIL("unmatched []");
++ g->regparse++;
++ *flagp |= HASWIDTH|SIMPLE;
++ }
++ break;
++ case '(':
++ ret = reg(g, 1, &flags);
++ if (ret == NULL)
++ return(NULL);
++ *flagp |= flags&(HASWIDTH|SPSTART);
++ break;
++ case '\0':
++ case '|':
++ case ')':
++ FAIL("internal urp"); /* Supposed to be caught earlier. */
++ break;
++ case '?':
++ case '+':
++ case '*':
++ FAIL("?+* follows nothing");
++ break;
++ case '\\':
++ if (*g->regparse == '\0')
++ FAIL("trailing \\");
++ ret = regnode(g, EXACTLY);
++ regc(g, *g->regparse++);
++ regc(g, '\0');
++ *flagp |= HASWIDTH|SIMPLE;
++ break;
++ default: {
++ register int len;
++ register char ender;
++
++ g->regparse--;
++ len = my_strcspn((const char *)g->regparse, (const char *)META);
++ if (len <= 0)
++ FAIL("internal disaster");
++ ender = *(g->regparse+len);
++ if (len > 1 && ISMULT(ender))
++ len--; /* Back off clear of ?+* operand. */
++ *flagp |= HASWIDTH;
++ if (len == 1)
++ *flagp |= SIMPLE;
++ ret = regnode(g, EXACTLY);
++ while (len > 0) {
++ regc(g, *g->regparse++);
++ len--;
++ }
++ regc(g, '\0');
++ }
++ break;
++ }
++
++ return(ret);
++}
++
++/*
++ - regnode - emit a node
++ */
++static char * /* Location. */
++regnode(struct match_globals *g, char op)
++{
++ register char *ret;
++ register char *ptr;
++
++ ret = g->regcode;
++ if (ret == &g->regdummy) {
++ g->regsize += 3;
++ return(ret);
++ }
++
++ ptr = ret;
++ *ptr++ = op;
++ *ptr++ = '\0'; /* Null "next" pointer. */
++ *ptr++ = '\0';
++ g->regcode = ptr;
++
++ return(ret);
++}
++
++/*
++ - regc - emit (if appropriate) a byte of code
++ */
++static void
++regc(struct match_globals *g, char b)
++{
++ if (g->regcode != &g->regdummy)
++ *g->regcode++ = b;
++ else
++ g->regsize++;
++}
++
++/*
++ - reginsert - insert an operator in front of already-emitted operand
++ *
++ * Means relocating the operand.
++ */
++static void
++reginsert(struct match_globals *g, char op, char* opnd)
++{
++ register char *src;
++ register char *dst;
++ register char *place;
++
++ if (g->regcode == &g->regdummy) {
++ g->regsize += 3;
++ return;
++ }
++
++ src = g->regcode;
++ g->regcode += 3;
++ dst = g->regcode;
++ while (src > opnd)
++ *--dst = *--src;
++
++ place = opnd; /* Op node, where operand used to be. */
++ *place++ = op;
++ *place++ = '\0';
++ *place++ = '\0';
++}
++
++/*
++ - regtail - set the next-pointer at the end of a node chain
++ */
++static void
++regtail(struct match_globals *g, char *p, char *val)
++{
++ register char *scan;
++ register char *temp;
++ register int offset;
++
++ if (p == &g->regdummy)
++ return;
++
++ /* Find last node. */
++ scan = p;
++ for (;;) {
++ temp = regnext(g, scan);
++ if (temp == NULL)
++ break;
++ scan = temp;
++ }
++
++ if (OP(scan) == BACK)
++ offset = scan - val;
++ else
++ offset = val - scan;
++ *(scan+1) = (offset>>8)&0377;
++ *(scan+2) = offset&0377;
++}
++
++/*
++ - regoptail - regtail on operand of first argument; nop if operandless
++ */
++static void
++regoptail(struct match_globals *g, char *p, char *val)
++{
++ /* "Operandless" and "op != BRANCH" are synonymous in practice. */
++ if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
++ return;
++ regtail(g, OPERAND(p), val);
++}
++
++/*
++ * regexec and friends
++ */
++
++
++/*
++ * Forwards.
++ */
++STATIC int regtry(struct match_globals *g, regexp *prog, char *string);
++STATIC int regmatch(struct match_globals *g, char *prog);
++STATIC int regrepeat(struct match_globals *g, char *p);
++
++#ifdef DEBUG
++int regnarrate = 0;
++void regdump();
++STATIC char *regprop(char *op);
++#endif
++
++/*
++ - regexec - match a regexp against a string
++ */
++int
++regexec(regexp *prog, char *string) /* returns 1 if prog matches anywhere in string (filling prog->startp/endp), else 0 */
++{
++	register char *s;
++	struct match_globals g;
++
++	/* Be paranoid... */
++	if (prog == NULL || string == NULL) {
++		printk("<3>Regexp: NULL parameter\n");
++		return(0);
++	}
++
++	/* Check validity of program. */
++	if (UCHARAT(prog->program) != MAGIC) {
++		printk("<3>Regexp: corrupted program\n");
++		return(0);
++	}
++
++	/* If there is a "must appear" string, look for it. */
++	if (prog->regmust != NULL) { /* cheap rejection before trying the full match */
++		s = string;
++		while ((s = strchr(s, prog->regmust[0])) != NULL) {
++			if (strncmp(s, prog->regmust, prog->regmlen) == 0)
++				break;	/* Found it. */
++			s++;
++		}
++		if (s == NULL)	/* Not present. */
++			return(0);
++	}
++
++	/* Mark beginning of line for ^ . */
++	g.regbol = string;
++
++	/* Simplest case: anchored match need be tried only once. */
++	if (prog->reganch)
++		return(regtry(&g, prog, string));
++
++	/* Messy cases: unanchored match. */
++	s = string;
++	if (prog->regstart != '\0')
++		/* We know what char it must start with. */
++		while ((s = strchr(s, prog->regstart)) != NULL) {
++			if (regtry(&g, prog, s))
++				return(1);
++			s++;
++		}
++	else
++		/* We don't -- general case. */
++		do {
++			if (regtry(&g, prog, s))
++				return(1);
++		} while (*s++ != '\0');
++
++	/* Failure. */
++	return(0);
++}
++
++/*
++ - regtry - try match at specific point
++ */
++static int /* 0 failure, 1 success */
++regtry(struct match_globals *g, regexp *prog, char *string)
++{
++ register int i;
++ register char **sp;
++ register char **ep;
++
++ g->reginput = string;
++ g->regstartp = prog->startp;
++ g->regendp = prog->endp;
++
++ sp = prog->startp;
++ ep = prog->endp;
++ for (i = NSUBEXP; i > 0; i--) {
++ *sp++ = NULL;
++ *ep++ = NULL;
++ }
++ if (regmatch(g, prog->program + 1)) {
++ prog->startp[0] = string;
++ prog->endp[0] = g->reginput;
++ return(1);
++ } else
++ return(0);
++}
++
++/*
++ - regmatch - main matching routine
++ *
++ * Conceptually the strategy is simple: check to see whether the current
++ * node matches, call self recursively to see whether the rest matches,
++ * and then act accordingly. In practice we make some effort to avoid
++ * recursion, in particular by going through "ordinary" nodes (that don't
++ * need to know whether the rest of the match failed) by a loop instead of
++ * by recursion.
++ */
++static int /* 0 failure, 1 success */
++regmatch(struct match_globals *g, char *prog)
++{
++ register char *scan = prog; /* Current node. */
++ char *next; /* Next node. */
++
++#ifdef DEBUG
++ if (scan != NULL && regnarrate)
++ fprintf(stderr, "%s(\n", regprop(scan));
++#endif
++ while (scan != NULL) {
++#ifdef DEBUG
++ if (regnarrate)
++ fprintf(stderr, "%s...\n", regprop(scan));
++#endif
++ next = regnext(g, scan);
++
++ switch (OP(scan)) {
++ case BOL:
++ if (g->reginput != g->regbol)
++ return(0);
++ break;
++ case EOL:
++ if (*g->reginput != '\0')
++ return(0);
++ break;
++ case ANY:
++ if (*g->reginput == '\0')
++ return(0);
++ g->reginput++;
++ break;
++ case EXACTLY: {
++ register int len;
++ register char *opnd;
++
++ opnd = OPERAND(scan);
++ /* Inline the first character, for speed. */
++ if (*opnd != *g->reginput)
++ return(0);
++ len = strlen(opnd);
++ if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
++ return(0);
++ g->reginput += len;
++ }
++ break;
++ case ANYOF:
++ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)
++ return(0);
++ g->reginput++;
++ break;
++ case ANYBUT:
++ if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)
++ return(0);
++ g->reginput++;
++ break;
++ case NOTHING:
++ case BACK:
++ break;
++ case OPEN+1:
++ case OPEN+2:
++ case OPEN+3:
++ case OPEN+4:
++ case OPEN+5:
++ case OPEN+6:
++ case OPEN+7:
++ case OPEN+8:
++ case OPEN+9: {
++ register int no;
++ register char *save;
++
++ no = OP(scan) - OPEN;
++ save = g->reginput;
++
++ if (regmatch(g, next)) {
++ /*
++ * Don't set startp if some later
++ * invocation of the same parentheses
++ * already has.
++ */
++ if (g->regstartp[no] == NULL)
++ g->regstartp[no] = save;
++ return(1);
++ } else
++ return(0);
++ }
++ break;
++ case CLOSE+1:
++ case CLOSE+2:
++ case CLOSE+3:
++ case CLOSE+4:
++ case CLOSE+5:
++ case CLOSE+6:
++ case CLOSE+7:
++ case CLOSE+8:
++ case CLOSE+9:
++ {
++ register int no;
++ register char *save;
++
++ no = OP(scan) - CLOSE;
++ save = g->reginput;
++
++ if (regmatch(g, next)) {
++ /*
++ * Don't set endp if some later
++ * invocation of the same parentheses
++ * already has.
++ */
++ if (g->regendp[no] == NULL)
++ g->regendp[no] = save;
++ return(1);
++ } else
++ return(0);
++ }
++ break;
++ case BRANCH: {
++ register char *save;
++
++ if (OP(next) != BRANCH) /* No choice. */
++ next = OPERAND(scan); /* Avoid recursion. */
++ else {
++ do {
++ save = g->reginput;
++ if (regmatch(g, OPERAND(scan)))
++ return(1);
++ g->reginput = save;
++ scan = regnext(g, scan);
++ } while (scan != NULL && OP(scan) == BRANCH);
++ return(0);
++ /* NOTREACHED */
++ }
++ }
++ break;
++ case STAR:
++ case PLUS: {
++ register char nextch;
++ register int no;
++ register char *save;
++ register int min;
++
++ /*
++ * Lookahead to avoid useless match attempts
++ * when we know what character comes next.
++ */
++ nextch = '\0';
++ if (OP(next) == EXACTLY)
++ nextch = *OPERAND(next);
++ min = (OP(scan) == STAR) ? 0 : 1;
++ save = g->reginput;
++ no = regrepeat(g, OPERAND(scan));
++ while (no >= min) {
++ /* If it could work, try it. */
++ if (nextch == '\0' || *g->reginput == nextch)
++ if (regmatch(g, next))
++ return(1);
++ /* Couldn't or didn't -- back up. */
++ no--;
++ g->reginput = save + no;
++ }
++ return(0);
++ }
++ break;
++ case END:
++ return(1); /* Success! */
++ break;
++ default:
++ printk("<3>Regexp: memory corruption\n");
++ return(0);
++ break;
++ }
++
++ scan = next;
++ }
++
++ /*
++ * We get here only if there's trouble -- normally "case END" is
++ * the terminating point.
++ */
++ printk("<3>Regexp: corrupted pointers\n");
++ return(0);
++}
++
++/*
++ - regrepeat - repeatedly match something simple, report how many
++ */
++static int
++regrepeat(struct match_globals *g, char *p)
++{
++ register int count = 0;
++ register char *scan;
++ register char *opnd;
++
++ scan = g->reginput;
++ opnd = OPERAND(p);
++ switch (OP(p)) {
++ case ANY:
++ count = strlen(scan);
++ scan += count;
++ break;
++ case EXACTLY:
++ while (*opnd == *scan) {
++ count++;
++ scan++;
++ }
++ break;
++ case ANYOF:
++ while (*scan != '\0' && strchr(opnd, *scan) != NULL) {
++ count++;
++ scan++;
++ }
++ break;
++ case ANYBUT:
++ while (*scan != '\0' && strchr(opnd, *scan) == NULL) {
++ count++;
++ scan++;
++ }
++ break;
++ default: /* Oh dear. Called inappropriately. */
++ printk("<3>Regexp: internal foulup\n");
++ count = 0; /* Best compromise. */
++ break;
++ }
++ g->reginput = scan;
++
++ return(count);
++}
++
++/*
++ - regnext - dig the "next" pointer out of a node
++ */
++static char*
++regnext(struct match_globals *g, char *p)
++{
++ register int offset;
++
++ if (p == &g->regdummy)
++ return(NULL);
++
++ offset = NEXT(p);
++ if (offset == 0)
++ return(NULL);
++
++ if (OP(p) == BACK)
++ return(p-offset);
++ else
++ return(p+offset);
++}
++
++#ifdef DEBUG
++
++STATIC char *regprop();
++
++/*
++ - regdump - dump a regexp onto stdout in vaguely comprehensible form
++ */
++void
++regdump(regexp *r)
++{
++ register char *s;
++ register char op = EXACTLY; /* Arbitrary non-END op. */
++ register char *next;
++ /* extern char *strchr(); */
++
++
++ s = r->program + 1;
++ while (op != END) { /* While that wasn't END last time... */
++ op = OP(s);
++ printf("%2d%s", s-r->program, regprop(s)); /* Where, what. */
++ next = regnext(s);
++ if (next == NULL) /* Next ptr. */
++ printf("(0)");
++ else
++ printf("(%d)", (s-r->program)+(next-s));
++ s += 3;
++ if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
++ /* Literal string, where present. */
++ while (*s != '\0') {
++ putchar(*s);
++ s++;
++ }
++ s++;
++ }
++ putchar('\n');
++ }
++
++ /* Header fields of interest. */
++ if (r->regstart != '\0')
++ printf("start `%c' ", r->regstart);
++ if (r->reganch)
++ printf("anchored ");
++ if (r->regmust != NULL)
++ printf("must have \"%s\"", r->regmust);
++ printf("\n");
++}
++
++/*
++ - regprop - printable representation of opcode
++ */
++static char *
++regprop(char *op)
++{
++#define BUFLEN 50
++ register char *p;
++ static char buf[BUFLEN];
++
++ strcpy(buf, ":");
++
++ switch (OP(op)) {
++ case BOL:
++ p = "BOL";
++ break;
++ case EOL:
++ p = "EOL";
++ break;
++ case ANY:
++ p = "ANY";
++ break;
++ case ANYOF:
++ p = "ANYOF";
++ break;
++ case ANYBUT:
++ p = "ANYBUT";
++ break;
++ case BRANCH:
++ p = "BRANCH";
++ break;
++ case EXACTLY:
++ p = "EXACTLY";
++ break;
++ case NOTHING:
++ p = "NOTHING";
++ break;
++ case BACK:
++ p = "BACK";
++ break;
++ case END:
++ p = "END";
++ break;
++ case OPEN+1:
++ case OPEN+2:
++ case OPEN+3:
++ case OPEN+4:
++ case OPEN+5:
++ case OPEN+6:
++ case OPEN+7:
++ case OPEN+8:
++ case OPEN+9:
++ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
++ p = NULL;
++ break;
++ case CLOSE+1:
++ case CLOSE+2:
++ case CLOSE+3:
++ case CLOSE+4:
++ case CLOSE+5:
++ case CLOSE+6:
++ case CLOSE+7:
++ case CLOSE+8:
++ case CLOSE+9:
++ snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
++ p = NULL;
++ break;
++ case STAR:
++ p = "STAR";
++ break;
++ case PLUS:
++ p = "PLUS";
++ break;
++ default:
++ printk("<3>Regexp: corrupted opcode\n");
++ break;
++ }
++ if (p != NULL)
++ strncat(buf, p, BUFLEN-strlen(buf));
++ return(buf);
++}
++#endif
++
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/webmon_deps/tree_map.h 2015-06-19 03:02:55.169678310 +0800
+@@ -0,0 +1,1084 @@
++/*
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This work 'as-is' we provide.
++ * No warranty, express or implied.
++ * We've done our best,
++ * to debug and test.
++ * Liability for damages denied.
++ *
++ * Permission is granted hereby,
++ * to copy, share, and modify.
++ * Use as is fit,
++ * free or for profit.
++ * On this notice these rights rely.
++ *
++ *
++ *
++ * Note that unlike other portions of Gargoyle this code
++ * does not fall under the GPL, but rather the whimsical
++ * 'Poetic License' above.
++ *
++ * Basically, this library contains a bunch of utilities
++ * that I find useful. I'm sure other libraries exist
++ * that are just as good or better, but I like these tools
++ * because I personally wrote them, so I know their quirks.
++ * (i.e. I know where the bodies are buried). I want to
++ * make sure that I can re-use these utilities for whatever
++ * code I may want to write in the future, be it
++ * proprietary or open-source, so I've put them under
++ * a very, very permissive license.
++ *
++ * If you find this code useful, use it. If not, don't.
++ * I really don't care.
++ *
++ */
++
++
++#if __KERNEL__
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++ #define free(foo) kfree(foo)
++ #define printf(format,args...) printk(format,##args)
++
++ /* kernel strdup */
++ static inline char *kernel_strdup(const char *str);
++ static inline char *kernel_strdup(const char *str)
++ {
++ char *tmp;
++ long int s;
++ s=strlen(str) + 1;
++ tmp = kmalloc(s, GFP_ATOMIC);
++ if (tmp != NULL)
++ {
++ memcpy(tmp, str, s);
++ }
++ return tmp;
++ }
++ #define strdup kernel_strdup
++
++#endif
++
++
++
++/* tree_map structs / prototypes */
++typedef struct long_tree_map_node
++{
++ unsigned long key;
++ void* value;
++
++ signed char balance;
++ struct long_tree_map_node* left;
++ struct long_tree_map_node* right;
++} long_map_node;
++
++typedef struct
++{
++ long_map_node* root;
++ unsigned long num_elements;
++
++}long_map;
++
++typedef struct
++{
++ long_map lm;
++ unsigned char store_keys;
++ unsigned long num_elements;
++
++}string_map;
++
++
++
++/* long map functions */
++long_map* initialize_long_map(void);
++void* get_long_map_element(long_map* map, unsigned long key);
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* set_long_map_element(long_map* map, unsigned long key, void* value);
++void* remove_long_map_element(long_map* map, unsigned long key);
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned);
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned);
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value));
++
++/* string map functions */
++string_map* initialize_string_map(unsigned char store_keys);
++void* get_string_map_element(string_map* map, const char* key);
++void* set_string_map_element(string_map* map, const char* key, void* value);
++void* remove_string_map_element(string_map* map, const char* key);
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned);
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned);
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value));
++
++
++/*
++ * three different ways to deal with values when data structure is destroyed
++ */
++#define DESTROY_MODE_RETURN_VALUES 20
++#define DESTROY_MODE_FREE_VALUES 21
++#define DESTROY_MODE_IGNORE_VALUES 22
++
++
++/*
++ * for convenience & backwards compatibility alias _string_map_ functions to
++ * _map_ functions since string map is used more often than long map
++ */
++#define initialize_map initialize_string_map
++#define set_map_element set_string_map_element
++#define get_map_element get_string_map_element
++#define remove_map_element remove_string_map_element
++#define get_map_keys get_string_map_keys
++#define get_map_values get_string_map_values
++#define destroy_map destroy_string_map
++
++
++/* internal utility structures/ functions */
++typedef struct stack_node_struct
++{
++ long_map_node** node_ptr;
++ signed char direction;
++ struct stack_node_struct* previous;
++} stack_node;
++
++static void free_stack(stack_node* stack);
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed);
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value));
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value));
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth);
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth);
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op);
++static void rotate_right (long_map_node** parent);
++static void rotate_left (long_map_node** parent);
++
++/* internal for string map */
++typedef struct
++{
++ char* key;
++ void* value;
++} string_map_key_value;
++static unsigned long sdbm_string_hash(const char *key);
++
++
++
++
++/***************************************************
++ * For testing only
++ ***************************************************/
++/*
++void print_list(stack_node *l);
++
++void print_list(stack_node *l)
++{
++ if(l != NULL)
++ {
++ printf(" list key = %ld, dir=%d, \n", (*(l->node_ptr))->key, l->direction);
++ print_list(l->previous);
++ }
++}
++*/
++/******************************************************
++ * End testing Code
++ *******************************************************/
++
++
++
++
++/***************************************************
++ * string_map function definitions
++ ***************************************************/
++
++string_map* initialize_string_map(unsigned char store_keys)
++{
++ string_map* map = (string_map*)malloc(sizeof(string_map));
++ if(map != NULL)
++ {
++ map->store_keys = store_keys;
++ map->lm.root = NULL;
++ map->lm.num_elements = 0;
++ map->num_elements = map->lm.num_elements;
++ }
++ return map;
++}
++
++void* get_string_map_element(string_map* map, const char* key)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = get_long_map_element( &(map->lm), hashed_key);
++ if(return_value != NULL && map->store_keys)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++void* set_string_map_element(string_map* map, const char* key, void* value)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = NULL;
++ if(map->store_keys)
++ {
++ string_map_key_value* kv = (string_map_key_value*)malloc(sizeof(string_map_key_value));
++ if(kv == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ kv->key = strdup(key);
++ if(kv->key == NULL) /* deal with malloc failure */
++ {
++ free(kv);
++ return NULL;
++ }
++ kv->value = value;
++ return_value = set_long_map_element( &(map->lm), hashed_key, kv);
++ if(return_value != NULL)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ free(r->key);
++ free(r);
++ }
++ }
++ else
++ {
++ return_value = set_long_map_element( &(map->lm), hashed_key, value);
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++void* remove_string_map_element(string_map* map, const char* key)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = remove_long_map_element( &(map->lm), hashed_key);
++
++ if(return_value != NULL && map->store_keys)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ free(r->key);
++ free(r);
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned)
++{
++ char** str_keys;
++ str_keys = (char**)malloc((map->num_elements+1)*sizeof(char*));
++ if(str_keys == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ str_keys[0] = NULL;
++ *num_keys_returned = 0;
++ if(map->store_keys && map->num_elements > 0)
++ {
++ unsigned long list_length;
++ void** long_values = get_sorted_long_map_values( &(map->lm), &list_length);
++ unsigned long key_index;
++ /*list_length will be 0 on malloc failure in get_sorted_long_map_values, so this code shouldn't seg fault if that happens */
++ for(key_index = 0; key_index < list_length; key_index++)
++ {
++ str_keys[key_index] = strdup( ((string_map_key_value*)(long_values[key_index]))->key);
++ if(str_keys[key_index] == NULL) /* deal with malloc failure */
++ {
++ //just return the incomplete list (hey, it's null terminated...)
++ free(long_values);
++ return str_keys;
++ }
++ *num_keys_returned = *num_keys_returned + 1;
++ }
++ str_keys[list_length] = NULL;
++ free(long_values);
++ }
++ return str_keys;
++}
++
++
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned)
++{
++ void** values = NULL;
++ if(map != NULL)
++ {
++ values = get_sorted_long_map_values ( &(map->lm), num_values_returned );
++ }
++ return values;
++}
++
++
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = NULL;
++ if(map != NULL)
++ {
++ if(map->store_keys)
++ {
++ void** kvs = destroy_long_map_values( &(map->lm), DESTROY_MODE_RETURN_VALUES, num_destroyed );
++ unsigned long kv_index = 0;
++ for(kv_index=0; kv_index < *num_destroyed; kv_index++)
++ {
++ string_map_key_value* kv = (string_map_key_value*)kvs[kv_index];
++ void* value = kv->value;
++
++ free(kv->key);
++ free(kv);
++ if(destruction_type == DESTROY_MODE_FREE_VALUES)
++ {
++ free(value);
++ }
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ kvs[kv_index] = value;
++ }
++ }
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values = kvs;
++ }
++ else
++ {
++ free(kvs);
++ }
++ }
++ else
++ {
++ return_values = destroy_long_map_values( &(map->lm), destruction_type, num_destroyed );
++ }
++ free(map);
++ }
++ return return_values;
++}
++
++
++
++
++/***************************************************
++ * long_map function definitions
++ ***************************************************/
++
++long_map* initialize_long_map(void)
++{
++ long_map* map = (long_map*)malloc(sizeof(long_map));
++ if(map != NULL) /* test for malloc failure */
++ {
++ map->root = NULL;
++ map->num_elements = 0;
++ }
++ return map;
++}
++
++void* get_long_map_element(long_map* map, unsigned long key)
++{
++ void* value = NULL;
++
++ if(map->root != NULL)
++ {
++ long_map_node* parent_node = map->root;
++ long_map_node* next_node;
++ while( key != parent_node->key && (next_node = (long_map_node *)(key < parent_node->key ? parent_node->left : parent_node->right)) != NULL)
++ {
++ parent_node = next_node;
++ }
++ if(parent_node->key == key)
++ {
++ value = parent_node->value;
++ }
++ }
++ return value;
++}
++
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key)
++{
++ void* value = NULL;
++ if(map->root != NULL)
++ {
++ long_map_node* next_node = map->root;
++ while( next_node->left != NULL)
++ {
++ next_node = next_node->left;
++ }
++ value = next_node->value;
++ *smallest_key = next_node->key;
++ }
++ return value;
++}
++
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key)
++{
++ void* value = NULL;
++ if(map->root != NULL)
++ {
++ long_map_node* next_node = map->root;
++ while( next_node->right != NULL)
++ {
++ next_node = next_node->right;
++ }
++ value = next_node->value;
++ *largest_key = next_node->key;
++ }
++ return value;
++}
++
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key)
++{
++ get_smallest_long_map_element(map, smallest_key);
++ return remove_long_map_element(map, *smallest_key);
++}
++
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key)
++{
++ get_largest_long_map_element(map, largest_key);
++ return remove_long_map_element(map, *largest_key);
++}
++
++
++/* if replacement performed, returns replaced value, otherwise null */
++void* set_long_map_element(long_map* map, unsigned long key, void* value)
++{
++ stack_node* parent_list = NULL;
++ void* old_value = NULL;
++ int old_value_found = 0;
++
++ long_map_node* parent_node;
++ long_map_node* next_node;
++ stack_node* next_parent;
++ stack_node* previous_parent;
++ signed char new_balance;
++
++
++ long_map_node* new_node = (long_map_node*)malloc(sizeof(long_map_node));
++ if(new_node == NULL)
++ {
++ return NULL;
++ }
++ new_node->value = value;
++ new_node->key = key;
++ new_node->left = NULL;
++ new_node->right = NULL;
++ new_node->balance = 0;
++
++
++
++ if(map->root == NULL)
++ {
++ map->root = new_node;
++ }
++ else
++ {
++ parent_node = map->root;
++
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ free(new_node);
++ return NULL; /* won't insert but won't seg fault */
++ }
++ next_parent->node_ptr = &(map->root);
++ next_parent->previous = parent_list;
++ parent_list = next_parent;
++
++ while( key != parent_node->key && (next_node = (key < parent_node->key ? parent_node->left : parent_node->right) ) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ free(new_node);
++ return NULL;
++ }
++ next_parent->node_ptr = key < parent_node->key ? &(parent_node->left) : &(parent_node->right);
++ next_parent->previous = parent_list;
++ next_parent->previous->direction = key < parent_node->key ? -1 : 1;
++ parent_list = next_parent;
++
++ parent_node = next_node;
++ }
++
++
++ if(key == parent_node->key)
++ {
++ old_value = parent_node->value;
++ old_value_found = 1;
++ parent_node->value = value;
++ free(new_node);
++ /* we merely replaced a node, no need to rebalance */
++ }
++ else
++ {
++ if(key < parent_node->key)
++ {
++ parent_node->left = (void*)new_node;
++ parent_list->direction = -1;
++ }
++ else
++ {
++ parent_node->right = (void*)new_node;
++ parent_list->direction = 1;
++ }
++
++
++ /* we inserted a node, rebalance */
++ previous_parent = parent_list;
++ new_balance = 1; /* initial value is not used, but must not be 0 for initial loop condition */
++
++
++ while(previous_parent != NULL && new_balance != 0)
++ {
++ new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, 1);
++ previous_parent = previous_parent->previous;
++ }
++ }
++ }
++
++ free_stack(parent_list);
++
++ if(old_value_found == 0)
++ {
++ map->num_elements = map->num_elements + 1;
++ }
++
++ return old_value;
++}
++
++
++void* remove_long_map_element(long_map* map, unsigned long key)
++{
++
++ void* value = NULL;
++
++ long_map_node* root_node = map->root;
++ stack_node* parent_list = NULL;
++
++
++ long_map_node* remove_parent;
++ long_map_node* remove_node;
++ long_map_node* next_node;
++
++ long_map_node* replacement;
++ long_map_node* replacement_parent;
++ long_map_node* replacement_next;
++
++ stack_node* next_parent;
++ stack_node* previous_parent;
++ stack_node* replacement_stack_node;
++
++
++ signed char new_balance;
++
++
++
++ if(root_node != NULL)
++ {
++ remove_parent = root_node;
++ remove_node = key < remove_parent->key ? remove_parent->left : remove_parent->right;
++
++ if(remove_node != NULL && key != remove_parent->key)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ next_parent->node_ptr = &(map->root);
++ next_parent->previous = parent_list;
++ parent_list = next_parent;
++ while( key != remove_node->key && (next_node = (key < remove_node->key ? remove_node->left : remove_node->right)) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++ next_parent->node_ptr = key < remove_parent->key ? &(remove_parent->left) : &(remove_parent->right);
++ next_parent->previous = parent_list;
++ next_parent->previous->direction = key < remove_parent->key ? -1 : 1;
++ parent_list = next_parent;
++
++
++ remove_parent = remove_node;
++ remove_node = next_node;
++ }
++ parent_list->direction = key < remove_parent-> key ? -1 : 1;
++ }
++ else
++ {
++ remove_node = remove_parent;
++ }
++
++
++ if(key == remove_node->key)
++ {
++
++ /* find replacement for node we are deleting */
++ if( remove_node->right == NULL )
++ {
++ replacement = remove_node->left;
++ }
++ else if( remove_node->right->left == NULL)
++ {
++
++ replacement = remove_node->right;
++ replacement->left = remove_node->left;
++ replacement->balance = remove_node->balance;
++
++ /* put pointer to replacement node into list for balance update */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = 1; /* replacement is from right */
++ if(remove_node == remove_parent) /* special case for root node */
++ {
++ replacement_stack_node->node_ptr = &(map->root);
++ }
++ else
++ {
++ replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++ }
++ parent_list = replacement_stack_node;
++
++ }
++ else
++ {
++ /* put pointer to replacement node into list for balance update */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = 1; /* we always look for replacement on right */
++ if(remove_node == remove_parent) /* special case for root node */
++ {
++ replacement_stack_node->node_ptr = &(map->root);
++ }
++ else
++ {
++ replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++ }
++
++ parent_list = replacement_stack_node;
++
++
++ /*
++ * put pointer to replacement node->right into list for balance update
++ * this node will have to be updated with the proper pointer
++ * after we have identified the replacement
++ */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = -1; /* we always look for replacement to left of this node */
++ parent_list = replacement_stack_node;
++
++ /* find smallest node on right (large) side of tree */
++ replacement_parent = remove_node->right;
++ replacement = replacement_parent->left;
++
++ while((replacement_next = replacement->left) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ next_parent->node_ptr = &(replacement_parent->left);
++ next_parent->previous = parent_list;
++ next_parent->direction = -1; /* we always go left */
++ parent_list = next_parent;
++
++ replacement_parent = replacement;
++ replacement = replacement_next;
++
++ }
++
++ replacement_parent->left = replacement->right;
++
++ replacement->left = remove_node->left;
++ replacement->right = remove_node->right;
++ replacement->balance = remove_node->balance;
++ replacement_stack_node->node_ptr = &(replacement->right);
++ }
++
++ /* insert replacement at proper location in tree */
++ if(remove_node == remove_parent)
++ {
++ map->root = replacement;
++ }
++ else
++ {
++ remove_parent->left = remove_node == remove_parent->left ? replacement : remove_parent->left;
++ remove_parent->right = remove_node == remove_parent->right ? replacement : remove_parent->right;
++ }
++
++
++ /* rebalance tree */
++ previous_parent = parent_list;
++ new_balance = 0;
++ while(previous_parent != NULL && new_balance == 0)
++ {
++ new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, -1);
++ previous_parent = previous_parent->previous;
++ }
++
++
++
++
++ /*
++ * since we found a value to remove, decrease number of elements in map
++ * set return value to the deleted node's value and free the node
++ */
++ map->num_elements = map->num_elements - 1;
++ value = remove_node->value;
++ free(remove_node);
++ }
++ }
++
++ free_stack(parent_list);
++
++ return value;
++}
++
++
++/* note: returned keys are dynamically allocated, you need to free them! */
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned)
++{
++ unsigned long* key_list = (unsigned long*)malloc((map->num_elements)*sizeof(unsigned long));
++ unsigned long next_key_index;
++ if(key_list == NULL)
++ {
++ *num_keys_returned = 0;
++ return NULL;
++ }
++ next_key_index = 0;
++ get_sorted_node_keys(map->root, key_list, &next_key_index, 0);
++
++ *num_keys_returned = map->num_elements;
++
++ return key_list;
++}
++
++
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned)
++{
++ void** value_list = (void**)malloc((map->num_elements+1)*sizeof(void*));
++ unsigned long next_value_index;
++
++ if(value_list == NULL)
++ {
++ *num_values_returned = 0;
++ return NULL;
++ }
++ next_value_index = 0;
++ get_sorted_node_values(map->root, value_list, &next_value_index, 0);
++ value_list[map->num_elements] = NULL; /* since we're dealing with pointers make list null terminated */
++
++ *num_values_returned = map->num_elements;
++ return value_list;
++
++}
++
++
++
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = destroy_long_map_values(map, destruction_type, num_destroyed);
++ free(map);
++ return return_values;
++}
++
++
++
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value))
++{
++ apply_to_every_long_map_node(map->root, apply_func);
++}
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value))
++{
++ apply_to_every_string_map_node( (map->lm).root, map->store_keys, apply_func);
++}
++
++
++/***************************************************
++ * internal utility function definitions
++ ***************************************************/
++static void free_stack(stack_node* stack)
++{
++ while(stack != NULL)
++ {
++ stack_node* prev_node = stack;
++ stack = prev_node->previous;
++ free(prev_node);
++ }
++
++}
++
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = NULL;
++ unsigned long return_index = 0;
++
++ *num_destroyed = 0;
++
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values = (void**)malloc((map->num_elements+1)*sizeof(void*));
++ if(return_values == NULL) /* deal with malloc failure */
++ {
++ destruction_type = DESTROY_MODE_IGNORE_VALUES; /* could cause memory leak, but there's no other way to be sure we won't seg fault */
++ }
++ else
++ {
++ return_values[map->num_elements] = NULL;
++ }
++ }
++ while(map->num_elements > 0)
++ {
++ unsigned long smallest_key;
++ void* removed_value = remove_smallest_long_map_element(map, &smallest_key);
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values[return_index] = removed_value;
++ }
++ if(destruction_type == DESTROY_MODE_FREE_VALUES)
++ {
++ free(removed_value);
++ }
++ return_index++;
++ *num_destroyed = *num_destroyed + 1;
++ }
++ return return_values;
++}
++
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value))
++{
++ if(node != NULL)
++ {
++ apply_to_every_long_map_node(node->left, apply_func);
++
++ apply_func(node->key, node->value);
++
++ apply_to_every_long_map_node(node->right, apply_func);
++ }
++}
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value))
++{
++ if(node != NULL)
++ {
++ apply_to_every_string_map_node(node->left, has_key, apply_func);
++
++ if(has_key)
++ {
++ string_map_key_value* kv = (string_map_key_value*)(node->value);
++ apply_func(kv->key, kv->value);
++ }
++ else
++ {
++ apply_func(NULL, node->value);
++ }
++ apply_to_every_string_map_node(node->right, has_key, apply_func);
++ }
++}
++
++
++
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth)
++{
++ if(node != NULL)
++ {
++ get_sorted_node_keys(node->left, key_list, next_key_index, depth+1);
++
++ key_list[ *next_key_index ] = node->key;
++ (*next_key_index)++;
++
++ get_sorted_node_keys(node->right, key_list, next_key_index, depth+1);
++ }
++}
++
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth)
++{
++ if(node != NULL)
++ {
++ get_sorted_node_values(node->left, value_list, next_value_index, depth+1);
++
++ value_list[ *next_value_index ] = node->value;
++ (*next_value_index)++;
++
++ get_sorted_node_values(node->right, value_list, next_value_index, depth+1);
++ }
++}
++
++
++
++/*
++ * direction = -1 indicates left subtree updated, direction = 1 for right subtree
++ * update_op = -1 indicates delete node, update_op = 1 for insert node
++ */
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op)
++{
++ /*
++ printf( "original: key = %ld, balance = %d, update_op=%d, direction=%d\n", (*n)->key, (*n)->balance, update_op, direction);
++ */
++
++ (*n)->balance = (*n)->balance + (update_op*direction);
++
++ if( (*n)->balance < -1)
++ {
++ if((*n)->left->balance < 0)
++ {
++ rotate_right(n);
++ (*n)->right->balance = 0;
++ (*n)->balance = 0;
++ }
++ else if((*n)->left->balance == 0)
++ {
++ rotate_right(n);
++ (*n)->right->balance = -1;
++ (*n)->balance = 1;
++ }
++ else if((*n)->left->balance > 0)
++ {
++ rotate_left( &((*n)->left) );
++ rotate_right(n);
++ /*
++ if( (*n)->balance < 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 1;
++ }
++ else if( (*n)->balance == 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 0;
++ }
++ else if( (*n)->balance > 0 )
++ {
++ (*n)->left->balance = -1;
++ (*n)->right->balance = 0;
++ }
++ */
++ (*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++ (*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++ (*n)->balance = 0;
++ }
++ }
++ if( (*n)->balance > 1)
++ {
++ if((*n)->right->balance > 0)
++ {
++ rotate_left(n);
++ (*n)->left->balance = 0;
++ (*n)->balance = 0;
++ }
++ else if ((*n)->right->balance == 0)
++ {
++ rotate_left(n);
++ (*n)->left->balance = 1;
++ (*n)->balance = -1;
++ }
++ else if((*n)->right->balance < 0)
++ {
++ rotate_right( &((*n)->right) );
++ rotate_left(n);
++ /*
++ if( (*n)->balance < 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 1;
++ }
++ else if( (*n)->balance == 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 0;
++ }
++ else if( (*n)->balance > 0 )
++ {
++ (*n)->left->balance = -1;
++ (*n)->right->balance = 0;
++ }
++ */
++ (*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++ (*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++ (*n)->balance = 0;
++ }
++ }
++
++ /*
++ printf( "key = %ld, balance = %d\n", (*n)->key, (*n)->balance);
++ */
++
++ return (*n)->balance;
++}
++
++
++static void rotate_right (long_map_node** parent)
++{
++ long_map_node* old_parent = *parent;
++ long_map_node* pivot = old_parent->left;
++ old_parent->left = pivot->right;
++ pivot->right = old_parent;
++
++ *parent = pivot;
++}
++
++static void rotate_left (long_map_node** parent)
++{
++ long_map_node* old_parent = *parent;
++ long_map_node* pivot = old_parent->right;
++ old_parent->right = pivot->left;
++ pivot->left = old_parent;
++
++ *parent = pivot;
++}
++
++
++
++/***************************************************************************
++ * This algorithm was created for the sdbm database library (a public-domain
++ * reimplementation of ndbm) and seems to work relatively well in
++ * scrambling bits
++ *
++ *
++ * This code was derived from code found at:
++ * http://www.cse.yorku.ca/~oz/hash.html
++ ***************************************************************************/
++static unsigned long sdbm_string_hash(const char *key)
++{
++ unsigned long hashed_key = 0;
++
++ int index = 0;
++ unsigned int nextch;
++ while(key[index] != '\0')
++ {
++ nextch = key[index];
++ hashed_key = nextch + (hashed_key << 6) + (hashed_key << 16) - hashed_key;
++ index++;
++ }
++ return hashed_key;
++}
++
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_weburl.c 2015-06-19 03:02:54.721697023 +0800
+@@ -0,0 +1,398 @@
++/* weburl -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <net/sock.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_weburl.h>
++
++#include "weburl_deps/regexp.c"
++#include "weburl_deps/tree_map.h"
++
++
++#include <linux/ip.h>
++
++
++#include <linux/netfilter/x_tables.h>
++
++
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Bishop");
++MODULE_DESCRIPTION("Match URL in HTTP requests, designed for use with Gargoyle web interface (www.gargoyle-router.com)");
++
++string_map* compiled_map = NULL;
++
++int strnicmp(const char * cs,const char * ct,size_t count)
++{
++ register signed char __res = 0;
++
++ while (count)
++ {
++ if ((__res = toupper( *cs ) - toupper( *ct++ ) ) != 0 || !*cs++)
++ {
++ break;
++ }
++ count--;
++ }
++ return __res;
++}
++
++char *strnistr(const char *s, const char *find, size_t slen)
++{
++ char c, sc;
++ size_t len;
++
++
++ if ((c = *find++) != '\0')
++ {
++ len = strlen(find);
++ do
++ {
++ do
++ {
++ if (slen < 1 || (sc = *s) == '\0')
++ {
++ return (NULL);
++ }
++ --slen;
++ ++s;
++ }
++ while ( toupper(sc) != toupper(c));
++
++ if (len > slen)
++ {
++ return (NULL);
++ }
++ }
++ while (strnicmp(s, find, len) != 0);
++
++ s--;
++ }
++ return ((char *)s);
++}
++
++
++int do_match_test(unsigned char match_type, const char* reference, char* query)
++{
++ int matches = 0;
++ struct regexp* r;
++ switch(match_type)
++ {
++ case WEBURL_CONTAINS_TYPE:
++ matches = (strstr(query, reference) != NULL);
++ break;
++ case WEBURL_REGEX_TYPE:
++
++ if(compiled_map == NULL)
++ {
++ compiled_map = initialize_map(0);
++ if(compiled_map == NULL) /* test for malloc failure */
++ {
++ return 0;
++ }
++ }
++ r = (struct regexp*)get_map_element(compiled_map, reference);
++ if(r == NULL)
++ {
++ int rlen = strlen(reference);
++ r= regcomp((char*)reference, &rlen);
++ if(r == NULL) /* test for malloc failure */
++ {
++ return 0;
++ }
++ set_map_element(compiled_map, reference, (void*)r);
++ }
++ matches = regexec(r, query);
++ break;
++ case WEBURL_EXACT_TYPE:
++ matches = (strstr(query, reference) != NULL) && strlen(query) == strlen(reference);
++ break;
++ }
++ return matches;
++}
++
++int http_match(const struct ipt_weburl_info* info, const unsigned char* packet_data, int packet_length)
++{
++ int test = 0;
++
++ /* first test if we're dealing with a web page request */
++ if(strnicmp((char*)packet_data, "GET ", 4) == 0 || strnicmp( (char*)packet_data, "POST ", 5) == 0 || strnicmp((char*)packet_data, "HEAD ", 5) == 0)
++ {
++ /* printk("found a web page request\n"); */
++ char path[625] = "";
++ char host[625] = "";
++ int path_start_index;
++ int path_end_index;
++ int last_header_index;
++ char last_two_buf[2];
++ int end_found;
++ char* host_match;
++ char* test_prefixes[6];
++ int prefix_index;
++
++ /* get path portion of URL */
++ path_start_index = (int)(strstr((char*)packet_data, " ") - (char*)packet_data);
++ while( packet_data[path_start_index] == ' ')
++ {
++ path_start_index++;
++ }
++ path_end_index= (int)(strstr( (char*)(packet_data+path_start_index), " ") - (char*)packet_data);
++ if(path_end_index > 0)
++ {
++ int path_length = path_end_index-path_start_index;
++ path_length = path_length < 625 ? path_length : 624; /* prevent overflow */
++ memcpy(path, packet_data+path_start_index, path_length);
++ path[ path_length] = '\0';
++ }
++
++ /* get header length */
++ last_header_index = 2;
++ memcpy(last_two_buf,(char*)packet_data, 2);
++ end_found = 0;
++ while(end_found == 0 && last_header_index < packet_length)
++ {
++ char next = (char)packet_data[last_header_index];
++ if(next == '\n')
++ {
++ end_found = last_two_buf[1] == '\n' || (last_two_buf[0] == '\n' && last_two_buf[1] == '\r') ? 1 : 0;
++ }
++ if(end_found == 0)
++ {
++ last_two_buf[0] = last_two_buf[1];
++ last_two_buf[1] = next;
++ last_header_index++;
++ }
++ }
++
++ /* get host portion of URL */
++ host_match = strnistr( (char*)packet_data, "Host:", last_header_index);
++ if(host_match != NULL)
++ {
++ int host_end_index;
++ host_match = host_match + 5; /* character after "Host:" */
++ while(host_match[0] == ' ')
++ {
++ host_match = host_match+1;
++ }
++
++ host_end_index = 0;
++ while( host_match[host_end_index] != '\n' &&
++ host_match[host_end_index] != '\r' &&
++ host_match[host_end_index] != ' ' &&
++ host_match[host_end_index] != ':' &&
++ ((char*)host_match - (char*)packet_data)+host_end_index < last_header_index
++ )
++ {
++ host_end_index++;
++ }
++			host_end_index = host_end_index < 625 ? host_end_index : 624; /* prevent overflow: clamp BEFORE memcpy into host[625] */
++			memcpy(host, host_match, host_end_index);
++ host[host_end_index] = '\0';
++
++
++ }
++
++ /* printk("host = \"%s\", path =\"%s\"\n", host, path); */
++
++
++ switch(info->match_part)
++ {
++ case WEBURL_DOMAIN_PART:
++ test = do_match_test(info->match_type, info->test_str, host);
++ if(!test && strstr(host, "www.") == host)
++ {
++ test = do_match_test(info->match_type, info->test_str, ((char*)host+4) );
++ }
++ break;
++ case WEBURL_PATH_PART:
++ test = do_match_test(info->match_type, info->test_str, path);
++ if( !test && path[0] == '/' )
++ {
++ test = do_match_test(info->match_type, info->test_str, ((char*)path+1) );
++ }
++ break;
++ case WEBURL_ALL_PART:
++
++ test_prefixes[0] = "http://";
++ test_prefixes[1] = "";
++ test_prefixes[2] = NULL;
++
++
++ for(prefix_index=0; test_prefixes[prefix_index] != NULL && test == 0; prefix_index++)
++ {
++				char test_url[1280]; /* "http://"(7) + host(<=624) + path(<=624) + NUL = 1256, so 1250 could overflow */
++ test_url[0] = '\0';
++ strcat(test_url, test_prefixes[prefix_index]);
++ strcat(test_url, host);
++ if(strcmp(path, "/") != 0)
++ {
++ strcat(test_url, path);
++ }
++ test = do_match_test(info->match_type, info->test_str, test_url);
++ if(!test && strcmp(path, "/") == 0)
++ {
++ strcat(test_url, path);
++ test = do_match_test(info->match_type, info->test_str, test_url);
++ }
++
++ /* printk("test_url = \"%s\", test=%d\n", test_url, test); */
++ }
++ if(!test && strstr(host, "www.") == host)
++ {
++ char* www_host = ((char*)host+4);
++ for(prefix_index=0; test_prefixes[prefix_index] != NULL && test == 0; prefix_index++)
++ {
++					char test_url[1280]; /* "http://"(7) + host(<=624) + path(<=624) + NUL = 1256, so 1250 could overflow */
++ test_url[0] = '\0';
++ strcat(test_url, test_prefixes[prefix_index]);
++ strcat(test_url, www_host);
++ if(strcmp(path, "/") != 0)
++ {
++ strcat(test_url, path);
++ }
++ test = do_match_test(info->match_type, info->test_str, test_url);
++ if(!test && strcmp(path, "/") == 0)
++ {
++ strcat(test_url, path);
++ test = do_match_test(info->match_type, info->test_str, test_url);
++ }
++
++ /* printk("test_url = \"%s\", test=%d\n", test_url, test); */
++ }
++ }
++ break;
++
++ }
++
++
++ /*
++ * If invert flag is set, return true if this IS a web request, but it didn't match
++ * Always return false for non-web requests
++ */
++ test = info->invert ? !test : test;
++ }
++
++ return test;
++}
++
++
++static bool match(const struct sk_buff *skb, struct xt_action_param *par)
++{
++
++ const struct ipt_weburl_info *info = (const struct ipt_weburl_info*)(par->matchinfo);
++
++
++ int test = 0;
++ struct iphdr* iph;
++
++ /* linearize skb if necessary */
++ struct sk_buff *linear_skb;
++ int skb_copied;
++ if(skb_is_nonlinear(skb))
++ {
++ linear_skb = skb_copy(skb, GFP_ATOMIC);
++ skb_copied = 1;
++ }
++ else
++ {
++ linear_skb = (struct sk_buff*)skb;
++ skb_copied = 0;
++ }
++
++
++
++ /* ignore packets that are not TCP */
++ iph = (struct iphdr*)(skb_network_header(skb));
++ if(iph->protocol == IPPROTO_TCP)
++ {
++ /* get payload */
++ struct tcphdr* tcp_hdr = (struct tcphdr*)( ((unsigned char*)iph) + (iph->ihl*4) );
++ unsigned short payload_offset = (tcp_hdr->doff*4) + (iph->ihl*4);
++ unsigned char* payload = ((unsigned char*)iph) + payload_offset;
++ unsigned short payload_length = ntohs(iph->tot_len) - payload_offset;
++
++
++
++ /* if payload length <= 10 bytes don't bother doing a check, otherwise check for match */
++ if(payload_length > 10)
++ {
++ test = http_match(info, payload, payload_length);
++ }
++ }
++
++ /* free skb if we made a copy to linearize it */
++ if(skb_copied == 1)
++ {
++ kfree_skb(linear_skb);
++ }
++
++
++ /* printk("returning %d from weburl\n\n\n", test); */
++ return test;
++}
++
++
++static int checkentry(const struct xt_mtchk_param *par)
++{
++ return 0;
++}
++
++
++static struct xt_match weburl_match __read_mostly =
++{
++ .name = "weburl",
++ .match = &match,
++ .family = AF_INET,
++ .matchsize = sizeof(struct ipt_weburl_info),
++ .checkentry = &checkentry,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ compiled_map = NULL;
++ return xt_register_match(&weburl_match);
++
++}
++
++static void __exit fini(void)
++{
++ xt_unregister_match(&weburl_match);
++ if(compiled_map != NULL)
++ {
++ unsigned long num_destroyed;
++ destroy_map(compiled_map, DESTROY_MODE_FREE_VALUES, &num_destroyed);
++ }
++}
++
++module_init(init);
++module_exit(fini);
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_webmon.c 2015-06-19 03:02:55.165678477 +0800
+@@ -0,0 +1,1200 @@
++/* webmon -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2011 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <net/sock.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/time.h>
++#include <linux/spinlock.h>
++#include <linux/proc_fs.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_webmon.h>
++
++#include "webmon_deps/tree_map.h"
++
++
++#include <linux/ktime.h>
++
++
++#include <linux/ip.h>
++#include <linux/netfilter/x_tables.h>
++
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Bishop");
++MODULE_DESCRIPTION("Monitor URL in HTTP Requests, designed for use with Gargoyle web interface (www.gargoyle-router.com)");
++
++#define NIPQUAD(addr) \
++ ((unsigned char *)&addr)[0], \
++ ((unsigned char *)&addr)[1], \
++ ((unsigned char *)&addr)[2], \
++ ((unsigned char *)&addr)[3]
++#define STRIP "%u.%u.%u.%u"
++
++typedef struct qn
++{
++ uint32_t src_ip;
++ char* value;
++ struct timeval time;
++ struct qn* next;
++ struct qn* previous;
++} queue_node;
++
++typedef struct
++{
++ queue_node* first;
++ queue_node* last;
++ int length;
++} queue;
++
++static string_map* domain_map = NULL;
++static queue* recent_domains = NULL;
++
++static string_map* search_map = NULL;
++static queue* recent_searches = NULL;
++
++
++static int max_domain_queue_length = 5;
++static int max_search_queue_length = 5;
++
++static spinlock_t webmon_lock = __SPIN_LOCK_UNLOCKED(webmon_lock);
++
++
++static void update_queue_node_time(queue_node* update_node, queue* full_queue)
++{
++ struct timeval t;
++ do_gettimeofday(&t);
++ update_node->time = t;
++
++ /* move to front of queue if not already at front of queue */
++ if(update_node->previous != NULL)
++ {
++ queue_node* p = update_node->previous;
++ queue_node* n = update_node->next;
++ p->next = n;
++ if(n != NULL)
++ {
++ n->previous = p;
++ }
++ else
++ {
++ full_queue->last = p;
++ }
++ update_node->previous = NULL;
++ update_node->next = full_queue->first;
++ full_queue->first->previous = update_node;
++ full_queue->first = update_node;
++ }
++}
++
++void add_queue_node(uint32_t src_ip, char* value, queue* full_queue, string_map* queue_index, char* queue_index_key, uint32_t max_queue_length )
++{
++
++ queue_node *new_node = (queue_node*)kmalloc(sizeof(queue_node), GFP_ATOMIC);
++ char* dyn_value = kernel_strdup(value);
++ struct timeval t;
++
++
++ if(new_node == NULL || dyn_value == NULL)
++ {
++ if(dyn_value) { kfree(dyn_value); }
++ if(new_node) { kfree(new_node); };
++
++ return;
++ }
++ set_map_element(queue_index, queue_index_key, (void*)new_node);
++
++
++ do_gettimeofday(&t);
++ new_node->time = t;
++ new_node->src_ip = src_ip;
++ new_node->value = dyn_value;
++ new_node->previous = NULL;
++
++ new_node->next = full_queue->first;
++ if(full_queue->first != NULL)
++ {
++ full_queue->first->previous = new_node;
++ }
++ full_queue->first = new_node;
++ full_queue->last = (full_queue->last == NULL) ? new_node : full_queue->last ;
++ full_queue->length = full_queue->length + 1;
++
++ if( full_queue->length > max_queue_length )
++ {
++ queue_node *old_node = full_queue->last;
++ full_queue->last = old_node->previous;
++ full_queue->last->next = NULL;
++ full_queue->first = old_node->previous == NULL ? NULL : full_queue->first; /*shouldn't be needed, but just in case...*/
++ full_queue->length = full_queue->length - 1;
++
++ sprintf(queue_index_key, STRIP"@%s", NIPQUAD(old_node->src_ip), old_node->value);
++ remove_map_element(queue_index, queue_index_key);
++
++ kfree(old_node->value);
++ kfree(old_node);
++ }
++
++ /*
++ queue_node* n = full_queue->first;
++ while(n != NULL)
++ {
++ printf("%ld\t%s\t%s\t%s\n", (unsigned long)n->time, n->src_ip, n->dst_ip, n->domain);
++ n = (queue_node*)n->next;
++ }
++ printf("\n\n");
++ */
++}
++
++void destroy_queue(queue* q)
++{
++ queue_node *last_node = q->last;
++ while(last_node != NULL)
++ {
++ queue_node *previous_node = last_node->previous;
++ free(last_node->value);
++ free(last_node);
++ last_node = previous_node;
++ }
++ free(q);
++}
++
++
++int strnicmp(const char * cs,const char * ct,size_t count)
++{
++ register signed char __res = 0;
++
++ while (count)
++ {
++ if ((__res = toupper( *cs ) - toupper( *ct++ ) ) != 0 || !*cs++)
++ {
++ break;
++ }
++ count--;
++ }
++ return __res;
++}
++
++char *strnistr(const char *s, const char *find, size_t slen)
++{
++ char c, sc;
++ size_t len;
++
++
++ if ((c = *find++) != '\0')
++ {
++ len = strlen(find);
++ do
++ {
++ do
++ {
++ if (slen < 1 || (sc = *s) == '\0')
++ {
++ return (NULL);
++ }
++ --slen;
++ ++s;
++ }
++ while ( toupper(sc) != toupper(c));
++
++ if (len > slen)
++ {
++ return (NULL);
++ }
++ }
++ while (strnicmp(s, find, len) != 0);
++
++ s--;
++ }
++ return ((char *)s);
++}
++
++/* NOTE: This is not quite real edit distance -- all differences are assumed to be in one contiguous block
++ * If differences are not in a contiguous block computed edit distance will be greater than real edit distance.
++ * Edit distance computed here is an upper bound on real edit distance.
++ */
++int within_edit_distance(char *s1, char *s2, int max_edit)
++{
++ int ret = 0;
++ if(s1 != NULL && s2 != NULL)
++ {
++ int edit1 = strlen(s1);
++ int edit2 = strlen(s2);
++ char* s1sp = s1;
++ char* s2sp = s2;
++ char* s1ep = s1 + (edit1-1);
++ char* s2ep = s2 + (edit2-1);
++ while(*s1sp != '\0' && *s2sp != '\0' && *s1sp == *s2sp)
++ {
++ s1sp++;
++ s2sp++;
++ edit1--;
++ edit2--;
++ }
++
++ /* if either is zero we got to the end of one of the strings */
++ while(s1ep > s1sp && s2ep > s2sp && *s1ep == *s2ep)
++ {
++ s1ep--;
++ s2ep--;
++ edit1--;
++ edit2--;
++ }
++ ret = edit1 <= max_edit && edit2 <= max_edit ? 1 : 0;
++ }
++ return ret;
++}
++
++
++/*
++ * line is the line to be parsed -- it is not modified in any way
++ * max_pieces indicates number of pieces to return, if negative this is determined dynamically
++ * include_remainder_at_max indicates whether the last piece, when max pieces are reached,
++ * should be what it would normally be (0) or the entire remainder of the line (1)
++ * if max_pieces < 0 this parameter is ignored
++ *
++ *
++ * returns all non-separator pieces in a line
++ * result is dynamically allocated, MUST be freed after call-- even if
++ * line is empty (you still get a valid char** pointer to to a NULL char*)
++ */
++char** split_on_separators(char* line, char* separators, int num_separators, int max_pieces, int include_remainder_at_max, unsigned long *num_pieces)
++{
++ char** split;
++
++ *num_pieces = 0;
++ if(line != NULL)
++ {
++ int split_index;
++ int non_separator_found;
++ char* dup_line;
++ char* start;
++
++ if(max_pieces < 0)
++ {
++ /* count number of separator characters in line -- this count + 1 is an upperbound on number of pieces */
++ int separator_count = 0;
++ int line_index;
++ for(line_index = 0; line[line_index] != '\0'; line_index++)
++ {
++ int sep_index;
++ int found = 0;
++ for(sep_index =0; found == 0 && sep_index < num_separators; sep_index++)
++ {
++ found = separators[sep_index] == line[line_index] ? 1 : 0;
++ }
++ separator_count = separator_count+ found;
++ }
++ max_pieces = separator_count + 1;
++ }
++ split = (char**)malloc((1+max_pieces)*sizeof(char*));
++ split_index = 0;
++ split[split_index] = NULL;
++
++
++ dup_line = strdup(line);
++ start = dup_line;
++ non_separator_found = 0;
++ while(non_separator_found == 0)
++ {
++ int matches = 0;
++ int sep_index;
++ for(sep_index =0; sep_index < num_separators; sep_index++)
++ {
++ matches = matches == 1 || separators[sep_index] == start[0] ? 1 : 0;
++ }
++ non_separator_found = matches==0 || start[0] == '\0' ? 1 : 0;
++ if(non_separator_found == 0)
++ {
++ start++;
++ }
++ }
++
++ while(start[0] != '\0' && split_index < max_pieces)
++ {
++ /* find first separator index */
++ int first_separator_index = 0;
++ int separator_found = 0;
++ while( separator_found == 0 )
++ {
++ int sep_index;
++ for(sep_index =0; separator_found == 0 && sep_index < num_separators; sep_index++)
++ {
++ separator_found = separators[sep_index] == start[first_separator_index] || start[first_separator_index] == '\0' ? 1 : 0;
++ }
++ if(separator_found == 0)
++ {
++ first_separator_index++;
++ }
++ }
++
++ /* copy next piece to split array */
++ if(first_separator_index > 0)
++ {
++ char* next_piece = NULL;
++ if(split_index +1 < max_pieces || include_remainder_at_max <= 0)
++ {
++ next_piece = (char*)malloc((first_separator_index+1)*sizeof(char));
++ memcpy(next_piece, start, first_separator_index);
++ next_piece[first_separator_index] = '\0';
++ }
++ else
++ {
++ next_piece = strdup(start);
++ }
++ split[split_index] = next_piece;
++ split[split_index+1] = NULL;
++ split_index++;
++ }
++
++
++ /* find next non-separator index, indicating start of next piece */
++ start = start+ first_separator_index;
++ non_separator_found = 0;
++ while(non_separator_found == 0)
++ {
++ int matches = 0;
++ int sep_index;
++ for(sep_index =0; sep_index < num_separators; sep_index++)
++ {
++ matches = matches == 1 || separators[sep_index] == start[0] ? 1 : 0;
++ }
++ non_separator_found = matches==0 || start[0] == '\0' ? 1 : 0;
++ if(non_separator_found == 0)
++ {
++ start++;
++ }
++ }
++ }
++ free(dup_line);
++ *num_pieces = split_index;
++ }
++ else
++ {
++ split = (char**)malloc((1)*sizeof(char*));
++ split[0] = NULL;
++ }
++ return split;
++}
++
++
++
++static void extract_url(const unsigned char* packet_data, int packet_length, char* domain, char* path)
++{
++
++ int path_start_index;
++ int path_end_index;
++ int last_header_index;
++ char last_two_buf[2];
++ int end_found;
++ char* domain_match;
++ char* start_ptr;
++
++ domain[0] = '\0';
++ path[0] = '\0';
++
++
++ /* get path portion of URL */
++ start_ptr = strnistr((char*)packet_data, " ", packet_length);
++ if(start_ptr == NULL)
++ {
++ return;
++ }
++
++ path_start_index = (int)(start_ptr - (char*)packet_data);
++ start_ptr = strnistr((char*)(packet_data+path_start_index), " ", packet_length-(path_start_index+2));
++ if(start_ptr == NULL)
++ {
++ return;
++ }
++
++ while( packet_data[path_start_index] == ' ')
++ {
++ path_start_index++;
++ }
++ path_end_index= (int)(strstr( (char*)(packet_data+path_start_index), " ") - (char*)packet_data);
++ if(path_end_index > 0)
++ {
++ int path_length = path_end_index-path_start_index;
++ path_length = path_length < 625 ? path_length : 624; /* prevent overflow */
++ memcpy(path, packet_data+path_start_index, path_length);
++ path[ path_length] = '\0';
++ }
++ else
++ {
++ return;
++ }
++
++ /* get header length */
++ last_header_index = 2;
++ memcpy(last_two_buf,(char*)packet_data, 2);
++ end_found = 0;
++ while(end_found == 0 && last_header_index < packet_length)
++ {
++ char next = (char)packet_data[last_header_index];
++ if(next == '\n')
++ {
++ end_found = last_two_buf[1] == '\n' || (last_two_buf[0] == '\n' && last_two_buf[1] == '\r') ? 1 : 0;
++ }
++ if(end_found == 0)
++ {
++ last_two_buf[0] = last_two_buf[1];
++ last_two_buf[1] = next;
++ last_header_index++;
++ }
++ }
++
++ /* get domain portion of URL */
++ domain_match = strnistr( (char*)packet_data, "Host:", last_header_index);
++ if(domain_match != NULL)
++ {
++ int domain_end_index;
++ domain_match = domain_match + 5; /* character after "Host:" */
++ while(domain_match[0] == ' ' && ( (char*)domain_match - (char*)packet_data) < last_header_index)
++ {
++ domain_match = domain_match+1;
++ }
++
++ domain_end_index = 0;
++ while( domain_match[domain_end_index] != '\n' &&
++ domain_match[domain_end_index] != '\r' &&
++ domain_match[domain_end_index] != ' ' &&
++ domain_match[domain_end_index] != ':' &&
++ ((char*)domain_match - (char*)packet_data)+domain_end_index < last_header_index
++ )
++ {
++ domain_end_index++;
++ }
++ domain_end_index = domain_end_index < 625 ? domain_end_index : 624; /* prevent overflow */
++ memcpy(domain, domain_match, domain_end_index);
++ domain[domain_end_index] = '\0';
++
++ for(domain_end_index=0; domain[domain_end_index] != '\0'; domain_end_index++)
++ {
++ domain[domain_end_index] = (char)tolower(domain[domain_end_index]);
++ }
++ }
++}
++
++#ifdef CONFIG_PROC_FS
++
++static void *webmon_proc_start(struct seq_file *seq, loff_t *loff_pos)
++{
++ static unsigned long counter = 0;
++
++ /* beginning a new sequence ? */
++ if ( *loff_pos == 0 )
++ {
++ /* yes => return a non null value to begin the sequence */
++ return &counter;
++ }
++ else
++ {
++ /* no => it's the end of the sequence, return end to stop reading */
++ *loff_pos = 0;
++ return NULL;
++ }
++}
++
++static void *webmon_proc_next(struct seq_file *seq, void *v, loff_t *pos)
++{
++ return NULL;
++}
++
++
++static void webmon_proc_stop(struct seq_file *seq, void *v)
++{
++ //don't need to do anything
++}
++
++
++static int webmon_proc_domain_show(struct seq_file *s, void *v)
++{
++ queue_node* next_node;
++ spin_lock_bh(&webmon_lock);
++
++ next_node = recent_domains->last;
++ while(next_node != NULL)
++ {
++ seq_printf(s, "%ld\t"STRIP"\t%s\n", (unsigned long)(next_node->time).tv_sec, NIPQUAD(next_node->src_ip), next_node->value);
++ next_node = (queue_node*)next_node->previous;
++ }
++ spin_unlock_bh(&webmon_lock);
++
++ return 0;
++}
++
++static int webmon_proc_search_show(struct seq_file *s, void *v)
++{
++ queue_node* next_node;
++ spin_lock_bh(&webmon_lock);
++
++ next_node = recent_searches->last;
++ while(next_node != NULL)
++ {
++ seq_printf(s, "%ld\t"STRIP"\t%s\n", (unsigned long)(next_node->time).tv_sec, NIPQUAD(next_node->src_ip), next_node->value);
++ next_node = (queue_node*)next_node->previous;
++ }
++ spin_unlock_bh(&webmon_lock);
++
++ return 0;
++}
++
++
++static struct seq_operations webmon_proc_domain_sops = {
++ .start = webmon_proc_start,
++ .next = webmon_proc_next,
++ .stop = webmon_proc_stop,
++ .show = webmon_proc_domain_show
++};
++
++static struct seq_operations webmon_proc_search_sops = {
++ .start = webmon_proc_start,
++ .next = webmon_proc_next,
++ .stop = webmon_proc_stop,
++ .show = webmon_proc_search_show
++};
++
++
++static int webmon_proc_domain_open(struct inode *inode, struct file* file)
++{
++ return seq_open(file, &webmon_proc_domain_sops);
++}
++static int webmon_proc_search_open(struct inode *inode, struct file* file)
++{
++ return seq_open(file, &webmon_proc_search_sops);
++}
++
++
++
++static struct file_operations webmon_proc_domain_fops = {
++ .owner = THIS_MODULE,
++ .open = webmon_proc_domain_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release
++};
++static struct file_operations webmon_proc_search_fops = {
++ .owner = THIS_MODULE,
++ .open = webmon_proc_search_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release
++};
++
++
++#endif
++
++
++
++
++
++
++static int ipt_webmon_set_ctl(struct sock *sk, int cmd, void *user, u_int32_t len)
++{
++
++ char* buffer = kmalloc(len, GFP_ATOMIC);
++ if(buffer == NULL) /* check for malloc failure */
++ {
++ return 0;
++ }
++ spin_lock_bh(&webmon_lock);
++ copy_from_user(buffer, user, len);
++
++ if(len > 1 + sizeof(uint32_t))
++ {
++ unsigned char type = buffer[0];
++ uint32_t max_queue_length = *((uint32_t*)(buffer+1));
++ char* data = buffer+1+sizeof(uint32_t);
++ char newline_terminator[] = { '\n', '\r' };
++ char whitespace_chars[] = { '\t', ' ' };
++
++ if(type == WEBMON_DOMAIN || type == WEBMON_SEARCH )
++ {
++ unsigned long num_destroyed;
++
++
++ /* destroy and re-initialize queue and map */
++ if(type == WEBMON_DOMAIN )
++ {
++ destroy_map(domain_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++ destroy_queue(recent_domains);
++ recent_domains = (queue*)malloc(sizeof(queue));
++ recent_domains->first = NULL;
++ recent_domains->last = NULL;
++ recent_domains->length = 0;
++ domain_map = initialize_map(0);
++
++ max_domain_queue_length = max_queue_length;
++ }
++ else if(type == WEBMON_SEARCH)
++ {
++ destroy_map(search_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++ destroy_queue(recent_searches);
++ recent_searches = (queue*)malloc(sizeof(queue));
++ recent_searches->first = NULL;
++ recent_searches->last = NULL;
++ recent_searches->length = 0;
++ search_map = initialize_map(0);
++
++ max_search_queue_length = max_queue_length;
++ }
++
++ if(data[0] != '\0')
++ {
++ unsigned long num_lines;
++ unsigned long line_index;
++ char** lines = split_on_separators(data, newline_terminator, 2, -1, 0, &num_lines);
++ for(line_index=0; line_index < num_lines; line_index++)
++ {
++ char* line = lines[line_index];
++ unsigned long num_pieces;
++ char** split = split_on_separators(line, whitespace_chars, 2, -1, 0, &num_pieces);
++
++ //check that there are 3 pieces (time, src_ip, value)
++ int length;
++ for(length=0; split[length] != NULL ; length++){}
++ if(length == 3)
++ {
++ time_t time;
++ int parsed_ip[4];
++ int valid_ip = sscanf(split[1], "%d.%d.%d.%d", parsed_ip, parsed_ip+1, parsed_ip+2, parsed_ip+3);
++ if(valid_ip == 4)
++ {
++ valid_ip = parsed_ip[0] <= 255 && parsed_ip[1] <= 255 && parsed_ip[2] <= 255 && parsed_ip[3] <= 255 ? valid_ip : 0;
++ }
++ if(sscanf(split[0], "%ld", &time) > 0 && valid_ip == 4)
++ {
++ char* value = split[2];
++ char value_key[700];
++ uint32_t ip = (parsed_ip[0]<<24) + (parsed_ip[1]<<16) + (parsed_ip[2]<<8) + (parsed_ip[3]) ;
++ ip = htonl(ip);
++ sprintf(value_key, STRIP"@%s", NIPQUAD(ip), value);
++ if(type == WEBMON_DOMAIN)
++ {
++ add_queue_node(ip, value, recent_domains, domain_map, value_key, max_domain_queue_length );
++ (recent_domains->first->time).tv_sec = time;
++ }
++ else if(type == WEBMON_SEARCH)
++ {
++ add_queue_node(ip, value, recent_searches, search_map, value_key, max_search_queue_length );
++ (recent_searches->first->time).tv_sec = time;
++ }
++ }
++ }
++
++ for(length=0; split[length] != NULL ; length++)
++ {
++ free(split[length]);
++ }
++ free(split);
++ free(line);
++ }
++ free(lines);
++ }
++ }
++ }
++ kfree(buffer);
++ spin_unlock_bh(&webmon_lock);
++
++
++ return 1;
++}
++static struct nf_sockopt_ops ipt_webmon_sockopts =
++{
++ .pf = PF_INET,
++ .set_optmin = WEBMON_SET,
++ .set_optmax = WEBMON_SET+1,
++ .set = ipt_webmon_set_ctl,
++};
++
++
++
++
++static bool match(const struct sk_buff *skb, struct xt_action_param *par)
++{
++
++ const struct ipt_webmon_info *info = (const struct ipt_webmon_info*)(par->matchinfo);
++
++
++ struct iphdr* iph;
++
++ /* linearize skb if necessary */
++ struct sk_buff *linear_skb;
++ int skb_copied;
++ if(skb_is_nonlinear(skb))
++ {
++ linear_skb = skb_copy(skb, GFP_ATOMIC);
++ skb_copied = 1;
++ }
++ else
++ {
++ linear_skb = (struct sk_buff*)skb;
++ skb_copied = 0;
++ }
++
++
++
++ /* ignore packets that are not TCP */
++ iph = (struct iphdr*)(skb_network_header(skb));
++ if(iph->protocol == IPPROTO_TCP)
++ {
++ /* get payload */
++ struct tcphdr* tcp_hdr = (struct tcphdr*)( ((unsigned char*)iph) + (iph->ihl*4) );
++ unsigned short payload_offset = (tcp_hdr->doff*4) + (iph->ihl*4);
++ unsigned char* payload = ((unsigned char*)iph) + payload_offset;
++ unsigned short payload_length = ntohs(iph->tot_len) - payload_offset;
++
++
++
++ /* if payload length <= 10 bytes don't bother doing a check, otherwise check for match */
++ if(payload_length > 10)
++ {
++ /* are we dealing with a web page request */
++ if(strnicmp((char*)payload, "GET ", 4) == 0 || strnicmp( (char*)payload, "POST ", 5) == 0 || strnicmp((char*)payload, "HEAD ", 5) == 0)
++ {
++ char domain[650];
++ char path[650];
++ char domain_key[700];
++ unsigned char save = info->exclude_type == WEBMON_EXCLUDE ? 1 : 0;
++ uint32_t ip_index;
++
++
++ for(ip_index = 0; ip_index < info->num_exclude_ips; ip_index++)
++ {
++ if( (info->exclude_ips)[ip_index] == iph->saddr )
++ {
++ save = info->exclude_type == WEBMON_EXCLUDE ? 0 : 1;
++ }
++ }
++ for(ip_index=0; ip_index < info->num_exclude_ranges; ip_index++)
++ {
++ struct ipt_webmon_ip_range r = (info->exclude_ranges)[ip_index];
++ if( (unsigned long)ntohl( r.start) <= (unsigned long)ntohl(iph->saddr) && (unsigned long)ntohl(r.end) >= (unsigned long)ntohl(iph->saddr) )
++ {
++ save = info->exclude_type == WEBMON_EXCLUDE ? 0 : 1;
++ }
++ }
++
++
++ if(save)
++ {
++ extract_url(payload, payload_length, domain, path);
++
++
++ sprintf(domain_key, STRIP"@%s", NIPQUAD(iph->saddr), domain);
++
++ if(strlen(domain) > 0)
++ {
++ char *search_part = NULL;
++ spin_lock_bh(&webmon_lock);
++
++
++
++ if(get_string_map_element(domain_map, domain_key))
++ {
++ //update time
++ update_queue_node_time( (queue_node*)get_map_element(domain_map, domain_key), recent_domains );
++ }
++ else
++ {
++ //add
++ add_queue_node(iph->saddr, domain, recent_domains, domain_map, domain_key, max_domain_queue_length );
++ }
++
++
++ /* printk("domain,path=\"%s\", \"%s\"\n", domain, path); */
++
++ if(strnistr(domain, "google.", 625) != NULL)
++ {
++ search_part = strstr(path, "&q=");
++ search_part = search_part == NULL ? strstr(path, "#q=") : search_part;
++ search_part = search_part == NULL ? strstr(path, "?q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "bing.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "yahoo.") != NULL)
++ {
++ search_part = strstr(path, "?p=");
++ search_part = search_part == NULL ? strstr(path, "&p=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "lycos.") != NULL)
++ {
++ search_part = strstr(path, "&query=");
++ search_part = search_part == NULL ? strstr(path, "?query=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+7;
++ }
++ else if(strstr(domain, "altavista.") != NULL)
++ {
++ search_part = strstr(path, "&q=");
++ search_part = search_part == NULL ? strstr(path, "?q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "duckduckgo.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "baidu.") != NULL)
++ {
++ search_part = strstr(path, "?wd=");
++ search_part = search_part == NULL ? strstr(path, "&wd=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+4;
++ }
++ else if(strstr(domain, "search.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "aol.") != NULL)
++ {
++ search_part = strstr(path, "&q=");
++ search_part = search_part == NULL ? strstr(path, "?q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "ask.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "yandex.") != NULL)
++ {
++ search_part = strstr(path, "?text=");
++ search_part = search_part == NULL ? strstr(path, "&text=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+6;
++ }
++ else if(strstr(domain, "naver.") != NULL)
++ {
++ search_part = strstr(path, "&query=");
++ search_part = search_part == NULL ? strstr(path, "?query=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+7;
++ }
++ else if(strstr(domain, "daum.") != NULL)
++ {
++ search_part = strstr(path, "&q=");
++ search_part = search_part == NULL ? strstr(path, "?q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "cuil.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "kosmix.") != NULL)
++ {
++ search_part = strstr(path, "/topic/");
++ search_part = search_part == NULL ? search_part : search_part+7;
++ }
++ else if(strstr(domain, "yebol.") != NULL)
++ {
++ search_part = strstr(path, "?key=");
++ search_part = search_part == NULL ? strstr(path, "&key=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+5;
++ }
++ else if(strstr(domain, "sogou.") != NULL)
++ {
++ search_part = strstr(path, "&query=");
++ search_part = search_part == NULL ? strstr(path, "?query=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+7;
++ }
++ else if(strstr(domain, "youdao.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "metacrawler.") != NULL)
++ {
++ search_part = strstr(path, "/ws/results/Web/");
++ search_part = search_part == NULL ? search_part : search_part+16;
++ }
++ else if(strstr(domain, "webcrawler.") != NULL)
++ {
++ search_part = strstr(path, "/ws/results/Web/");
++ search_part = search_part == NULL ? search_part : search_part+16;
++ }
++
++
++ if(search_part != NULL)
++ {
++ int spi, si;
++ char search_key[700];
++ char search[650];
++ queue_node *recent_node = recent_searches->first;
++
++ /*unescape, replacing whitespace with + */
++ si = 0;
++ for(spi=0; search_part[spi] != '\0' && search_part[spi] != '&' && search_part[spi] != '/'; spi++)
++ {
++ int parsed_hex = 0;
++ if( search_part[spi] == '%')
++ {
++ if(search_part[spi+1] != '\0' && search_part[spi+1] != '&' && search_part[spi+1] != '/')
++ {
++ if(search_part[spi+2] != '\0' && search_part[spi+2] != '&' && search_part[spi+2] != '/')
++ {
++ char enc[3];
++ int hex;
++ enc[0] = search_part[spi+1];
++ enc[1] = search_part[spi+2];
++ enc[2] = '\0';
++ if(sscanf(enc, "%x", &hex) > 0)
++ {
++ parsed_hex = 1;
++ search[si] = hex == ' ' || hex == '\t' || hex == '\r' || hex == '\n' ? '+' : (char)hex;
++ spi = spi+2;
++ }
++ }
++ }
++ }
++ if(parsed_hex == 0)
++ {
++ search[si] = search_part[spi];
++ }
++ si++;
++ }
++ search[si] = '\0';
++
++
++
++ sprintf(search_key, STRIP"@%s", NIPQUAD(iph->saddr), search);
++
++
++							/* Search engines often fire intermediate queries as the user types, so the most recent query may not be the real one.
++							 * If the previous query from this IP is very recent (<=1s), or recent (<=5s) and within edit distance 2 of the current
++							 * one, drop it in favor of the current query. */
++ if(recent_node != NULL)
++ {
++ if(recent_node->src_ip == iph->saddr)
++ {
++ struct timeval t;
++ do_gettimeofday(&t);
++ if( (recent_node->time).tv_sec + 1 >= t.tv_sec || ((recent_node->time).tv_sec + 5 >= t.tv_sec && within_edit_distance(search, recent_node->value, 2)))
++ {
++ char recent_key[700];
++
++ sprintf(recent_key, STRIP"@%s", NIPQUAD(recent_node->src_ip), recent_node->value);
++ remove_map_element(search_map, recent_key);
++
++ recent_searches->first = recent_node->next;
++ recent_searches->last = recent_searches->first == NULL ? NULL : recent_searches->last;
++ if(recent_searches->first != NULL)
++ {
++ recent_searches->first->previous = NULL;
++ }
++ recent_searches->length = recent_searches->length - 1 ;
++ free(recent_node->value);
++ free(recent_node);
++ }
++ }
++ }
++
++
++
++ if(get_string_map_element(search_map, search_key))
++ {
++ //update time
++ update_queue_node_time( (queue_node*)get_map_element(search_map, search_key), recent_searches );
++ }
++ else
++ {
++ //add
++ add_queue_node(iph->saddr, search, recent_searches, search_map, search_key, max_search_queue_length );
++ }
++ }
++ spin_unlock_bh(&webmon_lock);
++ }
++ }
++ }
++ }
++ }
++
++ /* free skb if we made a copy to linearize it */
++ if(skb_copied == 1)
++ {
++ kfree_skb(linear_skb);
++ }
++
++
++ /* printk("returning %d from webmon\n\n\n", test); */
++ return 0;
++}
++
++
++
++static int checkentry(const struct xt_mtchk_param *par)
++{
++
++ struct ipt_webmon_info *info = (struct ipt_webmon_info*)(par->matchinfo);
++
++
++ spin_lock_bh(&webmon_lock);
++ if(info->ref_count == NULL) /* first instance, we're inserting rule */
++ {
++ info->ref_count = (uint32_t*)kmalloc(sizeof(uint32_t), GFP_ATOMIC);
++ if(info->ref_count == NULL) /* deal with kmalloc failure */
++ {
++ printk("ipt_webmon: kmalloc failure in checkentry!\n");
++ return 0;
++ }
++ *(info->ref_count) = 1;
++
++
++ max_search_queue_length = info->max_searches;
++ max_domain_queue_length = info->max_domains;
++
++
++ }
++ else
++ {
++ *(info->ref_count) = *(info->ref_count) + 1;
++ }
++ spin_unlock_bh(&webmon_lock);
++
++ return 0;
++}
++
++static void destroy( const struct xt_mtdtor_param *par )
++{
++ struct ipt_webmon_info *info = (struct ipt_webmon_info*)(par->matchinfo);
++
++ spin_lock_bh(&webmon_lock);
++ *(info->ref_count) = *(info->ref_count) - 1;
++ if(*(info->ref_count) == 0)
++ {
++ kfree(info->ref_count);
++ }
++ spin_unlock_bh(&webmon_lock);
++
++}
++
++static struct xt_match webmon_match __read_mostly =
++{
++
++ .name = "webmon",
++ .match = &match,
++ .family = AF_INET,
++ .matchsize = sizeof(struct ipt_webmon_info),
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE,
++};
++
++#ifdef CONFIG_PROC_FS
++ struct proc_dir_entry *proc_webmon_recent_domains;
++ struct proc_dir_entry *proc_webmon_recent_searches;
++#endif
++
++
++static int __init init(void)
++{
++/*
++ #ifdef CONFIG_PROC_FS
++ struct proc_dir_entry *proc_webmon_recent_domains;
++ struct proc_dir_entry *proc_webmon_recent_searches;
++ #endif
++*/
++ spin_lock_bh(&webmon_lock);
++
++ recent_domains = (queue*)malloc(sizeof(queue));
++ recent_domains->first = NULL;
++ recent_domains->last = NULL;
++ recent_domains->length = 0;
++ domain_map = initialize_string_map(0);
++
++ recent_searches = (queue*)malloc(sizeof(queue));
++ recent_searches->first = NULL;
++ recent_searches->last = NULL;
++ recent_searches->length = 0;
++ search_map = initialize_string_map(0);
++
++
++
++ #ifdef CONFIG_PROC_FS
++ proc_webmon_recent_domains =proc_create("webmon_recent_domains", 0, NULL, &webmon_proc_domain_fops);
++ proc_webmon_recent_searches =proc_create("webmon_recent_searches", 0, NULL, &webmon_proc_search_fops);
++/* if(proc_webmon_recent_domains)
++ {
++ proc_webmon_recent_domains->proc_fops = &webmon_proc_domain_fops;
++ }
++ if(proc_webmon_recent_searches)
++ {
++ proc_webmon_recent_searches->proc_fops = &webmon_proc_search_fops;
++ }*/
++ #endif
++
++ if (nf_register_sockopt(&ipt_webmon_sockopts) < 0)
++ {
++ printk("ipt_webmon: Can't register sockopts. Aborting\n");
++ spin_unlock_bh(&webmon_lock);
++ return -1;
++ }
++ spin_unlock_bh(&webmon_lock);
++
++ return xt_register_match(&webmon_match);
++}
++
++static void __exit fini(void)
++{
++
++ unsigned long num_destroyed;
++
++ spin_lock_bh(&webmon_lock);
++
++
++ #ifdef CONFIG_PROC_FS
++ proc_remove(proc_webmon_recent_domains);
++ proc_remove(proc_webmon_recent_searches);
++ #endif
++ nf_unregister_sockopt(&ipt_webmon_sockopts);
++ xt_unregister_match(&webmon_match);
++ destroy_map(domain_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++ destroy_map(search_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++ destroy_queue(recent_domains);
++ destroy_queue(recent_searches);
++
++ spin_unlock_bh(&webmon_lock);
++
++
++}
++
++module_init(init);
++module_exit(fini);
++
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/bandwidth_deps/tree_map.h 2015-06-19 03:02:55.365670123 +0800
+@@ -0,0 +1,1093 @@
++/*
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This work 'as-is' we provide.
++ * No warranty, express or implied.
++ * We've done our best,
++ * to debug and test.
++ * Liability for damages denied.
++ *
++ * Permission is granted hereby,
++ * to copy, share, and modify.
++ * Use as is fit,
++ * free or for profit.
++ * On this notice these rights rely.
++ *
++ *
++ *
++ * Note that unlike other portions of Gargoyle this code
++ * does not fall under the GPL, but the rather whimsical
++ * 'Poetic License' above.
++ *
++ * Basically, this library contains a bunch of utilities
++ * that I find useful. I'm sure other libraries exist
++ * that are just as good or better, but I like these tools
++ * because I personally wrote them, so I know their quirks.
++ * (i.e. I know where the bodies are buried). I want to
++ * make sure that I can re-use these utilities for whatever
++ * code I may want to write in the future be it
++ * proprietary or open-source, so I've put them under
++ * a very, very permissive license.
++ *
++ * If you find this code useful, use it. If not, don't.
++ * I really don't care.
++ *
++ */
++
++
++#if __KERNEL__
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++ #define free(foo) kfree(foo)
++ #define printf(format,args...) printk(format,##args)
++
++ /* kernel strdup */
++ static inline char *kernel_strdup(const char *str);
++ static inline char *kernel_strdup(const char *str)
++ {
++ char *tmp;
++ long int s;
++ s=strlen(str) + 1;
++ tmp = kmalloc(s, GFP_ATOMIC);
++ if (tmp != NULL)
++ {
++ memcpy(tmp, str, s);
++ }
++ return tmp;
++ }
++ #define strdup kernel_strdup
++
++#endif
++
++
++
++/* tree_map structs / prototypes */
++typedef struct long_tree_map_node
++{
++ unsigned long key;
++ void* value;
++
++ signed char balance;
++ struct long_tree_map_node* left;
++ struct long_tree_map_node* right;
++} long_map_node;
++
++typedef struct
++{
++ long_map_node* root;
++ unsigned long num_elements;
++
++}long_map;
++
++typedef struct
++{
++ long_map lm;
++ unsigned char store_keys;
++ unsigned long num_elements;
++
++}string_map;
++
++
++
++/* long map functions */
++long_map* initialize_long_map(void);
++void* get_long_map_element(long_map* map, unsigned long key);
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* set_long_map_element(long_map* map, unsigned long key, void* value);
++void* remove_long_map_element(long_map* map, unsigned long key);
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned);
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned);
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value));
++
++/* string map functions */
++string_map* initialize_string_map(unsigned char store_keys);
++void* get_string_map_element(string_map* map, const char* key);
++void* get_string_map_element_with_hashed_key(string_map* map, unsigned long hashed_key);
++void* set_string_map_element(string_map* map, const char* key, void* value);
++void* remove_string_map_element(string_map* map, const char* key);
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned);
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned);
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value));
++
++
++/*
++ * three different ways to deal with values when data structure is destroyed
++ */
++#define DESTROY_MODE_RETURN_VALUES 20
++#define DESTROY_MODE_FREE_VALUES 21
++#define DESTROY_MODE_IGNORE_VALUES 22
++
++
++/*
++ * for convenience & backwards compatibility alias _string_map_ functions to
++ * _map_ functions since string map is used more often than long map
++ */
++#define initialize_map initialize_string_map
++#define set_map_element set_string_map_element
++#define get_map_element get_string_map_element
++#define remove_map_element remove_string_map_element
++#define get_map_keys get_string_map_keys
++#define get_map_values get_string_map_values
++#define destroy_map destroy_string_map
++
++
++/* internal utility structures/ functions */
++typedef struct stack_node_struct
++{
++ long_map_node** node_ptr;
++ signed char direction;
++ struct stack_node_struct* previous;
++} stack_node;
++
++static void free_stack(stack_node* stack);
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed);
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value));
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value));
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth);
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth);
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op);
++static void rotate_right (long_map_node** parent);
++static void rotate_left (long_map_node** parent);
++
++/* internal for string map */
++typedef struct
++{
++ char* key;
++ void* value;
++} string_map_key_value;
++static unsigned long sdbm_string_hash(const char *key);
++
++
++
++
++/***************************************************
++ * For testing only
++ ***************************************************/
++/*
++void print_list(stack_node *l);
++
++void print_list(stack_node *l)
++{
++ if(l != NULL)
++ {
++ printf(" list key = %ld, dir=%d, \n", (*(l->node_ptr))->key, l->direction);
++ print_list(l->previous);
++ }
++}
++*/
++/******************************************************
++ * End testing Code
++ *******************************************************/
++
++
++
++
++/***************************************************
++ * string_map function definitions
++ ***************************************************/
++
++string_map* initialize_string_map(unsigned char store_keys)
++{
++ string_map* map = (string_map*)malloc(sizeof(string_map));
++ if(map != NULL)
++ {
++ map->store_keys = store_keys;
++ map->lm.root = NULL;
++ map->lm.num_elements = 0;
++ map->num_elements = map->lm.num_elements;
++ }
++ return map;
++}
++
++void* get_string_map_element(string_map* map, const char* key)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++
++ return get_string_map_element_with_hashed_key(map, hashed_key);
++}
++
++void* get_string_map_element_with_hashed_key(string_map* map, unsigned long hashed_key)
++{
++ void* return_value;
++ /* printk("doing lookup for key = %lu\n", hashed_key); */
++ return_value = get_long_map_element( &(map->lm), hashed_key);
++ if(return_value != NULL && map->store_keys)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++void* set_string_map_element(string_map* map, const char* key, void* value)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = NULL;
++ if(map->store_keys)
++ {
++ string_map_key_value* kv = (string_map_key_value*)malloc(sizeof(string_map_key_value));
++ if(kv == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ kv->key = strdup(key);
++ if(kv->key == NULL) /* deal with malloc failure */
++ {
++ free(kv);
++ return NULL;
++ }
++ kv->value = value;
++ return_value = set_long_map_element( &(map->lm), hashed_key, kv);
++ if(return_value != NULL)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ free(r->key);
++ free(r);
++ }
++ }
++ else
++ {
++ return_value = set_long_map_element( &(map->lm), hashed_key, value);
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++void* remove_string_map_element(string_map* map, const char* key)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = remove_long_map_element( &(map->lm), hashed_key);
++
++ if(return_value != NULL && map->store_keys)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ free(r->key);
++ free(r);
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned)
++{
++ char** str_keys;
++ str_keys = (char**)malloc((map->num_elements+1)*sizeof(char*));
++ if(str_keys == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ str_keys[0] = NULL;
++ *num_keys_returned = 0;
++ if(map->store_keys && map->num_elements > 0)
++ {
++ unsigned long list_length;
++ void** long_values = get_sorted_long_map_values( &(map->lm), &list_length);
++ unsigned long key_index;
++ /*list_length will be 0 on malloc failure in get_sorted_long_map_values, so this code shouldn't seg fault if that happens */
++ for(key_index = 0; key_index < list_length; key_index++)
++ {
++ str_keys[key_index] = strdup( ((string_map_key_value*)(long_values[key_index]))->key);
++ if(str_keys[key_index] == NULL) /* deal with malloc failure */
++ {
++ //just return the incomplete list (hey, it's null terminated...)
++ free(long_values);
++ return str_keys;
++ }
++ *num_keys_returned = *num_keys_returned + 1;
++ }
++ str_keys[list_length] = NULL;
++ free(long_values);
++ }
++ return str_keys;
++}
++
++
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned)
++{
++ void** values = NULL;
++ if(map != NULL)
++ {
++ values = get_sorted_long_map_values ( &(map->lm), num_values_returned );
++ }
++ return values;
++}
++
++
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = NULL;
++ if(map != NULL)
++ {
++ if(map->store_keys)
++ {
++ void** kvs = destroy_long_map_values( &(map->lm), DESTROY_MODE_RETURN_VALUES, num_destroyed );
++ unsigned long kv_index = 0;
++ for(kv_index=0; kv_index < *num_destroyed; kv_index++)
++ {
++ string_map_key_value* kv = (string_map_key_value*)kvs[kv_index];
++ void* value = kv->value;
++
++ free(kv->key);
++ free(kv);
++ if(destruction_type == DESTROY_MODE_FREE_VALUES)
++ {
++ free(value);
++ }
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ kvs[kv_index] = value;
++ }
++ }
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values = kvs;
++ }
++ else
++ {
++ free(kvs);
++ }
++ }
++ else
++ {
++ return_values = destroy_long_map_values( &(map->lm), destruction_type, num_destroyed );
++ }
++ free(map);
++ }
++ return return_values;
++}
++
++
++
++
++/***************************************************
++ * long_map function definitions
++ ***************************************************/
++
++long_map* initialize_long_map(void)
++{
++ long_map* map = (long_map*)malloc(sizeof(long_map));
++ if(map != NULL) /* test for malloc failure */
++ {
++ map->root = NULL;
++ map->num_elements = 0;
++ }
++ return map;
++}
++
++void* get_long_map_element(long_map* map, unsigned long key)
++{
++ void* value = NULL;
++
++ if(map->root != NULL)
++ {
++ long_map_node* parent_node = map->root;
++ long_map_node* next_node;
++ while( key != parent_node->key && (next_node = (long_map_node *)(key < parent_node->key ? parent_node->left : parent_node->right)) != NULL)
++ {
++ parent_node = next_node;
++ }
++ if(parent_node->key == key)
++ {
++ value = parent_node->value;
++ }
++ }
++ return value;
++}
++
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key)
++{
++ void* value = NULL;
++ if(map->root != NULL)
++ {
++ long_map_node* next_node = map->root;
++ while( next_node->left != NULL)
++ {
++ next_node = next_node->left;
++ }
++ value = next_node->value;
++ *smallest_key = next_node->key;
++ }
++ return value;
++}
++
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key)
++{
++ void* value = NULL;
++ if(map->root != NULL)
++ {
++ long_map_node* next_node = map->root;
++ while( next_node->right != NULL)
++ {
++ next_node = next_node->right;
++ }
++ value = next_node->value;
++ *largest_key = next_node->key;
++ }
++ return value;
++}
++
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key)
++{
++ get_smallest_long_map_element(map, smallest_key);
++ return remove_long_map_element(map, *smallest_key);
++}
++
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key)
++{
++ get_largest_long_map_element(map, largest_key);
++ return remove_long_map_element(map, *largest_key);
++}
++
++
++/* if replacement performed, returns replaced value, otherwise null */
++void* set_long_map_element(long_map* map, unsigned long key, void* value)
++{
++ stack_node* parent_list = NULL;
++ void* old_value = NULL;
++ int old_value_found = 0;
++
++ long_map_node* parent_node;
++ long_map_node* next_node;
++ stack_node* next_parent;
++ stack_node* previous_parent;
++ signed char new_balance;
++
++
++ long_map_node* new_node = (long_map_node*)malloc(sizeof(long_map_node));
++ if(new_node == NULL)
++ {
++ return NULL;
++ }
++ new_node->value = value;
++ new_node->key = key;
++ new_node->left = NULL;
++ new_node->right = NULL;
++ new_node->balance = 0;
++
++
++
++ if(map->root == NULL)
++ {
++ map->root = new_node;
++ }
++ else
++ {
++ parent_node = map->root;
++
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ free(new_node);
++ return NULL; /* won't insert but won't seg fault */
++ }
++ next_parent->node_ptr = &(map->root);
++ next_parent->previous = parent_list;
++ parent_list = next_parent;
++
++ while( key != parent_node->key && (next_node = (key < parent_node->key ? parent_node->left : parent_node->right) ) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ free(new_node);
++ return NULL;
++ }
++ next_parent->node_ptr = key < parent_node->key ? &(parent_node->left) : &(parent_node->right);
++ next_parent->previous = parent_list;
++ next_parent->previous->direction = key < parent_node->key ? -1 : 1;
++ parent_list = next_parent;
++
++ parent_node = next_node;
++ }
++
++
++ if(key == parent_node->key)
++ {
++ old_value = parent_node->value;
++ old_value_found = 1;
++ parent_node->value = value;
++ free(new_node);
++ /* we merely replaced a node, no need to rebalance */
++ }
++ else
++ {
++ if(key < parent_node->key)
++ {
++ parent_node->left = (void*)new_node;
++ parent_list->direction = -1;
++ }
++ else
++ {
++ parent_node->right = (void*)new_node;
++ parent_list->direction = 1;
++ }
++
++
++ /* we inserted a node, rebalance */
++ previous_parent = parent_list;
++ new_balance = 1; /* initial value is not used, but must not be 0 for initial loop condition */
++
++
++ while(previous_parent != NULL && new_balance != 0)
++ {
++ new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, 1);
++ previous_parent = previous_parent->previous;
++ }
++ }
++ }
++
++ free_stack(parent_list);
++
++ if(old_value_found == 0)
++ {
++ map->num_elements = map->num_elements + 1;
++ }
++
++ return old_value;
++}
++
++
++void* remove_long_map_element(long_map* map, unsigned long key)
++{
++
++ void* value = NULL;
++
++ long_map_node* root_node = map->root;
++ stack_node* parent_list = NULL;
++
++
++ long_map_node* remove_parent;
++ long_map_node* remove_node;
++ long_map_node* next_node;
++
++ long_map_node* replacement;
++ long_map_node* replacement_parent;
++ long_map_node* replacement_next;
++
++ stack_node* next_parent;
++ stack_node* previous_parent;
++ stack_node* replacement_stack_node;
++
++
++ signed char new_balance;
++
++
++
++ if(root_node != NULL)
++ {
++ remove_parent = root_node;
++ remove_node = key < remove_parent->key ? remove_parent->left : remove_parent->right;
++
++ if(remove_node != NULL && key != remove_parent->key)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ next_parent->node_ptr = &(map->root);
++ next_parent->previous = parent_list;
++ parent_list = next_parent;
++ while( key != remove_node->key && (next_node = (key < remove_node->key ? remove_node->left : remove_node->right)) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++ next_parent->node_ptr = key < remove_parent->key ? &(remove_parent->left) : &(remove_parent->right);
++ next_parent->previous = parent_list;
++ next_parent->previous->direction = key < remove_parent->key ? -1 : 1;
++ parent_list = next_parent;
++
++
++ remove_parent = remove_node;
++ remove_node = next_node;
++ }
++ parent_list->direction = key < remove_parent-> key ? -1 : 1;
++ }
++ else
++ {
++ remove_node = remove_parent;
++ }
++
++
++ if(key == remove_node->key)
++ {
++
++ /* find replacement for node we are deleting */
++ if( remove_node->right == NULL )
++ {
++ replacement = remove_node->left;
++ }
++ else if( remove_node->right->left == NULL)
++ {
++
++ replacement = remove_node->right;
++ replacement->left = remove_node->left;
++ replacement->balance = remove_node->balance;
++
++ /* put pointer to replacement node into list for balance update */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = 1; /* replacement is from right */
++ if(remove_node == remove_parent) /* special case for root node */
++ {
++ replacement_stack_node->node_ptr = &(map->root);
++ }
++ else
++ {
++ replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++ }
++ parent_list = replacement_stack_node;
++
++ }
++ else
++ {
++ /* put pointer to replacement node into list for balance update */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = 1; /* we always look for replacement on right */
++ if(remove_node == remove_parent) /* special case for root node */
++ {
++ replacement_stack_node->node_ptr = &(map->root);
++ }
++ else
++ {
++ replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++ }
++
++ parent_list = replacement_stack_node;
++
++
++ /*
++ * put pointer to replacement node->right into list for balance update
++ * this node will have to be updated with the proper pointer
++ * after we have identified the replacement
++ */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = -1; /* we always look for replacement to left of this node */
++ parent_list = replacement_stack_node;
++
++ /* find smallest node on right (large) side of tree */
++ replacement_parent = remove_node->right;
++ replacement = replacement_parent->left;
++
++ while((replacement_next = replacement->left) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ next_parent->node_ptr = &(replacement_parent->left);
++ next_parent->previous = parent_list;
++ next_parent->direction = -1; /* we always go left */
++ parent_list = next_parent;
++
++ replacement_parent = replacement;
++ replacement = replacement_next;
++
++ }
++
++ replacement_parent->left = replacement->right;
++
++ replacement->left = remove_node->left;
++ replacement->right = remove_node->right;
++ replacement->balance = remove_node->balance;
++ replacement_stack_node->node_ptr = &(replacement->right);
++ }
++
++ /* insert replacement at proper location in tree */
++ if(remove_node == remove_parent)
++ {
++ map->root = replacement;
++ }
++ else
++ {
++ remove_parent->left = remove_node == remove_parent->left ? replacement : remove_parent->left;
++ remove_parent->right = remove_node == remove_parent->right ? replacement : remove_parent->right;
++ }
++
++
++ /* rebalance tree */
++ previous_parent = parent_list;
++ new_balance = 0;
++ while(previous_parent != NULL && new_balance == 0)
++ {
++ new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, -1);
++ previous_parent = previous_parent->previous;
++ }
++
++
++
++
++ /*
++ * since we found a value to remove, decrease number of elements in map
++ * set return value to the deleted node's value and free the node
++ */
++ map->num_elements = map->num_elements - 1;
++ value = remove_node->value;
++ free(remove_node);
++ }
++ }
++
++ free_stack(parent_list);
++
++ return value;
++}
++
++
++/* note: returned keys are dynamically allocated, you need to free them! */
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned)
++{
++ unsigned long* key_list = (unsigned long*)malloc((map->num_elements)*sizeof(unsigned long));
++ unsigned long next_key_index;
++ if(key_list == NULL)
++ {
++ *num_keys_returned = 0;
++ return NULL;
++ }
++ next_key_index = 0;
++ get_sorted_node_keys(map->root, key_list, &next_key_index, 0);
++
++ *num_keys_returned = map->num_elements;
++
++ return key_list;
++}
++
++
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned)
++{
++ void** value_list = (void**)malloc((map->num_elements+1)*sizeof(void*));
++ unsigned long next_value_index;
++
++ if(value_list == NULL)
++ {
++ *num_values_returned = 0;
++ return NULL;
++ }
++ next_value_index = 0;
++ get_sorted_node_values(map->root, value_list, &next_value_index, 0);
++ value_list[map->num_elements] = NULL; /* since we're dealing with pointers make list null terminated */
++
++ *num_values_returned = map->num_elements;
++ return value_list;
++
++}
++
++
++
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = destroy_long_map_values(map, destruction_type, num_destroyed);
++ free(map);
++ return return_values;
++}
++
++
++
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value))
++{
++ apply_to_every_long_map_node(map->root, apply_func);
++}
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value))
++{
++ apply_to_every_string_map_node( (map->lm).root, map->store_keys, apply_func);
++}
++
++
++/***************************************************
++ * internal utility function definitions
++ ***************************************************/
++static void free_stack(stack_node* stack)
++{
++ while(stack != NULL)
++ {
++ stack_node* prev_node = stack;
++ stack = prev_node->previous;
++ free(prev_node);
++ }
++
++}
++
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = NULL;
++ unsigned long return_index = 0;
++
++ *num_destroyed = 0;
++
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values = (void**)malloc((map->num_elements+1)*sizeof(void*));
++ if(return_values == NULL) /* deal with malloc failure */
++ {
++ destruction_type = DESTROY_MODE_IGNORE_VALUES; /* could cause memory leak, but there's no other way to be sure we won't seg fault */
++ }
++ else
++ {
++ return_values[map->num_elements] = NULL;
++ }
++ }
++ while(map->num_elements > 0)
++ {
++ unsigned long smallest_key;
++ void* removed_value = remove_smallest_long_map_element(map, &smallest_key);
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values[return_index] = removed_value;
++ }
++ if(destruction_type == DESTROY_MODE_FREE_VALUES)
++ {
++ free(removed_value);
++ }
++ return_index++;
++ *num_destroyed = *num_destroyed + 1;
++ }
++ return return_values;
++}
++
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value))
++{
++ if(node != NULL)
++ {
++ apply_to_every_long_map_node(node->left, apply_func);
++
++ apply_func(node->key, node->value);
++
++ apply_to_every_long_map_node(node->right, apply_func);
++ }
++}
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value))
++{
++ if(node != NULL)
++ {
++ apply_to_every_string_map_node(node->left, has_key, apply_func);
++
++ if(has_key)
++ {
++ string_map_key_value* kv = (string_map_key_value*)(node->value);
++ apply_func(kv->key, kv->value);
++ }
++ else
++ {
++ apply_func(NULL, node->value);
++ }
++ apply_to_every_string_map_node(node->right, has_key, apply_func);
++ }
++}
++
++
++
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth)
++{
++ if(node != NULL)
++ {
++ get_sorted_node_keys(node->left, key_list, next_key_index, depth+1);
++
++ key_list[ *next_key_index ] = node->key;
++ (*next_key_index)++;
++
++ get_sorted_node_keys(node->right, key_list, next_key_index, depth+1);
++ }
++}
++
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth)
++{
++ if(node != NULL)
++ {
++ get_sorted_node_values(node->left, value_list, next_value_index, depth+1);
++
++ value_list[ *next_value_index ] = node->value;
++ (*next_value_index)++;
++
++ get_sorted_node_values(node->right, value_list, next_value_index, depth+1);
++ }
++}
++
++
++
++/*
++ * direction = -1 indicates left subtree updated, direction = 1 for right subtree
++ * update_op = -1 indicates delete node, update_op = 1 for insert node
++ */
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op)
++{
++ /*
++ printf( "original: key = %ld, balance = %d, update_op=%d, direction=%d\n", (*n)->key, (*n)->balance, update_op, direction);
++ */
++
++ (*n)->balance = (*n)->balance + (update_op*direction);
++
++ if( (*n)->balance < -1)
++ {
++ if((*n)->left->balance < 0)
++ {
++ rotate_right(n);
++ (*n)->right->balance = 0;
++ (*n)->balance = 0;
++ }
++ else if((*n)->left->balance == 0)
++ {
++ rotate_right(n);
++ (*n)->right->balance = -1;
++ (*n)->balance = 1;
++ }
++ else if((*n)->left->balance > 0)
++ {
++ rotate_left( &((*n)->left) );
++ rotate_right(n);
++ /*
++ if( (*n)->balance < 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 1;
++ }
++ else if( (*n)->balance == 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 0;
++ }
++ else if( (*n)->balance > 0 )
++ {
++ (*n)->left->balance = -1;
++ (*n)->right->balance = 0;
++ }
++ */
++ (*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++ (*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++ (*n)->balance = 0;
++ }
++ }
++ if( (*n)->balance > 1)
++ {
++ if((*n)->right->balance > 0)
++ {
++ rotate_left(n);
++ (*n)->left->balance = 0;
++ (*n)->balance = 0;
++ }
++ else if ((*n)->right->balance == 0)
++ {
++ rotate_left(n);
++ (*n)->left->balance = 1;
++ (*n)->balance = -1;
++ }
++ else if((*n)->right->balance < 0)
++ {
++ rotate_right( &((*n)->right) );
++ rotate_left(n);
++ /*
++ if( (*n)->balance < 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 1;
++ }
++ else if( (*n)->balance == 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 0;
++ }
++ else if( (*n)->balance > 0 )
++ {
++ (*n)->left->balance = -1;
++ (*n)->right->balance = 0;
++ }
++ */
++ (*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++ (*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++ (*n)->balance = 0;
++ }
++ }
++
++ /*
++ printf( "key = %ld, balance = %d\n", (*n)->key, (*n)->balance);
++ */
++
++ return (*n)->balance;
++}
++
++
++static void rotate_right (long_map_node** parent)
++{
++ long_map_node* old_parent = *parent;
++ long_map_node* pivot = old_parent->left;
++ old_parent->left = pivot->right;
++ pivot->right = old_parent;
++
++ *parent = pivot;
++}
++
++static void rotate_left (long_map_node** parent)
++{
++ long_map_node* old_parent = *parent;
++ long_map_node* pivot = old_parent->right;
++ old_parent->right = pivot->left;
++ pivot->left = old_parent;
++
++ *parent = pivot;
++}
++
++
++
++/***************************************************************************
++ * This algorithm was created for the sdbm database library (a public-domain
++ * reimplementation of ndbm) and seems to work relatively well in
++ * scrambling bits
++ *
++ *
++ * This code was derived from code found at:
++ * http://www.cse.yorku.ca/~oz/hash.html
++ ***************************************************************************/
++static unsigned long sdbm_string_hash(const char *key)
++{
++ unsigned long hashed_key = 0;
++
++ int index = 0;
++ unsigned int nextch;
++ while(key[index] != '\0')
++ {
++ nextch = key[index];
++ hashed_key = nextch + (hashed_key << 6) + (hashed_key << 16) - hashed_key;
++ index++;
++ }
++ return hashed_key;
++}
++
++
+--- linux.orig/net/ipv4/netfilter/Makefile 2015-06-15 00:19:31.000000000 +0800
++++ linux.new/net/ipv4/netfilter/Makefile 2015-06-19 03:02:55.425667617 +0800
+@@ -53,6 +53,10 @@
+ obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
+
+ # matches
++obj-$(CONFIG_IP_NF_MATCH_BANDWIDTH) += ipt_bandwidth.o
++obj-$(CONFIG_IP_NF_MATCH_TIMERANGE) += ipt_timerange.o
++obj-$(CONFIG_IP_NF_MATCH_WEBMON) += ipt_webmon.o
++obj-$(CONFIG_IP_NF_MATCH_WEBURL) += ipt_weburl.o
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
+
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/include/linux/netfilter_ipv4/ipt_weburl.h 2015-06-19 03:02:54.757695519 +0800
+@@ -0,0 +1,45 @@
++/* weburl -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_WEBURL_H
++#define _IPT_WEBURL_H
++
++
++#define MAX_TEST_STR 1024
++
++#define WEBURL_CONTAINS_TYPE 1
++#define WEBURL_REGEX_TYPE 2
++#define WEBURL_EXACT_TYPE 3
++#define WEBURL_ALL_PART 4
++#define WEBURL_DOMAIN_PART 5
++#define WEBURL_PATH_PART 6
++
++struct ipt_weburl_info
++{
++ char test_str[MAX_TEST_STR];
++ unsigned char match_type;
++ unsigned char match_part;
++ unsigned char invert;
++};
++#endif /*_IPT_WEBURL_H*/
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/include/linux/netfilter_ipv4/ipt_timerange.h 2015-06-19 03:02:55.289673298 +0800
+@@ -0,0 +1,43 @@
++/* timerange -- An iptables extension to match multiple timeranges within a week
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_TIMERANGE_H
++#define _IPT_TIMERANGE_H
++
++
++#define RANGE_LENGTH 51
++
++#define HOURS 1
++#define WEEKDAYS 2
++#define DAYS_HOURS (HOURS+WEEKDAYS)
++#define WEEKLY_RANGE 4
++
++
++struct ipt_timerange_info
++{
++ long ranges[RANGE_LENGTH];
++ char days[7];
++ char type;
++ unsigned char invert;
++};
++#endif /*_IPT_TIMERANGE_H*/
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/include/linux/netfilter_ipv4/ipt_bandwidth.h 2015-06-19 03:02:55.421667784 +0800
+@@ -0,0 +1,106 @@
++/* bandwidth -- An iptables extension for bandwidth monitoring/control
++ * Can be used to efficiently monitor bandwidth and/or implement bandwidth quotas
++ * Can be queried using the iptbwctl userspace library
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _IPT_BANDWIDTH_H
++#define _IPT_BANDWIDTH_H
++
++/*flags -- first three don't map to parameters the rest do */
++#define BANDWIDTH_INITIALIZED 1
++#define BANDWIDTH_REQUIRES_SUBNET 2
++#define BANDWIDTH_SUBNET 4
++#define BANDWIDTH_CMP 8
++#define BANDWIDTH_CURRENT 16
++#define BANDWIDTH_RESET_INTERVAL 32
++#define BANDWIDTH_RESET_TIME 64
++#define BANDWIDTH_LAST_BACKUP 128
++
++
++/* parameter defs that don't map to flag bits */
++#define BANDWIDTH_TYPE 70
++#define BANDWIDTH_ID 71
++#define BANDWIDTH_GT 72
++#define BANDWIDTH_LT 73
++#define BANDWIDTH_MONITOR 74
++#define BANDWIDTH_CHECK 75
++#define BANDWIDTH_CHECK_NOSWAP 76
++#define BANDWIDTH_CHECK_SWAP 77
++#define BANDWIDTH_NUM_INTERVALS 78
++
++/* possible reset intervals */
++#define BANDWIDTH_MINUTE 80
++#define BANDWIDTH_HOUR 81
++#define BANDWIDTH_DAY 82
++#define BANDWIDTH_WEEK 83
++#define BANDWIDTH_MONTH 84
++#define BANDWIDTH_NEVER 85
++
++/* possible monitoring types */
++#define BANDWIDTH_COMBINED 90
++#define BANDWIDTH_INDIVIDUAL_SRC 91
++#define BANDWIDTH_INDIVIDUAL_DST 92
++#define BANDWIDTH_INDIVIDUAL_LOCAL 93
++#define BANDWIDTH_INDIVIDUAL_REMOTE 94
++
++
++
++/* socket id parameters (for userspace i/o) */
++#define BANDWIDTH_SET 2048
++#define BANDWIDTH_GET 2049
++
++/* max id length */
++#define BANDWIDTH_MAX_ID_LENGTH 50
++
++/* 4 bytes for total number of entries, 100 entries of 12 bytes each, + 1 byte indicating whether all have been dumped */
++#define BANDWIDTH_QUERY_LENGTH 1205
++#define BANDWIDTH_ENTRY_LENGTH 12
++
++
++struct ipt_bandwidth_info
++{
++ char id[BANDWIDTH_MAX_ID_LENGTH];
++ unsigned char type;
++ unsigned char check_type;
++ uint32_t local_subnet;
++ uint32_t local_subnet_mask;
++
++ unsigned char cmp;
++ unsigned char reset_is_constant_interval;
++ time_t reset_interval; //specific fixed type (see above) or interval length in seconds
++ time_t reset_time; //seconds from start of month/week/day/hour/minute to do reset, or start point of interval if it is a constant interval
++ uint64_t bandwidth_cutoff;
++ uint64_t current_bandwidth;
++ time_t next_reset;
++ time_t previous_reset;
++ time_t last_backup_time;
++
++ uint32_t num_intervals_to_save;
++
++
++ unsigned long hashed_id;
++ void* iam;
++ uint64_t* combined_bw;
++ struct ipt_bandwidth_info* non_const_self;
++ unsigned long* ref_count;
++
++
++};
++#endif /*_IPT_BANDWIDTH_H*/
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/include/linux/netfilter_ipv4/ipt_webmon.h 2015-06-19 03:02:55.209676639 +0800
+@@ -0,0 +1,63 @@
++/* webmon -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_WEBMON_H
++#define _IPT_WEBMON_H
++
++
++#define WEBMON_MAX_IPS 256
++#define WEBMON_MAX_IP_RANGES 16
++
++#define WEBMON_EXCLUDE 1
++#define WEBMON_INCLUDE 2
++
++#define WEBMON_MAXDOMAIN 4
++#define WEBMON_MAXSEARCH 8
++
++#define WEBMON_DOMAIN 16
++#define WEBMON_SEARCH 32
++
++
++#define WEBMON_SET 3064
++
++struct ipt_webmon_ip_range
++{
++ uint32_t start;
++ uint32_t end;
++};
++
++struct ipt_webmon_info
++{
++ uint32_t max_domains;
++ uint32_t max_searches;
++ uint32_t exclude_ips[WEBMON_MAX_IPS];
++ struct ipt_webmon_ip_range exclude_ranges[WEBMON_MAX_IP_RANGES];
++ uint32_t num_exclude_ips;
++ uint32_t num_exclude_ranges;
++ unsigned char exclude_type;
++ uint32_t* ref_count;
++
++};
++
++#endif /*_IPT_WEBMON_H*/
Index: target/linux/generic/patches-4.0/690-imq.patch
===================================================================
--- target/linux/generic/patches-4.0/690-imq.patch (revision 0)
+++ target/linux/generic/patches-4.0/690-imq.patch (working copy)
@@ -0,0 +1,1784 @@
+net: add Intermediate Queueing Device (imq)
+
+From: Jussi Kivilinna <jussi.kivilinna@iki.fi>
+
+This patch is for kernel version 3.12.4+.
+
+See: http://linuximq.net/
+
+Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
+---
+ drivers/net/Kconfig | 119 ++++
+ drivers/net/Makefile | 1
+ drivers/net/imq.c | 1007 +++++++++++++++++++++++++++++++
+ include/linux/imq.h | 13
+ include/linux/netfilter/xt_IMQ.h | 9
+ include/linux/netfilter_ipv4/ipt_IMQ.h | 10
+ include/linux/netfilter_ipv6/ip6t_IMQ.h | 10
+ include/linux/skbuff.h | 22 +
+ include/net/netfilter/nf_queue.h | 6
+ include/uapi/linux/netfilter.h | 3
+ net/core/dev.c | 8
+ net/core/skbuff.c | 112 +++
+ net/ipv6/ip6_output.c | 10
+ net/netfilter/Kconfig | 12
+ net/netfilter/Makefile | 1
+ net/netfilter/core.c | 6
+ net/netfilter/nf_internals.h | 2
+ net/netfilter/nf_queue.c | 36 +
+ net/netfilter/xt_IMQ.c | 72 ++
+ 19 files changed, 1449 insertions(+), 10 deletions(-)
+ create mode 100644 drivers/net/imq.c
+ create mode 100644 include/linux/imq.h
+ create mode 100644 include/linux/netfilter/xt_IMQ.h
+ create mode 100644 include/linux/netfilter_ipv4/ipt_IMQ.h
+ create mode 100644 include/linux/netfilter_ipv6/ip6t_IMQ.h
+ create mode 100644 net/netfilter/xt_IMQ.c
+
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index b45b240..5a20da0 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -203,6 +203,125 @@ config RIONET_RX_SIZE
+ depends on RIONET
+ default "128"
+
++config IMQ
++ tristate "IMQ (intermediate queueing device) support"
++ depends on NETDEVICES && NETFILTER
++ ---help---
++ The IMQ device(s) is used as placeholder for QoS queueing
++ disciplines. Every packet entering/leaving the IP stack can be
++ directed through the IMQ device where it's enqueued/dequeued to the
++ attached qdisc. This allows you to treat network devices as classes
++ and distribute bandwidth among them. Iptables is used to specify
++ through which IMQ device, if any, packets travel.
++
++ More information at: http://www.linuximq.net/
++
++ To compile this driver as a module, choose M here: the module
++ will be called imq. If unsure, say N.
++
++choice
++ prompt "IMQ behavior (PRE/POSTROUTING)"
++ depends on IMQ
++ default IMQ_BEHAVIOR_AB
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ IMQ can work in any of the following ways:
++
++ PREROUTING | POSTROUTING
++ -----------------|-------------------
++ #1 After NAT | After NAT
++ #2 After NAT | Before NAT
++ #3 Before NAT | After NAT
++ #4 Before NAT | Before NAT
++
++ The default behavior is to hook before NAT on PREROUTING
++ and after NAT on POSTROUTING (#3).
++
++	  These settings are especially useful when trying to use IMQ
++ to shape NATed clients.
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_AA
++ bool "IMQ AA"
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ Choosing this option will make IMQ hook like this:
++
++ PREROUTING: After NAT
++ POSTROUTING: After NAT
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_AB
++ bool "IMQ AB"
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ Choosing this option will make IMQ hook like this:
++
++ PREROUTING: After NAT
++ POSTROUTING: Before NAT
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_BA
++ bool "IMQ BA"
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ Choosing this option will make IMQ hook like this:
++
++ PREROUTING: Before NAT
++ POSTROUTING: After NAT
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++config IMQ_BEHAVIOR_BB
++ bool "IMQ BB"
++ help
++ This setting defines how IMQ behaves in respect to its
++ hooking in PREROUTING and POSTROUTING.
++
++ Choosing this option will make IMQ hook like this:
++
++ PREROUTING: Before NAT
++ POSTROUTING: Before NAT
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
++endchoice
++
++config IMQ_NUM_DEVS
++ int "Number of IMQ devices"
++ range 2 16
++ depends on IMQ
++ default "16"
++ help
++ This setting defines how many IMQ devices will be created.
++
++ The default value is 16.
++
++ More information can be found at: www.linuximq.net
++
++ If not sure leave the default settings alone.
++
+ config TUN
+ tristate "Universal TUN/TAP device driver support"
+ select CRC32
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index 3fef8a8..12dafc0 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -9,6 +9,7 @@ obj-$(CONFIG_BONDING) += bonding/
+ obj-$(CONFIG_DUMMY) += dummy.o
+ obj-$(CONFIG_EQUALIZER) += eql.o
+ obj-$(CONFIG_IFB) += ifb.o
++obj-$(CONFIG_IMQ) += imq.o
+ obj-$(CONFIG_MACVLAN) += macvlan.o
+ obj-$(CONFIG_MACVTAP) += macvtap.o
+ obj-$(CONFIG_MII) += mii.o
+diff --git a/drivers/net/imq.c b/drivers/net/imq.c
+new file mode 100644
+index 0000000..801bc8c
+--- /dev/null
++++ b/drivers/net/imq.c
+@@ -0,0 +1,1012 @@
++/*
++ * Pseudo-driver for the intermediate queue device.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Authors: Patrick McHardy, <kaber@trash.net>
++ *
++ * The first version was written by Martin Devera, <devik@cdi.cz>
++ *
++ * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
++ * - Update patch to 2.4.21
++ * Sebastian Strollo <sstrollo@nortelnetworks.com>
++ * - Fix "Dead-loop on netdevice imq"-issue
++ * Marcel Sebek <sebek64@post.cz>
++ * - Update to 2.6.2-rc1
++ *
++ * After some time of inactivity there is a group taking care
++ * of IMQ again: http://www.linuximq.net
++ *
++ *
++ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
++ * including the following changes:
++ *
++ * - Correction of ipv6 support "+"s issue (Hasso Tepper)
++ * - Correction of imq_init_devs() issue that resulted in
++ * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
++ * - Addition of functionality to choose number of IMQ devices
++ * during kernel config (Andre Correa)
++ * - Addition of functionality to choose how IMQ hooks on
++ * PRE and POSTROUTING (after or before NAT) (Andre Correa)
++ * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
++ *
++ *
++ * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
++ * released with almost no problems. 2.6.14-x was released
++ * with some important changes: nfcache was removed; After
++ * some weeks of trouble we figured out that some IMQ fields
++ * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
++ * These functions are correctly patched by this new patch version.
++ *
++ * Thanks for all who helped to figure out all the problems with
++ * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
++ * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
++ * I didn't forget anybody). I apologize again for my lack of time.
++ *
++ *
++ * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
++ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
++ * recursive locking. New initialization routines to fix 'rmmod' not
++ * working anymore. Used code from ifb.c. (Jussi Kivilinna)
++ *
++ * 2008/08/06 - 2.6.26 - (JK)
++ * - Replaced tasklet with 'netif_schedule()'.
++ * - Cleaned up and added comments for imq_nf_queue().
++ *
++ * 2009/04/12
++ * - Add skb_save_cb/skb_restore_cb helper functions for backuping
++ * control buffer. This is needed because qdisc-layer on kernels
++ * 2.6.27 and newer overwrite control buffer. (Jussi Kivilinna)
++ * - Add better locking for IMQ device. Hopefully this will solve
++ * SMP issues. (Jussi Kivilinna)
++ * - Port to 2.6.27
++ * - Port to 2.6.28
++ * - Port to 2.6.29 + fix rmmod not working
++ *
++ * 2009/04/20 - (Jussi Kivilinna)
++ * - Use netdevice feature flags to avoid extra packet handling
++ * by core networking layer and possibly increase performance.
++ *
++ * 2009/09/26 - (Jussi Kivilinna)
++ * - Add imq_nf_reinject_lockless to fix deadlock with
++ * imq_nf_queue/imq_nf_reinject.
++ *
++ * 2009/12/08 - (Jussi Kivilinna)
++ * - Port to 2.6.32
++ * - Add check for skb->nf_queue_entry==NULL in imq_dev_xmit()
++ * - Also add better error checking for skb->nf_queue_entry usage
++ *
++ * 2010/02/25 - (Jussi Kivilinna)
++ * - Port to 2.6.33
++ *
++ * 2010/08/15 - (Jussi Kivilinna)
++ * - Port to 2.6.35
++ * - Simplify hook registration by using nf_register_hooks.
++ * - nf_reinject doesn't need spinlock around it, therefore remove
++ * imq_nf_reinject function. Other nf_reinject users protect
++ * their own data with spinlock. With IMQ however all data is
++ * needed is stored per skbuff, so no locking is needed.
++ * - Changed IMQ to use 'separate' NF_IMQ_QUEUE instead of
++ * NF_QUEUE, this allows working coexistance of IMQ and other
++ * NF_QUEUE users.
++ * - Make IMQ multi-queue. Number of IMQ device queues can be
++ * increased with 'numqueues' module parameters. Default number
++ * of queues is 1, in other words by default IMQ works as
++ * single-queue device. Multi-queue selection is based on
++ * IFB multi-queue patch by Changli Gao <xiaosuo@gmail.com>.
++ *
++ * 2011/03/18 - (Jussi Kivilinna)
++ * - Port to 2.6.38
++ *
++ * 2011/07/12 - (syoder89@gmail.com)
++ * - Crash fix that happens when the receiving interface has more
++ * than one queue (add missing skb_set_queue_mapping in
++ * imq_select_queue).
++ *
++ * 2011/07/26 - (Jussi Kivilinna)
++ * - Add queue mapping checks for packets exiting IMQ.
++ * - Port to 3.0
++ *
++ * 2011/08/16 - (Jussi Kivilinna)
++ * - Clear IFF_TX_SKB_SHARING flag that was added for linux 3.0.2
++ *
++ * 2011/11/03 - Germano Michel <germanomichel@gmail.com>
++ * - Fix IMQ for net namespaces
++ *
++ * 2011/11/04 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ * - Port to 3.1
++ * - Clean-up, move 'get imq device pointer by imqX name' to
++ * separate function from imq_nf_queue().
++ *
++ * 2012/01/05 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ * - Port to 3.2
++ *
++ * 2012/03/19 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ * - Port to 3.3
++ *
++ * 2012/12/12 - Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
++ * - Port to 3.7
++ * - Fix checkpatch.pl warnings
++ *
++ * 2013/09/10 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ * - Fixed GSO handling for 3.10, see imq_nf_queue() for comments.
++ * - Don't copy skb->cb_next when copying or cloning skbuffs.
++ *
++ * 2013/09/16 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ * - Port to 3.11
++ *
++ * 2013/11/12 - Jussi Kivilinna <jussi.kivilinna@iki.fi>
++ * - Port to 3.12
++ *
++ * Also, many thanks to pablo Sebastian Greco for making the initial
++ * patch and to those who helped the testing.
++ *
++ * More info at: http://www.linuximq.net/ (Andre Correa)
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/moduleparam.h>
++#include <linux/list.h>
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/if_arp.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_ipv4.h>
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ #include <linux/netfilter_ipv6.h>
++#endif
++#include <linux/imq.h>
++#include <net/pkt_sched.h>
++#include <net/netfilter/nf_queue.h>
++#include <net/sock.h>
++#include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <linux/if_vlan.h>
++#include <linux/if_pppox.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num);
++
++static unsigned int imq_nf_hook(const struct nf_hook_ops *ops,
++ struct sk_buff *pskb,
++ const struct net_device *indev,
++ const struct net_device *outdev,
++ int (*okfn)(struct sk_buff *));
++
++static struct nf_hook_ops imq_ops[] = {
++ {
++ /* imq_ingress_ipv4 */
++ .hook = imq_nf_hook,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_INET_PRE_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++ .priority = NF_IP_PRI_MANGLE + 1,
++#else
++ .priority = NF_IP_PRI_NAT_DST + 1,
++#endif
++ },
++ {
++ /* imq_egress_ipv4 */
++ .hook = imq_nf_hook,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_INET_POST_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
++ .priority = NF_IP_PRI_LAST,
++#else
++ .priority = NF_IP_PRI_NAT_SRC - 1,
++#endif
++ },
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ {
++ /* imq_ingress_ipv6 */
++ .hook = imq_nf_hook,
++ .owner = THIS_MODULE,
++ .pf = PF_INET6,
++ .hooknum = NF_INET_PRE_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++ .priority = NF_IP6_PRI_MANGLE + 1,
++#else
++ .priority = NF_IP6_PRI_NAT_DST + 1,
++#endif
++ },
++ {
++ /* imq_egress_ipv6 */
++ .hook = imq_nf_hook,
++ .owner = THIS_MODULE,
++ .pf = PF_INET6,
++ .hooknum = NF_INET_POST_ROUTING,
++#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
++ .priority = NF_IP6_PRI_LAST,
++#else
++ .priority = NF_IP6_PRI_NAT_SRC - 1,
++#endif
++ },
++#endif
++};
++
++#if defined(CONFIG_IMQ_NUM_DEVS)
++static int numdevs = CONFIG_IMQ_NUM_DEVS;
++#else
++static int numdevs = IMQ_MAX_DEVS;
++#endif
++
++static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
++
++#define IMQ_MAX_QUEUES 32
++static int numqueues = 1;
++static u32 imq_hashrnd;
++
++static inline __be16 pppoe_proto(const struct sk_buff *skb)
++{
++ return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
++ sizeof(struct pppoe_hdr)));
++}
++
++static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
++{
++ unsigned int pull_len;
++ u16 protocol = skb->protocol;
++ u32 addr1, addr2;
++ u32 hash, ihl = 0;
++ union {
++ u16 in16[2];
++ u32 in32;
++ } ports;
++ u8 ip_proto;
++
++ pull_len = 0;
++
++recheck:
++ switch (protocol) {
++ case htons(ETH_P_8021Q): {
++ if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
++ goto other;
++
++ pull_len += VLAN_HLEN;
++ skb->network_header += VLAN_HLEN;
++
++ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
++ goto recheck;
++ }
++
++ case htons(ETH_P_PPP_SES): {
++ if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
++ goto other;
++
++ pull_len += PPPOE_SES_HLEN;
++ skb->network_header += PPPOE_SES_HLEN;
++
++ protocol = pppoe_proto(skb);
++ goto recheck;
++ }
++
++ case htons(ETH_P_IP): {
++ const struct iphdr *iph = ip_hdr(skb);
++
++ if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
++ goto other;
++
++ addr1 = iph->daddr;
++ addr2 = iph->saddr;
++
++ ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
++ iph->protocol : 0;
++ ihl = ip_hdrlen(skb);
++
++ break;
++ }
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ case htons(ETH_P_IPV6): {
++ const struct ipv6hdr *iph = ipv6_hdr(skb);
++ __be16 fo = 0;
++
++ if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
++ goto other;
++
++ addr1 = iph->daddr.s6_addr32[3];
++ addr2 = iph->saddr.s6_addr32[3];
++ ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto,
++ &fo);
++ if (unlikely(ihl < 0))
++ goto other;
++
++ break;
++ }
++#endif
++ default:
++other:
++ if (pull_len != 0) {
++ skb_push(skb, pull_len);
++ skb->network_header -= pull_len;
++ }
++
++ return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
++ }
++
++ if (addr1 > addr2)
++ swap(addr1, addr2);
++
++ switch (ip_proto) {
++ case IPPROTO_TCP:
++ case IPPROTO_UDP:
++ case IPPROTO_DCCP:
++ case IPPROTO_ESP:
++ case IPPROTO_AH:
++ case IPPROTO_SCTP:
++ case IPPROTO_UDPLITE: {
++ if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
++ if (ports.in16[0] > ports.in16[1])
++ swap(ports.in16[0], ports.in16[1]);
++ break;
++ }
++ /* fall-through */
++ }
++ default:
++ ports.in32 = 0;
++ break;
++ }
++
++ if (pull_len != 0) {
++ skb_push(skb, pull_len);
++ skb->network_header -= pull_len;
++ }
++
++ hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);
++
++ return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
++}
++
++static inline bool sk_tx_queue_recorded(struct sock *sk)
++{
++ return (sk_tx_queue_get(sk) >= 0);
++}
++
++static struct netdev_queue *imq_select_queue(struct net_device *dev,
++ struct sk_buff *skb)
++{
++ u16 queue_index = 0;
++ u32 hash;
++
++ if (likely(dev->real_num_tx_queues == 1))
++ goto out;
++
++	/* IMQ can be receiving ingress or egress packets. */
++
++ /* Check first for if rx_queue is set */
++ if (skb_rx_queue_recorded(skb)) {
++ queue_index = skb_get_rx_queue(skb);
++ goto out;
++ }
++
++ /* Check if socket has tx_queue set */
++ if (sk_tx_queue_recorded(skb->sk)) {
++ queue_index = sk_tx_queue_get(skb->sk);
++ goto out;
++ }
++
++ /* Try use socket hash */
++ if (skb->sk && skb->sk->sk_hash) {
++ hash = skb->sk->sk_hash;
++ queue_index =
++ (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
++ goto out;
++ }
++
++ /* Generate hash from packet data */
++ queue_index = imq_hash(dev, skb);
++
++out:
++ if (unlikely(queue_index >= dev->real_num_tx_queues))
++ queue_index = (u16)((u32)queue_index % dev->real_num_tx_queues);
++
++ skb_set_queue_mapping(skb, queue_index);
++ return netdev_get_tx_queue(dev, queue_index);
++}
++
++static struct net_device_stats *imq_get_stats(struct net_device *dev)
++{
++ return &dev->stats;
++}
++
++/* called for packets kfree'd in qdiscs at places other than enqueue */
++static void imq_skb_destructor(struct sk_buff *skb)
++{
++ struct nf_queue_entry *entry = skb->nf_queue_entry;
++
++ skb->nf_queue_entry = NULL;
++
++ if (entry) {
++ nf_queue_entry_release_refs(entry);
++ kfree(entry);
++ }
++
++ skb_restore_cb(skb); /* kfree backup */
++}
++
++static void imq_done_check_queue_mapping(struct sk_buff *skb,
++ struct net_device *dev)
++{
++ unsigned int queue_index;
++
++ /* Don't let queue_mapping be left too large after exiting IMQ */
++ if (likely(skb->dev != dev && skb->dev != NULL)) {
++ queue_index = skb_get_queue_mapping(skb);
++ if (unlikely(queue_index >= skb->dev->real_num_tx_queues)) {
++ queue_index = (u16)((u32)queue_index %
++ skb->dev->real_num_tx_queues);
++ skb_set_queue_mapping(skb, queue_index);
++ }
++ } else {
++ /* skb->dev was IMQ device itself or NULL, be on safe side and
++ * just clear queue mapping.
++ */
++ skb_set_queue_mapping(skb, 0);
++ }
++}
++
++static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct nf_queue_entry *entry = skb->nf_queue_entry;
++
++ skb->nf_queue_entry = NULL;
++ dev->trans_start = jiffies;
++
++ dev->stats.tx_bytes += skb->len;
++ dev->stats.tx_packets++;
++
++ if (unlikely(entry == NULL)) {
++ /* We don't know what is going on here.. packet is queued for
++ * imq device, but (probably) not by us.
++ *
++ * If this packet was not send here by imq_nf_queue(), then
++ * skb_save_cb() was not used and skb_free() should not show:
++ * WARNING: IMQ: kfree_skb: skb->cb_next:..
++ * and/or
++ * WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
++ *
++ * However if this message is shown, then IMQ is somehow broken
++ * and you should report this to linuximq.net.
++ */
++
++ /* imq_dev_xmit is black hole that eats all packets, report that
++ * we eat this packet happily and increase dropped counters.
++ */
++
++ dev->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++ }
++
++ skb_restore_cb(skb); /* restore skb->cb */
++
++ skb->imq_flags = 0;
++ skb->destructor = NULL;
++
++ imq_done_check_queue_mapping(skb, dev);
++
++ nf_reinject(entry, NF_ACCEPT);
++
++ return NETDEV_TX_OK;
++}
++
++static struct net_device *get_imq_device_by_index(int index)
++{
++ struct net_device *dev = NULL;
++ struct net *net;
++ char buf[8];
++
++ /* get device by name and cache result */
++ snprintf(buf, sizeof(buf), "imq%d", index);
++
++ /* Search device from all namespaces. */
++ for_each_net(net) {
++ dev = dev_get_by_name(net, buf);
++ if (dev)
++ break;
++ }
++
++ if (WARN_ON_ONCE(dev == NULL)) {
++ /* IMQ device not found. Exotic config? */
++ return ERR_PTR(-ENODEV);
++ }
++
++ imq_devs_cache[index] = dev;
++ dev_put(dev);
++
++ return dev;
++}
++
++static struct nf_queue_entry *nf_queue_entry_dup(struct nf_queue_entry *e)
++{
++ struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
++ if (entry) {
++ if (nf_queue_entry_get_refs(entry))
++ return entry;
++ kfree(entry);
++ }
++ return NULL;
++}
++
++#ifdef CONFIG_BRIDGE_NETFILTER
++/* When called from bridge netfilter, skb->data must point to MAC header
++ * before calling skb_gso_segment(). Else, original MAC header is lost
++ * and segmented skbs will be sent to wrong destination.
++ */
++static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
++{
++ if (skb->nf_bridge)
++ __skb_push(skb, skb->network_header - skb->mac_header);
++}
++
++static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
++{
++ if (skb->nf_bridge)
++ __skb_pull(skb, skb->network_header - skb->mac_header);
++}
++#else
++#define nf_bridge_adjust_skb_data(s) do {} while (0)
++#define nf_bridge_adjust_segmented_data(s) do {} while (0)
++#endif
++
++static void free_entry(struct nf_queue_entry *entry)
++{
++ nf_queue_entry_release_refs(entry);
++ kfree(entry);
++}
++
++static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev);
++
++static int __imq_nf_queue_gso(struct nf_queue_entry *entry,
++ struct net_device *dev, struct sk_buff *skb)
++{
++ int ret = -ENOMEM;
++ struct nf_queue_entry *entry_seg;
++
++ nf_bridge_adjust_segmented_data(skb);
++
++ if (skb->next == NULL) { /* last packet, no need to copy entry */
++ struct sk_buff *gso_skb = entry->skb;
++ entry->skb = skb;
++ ret = __imq_nf_queue(entry, dev);
++ if (ret)
++ entry->skb = gso_skb;
++ return ret;
++ }
++
++ skb->next = NULL;
++
++ entry_seg = nf_queue_entry_dup(entry);
++ if (entry_seg) {
++ entry_seg->skb = skb;
++ ret = __imq_nf_queue(entry_seg, dev);
++ if (ret)
++ free_entry(entry_seg);
++ }
++ return ret;
++}
++
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
++{
++ struct sk_buff *skb, *segs;
++ struct net_device *dev;
++ unsigned int queued;
++ int index, retval, err;
++
++ index = entry->skb->imq_flags & IMQ_F_IFMASK;
++ if (unlikely(index > numdevs - 1)) {
++ if (net_ratelimit())
++ pr_warn("IMQ: invalid device specified, highest is %u\n",
++ numdevs - 1);
++ retval = -EINVAL;
++ goto out_no_dev;
++ }
++
++ /* check for imq device by index from cache */
++ dev = imq_devs_cache[index];
++ if (unlikely(!dev)) {
++ dev = get_imq_device_by_index(index);
++ if (IS_ERR(dev)) {
++ retval = PTR_ERR(dev);
++ goto out_no_dev;
++ }
++ }
++
++ if (unlikely(!(dev->flags & IFF_UP))) {
++ entry->skb->imq_flags = 0;
++ retval = -ECANCELED;
++ goto out_no_dev;
++ }
++
++ if (!skb_is_gso(entry->skb))
++ return __imq_nf_queue(entry, dev);
++
++	/* Since 3.10.x, GSO handling moved here as a result of upstream commit
++ * a5fedd43d5f6c94c71053a66e4c3d2e35f1731a2 (netfilter: move
++ * skb_gso_segment into nfnetlink_queue module).
++ *
++ * Following code replicates the gso handling from
++ * 'net/netfilter/nfnetlink_queue_core.c':nfqnl_enqueue_packet().
++ */
++
++ skb = entry->skb;
++
++ switch (entry->pf) {
++ case NFPROTO_IPV4:
++ skb->protocol = htons(ETH_P_IP);
++ break;
++ case NFPROTO_IPV6:
++ skb->protocol = htons(ETH_P_IPV6);
++ break;
++ }
++
++ nf_bridge_adjust_skb_data(skb);
++ segs = skb_gso_segment(skb, 0);
++ /* Does not use PTR_ERR to limit the number of error codes that can be
++ * returned by nf_queue. For instance, callers rely on -ECANCELED to
++ * mean 'ignore this hook'.
++ */
++ err = -ENOBUFS;
++ if (IS_ERR(segs))
++ goto out_err;
++ queued = 0;
++ err = 0;
++ do {
++ struct sk_buff *nskb = segs->next;
++ if (nskb && nskb->next)
++ nskb->cb_next = NULL;
++ if (err == 0)
++ err = __imq_nf_queue_gso(entry, dev, segs);
++ if (err == 0)
++ queued++;
++ else
++ kfree_skb(segs);
++ segs = nskb;
++ } while (segs);
++
++ if (queued) {
++ if (err) /* some segments are already queued */
++ free_entry(entry);
++ kfree_skb(skb);
++ return 0;
++ }
++
++out_err:
++ nf_bridge_adjust_segmented_data(skb);
++ retval = err;
++out_no_dev:
++ return retval;
++}
++
++static int __imq_nf_queue(struct nf_queue_entry *entry, struct net_device *dev)
++{
++ struct sk_buff *skb_orig, *skb, *skb_shared;
++ struct Qdisc *q;
++ struct netdev_queue *txq;
++ spinlock_t *root_lock;
++ int users;
++ int retval = -EINVAL;
++ unsigned int orig_queue_index;
++
++ dev->last_rx = jiffies;
++
++ skb = entry->skb;
++ skb_orig = NULL;
++
++ /* skb has owner? => make clone */
++ if (unlikely(skb->destructor)) {
++ skb_orig = skb;
++ skb = skb_clone(skb, GFP_ATOMIC);
++ if (unlikely(!skb)) {
++ retval = -ENOMEM;
++ goto out;
++ }
++ skb->cb_next = NULL;
++ entry->skb = skb;
++ }
++
++ skb->nf_queue_entry = entry;
++
++ dev->stats.rx_bytes += skb->len;
++ dev->stats.rx_packets++;
++
++ if (!skb->dev) {
++		/* skb->dev == NULL causes problems, try to find the cause. */
++ if (net_ratelimit()) {
++ dev_warn(&dev->dev,
++ "received packet with skb->dev == NULL\n");
++ dump_stack();
++ }
++
++ skb->dev = dev;
++ }
++
++ /* Disables softirqs for lock below */
++ rcu_read_lock_bh();
++
++ /* Multi-queue selection */
++ orig_queue_index = skb_get_queue_mapping(skb);
++ txq = imq_select_queue(dev, skb);
++
++ q = rcu_dereference(txq->qdisc);
++ if (unlikely(!q->enqueue))
++ goto packet_not_eaten_by_imq_dev;
++
++ root_lock = qdisc_lock(q);
++ spin_lock(root_lock);
++
++ users = atomic_read(&skb->users);
++
++ skb_shared = skb_get(skb); /* increase reference count by one */
++
++ /* backup skb->cb, as qdisc layer will overwrite it */
++ skb_save_cb(skb_shared);
++ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
++
++ if (likely(atomic_read(&skb_shared->users) == users + 1)) {
++ kfree_skb(skb_shared); /* decrease reference count by one */
++
++ skb->destructor = &imq_skb_destructor;
++
++ /* cloned? */
++ if (unlikely(skb_orig))
++ kfree_skb(skb_orig); /* free original */
++
++ spin_unlock(root_lock);
++ rcu_read_unlock_bh();
++
++ /* schedule qdisc dequeue */
++ __netif_schedule(q);
++
++ retval = 0;
++ goto out;
++ } else {
++ skb_restore_cb(skb_shared); /* restore skb->cb */
++ skb->nf_queue_entry = NULL;
++ /*
++ * qdisc dropped packet and decreased skb reference count of
++		 * skb, so we don't really want to try to free it again, as that would
++ * actually destroy the skb.
++ */
++ spin_unlock(root_lock);
++ goto packet_not_eaten_by_imq_dev;
++ }
++
++packet_not_eaten_by_imq_dev:
++ skb_set_queue_mapping(skb, orig_queue_index);
++ rcu_read_unlock_bh();
++
++ /* cloned? restore original */
++ if (unlikely(skb_orig)) {
++ kfree_skb(skb);
++ entry->skb = skb_orig;
++ }
++ retval = -1;
++out:
++ return retval;
++}
++
++static unsigned int imq_nf_hook(const struct nf_hook_ops *ops,
++ struct sk_buff *pskb,
++ const struct net_device *indev,
++ const struct net_device *outdev,
++ int (*okfn)(struct sk_buff *))
++{
++ return (pskb->imq_flags & IMQ_F_ENQUEUE) ? NF_IMQ_QUEUE : NF_ACCEPT;
++}
++
++static int imq_close(struct net_device *dev)
++{
++ netif_stop_queue(dev);
++ return 0;
++}
++
++static int imq_open(struct net_device *dev)
++{
++ netif_start_queue(dev);
++ return 0;
++}
++
++static const struct net_device_ops imq_netdev_ops = {
++ .ndo_open = imq_open,
++ .ndo_stop = imq_close,
++ .ndo_start_xmit = imq_dev_xmit,
++ .ndo_get_stats = imq_get_stats,
++};
++
++static void imq_setup(struct net_device *dev)
++{
++ dev->netdev_ops = &imq_netdev_ops;
++ dev->type = ARPHRD_VOID;
++ dev->mtu = 16000; /* too small? */
++ dev->tx_queue_len = 11000; /* too big? */
++ dev->flags = IFF_NOARP;
++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
++ NETIF_F_GSO | NETIF_F_HW_CSUM |
++ NETIF_F_HIGHDMA;
++ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE |
++ IFF_TX_SKB_SHARING);
++}
++
++static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
++{
++ int ret = 0;
++
++ if (tb[IFLA_ADDRESS]) {
++ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
++ ret = -EINVAL;
++ goto end;
++ }
++ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
++ ret = -EADDRNOTAVAIL;
++ goto end;
++ }
++ }
++ return 0;
++end:
++ pr_warn("IMQ: imq_validate failed (%d)\n", ret);
++ return ret;
++}
++
++static struct rtnl_link_ops imq_link_ops __read_mostly = {
++ .kind = "imq",
++ .priv_size = 0,
++ .setup = imq_setup,
++ .validate = imq_validate,
++};
++
++static const struct nf_queue_handler imq_nfqh = {
++ .outfn = imq_nf_queue,
++};
++
++static int __init imq_init_hooks(void)
++{
++ int ret;
++
++ nf_register_queue_imq_handler(&imq_nfqh);
++
++ ret = nf_register_hooks(imq_ops, ARRAY_SIZE(imq_ops));
++ if (ret < 0)
++ nf_unregister_queue_imq_handler();
++
++ return ret;
++}
++
++static int __init imq_init_one(int index)
++{
++ struct net_device *dev;
++ int ret;
++
++ dev = alloc_netdev_mq(0, "imq%d", NET_NAME_UNKNOWN, imq_setup, numqueues);
++ if (!dev)
++ return -ENOMEM;
++
++ ret = dev_alloc_name(dev, dev->name);
++ if (ret < 0)
++ goto fail;
++
++ dev->rtnl_link_ops = &imq_link_ops;
++ ret = register_netdevice(dev);
++ if (ret < 0)
++ goto fail;
++
++ return 0;
++fail:
++ free_netdev(dev);
++ return ret;
++}
++
++static int __init imq_init_devs(void)
++{
++ int err, i;
++
++ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
++ pr_err("IMQ: numdevs has to be betweed 1 and %u\n",
++ IMQ_MAX_DEVS);
++ return -EINVAL;
++ }
++
++ if (numqueues < 1 || numqueues > IMQ_MAX_QUEUES) {
++ pr_err("IMQ: numqueues has to be betweed 1 and %u\n",
++ IMQ_MAX_QUEUES);
++ return -EINVAL;
++ }
++
++ get_random_bytes(&imq_hashrnd, sizeof(imq_hashrnd));
++
++ rtnl_lock();
++ err = __rtnl_link_register(&imq_link_ops);
++
++ for (i = 0; i < numdevs && !err; i++)
++ err = imq_init_one(i);
++
++ if (err) {
++ __rtnl_link_unregister(&imq_link_ops);
++ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++ }
++ rtnl_unlock();
++
++ return err;
++}
++
++static int __init imq_init_module(void)
++{
++ int err;
++
++#if defined(CONFIG_IMQ_NUM_DEVS)
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
++#endif
++
++ err = imq_init_devs();
++ if (err) {
++ pr_err("IMQ: Error trying imq_init_devs(net)\n");
++ return err;
++ }
++
++ err = imq_init_hooks();
++ if (err) {
++ pr_err(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
++ rtnl_link_unregister(&imq_link_ops);
++ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++ return err;
++ }
++
++ pr_info("IMQ driver loaded successfully. (numdevs = %d, numqueues = %d)\n",
++ numdevs, numqueues);
++
++#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++ pr_info("\tHooking IMQ before NAT on PREROUTING.\n");
++#else
++ pr_info("\tHooking IMQ after NAT on PREROUTING.\n");
++#endif
++#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
++ pr_info("\tHooking IMQ before NAT on POSTROUTING.\n");
++#else
++ pr_info("\tHooking IMQ after NAT on POSTROUTING.\n");
++#endif
++
++ return 0;
++}
++
++static void __exit imq_unhook(void)
++{
++ nf_unregister_hooks(imq_ops, ARRAY_SIZE(imq_ops));
++ nf_unregister_queue_imq_handler();
++}
++
++static void __exit imq_cleanup_devs(void)
++{
++ rtnl_link_unregister(&imq_link_ops);
++ memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
++}
++
++static void __exit imq_exit_module(void)
++{
++ imq_unhook();
++ imq_cleanup_devs();
++ pr_info("IMQ driver unloaded successfully.\n");
++}
++
++module_init(imq_init_module);
++module_exit(imq_exit_module);
++
++module_param(numdevs, int, 0);
++module_param(numqueues, int, 0);
++MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
++MODULE_PARM_DESC(numqueues, "number of queues per IMQ device");
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("imq");
++
+diff --git a/include/linux/imq.h b/include/linux/imq.h
+new file mode 100644
+index 0000000..1babb09
+--- /dev/null
++++ b/include/linux/imq.h
+@@ -0,0 +1,13 @@
++#ifndef _IMQ_H
++#define _IMQ_H
++
++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
++#define IMQ_F_BITS 5
++
++#define IMQ_F_IFMASK 0x0f
++#define IMQ_F_ENQUEUE 0x10
++
++#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
++
++#endif /* _IMQ_H */
++
+diff --git a/include/linux/netfilter/xt_IMQ.h b/include/linux/netfilter/xt_IMQ.h
+new file mode 100644
+index 0000000..9b07230
+--- /dev/null
++++ b/include/linux/netfilter/xt_IMQ.h
+@@ -0,0 +1,9 @@
++#ifndef _XT_IMQ_H
++#define _XT_IMQ_H
++
++struct xt_imq_info {
++ unsigned int todev; /* target imq device */
++};
++
++#endif /* _XT_IMQ_H */
++
+diff --git a/include/linux/netfilter_ipv4/ipt_IMQ.h b/include/linux/netfilter_ipv4/ipt_IMQ.h
+new file mode 100644
+index 0000000..7af320f
+--- /dev/null
++++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
+@@ -0,0 +1,10 @@
++#ifndef _IPT_IMQ_H
++#define _IPT_IMQ_H
++
++/* Backwards compatibility for old userspace */
++#include <linux/netfilter/xt_IMQ.h>
++
++#define ipt_imq_info xt_imq_info
++
++#endif /* _IPT_IMQ_H */
++
+diff --git a/include/linux/netfilter_ipv6/ip6t_IMQ.h b/include/linux/netfilter_ipv6/ip6t_IMQ.h
+new file mode 100644
+index 0000000..198ac01
+--- /dev/null
++++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
+@@ -0,0 +1,10 @@
++#ifndef _IP6T_IMQ_H
++#define _IP6T_IMQ_H
++
++/* Backwards compatibility for old userspace */
++#include <linux/netfilter/xt_IMQ.h>
++
++#define ip6t_imq_info xt_imq_info
++
++#endif /* _IP6T_IMQ_H */
++
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f66f346..d699b19 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -33,6 +33,9 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/netdev_features.h>
+ #include <net/flow_keys.h>
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#include <linux/imq.h>
++#endif
+
+ /* Don't change this without changing skb_csum_unnecessary! */
+ #define CHECKSUM_NONE 0
+@@ -418,6 +421,9 @@ struct sk_buff {
+ * first. This is owned by whoever has the skb queued ATM.
+ */
+ char cb[48] __aligned(8);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ void *cb_next;
++#endif
+
+ unsigned long _skb_refdst;
+ #ifdef CONFIG_XFRM
+@@ -453,6 +459,9 @@ struct sk_buff {
+ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct nf_conntrack *nfct;
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ struct nf_queue_entry *nf_queue_entry;
++#endif
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct nf_bridge_info *nf_bridge;
+ #endif
+@@ -490,6 +499,9 @@ struct sk_buff {
+ __u16 tc_verd; /* traffic control verdict */
+ #endif
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ __u8 imq_flags:IMQ_F_BITS;
++#endif
+
+ union {
+ __wsum csum;
+@@ -625,6 +637,12 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+ return (struct rtable *)skb_dst(skb);
+ }
+
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++extern int skb_save_cb(struct sk_buff *skb);
++extern int skb_restore_cb(struct sk_buff *skb);
++#endif
++
+ void kfree_skb(struct sk_buff *skb);
+ void kfree_skb_list(struct sk_buff *segs);
+ void skb_tx_error(struct sk_buff *skb);
+@@ -2435,6 +2453,10 @@ static inline void nf_reset(struct sk_buff *skb)
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = NULL;
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ skb->imq_flags = 0;
++ skb->nf_queue_entry = NULL;
++#endif
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nf_bridge_put(skb->nf_bridge);
+ skb->nf_bridge = NULL;
+@@ -2635,6 +2657,10 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+ if (copy)
+ dst->nfctinfo = src->nfctinfo;
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ dst->imq_flags = src->imq_flags;
++ dst->nf_queue_entry = src->nf_queue_entry;
++#endif
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ dst->nf_bridge = src->nf_bridge;
+ nf_bridge_get(src->nf_bridge);
+diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
+index aaba4bb..f6e92a4 100644
+--- a/include/net/netfilter/nf_queue.h
++++ b/include/net/netfilter/nf_queue.h
+@@ -29,6 +29,12 @@ struct nf_queue_handler {
+ void nf_register_queue_handler(const struct nf_queue_handler *qh);
+ void nf_unregister_queue_handler(void);
+ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
++extern void nf_unregister_queue_imq_handler(void);
++#endif
+
+ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+ void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
+index f7dc0eb..58c46a9 100644
+--- a/include/uapi/linux/netfilter.h
++++ b/include/uapi/linux/netfilter.h
+@@ -13,7 +13,8 @@
+ #define NF_QUEUE 3
+ #define NF_REPEAT 4
+ #define NF_STOP 5
+-#define NF_MAX_VERDICT NF_STOP
++#define NF_IMQ_QUEUE 6
++#define NF_MAX_VERDICT NF_IMQ_QUEUE
+
+ /* we overload the higher bits for encoding auxiliary data such as the queue
+ * number or errno values. Not nice, but better than additional function
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3d13874..9842f21 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -131,6 +131,9 @@
+ #include <linux/if_macvlan.h>
+ #include <linux/errqueue.h>
+ #include <linux/hrtimer.h>
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#include <linux/imq.h>
++#endif
+
+ #include "net-sysfs.h"
+
+@@ -2618,7 +2621,11 @@ static int xmit_one(struct sk_buff *skb,
+ unsigned int len;
+ int rc;
+
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ if ((!list_empty(&ptype_all) || !list_empty(&dev->ptype_all)) && !(skb->imq_flags & IMQ_F_ENQUEUE))
++#else
+ if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
++#endif
+ dev_queue_xmit_nit(skb, dev);
+
+ len = skb->len;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c28c7fe..a5f1888 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -73,6 +73,84 @@
+
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
++
++/* Control buffer save/restore for IMQ devices */
++struct skb_cb_table {
++ char cb[48] __aligned(8);
++ void *cb_next;
++ atomic_t refcnt;
++};
++
++static DEFINE_SPINLOCK(skb_cb_store_lock);
++
++int skb_save_cb(struct sk_buff *skb)
++{
++ struct skb_cb_table *next;
++
++ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
++ if (!next)
++ return -ENOMEM;
++
++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
++
++ memcpy(next->cb, skb->cb, sizeof(skb->cb));
++ next->cb_next = skb->cb_next;
++
++ atomic_set(&next->refcnt, 1);
++
++ skb->cb_next = next;
++ return 0;
++}
++EXPORT_SYMBOL(skb_save_cb);
++
++int skb_restore_cb(struct sk_buff *skb)
++{
++ struct skb_cb_table *next;
++
++ if (!skb->cb_next)
++ return 0;
++
++ next = skb->cb_next;
++
++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
++
++ memcpy(skb->cb, next->cb, sizeof(skb->cb));
++ skb->cb_next = next->cb_next;
++
++ spin_lock(&skb_cb_store_lock);
++
++ if (atomic_dec_and_test(&next->refcnt))
++ kmem_cache_free(skbuff_cb_store_cache, next);
++
++ spin_unlock(&skb_cb_store_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(skb_restore_cb);
++
++static void skb_copy_stored_cb(struct sk_buff *new, const struct sk_buff *__old)
++{
++ struct skb_cb_table *next;
++ struct sk_buff *old;
++
++ if (!__old->cb_next) {
++ new->cb_next = NULL;
++ return;
++ }
++
++ spin_lock(&skb_cb_store_lock);
++
++ old = (struct sk_buff *)__old;
++
++ next = old->cb_next;
++ atomic_inc(&next->refcnt);
++ new->cb_next = next;
++
++ spin_unlock(&skb_cb_store_lock);
++}
++#endif
+
+ /**
+ * skb_panic - private function for out-of-line support
+@@ -577,6 +656,28 @@ static void skb_release_head_state(struct sk_buff *skb)
+ WARN_ON(in_irq());
+ skb->destructor(skb);
+ }
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ /*
++ * This should not happen. When it does, avoid memleak by restoring
++ * the chain of cb-backups.
++ */
++ while (skb->cb_next != NULL) {
++ if (net_ratelimit())
++ pr_warn("IMQ: kfree_skb: skb->cb_next: %p\n",
++ skb->cb_next);
++
++ skb_restore_cb(skb);
++ }
++ /*
++ * This should not happen either, nf_queue_entry is nullified in
++ * imq_dev_xmit(). If we have non-NULL nf_queue_entry then we are
++ * leaking entry pointers, maybe memory. We don't know if this is
++ * pointer to already freed memory, or should this be freed.
++ * If this happens we need to add refcounting, etc for nf_queue_entry.
++ */
++ if (skb->nf_queue_entry && net_ratelimit())
++ pr_warn("%s\n", "IMQ: kfree_skb: skb->nf_queue_entry != NULL");
++#endif
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_conntrack_put(skb->nfct);
+ #endif
+@@ -709,6 +810,10 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+ #ifdef CONFIG_XFRM
+ new->sp = secpath_get(old->sp);
+ #endif
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ new->cb_next = NULL;
++ /*skb_copy_stored_cb(new, old);*/
++#endif
+ __nf_copy(new, old, false);
+
+ /* Note : this field could be in headers_start/headers_end section
+@@ -3112,6 +3217,13 @@ void __init skb_init(void)
+ 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ NULL);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
++ sizeof(struct skb_cb_table),
++ 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++ NULL);
++#endif
+ }
+
+ /**
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index b6fa35e..08dcfef 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -64,9 +64,6 @@ static int ip6_finish_output2(struct sk_buff *skb)
+ struct in6_addr *nexthop;
+ int ret;
+
+- skb->protocol = htons(ETH_P_IPV6);
+- skb->dev = dev;
+-
+ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+@@ -143,6 +140,13 @@ int ip6_output(struct sk_buff *skb)
+ return 0;
+ }
+
++ /*
++ * IMQ-patch: moved setting skb->dev and skb->protocol from
++ * ip6_finish_output2 to fix crashing at netif_skb_features().
++ */
++ skb->protocol = htons(ETH_P_IPV6);
++ skb->dev = dev;
++
+ return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
+ ip6_finish_output,
+ !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
+index 6e839b6..45ac31c 100644
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -630,6 +630,18 @@ config NETFILTER_XT_TARGET_LOG
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config NETFILTER_XT_TARGET_IMQ
++ tristate '"IMQ" target support'
++ depends on NETFILTER_XTABLES
++ depends on IP_NF_MANGLE || IP6_NF_MANGLE
++ select IMQ
++ default m if NETFILTER_ADVANCED=n
++ help
++ This option adds a `IMQ' target which is used to specify if and
++ to which imq device packets should get enqueued/dequeued.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+ config NETFILTER_XT_TARGET_MARK
+ tristate '"MARK" target support'
+ depends on NETFILTER_ADVANCED
+diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
+index c3a0a12..9647f06 100644
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -82,6 +82,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_NETMAP) += xt_NETMAP.o
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 593b16e..740cd69 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -191,9 +191,11 @@ next_hook:
+ ret = NF_DROP_GETERR(verdict);
+ if (ret == 0)
+ ret = -EPERM;
+- } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
++ } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE ||
++ (verdict & NF_VERDICT_MASK) == NF_IMQ_QUEUE) {
+ int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+- verdict >> NF_VERDICT_QBITS);
++ verdict >> NF_VERDICT_QBITS,
++ verdict & NF_VERDICT_MASK);
+ if (err < 0) {
+ if (err == -ECANCELED)
+ goto next_hook;
+diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
+index 3deec99..c1a1397 100644
+--- a/net/netfilter/nf_internals.h
++++ b/net/netfilter/nf_internals.h
+@@ -29,7 +29,7 @@ extern int nf_queue(struct sk_buff *skb,
+ int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
+ unsigned int hook, struct net_device *indev,
+ struct net_device *outdev, int (*okfn)(struct sk_buff *),
+- unsigned int queuenum);
++ unsigned int queuenum, unsigned int queuetype);
+ int __init netfilter_queue_init(void);
+
+ /* nf_log.c */
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index 5d24b1f..28317dc 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -27,6 +27,23 @@
+ */
+ static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
+
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++static const struct nf_queue_handler __rcu *queue_imq_handler __read_mostly;
++
++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
++{
++ rcu_assign_pointer(queue_imq_handler, qh);
++}
++EXPORT_SYMBOL_GPL(nf_register_queue_imq_handler);
++
++void nf_unregister_queue_imq_handler(void)
++{
++ RCU_INIT_POINTER(queue_imq_handler, NULL);
++ synchronize_rcu();
++}
++EXPORT_SYMBOL_GPL(nf_unregister_queue_imq_handler);
++#endif
++
+ /* return EBUSY when somebody else is registered, return EEXIST if the
+ * same handler is registered, return 0 in case of success. */
+ void nf_register_queue_handler(const struct nf_queue_handler *qh)
+@@ -105,7 +122,8 @@ int nf_queue(struct sk_buff *skb,
+ struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sk_buff *),
+- unsigned int queuenum)
++ unsigned int queuenum,
++ unsigned int queuetype)
+ {
+ int status = -ENOENT;
+ struct nf_queue_entry *entry = NULL;
+@@ -115,7 +133,17 @@ int nf_queue(struct sk_buff *skb,
+ /* QUEUE == DROP if no one is waiting, to be safe. */
+ rcu_read_lock();
+
+- qh = rcu_dereference(queue_handler);
++ if (queuetype == NF_IMQ_QUEUE) {
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ qh = rcu_dereference(queue_imq_handler);
++#else
++ BUG();
++ goto err_unlock;
++#endif
++ } else {
++ qh = rcu_dereference(queue_handler);
++ }
++
+ if (!qh) {
+ status = -ESRCH;
+ goto err_unlock;
+@@ -205,9 +233,11 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+ local_bh_enable();
+ break;
+ case NF_QUEUE:
++ case NF_IMQ_QUEUE:
+ err = nf_queue(skb, elem, entry->pf, entry->hook,
+ entry->indev, entry->outdev, entry->okfn,
+- verdict >> NF_VERDICT_QBITS);
++ verdict >> NF_VERDICT_QBITS,
++ verdict & NF_VERDICT_MASK);
+ if (err < 0) {
+ if (err == -ECANCELED)
+ goto next_hook;
+diff --git a/net/netfilter/xt_IMQ.c b/net/netfilter/xt_IMQ.c
+new file mode 100644
+index 0000000..1c3cd66
+--- /dev/null
++++ b/net/netfilter/xt_IMQ.c
+@@ -0,0 +1,72 @@
++/*
++ * This target marks packets to be enqueued to an imq device
++ */
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_IMQ.h>
++#include <linux/imq.h>
++
++static unsigned int imq_target(struct sk_buff *pskb,
++ const struct xt_action_param *par)
++{
++ const struct xt_imq_info *mr = par->targinfo;
++
++ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
++
++ return XT_CONTINUE;
++}
++
++static int imq_checkentry(const struct xt_tgchk_param *par)
++{
++ struct xt_imq_info *mr = par->targinfo;
++
++ if (mr->todev > IMQ_MAX_DEVS - 1) {
++ pr_warn("IMQ: invalid device specified, highest is %u\n",
++ IMQ_MAX_DEVS - 1);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static struct xt_target xt_imq_reg[] __read_mostly = {
++ {
++ .name = "IMQ",
++ .family = AF_INET,
++ .checkentry = imq_checkentry,
++ .target = imq_target,
++ .targetsize = sizeof(struct xt_imq_info),
++ .table = "mangle",
++ .me = THIS_MODULE
++ },
++ {
++ .name = "IMQ",
++ .family = AF_INET6,
++ .checkentry = imq_checkentry,
++ .target = imq_target,
++ .targetsize = sizeof(struct xt_imq_info),
++ .table = "mangle",
++ .me = THIS_MODULE
++ },
++};
++
++static int __init imq_init(void)
++{
++ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
++}
++
++static void __exit imq_fini(void)
++{
++ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
++}
++
++module_init(imq_init);
++module_exit(imq_fini);
++
++MODULE_AUTHOR("http://www.linuximq.net");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("ipt_IMQ");
++MODULE_ALIAS("ip6t_IMQ");
++
Index: target/linux/generic/patches-4.0/linux_650-custom_netfilter_match_modules.patch
===================================================================
--- target/linux/generic/patches-4.0/linux_650-custom_netfilter_match_modules.patch (revision 0)
+++ target/linux/generic/patches-4.0/linux_650-custom_netfilter_match_modules.patch (working copy)
@@ -0,0 +1,9252 @@
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_bandwidth.c 2015-06-19 03:02:55.381669455 +0800
+@@ -0,0 +1,2501 @@
++/* bandwidth -- An iptables extension for bandwidth monitoring/control
++ * Can be used to efficiently monitor bandwidth and/or implement bandwidth quotas
++ * Can be queried using the iptbwctl userspace library
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009-2011 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <asm/uaccess.h>
++
++#include <linux/time.h>
++
++#include <linux/semaphore.h>
++
++
++#include "bandwidth_deps/tree_map.h"
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_bandwidth.h>
++
++
++#include <linux/ip.h>
++#include <linux/netfilter/x_tables.h>
++
++
++/* #define BANDWIDTH_DEBUG 1 */
++
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Bishop");
++MODULE_DESCRIPTION("Match bandwidth used, designed for use with Gargoyle web interface (www.gargoyle-router.com)");
++
++/*
++ * WARNING: accessing the sys_tz variable takes FOREVER, and kills performance
++ * keep a local variable that gets updated from the extern variable
++ */
++extern struct timezone sys_tz;
++static int local_minutes_west;
++static int local_seconds_west;
++static time_t last_local_mw_update;
++
++
++static spinlock_t bandwidth_lock = __SPIN_LOCK_UNLOCKED(bandwidth_lock);
++DEFINE_SEMAPHORE(userspace_lock);
++
++static string_map* id_map = NULL;
++
++
++typedef struct info_and_maps_struct
++{
++ struct ipt_bandwidth_info* info;
++ long_map* ip_map;
++ long_map* ip_history_map;
++}info_and_maps;
++
++typedef struct history_struct
++{
++ time_t first_start;
++ time_t first_end;
++ time_t last_end; /* also beginning of current time frame */
++ uint32_t max_nodes;
++ uint32_t num_nodes;
++ uint32_t non_zero_nodes;
++ uint32_t current_index;
++ uint64_t* history_data;
++} bw_history;
++
++
++
++static unsigned char set_in_progress = 0;
++static char set_id[BANDWIDTH_MAX_ID_LENGTH] = "";
++
++/*
++ * function prototypes
++ *
++ * (prototypes only provided for
++ * functions not part of iptables API)
++ *
++*/
++
++
++static void adjust_ip_for_backwards_time_shift(unsigned long key, void* value);
++static void adjust_id_for_backwards_time_shift(char* key, void* value);
++static void check_for_backwards_time_shift(time_t now);
++
++
++static void shift_timezone_of_ip(unsigned long key, void* value);
++static void shift_timezone_of_id(char* key, void* value);
++static void check_for_timezone_shift(time_t now, int already_locked);
++
++
++
++static bw_history* initialize_history(uint32_t max_nodes);
++static unsigned char update_history(bw_history* history, time_t interval_start, time_t interval_end, struct ipt_bandwidth_info* info);
++
++
++
++static void do_reset(unsigned long key, void* value);
++static void set_bandwidth_to_zero(unsigned long key, void* value);
++static void handle_interval_reset(info_and_maps* iam, time_t now);
++
++static uint64_t pow64(uint64_t base, uint64_t pow);
++static uint64_t get_bw_record_max(void); /* called by init to set global variable */
++
++static inline int is_leap(unsigned int y);
++static time_t get_next_reset_time(struct ipt_bandwidth_info *info, time_t now, time_t previous_reset);
++static time_t get_nominal_previous_reset_time(struct ipt_bandwidth_info *info, time_t current_next_reset);
++
++static uint64_t* initialize_map_entries_for_ip(info_and_maps* iam, unsigned long ip, uint64_t initial_bandwidth);
++
++
++
++
++static time_t backwards_check = 0;
++static time_t backwards_adjust_current_time = 0;
++static time_t backwards_adjust_info_previous_reset = 0;
++static time_t backwards_adjust_ips_zeroed = 0;
++static info_and_maps* backwards_adjust_iam = NULL;
++
++/*
++static char print_out_buf[25000];
++static void print_to_buf(char* outdat);
++static void reset_buf(void);
++static void do_print_buf(void);
++
++static void print_to_buf(char* outdat)
++{
++ int buf_len = strlen(print_out_buf);
++ sprintf(print_out_buf+buf_len, "\t%s\n", outdat);
++}
++static void reset_buf(void)
++{
++ print_out_buf[0] = '\n';
++ print_out_buf[1] = '\0';
++}
++static void do_print_buf(void)
++{
++ char* start = print_out_buf;
++ char* next = strchr(start, '\n');
++ while(next != NULL)
++ {
++ *next = '\0';
++ printk("%s\n", start);
++ start = next+1;
++ next = strchr(start, '\n');
++ }
++ printk("%s\n", start);
++
++ reset_buf();
++}
++*/
++
++static void adjust_ip_for_backwards_time_shift(unsigned long key, void* value)
++{
++ bw_history* old_history = (bw_history*)value;
++
++ if(old_history->num_nodes == 1)
++ {
++ if(backwards_adjust_info_previous_reset > backwards_adjust_current_time)
++ {
++ if(backwards_adjust_ips_zeroed == 0)
++ {
++ apply_to_every_long_map_value(backwards_adjust_iam->ip_map, set_bandwidth_to_zero);
++ backwards_adjust_iam->info->next_reset = get_next_reset_time(backwards_adjust_iam->info, backwards_adjust_current_time, backwards_adjust_current_time);
++ backwards_adjust_iam->info->previous_reset = backwards_adjust_current_time;
++ backwards_adjust_iam->info->current_bandwidth = 0;
++ backwards_adjust_ips_zeroed = 1;
++ }
++ }
++ return;
++ }
++ else if(old_history->last_end < backwards_adjust_current_time)
++ {
++ return;
++ }
++ else
++ {
++
++ /*
++ * reconstruct new history without newest nodes, to represent data as it was
++ * last time the current time was set to the interval to which we just jumped back
++ */
++ uint32_t next_old_index;
++ time_t old_next_start = old_history->first_start == 0 ? backwards_adjust_info_previous_reset : old_history->first_start; /* first time point in old history */
++ bw_history* new_history = initialize_history(old_history->max_nodes);
++ if(new_history == NULL)
++ {
++ printk("ipt_bandwidth: warning, kmalloc failure!\n");
++ return;
++ }
++
++
++
++ /* oldest index in old history -- we iterate forward through old history using this index */
++ next_old_index = old_history->num_nodes == old_history->max_nodes ? (old_history->current_index+1) % old_history->max_nodes : 0;
++
++
++ /* if first time point is after current time, just completely re-initialize history, otherwise set first time point to old first time point */
++ (new_history->history_data)[ new_history->current_index ] = old_next_start < backwards_adjust_current_time ? (old_history->history_data)[next_old_index] : 0;
++ backwards_adjust_iam->info->previous_reset = old_next_start < backwards_adjust_current_time ? old_next_start : backwards_adjust_current_time;
++
++
++ /* iterate through old history, rebuilding in new history*/
++ while( old_next_start < backwards_adjust_current_time )
++ {
++ time_t old_next_end = get_next_reset_time(backwards_adjust_iam->info, old_next_start, old_next_start); /* 2nd param = last reset, 3rd param = current time */
++ if( old_next_end < backwards_adjust_current_time)
++ {
++ update_history(new_history, old_next_start, old_next_end, backwards_adjust_iam->info);
++ next_old_index++;
++ (new_history->history_data)[ new_history->current_index ] = (old_history->history_data)[next_old_index];
++ }
++ backwards_adjust_iam->info->previous_reset = old_next_start; /*update previous_reset variable in bw_info as we iterate */
++ old_next_start = old_next_end;
++ }
++
++ /* update next_reset variable from previous_reset variable which we've already set */
++ backwards_adjust_iam->info->next_reset = get_next_reset_time(backwards_adjust_iam->info, backwards_adjust_iam->info->previous_reset, backwards_adjust_iam->info->previous_reset);
++
++
++
++ /* set old_history to be new_history */
++ kfree(old_history->history_data);
++ old_history->history_data = new_history->history_data;
++ old_history->first_start = new_history->first_start;
++ old_history->first_end = new_history->first_end;
++ old_history->last_end = new_history->last_end;
++ old_history->num_nodes = new_history->num_nodes;
++ old_history->non_zero_nodes = new_history->non_zero_nodes;
++ old_history->current_index = new_history->current_index;
++ set_long_map_element(backwards_adjust_iam->ip_map, key, (void*)(old_history->history_data + old_history->current_index) );
++ if(key == 0)
++ {
++ backwards_adjust_iam->info->combined_bw = (uint64_t*)(old_history->history_data + old_history->current_index);
++ }
++
++ /*
++ * free new history (which was just temporary)
++ * note that we don't need to free history_data from new_history
++ * we freed the history_data from old history, and set that to the history_data from new_history
++ * so, this cleanup has already been handled
++ */
++ kfree(new_history);
++
++ }
++}
++static void adjust_id_for_backwards_time_shift(char* key, void* value)
++{
++ info_and_maps* iam = (info_and_maps*)value;
++ if(iam == NULL)
++ {
++ return;
++ }
++ if(iam->info == NULL)
++ {
++ return;
++ }
++
++ backwards_adjust_iam = iam;
++ if( (iam->info->reset_is_constant_interval == 0 && iam->info->reset_interval == BANDWIDTH_NEVER) || iam->info->cmp == BANDWIDTH_CHECK )
++ {
++ return;
++ }
++ if(iam->ip_history_map != NULL)
++ {
++ backwards_adjust_info_previous_reset = iam->info->previous_reset;
++ backwards_adjust_ips_zeroed = 0;
++ apply_to_every_long_map_value(iam->ip_history_map, adjust_ip_for_backwards_time_shift);
++ }
++ else
++ {
++ time_t next_reset_after_adjustment = get_next_reset_time(iam->info, backwards_adjust_current_time, backwards_adjust_current_time);
++ if(next_reset_after_adjustment < iam->info->next_reset)
++ {
++ iam->info->previous_reset = backwards_adjust_current_time;
++ iam->info->next_reset = next_reset_after_adjustment;
++ }
++ }
++ backwards_adjust_iam = NULL;
++}
++static void check_for_backwards_time_shift(time_t now)
++{
++ spin_lock_bh(&bandwidth_lock);
++ if(now < backwards_check && backwards_check != 0)
++ {
++ printk("ipt_bandwidth: backwards time shift detected, adjusting\n");
++
++ /* adjust */
++ down(&userspace_lock);
++
++ /* This function is always called with absolute time, not time adjusted for timezone. Correct that before adjusting. */
++ backwards_adjust_current_time = now - local_seconds_west;
++ apply_to_every_string_map_value(id_map, adjust_id_for_backwards_time_shift);
++ up(&userspace_lock);
++ }
++ backwards_check = now;
++ spin_unlock_bh(&bandwidth_lock);
++}
++
++
++
++static int old_minutes_west;
++static time_t shift_timezone_current_time;
++static time_t shift_timezone_info_previous_reset;
++static info_and_maps* shift_timezone_iam = NULL;
++static void shift_timezone_of_ip(unsigned long key, void* value)
++{
++ #ifdef BANDWIDTH_DEBUG
++ unsigned long* ip = &key;
++ printk("shifting ip = %u.%u.%u.%u\n", *((char*)ip), *(((char*)ip)+1), *(((char*)ip)+2), *(((char*)ip)+3) );
++ #endif
++
++
++ bw_history* history = (bw_history*)value;
++ int32_t timezone_adj = (old_minutes_west-local_minutes_west)*60;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" before jump:\n");
++ printk(" current time = %ld\n", shift_timezone_current_time);
++ printk(" first_start = %ld\n", history->first_start);
++ printk(" first_end = %ld\n", history->first_end);
++ printk(" last_end = %ld\n", history->last_end);
++ printk("\n");
++ #endif
++
++ /* given time after shift, calculate next and previous reset times */
++ time_t next_reset = get_next_reset_time(shift_timezone_iam->info, shift_timezone_current_time, 0);
++ time_t previous_reset = get_nominal_previous_reset_time(shift_timezone_iam->info, next_reset);
++ shift_timezone_iam->info->next_reset = next_reset;
++
++ /*if we're resetting on a constant interval, we can just adjust -- no need to worry about relationship to constant boundaries, e.g. end of day */
++ if(shift_timezone_iam->info->reset_is_constant_interval)
++ {
++ shift_timezone_iam->info->previous_reset = previous_reset;
++ if(history->num_nodes > 1)
++ {
++ history->first_start = history->first_start + timezone_adj;
++ history->first_end = history->first_end + timezone_adj;
++ history->last_end = history->last_end + timezone_adj;
++ }
++ }
++ else
++ {
++
++
++ /* next reset will be the newly computed next_reset. */
++ int node_index=history->num_nodes - 1;
++ if(node_index > 0)
++ {
++ /* based on new, shifted time, iterate back over all nodes in history */
++ shift_timezone_iam->info->previous_reset = previous_reset ;
++ history->last_end = previous_reset;
++
++ while(node_index > 1)
++ {
++ previous_reset = get_nominal_previous_reset_time(shift_timezone_iam->info, previous_reset);
++ node_index--;
++ }
++ history->first_end = previous_reset;
++
++ previous_reset = get_nominal_previous_reset_time(shift_timezone_iam->info, previous_reset);
++ history->first_start = previous_reset > history->first_start + timezone_adj ? previous_reset : history->first_start + timezone_adj;
++ }
++ else
++ {
++ /*
++ * history hasn't really been initialized -- there's only one, current time point.
++ * we only know what's in the current accumulator in info. Just adjust previous reset time and make sure it's valid
++ */
++ shift_timezone_iam->info->previous_reset = previous_reset > shift_timezone_info_previous_reset + timezone_adj ? previous_reset : shift_timezone_info_previous_reset + timezone_adj;
++ }
++ }
++
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("\n");
++ printk(" after jump:\n");
++ printk(" first_start = %ld\n", history->first_start);
++ printk(" first_end = %ld\n", history->first_end);
++ printk(" last_end = %ld\n", history->last_end);
++ printk("\n\n");
++ #endif
++
++}
++static void shift_timezone_of_id(char* key, void* value) /* string_map callback: re-anchor one rule's reset bookkeeping after a timezone (minutes-west) change */
++{
++	info_and_maps* iam = (info_and_maps*)value;
++	int history_found = 0;
++	if(iam == NULL)
++	{
++		return;
++	}
++	if(iam->info == NULL)
++	{
++		return;
++	}
++
++	#ifdef BANDWIDTH_DEBUG
++		printk("shifting id %s\n", key);
++	#endif
++
++	shift_timezone_iam = iam; /* stash in file-global so the per-ip callback (which gets no user arg) can reach it */
++	if( (iam->info->reset_is_constant_interval == 0 && iam->info->reset_interval == BANDWIDTH_NEVER) || iam->info->cmp == BANDWIDTH_CHECK )
++	{
++		return; /* never-resetting rules and check-only rules have no timers to shift */
++	}
++
++	if(iam->ip_history_map != NULL)
++	{
++		if(iam->ip_history_map->num_elements > 0)
++		{
++			history_found = 1;
++			shift_timezone_info_previous_reset = iam->info->previous_reset;
++			apply_to_every_long_map_value(iam->ip_history_map, shift_timezone_of_ip); /* shift every ip's saved history */
++		}
++	}
++	if(history_found == 0)
++	{
++		/* no saved history: slide previous_reset by the tz delta, then recompute next_reset so previous <= now < next holds */
++		iam->info->previous_reset = iam->info->previous_reset + ((old_minutes_west - local_minutes_west )*60);
++		if(iam->info->previous_reset > shift_timezone_current_time)
++		{
++			iam->info->next_reset = get_next_reset_time(iam->info, shift_timezone_current_time, shift_timezone_current_time);
++			iam->info->previous_reset = shift_timezone_current_time; /* clamp: the previous reset may not lie in the future */
++		}
++		else
++		{
++			iam->info->next_reset = get_next_reset_time(iam->info, shift_timezone_current_time, iam->info->previous_reset);
++			while (iam->info->next_reset < shift_timezone_current_time) /* step forward one interval at a time until next_reset passes now */
++			{
++				iam->info->previous_reset = iam->info->next_reset;
++				iam->info->next_reset = get_next_reset_time(iam->info, iam->info->previous_reset, iam->info->previous_reset);
++			}
++		}
++	}
++	shift_timezone_iam = NULL;
++}
++
++static void check_for_timezone_shift(time_t now, int already_locked) /* detect sys_tz changes and re-anchor all rule timers; takes bandwidth_lock and userspace_lock itself unless already_locked is set */
++{
++
++	if(already_locked == 0) { spin_lock_bh(&bandwidth_lock); }
++	if(now != last_local_mw_update ) /* make sure nothing changed while waiting for lock */
++	{
++		local_minutes_west = sys_tz.tz_minuteswest;
++		local_seconds_west = 60*local_minutes_west;
++		last_local_mw_update = now;
++		if(local_seconds_west > last_local_mw_update)
++		{
++			/* we can't let adjusted time be < 0 -- pretend timezone is still UTC */
++			local_minutes_west = 0;
++			local_seconds_west = 0;
++		}
++
++		if(local_minutes_west != old_minutes_west)
++		{
++			int adj_minutes = old_minutes_west-local_minutes_west;
++			adj_minutes = adj_minutes < 0 ? adj_minutes*-1 : adj_minutes; /* absolute value, used only for the log message */
++
++			if(already_locked == 0) { down(&userspace_lock); }
++
++			printk("ipt_bandwidth: timezone shift of %d minutes detected, adjusting\n", adj_minutes);
++			printk("    old minutes west=%d, new minutes west=%d\n", old_minutes_west, local_minutes_west);
++
++			/* this function is always called with absolute time, not time adjusted for timezone.  Correct that before adjusting */
++			shift_timezone_current_time = now - local_seconds_west;
++			apply_to_every_string_map_value(id_map, shift_timezone_of_id); /* shift every configured rule id */
++
++			old_minutes_west = local_minutes_west;
++
++
++			if(already_locked == 0) { up(&userspace_lock); }
++		}
++	}
++	if(already_locked == 0) { spin_unlock_bh(&bandwidth_lock); }
++}
++
++
++
++static bw_history* initialize_history(uint32_t max_nodes) /* allocate a history ring holding max_nodes saved intervals plus the current accumulator; returns NULL on allocation failure */
++{
++	bw_history* new_history = (bw_history*)kmalloc(sizeof(bw_history), GFP_ATOMIC);
++	if(new_history != NULL)
++	{
++		new_history->history_data = (uint64_t*)kmalloc((1+max_nodes)*sizeof(uint64_t), GFP_ATOMIC); /*number to save +1 for current */
++		if(new_history->history_data == NULL) /* deal with malloc failure */
++		{
++			kfree(new_history);
++			new_history = NULL;
++		}
++		else
++		{
++			new_history->first_start = 0;
++			new_history->first_end = 0;
++			new_history->last_end = 0;
++			new_history->max_nodes = max_nodes+1; /*number to save +1 for current */
++			new_history->num_nodes = 1;
++			new_history->non_zero_nodes = 0; /* counts non_zero nodes other than current, so initialize to 0 */
++			new_history->current_index = 0;
++			memset(new_history->history_data, 0, (1+max_nodes)*sizeof(uint64_t)); /* fixed: zero ALL (1+max_nodes) allocated slots; previous size left the +1 "current" slot uninitialized */
++		}
++	}
++	return new_history; /* in case of malloc failure new_history will be NULL, this should be safe */
++}
++
++/* returns 1 if there are non-zero nodes in history, 0 if history is empty (all zero) */
++static unsigned char update_history(bw_history* history, time_t interval_start, time_t interval_end, struct ipt_bandwidth_info* info)
++{
++	unsigned char history_is_nonzero = 0;
++	if(history != NULL) /* should never be null, but let's be sure */
++	{
++
++		/* adjust number of non-zero nodes */
++		if(history->num_nodes == history->max_nodes)
++		{
++			uint32_t first_index = (history->current_index+1) % history->max_nodes; /* oldest node -- about to be overwritten */
++			if( (history->history_data)[first_index] > 0)
++			{
++				history->non_zero_nodes = history->non_zero_nodes -1;
++			}
++		}
++		if( (history->history_data)[history->current_index] > 0 )
++		{
++			history->non_zero_nodes = history->non_zero_nodes + 1; /* the just-finished current interval now counts as saved */
++		}
++		history_is_nonzero = history->non_zero_nodes > 0 ? 1 : 0;
++
++
++		/* update interval start/end */
++		if(history->first_start == 0)
++		{
++			history->first_start = interval_start;
++			history->first_end = interval_end;
++		}
++		if(history->num_nodes >= history->max_nodes)
++		{
++			history->first_start = history->first_end; /* ring full: the oldest interval falls off the front */
++			history->first_end = get_next_reset_time(info, history->first_start, history->first_start);
++		}
++		history->last_end = interval_end;
++
++
++		history->num_nodes = history->num_nodes < history->max_nodes ? history->num_nodes+1 : history->max_nodes;
++		history->current_index = (history->current_index+1) % history->max_nodes; /* advance ring head... */
++		(history->history_data)[history->current_index] = 0; /* ...and zero the new current accumulator */
++
++		#ifdef BANDWIDTH_DEBUG
++			printk("after update history->num_nodes = %d\n", history->num_nodes);
++			printk("after update history->current_index = %d\n", history->current_index);
++		#endif
++	}
++	return history_is_nonzero;
++}
++
++
++static struct ipt_bandwidth_info* do_reset_info = NULL; /* parameters for the do_reset callback -- long_map callbacks take no user argument */
++static long_map* do_reset_ip_map = NULL;
++static long_map* do_reset_delete_ips = NULL;
++static time_t do_reset_interval_start = 0;
++static time_t do_reset_interval_end = 0;
++static void do_reset(unsigned long key, void* value) /* roll one ip's history forward over [do_reset_interval_start, do_reset_interval_end] */
++{
++	bw_history* history = (bw_history*)value;
++	if(history != NULL && do_reset_info != NULL) /* should never be null.. but let's be sure */
++	{
++		unsigned char history_contains_data = update_history(history, do_reset_interval_start, do_reset_interval_end, do_reset_info);
++		if(history_contains_data == 0 || do_reset_ip_map == NULL)
++		{
++			//schedule data for ip to be deleted (can't delete history while we're traversing history tree data structure!)
++			if(do_reset_delete_ips != NULL) /* should never be null.. but let's be sure */
++			{
++				set_long_map_element(do_reset_delete_ips, key, (void*)(history->history_data + history->current_index));
++			}
++		}
++		else
++		{
++			set_long_map_element(do_reset_ip_map, key, (void*)(history->history_data + history->current_index) ); /* re-point ip_map at the new current accumulator slot */
++		}
++	}
++}
++
++long_map* clear_ip_map = NULL; /* parameters for the clear_ips callback */
++long_map* clear_ip_history_map = NULL;
++static void clear_ips(unsigned long key, void* value) /* drop one ip from both the bandwidth map and the history map, freeing its history */
++{
++	if(clear_ip_history_map != NULL && clear_ip_map != NULL)
++	{
++		bw_history* history;
++
++		#ifdef BANDWIDTH_DEBUG
++			unsigned long* ip = &key;
++			printk("clearing ip = %u.%u.%u.%u\n", *((char*)ip), *(((char*)ip)+1), *(((char*)ip)+2), *(((char*)ip)+3) );
++		#endif
++
++		remove_long_map_element(clear_ip_map, key); /* value is a pointer into history_data, which is freed with the history below */
++		history = (bw_history*)remove_long_map_element(clear_ip_history_map, key);
++		if(history != NULL)
++		{
++			kfree(history->history_data);
++			kfree(history);
++		}
++	}
++}
++
++static void set_bandwidth_to_zero(unsigned long key, void* value) /* long_map callback: zero one ip's bandwidth counter */
++{
++	*((uint64_t*)value) = 0;
++}
++
++
++long_map* reset_histories_ip_map = NULL; /* parameter for the reset_histories callback */
++static void reset_histories(unsigned long key, void* value) /* wipe one ip's history back to a single, zeroed current interval */
++{
++	bw_history* bh = (bw_history*)value;
++	bh->first_start = 0;
++	bh->first_end = 0;
++	bh->last_end = 0;
++	bh->num_nodes = 1;
++	bh->non_zero_nodes = 1; /* NOTE(review): initialize_history starts this at 0 (current node excluded from the count) -- confirm 1 is intended here */
++	bh->current_index = 0;
++	(bh->history_data)[0] = 0;
++	if(reset_histories_ip_map != NULL)
++	{
++		set_long_map_element(reset_histories_ip_map, key, bh->history_data); /* re-point ip_map at slot 0 */
++	}
++}
++
++
++static void handle_interval_reset(info_and_maps* iam, time_t now) /* advance a rule past one or more expired reset boundaries, archiving or zeroing per-ip counters as configured */
++{
++	struct ipt_bandwidth_info* info;
++
++	#ifdef BANDWIDTH_DEBUG
++		printk("now, handling interval reset\n");
++	#endif
++	if(iam == NULL)
++	{
++		#ifdef BANDWIDTH_DEBUG
++			printk("error: doing reset, iam is null \n");
++		#endif
++		return;
++	}
++	if(iam->ip_map == NULL)
++	{
++		#ifdef BANDWIDTH_DEBUG
++			printk("error: doing reset, ip_map is null\n");
++		#endif
++		return;
++	}
++	if(iam->info == NULL)
++	{
++		#ifdef BANDWIDTH_DEBUG
++			printk("error: doing reset, info is null\n");
++		#endif
++
++		return;
++	}
++
++	info = iam->info;
++	if(info->num_intervals_to_save == 0)
++	{
++		#ifdef BANDWIDTH_DEBUG
++			printk("doing reset for case where no intervals are saved\n");
++		#endif
++
++		if(info->next_reset <= now) /* catch next_reset up past now, then simply zero every counter */
++		{
++			info->next_reset = get_next_reset_time(info, info->previous_reset, info->previous_reset);
++			if(info->next_reset <= now)
++			{
++				info->next_reset = get_next_reset_time(info, now, info->previous_reset);
++			}
++		}
++		apply_to_every_long_map_value(iam->ip_map, set_bandwidth_to_zero);
++	}
++	else
++	{
++		unsigned long num_updates;
++		#ifdef BANDWIDTH_DEBUG
++			printk("doing reset for case where at least one interval is saved\n");
++		#endif
++
++
++		if(iam->ip_history_map == NULL)
++		{
++			#ifdef BANDWIDTH_DEBUG
++				printk("error: doing reset, history_map is null when num_intervals_to_save > 0\n");
++			#endif
++			return;
++		}
++
++		do_reset_info = info; /* set file-global parameters for the do_reset / clear_ips callbacks */
++		do_reset_ip_map = iam->ip_map;
++		clear_ip_map = iam->ip_map;
++		clear_ip_history_map = iam->ip_history_map;
++
++
++		/*
++		 * at most update as many times as we have intervals to save -- prevents
++		 * rediculously long loop if interval length is 2 seconds and time was
++		 * reset to 5 years in the future
++		 */
++		num_updates = 0;
++		while(info->next_reset <= now && num_updates < info->num_intervals_to_save)
++		{
++			do_reset_delete_ips = initialize_long_map();
++			/*
++			 * don't check for malloc failure here -- we
++			 * include tests for whether do_reset_delete_ips
++			 * is null below (reset should still be able to procede)
++			 */
++
++			do_reset_interval_start = info->previous_reset;
++			do_reset_interval_end = info->next_reset;
++
++			apply_to_every_long_map_value(iam->ip_history_map, do_reset); /* archive this interval into every ip's history */
++
++
++			info->previous_reset = info->next_reset;
++			info->next_reset = get_next_reset_time(info, info->previous_reset, info->previous_reset);
++
++			/* free all data for ips whose entire histories contain only zeros to conserve space */
++			if(do_reset_delete_ips != NULL)
++			{
++				unsigned long num_destroyed;
++
++				/* only clear ips if this is the last iteration of this update */
++				if(info->next_reset >= now)
++				{
++					/*
++					 * no need to reset iam->info->combined_bw if it gets deleted here.
++					 * below, at end of function it will get set to NULL if it gets wiped
++					 */
++
++					apply_to_every_long_map_value(do_reset_delete_ips, clear_ips);
++				}
++
++				/* but clear do_reset_delete_ips no matter what, values are just pointers to history data so we can ignore them */
++				destroy_long_map(do_reset_delete_ips, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++				do_reset_delete_ips = NULL;
++			}
++			num_updates++;
++		}
++		do_reset_info = NULL; /* clear callback parameters again */
++		do_reset_ip_map = NULL;
++		clear_ip_map = NULL;
++		clear_ip_history_map = NULL;
++
++		do_reset_interval_start = 0;
++		do_reset_interval_end = 0;
++
++		/*
++		 * test if we've cycled past all existing data -- if so wipe all existing histories
++		 * and set previous reset time to now, and compute next reset time from
++		 * current time
++		 */
++		if(info->next_reset <= now)
++		{
++			reset_histories_ip_map = iam->ip_map;
++			apply_to_every_long_map_value(iam->ip_history_map, reset_histories);
++			reset_histories_ip_map = NULL;
++
++			info->previous_reset = now;
++			info->next_reset = get_next_reset_time(info, now, info->previous_reset);
++		}
++	}
++	info->combined_bw = (uint64_t*)get_long_map_element(iam->ip_map, 0); /* may be NULL if the combined (ip 0) entry was wiped above */
++	info->current_bandwidth = 0;
++}
++
++/*
++ * set max bandwidth to be max possible using 63 of the
++ * 64 bits in our record. In some systems uint64_t is treated
++ * like signed, so to prevent errors, use only 63 bits
++ */
++static uint64_t pow64(uint64_t base, uint64_t pow) /* recursive integer exponentiation -- only used once, at init, with small exponents */
++{
++	uint64_t val = 1;
++	if(pow > 0)
++	{
++		val = base*pow64(base, pow-1);
++	}
++	return val;
++}
++static uint64_t get_bw_record_max(void) /* called by init to set global variable */
++{
++	return (pow64(2,62)) + (pow64(2,62)-1); /* 2^63 - 1, computed without shifting into the sign bit */
++}
++static uint64_t bandwidth_record_max;
++
++
++#define ADD_UP_TO_MAX(original,add,is_check) (bandwidth_record_max - original > add && is_check== 0) ? original+add : (is_check ? original : bandwidth_record_max); /* saturating add; no-op when is_check set.  NOTE(review): args unparenthesized and trailing ';' -- only safe in plain assignments, which is how it is used below */
++
++
++/*
++ * Shamelessly yoinked from xt_time.c
++ * "That is so amazingly amazing, I think I'd like to steal it."
++ *      -- Zaphod Beeblebrox
++ */
++
++static const u_int16_t days_since_year[] = {
++	0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, /* cumulative days at the start of each month, non-leap year */
++};
++
++static const u_int16_t days_since_leapyear[] = {
++	0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, /* cumulative days at the start of each month, leap year */
++};
++
++/*
++ * Since time progresses forward, it is best to organize this array in reverse,
++ * to minimize lookup time. These are days since epoch since start of each year,
++ * going back to 1970
++ */
++#define DSE_FIRST 2039
++static const u_int16_t days_since_epoch_for_each_year_start[] = {
++	/* 2039 - 2030 */
++	25202, 24837, 24472, 24106, 23741, 23376, 23011, 22645, 22280, 21915,
++	/* 2029 - 2020 */
++	21550, 21184, 20819, 20454, 20089, 19723, 19358, 18993, 18628, 18262,
++	/* 2019 - 2010 */
++	17897, 17532, 17167, 16801, 16436, 16071, 15706, 15340, 14975, 14610,
++	/* 2009 - 2000 */
++	14245, 13879, 13514, 13149, 12784, 12418, 12053, 11688, 11323, 10957,
++	/* 1999 - 1990 */
++	10592, 10227, 9862, 9496, 9131, 8766, 8401, 8035, 7670, 7305,
++	/* 1989 - 1980 */
++	6940, 6574, 6209, 5844, 5479, 5113, 4748, 4383, 4018, 3652,
++	/* 1979 - 1970 */
++	3287, 2922, 2557, 2191, 1826, 1461, 1096, 730, 365, 0,
++};
++
++static inline int is_leap(unsigned int y) /* Gregorian leap-year test */
++{
++	return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
++}
++
++/* end of code yoinked from xt_time */
++
++
++static time_t get_nominal_previous_reset_time(struct ipt_bandwidth_info *info, time_t current_next_reset) /* invert get_next_reset_time: find the reset boundary immediately before current_next_reset */
++{
++	time_t previous_reset = current_next_reset;
++	if(info->reset_is_constant_interval == 0)
++	{
++		/* skip backwards in halves of interval after next, until */
++		time_t next = get_next_reset_time(info, current_next_reset, 0);
++		time_t half_interval = (next-current_next_reset)/2;
++		time_t half_count, tmp;
++		half_interval = half_interval == 0 ? 1 : half_interval; /* must be at least one second, otherwise we loop forever*/
++
++		half_count = 1;
++		tmp = get_next_reset_time(info, (current_next_reset-(half_count*half_interval)),0);
++		while(previous_reset >= current_next_reset) /* probe further and further back until the computed "next" lands before current_next_reset */
++		{
++			previous_reset = tmp;
++			half_count++;
++			tmp = get_next_reset_time(info, (current_next_reset-(half_count*half_interval)),0);
++		}
++	}
++	else
++	{
++		previous_reset = current_next_reset - info->reset_interval; /* constant interval: simple subtraction */
++	}
++	return previous_reset;
++}
++
++
++static time_t get_next_reset_time(struct ipt_bandwidth_info *info, time_t now, time_t previous_reset) /* compute the first reset boundary after now; times here are already timezone-adjusted */
++{
++	//first calculate when next reset would be if reset_time is 0 (which it may be)
++	time_t next_reset = 0;
++	if(info->reset_is_constant_interval == 0)
++	{
++		if(info->reset_interval == BANDWIDTH_MINUTE)
++		{
++			next_reset = ( (long)(now/60) + 1)*60; /* start of the next minute */
++			if(info->reset_time > 0)
++			{
++				time_t alt_reset = next_reset + info->reset_time - 60; /* the reset_time offset within the CURRENT minute, if still in the future */
++				next_reset = alt_reset > now ? alt_reset : next_reset+info->reset_time;
++			}
++		}
++		else if(info->reset_interval == BANDWIDTH_HOUR)
++		{
++			next_reset = ( (long)(now/(60*60)) + 1)*60*60;
++			if(info->reset_time > 0)
++			{
++				time_t alt_reset = next_reset + info->reset_time - (60*60);
++				next_reset = alt_reset > now ? alt_reset : next_reset+info->reset_time;
++			}
++		}
++		else if(info->reset_interval == BANDWIDTH_DAY)
++		{
++			next_reset = ( (long)(now/(60*60*24)) + 1)*60*60*24;
++			if(info->reset_time > 0)
++			{
++				time_t alt_reset = next_reset + info->reset_time - (60*60*24);
++				next_reset = alt_reset > now ? alt_reset : next_reset+info->reset_time;
++			}
++		}
++		else if(info->reset_interval == BANDWIDTH_WEEK)
++		{
++			long days_since_epoch = now/(60*60*24);
++			long current_weekday = (4 + days_since_epoch ) % 7 ; /* the epoch (Jan 1 1970) was a Thursday, weekday 4 */
++			next_reset = (days_since_epoch + (7-current_weekday) )*(60*60*24);
++			if(info->reset_time > 0)
++			{
++				time_t alt_reset = next_reset + info->reset_time - (60*60*24*7);
++				next_reset = alt_reset > now ? alt_reset : next_reset+info->reset_time;
++			}
++		}
++		else if(info->reset_interval == BANDWIDTH_MONTH)
++		{
++			/* yeah, most of this is yoinked from xt_time too */
++			int year;
++			int year_index;
++			int year_day;
++			int month;
++			long days_since_epoch = now/(60*60*24);
++			uint16_t* month_start_days;
++			time_t alt_reset;
++
++			for (year_index = 0, year = DSE_FIRST; days_since_epoch_for_each_year_start[year_index] > days_since_epoch; year_index++)
++			{
++				year--;
++			}
++			year_day = days_since_epoch - days_since_epoch_for_each_year_start[year_index];
++			if (is_leap(year))
++			{
++				month_start_days = (u_int16_t*)days_since_leapyear;
++			}
++			else
++			{
++				month_start_days = (u_int16_t*)days_since_year;
++			}
++			for (month = 11 ; month > 0 && month_start_days[month] > year_day; month--){}
++
++			/* end majority of yoinkage */
++
++			alt_reset = (days_since_epoch_for_each_year_start[year_index] + month_start_days[month])*(60*60*24) + info->reset_time; /* reset offset within the CURRENT month */
++			if(alt_reset > now)
++			{
++				next_reset = alt_reset;
++			}
++			else if(month == 11)
++			{
++				next_reset = days_since_epoch_for_each_year_start[year_index-1]*(60*60*24) + info->reset_time; /* December: roll into next year (array is reverse-ordered, so index-1) */
++			}
++			else
++			{
++				next_reset = (days_since_epoch_for_each_year_start[year_index] + month_start_days[month+1])*(60*60*24) + info->reset_time;
++			}
++		}
++	}
++	else
++	{
++		if(info->reset_time > 0 && previous_reset > 0 && previous_reset <= now)
++		{
++			unsigned long adj_reset_time = info->reset_time;
++			unsigned long tz_secs = 60 * local_minutes_west;
++			if(adj_reset_time < tz_secs)
++			{
++				unsigned long interval_multiple = 1+(tz_secs/info->reset_interval); /* keep the adjusted anchor non-negative */
++				adj_reset_time = adj_reset_time + (interval_multiple*info->reset_interval);
++			}
++			adj_reset_time = adj_reset_time - tz_secs; /* NOTE(review): adj_reset_time is computed but never used below (branches use info->reset_time) -- verify against upstream */
++
++			if(info->reset_time > now)
++			{
++				unsigned long whole_intervals = ((info->reset_time - now)/info->reset_interval) + 1; /* add one to make sure integer gets rounded UP (since we're subtracting) */
++				next_reset = info->reset_time - (whole_intervals*info->reset_interval);
++				while(next_reset <= now)
++				{
++					next_reset = next_reset + info->reset_interval;
++				}
++
++			}
++			else /* info->reset_time <= now */
++			{
++				unsigned long whole_intervals = (now-info->reset_time)/info->reset_interval; /* integer gets rounded down */
++				next_reset = info->reset_time + (whole_intervals*info->reset_interval);
++				while(next_reset <= now)
++				{
++					next_reset = next_reset + info->reset_interval;
++				}
++			}
++		}
++		else if(previous_reset > 0)
++		{
++			next_reset = previous_reset;
++			if(next_reset <= now) /* check just to be sure, if this is not true VERY BAD THINGS will happen */
++			{
++				unsigned long whole_intervals = (now-next_reset)/info->reset_interval; /* integer gets rounded down */
++				next_reset = next_reset + (whole_intervals*info->reset_interval);
++				while(next_reset <= now)
++				{
++					next_reset = next_reset + info->reset_interval;
++				}
++			}
++		}
++		else
++		{
++			next_reset = now + info->reset_interval; /* no anchor at all: one full interval from now */
++		}
++	}
++
++	return next_reset;
++}
++
++
++
++static uint64_t* initialize_map_entries_for_ip(info_and_maps* iam, unsigned long ip, uint64_t initial_bandwidth) /* create bandwidth (and, if configured, history) entries for ip; returns pointer to the ip's counter, or NULL on allocation failure */
++{
++	#ifdef BANDWIDTH_DEBUG
++		printk("initializing entry for ip, bw=%lld\n", initial_bandwidth);
++	#endif
++
++	#ifdef BANDWIDTH_DEBUG
++		if(iam == NULL){ printk("error in initialization: iam is null!\n"); }
++	#endif
++
++
++	uint64_t* new_bw = NULL;
++	if(iam != NULL) /* should never happen, but let's be certain */
++	{
++		struct ipt_bandwidth_info *info = iam->info;
++		long_map* ip_map = iam->ip_map;
++		long_map* ip_history_map = iam->ip_history_map;
++
++		#ifdef BANDWIDTH_DEBUG
++			if(info == NULL){ printk("error in initialization: info is null!\n"); }
++			if(ip_map == NULL){ printk("error in initialization: ip_map is null!\n"); }
++		#endif
++
++
++		if(info != NULL && ip_map != NULL) /* again... should never happen but let's be sure */
++		{
++			if(info->num_intervals_to_save == 0 || ip_history_map == NULL)
++			{
++				#ifdef BANDWIDTH_DEBUG
++					printk("   initializing entry for ip without history\n");
++				#endif
++				new_bw = (uint64_t*)kmalloc(sizeof(uint64_t), GFP_ATOMIC); /* standalone counter -- not backed by a history ring */
++			}
++			else
++			{
++				#ifdef BANDWIDTH_DEBUG
++					printk("   initializing entry for ip with history\n");
++				#endif
++
++				bw_history *new_history = initialize_history(info->num_intervals_to_save);
++				if(new_history != NULL) /* check for kmalloc failure */
++				{
++					bw_history* old_history;
++					#ifdef BANDWIDTH_DEBUG
++						printk("   malloc succeeded, new history is non-null\n");
++					#endif
++
++					new_bw = (uint64_t*)(new_history->history_data + new_history->current_index); /* counter lives inside the history ring */
++					old_history = set_long_map_element(ip_history_map, ip, (void*)new_history);
++					if(old_history != NULL)
++					{
++						#ifdef BANDWIDTH_DEBUG
++							printk("   after initialization old_history not null! (something is FUBAR)\n");
++						#endif
++						kfree(old_history->history_data);
++						kfree(old_history);
++					}
++
++					#ifdef BANDWIDTH_DEBUG
++
++					#endif
++				}
++			}
++			if(new_bw != NULL) /* check for kmalloc failure */
++			{
++				uint64_t* old_bw;
++				*new_bw = initial_bandwidth;
++				old_bw = set_long_map_element(ip_map, ip, (void*)new_bw );
++
++				/* only free old_bw if num_intervals_to_save is zero -- otherwise it already got freed above when we wiped the old history */
++				if(old_bw != NULL && info->num_intervals_to_save == 0)
++				{
++					kfree(old_bw); /* fixed: was free(), which does not exist in the kernel -- counter was allocated with kmalloc */
++				}
++
++				if(ip == 0)
++				{
++					info->combined_bw = new_bw; /* ip 0 is the special combined-total entry */
++				}
++
++				#ifdef BANDWIDTH_DEBUG
++					uint64_t *test = (uint64_t*)get_long_map_element(ip_map, ip);
++					if(test == NULL)
++					{
++						printk("   after initialization bw is null!\n");
++					}
++					else
++					{
++						printk("   after initialization bw is %lld\n", *new_bw);
++						printk("   after initialization test is %lld\n", *test);
++					}
++				#endif
++			}
++		}
++	}
++
++	return new_bw;
++}
++
++
++static bool match(const struct sk_buff *skb, struct xt_action_param *par) /* xtables match entry point: account skb->len against the rule's counters, then evaluate the configured comparison */
++{
++
++	struct ipt_bandwidth_info *info = ((const struct ipt_bandwidth_info*)(par->matchinfo))->non_const_self;
++
++	time_t now;
++	int match_found;
++
++
++	unsigned char is_check = info->cmp == BANDWIDTH_CHECK ? 1 : 0; /* check rules only read another rule's counters -- they never accumulate */
++	unsigned char do_src_dst_swap = 0;
++	info_and_maps* iam = NULL;
++	long_map* ip_map = NULL;
++
++	uint64_t* bws[2] = {NULL, NULL};
++
++	/* if we're currently setting this id, ignore new data until set is complete */
++	if(set_in_progress == 1)
++	{
++		if(strcmp(info->id, set_id) == 0)
++		{
++			return 0;
++		}
++	}
++
++
++
++
++	/*
++	 * BEFORE we lock, check for timezone shift
++	 * this will almost always be be very,very quick,
++	 * but in the event there IS a shift this
++	 * function will lock both kernel update spinlock
++	 * and userspace i/o semaphore, and do a lot of
++	 * number crunching so we shouldn't
++	 * already be locked.
++	 */
++	now = get_seconds();
++
++
++	if(now != last_local_mw_update )
++	{
++		check_for_timezone_shift(now, 0);
++		check_for_backwards_time_shift(now);
++	}
++	now = now - local_seconds_west; /* Adjust for local timezone */
++
++	spin_lock_bh(&bandwidth_lock);
++
++	if(is_check)
++	{
++		info_and_maps* check_iam;
++		do_src_dst_swap = info->check_type == BANDWIDTH_CHECK_SWAP ? 1 : 0;
++		check_iam = (info_and_maps*)get_string_map_element_with_hashed_key(id_map, info->hashed_id); /* check rules reference the monitored rule by id */
++		if(check_iam == NULL)
++		{
++			spin_unlock_bh(&bandwidth_lock);
++			return 0;
++		}
++		info = check_iam->info; /* from here on, operate on the referenced rule's info */
++	}
++
++
++
++
++	if(info->reset_interval != BANDWIDTH_NEVER)
++	{
++		if(info->next_reset < now)
++		{
++			//do reset
++			//iam = (info_and_maps*)get_string_map_element_with_hashed_key(id_map, info->hashed_id);
++			iam = (info_and_maps*)info->iam;
++			if(iam != NULL) /* should never be null, but let's be sure */
++			{
++				handle_interval_reset(iam, now);
++				ip_map = iam->ip_map;
++			}
++			else
++			{
++				/* even in case of malloc failure or weird error we can update these params */
++				info->current_bandwidth = 0;
++				info->next_reset = get_next_reset_time(info, now, info->previous_reset);
++			}
++		}
++	}
++
++	if(info->type == BANDWIDTH_COMBINED)
++	{
++		if(iam == NULL)
++		{
++			//iam = (info_and_maps*)get_string_map_element_with_hashed_key(id_map, info->hashed_id);
++			iam = (info_and_maps*)info->iam;
++			if(iam != NULL)
++			{
++				ip_map = iam->ip_map;
++			}
++		}
++		if(ip_map != NULL) /* if this ip_map != NULL iam can never be NULL, so we don't need to check this */
++		{
++
++			if(info->combined_bw == NULL)
++			{
++				bws[0] = initialize_map_entries_for_ip(iam, 0, skb->len); /* lazily create the combined (ip 0) entry */
++			}
++			else
++			{
++				bws[0] = info->combined_bw;
++				*(bws[0]) = ADD_UP_TO_MAX(*(bws[0]), (uint64_t)skb->len, is_check);
++			}
++		}
++		else
++		{
++			#ifdef BANDWIDTH_DEBUG
++				printk("error: ip_map is null in match!\n");
++			#endif
++		}
++		info->current_bandwidth = ADD_UP_TO_MAX(info->current_bandwidth, (uint64_t)skb->len, is_check);
++	}
++	else
++	{
++		uint32_t bw_ip, bw_ip_index;
++		uint32_t bw_ips[2] = {0, 0};
++		struct iphdr* iph = (struct iphdr*)(skb_network_header(skb));
++		if(info->type == BANDWIDTH_INDIVIDUAL_SRC)
++		{
++			//src ip
++			bw_ips[0] = iph->saddr;
++			if(do_src_dst_swap)
++			{
++				bw_ips[0] = iph->daddr;
++			}
++		}
++		else if (info->type == BANDWIDTH_INDIVIDUAL_DST)
++		{
++			//dst ip
++			bw_ips[0] = iph->daddr;
++			if(do_src_dst_swap)
++			{
++				bw_ips[0] = iph->saddr;
++			}
++		}
++		else if(info->type == BANDWIDTH_INDIVIDUAL_LOCAL || info->type == BANDWIDTH_INDIVIDUAL_REMOTE)
++		{
++			//remote or local ip -- need to test both src && dst
++			uint32_t src_ip = iph->saddr;
++			uint32_t dst_ip = iph->daddr;
++			if(info->type == BANDWIDTH_INDIVIDUAL_LOCAL)
++			{
++				bw_ips[0] = ((info->local_subnet_mask & src_ip) == info->local_subnet) ? src_ip : 0; /* 0 marks "not applicable" */
++				bw_ips[1] = ((info->local_subnet_mask & dst_ip) == info->local_subnet) ? dst_ip : 0;
++			}
++			else if(info->type == BANDWIDTH_INDIVIDUAL_REMOTE)
++			{
++				bw_ips[0] = ((info->local_subnet_mask & src_ip) != info->local_subnet ) ? src_ip : 0;
++				bw_ips[1] = ((info->local_subnet_mask & dst_ip) != info->local_subnet ) ? dst_ip : 0;
++			}
++		}
++
++		if(ip_map == NULL)
++		{
++			//iam = (info_and_maps*)get_string_map_element_with_hashed_key(id_map, info->hashed_id);
++			iam = (info_and_maps*)info->iam;
++			if(iam != NULL)
++			{
++				ip_map = iam->ip_map;
++			}
++		}
++		if(!is_check && info->cmp == BANDWIDTH_MONITOR)
++		{
++			uint64_t* combined_oldval = info->combined_bw; /* monitor rules also maintain the combined (ip 0) total */
++			if(combined_oldval == NULL)
++			{
++				combined_oldval = initialize_map_entries_for_ip(iam, 0, (uint64_t)skb->len);
++			}
++			else
++			{
++				*combined_oldval = ADD_UP_TO_MAX(*combined_oldval, (uint64_t)skb->len, is_check);
++			}
++		}
++		bw_ip_index = bw_ips[0] == 0 ? 1 : 0;
++		bw_ip = bw_ips[bw_ip_index];
++		if(bw_ip != 0 && ip_map != NULL)
++		{
++			uint64_t* oldval = get_long_map_element(ip_map, (unsigned long)bw_ip);
++			if(oldval == NULL)
++			{
++				if(!is_check)
++				{
++					/* may return NULL on malloc failure but that's ok */
++					oldval = initialize_map_entries_for_ip(iam, (unsigned long)bw_ip, (uint64_t)skb->len);
++				}
++			}
++			else
++			{
++				*oldval = ADD_UP_TO_MAX(*oldval, (uint64_t)skb->len, is_check);
++			}
++
++			/* this is fine, setting bws[bw_ip_index] to NULL on check for undefined value or kmalloc failure won't crash anything */
++			bws[bw_ip_index] = oldval;
++		}
++
++	}
++
++
++	match_found = 0;
++	if(info->cmp != BANDWIDTH_MONITOR) /* monitor rules never match -- they only accumulate */
++	{
++		if(info->cmp == BANDWIDTH_GT)
++		{
++			match_found = bws[0] != NULL ? ( *(bws[0]) > info->bandwidth_cutoff ? 1 : match_found ) : match_found;
++			match_found = bws[1] != NULL ? ( *(bws[1]) > info->bandwidth_cutoff ? 1 : match_found ) : match_found;
++			match_found = info->current_bandwidth > info->bandwidth_cutoff ? 1 : match_found;
++		}
++		else if(info->cmp == BANDWIDTH_LT)
++		{
++			match_found = bws[0] != NULL ? ( *(bws[0]) < info->bandwidth_cutoff ? 1 : match_found ) : match_found;
++			match_found = bws[1] != NULL ? ( *(bws[1]) < info->bandwidth_cutoff ? 1 : match_found ) : match_found;
++			match_found = info->current_bandwidth < info->bandwidth_cutoff ? 1 : match_found;
++		}
++	}
++
++
++	spin_unlock_bh(&bandwidth_lock);
++
++
++
++
++
++	return match_found;
++}
++
++
++
++
++
++
++
++
++
++
++/**********************
++ * Get functions
++ *********************/
++
++#define MAX_IP_STR_LENGTH 16
++
++#define ERROR_NONE 0 /* result codes returned to userspace get requests */
++#define ERROR_NO_ID 1
++#define ERROR_BUFFER_TOO_SHORT 2
++#define ERROR_NO_HISTORY 3
++#define ERROR_UNKNOWN 4
++typedef struct get_req_struct /* a parsed userspace query */
++{
++	uint32_t ip; /* ip being queried */
++	uint32_t next_ip_index; /* resume point for multi-call dumps */
++	unsigned char return_history; /* nonzero = dump full history, not just current totals */
++	char id[BANDWIDTH_MAX_ID_LENGTH];
++} get_request;
++
++static unsigned long* output_ip_list = NULL;
++static unsigned long output_ip_list_length = 0;
++
++static char add_ip_block(	uint32_t ip,
++				unsigned char full_history_requested,
++				info_and_maps* iam,
++				unsigned char* output_buffer,
++				uint32_t* current_output_index,
++				uint32_t buffer_length
++				);
++static void parse_get_request(unsigned char* request_buffer, get_request* parsed_request);
++static int handle_get_failure(int ret_value, int unlock_user_sem, int unlock_bandwidth_spin, unsigned char error_code, unsigned char* out_buffer, unsigned char* free_buffer );
++
++
++/*
++ * returns whether we succeeded in adding ip block, 0= success,
++ * otherwise error code of problem that we found
++ */
++static char add_ip_block( uint32_t ip,
++ unsigned char full_history_requested,
++ info_and_maps* iam,
++ unsigned char* output_buffer,
++ uint32_t* current_output_index,
++ uint32_t output_buffer_length
++ )
++{
++ #ifdef BANDWIDTH_DEBUG
++ uint32_t *ipp = &ip;
++ printk("doing output for ip = %u.%u.%u.%u\n", *((unsigned char*)ipp), *(((unsigned char*)ipp)+1), *(((unsigned char*)ipp)+2), *(((unsigned char*)ipp)+3) );
++ #endif
++
++ if(full_history_requested)
++ {
++ bw_history* history = NULL;
++ if(iam->info->num_intervals_to_save > 0 && iam->ip_history_map != NULL)
++ {
++ history = (bw_history*)get_long_map_element(iam->ip_history_map, ip);
++ }
++ if(history == NULL)
++ {
++ #ifdef BANDWIDTH_DEBUG
++ printk(" no history map for ip, dumping latest value in history format\n" );
++ #endif
++
++
++ uint32_t block_length = (2*4) + (3*8);
++ uint64_t *bw;
++
++ if(*current_output_index + block_length > output_buffer_length)
++ {
++ return ERROR_BUFFER_TOO_SHORT;
++ }
++ *( (uint32_t*)(output_buffer + *current_output_index) ) = ip;
++ *current_output_index = *current_output_index + 4;
++
++ *( (uint32_t*)(output_buffer + *current_output_index) ) = 1;
++ *current_output_index = *current_output_index + 4;
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = (uint64_t)iam->info->previous_reset + (60 * local_minutes_west);
++ *current_output_index = *current_output_index + 8;
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = (uint64_t)iam->info->previous_reset + (60 * local_minutes_west);
++ *current_output_index = *current_output_index + 8;
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = (uint64_t)iam->info->previous_reset + (60 * local_minutes_west);
++ *current_output_index = *current_output_index + 8;
++
++ bw = (uint64_t*)get_long_map_element(iam->ip_map, ip);
++ if(bw == NULL)
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = 0;
++ }
++ else
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = *bw;
++ }
++ *current_output_index = *current_output_index + 8;
++
++ }
++ else
++ {
++ uint32_t block_length = (2*4) + (3*8) + (8*history->num_nodes);
++ uint64_t last_reset;
++ uint32_t node_num;
++ uint32_t next_index;
++
++ if(*current_output_index + block_length > output_buffer_length)
++ {
++ return ERROR_BUFFER_TOO_SHORT;
++ }
++
++ *( (uint32_t*)(output_buffer + *current_output_index) ) = ip;
++ *current_output_index = *current_output_index + 4;
++
++ *( (uint32_t*)(output_buffer + *current_output_index) )= history->num_nodes;
++ *current_output_index = *current_output_index + 4;
++
++
++
++ /* need to return times in regular UTC not the UTC - minutes west, which is useful for processing */
++ last_reset = (uint64_t)iam->info->previous_reset + (60 * local_minutes_west);
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = history->first_start > 0 ? (uint64_t)history->first_start + (60 * local_minutes_west) : last_reset;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" dumping first start = %lld\n", *( (uint64_t*)(output_buffer + *current_output_index) ) );
++ #endif
++ *current_output_index = *current_output_index + 8;
++
++
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = history->first_end > 0 ? (uint64_t)history->first_end + (60 * local_minutes_west) : last_reset;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" dumping first end = %lld\n", *( (uint64_t*)(output_buffer + *current_output_index) ) );
++ #endif
++ *current_output_index = *current_output_index + 8;
++
++
++
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = history->last_end > 0 ? (uint64_t)history->last_end + (60 * local_minutes_west) : last_reset;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" dumping last end = %lld\n", *( (uint64_t*)(output_buffer + *current_output_index) ) );
++ #endif
++ *current_output_index = *current_output_index + 8;
++
++
++
++ node_num = 0;
++ next_index = history->num_nodes == history->max_nodes ? history->current_index+1 : 0;
++ next_index = next_index >= history->max_nodes ? 0 : next_index;
++ for(node_num=0; node_num < history->num_nodes; node_num++)
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = (history->history_data)[ next_index ];
++ *current_output_index = *current_output_index + 8;
++ next_index = (next_index + 1) % history->max_nodes;
++ }
++ }
++ }
++ else
++ {
++ uint64_t *bw;
++ if(*current_output_index + 8 > output_buffer_length)
++ {
++ return ERROR_BUFFER_TOO_SHORT;
++ }
++
++ *( (uint32_t*)(output_buffer + *current_output_index) ) = ip;
++ *current_output_index = *current_output_index + 4;
++
++
++ bw = (uint64_t*)get_long_map_element(iam->ip_map, ip);
++ if(bw == NULL)
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = 0;
++ }
++ else
++ {
++ *( (uint64_t*)(output_buffer + *current_output_index) ) = *bw;
++ }
++ *current_output_index = *current_output_index + 8;
++ }
++ return ERROR_NONE;
++}
++
++
++
++/*
++ * convenience method for cleaning crap up after failed malloc or other
++ * error that we can't recover from in get function
++ */
++static int handle_get_failure(int ret_value, int unlock_user_sem, int unlock_bandwidth_spin, unsigned char error_code, unsigned char* out_buffer, unsigned char* free_buffer )
++{
++ copy_to_user(out_buffer, &error_code, 1);
++ if( free_buffer != NULL ) { kfree(free_buffer); }
++ if(unlock_bandwidth_spin) { spin_unlock_bh(&bandwidth_lock); }
++ if(unlock_user_sem) { up(&userspace_lock); }
++ return ret_value;
++}
++
++/*
++ * request structure:
++ * bytes 1:4 is ip (uint32_t)
++ * bytes 5:8 is the next ip index (uint32_t)
++ * byte 9 is whether to return full history or just current usage (unsigned char)
++ * bytes 10:10+MAX_ID_LENGTH are the id (a string)
++ */
++static void parse_get_request(unsigned char* request_buffer, get_request* parsed_request)
++{
++ uint32_t* ip = (uint32_t*)(request_buffer+0);
++ uint32_t* next_ip_index = (uint32_t*)(request_buffer+4);
++ unsigned char* return_history = (unsigned char*)(request_buffer+8);
++
++
++
++ parsed_request->ip = *ip;
++ parsed_request->next_ip_index = *next_ip_index;
++ parsed_request->return_history = *return_history;
++ memcpy(parsed_request->id, request_buffer+9, BANDWIDTH_MAX_ID_LENGTH);
++ (parsed_request->id)[BANDWIDTH_MAX_ID_LENGTH-1] = '\0'; /* make sure id is null terminated no matter what */
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("ip = %u.%u.%u.%u\n", *((char*)ip), *(((char*)ip)+1), *(((char*)ip)+2), *(((char*)ip)+3) );
++ printk("next ip index = %d\n", *next_ip_index);
++ printk("return_history = %d\n", *return_history);
++ #endif
++}
++
++
++static int ipt_bandwidth_get_ctl(struct sock *sk, int cmd, void *user, int *len)
++{
++ /* check for timezone shift & adjust if necessary */
++ char* buffer;
++ get_request query;
++ info_and_maps* iam;
++
++ unsigned char* error;
++ uint32_t* total_ips;
++ uint32_t* start_index;
++ uint32_t* num_ips_in_response;
++ uint64_t* reset_interval;
++ uint64_t* reset_time;
++ unsigned char* reset_is_constant_interval;
++ uint32_t current_output_index;
++ time_t now = get_seconds();
++ check_for_timezone_shift(now, 0);
++ check_for_backwards_time_shift(now);
++ now = now - local_seconds_west; /* Adjust for local timezone */
++
++
++ down(&userspace_lock);
++
++
++ /* first check that query buffer is big enough to hold the info needed to parse the query */
++ if(*len < BANDWIDTH_MAX_ID_LENGTH + 9)
++ {
++
++ return handle_get_failure(0, 1, 0, ERROR_BUFFER_TOO_SHORT, user, NULL);
++ }
++
++
++
++ /* copy the query from userspace to kernel space & parse */
++ buffer = kmalloc(*len, GFP_ATOMIC);
++ if(buffer == NULL) /* check for malloc failure */
++ {
++ return handle_get_failure(0, 1, 0, ERROR_UNKNOWN, user, NULL);
++ }
++ copy_from_user(buffer, user, *len);
++ parse_get_request(buffer, &query);
++
++
++
++
++
++
++ /*
++ * retrieve data for this id and verify all variables are properly defined, just to be sure
++ * this is a kernel module -- it pays to be paranoid!
++ */
++ spin_lock_bh(&bandwidth_lock);
++
++ iam = (info_and_maps*)get_string_map_element(id_map, query.id);
++
++ if(iam == NULL)
++ {
++ return handle_get_failure(0, 1, 1, ERROR_NO_ID, user, buffer);
++ }
++ if(iam->info == NULL || iam->ip_map == NULL)
++ {
++ return handle_get_failure(0, 1, 1, ERROR_NO_ID, user, buffer);
++ }
++ if(iam->info->num_intervals_to_save > 0 && iam->ip_history_map == NULL)
++ {
++ return handle_get_failure(0, 1, 1, ERROR_NO_ID, user, buffer);
++ }
++
++ /* allocate ip list if this is first query */
++ if(query.next_ip_index == 0 && query.ip == 0)
++ {
++ if(output_ip_list != NULL)
++ {
++ kfree(output_ip_list);
++ }
++ if(iam->info->type == BANDWIDTH_COMBINED)
++ {
++ output_ip_list_length = 1;
++ output_ip_list = (unsigned long*)kmalloc(sizeof(unsigned long), GFP_ATOMIC);
++ if(output_ip_list != NULL) { *output_ip_list = 0; }
++ }
++ else
++ {
++ output_ip_list = get_sorted_long_map_keys(iam->ip_map, &output_ip_list_length);
++ }
++
++ if(output_ip_list == NULL)
++ {
++ return handle_get_failure(0, 1, 1, ERROR_UNKNOWN, user, buffer);
++ }
++ }
++
++ /* if this is not first query do a sanity check -- make sure it's within bounds of allocated ip list */
++ if(query.next_ip_index > 0 && (output_ip_list == NULL || query.next_ip_index > output_ip_list_length))
++ {
++ return handle_get_failure(0, 1, 1, ERROR_UNKNOWN, user, buffer);
++ }
++
++
++
++
++ /*
++ // values only reset when a packet hits a rule, so
++ // reset may have expired without data being reset.
++ // So, test if we need to reset values to zero
++ */
++ if(iam->info->reset_interval != BANDWIDTH_NEVER)
++ {
++ if(iam->info->next_reset < now)
++ {
++ //do reset
++ handle_interval_reset(iam, now);
++ }
++ }
++
++
++
++ /* compute response & store it in buffer
++ *
++ * format of response:
++ * byte 1 : error code (0 for ok)
++ * bytes 2-5 : total_num_ips found in query (further gets may be necessary to retrieve them)
++ * bytes 6-9 : start_index, index (in a list of total_num_ips) of first ip in response
++ * bytes 10-13 : num_ips_in_response, number of ips in this response
++ * bytes 14-21 : reset_interval (helps deal with DST shifts in userspace)
++ * bytes 22-29 : reset_time (helps deal with DST shifts in userspace)
++ * byte 30 : reset_is_constant_interval (helps deal with DST shifts in userspace)
++ * remaining bytes contain blocks of ip data
++ * format is dependent on whether history was queried
++ *
++ * if history was NOT queried we have
++ * bytes 1-4 : ip
++ * bytes 5-12 : bandwidth
++ *
++ * if history WAS queried we have
++ * (note we are using 64 bit integers for time here
++ * even though time_t is 32 bits on most 32 bit systems
++ * just to be on the safe side)
++ * bytes 1-4 : ip
++	 * bytes 5-8 : history_length number of history values (including current)
++ * bytes 9-16 : first start
++ * bytes 17-24 : first end
++ * bytes 25-32 : recent end
++ * 33 onward : list of 64 bit integers of length history_length
++ *
++ */
++ error = buffer;
++ total_ips = (uint32_t*)(buffer+1);
++ start_index = (uint32_t*)(buffer+5);
++ num_ips_in_response = (uint32_t*)(buffer+9);
++ reset_interval = (uint64_t*)(buffer+13);
++ reset_time = (uint64_t*)(buffer+21);
++ reset_is_constant_interval = (char*)(buffer+29);
++
++ *reset_interval = (uint64_t)iam->info->reset_interval;
++ *reset_time = (uint64_t)iam->info->reset_time;
++ *reset_is_constant_interval = iam->info->reset_is_constant_interval;
++
++ current_output_index = 30;
++ if(query.ip != 0)
++ {
++ *error = add_ip_block( query.ip,
++ query.return_history,
++ iam,
++ buffer,
++ &current_output_index,
++ *len
++ );
++
++ *total_ips = *error == 0;
++ *start_index = 0;
++ *num_ips_in_response = *error == 0 ? 1 : 0;
++ }
++ else
++ {
++ uint32_t next_index = query.next_ip_index;
++ *error = ERROR_NONE;
++ *total_ips = output_ip_list_length;
++ *start_index = next_index;
++ *num_ips_in_response = 0;
++ while(*error == ERROR_NONE && next_index < output_ip_list_length)
++ {
++ uint32_t next_ip = output_ip_list[next_index];
++ *error = add_ip_block( next_ip,
++ query.return_history,
++ iam,
++ buffer,
++ &current_output_index,
++ *len
++ );
++ if(*error == ERROR_NONE)
++ {
++ *num_ips_in_response = *num_ips_in_response + 1;
++ next_index++;
++ }
++ }
++ if(*error == ERROR_BUFFER_TOO_SHORT && *num_ips_in_response > 0)
++ {
++ *error = ERROR_NONE;
++ }
++ if(next_index == output_ip_list_length)
++ {
++ kfree(output_ip_list);
++ output_ip_list = NULL;
++ output_ip_list_length = 0;
++ }
++ }
++
++ spin_unlock_bh(&bandwidth_lock);
++
++ copy_to_user(user, buffer, *len);
++ kfree(buffer);
++
++
++
++ up(&userspace_lock);
++
++
++ return 0;
++}
++
++
++
++
++
++/********************
++ * Set functions
++ ********************/
++
++typedef struct set_header_struct
++{
++ uint32_t total_ips;
++ uint32_t next_ip_index;
++ uint32_t num_ips_in_buffer;
++ unsigned char history_included;
++ unsigned char zero_unset_ips;
++ time_t last_backup;
++ char id[BANDWIDTH_MAX_ID_LENGTH];
++} set_header;
++
++static int handle_set_failure(int ret_value, int unlock_user_sem, int unlock_bandwidth_spin, unsigned char* free_buffer );
++static void parse_set_header(unsigned char* input_buffer, set_header* header);
++static void set_single_ip_data(unsigned char history_included, info_and_maps* iam, unsigned char* buffer, uint32_t* buffer_index, time_t now);
++
++static int handle_set_failure(int ret_value, int unlock_user_sem, int unlock_bandwidth_spin, unsigned char* free_buffer )
++{
++ if( free_buffer != NULL ) { kfree(free_buffer); }
++ set_in_progress = 0;
++ if(unlock_bandwidth_spin) { spin_unlock_bh(&bandwidth_lock); }
++ if(unlock_user_sem) { up(&userspace_lock); }
++ return ret_value;
++}
++
++static void parse_set_header(unsigned char* input_buffer, set_header* header)
++{
++ /*
++ * set header structure:
++ * bytes 1-4 : total_ips being set in this and subsequent requests
++ * bytes 5-8 : next_ip_index, first ip being set in this set command
++ * bytes 9-12 : num_ips_in_buffer, the number of ips in this set request
++ * byte 13 : history_included (whether history data is included, or just current data)
++ * byte 14 : zero_unset_ips, whether to zero all ips not included in this and subsequent requests
++ * bytes 15-22 : last_backup time (64 bit)
++ * bytes 23-23+BANDWIDTH_MAX_ID_LENGTH : id
++	 * bytes 23+BANDWIDTH_MAX_ID_LENGTH onward : ip data
++ */
++
++ uint32_t* total_ips = (uint32_t*)(input_buffer+0);
++ uint32_t* next_ip_index = (uint32_t*)(input_buffer+4);
++ uint32_t* num_ips_in_buffer = (uint32_t*)(input_buffer+8);
++ unsigned char* history_included = (unsigned char*)(input_buffer+12);
++ unsigned char* zero_unset_ips = (unsigned char*)(input_buffer+13);
++ uint64_t* last_backup = (uint64_t*)(input_buffer+14);
++
++
++ header->total_ips = *total_ips;
++ header->next_ip_index = *next_ip_index;
++ header->num_ips_in_buffer = *num_ips_in_buffer;
++ header->history_included = *history_included;
++ header->zero_unset_ips = *zero_unset_ips;
++ header->last_backup = (time_t)*last_backup;
++ memcpy(header->id, input_buffer+22, BANDWIDTH_MAX_ID_LENGTH);
++ (header->id)[BANDWIDTH_MAX_ID_LENGTH-1] = '\0'; /* make sure id is null terminated no matter what */
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("parsed set header:\n");
++ printk(" total_ips = %d\n", header->total_ips);
++ printk(" next_ip_index = %d\n", header->next_ip_index);
++ printk(" num_ips_in_buffer = %d\n", header->num_ips_in_buffer);
++ printk(" zero_unset_ips = %d\n", header->zero_unset_ips);
++ printk(" last_backup = %ld\n", header->last_backup);
++ printk(" id = %s\n", header->id);
++ #endif
++}
++static void set_single_ip_data(unsigned char history_included, info_and_maps* iam, unsigned char* buffer, uint32_t* buffer_index, time_t now)
++{
++ /*
++ * note that times stored within the module are adjusted so they are equal to seconds
++	 * since unix epoch that corresponds to the UTC wall-clock time (timezone offset 0)
++ * that is equal to the wall-clock time in the current time-zone. Incoming values must
++ * be adjusted similarly
++ */
++ uint32_t ip = *( (uint32_t*)(buffer + *buffer_index) );
++
++ #ifdef BANDWIDTH_DEBUG
++ uint32_t* ipp = &ip;
++ printk("doing set for ip = %u.%u.%u.%u\n", *((unsigned char*)ipp), *(((unsigned char*)ipp)+1), *(((unsigned char*)ipp)+2), *(((unsigned char*)ipp)+3) );
++ printk("ip index = %d\n", *buffer_index);
++ #endif
++
++ if(history_included)
++ {
++ uint32_t num_history_nodes = *( (uint32_t*)(buffer + *buffer_index+4));
++ if(iam->info->num_intervals_to_save > 0 && iam->ip_history_map != NULL)
++ {
++ time_t first_start = (time_t) *( (uint64_t*)(buffer + *buffer_index+8));
++ /* time_t first_end = (time_t) *( (uint64_t*)(buffer + *buffer_index+16)); //not used */
++ /* time_t last_end = (time_t) *( (uint64_t*)(buffer + *buffer_index+24)); //not used */
++ time_t next_start;
++ time_t next_end;
++ uint32_t node_index;
++ uint32_t zero_count;
++ bw_history* history;
++
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("setting history with first start = %ld, now = %ld\n", first_start, now);
++ #endif
++
++
++ *buffer_index = *buffer_index + (2*4) + (3*8);
++
++ /* adjust for timezone */
++ next_start = first_start - (60 * local_minutes_west);
++ next_end = get_next_reset_time(iam->info, next_start, next_start);
++ node_index=0;
++ zero_count=0;
++ history = NULL;
++ while(next_start < now)
++ {
++ uint64_t next_bw = 0;
++ if(node_index < num_history_nodes)
++ {
++ next_bw = *( (uint64_t*)(buffer + *buffer_index));
++ *buffer_index = *buffer_index + 8;
++ }
++ zero_count = next_bw == 0 ? zero_count+1 : 0;
++
++ if(node_index == 0 || history == NULL)
++ {
++ initialize_map_entries_for_ip(iam, ip, next_bw);
++ history = get_long_map_element(iam->ip_history_map, (unsigned long)ip);
++ }
++ else if(next_end < now) /* if this is most recent node, don't do update since last node is current bandwidth */
++ {
++ update_history(history, next_start, next_end, iam->info);
++ (history->history_data)[ history->current_index ] = next_bw;
++ if(zero_count < history->max_nodes +2)
++ {
++ next_start = next_end;
++ next_end = get_next_reset_time(iam->info, next_start, next_start);
++ }
++ else
++ {
++ /* do history reset */
++ history->first_start = 0;
++ history->first_end = 0;
++ history->last_end = 0;
++ history->num_nodes = 1;
++ history->non_zero_nodes = 1;
++ history->current_index = 0;
++ (history->history_data)[0] = 0;
++
++ next_start = now;
++ next_end = get_next_reset_time(iam->info, now, next_start);
++ }
++ }
++ else /* if this is most recent node, we still need to exit loop*/
++ {
++ break;
++ }
++ node_index++;
++ }
++ while(node_index < num_history_nodes)
++ {
++ *buffer_index = *buffer_index + 8;
++ node_index++;
++ }
++ if(history != NULL)
++ {
++ set_long_map_element(iam->ip_map, ip, (history->history_data + history->current_index) );
++ iam->info->previous_reset = next_start;
++ iam->info->next_reset = next_end;
++ if(ip == 0)
++ {
++ iam->info->current_bandwidth = (history->history_data)[history->current_index];
++ }
++ }
++ }
++ else
++ {
++ uint64_t bw;
++ *buffer_index = *buffer_index + (2*4) + (3*8) + ((num_history_nodes-1)*8);
++ bw = *( (uint64_t*)(buffer + *buffer_index));
++ initialize_map_entries_for_ip(iam, ip, bw); /* automatically frees existing values if they exist */
++ *buffer_index = *buffer_index + 8;
++ if(ip == 0)
++ {
++ iam->info->current_bandwidth = bw;
++ }
++ }
++
++ }
++ else
++ {
++ uint64_t bw = *( (uint64_t*)(buffer + *buffer_index+4) );
++ #ifdef BANDWIDTH_DEBUG
++ printk(" setting bw to %lld\n", bw );
++ #endif
++
++
++ initialize_map_entries_for_ip(iam, ip, bw); /* automatically frees existing values if they exist */
++ *buffer_index = *buffer_index + 12;
++
++ if(ip == 0)
++ {
++ iam->info->current_bandwidth = bw;
++ }
++ }
++
++
++}
++
++static int ipt_bandwidth_set_ctl(struct sock *sk, int cmd, void *user, u_int32_t len)
++{
++ /* check for timezone shift & adjust if necessary */
++ char* buffer;
++ set_header header;
++ info_and_maps* iam;
++ uint32_t buffer_index;
++ uint32_t next_ip_index;
++ time_t now = get_seconds();
++ check_for_timezone_shift(now, 0);
++ check_for_backwards_time_shift(now);
++ now = now - local_seconds_west; /* Adjust for local timezone */
++
++
++ /* just return right away if user buffer is too short to contain even the header */
++ if(len < (3*4) + 2 + 8 + BANDWIDTH_MAX_ID_LENGTH)
++ {
++ #ifdef BANDWIDTH_DEBUG
++ printk("set error: buffer not large enough!\n");
++ #endif
++ return 0;
++ }
++
++ down(&userspace_lock);
++ set_in_progress = 1;
++
++ buffer = kmalloc(len, GFP_ATOMIC);
++ if(buffer == NULL) /* check for malloc failure */
++ {
++ return handle_set_failure(0, 1, 0, NULL);
++ }
++ copy_from_user(buffer, user, len);
++ parse_set_header(buffer, &header);
++
++
++
++
++ /*
++ * retrieve data for this id and verify all variables are properly defined, just to be sure
++ * this is a kernel module -- it pays to be paranoid!
++ */
++ spin_lock_bh(&bandwidth_lock);
++
++
++ iam = (info_and_maps*)get_string_map_element(id_map, header.id);
++ if(iam == NULL)
++ {
++ return handle_set_failure(0, 1, 1, buffer);
++ }
++ if(iam->info == NULL || iam->ip_map == NULL)
++ {
++ return handle_set_failure(0, 1, 1, buffer);
++ }
++ if(iam->info->num_intervals_to_save > 0 && iam->ip_history_map == NULL)
++ {
++ return handle_set_failure(0, 1, 1, buffer);
++ }
++
++ /*
++ * during set unconditionally set combined_bw to NULL
++ * if combined data (ip=0) exists after set exits cleanly, we will restore it
++ */
++ iam->info->combined_bw = NULL;
++
++ //if zero_unset_ips == 1 && next_ip_index == 0
++ //then clear data for all ips for this id
++ if(header.zero_unset_ips && header.next_ip_index == 0)
++ {
++ //clear data
++ if(iam->info->num_intervals_to_save > 0)
++ {
++ while(iam->ip_map->num_elements > 0)
++ {
++ unsigned long key;
++ remove_smallest_long_map_element(iam->ip_map, &key);
++ /* ignore return value -- it's actually malloced in history, not here */
++ }
++ while(iam->ip_history_map->num_elements > 0)
++ {
++ unsigned long key;
++ bw_history* history = remove_smallest_long_map_element(iam->ip_history_map, &key);
++ kfree(history->history_data);
++ kfree(history);
++ }
++ }
++ else
++ {
++ while(iam->ip_map->num_elements > 0)
++ {
++ unsigned long key;
++ uint64_t *bw = remove_smallest_long_map_element(iam->ip_map, &key);
++ kfree(bw);
++ }
++ }
++ }
++
++ /*
++ * last_backup parameter is only relevant for case where we are not setting history
++ * and when we don't have a constant interval length or a specified reset_time (since in this case start time gets reset when rule is inserted and there is therefore no constant end)
++ * If num_intervals_to_save =0 and is_constant_interval=0, check it. If it's nonzero (0=ignore) and invalid, return.
++ */
++ if(header.last_backup > 0 && iam->info->num_intervals_to_save == 0 && (iam->info->reset_is_constant_interval == 0 || iam->info->reset_time != 0) )
++ {
++ time_t adjusted_last_backup_time = header.last_backup - (60 * local_minutes_west);
++ time_t next_reset_of_last_backup = get_next_reset_time(iam->info, adjusted_last_backup_time, adjusted_last_backup_time);
++ if(next_reset_of_last_backup != iam->info->next_reset)
++ {
++ return handle_set_failure(0, 1, 1, buffer);
++ }
++ }
++
++
++ /*
++ * iterate over each ip block in buffer,
++	 * loading data into necessary kernel-space data structures
++ */
++ buffer_index = (3*4) + 1 + 1 + 8 + BANDWIDTH_MAX_ID_LENGTH;
++ next_ip_index = header.next_ip_index;
++
++ while(next_ip_index < header.num_ips_in_buffer)
++ {
++ set_single_ip_data(header.history_included, iam, buffer, &buffer_index, now);
++ next_ip_index++;
++ }
++
++ if (next_ip_index == header.total_ips)
++ {
++ set_in_progress = 0;
++ }
++
++ /* set combined_bw */
++ iam->info->combined_bw = (uint64_t*)get_long_map_element(iam->ip_map, 0);
++
++ kfree(buffer);
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++}
++static int checkentry(const struct xt_mtchk_param *par)
++{
++
++
++ struct ipt_bandwidth_info *info = (struct ipt_bandwidth_info*)(par->matchinfo);
++
++
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("checkentry called\n");
++ #endif
++
++
++
++
++
++ if(info->ref_count == NULL) /* first instance, we're inserting rule */
++ {
++ struct ipt_bandwidth_info *master_info = (struct ipt_bandwidth_info*)kmalloc(sizeof(struct ipt_bandwidth_info), GFP_ATOMIC);
++ info->ref_count = (unsigned long*)kmalloc(sizeof(unsigned long), GFP_ATOMIC);
++
++ if(info->ref_count == NULL) /* deal with kmalloc failure */
++ {
++ printk("ipt_bandwidth: kmalloc failure in checkentry!\n");
++ return 0;
++ }
++ *(info->ref_count) = 1;
++ info->non_const_self = master_info;
++ info->hashed_id = sdbm_string_hash(info->id);
++ info->iam = NULL;
++ info->combined_bw = NULL;
++
++ memcpy(master_info->id, info->id, BANDWIDTH_MAX_ID_LENGTH);
++ master_info->type = info->type;
++ master_info->check_type = info->check_type;
++ master_info->local_subnet = info->local_subnet;
++ master_info->local_subnet_mask = info->local_subnet_mask;
++ master_info->cmp = info->cmp;
++ master_info->reset_is_constant_interval = info->reset_is_constant_interval;
++ master_info->reset_interval = info->reset_interval;
++ master_info->reset_time = info->reset_time;
++ master_info->bandwidth_cutoff = info->bandwidth_cutoff;
++ master_info->current_bandwidth = info->current_bandwidth;
++ master_info->next_reset = info->next_reset;
++ master_info->previous_reset = info->previous_reset;
++ master_info->last_backup_time = info->last_backup_time;
++ master_info->num_intervals_to_save = info->num_intervals_to_save;
++
++ master_info->hashed_id = info->hashed_id;
++ master_info->iam = info->iam;
++ master_info->combined_bw = info->combined_bw;
++ master_info->non_const_self = info->non_const_self;
++ master_info->ref_count = info->ref_count;
++
++ #ifdef BANDWIDTH_DEBUG
++ printk(" after increment, ref count = %ld\n", *(info->ref_count) );
++ #endif
++
++ if(info->cmp != BANDWIDTH_CHECK)
++ {
++ info_and_maps *iam;
++
++ down(&userspace_lock);
++ spin_lock_bh(&bandwidth_lock);
++
++
++
++ iam = (info_and_maps*)get_string_map_element(id_map, info->id);
++ if(iam != NULL)
++ {
++ printk("ipt_bandwidth: error, \"%s\" is a duplicate id\n", info->id);
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++ }
++
++ if(info->reset_interval != BANDWIDTH_NEVER)
++ {
++ time_t now = get_seconds();
++ if(now != last_local_mw_update )
++ {
++ check_for_timezone_shift(now, 1);
++ }
++
++
++ now = now - (60 * local_minutes_west); /* Adjust for local timezone */
++ info->previous_reset = now;
++ master_info->previous_reset = now;
++ if(info->next_reset == 0)
++ {
++ info->next_reset = get_next_reset_time(info, now, now);
++ master_info->next_reset = info->next_reset;
++ /*
++ * if we specify last backup time, check that next reset is consistent,
++ * otherwise reset current_bandwidth to 0
++ *
++ * only applies to combined type -- otherwise we need to handle setting bandwidth
++ * through userspace library
++ */
++ if(info->last_backup_time != 0 && info->type == BANDWIDTH_COMBINED)
++ {
++ time_t adjusted_last_backup_time = info->last_backup_time - (60 * local_minutes_west);
++ time_t next_reset_of_last_backup = get_next_reset_time(info, adjusted_last_backup_time, adjusted_last_backup_time);
++ if(next_reset_of_last_backup != info->next_reset)
++ {
++ info->current_bandwidth = 0;
++ master_info->current_bandwidth = 0;
++ }
++ info->last_backup_time = 0;
++ master_info->last_backup_time = 0;
++ }
++ }
++ }
++
++ iam = (info_and_maps*)kmalloc( sizeof(info_and_maps), GFP_ATOMIC);
++ if(iam == NULL) /* handle kmalloc failure */
++ {
++ printk("ipt_bandwidth: kmalloc failure in checkentry!\n");
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++ }
++ iam->ip_map = initialize_long_map();
++ if(iam->ip_map == NULL) /* handle kmalloc failure */
++ {
++ printk("ipt_bandwidth: kmalloc failure in checkentry!\n");
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++ }
++ iam->ip_history_map = NULL;
++ if(info->num_intervals_to_save > 0)
++ {
++ iam->ip_history_map = initialize_long_map();
++ if(iam->ip_history_map == NULL) /* handle kmalloc failure */
++ {
++ printk("ipt_bandwidth: kmalloc failure in checkentry!\n");
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ return 0;
++ }
++ }
++
++
++ iam->info = master_info;
++ set_string_map_element(id_map, info->id, iam);
++
++ info->iam = (void*)iam;
++ master_info->iam = (void*)iam;
++
++
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ }
++ }
++
++ else
++ {
++ /* info->non_const_self = info; */
++
++
++ *(info->ref_count) = *(info->ref_count) + 1;
++ #ifdef BANDWIDTH_DEBUG
++ printk(" after increment, ref count = %ld\n", *(info->ref_count) );
++ #endif
++
++
++ /*
++ if(info->cmp != BANDWIDTH_CHECK)
++ {
++ info_and_maps* iam;
++ down(&userspace_lock);
++ spin_lock_bh(&bandwidth_lock);
++ iam = (info_and_maps*)get_string_map_element(id_map, info->id);
++ if(iam != NULL)
++ {
++ iam->info = info;
++ }
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ }
++ */
++ }
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("checkentry complete\n");
++ #endif
++ return 0;
++}
++
++static void destroy(const struct xt_mtdtor_param *par)
++{
++
++ struct ipt_bandwidth_info *info = (struct ipt_bandwidth_info*)(par->matchinfo);
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("destroy called\n");
++ #endif
++
++ *(info->ref_count) = *(info->ref_count) - 1;
++
++ #ifdef BANDWIDTH_DEBUG
++ printk(" after decrement refcount = %ld\n", *(info->ref_count));
++ #endif
++
++ if(*(info->ref_count) == 0)
++ {
++ info_and_maps* iam;
++ down(&userspace_lock);
++ spin_lock_bh(&bandwidth_lock);
++
++ info->combined_bw = NULL;
++ iam = (info_and_maps*)remove_string_map_element(id_map, info->id);
++ if(iam != NULL && info->cmp != BANDWIDTH_CHECK)
++ {
++ unsigned long num_destroyed;
++ if(iam->ip_map != NULL && iam->ip_history_map != NULL)
++ {
++ unsigned long history_index = 0;
++ bw_history** histories_to_free;
++
++ destroy_long_map(iam->ip_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++
++ histories_to_free = (bw_history**)destroy_long_map(iam->ip_history_map, DESTROY_MODE_RETURN_VALUES, &num_destroyed);
++
++ /* num_destroyed will be 0 if histories_to_free is null after malloc failure, so this is safe */
++ for(history_index = 0; history_index < num_destroyed; history_index++)
++ {
++ bw_history* h = histories_to_free[history_index];
++ if(h != NULL)
++ {
++ kfree(h->history_data);
++ kfree(h);
++ }
++ }
++
++ }
++ else if(iam->ip_map != NULL)
++ {
++ destroy_long_map(iam->ip_map, DESTROY_MODE_FREE_VALUES, &num_destroyed);
++ }
++ kfree(iam);
++ /* info portion of iam gets taken care of automatically */
++ }
++ kfree(info->ref_count);
++ kfree(info->non_const_self);
++
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++ }
++
++ #ifdef BANDWIDTH_DEBUG
++ printk("destroy complete\n");
++ #endif
++}
++
++static struct nf_sockopt_ops ipt_bandwidth_sockopts =
++{
++ .pf = PF_INET,
++ .set_optmin = BANDWIDTH_SET,
++ .set_optmax = BANDWIDTH_SET+1,
++ .set = ipt_bandwidth_set_ctl,
++ .get_optmin = BANDWIDTH_GET,
++ .get_optmax = BANDWIDTH_GET+1,
++ .get = ipt_bandwidth_get_ctl
++};
++
++
++static struct xt_match bandwidth_match __read_mostly =
++{
++ .name = "bandwidth",
++ .match = &match,
++ .family = AF_INET,
++ .matchsize = sizeof(struct ipt_bandwidth_info),
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE,
++};
++
++/* Module init: register the userspace sockopt interface, initialize
++ * timezone bookkeeping and the id map, then register the xt match. */
++static int __init init(void)
++{
++	if (nf_register_sockopt(&ipt_bandwidth_sockopts) < 0)
++	{
++		printk("ipt_bandwidth: Can't register sockopts. Aborting\n");
++		return -EINVAL; /* fix: actually abort as the message states, instead of loading with no sockopt handlers */
++	}
++	bandwidth_record_max = get_bw_record_max();
++	local_minutes_west = old_minutes_west = sys_tz.tz_minuteswest;
++	local_seconds_west = local_minutes_west*60;
++	last_local_mw_update = get_seconds();
++	if(local_seconds_west > last_local_mw_update)
++	{
++		/* we can't let adjusted time be < 0 -- pretend timezone is still UTC */
++		local_minutes_west = 0;
++		local_seconds_west = 0;
++	}
++	id_map = initialize_string_map(0);
++	if(id_map == NULL) /* deal with kmalloc failure */
++	{
++		printk("ipt_bandwidth: kmalloc failure in init!\n");
++		nf_unregister_sockopt(&ipt_bandwidth_sockopts); /* don't leak the sockopt registration */
++		return -ENOMEM; /* fix: was -1; return a proper errno */
++	}
++	/* roll back the sockopt registration if match registration fails */
++	{ int ret = xt_register_match(&bandwidth_match); if(ret != 0) { nf_unregister_sockopt(&ipt_bandwidth_sockopts); } return ret; }
++}
++
++static void __exit fini(void)
++{
++ down(&userspace_lock);
++ spin_lock_bh(&bandwidth_lock);
++ if(id_map != NULL)
++ {
++ unsigned long num_returned;
++ info_and_maps **iams = (info_and_maps**)destroy_string_map(id_map, DESTROY_MODE_RETURN_VALUES, &num_returned);
++ int iam_index;
++ for(iam_index=0; iam_index < num_returned; iam_index++)
++ {
++ info_and_maps* iam = iams[iam_index];
++ long_map* ip_map = iam->ip_map;
++ unsigned long num_destroyed;
++ destroy_long_map(ip_map, DESTROY_MODE_FREE_VALUES, &num_destroyed);
++ kfree(iam);
++ /* info portion of iam gets taken care of automatically */
++ }
++ }
++ nf_unregister_sockopt(&ipt_bandwidth_sockopts);
++ xt_unregister_match(&bandwidth_match);
++ spin_unlock_bh(&bandwidth_lock);
++ up(&userspace_lock);
++
++}
++
++module_init(init);
++module_exit(fini);
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_bandwidth.mod.c 2015-06-19 03:02:55.381669455 +0800
+@@ -0,0 +1,20 @@
++#include <linux/module.h>
++#include <linux/vermagic.h>
++#include <linux/compiler.h>
++
++MODULE_INFO(vermagic, VERMAGIC_STRING);
++
++struct module __this_module
++__attribute__((section(".gnu.linkonce.this_module"))) = {
++ .name = KBUILD_MODNAME,
++ .init = init_module,
++#ifdef CONFIG_MODULE_UNLOAD
++ .exit = cleanup_module,
++#endif
++};
++
++static const char __module_depends[]
++__attribute_used__
++__attribute__((section(".modinfo"))) =
++"depends=";
++
+--- linux.orig/net/ipv4/netfilter/Kconfig 2015-06-15 00:19:31.000000000 +0800
++++ linux.new/net/ipv4/netfilter/Kconfig 2015-06-19 03:02:55.441666949 +0800
+@@ -389,5 +389,25 @@
+
+ endif # IP_NF_ARPTABLES
+
++config IP_NF_MATCH_WEBURL
++	tristate "weburl match support"
++	depends on IP_NF_IPTABLES
++	help
++	  This option enables the weburl match, which matches on requested web URLs (used by Gargoyle).
++config IP_NF_MATCH_WEBMON
++	tristate "webmon match support"
++	depends on IP_NF_IPTABLES
++	help
++	  This option enables the webmon match, which records recently visited web sites (used by Gargoyle).
++config IP_NF_MATCH_TIMERANGE
++	tristate "timerange match support"
++	depends on IP_NF_IPTABLES
++	help
++	  This option enables the timerange match, which matches times of day and days of the week (used by Gargoyle).
++config IP_NF_MATCH_BANDWIDTH
++	tristate "bandwidth match support"
++	depends on IP_NF_IPTABLES
++	help
++	  This option enables the bandwidth match, which tracks per-host bandwidth usage (used by Gargoyle).
+ endmenu
+
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_timerange.c 2015-06-19 03:02:55.285673465 +0800
+@@ -0,0 +1,142 @@
++/* timerange -- An iptables extension to match multiple timeranges within a week
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <net/sock.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/time.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_timerange.h>
++
++#include <linux/ktime.h>
++
++
++#include <linux/ip.h>
++
++#include <linux/netfilter/x_tables.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Bishop");
++MODULE_DESCRIPTION("Match time ranges, designed for use with Gargoyle web interface (www.gargoyle-router.com)");
++
++
++extern struct timezone sys_tz;
++
++
++static bool match(const struct sk_buff *skb, const struct xt_action_param *par) /* true when "now" (local time) falls inside a configured range */
++{
++	const struct ipt_timerange_info *info = (const struct ipt_timerange_info*)(par->matchinfo);
++
++
++	time_t stamp_time; /* epoch seconds shifted to local time */
++	int weekday; /* 0=Sunday .. 6=Saturday (derived below) */
++	int seconds_since_midnight;
++	int test_index;
++	int match_found;
++
++	struct timeval test_time;
++
++	do_gettimeofday(&test_time);
++	stamp_time = test_time.tv_sec;
++	stamp_time = stamp_time - (60 * sys_tz.tz_minuteswest); /* Adjust for local timezone */
++	seconds_since_midnight = stamp_time % 86400; /* 86400 seconds per day */
++	weekday = (4 + (stamp_time/86400)) % 7; /* 1970-01-01 (time=0) was a Thursday (4). */
++
++	/*
++	printk("time=%d, since midnight = %d, day=%d, minuteswest=%d\n", stamp_time, seconds_since_midnight, weekday, sys_tz.tz_minuteswest);
++	*/
++
++	match_found = 0;
++	if(info->type == HOURS) /* ranges[] holds ascending (start,end) second-of-day pairs, -1 terminated */
++	{
++		for(test_index=0; info->ranges[test_index] != -1 && match_found == 0 && seconds_since_midnight >= info->ranges[test_index]; test_index=test_index+2) /* stops early once a range starts after "now" */
++		{
++			match_found = seconds_since_midnight >= info->ranges[test_index] && seconds_since_midnight <= info->ranges[test_index+1] ? 1 : match_found;
++		}
++	}
++	else if(info->type == WEEKDAYS) /* day-of-week only */
++	{
++		match_found = info->days[weekday];
++	}
++	else if(info->type == DAYS_HOURS) /* the day must match AND the time-of-day must fall in a range */
++	{
++		match_found = info->days[weekday];
++		if(match_found == 1)
++		{
++			match_found = 0;
++			for(test_index=0; info->ranges[test_index] != -1 && match_found == 0 && seconds_since_midnight >= info->ranges[test_index]; test_index=test_index+2)
++			{
++				match_found = seconds_since_midnight >= info->ranges[test_index] && seconds_since_midnight <= info->ranges[test_index+1] ? 1 : match_found;
++			}
++		}
++	}
++	else if(info->type == WEEKLY_RANGE) /* here ranges[] is seconds since Sunday 00:00 local time */
++	{
++		time_t seconds_since_sunday_midnight = seconds_since_midnight + (weekday*86400);
++		for(test_index=0; info->ranges[test_index] != -1 && match_found == 0 && seconds_since_sunday_midnight >= info->ranges[test_index]; test_index=test_index+2)
++		{
++			match_found = seconds_since_sunday_midnight >= info->ranges[test_index] && seconds_since_sunday_midnight <= info->ranges[test_index+1] ? 1 : match_found;
++		}
++
++	}
++
++	match_found = info->invert == 0 ? match_found : !match_found; /* honor iptables '!' flag */
++	return match_found;
++}
++
++
++static int checkentry(const struct xt_mtchk_param *par) /* no kernel-side validation of the range table; userspace is trusted */
++{
++	return 0;
++}
++
++
++/* xt_match registration descriptor for "-m timerange" */
++static struct xt_match timerange_match __read_mostly =
++{
++	.name		= "timerange",
++	.match		= &match,
++	.family		= AF_INET,
++	.matchsize	= sizeof(struct ipt_timerange_info),
++	.checkentry	= &checkentry,
++	.me		= THIS_MODULE,
++};
++
++static int __init init(void) /* module load: register the match with x_tables */
++{
++	return xt_register_match(&timerange_match);
++}
++
++static void __exit fini(void) /* module unload: mirror of init() */
++{
++	xt_unregister_match(&timerange_match);
++}
++
++module_init(init);
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_weburl.mod.c 2015-06-19 03:02:54.725696856 +0800
+@@ -0,0 +1,20 @@
++#include <linux/module.h>
++#include <linux/vermagic.h>
++#include <linux/compiler.h>
++
++MODULE_INFO(vermagic, VERMAGIC_STRING); /* NOTE(review): *.mod.c is generated by modpost at build time and normally should not be shipped in a patch */
++
++struct module __this_module
++__attribute__((section(".gnu.linkonce.this_module"))) = {
++	.name = KBUILD_MODNAME,
++	.init = init_module,
++#ifdef CONFIG_MODULE_UNLOAD
++	.exit = cleanup_module,
++#endif
++};
++
++static const char __module_depends[]
++__attribute__((used)) /* fix: __attribute_used__ was removed from kernel headers long before 3.18 */
++__attribute__((section(".modinfo"))) =
++"depends=";
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_timerange.mod.c 2015-06-19 03:02:55.285673465 +0800
+@@ -0,0 +1,20 @@
++#include <linux/module.h>
++#include <linux/vermagic.h>
++#include <linux/compiler.h>
++
++MODULE_INFO(vermagic, VERMAGIC_STRING); /* NOTE(review): *.mod.c is generated by modpost at build time and normally should not be shipped in a patch */
++
++struct module __this_module
++__attribute__((section(".gnu.linkonce.this_module"))) = {
++	.name = KBUILD_MODNAME,
++	.init = init_module,
++#ifdef CONFIG_MODULE_UNLOAD
++	.exit = cleanup_module,
++#endif
++};
++
++static const char __module_depends[]
++__attribute__((used)) /* fix: __attribute_used__ was removed from kernel headers long before 3.18 */
++__attribute__((section(".modinfo"))) =
++"depends=";
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/tree_map.h 2015-06-19 03:02:54.737696355 +0800
+@@ -0,0 +1,1084 @@
++/*
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This work 'as-is' we provide.
++ * No warranty, express or implied.
++ * We've done our best,
++ * to debug and test.
++ * Liability for damages denied.
++ *
++ * Permission is granted hereby,
++ * to copy, share, and modify.
++ * Use as is fit,
++ * free or for profit.
++ * On this notice these rights rely.
++ *
++ *
++ *
++ * Note that unlike other portions of Gargoyle this code
++ * does not fall under the GPL, but the rather whimsical
++ * 'Poetic License' above.
++ *
++ * Basically, this library contains a bunch of utilities
++ * that I find useful. I'm sure other libraries exist
++ * that are just as good or better, but I like these tools
++ * because I personally wrote them, so I know their quirks.
++ * (i.e. I know where the bodies are buried). I want to
++ * make sure that I can re-use these utilities for whatever
++ * code I may want to write in the future be it
++ * proprietary or open-source, so I've put them under
++ * a very, very permissive license.
++ *
++ * If you find this code useful, use it. If not, don't.
++ * I really don't care.
++ *
++ */
++
++
++#if __KERNEL__
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++ #define free(foo) kfree(foo)
++ #define printf(format,args...) printk(format,##args)
++
++ /* kernel strdup */
++ static inline char *kernel_strdup(const char *str);
++ static inline char *kernel_strdup(const char *str)
++ {
++ char *tmp;
++ long int s;
++ s=strlen(str) + 1;
++ tmp = kmalloc(s, GFP_ATOMIC);
++ if (tmp != NULL)
++ {
++ memcpy(tmp, str, s);
++ }
++ return tmp;
++ }
++ #define strdup kernel_strdup
++
++#endif
++
++
++
++/* tree_map structs / prototypes */
++typedef struct long_tree_map_node
++{
++	unsigned long key;
++	void* value;
++
++	signed char balance; /* AVL balance factor; kept in [-1,1] by rebalance() */
++	struct long_tree_map_node* left;
++	struct long_tree_map_node* right;
++} long_map_node;
++
++typedef struct
++{
++	long_map_node* root; /* NULL when the map is empty */
++	unsigned long num_elements;
++
++}long_map;
++
++typedef struct
++{
++	long_map lm; /* backing tree, keyed by sdbm hash of the string key */
++	unsigned char store_keys; /* nonzero: values wrapped with a copy of their key string */
++	unsigned long num_elements; /* mirror of lm.num_elements */
++
++}string_map;
++
++
++
++/* long map functions */
++long_map* initialize_long_map(void);
++void* get_long_map_element(long_map* map, unsigned long key);
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* set_long_map_element(long_map* map, unsigned long key, void* value);
++void* remove_long_map_element(long_map* map, unsigned long key);
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned);
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned);
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value));
++
++/* string map functions */
++string_map* initialize_string_map(unsigned char store_keys);
++void* get_string_map_element(string_map* map, const char* key);
++void* set_string_map_element(string_map* map, const char* key, void* value);
++void* remove_string_map_element(string_map* map, const char* key);
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned);
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned);
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value));
++
++
++/*
++ * three different ways to deal with values when data structure is destroyed
++ */
++#define DESTROY_MODE_RETURN_VALUES 20
++#define DESTROY_MODE_FREE_VALUES 21
++#define DESTROY_MODE_IGNORE_VALUES 22
++
++
++/*
++ * for convenience & backwards compatibility alias _string_map_ functions to
++ * _map_ functions since string map is used more often than long map
++ */
++#define initialize_map initialize_string_map
++#define set_map_element set_string_map_element
++#define get_map_element get_string_map_element
++#define remove_map_element remove_string_map_element
++#define get_map_keys get_string_map_keys
++#define get_map_values get_string_map_values
++#define destroy_map destroy_string_map
++
++
++/* internal utility structures/ functions */
++typedef struct stack_node_struct /* one frame of the explicit root-to-node path used for rebalancing */
++{
++	long_map_node** node_ptr; /* address of the parent's child slot (or of map->root) */
++	signed char direction; /* -1 = descended left, 1 = descended right */
++	struct stack_node_struct* previous;
++} stack_node;
++
++static void free_stack(stack_node* stack);
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed);
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value));
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value));
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth);
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth);
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op);
++static void rotate_right (long_map_node** parent);
++static void rotate_left (long_map_node** parent);
++
++/* internal for string map */
++typedef struct /* wrapper stored as the long_map value when store_keys is set */
++{
++	char* key; /* heap copy of the original string key */
++	void* value;
++} string_map_key_value;
++static unsigned long sdbm_string_hash(const char *key);
++
++
++
++
++/***************************************************
++ * For testing only
++ ***************************************************/
++/*
++void print_list(stack_node *l);
++
++void print_list(stack_node *l)
++{
++ if(l != NULL)
++ {
++ printf(" list key = %ld, dir=%d, \n", (*(l->node_ptr))->key, l->direction);
++ print_list(l->previous);
++ }
++}
++*/
++/******************************************************
++ * End testing Code
++ *******************************************************/
++
++
++
++
++/***************************************************
++ * string_map function definitions
++ ***************************************************/
++
++string_map* initialize_string_map(unsigned char store_keys) /* allocate an empty map; NULL on allocation failure */
++{
++	string_map* map = (string_map*)malloc(sizeof(string_map));
++	if(map != NULL)
++	{
++		map->store_keys = store_keys; /* nonzero: key strings are copied so they can be listed later */
++		map->lm.root = NULL;
++		map->lm.num_elements = 0;
++		map->num_elements = map->lm.num_elements;
++	}
++	return map;
++}
++
++void* get_string_map_element(string_map* map, const char* key) /* NULL if not present */
++{
++	unsigned long hashed_key = sdbm_string_hash(key); /* NOTE(review): identity is the hash only -- colliding strings alias the same element */
++	void* return_value = get_long_map_element( &(map->lm), hashed_key);
++	if(return_value != NULL && map->store_keys)
++	{
++		string_map_key_value* r = (string_map_key_value*)return_value; /* unwrap the key/value pair */
++		return_value = r->value;
++	}
++	map->num_elements = map->lm.num_elements;
++	return return_value;
++}
++
++void* set_string_map_element(string_map* map, const char* key, void* value) /* insert or replace; returns replaced value, or NULL (also NULL on alloc failure) */
++{
++	unsigned long hashed_key = sdbm_string_hash(key);
++	void* return_value = NULL;
++	if(map->store_keys)
++	{
++		string_map_key_value* kv = (string_map_key_value*)malloc(sizeof(string_map_key_value));
++		if(kv == NULL) /* deal with malloc failure */
++		{
++			return NULL;
++		}
++		kv->key = strdup(key); /* heap copy so the caller's buffer need not outlive the map */
++		if(kv->key == NULL) /* deal with malloc failure */
++		{
++			free(kv);
++			return NULL;
++		}
++		kv->value = value;
++		return_value = set_long_map_element( &(map->lm), hashed_key, kv);
++		if(return_value != NULL) /* replaced an existing entry: free its wrapper + key copy */
++		{
++			string_map_key_value* r = (string_map_key_value*)return_value;
++			return_value = r->value;
++			free(r->key);
++			free(r);
++		}
++	}
++	else
++	{
++		return_value = set_long_map_element( &(map->lm), hashed_key, value);
++	}
++	map->num_elements = map->lm.num_elements;
++	return return_value;
++}
++
++void* remove_string_map_element(string_map* map, const char* key) /* remove and return the value, or NULL if absent */
++{
++	unsigned long hashed_key = sdbm_string_hash(key);
++	void* return_value = remove_long_map_element( &(map->lm), hashed_key);
++
++	if(return_value != NULL && map->store_keys)
++	{
++		string_map_key_value* r = (string_map_key_value*)return_value; /* free the wrapper, hand back the user's value */
++		return_value = r->value;
++		free(r->key);
++		free(r);
++	}
++	map->num_elements = map->lm.num_elements;
++	return return_value;
++}
++
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned) /* NULL-terminated array of strdup'd keys; caller frees keys and array */
++{
++	char** str_keys;
++	str_keys = (char**)malloc((map->num_elements+1)*sizeof(char*)); /* +1 for the NULL terminator */
++	if(str_keys == NULL) /* deal with malloc failure */
++	{
++		return NULL;
++	}
++	str_keys[0] = NULL;
++	*num_keys_returned = 0;
++	if(map->store_keys && map->num_elements > 0) /* keys are only recoverable when store_keys was set */
++	{
++		unsigned long list_length;
++		void** long_values = get_sorted_long_map_values( &(map->lm), &list_length);
++		unsigned long key_index;
++		/*list_length will be 0 on malloc failure in get_sorted_long_map_values, so this code shouldn't seg fault if that happens */
++		for(key_index = 0; key_index < list_length; key_index++)
++		{
++			str_keys[key_index] = strdup( ((string_map_key_value*)(long_values[key_index]))->key);
++			if(str_keys[key_index] == NULL) /* deal with malloc failure */
++			{
++				//just return the incomplete list (hey, it's null terminated...)
++				free(long_values);
++				return str_keys;
++			}
++			*num_keys_returned = *num_keys_returned + 1;
++		}
++		str_keys[list_length] = NULL;
++		free(long_values);
++	}
++	return str_keys;
++}
++
++
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned) /* values sorted by hashed key; caller frees the array */
++{
++	void** values = NULL;
++	if(map != NULL)
++	{
++		values = get_sorted_long_map_values ( &(map->lm), num_values_returned ); /* NOTE(review): when store_keys is set these are internal string_map_key_value wrappers, not user values */
++	}
++	return values;
++}
++
++
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed) /* frees map; per destruction_type, frees/returns/ignores values */
++{
++	void** return_values = NULL;
++	if(map != NULL)
++	{
++		if(map->store_keys)
++		{
++			void** kvs = destroy_long_map_values( &(map->lm), DESTROY_MODE_RETURN_VALUES, num_destroyed ); /* NOTE(review): kvs can be NULL on alloc failure while *num_destroyed > 0 -- loop below would deref NULL */
++			unsigned long kv_index = 0;
++			for(kv_index=0; kv_index < *num_destroyed; kv_index++)
++			{
++				string_map_key_value* kv = (string_map_key_value*)kvs[kv_index];
++				void* value = kv->value;
++
++				free(kv->key); /* wrapper + key copy always freed here */
++				free(kv);
++				if(destruction_type == DESTROY_MODE_FREE_VALUES)
++				{
++					free(value);
++				}
++				if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++				{
++					kvs[kv_index] = value; /* reuse the array: replace wrappers with user values in place */
++				}
++			}
++			if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++			{
++				return_values = kvs;
++			}
++			else
++			{
++				free(kvs);
++			}
++		}
++		else
++		{
++			return_values = destroy_long_map_values( &(map->lm), destruction_type, num_destroyed );
++		}
++		free(map);
++	}
++	return return_values;
++}
++
++
++
++
++/***************************************************
++ * long_map function definitions
++ ***************************************************/
++
++long_map* initialize_long_map(void) /* allocate an empty long-keyed map; NULL on allocation failure */
++{
++	long_map* map = (long_map*)malloc(sizeof(long_map));
++	if(map != NULL) /* test for malloc failure */
++	{
++		map->root = NULL;
++		map->num_elements = 0;
++	}
++	return map;
++}
++
++void* get_long_map_element(long_map* map, unsigned long key) /* binary-search the tree; NULL if key absent */
++{
++	void* value = NULL;
++
++	if(map->root != NULL)
++	{
++		long_map_node* parent_node = map->root;
++		long_map_node* next_node;
++		while( key != parent_node->key && (next_node = (long_map_node *)(key < parent_node->key ? parent_node->left : parent_node->right)) != NULL) /* descend until hit or leaf */
++		{
++			parent_node = next_node;
++		}
++		if(parent_node->key == key)
++		{
++			value = parent_node->value;
++		}
++	}
++	return value;
++}
++
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key) /* leftmost node; *smallest_key untouched if map is empty */
++{
++	void* value = NULL;
++	if(map->root != NULL)
++	{
++		long_map_node* next_node = map->root;
++		while( next_node->left != NULL) /* smallest key lives at the leftmost node */
++		{
++			next_node = next_node->left;
++		}
++		value = next_node->value;
++		*smallest_key = next_node->key;
++	}
++	return value;
++}
++
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key) /* rightmost node; *largest_key untouched if map is empty */
++{
++	void* value = NULL;
++	if(map->root != NULL)
++	{
++		long_map_node* next_node = map->root;
++		while( next_node->right != NULL) /* largest key lives at the rightmost node */
++		{
++			next_node = next_node->right;
++		}
++		value = next_node->value;
++		*largest_key = next_node->key;
++	}
++	return value;
++}
++
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key) /* pop the minimum; its key is stored in *smallest_key */
++{
++	get_smallest_long_map_element(map, smallest_key);
++	return map->root == NULL ? NULL : remove_long_map_element(map, *smallest_key); /* fix: on an empty map *smallest_key is unset -- don't read it */
++}
++
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key) /* pop the maximum; its key is stored in *largest_key */
++{
++	get_largest_long_map_element(map, largest_key);
++	return map->root == NULL ? NULL : remove_long_map_element(map, *largest_key); /* same empty-map guard as remove_smallest */
++}
++
++
++/* if replacement performed, returns replaced value, otherwise null */
++void* set_long_map_element(long_map* map, unsigned long key, void* value) /* AVL insert-or-replace; returns NULL on alloc failure too */
++{
++	stack_node* parent_list = NULL; /* explicit root-to-insertion-point path, used to walk back up for rebalancing */
++	void* old_value = NULL;
++	int old_value_found = 0;
++
++	long_map_node* parent_node;
++	long_map_node* next_node;
++	stack_node* next_parent;
++	stack_node* previous_parent;
++	signed char new_balance;
++
++
++	long_map_node* new_node = (long_map_node*)malloc(sizeof(long_map_node));
++	if(new_node == NULL)
++	{
++		return NULL;
++	}
++	new_node->value = value;
++	new_node->key = key;
++	new_node->left = NULL;
++	new_node->right = NULL;
++	new_node->balance = 0;
++
++
++
++	if(map->root == NULL) /* first element: becomes root, no rebalance needed */
++	{
++		map->root = new_node;
++	}
++	else
++	{
++		parent_node = map->root;
++
++		next_parent = (stack_node*)malloc(sizeof(stack_node));
++		if(next_parent == NULL) /* deal with malloc failure */
++		{
++			free(new_node);
++			return NULL; /* won't insert but won't seg fault */
++		}
++		next_parent->node_ptr = &(map->root);
++		next_parent->previous = parent_list;
++		parent_list = next_parent;
++
++		while( key != parent_node->key && (next_node = (key < parent_node->key ? parent_node->left : parent_node->right) ) != NULL) /* standard BST descent, recording the path */
++		{
++			next_parent = (stack_node*)malloc(sizeof(stack_node));
++			if(next_parent == NULL) /* deal with malloc failure */
++			{
++				/* free previous stack nodes to prevent memory leak */
++				free_stack(parent_list);
++				free(new_node);
++				return NULL;
++			}
++			next_parent->node_ptr = key < parent_node->key ? &(parent_node->left) : &(parent_node->right);
++			next_parent->previous = parent_list;
++			next_parent->previous->direction = key < parent_node->key ? -1 : 1; /* record which way we left the previous node */
++			parent_list = next_parent;
++
++			parent_node = next_node;
++		}
++
++
++		if(key == parent_node->key) /* key already present: replace value in place */
++		{
++			old_value = parent_node->value;
++			old_value_found = 1;
++			parent_node->value = value;
++			free(new_node);
++			/* we merely replaced a node, no need to rebalance */
++		}
++		else
++		{
++			if(key < parent_node->key)
++			{
++				parent_node->left = (void*)new_node;
++				parent_list->direction = -1;
++			}
++			else
++			{
++				parent_node->right = (void*)new_node;
++				parent_list->direction = 1;
++			}
++
++
++			/* we inserted a node, rebalance */
++			previous_parent = parent_list;
++			new_balance = 1; /* initial value is not used, but must not be 0 for initial loop condition */
++
++
++			while(previous_parent != NULL && new_balance != 0) /* walk back toward root; stop once a subtree's height is unchanged */
++			{
++				new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, 1);
++				previous_parent = previous_parent->previous;
++			}
++		}
++	}
++
++	free_stack(parent_list);
++
++	if(old_value_found == 0)
++	{
++		map->num_elements = map->num_elements + 1;
++	}
++
++	return old_value;
++}
++
++
++void* remove_long_map_element(long_map* map, unsigned long key) /* AVL delete; returns removed value or NULL if absent (or on alloc failure) */
++{
++
++	void* value = NULL;
++
++	long_map_node* root_node = map->root;
++	stack_node* parent_list = NULL; /* root-to-removal-point path for the rebalancing pass */
++
++
++	long_map_node* remove_parent;
++	long_map_node* remove_node;
++	long_map_node* next_node;
++
++	long_map_node* replacement; /* node spliced into remove_node's position */
++	long_map_node* replacement_parent;
++	long_map_node* replacement_next;
++
++	stack_node* next_parent;
++	stack_node* previous_parent;
++	stack_node* replacement_stack_node;
++
++
++	signed char new_balance;
++
++
++
++	if(root_node != NULL)
++	{
++		remove_parent = root_node;
++		remove_node = key < remove_parent->key ? remove_parent->left : remove_parent->right;
++
++		if(remove_node != NULL && key != remove_parent->key) /* target is below the root: descend, recording the path */
++		{
++			next_parent = (stack_node*)malloc(sizeof(stack_node));
++			if(next_parent == NULL) /* deal with malloc failure */
++			{
++				return NULL;
++			}
++			next_parent->node_ptr = &(map->root);
++			next_parent->previous = parent_list;
++			parent_list = next_parent;
++			while( key != remove_node->key && (next_node = (key < remove_node->key ? remove_node->left : remove_node->right)) != NULL)
++			{
++				next_parent = (stack_node*)malloc(sizeof(stack_node));
++				if(next_parent == NULL) /* deal with malloc failure */
++				{
++					/* free previous stack nodes to prevent memory leak */
++					free_stack(parent_list);
++					return NULL;
++				}
++				next_parent->node_ptr = key < remove_parent->key ? &(remove_parent->left) : &(remove_parent->right);
++				next_parent->previous = parent_list;
++				next_parent->previous->direction = key < remove_parent->key ? -1 : 1;
++				parent_list = next_parent;
++
++
++				remove_parent = remove_node;
++				remove_node = next_node;
++			}
++			parent_list->direction = key < remove_parent-> key ? -1 : 1;
++		}
++		else
++		{
++			remove_node = remove_parent; /* target may be the root itself */
++		}
++
++
++		if(key == remove_node->key) /* found it; otherwise fall through and return NULL */
++		{
++
++			/* find replacement for node we are deleting */
++			if( remove_node->right == NULL ) /* case 1: no right child -- left child (possibly NULL) takes its place */
++			{
++				replacement = remove_node->left;
++			}
++			else if( remove_node->right->left == NULL) /* case 2: right child has no left subtree -- it replaces directly */
++			{
++
++				replacement = remove_node->right;
++				replacement->left = remove_node->left;
++				replacement->balance = remove_node->balance;
++
++				/* put pointer to replacement node into list for balance update */
++				replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++				if(replacement_stack_node == NULL) /* deal with malloc failure */
++				{
++					/* free previous stack nodes to prevent memory leak */
++					free_stack(parent_list);
++					return NULL;
++				}
++				replacement_stack_node->previous = parent_list;
++				replacement_stack_node->direction = 1; /* replacement is from right */
++				if(remove_node == remove_parent) /* special case for root node */
++				{
++					replacement_stack_node->node_ptr = &(map->root);
++				}
++				else
++				{
++					replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++				}
++				parent_list = replacement_stack_node;
++
++			}
++			else /* case 3: replace with in-order successor (leftmost node of right subtree) */
++			{
++				/* put pointer to replacement node into list for balance update */
++				replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++				if(replacement_stack_node == NULL) /* deal with malloc failure */
++				{
++					/* free previous stack nodes to prevent memory leak */
++					free_stack(parent_list);
++					return NULL;
++				}
++
++				replacement_stack_node->previous = parent_list;
++				replacement_stack_node->direction = 1; /* we always look for replacement on right */
++				if(remove_node == remove_parent) /* special case for root node */
++				{
++					replacement_stack_node->node_ptr = &(map->root);
++				}
++				else
++				{
++					replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++				}
++
++				parent_list = replacement_stack_node;
++
++
++				/*
++				 * put pointer to replacement node->right into list for balance update
++				 * this node will have to be updated with the proper pointer
++				 * after we have identified the replacement
++				 */
++				replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++				if(replacement_stack_node == NULL) /* deal with malloc failure */
++				{
++					/* free previous stack nodes to prevent memory leak */
++					free_stack(parent_list);
++					return NULL;
++				}
++
++				replacement_stack_node->previous = parent_list;
++				replacement_stack_node->direction = -1; /* we always look for replacement to left of this node */
++				parent_list = replacement_stack_node;
++
++				/* find smallest node on right (large) side of tree */
++				replacement_parent = remove_node->right;
++				replacement = replacement_parent->left;
++
++				while((replacement_next = replacement->left) != NULL)
++				{
++					next_parent = (stack_node*)malloc(sizeof(stack_node));
++					if(next_parent == NULL) /* deal with malloc failure */
++					{
++						/* free previous stack nodes to prevent memory leak */
++						free_stack(parent_list);
++						return NULL;
++					}
++
++					next_parent->node_ptr = &(replacement_parent->left);
++					next_parent->previous = parent_list;
++					next_parent->direction = -1; /* we always go left */
++					parent_list = next_parent;
++
++					replacement_parent = replacement;
++					replacement = replacement_next;
++
++				}
++
++				replacement_parent->left = replacement->right; /* successor's right subtree takes its old slot */
++
++				replacement->left = remove_node->left; /* successor adopts the removed node's children & balance */
++				replacement->right = remove_node->right;
++				replacement->balance = remove_node->balance;
++				replacement_stack_node->node_ptr = &(replacement->right); /* patch the placeholder frame made above */
++			}
++
++			/* insert replacement at proper location in tree */
++			if(remove_node == remove_parent)
++			{
++				map->root = replacement;
++			}
++			else
++			{
++				remove_parent->left = remove_node == remove_parent->left ? replacement : remove_parent->left;
++				remove_parent->right = remove_node == remove_parent->right ? replacement : remove_parent->right;
++			}
++
++
++			/* rebalance tree */
++			previous_parent = parent_list;
++			new_balance = 0; /* deletion pass continues while the subtree height DID change (balance == 0) */
++			while(previous_parent != NULL && new_balance == 0)
++			{
++				new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, -1);
++				previous_parent = previous_parent->previous;
++			}
++
++
++
++
++			/*
++			 * since we found a value to remove, decrease number of elements in map
++			 * set return value to the deleted node's value and free the node
++			 */
++			map->num_elements = map->num_elements - 1;
++			value = remove_node->value;
++			free(remove_node);
++		}
++	}
++
++	free_stack(parent_list);
++
++	return value;
++}
++
++
++/* note: returned keys are dynamically allocated, you need to free them! */
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned) /* ascending key array; NOT NULL-terminated (unlike the values list) */
++{
++	unsigned long* key_list = (unsigned long*)malloc((map->num_elements)*sizeof(unsigned long));
++	unsigned long next_key_index;
++	if(key_list == NULL) /* alloc failure: report zero keys */
++	{
++		*num_keys_returned = 0;
++		return NULL;
++	}
++	next_key_index = 0;
++	get_sorted_node_keys(map->root, key_list, &next_key_index, 0); /* in-order traversal yields ascending order */
++
++	*num_keys_returned = map->num_elements;
++
++	return key_list;
++}
++
++
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned) /* values in ascending key order; array is NULL-terminated; caller frees */
++{
++	void** value_list = (void**)malloc((map->num_elements+1)*sizeof(void*)); /* +1 for the terminator */
++	unsigned long next_value_index;
++
++	if(value_list == NULL) /* alloc failure: report zero values */
++	{
++		*num_values_returned = 0;
++		return NULL;
++	}
++	next_value_index = 0;
++	get_sorted_node_values(map->root, value_list, &next_value_index, 0);
++	value_list[map->num_elements] = NULL; /* since we're dealing with pointers make list null terminated */
++
++	*num_values_returned = map->num_elements;
++	return value_list;
++
++}
++
++
++
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed) /* empty the map per destruction_type, then free the map struct itself */
++{
++	void** return_values = destroy_long_map_values(map, destruction_type, num_destroyed);
++	free(map);
++	return return_values; /* non-NULL only for DESTROY_MODE_RETURN_VALUES */
++}
++
++
++
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value)) /* in-order visit of every (key, value) pair */
++{
++	apply_to_every_long_map_node(map->root, apply_func);
++}
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value)) /* key arg is NULL when store_keys was not set */
++{
++	apply_to_every_string_map_node( (map->lm).root, map->store_keys, apply_func);
++}
++
++
++/***************************************************
++ * internal utility function definitions
++ ***************************************************/
++static void free_stack(stack_node* stack) /* release every frame of a path stack (frames only, not tree nodes) */
++{
++	while(stack != NULL)
++	{
++		stack_node* prev_node = stack;
++		stack = prev_node->previous;
++		free(prev_node);
++	}
++
++}
++
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed) /* drain the tree; free/return/ignore values per mode */
++{
++	void** return_values = NULL;
++	unsigned long return_index = 0;
++
++	*num_destroyed = 0;
++
++	if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++	{
++		return_values = (void**)malloc((map->num_elements+1)*sizeof(void*));
++		if(return_values == NULL) /* deal with malloc failure */
++		{
++			destruction_type = DESTROY_MODE_IGNORE_VALUES; /* could cause memory leak, but there's no other way to be sure we won't seg fault */
++		}
++		else
++		{
++			return_values[map->num_elements] = NULL; /* NULL-terminate the returned array */
++		}
++	}
++	while(map->num_elements > 0) /* repeatedly pop the minimum until the tree is empty */
++	{
++		unsigned long smallest_key;
++		void* removed_value = remove_smallest_long_map_element(map, &smallest_key);
++		if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++		{
++			return_values[return_index] = removed_value;
++		}
++		if(destruction_type == DESTROY_MODE_FREE_VALUES)
++		{
++			free(removed_value);
++		}
++		return_index++;
++		*num_destroyed = *num_destroyed + 1; /* NOTE(review): still counts up after the alloc-failure fallback above, so callers may see *num_destroyed > 0 with a NULL return */
++	}
++	return return_values;
++}
++
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value)) /* recursive in-order traversal */
++{
++	if(node != NULL)
++	{
++		apply_to_every_long_map_node(node->left, apply_func);
++
++		apply_func(node->key, node->value);
++
++		apply_to_every_long_map_node(node->right, apply_func);
++	}
++}
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value)) /* same traversal, unwrapping key/value pairs when present */
++{
++	if(node != NULL)
++	{
++		apply_to_every_string_map_node(node->left, has_key, apply_func);
++
++		if(has_key)
++		{
++			string_map_key_value* kv = (string_map_key_value*)(node->value);
++			apply_func(kv->key, kv->value);
++		}
++		else
++		{
++			apply_func(NULL, node->value); /* no stored key to hand back */
++		}
++		apply_to_every_string_map_node(node->right, has_key, apply_func);
++	}
++}
++
++
++
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth) /* In-order walk appending keys (ascending) to key_list; depth is carried but unused here. */
++{
++	if(node != NULL)
++	{
++		get_sorted_node_keys(node->left, key_list, next_key_index, depth+1);
++
++		key_list[ *next_key_index ] = node->key;
++		(*next_key_index)++;
++
++		get_sorted_node_keys(node->right, key_list, next_key_index, depth+1);
++	}
++}
++
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth) /* In-order walk appending values (in ascending-key order) to value_list; depth is carried but unused here. */
++{
++	if(node != NULL)
++	{
++		get_sorted_node_values(node->left, value_list, next_value_index, depth+1);
++
++		value_list[ *next_value_index ] = node->value;
++		(*next_value_index)++;
++
++		get_sorted_node_values(node->right, value_list, next_value_index, depth+1);
++	}
++}
++
++
++
++/*
++ * direction = -1 indicates left subtree updated, direction = 1 for right subtree
++ * update_op = -1 indicates delete node, update_op = 1 for insert node
++ */
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op) /* AVL rebalance of *n after an update in one subtree; returns the new balance factor of *n. */
++{
++	/*
++	printf( "original: key = %ld, balance = %d, update_op=%d, direction=%d\n", (*n)->key, (*n)->balance, update_op, direction);
++	*/
++
++	(*n)->balance = (*n)->balance + (update_op*direction); /* fold the subtree height change into the balance factor */
++
++	if( (*n)->balance < -1) /* left-heavy: rotation needed */
++	{
++		if((*n)->left->balance < 0) /* left-left case: single right rotation */
++		{
++			rotate_right(n);
++			(*n)->right->balance = 0;
++			(*n)->balance = 0;
++		}
++		else if((*n)->left->balance == 0) /* only possible after a delete */
++		{
++			rotate_right(n);
++			(*n)->right->balance = -1;
++			(*n)->balance = 1;
++		}
++		else if((*n)->left->balance > 0) /* left-right case: double rotation */
++		{
++			rotate_left( &((*n)->left) );
++			rotate_right(n);
++			/*
++			if( (*n)->balance < 0 )
++			{
++				(*n)->left->balance = 0;
++				(*n)->right->balance = 1;
++			}
++			else if( (*n)->balance == 0 )
++			{
++				(*n)->left->balance = 0;
++				(*n)->right->balance = 0;
++			}
++			else if( (*n)->balance > 0 )
++			{
++				(*n)->left->balance = -1;
++				(*n)->right->balance = 0;
++			}
++			*/
++			(*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++			(*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++			(*n)->balance = 0;
++		}
++	}
++	if( (*n)->balance > 1) /* right-heavy: mirror of the cases above */
++	{
++		if((*n)->right->balance > 0) /* right-right case: single left rotation */
++		{
++			rotate_left(n);
++			(*n)->left->balance = 0;
++			(*n)->balance = 0;
++		}
++		else if ((*n)->right->balance == 0) /* only possible after a delete */
++		{
++			rotate_left(n);
++			(*n)->left->balance = 1;
++			(*n)->balance = -1;
++		}
++		else if((*n)->right->balance < 0) /* right-left case: double rotation */
++		{
++			rotate_right( &((*n)->right) );
++			rotate_left(n);
++			/*
++			if( (*n)->balance < 0 )
++			{
++				(*n)->left->balance = 0;
++				(*n)->right->balance = 1;
++			}
++			else if( (*n)->balance == 0 )
++			{
++				(*n)->left->balance = 0;
++				(*n)->right->balance = 0;
++			}
++			else if( (*n)->balance > 0 )
++			{
++				(*n)->left->balance = -1;
++				(*n)->right->balance = 0;
++			}
++			*/
++			(*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++			(*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++			(*n)->balance = 0;
++		}
++	}
++
++	/*
++	printf( "key = %ld, balance = %d\n", (*n)->key, (*n)->balance);
++	*/
++
++	return (*n)->balance;
++}
++
++
++static void rotate_right (long_map_node** parent) /* AVL right rotation: left child becomes the new subtree root. */
++{
++	long_map_node* old_parent = *parent;
++	long_map_node* pivot = old_parent->left;
++	old_parent->left = pivot->right; /* pivot's right subtree re-attaches under the old root */
++	pivot->right = old_parent;
++
++	*parent = pivot;
++}
++
++static void rotate_left (long_map_node** parent) /* AVL left rotation: right child becomes the new subtree root. */
++{
++	long_map_node* old_parent = *parent;
++	long_map_node* pivot = old_parent->right;
++	old_parent->right = pivot->left; /* pivot's left subtree re-attaches under the old root */
++	pivot->left = old_parent;
++
++	*parent = pivot;
++}
++
++
++
++/***************************************************************************
++ * This algorithm was created for the sdbm database library (a public-domain
++ * reimplementation of ndbm) and seems to work relatively well in
++ * scrambling bits
++ *
++ *
++ * This code was derived from code found at:
++ * http://www.cse.yorku.ca/~oz/hash.html
++ ***************************************************************************/
++static unsigned long sdbm_string_hash(const char *key) /* sdbm hash of a NUL-terminated string: h = ch + h*65599 per character. */
++{
++	unsigned long hashed_key = 0;
++
++	int index = 0;
++	unsigned int nextch;
++	while(key[index] != '\0')
++	{
++		nextch = key[index];
++		hashed_key = nextch + (hashed_key << 6) + (hashed_key << 16) - hashed_key; /* 65599 = 2^6 + 2^16 - 1 */
++		index++;
++	}
++	return hashed_key;
++}
++
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/regexp.h 2015-06-19 03:02:54.745696021 +0800
+@@ -0,0 +1,41 @@
++/*
++ * Definitions etc. for regexp(3) routines.
++ *
++ * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof],
++ * not the System V one.
++ */
++
++#ifndef REGEXP_H
++#define REGEXP_H
++
++
++/*
++http://www.opensource.apple.com/darwinsource/10.3/expect-1/expect/expect.h ,
++which contains a version of this library, says:
++
++ *
++ * NSUBEXP must be at least 10, and no greater than 117 or the parser
++ * will not work properly.
++ *
++
++However, it looks rather like this library is limited to 10. If you think
++otherwise, let us know.
++*/
++
++#define NSUBEXP 10
++typedef struct regexp {
++ char *startp[NSUBEXP];
++ char *endp[NSUBEXP];
++ char regstart; /* Internal use only. */
++ char reganch; /* Internal use only. */
++ char *regmust; /* Internal use only. */
++ int regmlen; /* Internal use only. */
++ char program[1]; /* Unwarranted chumminess with compiler. */
++} regexp;
++
++regexp * regcomp(char *exp, int *patternsize);
++int regexec(regexp *prog, char *string);
++void regsub(regexp *prog, char *source, char *dest);
++void regerror(char *s);
++
++#endif
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/regmagic.h 2015-06-19 03:02:54.749695854 +0800
+@@ -0,0 +1,5 @@
++/*
++ * The first byte of the regexp internal "program" is actually this magic
++ * number; the start node begins in the second byte.
++ */
++#define MAGIC 0234
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/regsub.c 2015-06-19 03:02:54.749695854 +0800
+@@ -0,0 +1,95 @@
++/*
++ * regsub
++ * @(#)regsub.c 1.3 of 2 April 86
++ *
++ * Copyright (c) 1986 by University of Toronto.
++ * Written by Henry Spencer. Not derived from licensed software.
++ *
++ * Permission is granted to anyone to use this software for any
++ * purpose on any computer system, and to redistribute it freely,
++ * subject to the following restrictions:
++ *
++ * 1. The author is not responsible for the consequences of use of
++ * this software, no matter how awful, even if they arise
++ * from defects in it.
++ *
++ * 2. The origin of this software must not be misrepresented, either
++ * by explicit claim or by omission.
++ *
++ * 3. Altered versions must be plainly marked as such, and must not
++ * be misrepresented as being the original software.
++ *
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ */
++#include "regexp.h"
++#include "regmagic.h"
++#include <linux/string.h>
++
++
++#ifndef CHARBITS
++#define UCHARAT(p) ((int)*(unsigned char *)(p))
++#else
++#define UCHARAT(p) ((int)*(p)&CHARBITS)
++#endif
++
++#if 0
++//void regerror(char * s)
++//{
++// printk("regexp(3): %s", s);
++// /* NOTREACHED */
++//}
++#endif
++
++/*
++ - regsub - perform substitutions after a regexp match
++ */
++void
++regsub(regexp * prog, char * source, char * dest) /* Copy source to dest, expanding '&' and \0..\9 with substrings captured by the last regexec() on prog. */
++{
++	register char *src;
++	register char *dst;
++	register char c;
++	register int no; /* capture-group index; -1 = ordinary character */
++	register int len;
++
++	/* Not necessary and gcc doesn't like it -MLS */
++	/*extern char *strncpy();*/
++
++	if (prog == NULL || source == NULL || dest == NULL) {
++		regerror("NULL parm to regsub");
++		return;
++	}
++	if (UCHARAT(prog->program) != MAGIC) {
++		regerror("damaged regexp fed to regsub");
++		return;
++	}
++
++	src = source;
++	dst = dest;
++	while ((c = *src++) != '\0') {
++		if (c == '&')
++			no = 0; /* '&' stands for the whole match */
++		else if (c == '\\' && '0' <= *src && *src <= '9')
++			no = *src++ - '0'; /* \N refers to capture group N */
++		else
++			no = -1;
++
++		if (no < 0) {	/* Ordinary character. */
++			if (c == '\\' && (*src == '\\' || *src == '&'))
++				c = *src++; /* backslash escapes a literal '\' or '&' */
++			*dst++ = c;
++		} else if (prog->startp[no] != NULL && prog->endp[no] != NULL) {
++			len = prog->endp[no] - prog->startp[no];
++			(void) strncpy(dst, prog->startp[no], len);
++			dst += len;
++			if (len != 0 && *(dst-1) == '\0') {	/* strncpy hit NUL. */
++				regerror("damaged match string");
++				return;
++			}
++		}
++	}
++	*dst++ = '\0';
++}
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/weburl_deps/regexp.c 2015-06-19 03:02:54.749695854 +0800
+@@ -0,0 +1,1197 @@
++/*
++ * regcomp and regexec -- regsub and regerror are elsewhere
++ * @(#)regexp.c 1.3 of 18 April 87
++ *
++ * Copyright (c) 1986 by University of Toronto.
++ * Written by Henry Spencer. Not derived from licensed software.
++ *
++ * Permission is granted to anyone to use this software for any
++ * purpose on any computer system, and to redistribute it freely,
++ * subject to the following restrictions:
++ *
++ * 1. The author is not responsible for the consequences of use of
++ * this software, no matter how awful, even if they arise
++ * from defects in it.
++ *
++ * 2. The origin of this software must not be misrepresented, either
++ * by explicit claim or by omission.
++ *
++ * 3. Altered versions must be plainly marked as such, and must not
++ * be misrepresented as being the original software.
++ *
++ * Beware that some of this code is subtly aware of the way operator
++ * precedence is structured in regular expressions. Serious changes in
++ * regular-expression syntax might require a total rethink.
++ *
++ * This code was modified by Ethan Sommer to work within the kernel
++ * (it now uses kmalloc etc..)
++ *
++ * Modified slightly by Matthew Strait to use more modern C.
++ */
++
++#include "regexp.h"
++#include "regmagic.h"
++
++/* added by ethan and matt. Lets it work in both kernel and user space.
++(So iptables can use it, for instance.) Yea, it goes both ways... */
++#if __KERNEL__
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++#else
++ #define printk(format,args...) printf(format,##args)
++#endif
++
++void regerror(char * s) /* Report a regexp diagnostic; printk in kernel, printf in userspace via the macro above. */
++{
++	printk("<3>Regexp: %s\n", s);
++	/* NOTREACHED */
++}
++
++/*
++ * The "internal use only" fields in regexp.h are present to pass info from
++ * compile to execute that permits the execute phase to run lots faster on
++ * simple cases. They are:
++ *
++ * regstart char that must begin a match; '\0' if none obvious
++ * reganch is the match anchored (at beginning-of-line only)?
++ * regmust string (pointer into program) that match must include, or NULL
++ * regmlen length of regmust string
++ *
++ * Regstart and reganch permit very fast decisions on suitable starting points
++ * for a match, cutting down the work a lot. Regmust permits fast rejection
++ * of lines that cannot possibly match. The regmust tests are costly enough
++ * that regcomp() supplies a regmust only if the r.e. contains something
++ * potentially expensive (at present, the only such thing detected is * or +
++ * at the start of the r.e., which can involve a lot of backup). Regmlen is
++ * supplied because the test in regexec() needs it and regcomp() is computing
++ * it anyway.
++ */
++
++/*
++ * Structure for regexp "program". This is essentially a linear encoding
++ * of a nondeterministic finite-state machine (aka syntax charts or
++ * "railroad normal form" in parsing technology). Each node is an opcode
++ * plus a "next" pointer, possibly plus an operand. "Next" pointers of
++ * all nodes except BRANCH implement concatenation; a "next" pointer with
++ * a BRANCH on both ends of it is connecting two alternatives. (Here we
++ * have one of the subtle syntax dependencies: an individual BRANCH (as
++ * opposed to a collection of them) is never concatenated with anything
++ * because of operator precedence.) The operand of some types of node is
++ * a literal string; for others, it is a node leading into a sub-FSM. In
++ * particular, the operand of a BRANCH node is the first node of the branch.
++ * (NB this is *not* a tree structure: the tail of the branch connects
++ * to the thing following the set of BRANCHes.) The opcodes are:
++ */
++
++/* definition number opnd? meaning */
++#define END 0 /* no End of program. */
++#define BOL 1 /* no Match "" at beginning of line. */
++#define EOL 2 /* no Match "" at end of line. */
++#define ANY 3 /* no Match any one character. */
++#define ANYOF 4 /* str Match any character in this string. */
++#define ANYBUT 5 /* str Match any character not in this string. */
++#define BRANCH 6 /* node Match this alternative, or the next... */
++#define BACK 7 /* no Match "", "next" ptr points backward. */
++#define EXACTLY 8 /* str Match this string. */
++#define NOTHING 9 /* no Match empty string. */
++#define STAR 10 /* node Match this (simple) thing 0 or more times. */
++#define PLUS 11 /* node Match this (simple) thing 1 or more times. */
++#define OPEN 20 /* no Mark this point in input as start of #n. */
++ /* OPEN+1 is number 1, etc. */
++#define CLOSE 30 /* no Analogous to OPEN. */
++
++/*
++ * Opcode notes:
++ *
++ * BRANCH The set of branches constituting a single choice are hooked
++ * together with their "next" pointers, since precedence prevents
++ * anything being concatenated to any individual branch. The
++ * "next" pointer of the last BRANCH in a choice points to the
++ * thing following the whole choice. This is also where the
++ * final "next" pointer of each individual branch points; each
++ * branch starts with the operand node of a BRANCH node.
++ *
++ * BACK Normal "next" pointers all implicitly point forward; BACK
++ * exists to make loop structures possible.
++ *
++ * STAR,PLUS '?', and complex '*' and '+', are implemented as circular
++ * BRANCH structures using BACK. Simple cases (one character
++ * per match) are implemented with STAR and PLUS for speed
++ * and to minimize recursive plunges.
++ *
++ * OPEN,CLOSE ...are numbered at compile time.
++ */
++
++/*
++ * A node is one char of opcode followed by two chars of "next" pointer.
++ * "Next" pointers are stored as two 8-bit pieces, high order first. The
++ * value is a positive offset from the opcode of the node containing it.
++ * An operand, if any, simply follows the node. (Note that much of the
++ * code generation knows about this implicit relationship.)
++ *
++ * Using two bytes for the "next" pointer is vast overkill for most things,
++ * but allows patterns to get big without disasters.
++ */
++#define OP(p) (*(p))
++#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377))
++#define OPERAND(p) ((p) + 3)
++
++/*
++ * See regmagic.h for one further detail of program structure.
++ */
++
++
++/*
++ * Utility definitions.
++ */
++#ifndef CHARBITS
++#define UCHARAT(p) ((int)*(unsigned char *)(p))
++#else
++#define UCHARAT(p) ((int)*(p)&CHARBITS)
++#endif
++
++#define FAIL(m) { regerror(m); return(NULL); }
++#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?')
++#define META "^$.[()|?+*\\"
++
++/*
++ * Flags to be passed up and down.
++ */
++#define HASWIDTH 01 /* Known never to match null string. */
++#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */
++#define SPSTART 04 /* Starts with * or +. */
++#define WORST 0 /* Worst case. */
++
++/*
++ * Global work variables for regcomp().
++ */
++struct match_globals {
++char *reginput; /* String-input pointer. */
++char *regbol; /* Beginning of input, for ^ check. */
++char **regstartp; /* Pointer to startp array. */
++char **regendp; /* Ditto for endp. */
++char *regparse; /* Input-scan pointer. */
++int regnpar; /* () count. */
++char regdummy;
++char *regcode; /* Code-emit pointer; &regdummy = don't. */
++long regsize; /* Code size. */
++};
++
++/*
++ * Forward declarations for regcomp()'s friends.
++ */
++#ifndef STATIC
++#define STATIC static
++#endif
++STATIC char *reg(struct match_globals *g, int paren,int *flagp);
++STATIC char *regbranch(struct match_globals *g, int *flagp);
++STATIC char *regpiece(struct match_globals *g, int *flagp);
++STATIC char *regatom(struct match_globals *g, int *flagp);
++STATIC char *regnode(struct match_globals *g, char op);
++STATIC char *regnext(struct match_globals *g, char *p);
++STATIC void regc(struct match_globals *g, char b);
++STATIC void reginsert(struct match_globals *g, char op, char *opnd);
++STATIC void regtail(struct match_globals *g, char *p, char *val);
++STATIC void regoptail(struct match_globals *g, char *p, char *val);
++
++
++__kernel_size_t my_strcspn(const char *s1,const char *s2) /* Local strcspn(): length of the initial segment of s1 containing no character from s2. */
++{
++	char *scan1;
++	char *scan2;
++	int count;
++
++	count = 0;
++	for (scan1 = (char *)s1; *scan1 != '\0'; scan1++) {
++		for (scan2 = (char *)s2; *scan2 != '\0';)	/* ++ moved down. */
++			if (*scan1 == *scan2++)
++				return(count);
++		count++;
++	}
++	return(count);
++}
++
++/*
++ - regcomp - compile a regular expression into internal code
++ *
++ * We can't allocate space until we know how big the compiled form will be,
++ * but we can't compile it (and thus know how big it is) until we've got a
++ * place to put the code. So we cheat: we compile it twice, once with code
++ * generation turned off and size counting turned on, and once "for real".
++ * This also means that we don't allocate space until we are sure that the
++ * thing really will compile successfully, and we never have to move the
++ * code and thus invalidate pointers into it. (Note that it has to be in
++ * one piece because free() must be able to free it all.)
++ *
++ * Beware that the optimization-preparation code in here knows about some
++ * of the structure of the compiled regexp.
++ */
++regexp *
++regcomp(char *exp,int *patternsize) /* Compile exp into a regexp program; stores total allocation size in *patternsize. NULL on error. */
++{
++	register regexp *r;
++	register char *scan;
++	register char *longest;
++	register int len;
++	int flags;
++	struct match_globals g;
++
++	/* commented out by ethan
++	extern char *malloc();
++	*/
++
++	if (exp == NULL)
++		FAIL("NULL argument");
++
++	/* First pass: determine size, legality. */
++	g.regparse = exp;
++	g.regnpar = 1;
++	g.regsize = 0L;
++	g.regcode = &g.regdummy; /* dummy sink: pass 1 only counts bytes */
++	regc(&g, MAGIC);
++	if (reg(&g, 0, &flags) == NULL)
++		return(NULL);
++
++	/* Small enough for pointer-storage convention? */
++	if (g.regsize >= 32767L)		/* Probably could be 65535L. */
++		FAIL("regexp too big");
++
++	/* Allocate space. */
++	*patternsize=sizeof(regexp) + (unsigned)g.regsize;
++	r = (regexp *)malloc(sizeof(regexp) + (unsigned)g.regsize);
++	if (r == NULL)
++		FAIL("out of space");
++
++	/* Second pass: emit code. */
++	g.regparse = exp;
++	g.regnpar = 1;
++	g.regcode = r->program;
++	regc(&g, MAGIC);
++	if (reg(&g, 0, &flags) == NULL)
++		return(NULL); /* NOTE(review): r appears to leak here if the second pass fails -- confirm */
++
++	/* Dig out information for optimizations. */
++	r->regstart = '\0';	/* Worst-case defaults. */
++	r->reganch = 0;
++	r->regmust = NULL;
++	r->regmlen = 0;
++	scan = r->program+1;			/* First BRANCH. */
++	if (OP(regnext(&g, scan)) == END) {	/* Only one top-level choice. */
++		scan = OPERAND(scan);
++
++		/* Starting-point info. */
++		if (OP(scan) == EXACTLY)
++			r->regstart = *OPERAND(scan);
++		else if (OP(scan) == BOL)
++			r->reganch++;
++
++		/*
++		 * If there's something expensive in the r.e., find the
++		 * longest literal string that must appear and make it the
++		 * regmust.  Resolve ties in favor of later strings, since
++		 * the regstart check works with the beginning of the r.e.
++		 * and avoiding duplication strengthens checking.  Not a
++		 * strong reason, but sufficient in the absence of others.
++		 */
++		if (flags&SPSTART) {
++			longest = NULL;
++			len = 0;
++			for (; scan != NULL; scan = regnext(&g, scan))
++				if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
++					longest = OPERAND(scan);
++					len = strlen(OPERAND(scan));
++				}
++			r->regmust = longest;
++			r->regmlen = len;
++		}
++	}
++
++	return(r);
++}
++
++/*
++ - reg - regular expression, i.e. main body or parenthesized thing
++ *
++ * Caller must absorb opening parenthesis.
++ *
++ * Combining parenthesis handling with the base level of regular expression
++ * is a trifle forced, but the need to tie the tails of the branches to what
++ * follows makes it hard to avoid.
++ */
++static char * /* Location of generated code, or NULL on error. */
++reg(struct match_globals *g, int paren, int *flagp /* Parenthesized? */ )
++{
++	register char *ret;
++	register char *br;
++	register char *ender;
++	register int parno = 0; /* 0 makes gcc happy */
++	int flags;
++
++	*flagp = HASWIDTH;	/* Tentatively. */
++
++	/* Make an OPEN node, if parenthesized. */
++	if (paren) {
++		if (g->regnpar >= NSUBEXP)
++			FAIL("too many ()");
++		parno = g->regnpar;
++		g->regnpar++;
++		ret = regnode(g, OPEN+parno);
++	} else
++		ret = NULL;
++
++	/* Pick up the branches, linking them together. */
++	br = regbranch(g, &flags);
++	if (br == NULL)
++		return(NULL);
++	if (ret != NULL)
++		regtail(g, ret, br);	/* OPEN -> first. */
++	else
++		ret = br;
++	if (!(flags&HASWIDTH)) /* a branch that can match "" makes the whole group zero-width */
++		*flagp &= ~HASWIDTH;
++	*flagp |= flags&SPSTART;
++	while (*g->regparse == '|') {
++		g->regparse++;
++		br = regbranch(g, &flags);
++		if (br == NULL)
++			return(NULL);
++		regtail(g, ret, br);	/* BRANCH -> BRANCH. */
++		if (!(flags&HASWIDTH))
++			*flagp &= ~HASWIDTH;
++		*flagp |= flags&SPSTART;
++	}
++
++	/* Make a closing node, and hook it on the end. */
++	ender = regnode(g, (paren) ? CLOSE+parno : END);
++	regtail(g, ret, ender);
++
++	/* Hook the tails of the branches to the closing node. */
++	for (br = ret; br != NULL; br = regnext(g, br))
++		regoptail(g, br, ender);
++
++	/* Check for proper termination. */
++	if (paren && *g->regparse++ != ')') {
++		FAIL("unmatched ()");
++	} else if (!paren && *g->regparse != '\0') {
++		if (*g->regparse == ')') {
++			FAIL("unmatched ()");
++		} else
++			FAIL("junk on end");	/* "Can't happen". */
++		/* NOTREACHED */
++	}
++
++	return(ret);
++}
++
++/*
++ - regbranch - one alternative of an | operator
++ *
++ * Implements the concatenation operator.
++ */
++static char *
++regbranch(struct match_globals *g, int *flagp) /* One '|' alternative: the concatenation of pieces up to '|', ')' or end. */
++{
++	register char *ret;
++	register char *chain;
++	register char *latest;
++	int flags;
++
++	*flagp = WORST;		/* Tentatively. */
++
++	ret = regnode(g, BRANCH);
++	chain = NULL;
++	while (*g->regparse != '\0' && *g->regparse != '|' && *g->regparse != ')') {
++		latest = regpiece(g, &flags);
++		if (latest == NULL)
++			return(NULL);
++		*flagp |= flags&HASWIDTH;
++		if (chain == NULL)	/* First piece. */
++			*flagp |= flags&SPSTART;
++		else
++			regtail(g, chain, latest);
++		chain = latest;
++	}
++	if (chain == NULL)	/* Loop ran zero times. */
++		(void) regnode(g, NOTHING);
++
++	return(ret);
++}
++
++/*
++ - regpiece - something followed by possible [*+?]
++ *
++ * Note that the branching code sequences used for ? and the general cases
++ * of * and + are somewhat optimized: they use the same NOTHING node as
++ * both the endmarker for their branch list and the body of the last branch.
++ * It might seem that this node could be dispensed with entirely, but the
++ * endmarker role is not redundant.
++ */
++static char *
++regpiece(struct match_globals *g, int *flagp) /* An atom optionally followed by '*', '+' or '?'. */
++{
++	register char *ret;
++	register char op;
++	register char *next;
++	int flags;
++
++	ret = regatom(g, &flags);
++	if (ret == NULL)
++		return(NULL);
++
++	op = *g->regparse;
++	if (!ISMULT(op)) {
++		*flagp = flags;
++		return(ret);
++	}
++
++	if (!(flags&HASWIDTH) && op != '?')
++		FAIL("*+ operand could be empty");
++	*flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH);
++
++	if (op == '*' && (flags&SIMPLE))
++		reginsert(g, STAR, ret); /* simple operand: compact STAR node */
++	else if (op == '*') {
++		/* Emit x* as (x&|), where & means "self". */
++		reginsert(g, BRANCH, ret);			/* Either x */
++		regoptail(g, ret, regnode(g, BACK));		/* and loop */
++		regoptail(g, ret, ret);			/* back */
++		regtail(g, ret, regnode(g, BRANCH));		/* or */
++		regtail(g, ret, regnode(g, NOTHING));		/* null. */
++	} else if (op == '+' && (flags&SIMPLE))
++		reginsert(g, PLUS, ret); /* simple operand: compact PLUS node */
++	else if (op == '+') {
++		/* Emit x+ as x(&|), where & means "self". */
++		next = regnode(g, BRANCH);			/* Either */
++		regtail(g, ret, next);
++		regtail(g, regnode(g, BACK), ret);		/* loop back */
++		regtail(g, next, regnode(g, BRANCH));		/* or */
++		regtail(g, ret, regnode(g, NOTHING));		/* null. */
++	} else if (op == '?') {
++		/* Emit x? as (x|) */
++		reginsert(g, BRANCH, ret);		/* Either x */
++		regtail(g, ret, regnode(g, BRANCH));	/* or */
++		next = regnode(g, NOTHING);		/* null. */
++		regtail(g, ret, next);
++		regoptail(g, ret, next);
++	}
++	g->regparse++;
++	if (ISMULT(*g->regparse))
++		FAIL("nested *?+");
++
++	return(ret);
++}
++
++/*
++ - regatom - the lowest level
++ *
++ * Optimization: gobbles an entire sequence of ordinary characters so that
++ * it can turn them into a single node, which is smaller to store and
++ * faster to run. Backslashed characters are exceptions, each becoming a
++ * separate node; the code is simpler that way and it's not worth fixing.
++ */
++static char *
++regatom(struct match_globals *g, int *flagp) /* The lowest level: one node -- anchor, '.', class, group, escape, or a literal run. */
++{
++	register char *ret;
++	int flags;
++
++	*flagp = WORST;		/* Tentatively. */
++
++	switch (*g->regparse++) {
++	case '^':
++		ret = regnode(g, BOL);
++		break;
++	case '$':
++		ret = regnode(g, EOL);
++		break;
++	case '.':
++		ret = regnode(g, ANY);
++		*flagp |= HASWIDTH|SIMPLE;
++		break;
++	case '[': {
++			register int class;
++			register int classend;
++
++			if (*g->regparse == '^') {	/* Complement of range. */
++				ret = regnode(g, ANYBUT);
++				g->regparse++;
++			} else
++				ret = regnode(g, ANYOF);
++			if (*g->regparse == ']' || *g->regparse == '-')
++				regc(g, *g->regparse++); /* leading ']' or '-' is literal */
++			while (*g->regparse != '\0' && *g->regparse != ']') {
++				if (*g->regparse == '-') {
++					g->regparse++;
++					if (*g->regparse == ']' || *g->regparse == '\0')
++						regc(g, '-'); /* trailing '-' is literal */
++					else {
++						class = UCHARAT(g->regparse-2)+1;
++						classend = UCHARAT(g->regparse);
++						if (class > classend+1)
++							FAIL("invalid [] range");
++						for (; class <= classend; class++)
++							regc(g, class); /* expand range into explicit characters */
++						g->regparse++;
++					}
++				} else
++					regc(g, *g->regparse++);
++			}
++			regc(g, '\0');
++			if (*g->regparse != ']')
++				FAIL("unmatched []");
++			g->regparse++;
++			*flagp |= HASWIDTH|SIMPLE;
++		}
++		break;
++	case '(':
++		ret = reg(g, 1, &flags);
++		if (ret == NULL)
++			return(NULL);
++		*flagp |= flags&(HASWIDTH|SPSTART);
++		break;
++	case '\0':
++	case '|':
++	case ')':
++		FAIL("internal urp");	/* Supposed to be caught earlier. */
++		break;
++	case '?':
++	case '+':
++	case '*':
++		FAIL("?+* follows nothing");
++		break;
++	case '\\':
++		if (*g->regparse == '\0')
++			FAIL("trailing \\");
++		ret = regnode(g, EXACTLY);
++		regc(g, *g->regparse++);
++		regc(g, '\0');
++		*flagp |= HASWIDTH|SIMPLE;
++		break;
++	default: {
++			register int len;
++			register char ender;
++
++			g->regparse--;
++			len = my_strcspn((const char *)g->regparse, (const char *)META);
++			if (len <= 0)
++				FAIL("internal disaster");
++			ender = *(g->regparse+len);
++			if (len > 1 && ISMULT(ender))
++				len--;		/* Back off clear of ?+* operand. */
++			*flagp |= HASWIDTH;
++			if (len == 1)
++				*flagp |= SIMPLE;
++			ret = regnode(g, EXACTLY);
++			while (len > 0) {
++				regc(g, *g->regparse++);
++				len--;
++			}
++			regc(g, '\0');
++		}
++		break;
++	}
++
++	return(ret);
++}
++
++/*
++ - regnode - emit a node
++ */
++static char *			/* Location. */
++regnode(struct match_globals *g, char op) /* Emit a 3-byte node: opcode plus a null "next" pointer. */
++{
++	register char *ret;
++	register char *ptr;
++
++	ret = g->regcode;
++	if (ret == &g->regdummy) { /* size-counting pass: just record 3 bytes */
++		g->regsize += 3;
++		return(ret);
++	}
++
++	ptr = ret;
++	*ptr++ = op;
++	*ptr++ = '\0';		/* Null "next" pointer. */
++	*ptr++ = '\0';
++	g->regcode = ptr;
++
++	return(ret);
++}
++
++/*
++ - regc - emit (if appropriate) a byte of code
++ */
++static void
++regc(struct match_globals *g, char b) /* Emit one byte of program, or just count it during the sizing pass. */
++{
++	if (g->regcode != &g->regdummy)
++		*g->regcode++ = b;
++	else
++		g->regsize++;
++}
++
++/*
++ - reginsert - insert an operator in front of already-emitted operand
++ *
++ * Means relocating the operand.
++ */
++static void
++reginsert(struct match_globals *g, char op, char* opnd) /* Insert a 3-byte op node in front of an already-emitted operand (relocates it). */
++{
++	register char *src;
++	register char *dst;
++	register char *place;
++
++	if (g->regcode == &g->regdummy) { /* sizing pass: just count the 3 bytes */
++		g->regsize += 3;
++		return;
++	}
++
++	src = g->regcode;
++	g->regcode += 3;
++	dst = g->regcode;
++	while (src > opnd) /* shift the operand up by 3 bytes, back to front */
++		*--dst = *--src;
++
++	place = opnd;		/* Op node, where operand used to be. */
++	*place++ = op;
++	*place++ = '\0';
++	*place++ = '\0';
++}
++
++/*
++ - regtail - set the next-pointer at the end of a node chain
++ */
++static void
++regtail(struct match_globals *g, char *p, char *val) /* Set the "next" pointer of the last node in chain p to point at val. */
++{
++	register char *scan;
++	register char *temp;
++	register int offset;
++
++	if (p == &g->regdummy)
++		return;
++
++	/* Find last node. */
++	scan = p;
++	for (;;) {
++		temp = regnext(g, scan);
++		if (temp == NULL)
++			break;
++		scan = temp;
++	}
++
++	if (OP(scan) == BACK)
++		offset = scan - val; /* BACK nodes store a backward offset */
++	else
++		offset = val - scan;
++	*(scan+1) = (offset>>8)&0377; /* high byte first (see NEXT macro) */
++	*(scan+2) = offset&0377;
++}
++
++/*
++ - regoptail - regtail on operand of first argument; nop if operandless
++ */
++static void
++regoptail(struct match_globals *g, char *p, char *val) /* regtail on the operand of p; no-op unless p is a BRANCH. */
++{
++	/* "Operandless" and "op != BRANCH" are synonymous in practice. */
++	if (p == NULL || p == &g->regdummy || OP(p) != BRANCH)
++		return;
++	regtail(g, OPERAND(p), val);
++}
++
++/*
++ * regexec and friends
++ */
++
++
++/*
++ * Forwards.
++ */
++STATIC int regtry(struct match_globals *g, regexp *prog, char *string);
++STATIC int regmatch(struct match_globals *g, char *prog);
++STATIC int regrepeat(struct match_globals *g, char *p);
++
++#ifdef DEBUG
++int regnarrate = 0;
++void regdump();
++STATIC char *regprop(char *op);
++#endif
++
++/*
++ - regexec - match a regexp against a string
++ */
++int
++regexec(regexp *prog, char *string) /* Match compiled prog against string; returns 1 on match (capture arrays filled), else 0. */
++{
++	register char *s;
++	struct match_globals g;
++
++	/* Be paranoid... */
++	if (prog == NULL || string == NULL) {
++		printk("<3>Regexp: NULL parameter\n");
++		return(0);
++	}
++
++	/* Check validity of program. */
++	if (UCHARAT(prog->program) != MAGIC) {
++		printk("<3>Regexp: corrupted program\n");
++		return(0);
++	}
++
++	/* If there is a "must appear" string, look for it. */
++	if (prog->regmust != NULL) {
++		s = string;
++		while ((s = strchr(s, prog->regmust[0])) != NULL) {
++			if (strncmp(s, prog->regmust, prog->regmlen) == 0)
++				break;	/* Found it. */
++			s++;
++		}
++		if (s == NULL)	/* Not present. */
++			return(0);
++	}
++
++	/* Mark beginning of line for ^ . */
++	g.regbol = string;
++
++	/* Simplest case:  anchored match need be tried only once. */
++	if (prog->reganch)
++		return(regtry(&g, prog, string));
++
++	/* Messy cases:  unanchored match. */
++	s = string;
++	if (prog->regstart != '\0')
++		/* We know what char it must start with. */
++		while ((s = strchr(s, prog->regstart)) != NULL) {
++			if (regtry(&g, prog, s))
++				return(1);
++			s++;
++		}
++	else
++		/* We don't -- general case. */
++		do {
++			if (regtry(&g, prog, s))
++				return(1);
++		} while (*s++ != '\0');
++
++	/* Failure. */
++	return(0);
++}
++
++/*
++ - regtry - try match at specific point
++ */
++static int			/* 0 failure, 1 success */
++regtry(struct match_globals *g, regexp *prog, char *string) /* Attempt a match starting exactly at string. */
++{
++	register int i;
++	register char **sp;
++	register char **ep;
++
++	g->reginput = string;
++	g->regstartp = prog->startp;
++	g->regendp = prog->endp;
++
++	sp = prog->startp;
++	ep = prog->endp;
++	for (i = NSUBEXP; i > 0; i--) { /* clear all capture slots */
++		*sp++ = NULL;
++		*ep++ = NULL;
++	}
++	if (regmatch(g, prog->program + 1)) {
++		prog->startp[0] = string; /* group 0 = the whole match */
++		prog->endp[0] = g->reginput;
++		return(1);
++	} else
++		return(0);
++}
++
++/*
++ - regmatch - main matching routine
++ *
++ * Conceptually the strategy is simple: check to see whether the current
++ * node matches, call self recursively to see whether the rest matches,
++ * and then act accordingly. In practice we make some effort to avoid
++ * recursion, in particular by going through "ordinary" nodes (that don't
++ * need to know whether the rest of the match failed) by a loop instead of
++ * by recursion.
++ */
++static int			/* 0 failure, 1 success */
++regmatch(struct match_globals *g, char *prog)
++{
++	register char *scan = prog; /* Current node. */
++	char *next;		    /* Next node. */
++
++#ifdef DEBUG
++	if (scan != NULL && regnarrate)
++		fprintf(stderr, "%s(\n", regprop(scan));
++#endif
++	while (scan != NULL) {
++#ifdef DEBUG
++		if (regnarrate)
++			fprintf(stderr, "%s...\n", regprop(scan));
++#endif
++		next = regnext(g, scan);
++
++		switch (OP(scan)) {
++		case BOL:
++			if (g->reginput != g->regbol)	/* ^ matches only at the saved string start */
++				return(0);
++			break;
++		case EOL:
++			if (*g->reginput != '\0')	/* $ matches only at end of string */
++				return(0);
++			break;
++		case ANY:
++			if (*g->reginput == '\0')	/* . matches any char except end of string */
++				return(0);
++			g->reginput++;
++			break;
++		case EXACTLY: {
++			register int len;
++			register char *opnd;
++
++			opnd = OPERAND(scan);
++			/* Inline the first character, for speed. */
++			if (*opnd != *g->reginput)
++				return(0);
++			len = strlen(opnd);
++			if (len > 1 && strncmp(opnd, g->reginput, len) != 0)
++				return(0);
++			g->reginput += len;
++			}
++			break;
++		case ANYOF:
++			if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) == NULL)	/* [set]: char must be in operand */
++				return(0);
++			g->reginput++;
++			break;
++		case ANYBUT:
++			if (*g->reginput == '\0' || strchr(OPERAND(scan), *g->reginput) != NULL)	/* [^set]: char must NOT be in operand */
++				return(0);
++			g->reginput++;
++			break;
++		case NOTHING:
++		case BACK:
++			break;	/* no-ops here; BACK's backward jump happens in regnext() */
++		case OPEN+1:
++		case OPEN+2:
++		case OPEN+3:
++		case OPEN+4:
++		case OPEN+5:
++		case OPEN+6:
++		case OPEN+7:
++		case OPEN+8:
++		case OPEN+9: {	/* start of capture group no; recurse for the rest of the pattern */
++			register int no;
++			register char *save;
++
++			no = OP(scan) - OPEN;
++			save = g->reginput;
++
++			if (regmatch(g, next)) {
++				/*
++				 * Don't set startp if some later
++				 * invocation of the same parentheses
++				 * already has.
++				 */
++				if (g->regstartp[no] == NULL)
++					g->regstartp[no] = save;
++				return(1);
++			} else
++				return(0);
++			}
++			break;
++		case CLOSE+1:
++		case CLOSE+2:
++		case CLOSE+3:
++		case CLOSE+4:
++		case CLOSE+5:
++		case CLOSE+6:
++		case CLOSE+7:
++		case CLOSE+8:
++		case CLOSE+9:
++			{	/* end of capture group no; mirror of the OPEN case */
++			register int no;
++			register char *save;
++
++			no = OP(scan) - CLOSE;
++			save = g->reginput;
++
++			if (regmatch(g, next)) {
++				/*
++				 * Don't set endp if some later
++				 * invocation of the same parentheses
++				 * already has.
++				 */
++				if (g->regendp[no] == NULL)
++					g->regendp[no] = save;
++				return(1);
++			} else
++				return(0);
++			}
++			break;
++		case BRANCH: {	/* alternation: try each branch in turn, restoring input between tries */
++			register char *save;
++
++			if (OP(next) != BRANCH)		/* No choice. */
++				next = OPERAND(scan);	/* Avoid recursion. */
++			else {
++				do {
++					save = g->reginput;
++					if (regmatch(g, OPERAND(scan)))
++						return(1);
++					g->reginput = save;	/* backtrack before trying the next branch */
++					scan = regnext(g, scan);
++				} while (scan != NULL && OP(scan) == BRANCH);
++				return(0);
++				/* NOTREACHED */
++			}
++			}
++			break;
++		case STAR:
++		case PLUS: {	/* greedy repeat: grab the maximum run, then back off one at a time */
++			register char nextch;
++			register int no;
++			register char *save;
++			register int min;
++
++			/*
++			 * Lookahead to avoid useless match attempts
++			 * when we know what character comes next.
++			 */
++			nextch = '\0';
++			if (OP(next) == EXACTLY)
++				nextch = *OPERAND(next);
++			min = (OP(scan) == STAR) ? 0 : 1;	/* PLUS requires at least one repetition */
++			save = g->reginput;
++			no = regrepeat(g, OPERAND(scan));
++			while (no >= min) {
++				/* If it could work, try it. */
++				if (nextch == '\0' || *g->reginput == nextch)
++					if (regmatch(g, next))
++						return(1);
++				/* Couldn't or didn't -- back up. */
++				no--;
++				g->reginput = save + no;
++			}
++			return(0);
++			}
++			break;
++		case END:
++			return(1);	/* Success! */
++			break;
++		default:
++			printk("<3>Regexp: memory corruption\n");
++			return(0);
++			break;
++		}
++
++		scan = next;	/* "ordinary" nodes loop here instead of recursing */
++	}
++
++	/*
++	 * We get here only if there's trouble -- normally "case END" is
++	 * the terminating point.
++	 */
++	printk("<3>Regexp: corrupted pointers\n");
++	return(0);
++}
++
++/*
++ - regrepeat - repeatedly match something simple, report how many
++ */
++static int
++regrepeat(struct match_globals *g, char *p)
++{
++	register int count = 0;
++	register char *scan;
++	register char *opnd;
++
++	scan = g->reginput;
++	opnd = OPERAND(p);
++	switch (OP(p)) {
++	case ANY:
++		count = strlen(scan);	/* . greedily consumes the rest of the string */
++		scan += count;
++		break;
++	case EXACTLY:
++		while (*opnd == *scan) {	/* repeated node has a single-char operand */
++			count++;
++			scan++;
++		}
++		break;
++	case ANYOF:
++		while (*scan != '\0' && strchr(opnd, *scan) != NULL) {	/* run of chars inside the set */
++			count++;
++			scan++;
++		}
++		break;
++	case ANYBUT:
++		while (*scan != '\0' && strchr(opnd, *scan) == NULL) {	/* run of chars outside the set */
++			count++;
++			scan++;
++		}
++		break;
++	default:		/* Oh dear. Called inappropriately. */
++		printk("<3>Regexp: internal foulup\n");
++		count = 0;	/* Best compromise. */
++		break;
++	}
++	g->reginput = scan;	/* advance the shared scan position past the consumed run */
++
++	return(count);
++}
++
++/*
++ - regnext - dig the "next" pointer out of a node
++ */
++static char*
++regnext(struct match_globals *g, char *p)
++{
++	register int offset;
++
++	if (p == &g->regdummy)	/* placeholder node used while compiling; has no successor */
++		return(NULL);
++
++	offset = NEXT(p);
++	if (offset == 0)	/* zero offset encodes "no next node" */
++		return(NULL);
++
++	if (OP(p) == BACK)	/* BACK nodes jump backwards (loop closure) */
++		return(p-offset);
++	else
++		return(p+offset);
++}
++
++#ifdef DEBUG
++
++STATIC char *regprop();
++
++/*
++ - regdump - dump a regexp onto stdout in vaguely comprehensible form
++ */
++void
++regdump(regexp *r)
++{
++	register char *s;
++	register char op = EXACTLY;	/* Arbitrary non-END op. */
++	register char *next;
++	/* extern char *strchr(); */
++
++
++	s = r->program + 1;	/* skip the magic byte */
++	while (op != END) {	/* While that wasn't END last time... */
++		op = OP(s);
++		printf("%2d%s", s-r->program, regprop(s));	/* Where, what. */
++		next = regnext(s);	/* NOTE(review): regnext() now takes (g, p); this DEBUG-only call would not compile -- fix before defining DEBUG */
++		if (next == NULL)	/* Next ptr. */
++			printf("(0)");
++		else
++			printf("(%d)", (s-r->program)+(next-s));
++		s += 3;	/* skip op byte + 2-byte next offset */
++		if (op == ANYOF || op == ANYBUT || op == EXACTLY) {
++			/* Literal string, where present. */
++			while (*s != '\0') {
++				putchar(*s);
++				s++;
++			}
++			s++;
++		}
++		putchar('\n');
++	}
++
++	/* Header fields of interest. */
++	if (r->regstart != '\0')
++		printf("start `%c' ", r->regstart);
++	if (r->reganch)
++		printf("anchored ");
++	if (r->regmust != NULL)
++		printf("must have \"%s\"", r->regmust);
++	printf("\n");
++}
++
++/*
++ - regprop - printable representation of opcode
++ */
++static char *
++regprop(char *op)	/* returns a static, human-readable name for the node at 'op' (DEBUG only; not reentrant) */
++{
++#define BUFLEN 50
++	register char *p = NULL;	/* stays NULL when snprintf already wrote the name, or on a corrupt opcode (was read uninitialized below) */
++	static char buf[BUFLEN];
++
++	strcpy(buf, ":");
++
++	switch (OP(op)) {
++	case BOL:
++		p = "BOL";
++		break;
++	case EOL:
++		p = "EOL";
++		break;
++	case ANY:
++		p = "ANY";
++		break;
++	case ANYOF:
++		p = "ANYOF";
++		break;
++	case ANYBUT:
++		p = "ANYBUT";
++		break;
++	case BRANCH:
++		p = "BRANCH";
++		break;
++	case EXACTLY:
++		p = "EXACTLY";
++		break;
++	case NOTHING:
++		p = "NOTHING";
++		break;
++	case BACK:
++		p = "BACK";
++		break;
++	case END:
++		p = "END";
++		break;
++	case OPEN+1:
++	case OPEN+2:
++	case OPEN+3:
++	case OPEN+4:
++	case OPEN+5:
++	case OPEN+6:
++	case OPEN+7:
++	case OPEN+8:
++	case OPEN+9:
++		snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "OPEN%d", OP(op)-OPEN);
++		p = NULL;
++		break;
++	case CLOSE+1:
++	case CLOSE+2:
++	case CLOSE+3:
++	case CLOSE+4:
++	case CLOSE+5:
++	case CLOSE+6:
++	case CLOSE+7:
++	case CLOSE+8:
++	case CLOSE+9:
++		snprintf(buf+strlen(buf),BUFLEN-strlen(buf), "CLOSE%d", OP(op)-CLOSE);
++		p = NULL;
++		break;
++	case STAR:
++		p = "STAR";
++		break;
++	case PLUS:
++		p = "PLUS";
++		break;
++	default:
++		printk("<3>Regexp: corrupted opcode\n");
++		break;
++	}
++	if (p != NULL)
++		strncat(buf, p, BUFLEN-strlen(buf)-1);	/* reserve one byte: strncat appends up to n chars PLUS a terminating NUL */
++	return(buf);
++}
++#endif
++
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/webmon_deps/tree_map.h 2015-06-19 03:02:55.169678310 +0800
+@@ -0,0 +1,1084 @@
++/*
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This work 'as-is' we provide.
++ * No warranty, express or implied.
++ * We've done our best,
++ * to debug and test.
++ * Liability for damages denied.
++ *
++ * Permission is granted hereby,
++ * to copy, share, and modify.
++ * Use as is fit,
++ * free or for profit.
++ * On this notice these rights rely.
++ *
++ *
++ *
++ * Note that unlike other portions of Gargoyle this code
++ * does not fall under the GPL, but the rather whimsical
++ * 'Poetic License' above.
++ *
++ * Basically, this library contains a bunch of utilities
++ * that I find useful. I'm sure other libraries exist
++ * that are just as good or better, but I like these tools
++ * because I personally wrote them, so I know their quirks.
++ * (i.e. I know where the bodies are buried). I want to
++ * make sure that I can re-use these utilities for whatever
++ * code I may want to write in the future be it
++ * proprietary or open-source, so I've put them under
++ * a very, very permissive license.
++ *
++ * If you find this code useful, use it. If not, don't.
++ * I really don't care.
++ *
++ */
++
++
++#if __KERNEL__ /* NOTE(review): the kernel defines __KERNEL__ as 1 so '#if' works, but '#ifdef __KERNEL__' is the conventional spelling */
++	#define malloc(foo) kmalloc(foo,GFP_ATOMIC) /* GFP_ATOMIC: this library runs from netfilter context and must not sleep */
++	#define free(foo) kfree(foo)
++	#define printf(format,args...) printk(format,##args)
++
++	/* kernel strdup */
++	static inline char *kernel_strdup(const char *str);
++	static inline char *kernel_strdup(const char *str)
++	{
++		char *tmp;
++		long int s;
++		s=strlen(str) + 1;	/* include the terminating NUL */
++		tmp = kmalloc(s, GFP_ATOMIC);
++		if (tmp != NULL)
++		{
++			memcpy(tmp, str, s);
++		}
++		return tmp;	/* NULL on allocation failure, like userspace strdup */
++	}
++	#define strdup kernel_strdup
++
++#endif
++
++
++
++/* tree_map structs / prototypes */
++typedef struct long_tree_map_node
++{
++ unsigned long key;
++ void* value;
++
++ signed char balance;
++ struct long_tree_map_node* left;
++ struct long_tree_map_node* right;
++} long_map_node;
++
++typedef struct
++{
++ long_map_node* root;
++ unsigned long num_elements;
++
++}long_map;
++
++typedef struct
++{
++ long_map lm;
++ unsigned char store_keys;
++ unsigned long num_elements;
++
++}string_map;
++
++
++
++/* long map functions */
++long_map* initialize_long_map(void);
++void* get_long_map_element(long_map* map, unsigned long key);
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* set_long_map_element(long_map* map, unsigned long key, void* value);
++void* remove_long_map_element(long_map* map, unsigned long key);
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned);
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned);
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value));
++
++/* string map functions */
++string_map* initialize_string_map(unsigned char store_keys);
++void* get_string_map_element(string_map* map, const char* key);
++void* set_string_map_element(string_map* map, const char* key, void* value);
++void* remove_string_map_element(string_map* map, const char* key);
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned);
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned);
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value));
++
++
++/*
++ * three different ways to deal with values when data structure is destroyed
++ */
++#define DESTROY_MODE_RETURN_VALUES 20
++#define DESTROY_MODE_FREE_VALUES 21
++#define DESTROY_MODE_IGNORE_VALUES 22
++
++
++/*
++ * for convenience & backwards compatibility alias _string_map_ functions to
++ * _map_ functions since string map is used more often than long map
++ */
++#define initialize_map initialize_string_map
++#define set_map_element set_string_map_element
++#define get_map_element get_string_map_element
++#define remove_map_element remove_string_map_element
++#define get_map_keys get_string_map_keys
++#define get_map_values get_string_map_values
++#define destroy_map destroy_string_map
++
++
++/* internal utility structures/ functions */
++typedef struct stack_node_struct
++{
++ long_map_node** node_ptr;
++ signed char direction;
++ struct stack_node_struct* previous;
++} stack_node;
++
++static void free_stack(stack_node* stack);
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed);
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value));
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value));
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth);
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth);
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op);
++static void rotate_right (long_map_node** parent);
++static void rotate_left (long_map_node** parent);
++
++/* internal for string map */
++typedef struct
++{
++ char* key;
++ void* value;
++} string_map_key_value;
++static unsigned long sdbm_string_hash(const char *key);
++
++
++
++
++/***************************************************
++ * For testing only
++ ***************************************************/
++/*
++void print_list(stack_node *l);
++
++void print_list(stack_node *l)
++{
++ if(l != NULL)
++ {
++ printf(" list key = %ld, dir=%d, \n", (*(l->node_ptr))->key, l->direction);
++ print_list(l->previous);
++ }
++}
++*/
++/******************************************************
++ * End testing Code
++ *******************************************************/
++
++
++
++
++/***************************************************
++ * string_map function definitions
++ ***************************************************/
++
++string_map* initialize_string_map(unsigned char store_keys)	/* allocates an empty string map; returns NULL on malloc failure */
++{
++	string_map* map = (string_map*)malloc(sizeof(string_map));
++	if(map != NULL)
++	{
++		map->store_keys = store_keys;	/* nonzero: keep a private copy of each key so keys can be listed later */
++		map->lm.root = NULL;
++		map->lm.num_elements = 0;
++		map->num_elements = map->lm.num_elements;	/* mirrored for convenient external reads */
++	}
++	return map;
++}
++
++void* get_string_map_element(string_map* map, const char* key)	/* returns the stored value, or NULL if absent */
++{
++	unsigned long hashed_key = sdbm_string_hash(key);	/* strings are stored under their sdbm hash in the long map */
++	void* return_value = get_long_map_element( &(map->lm), hashed_key);
++	if(return_value != NULL && map->store_keys)
++	{
++		string_map_key_value* r = (string_map_key_value*)return_value;	/* unwrap the key/value pair */
++		return_value = r->value;
++	}
++	map->num_elements = map->lm.num_elements;
++	return return_value;
++}
++
++void* set_string_map_element(string_map* map, const char* key, void* value)	/* inserts/replaces; returns the displaced old value, or NULL */
++{
++	unsigned long hashed_key = sdbm_string_hash(key);	/* NOTE(review): distinct keys with equal sdbm hashes collide silently */
++	void* return_value = NULL;
++	if(map->store_keys)
++	{
++		string_map_key_value* kv = (string_map_key_value*)malloc(sizeof(string_map_key_value));
++		if(kv == NULL) /* deal with malloc failure */
++		{
++			return NULL;
++		}
++		kv->key = strdup(key);	/* private copy; freed on remove/destroy */
++		if(kv->key == NULL) /* deal with malloc failure */
++		{
++			free(kv);
++			return NULL;
++		}
++		kv->value = value;
++		return_value = set_long_map_element( &(map->lm), hashed_key, kv);
++		if(return_value != NULL)
++		{
++			string_map_key_value* r = (string_map_key_value*)return_value;	/* replaced an entry: free its wrapper + key copy */
++			return_value = r->value;
++			free(r->key);
++			free(r);
++		}
++	}
++	else
++	{
++		return_value = set_long_map_element( &(map->lm), hashed_key, value);
++	}
++	map->num_elements = map->lm.num_elements;
++	return return_value;
++}
++
++void* remove_string_map_element(string_map* map, const char* key)	/* removes and returns the value, or NULL if absent */
++{
++	unsigned long hashed_key = sdbm_string_hash(key);
++	void* return_value = remove_long_map_element( &(map->lm), hashed_key);
++
++	if(return_value != NULL && map->store_keys)
++	{
++		string_map_key_value* r = (string_map_key_value*)return_value;	/* free the wrapper + key copy; hand back only the value */
++		return_value = r->value;
++		free(r->key);
++		free(r);
++	}
++	map->num_elements = map->lm.num_elements;
++	return return_value;
++}
++
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned)	/* NULL-terminated array of strdup'ed keys; caller frees array and each key */
++{
++	char** str_keys;
++	str_keys = (char**)malloc((map->num_elements+1)*sizeof(char*));	/* +1 for the NULL terminator */
++	if(str_keys == NULL) /* deal with malloc failure */
++	{
++		return NULL;
++	}
++	str_keys[0] = NULL;
++	*num_keys_returned = 0;
++	if(map->store_keys && map->num_elements > 0)	/* keys are only available when the map stored copies of them */
++	{
++		unsigned long list_length;
++		void** long_values = get_sorted_long_map_values( &(map->lm), &list_length);
++		unsigned long key_index;
++		/*list_length will be 0 on malloc failure in get_sorted_long_map_values, so this code shouldn't seg fault if that happens */
++		for(key_index = 0; key_index < list_length; key_index++)
++		{
++			str_keys[key_index] = strdup( ((string_map_key_value*)(long_values[key_index]))->key);
++			if(str_keys[key_index] == NULL) /* deal with malloc failure */
++			{
++				//just return the incomplete list (hey, it's null terminated...)
++				free(long_values);
++				return str_keys;
++			}
++			*num_keys_returned = *num_keys_returned + 1;
++		}
++		str_keys[list_length] = NULL;
++		free(long_values);
++	}
++	return str_keys;
++}
++
++
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned)	/* NULL-terminated array in hashed-key order; caller frees the array */
++{
++	void** values = NULL;
++	if(map != NULL)
++	{
++		values = get_sorted_long_map_values ( &(map->lm), num_values_returned );
++	}
++	return values;	/* note: when store_keys is set, entries are string_map_key_value wrappers, not raw values */
++}
++
++
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed)	/* empties + frees the map; value handling per DESTROY_MODE_* */
++{
++	void** return_values = NULL;
++	if(map != NULL)
++	{
++		if(map->store_keys)
++		{
++			void** kvs = destroy_long_map_values( &(map->lm), DESTROY_MODE_RETURN_VALUES, num_destroyed );	/* always retrieve wrappers so key copies can be freed */
++			unsigned long kv_index = 0;
++			for(kv_index=0; kv_index < *num_destroyed; kv_index++)
++			{
++				string_map_key_value* kv = (string_map_key_value*)kvs[kv_index];
++				void* value = kv->value;
++
++				free(kv->key);
++				free(kv);
++				if(destruction_type == DESTROY_MODE_FREE_VALUES)
++				{
++					free(value);
++				}
++				if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++				{
++					kvs[kv_index] = value;	/* reuse the wrapper array to return the unwrapped values */
++				}
++			}
++			if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++			{
++				return_values = kvs;
++			}
++			else
++			{
++				free(kvs);
++			}
++		}
++		else
++		{
++			return_values = destroy_long_map_values( &(map->lm), destruction_type, num_destroyed );
++		}
++		free(map);
++	}
++	return return_values;
++}
++
++
++
++
++/***************************************************
++ * long_map function definitions
++ ***************************************************/
++
++long_map* initialize_long_map(void)	/* allocates an empty AVL map keyed by unsigned long; NULL on malloc failure */
++{
++	long_map* map = (long_map*)malloc(sizeof(long_map));
++	if(map != NULL) /* test for malloc failure */
++	{
++		map->root = NULL;
++		map->num_elements = 0;
++	}
++	return map;
++}
++
++void* get_long_map_element(long_map* map, unsigned long key)	/* returns the stored value, or NULL if key absent */
++{
++	void* value = NULL;
++
++	if(map->root != NULL)
++	{
++		long_map_node* parent_node = map->root;
++		long_map_node* next_node;
++		while( key != parent_node->key && (next_node = (long_map_node *)(key < parent_node->key ? parent_node->left : parent_node->right)) != NULL)	/* standard BST descent */
++		{
++			parent_node = next_node;
++		}
++		if(parent_node->key == key)
++		{
++			value = parent_node->value;
++		}
++	}
++	return value;	/* NULL also when the stored value itself was NULL */
++}
++
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key)	/* leftmost node; *smallest_key untouched when map is empty */
++{
++	void* value = NULL;
++	if(map->root != NULL)
++	{
++		long_map_node* next_node = map->root;
++		while( next_node->left != NULL)	/* minimum key is the leftmost node */
++		{
++			next_node = next_node->left;
++		}
++		value = next_node->value;
++		*smallest_key = next_node->key;
++	}
++	return value;
++}
++
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key)	/* rightmost node; *largest_key untouched when map is empty */
++{
++	void* value = NULL;
++	if(map->root != NULL)
++	{
++		long_map_node* next_node = map->root;
++		while( next_node->right != NULL)	/* maximum key is the rightmost node */
++		{
++			next_node = next_node->right;
++		}
++		value = next_node->value;
++		*largest_key = next_node->key;
++	}
++	return value;
++}
++
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key)	/* NOTE(review): on an empty map *smallest_key is used while uninitialized -- do not call on an empty map */
++{
++	get_smallest_long_map_element(map, smallest_key);
++	return remove_long_map_element(map, *smallest_key);
++}
++
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key)	/* same caveat as remove_smallest: caller must ensure the map is non-empty */
++{
++	get_largest_long_map_element(map, largest_key);
++	return remove_long_map_element(map, *largest_key);
++}
++
++
++/* if replacement performed, returns replaced value, otherwise null */
++void* set_long_map_element(long_map* map, unsigned long key, void* value)	/* AVL insert-or-replace; returns the replaced value or NULL */
++{
++	stack_node* parent_list = NULL;	/* explicit stack of pointers along the root->leaf path, for rebalancing */
++	void* old_value = NULL;
++	int old_value_found = 0;
++
++	long_map_node* parent_node;
++	long_map_node* next_node;
++	stack_node* next_parent;
++	stack_node* previous_parent;
++	signed char new_balance;
++
++
++	long_map_node* new_node = (long_map_node*)malloc(sizeof(long_map_node));
++	if(new_node == NULL)
++	{
++		return NULL;
++	}
++	new_node->value = value;
++	new_node->key = key;
++	new_node->left = NULL;
++	new_node->right = NULL;
++	new_node->balance = 0;
++
++
++
++	if(map->root == NULL)
++	{
++		map->root = new_node;	/* first element: becomes the root, no balancing needed */
++	}
++	else
++	{
++		parent_node = map->root;
++
++		next_parent = (stack_node*)malloc(sizeof(stack_node));
++		if(next_parent == NULL) /* deal with malloc failure */
++		{
++			free(new_node);
++			return NULL; /* won't insert but won't seg fault */
++		}
++		next_parent->node_ptr = &(map->root);
++		next_parent->previous = parent_list;
++		parent_list = next_parent;
++
++		while( key != parent_node->key && (next_node = (key < parent_node->key ? parent_node->left : parent_node->right) ) != NULL)	/* BST descent, recording the path */
++		{
++			next_parent = (stack_node*)malloc(sizeof(stack_node));
++			if(next_parent == NULL) /* deal with malloc failure */
++			{
++				/* free previous stack nodes to prevent memory leak */
++				free_stack(parent_list);
++				free(new_node);
++				return NULL;
++			}
++			next_parent->node_ptr = key < parent_node->key ? &(parent_node->left) : &(parent_node->right);
++			next_parent->previous = parent_list;
++			next_parent->previous->direction = key < parent_node->key ? -1 : 1;	/* which subtree we descended into */
++			parent_list = next_parent;
++
++			parent_node = next_node;
++		}
++
++
++		if(key == parent_node->key)
++		{
++			old_value = parent_node->value;
++			old_value_found = 1;
++			parent_node->value = value;
++			free(new_node);
++			/* we merely replaced a node, no need to rebalance */
++		}
++		else
++		{
++			if(key < parent_node->key)
++			{
++				parent_node->left = (void*)new_node;
++				parent_list->direction = -1;
++			}
++			else
++			{
++				parent_node->right = (void*)new_node;
++				parent_list->direction = 1;
++			}
++
++
++			/* we inserted a node, rebalance */
++			previous_parent = parent_list;
++			new_balance = 1; /* initial value is not used, but must not be 0 for initial loop condition */
++
++
++			while(previous_parent != NULL && new_balance != 0)	/* walk back up the recorded path; stop once a subtree's height is unchanged */
++			{
++				new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, 1);
++				previous_parent = previous_parent->previous;
++			}
++		}
++	}
++
++	free_stack(parent_list);
++
++	if(old_value_found == 0)
++	{
++		map->num_elements = map->num_elements + 1;
++	}
++
++	return old_value;
++}
++
++
++void* remove_long_map_element(long_map* map, unsigned long key)	/* AVL delete; returns the removed value, or NULL if key absent (or on malloc failure) */
++{
++
++	void* value = NULL;
++
++	long_map_node* root_node = map->root;
++	stack_node* parent_list = NULL;	/* path from root to the removed node, for rebalancing on the way back up */
++
++
++	long_map_node* remove_parent;
++	long_map_node* remove_node;
++	long_map_node* next_node;
++
++	long_map_node* replacement;	/* node that will take the removed node's place */
++	long_map_node* replacement_parent;
++	long_map_node* replacement_next;
++
++	stack_node* next_parent;
++	stack_node* previous_parent;
++	stack_node* replacement_stack_node;
++
++
++	signed char new_balance;
++
++
++
++	if(root_node != NULL)
++	{
++		remove_parent = root_node;
++		remove_node = key < remove_parent->key ? remove_parent->left : remove_parent->right;
++
++		if(remove_node != NULL && key != remove_parent->key)	/* target is below the root: descend, recording the path */
++		{
++			next_parent = (stack_node*)malloc(sizeof(stack_node));
++			if(next_parent == NULL) /* deal with malloc failure */
++			{
++				return NULL;
++			}
++			next_parent->node_ptr = &(map->root);
++			next_parent->previous = parent_list;
++			parent_list = next_parent;
++			while( key != remove_node->key && (next_node = (key < remove_node->key ? remove_node->left : remove_node->right)) != NULL)
++			{
++				next_parent = (stack_node*)malloc(sizeof(stack_node));
++				if(next_parent == NULL) /* deal with malloc failure */
++				{
++					/* free previous stack nodes to prevent memory leak */
++					free_stack(parent_list);
++					return NULL;
++				}
++				next_parent->node_ptr = key < remove_parent->key ? &(remove_parent->left) : &(remove_parent->right);
++				next_parent->previous = parent_list;
++				next_parent->previous->direction = key < remove_parent->key ? -1 : 1;
++				parent_list = next_parent;
++
++
++				remove_parent = remove_node;
++				remove_node = next_node;
++			}
++			parent_list->direction = key < remove_parent-> key ? -1 : 1;
++		}
++		else
++		{
++			remove_node = remove_parent;	/* target (if present) is the root itself */
++		}
++
++
++		if(key == remove_node->key)
++		{
++
++			/* find replacement for node we are deleting */
++			if( remove_node->right == NULL )
++			{
++				replacement = remove_node->left;	/* no right child: left subtree (possibly NULL) moves up */
++			}
++			else if( remove_node->right->left == NULL)
++			{
++
++				replacement = remove_node->right;	/* right child has no left subtree: it replaces directly */
++				replacement->left = remove_node->left;
++				replacement->balance = remove_node->balance;
++
++				/* put pointer to replacement node into list for balance update */
++				replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++				if(replacement_stack_node == NULL) /* deal with malloc failure */
++				{
++					/* free previous stack nodes to prevent memory leak */
++					free_stack(parent_list);
++					return NULL;
++				}
++				replacement_stack_node->previous = parent_list;
++				replacement_stack_node->direction = 1; /* replacement is from right */
++				if(remove_node == remove_parent) /* special case for root node */
++				{
++					replacement_stack_node->node_ptr = &(map->root);
++				}
++				else
++				{
++					replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++				}
++				parent_list = replacement_stack_node;
++
++			}
++			else
++			{
++				/* put pointer to replacement node into list for balance update */
++				replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++				if(replacement_stack_node == NULL) /* deal with malloc failure */
++				{
++					/* free previous stack nodes to prevent memory leak */
++					free_stack(parent_list);
++					return NULL;
++				}
++
++				replacement_stack_node->previous = parent_list;
++				replacement_stack_node->direction = 1; /* we always look for replacement on right */
++				if(remove_node == remove_parent) /* special case for root node */
++				{
++					replacement_stack_node->node_ptr = &(map->root);
++				}
++				else
++				{
++					replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++				}
++
++				parent_list = replacement_stack_node;
++
++
++				/*
++				 * put pointer to replacement node->right into list for balance update
++				 * this node will have to be updated with the proper pointer
++				 * after we have identified the replacement
++				 */
++				replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++				if(replacement_stack_node == NULL) /* deal with malloc failure */
++				{
++					/* free previous stack nodes to prevent memory leak */
++					free_stack(parent_list);
++					return NULL;
++				}
++
++				replacement_stack_node->previous = parent_list;
++				replacement_stack_node->direction = -1; /* we always look for replacement to left of this node */
++				parent_list = replacement_stack_node;
++
++				/* find smallest node on right (large) side of tree */
++				replacement_parent = remove_node->right;	/* i.e. the in-order successor of the removed node */
++				replacement = replacement_parent->left;
++
++				while((replacement_next = replacement->left) != NULL)
++				{
++					next_parent = (stack_node*)malloc(sizeof(stack_node));
++					if(next_parent == NULL) /* deal with malloc failure */
++					{
++						/* free previous stack nodes to prevent memory leak */
++						free_stack(parent_list);
++						return NULL;
++					}
++
++					next_parent->node_ptr = &(replacement_parent->left);
++					next_parent->previous = parent_list;
++					next_parent->direction = -1; /* we always go left */
++					parent_list = next_parent;
++
++					replacement_parent = replacement;
++					replacement = replacement_next;
++
++				}
++
++				replacement_parent->left = replacement->right;	/* detach successor, keeping its right subtree in place */
++
++				replacement->left = remove_node->left;	/* successor adopts the removed node's children and balance */
++				replacement->right = remove_node->right;
++				replacement->balance = remove_node->balance;
++				replacement_stack_node->node_ptr = &(replacement->right);	/* backfill the placeholder recorded above */
++			}
++
++			/* insert replacement at proper location in tree */
++			if(remove_node == remove_parent)
++			{
++				map->root = replacement;
++			}
++			else
++			{
++				remove_parent->left = remove_node == remove_parent->left ? replacement : remove_parent->left;
++				remove_parent->right = remove_node == remove_parent->right ? replacement : remove_parent->right;
++			}
++
++
++			/* rebalance tree */
++			previous_parent = parent_list;
++			new_balance = 0;	/* deletion keeps rebalancing while subtree heights keep shrinking (rebalance returns 0) */
++			while(previous_parent != NULL && new_balance == 0)
++			{
++				new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, -1);
++				previous_parent = previous_parent->previous;
++			}
++
++
++
++
++			/*
++			 * since we found a value to remove, decrease number of elements in map
++			 * set return value to the deleted node's value and free the node
++			 */
++			map->num_elements = map->num_elements - 1;
++			value = remove_node->value;
++			free(remove_node);
++		}
++	}
++
++	free_stack(parent_list);
++
++	return value;
++}
++
++
++/* note: returned keys are dynamically allocated, you need to free them! */
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned)	/* ascending order; caller frees the returned array */
++{
++	unsigned long* key_list = (unsigned long*)malloc((map->num_elements)*sizeof(unsigned long));
++	unsigned long next_key_index;
++	if(key_list == NULL)
++	{
++		*num_keys_returned = 0;
++		return NULL;
++	}
++	next_key_index = 0;
++	get_sorted_node_keys(map->root, key_list, &next_key_index, 0);	/* in-order traversal yields ascending keys */
++
++	*num_keys_returned = map->num_elements;
++
++	return key_list;
++}
++
++
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned)	/* values in ascending key order; NULL-terminated; caller frees */
++{
++	void** value_list = (void**)malloc((map->num_elements+1)*sizeof(void*));	/* +1 for the NULL terminator */
++	unsigned long next_value_index;
++
++	if(value_list == NULL)
++	{
++		*num_values_returned = 0;
++		return NULL;
++	}
++	next_value_index = 0;
++	get_sorted_node_values(map->root, value_list, &next_value_index, 0);
++	value_list[map->num_elements] = NULL; /* since we're dealing with pointers make list null terminated */
++
++	*num_values_returned = map->num_elements;
++	return value_list;
++
++}
++
++
++
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed)	/* empties the tree per DESTROY_MODE_*, then frees the map struct itself */
++{
++	void** return_values = destroy_long_map_values(map, destruction_type, num_destroyed);
++	free(map);
++	return return_values;	/* non-NULL only for DESTROY_MODE_RETURN_VALUES */
++}
++
++
++
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value))	/* in-order (ascending key) visit of every entry */
++{
++	apply_to_every_long_map_node(map->root, apply_func);
++}
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value))	/* key argument is NULL when the map was created without store_keys */
++{
++	apply_to_every_string_map_node( (map->lm).root, map->store_keys, apply_func);
++}
++
++
++/***************************************************
++ * internal utility function definitions
++ ***************************************************/
++static void free_stack(stack_node* stack)	/* frees every node of the parent-pointer stack (tree nodes are untouched) */
++{
++	while(stack != NULL)
++	{
++		stack_node* prev_node = stack;
++		stack = prev_node->previous;
++		free(prev_node);
++	}
++
++}
++
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed)	/* drains the tree; optionally collects or frees the values */
++{
++	void** return_values = NULL;
++	unsigned long return_index = 0;
++
++	*num_destroyed = 0;
++
++	if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++	{
++		return_values = (void**)malloc((map->num_elements+1)*sizeof(void*));	/* +1 for NULL terminator */
++		if(return_values == NULL) /* deal with malloc failure */
++		{
++			destruction_type = DESTROY_MODE_IGNORE_VALUES; /* could cause memory leak, but there's no other way to be sure we won't seg fault */
++		}
++		else
++		{
++			return_values[map->num_elements] = NULL;
++		}
++	}
++	while(map->num_elements > 0)	/* repeatedly pop the minimum until the tree is empty */
++	{
++		unsigned long smallest_key;
++		void* removed_value = remove_smallest_long_map_element(map, &smallest_key);
++		if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++		{
++			return_values[return_index] = removed_value;
++		}
++		if(destruction_type == DESTROY_MODE_FREE_VALUES)
++		{
++			free(removed_value);
++		}
++		return_index++;
++		*num_destroyed = *num_destroyed + 1;
++	}
++	return return_values;
++}
++
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value))
++{
++ if(node != NULL)
++ {
++ apply_to_every_long_map_node(node->left, apply_func);
++
++ apply_func(node->key, node->value);
++
++ apply_to_every_long_map_node(node->right, apply_func);
++ }
++}
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value))
++{
++ if(node != NULL)
++ {
++ apply_to_every_string_map_node(node->left, has_key, apply_func);
++
++ if(has_key)
++ {
++ string_map_key_value* kv = (string_map_key_value*)(node->value);
++ apply_func(kv->key, kv->value);
++ }
++ else
++ {
++ apply_func(NULL, node->value);
++ }
++ apply_to_every_string_map_node(node->right, has_key, apply_func);
++ }
++}
++
++
++
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth)
++{
++ if(node != NULL)
++ {
++ get_sorted_node_keys(node->left, key_list, next_key_index, depth+1);
++
++ key_list[ *next_key_index ] = node->key;
++ (*next_key_index)++;
++
++ get_sorted_node_keys(node->right, key_list, next_key_index, depth+1);
++ }
++}
++
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth)
++{
++ if(node != NULL)
++ {
++ get_sorted_node_values(node->left, value_list, next_value_index, depth+1);
++
++ value_list[ *next_value_index ] = node->value;
++ (*next_value_index)++;
++
++ get_sorted_node_values(node->right, value_list, next_value_index, depth+1);
++ }
++}
++
++
++
++/*
++ * direction = -1 indicates left subtree updated, direction = 1 for right subtree
++ * update_op = -1 indicates delete node, update_op = 1 for insert node
++ */
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op)
++{
++ /*
++ printf( "original: key = %ld, balance = %d, update_op=%d, direction=%d\n", (*n)->key, (*n)->balance, update_op, direction);
++ */
++
++ (*n)->balance = (*n)->balance + (update_op*direction);
++
++ if( (*n)->balance < -1)
++ {
++ if((*n)->left->balance < 0)
++ {
++ rotate_right(n);
++ (*n)->right->balance = 0;
++ (*n)->balance = 0;
++ }
++ else if((*n)->left->balance == 0)
++ {
++ rotate_right(n);
++ (*n)->right->balance = -1;
++ (*n)->balance = 1;
++ }
++ else if((*n)->left->balance > 0)
++ {
++ rotate_left( &((*n)->left) );
++ rotate_right(n);
++ /*
++ if( (*n)->balance < 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 1;
++ }
++ else if( (*n)->balance == 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 0;
++ }
++ else if( (*n)->balance > 0 )
++ {
++ (*n)->left->balance = -1;
++ (*n)->right->balance = 0;
++ }
++ */
++ (*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++ (*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++ (*n)->balance = 0;
++ }
++ }
++ if( (*n)->balance > 1)
++ {
++ if((*n)->right->balance > 0)
++ {
++ rotate_left(n);
++ (*n)->left->balance = 0;
++ (*n)->balance = 0;
++ }
++ else if ((*n)->right->balance == 0)
++ {
++ rotate_left(n);
++ (*n)->left->balance = 1;
++ (*n)->balance = -1;
++ }
++ else if((*n)->right->balance < 0)
++ {
++ rotate_right( &((*n)->right) );
++ rotate_left(n);
++ /*
++ if( (*n)->balance < 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 1;
++ }
++ else if( (*n)->balance == 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 0;
++ }
++ else if( (*n)->balance > 0 )
++ {
++ (*n)->left->balance = -1;
++ (*n)->right->balance = 0;
++ }
++ */
++ (*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++ (*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++ (*n)->balance = 0;
++ }
++ }
++
++ /*
++ printf( "key = %ld, balance = %d\n", (*n)->key, (*n)->balance);
++ */
++
++ return (*n)->balance;
++}
++
++
++static void rotate_right (long_map_node** parent)
++{
++	long_map_node* root = *parent; /* node being demoted */
++	long_map_node* pivot = root->left; /* left child, promoted to the top */
++	root->left = pivot->right; /* pivot's right subtree becomes root's left */
++	pivot->right = root;
++
++	*parent = pivot;
++}
++
++static void rotate_left (long_map_node** parent)
++{
++	long_map_node* root = *parent; /* node being demoted */
++	long_map_node* pivot = root->right; /* right child, promoted to the top */
++	root->right = pivot->left; /* pivot's left subtree becomes root's right */
++	pivot->left = root;
++
++	*parent = pivot;
++}
++
++
++
++/***************************************************************************
++ * This algorithm was created for the sdbm database library (a public-domain
++ * reimplementation of ndbm) and seems to work relatively well in
++ * scrambling bits
++ *
++ *
++ * This code was derived from code found at:
++ * http://www.cse.yorku.ca/~oz/hash.html
++ ***************************************************************************/
++static unsigned long sdbm_string_hash(const char *key)
++{
++	unsigned long hashed_key = 0;
++
++	int index = 0;
++	unsigned int nextch;
++	while(key[index] != '\0')
++	{
++		nextch = key[index];
++		hashed_key = nextch + (hashed_key << 6) + (hashed_key << 16) - hashed_key; /* sdbm recurrence: hash = hash*65599 + ch */
++		index++;
++	}
++	return hashed_key;
++}
++
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_weburl.c 2015-06-19 03:02:54.721697023 +0800
+@@ -0,0 +1,398 @@
++/* weburl -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <net/sock.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_weburl.h>
++
++#include "weburl_deps/regexp.c"
++#include "weburl_deps/tree_map.h"
++
++
++#include <linux/ip.h>
++
++
++#include <linux/netfilter/x_tables.h>
++
++
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Bishop");
++MODULE_DESCRIPTION("Match URL in HTTP requests, designed for use with Gargoyle web interface (www.gargoyle-router.com)");
++
++string_map* compiled_map = NULL;
++
++int strnicmp(const char * cs,const char * ct,size_t count)
++{
++ register signed char __res = 0;
++
++ while (count)
++ {
++ if ((__res = toupper( *cs ) - toupper( *ct++ ) ) != 0 || !*cs++)
++ {
++ break;
++ }
++ count--;
++ }
++ return __res;
++}
++
++char *strnistr(const char *s, const char *find, size_t slen)
++{
++ char c, sc;
++ size_t len;
++
++
++ if ((c = *find++) != '\0')
++ {
++ len = strlen(find);
++ do
++ {
++ do
++ {
++ if (slen < 1 || (sc = *s) == '\0')
++ {
++ return (NULL);
++ }
++ --slen;
++ ++s;
++ }
++ while ( toupper(sc) != toupper(c));
++
++ if (len > slen)
++ {
++ return (NULL);
++ }
++ }
++ while (strnicmp(s, find, len) != 0);
++
++ s--;
++ }
++ return ((char *)s);
++}
++
++
++int do_match_test(unsigned char match_type, const char* reference, char* query)
++{
++ int matches = 0;
++ struct regexp* r;
++ switch(match_type)
++ {
++ case WEBURL_CONTAINS_TYPE:
++ matches = (strstr(query, reference) != NULL);
++ break;
++ case WEBURL_REGEX_TYPE:
++
++ if(compiled_map == NULL)
++ {
++ compiled_map = initialize_map(0);
++ if(compiled_map == NULL) /* test for malloc failure */
++ {
++ return 0;
++ }
++ }
++ r = (struct regexp*)get_map_element(compiled_map, reference);
++ if(r == NULL)
++ {
++ int rlen = strlen(reference);
++ r= regcomp((char*)reference, &rlen);
++ if(r == NULL) /* test for malloc failure */
++ {
++ return 0;
++ }
++ set_map_element(compiled_map, reference, (void*)r);
++ }
++ matches = regexec(r, query);
++ break;
++ case WEBURL_EXACT_TYPE:
++ matches = (strstr(query, reference) != NULL) && strlen(query) == strlen(reference);
++ break;
++ }
++ return matches;
++}
++
++int http_match(const struct ipt_weburl_info* info, const unsigned char* packet_data, int packet_length)
++{
++ int test = 0;
++
++ /* first test if we're dealing with a web page request */
++ if(strnicmp((char*)packet_data, "GET ", 4) == 0 || strnicmp( (char*)packet_data, "POST ", 5) == 0 || strnicmp((char*)packet_data, "HEAD ", 5) == 0)
++ {
++ /* printk("found a web page request\n"); */
++ char path[625] = "";
++ char host[625] = "";
++ int path_start_index;
++ int path_end_index;
++ int last_header_index;
++ char last_two_buf[2];
++ int end_found;
++ char* host_match;
++ char* test_prefixes[6];
++ int prefix_index;
++
++ /* get path portion of URL */
++ path_start_index = (int)(strstr((char*)packet_data, " ") - (char*)packet_data);
++ while( packet_data[path_start_index] == ' ')
++ {
++ path_start_index++;
++ }
++ path_end_index= (int)(strstr( (char*)(packet_data+path_start_index), " ") - (char*)packet_data);
++ if(path_end_index > 0)
++ {
++ int path_length = path_end_index-path_start_index;
++ path_length = path_length < 625 ? path_length : 624; /* prevent overflow */
++ memcpy(path, packet_data+path_start_index, path_length);
++ path[ path_length] = '\0';
++ }
++
++ /* get header length */
++ last_header_index = 2;
++ memcpy(last_two_buf,(char*)packet_data, 2);
++ end_found = 0;
++ while(end_found == 0 && last_header_index < packet_length)
++ {
++ char next = (char)packet_data[last_header_index];
++ if(next == '\n')
++ {
++ end_found = last_two_buf[1] == '\n' || (last_two_buf[0] == '\n' && last_two_buf[1] == '\r') ? 1 : 0;
++ }
++ if(end_found == 0)
++ {
++ last_two_buf[0] = last_two_buf[1];
++ last_two_buf[1] = next;
++ last_header_index++;
++ }
++ }
++
++		/* get host portion of URL */
++		host_match = strnistr( (char*)packet_data, "Host:", last_header_index);
++		if(host_match != NULL)
++		{
++			int host_end_index;
++			host_match = host_match + 5; /* character after "Host:" */
++			while(host_match[0] == ' ')
++			{
++				host_match = host_match+1;
++			}
++
++			host_end_index = 0;
++			while(	host_match[host_end_index] != '\n' &&
++				host_match[host_end_index] != '\r' &&
++				host_match[host_end_index] != ' ' &&
++				host_match[host_end_index] != ':' &&
++				((char*)host_match - (char*)packet_data)+host_end_index < last_header_index
++				)
++			{
++				host_end_index++;
++			}
++			host_end_index = host_end_index < 625 ? host_end_index : 624; /* clamp BEFORE copying, or a long Host: header overflows host[625] */
++			memcpy(host, host_match, host_end_index);
++			host[host_end_index] = '\0';
++
++
++		}
++
++ /* printk("host = \"%s\", path =\"%s\"\n", host, path); */
++
++
++ switch(info->match_part)
++ {
++ case WEBURL_DOMAIN_PART:
++ test = do_match_test(info->match_type, info->test_str, host);
++ if(!test && strstr(host, "www.") == host)
++ {
++ test = do_match_test(info->match_type, info->test_str, ((char*)host+4) );
++ }
++ break;
++ case WEBURL_PATH_PART:
++ test = do_match_test(info->match_type, info->test_str, path);
++ if( !test && path[0] == '/' )
++ {
++ test = do_match_test(info->match_type, info->test_str, ((char*)path+1) );
++ }
++ break;
++ case WEBURL_ALL_PART:
++
++ test_prefixes[0] = "http://";
++ test_prefixes[1] = "";
++ test_prefixes[2] = NULL;
++
++
++ for(prefix_index=0; test_prefixes[prefix_index] != NULL && test == 0; prefix_index++)
++ {
++ char test_url[1250];
++ test_url[0] = '\0';
++ strcat(test_url, test_prefixes[prefix_index]);
++ strcat(test_url, host);
++ if(strcmp(path, "/") != 0)
++ {
++ strcat(test_url, path);
++ }
++ test = do_match_test(info->match_type, info->test_str, test_url);
++ if(!test && strcmp(path, "/") == 0)
++ {
++ strcat(test_url, path);
++ test = do_match_test(info->match_type, info->test_str, test_url);
++ }
++
++ /* printk("test_url = \"%s\", test=%d\n", test_url, test); */
++ }
++ if(!test && strstr(host, "www.") == host)
++ {
++ char* www_host = ((char*)host+4);
++ for(prefix_index=0; test_prefixes[prefix_index] != NULL && test == 0; prefix_index++)
++ {
++ char test_url[1250];
++ test_url[0] = '\0';
++ strcat(test_url, test_prefixes[prefix_index]);
++ strcat(test_url, www_host);
++ if(strcmp(path, "/") != 0)
++ {
++ strcat(test_url, path);
++ }
++ test = do_match_test(info->match_type, info->test_str, test_url);
++ if(!test && strcmp(path, "/") == 0)
++ {
++ strcat(test_url, path);
++ test = do_match_test(info->match_type, info->test_str, test_url);
++ }
++
++ /* printk("test_url = \"%s\", test=%d\n", test_url, test); */
++ }
++ }
++ break;
++
++ }
++
++
++ /*
++ * If invert flag is set, return true if this IS a web request, but it didn't match
++ * Always return false for non-web requests
++ */
++ test = info->invert ? !test : test;
++ }
++
++ return test;
++}
++
++
++static bool match(const struct sk_buff *skb, struct xt_action_param *par)
++{
++
++	const struct ipt_weburl_info *info = (const struct ipt_weburl_info*)(par->matchinfo);
++
++
++	int test = 0;
++	struct iphdr* iph;
++
++	/* linearize skb if necessary */
++	struct sk_buff *linear_skb;
++	int skb_copied;
++	if(skb_is_nonlinear(skb))
++	{
++		linear_skb = skb_copy(skb, GFP_ATOMIC); /* may return NULL under memory pressure */
++		skb_copied = 1;
++	}
++	else
++	{
++		linear_skb = (struct sk_buff*)skb;
++		skb_copied = 0;
++	}
++
++
++
++	/* ignore packets that are not TCP, or where linearization failed */
++	iph = (linear_skb == NULL) ? NULL : (struct iphdr*)(skb_network_header(linear_skb)); /* parse the LINEAR copy, not the original skb */
++	if(iph != NULL && iph->protocol == IPPROTO_TCP)
++	{
++		/* get payload */
++		struct tcphdr* tcp_hdr = (struct tcphdr*)( ((unsigned char*)iph) + (iph->ihl*4) );
++		unsigned short payload_offset = (tcp_hdr->doff*4) + (iph->ihl*4);
++		unsigned char* payload = ((unsigned char*)iph) + payload_offset;
++		unsigned short payload_length = ntohs(iph->tot_len) - payload_offset;
++
++
++
++		/* if payload length <= 10 bytes don't bother doing a check, otherwise check for match */
++		if(payload_length > 10)
++		{
++			test = http_match(info, payload, payload_length);
++		}
++	}
++
++	/* free skb if we made a copy to linearize it */
++	if(skb_copied == 1 && linear_skb != NULL)
++	{
++		kfree_skb(linear_skb);
++	}
++
++
++	/* printk("returning %d from weburl\n\n\n", test); */
++	return test;
++}
++
++
++static int checkentry(const struct xt_mtchk_param *par)
++{
++ return 0;
++}
++
++
++static struct xt_match weburl_match __read_mostly =
++{
++ .name = "weburl",
++ .match = &match,
++ .family = AF_INET,
++ .matchsize = sizeof(struct ipt_weburl_info),
++ .checkentry = &checkentry,
++ .me = THIS_MODULE,
++};
++
++static int __init init(void)
++{
++ compiled_map = NULL;
++ return xt_register_match(&weburl_match);
++
++}
++
++static void __exit fini(void)
++{
++ xt_unregister_match(&weburl_match);
++ if(compiled_map != NULL)
++ {
++ unsigned long num_destroyed;
++ destroy_map(compiled_map, DESTROY_MODE_FREE_VALUES, &num_destroyed);
++ }
++}
++
++module_init(init);
++module_exit(fini);
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/ipt_webmon.c 2015-06-19 03:02:55.165678477 +0800
+@@ -0,0 +1,1200 @@
++/* webmon -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2011 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <net/sock.h>
++#include <net/ip.h>
++#include <net/tcp.h>
++#include <linux/time.h>
++#include <linux/spinlock.h>
++#include <linux/proc_fs.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_webmon.h>
++
++#include "webmon_deps/tree_map.h"
++
++
++#include <linux/ktime.h>
++
++
++#include <linux/ip.h>
++#include <linux/netfilter/x_tables.h>
++
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eric Bishop");
++MODULE_DESCRIPTION("Monitor URL in HTTP Requests, designed for use with Gargoyle web interface (www.gargoyle-router.com)");
++
++#define NIPQUAD(addr) \
++ ((unsigned char *)&addr)[0], \
++ ((unsigned char *)&addr)[1], \
++ ((unsigned char *)&addr)[2], \
++ ((unsigned char *)&addr)[3]
++#define STRIP "%u.%u.%u.%u"
++
++typedef struct qn
++{
++ uint32_t src_ip;
++ char* value;
++ struct timeval time;
++ struct qn* next;
++ struct qn* previous;
++} queue_node;
++
++typedef struct
++{
++ queue_node* first;
++ queue_node* last;
++ int length;
++} queue;
++
++static string_map* domain_map = NULL;
++static queue* recent_domains = NULL;
++
++static string_map* search_map = NULL;
++static queue* recent_searches = NULL;
++
++
++static int max_domain_queue_length = 5;
++static int max_search_queue_length = 5;
++
++static spinlock_t webmon_lock = __SPIN_LOCK_UNLOCKED(webmon_lock); /* was ";;" — stray empty file-scope declaration removed */
++
++
++static void update_queue_node_time(queue_node* update_node, queue* full_queue)
++{
++ struct timeval t;
++ do_gettimeofday(&t);
++ update_node->time = t;
++
++ /* move to front of queue if not already at front of queue */
++ if(update_node->previous != NULL)
++ {
++ queue_node* p = update_node->previous;
++ queue_node* n = update_node->next;
++ p->next = n;
++ if(n != NULL)
++ {
++ n->previous = p;
++ }
++ else
++ {
++ full_queue->last = p;
++ }
++ update_node->previous = NULL;
++ update_node->next = full_queue->first;
++ full_queue->first->previous = update_node;
++ full_queue->first = update_node;
++ }
++}
++
++void add_queue_node(uint32_t src_ip, char* value, queue* full_queue, string_map* queue_index, char* queue_index_key, uint32_t max_queue_length )
++{
++
++ queue_node *new_node = (queue_node*)kmalloc(sizeof(queue_node), GFP_ATOMIC);
++ char* dyn_value = kernel_strdup(value);
++ struct timeval t;
++
++
++ if(new_node == NULL || dyn_value == NULL)
++ {
++ if(dyn_value) { kfree(dyn_value); }
++ if(new_node) { kfree(new_node); };
++
++ return;
++ }
++ set_map_element(queue_index, queue_index_key, (void*)new_node);
++
++
++ do_gettimeofday(&t);
++ new_node->time = t;
++ new_node->src_ip = src_ip;
++ new_node->value = dyn_value;
++ new_node->previous = NULL;
++
++ new_node->next = full_queue->first;
++ if(full_queue->first != NULL)
++ {
++ full_queue->first->previous = new_node;
++ }
++ full_queue->first = new_node;
++ full_queue->last = (full_queue->last == NULL) ? new_node : full_queue->last ;
++ full_queue->length = full_queue->length + 1;
++
++ if( full_queue->length > max_queue_length )
++ {
++ queue_node *old_node = full_queue->last;
++ full_queue->last = old_node->previous;
++ full_queue->last->next = NULL;
++ full_queue->first = old_node->previous == NULL ? NULL : full_queue->first; /*shouldn't be needed, but just in case...*/
++ full_queue->length = full_queue->length - 1;
++
++ sprintf(queue_index_key, STRIP"@%s", NIPQUAD(old_node->src_ip), old_node->value);
++ remove_map_element(queue_index, queue_index_key);
++
++ kfree(old_node->value);
++ kfree(old_node);
++ }
++
++ /*
++ queue_node* n = full_queue->first;
++ while(n != NULL)
++ {
++ printf("%ld\t%s\t%s\t%s\n", (unsigned long)n->time, n->src_ip, n->dst_ip, n->domain);
++ n = (queue_node*)n->next;
++ }
++ printf("\n\n");
++ */
++}
++
++void destroy_queue(queue* q)
++{
++ queue_node *last_node = q->last;
++ while(last_node != NULL)
++ {
++ queue_node *previous_node = last_node->previous;
++ free(last_node->value);
++ free(last_node);
++ last_node = previous_node;
++ }
++ free(q);
++}
++
++
++int strnicmp(const char * cs,const char * ct,size_t count)
++{
++ register signed char __res = 0;
++
++ while (count)
++ {
++ if ((__res = toupper( *cs ) - toupper( *ct++ ) ) != 0 || !*cs++)
++ {
++ break;
++ }
++ count--;
++ }
++ return __res;
++}
++
++char *strnistr(const char *s, const char *find, size_t slen)
++{
++ char c, sc;
++ size_t len;
++
++
++ if ((c = *find++) != '\0')
++ {
++ len = strlen(find);
++ do
++ {
++ do
++ {
++ if (slen < 1 || (sc = *s) == '\0')
++ {
++ return (NULL);
++ }
++ --slen;
++ ++s;
++ }
++ while ( toupper(sc) != toupper(c));
++
++ if (len > slen)
++ {
++ return (NULL);
++ }
++ }
++ while (strnicmp(s, find, len) != 0);
++
++ s--;
++ }
++ return ((char *)s);
++}
++
++/* NOTE: This is not quite real edit distance -- all differences are assumed to be in one contiguous block
++ * If differences are not in a contiguous block computed edit distance will be greater than real edit distance.
++ * Edit distance computed here is an upper bound on real edit distance.
++ */
++int within_edit_distance(char *s1, char *s2, int max_edit)
++{
++ int ret = 0;
++ if(s1 != NULL && s2 != NULL)
++ {
++ int edit1 = strlen(s1);
++ int edit2 = strlen(s2);
++ char* s1sp = s1;
++ char* s2sp = s2;
++ char* s1ep = s1 + (edit1-1);
++ char* s2ep = s2 + (edit2-1);
++ while(*s1sp != '\0' && *s2sp != '\0' && *s1sp == *s2sp)
++ {
++ s1sp++;
++ s2sp++;
++ edit1--;
++ edit2--;
++ }
++
++ /* if either is zero we got to the end of one of the strings */
++ while(s1ep > s1sp && s2ep > s2sp && *s1ep == *s2ep)
++ {
++ s1ep--;
++ s2ep--;
++ edit1--;
++ edit2--;
++ }
++ ret = edit1 <= max_edit && edit2 <= max_edit ? 1 : 0;
++ }
++ return ret;
++}
++
++
++/*
++ * line is the line to be parsed -- it is not modified in any way
++ * max_pieces indicates number of pieces to return, if negative this is determined dynamically
++ * include_remainder_at_max indicates whether the last piece, when max pieces are reached,
++ * should be what it would normally be (0) or the entire remainder of the line (1)
++ * if max_pieces < 0 this parameter is ignored
++ *
++ *
++ * returns all non-separator pieces in a line
++ * result is dynamically allocated, MUST be freed after call-- even if
++ * line is empty (you still get a valid char** pointer to to a NULL char*)
++ */
++char** split_on_separators(char* line, char* separators, int num_separators, int max_pieces, int include_remainder_at_max, unsigned long *num_pieces)
++{
++ char** split;
++
++ *num_pieces = 0;
++ if(line != NULL)
++ {
++ int split_index;
++ int non_separator_found;
++ char* dup_line;
++ char* start;
++
++ if(max_pieces < 0)
++ {
++ /* count number of separator characters in line -- this count + 1 is an upperbound on number of pieces */
++ int separator_count = 0;
++ int line_index;
++ for(line_index = 0; line[line_index] != '\0'; line_index++)
++ {
++ int sep_index;
++ int found = 0;
++ for(sep_index =0; found == 0 && sep_index < num_separators; sep_index++)
++ {
++ found = separators[sep_index] == line[line_index] ? 1 : 0;
++ }
++ separator_count = separator_count+ found;
++ }
++ max_pieces = separator_count + 1;
++ }
++ split = (char**)malloc((1+max_pieces)*sizeof(char*));
++ split_index = 0;
++ split[split_index] = NULL;
++
++
++ dup_line = strdup(line);
++ start = dup_line;
++ non_separator_found = 0;
++ while(non_separator_found == 0)
++ {
++ int matches = 0;
++ int sep_index;
++ for(sep_index =0; sep_index < num_separators; sep_index++)
++ {
++ matches = matches == 1 || separators[sep_index] == start[0] ? 1 : 0;
++ }
++ non_separator_found = matches==0 || start[0] == '\0' ? 1 : 0;
++ if(non_separator_found == 0)
++ {
++ start++;
++ }
++ }
++
++ while(start[0] != '\0' && split_index < max_pieces)
++ {
++ /* find first separator index */
++ int first_separator_index = 0;
++ int separator_found = 0;
++ while( separator_found == 0 )
++ {
++ int sep_index;
++ for(sep_index =0; separator_found == 0 && sep_index < num_separators; sep_index++)
++ {
++ separator_found = separators[sep_index] == start[first_separator_index] || start[first_separator_index] == '\0' ? 1 : 0;
++ }
++ if(separator_found == 0)
++ {
++ first_separator_index++;
++ }
++ }
++
++ /* copy next piece to split array */
++ if(first_separator_index > 0)
++ {
++ char* next_piece = NULL;
++ if(split_index +1 < max_pieces || include_remainder_at_max <= 0)
++ {
++ next_piece = (char*)malloc((first_separator_index+1)*sizeof(char));
++ memcpy(next_piece, start, first_separator_index);
++ next_piece[first_separator_index] = '\0';
++ }
++ else
++ {
++ next_piece = strdup(start);
++ }
++ split[split_index] = next_piece;
++ split[split_index+1] = NULL;
++ split_index++;
++ }
++
++
++ /* find next non-separator index, indicating start of next piece */
++ start = start+ first_separator_index;
++ non_separator_found = 0;
++ while(non_separator_found == 0)
++ {
++ int matches = 0;
++ int sep_index;
++ for(sep_index =0; sep_index < num_separators; sep_index++)
++ {
++ matches = matches == 1 || separators[sep_index] == start[0] ? 1 : 0;
++ }
++ non_separator_found = matches==0 || start[0] == '\0' ? 1 : 0;
++ if(non_separator_found == 0)
++ {
++ start++;
++ }
++ }
++ }
++ free(dup_line);
++ *num_pieces = split_index;
++ }
++ else
++ {
++ split = (char**)malloc((1)*sizeof(char*));
++ split[0] = NULL;
++ }
++ return split;
++}
++
++
++
++static void extract_url(const unsigned char* packet_data, int packet_length, char* domain, char* path)
++{
++
++ int path_start_index;
++ int path_end_index;
++ int last_header_index;
++ char last_two_buf[2];
++ int end_found;
++ char* domain_match;
++ char* start_ptr;
++
++ domain[0] = '\0';
++ path[0] = '\0';
++
++
++ /* get path portion of URL */
++ start_ptr = strnistr((char*)packet_data, " ", packet_length);
++ if(start_ptr == NULL)
++ {
++ return;
++ }
++
++ path_start_index = (int)(start_ptr - (char*)packet_data);
++ start_ptr = strnistr((char*)(packet_data+path_start_index), " ", packet_length-(path_start_index+2));
++ if(start_ptr == NULL)
++ {
++ return;
++ }
++
++ while( packet_data[path_start_index] == ' ')
++ {
++ path_start_index++;
++ }
++ path_end_index= (int)(strstr( (char*)(packet_data+path_start_index), " ") - (char*)packet_data);
++ if(path_end_index > 0)
++ {
++ int path_length = path_end_index-path_start_index;
++ path_length = path_length < 625 ? path_length : 624; /* prevent overflow */
++ memcpy(path, packet_data+path_start_index, path_length);
++ path[ path_length] = '\0';
++ }
++ else
++ {
++ return;
++ }
++
++ /* get header length */
++ last_header_index = 2;
++ memcpy(last_two_buf,(char*)packet_data, 2);
++ end_found = 0;
++ while(end_found == 0 && last_header_index < packet_length)
++ {
++ char next = (char)packet_data[last_header_index];
++ if(next == '\n')
++ {
++ end_found = last_two_buf[1] == '\n' || (last_two_buf[0] == '\n' && last_two_buf[1] == '\r') ? 1 : 0;
++ }
++ if(end_found == 0)
++ {
++ last_two_buf[0] = last_two_buf[1];
++ last_two_buf[1] = next;
++ last_header_index++;
++ }
++ }
++
++ /* get domain portion of URL */
++ domain_match = strnistr( (char*)packet_data, "Host:", last_header_index);
++ if(domain_match != NULL)
++ {
++ int domain_end_index;
++ domain_match = domain_match + 5; /* character after "Host:" */
++ while(domain_match[0] == ' ' && ( (char*)domain_match - (char*)packet_data) < last_header_index)
++ {
++ domain_match = domain_match+1;
++ }
++
++ domain_end_index = 0;
++ while( domain_match[domain_end_index] != '\n' &&
++ domain_match[domain_end_index] != '\r' &&
++ domain_match[domain_end_index] != ' ' &&
++ domain_match[domain_end_index] != ':' &&
++ ((char*)domain_match - (char*)packet_data)+domain_end_index < last_header_index
++ )
++ {
++ domain_end_index++;
++ }
++ domain_end_index = domain_end_index < 625 ? domain_end_index : 624; /* prevent overflow */
++ memcpy(domain, domain_match, domain_end_index);
++ domain[domain_end_index] = '\0';
++
++ for(domain_end_index=0; domain[domain_end_index] != '\0'; domain_end_index++)
++ {
++ domain[domain_end_index] = (char)tolower(domain[domain_end_index]);
++ }
++ }
++}
++
++#ifdef CONFIG_PROC_FS
++
++static void *webmon_proc_start(struct seq_file *seq, loff_t *loff_pos)
++{
++ static unsigned long counter = 0;
++
++ /* beginning a new sequence ? */
++ if ( *loff_pos == 0 )
++ {
++ /* yes => return a non null value to begin the sequence */
++ return &counter;
++ }
++ else
++ {
++ /* no => it's the end of the sequence, return end to stop reading */
++ *loff_pos = 0;
++ return NULL;
++ }
++}
++
++static void *webmon_proc_next(struct seq_file *seq, void *v, loff_t *pos)
++{
++ return NULL;
++}
++
++
++static void webmon_proc_stop(struct seq_file *seq, void *v)
++{
++ //don't need to do anything
++}
++
++
++static int webmon_proc_domain_show(struct seq_file *s, void *v)
++{
++ queue_node* next_node;
++ spin_lock_bh(&webmon_lock);
++
++ next_node = recent_domains->last;
++ while(next_node != NULL)
++ {
++ seq_printf(s, "%ld\t"STRIP"\t%s\n", (unsigned long)(next_node->time).tv_sec, NIPQUAD(next_node->src_ip), next_node->value);
++ next_node = (queue_node*)next_node->previous;
++ }
++ spin_unlock_bh(&webmon_lock);
++
++ return 0;
++}
++
++static int webmon_proc_search_show(struct seq_file *s, void *v)
++{
++ queue_node* next_node;
++ spin_lock_bh(&webmon_lock);
++
++ next_node = recent_searches->last;
++ while(next_node != NULL)
++ {
++ seq_printf(s, "%ld\t"STRIP"\t%s\n", (unsigned long)(next_node->time).tv_sec, NIPQUAD(next_node->src_ip), next_node->value);
++ next_node = (queue_node*)next_node->previous;
++ }
++ spin_unlock_bh(&webmon_lock);
++
++ return 0;
++}
++
++
++static struct seq_operations webmon_proc_domain_sops = {
++ .start = webmon_proc_start,
++ .next = webmon_proc_next,
++ .stop = webmon_proc_stop,
++ .show = webmon_proc_domain_show
++};
++
++static struct seq_operations webmon_proc_search_sops = {
++ .start = webmon_proc_start,
++ .next = webmon_proc_next,
++ .stop = webmon_proc_stop,
++ .show = webmon_proc_search_show
++};
++
++
++static int webmon_proc_domain_open(struct inode *inode, struct file* file)
++{
++ return seq_open(file, &webmon_proc_domain_sops);
++}
++static int webmon_proc_search_open(struct inode *inode, struct file* file)
++{
++ return seq_open(file, &webmon_proc_search_sops);
++}
++
++
++
++static struct file_operations webmon_proc_domain_fops = {
++ .owner = THIS_MODULE,
++ .open = webmon_proc_domain_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release
++};
++static struct file_operations webmon_proc_search_fops = {
++ .owner = THIS_MODULE,
++ .open = webmon_proc_search_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release
++};
++
++
++#endif
++
++
++
++
++
++
++static int ipt_webmon_set_ctl(struct sock *sk, int cmd, void *user, u_int32_t len)
++{
++
++ char* buffer = kmalloc(len, GFP_ATOMIC);
++ if(buffer == NULL) /* check for malloc failure */
++ {
++ return 0;
++ }
++ spin_lock_bh(&webmon_lock);
++ copy_from_user(buffer, user, len);
++
++ if(len > 1 + sizeof(uint32_t))
++ {
++ unsigned char type = buffer[0];
++ uint32_t max_queue_length = *((uint32_t*)(buffer+1));
++ char* data = buffer+1+sizeof(uint32_t);
++ char newline_terminator[] = { '\n', '\r' };
++ char whitespace_chars[] = { '\t', ' ' };
++
++ if(type == WEBMON_DOMAIN || type == WEBMON_SEARCH )
++ {
++ unsigned long num_destroyed;
++
++
++ /* destroy and re-initialize queue and map */
++ if(type == WEBMON_DOMAIN )
++ {
++ destroy_map(domain_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++ destroy_queue(recent_domains);
++ recent_domains = (queue*)malloc(sizeof(queue));
++ recent_domains->first = NULL;
++ recent_domains->last = NULL;
++ recent_domains->length = 0;
++ domain_map = initialize_map(0);
++
++ max_domain_queue_length = max_queue_length;
++ }
++ else if(type == WEBMON_SEARCH)
++ {
++ destroy_map(search_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++ destroy_queue(recent_searches);
++ recent_searches = (queue*)malloc(sizeof(queue));
++ recent_searches->first = NULL;
++ recent_searches->last = NULL;
++ recent_searches->length = 0;
++ search_map = initialize_map(0);
++
++ max_search_queue_length = max_queue_length;
++ }
++
++ if(data[0] != '\0')
++ {
++ unsigned long num_lines;
++ unsigned long line_index;
++ char** lines = split_on_separators(data, newline_terminator, 2, -1, 0, &num_lines);
++ for(line_index=0; line_index < num_lines; line_index++)
++ {
++ char* line = lines[line_index];
++ unsigned long num_pieces;
++ char** split = split_on_separators(line, whitespace_chars, 2, -1, 0, &num_pieces);
++
++ //check that there are 3 pieces (time, src_ip, value)
++ int length;
++ for(length=0; split[length] != NULL ; length++){}
++ if(length == 3)
++ {
++ time_t time;
++ int parsed_ip[4];
++ int valid_ip = sscanf(split[1], "%d.%d.%d.%d", parsed_ip, parsed_ip+1, parsed_ip+2, parsed_ip+3);
++ if(valid_ip == 4)
++ {
++ valid_ip = parsed_ip[0] <= 255 && parsed_ip[1] <= 255 && parsed_ip[2] <= 255 && parsed_ip[3] <= 255 ? valid_ip : 0;
++ }
++ if(sscanf(split[0], "%ld", &time) > 0 && valid_ip == 4)
++ {
++ char* value = split[2];
++ char value_key[700];
++ uint32_t ip = (parsed_ip[0]<<24) + (parsed_ip[1]<<16) + (parsed_ip[2]<<8) + (parsed_ip[3]) ;
++ ip = htonl(ip);
++							snprintf(value_key, sizeof(value_key), STRIP"@%s", NIPQUAD(ip), value); /* value is user-supplied and unbounded: bound the write */
++ if(type == WEBMON_DOMAIN)
++ {
++ add_queue_node(ip, value, recent_domains, domain_map, value_key, max_domain_queue_length );
++ (recent_domains->first->time).tv_sec = time;
++ }
++ else if(type == WEBMON_SEARCH)
++ {
++ add_queue_node(ip, value, recent_searches, search_map, value_key, max_search_queue_length );
++ (recent_searches->first->time).tv_sec = time;
++ }
++ }
++ }
++
++ for(length=0; split[length] != NULL ; length++)
++ {
++ free(split[length]);
++ }
++ free(split);
++ free(line);
++ }
++ free(lines);
++ }
++ }
++ }
++ kfree(buffer);
++ spin_unlock_bh(&webmon_lock);
++
++
++	return 0; /* nf_sockopt set handlers return 0 on success; a nonzero value propagates to setsockopt as an error */
++}
++static struct nf_sockopt_ops ipt_webmon_sockopts =
++{
++ .pf = PF_INET,
++ .set_optmin = WEBMON_SET,
++ .set_optmax = WEBMON_SET+1,
++ .set = ipt_webmon_set_ctl,
++};
++
++
++
++
++static bool match(const struct sk_buff *skb, struct xt_action_param *par)
++{
++
++ const struct ipt_webmon_info *info = (const struct ipt_webmon_info*)(par->matchinfo);
++
++
++ struct iphdr* iph;
++
++ /* linearize skb if necessary */
++ struct sk_buff *linear_skb;
++ int skb_copied;
++ if(skb_is_nonlinear(skb))
++ {
++		linear_skb = skb_copy(skb, GFP_ATOMIC);
++		if(linear_skb == NULL){ return 0; } skb_copied = 1; /* skb_copy can fail under GFP_ATOMIC memory pressure */
++ }
++ else
++ {
++ linear_skb = (struct sk_buff*)skb;
++ skb_copied = 0;
++ }
++
++
++
++ /* ignore packets that are not TCP */
++	iph = (struct iphdr*)(skb_network_header(linear_skb)); /* parse the linearized copy, not the possibly non-linear original skb */
++ if(iph->protocol == IPPROTO_TCP)
++ {
++ /* get payload */
++ struct tcphdr* tcp_hdr = (struct tcphdr*)( ((unsigned char*)iph) + (iph->ihl*4) );
++ unsigned short payload_offset = (tcp_hdr->doff*4) + (iph->ihl*4);
++ unsigned char* payload = ((unsigned char*)iph) + payload_offset;
++ unsigned short payload_length = ntohs(iph->tot_len) - payload_offset;
++
++
++
++ /* if payload length <= 10 bytes don't bother doing a check, otherwise check for match */
++ if(payload_length > 10)
++ {
++ /* are we dealing with a web page request */
++ if(strnicmp((char*)payload, "GET ", 4) == 0 || strnicmp( (char*)payload, "POST ", 5) == 0 || strnicmp((char*)payload, "HEAD ", 5) == 0)
++ {
++ char domain[650];
++ char path[650];
++ char domain_key[700];
++ unsigned char save = info->exclude_type == WEBMON_EXCLUDE ? 1 : 0;
++ uint32_t ip_index;
++
++
++ for(ip_index = 0; ip_index < info->num_exclude_ips; ip_index++)
++ {
++ if( (info->exclude_ips)[ip_index] == iph->saddr )
++ {
++ save = info->exclude_type == WEBMON_EXCLUDE ? 0 : 1;
++ }
++ }
++ for(ip_index=0; ip_index < info->num_exclude_ranges; ip_index++)
++ {
++ struct ipt_webmon_ip_range r = (info->exclude_ranges)[ip_index];
++ if( (unsigned long)ntohl( r.start) <= (unsigned long)ntohl(iph->saddr) && (unsigned long)ntohl(r.end) >= (unsigned long)ntohl(iph->saddr) )
++ {
++ save = info->exclude_type == WEBMON_EXCLUDE ? 0 : 1;
++ }
++ }
++
++
++ if(save)
++ {
++ extract_url(payload, payload_length, domain, path);
++
++
++ sprintf(domain_key, STRIP"@%s", NIPQUAD(iph->saddr), domain);
++
++ if(strlen(domain) > 0)
++ {
++ char *search_part = NULL;
++ spin_lock_bh(&webmon_lock);
++
++
++
++ if(get_string_map_element(domain_map, domain_key))
++ {
++ //update time
++ update_queue_node_time( (queue_node*)get_map_element(domain_map, domain_key), recent_domains );
++ }
++ else
++ {
++ //add
++ add_queue_node(iph->saddr, domain, recent_domains, domain_map, domain_key, max_domain_queue_length );
++ }
++
++
++ /* printk("domain,path=\"%s\", \"%s\"\n", domain, path); */
++
++ if(strnistr(domain, "google.", 625) != NULL)
++ {
++ search_part = strstr(path, "&q=");
++ search_part = search_part == NULL ? strstr(path, "#q=") : search_part;
++ search_part = search_part == NULL ? strstr(path, "?q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "bing.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "yahoo.") != NULL)
++ {
++ search_part = strstr(path, "?p=");
++ search_part = search_part == NULL ? strstr(path, "&p=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "lycos.") != NULL)
++ {
++ search_part = strstr(path, "&query=");
++ search_part = search_part == NULL ? strstr(path, "?query=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+7;
++ }
++ else if(strstr(domain, "altavista.") != NULL)
++ {
++ search_part = strstr(path, "&q=");
++ search_part = search_part == NULL ? strstr(path, "?q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "duckduckgo.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "baidu.") != NULL)
++ {
++ search_part = strstr(path, "?wd=");
++ search_part = search_part == NULL ? strstr(path, "&wd=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+4;
++ }
++ else if(strstr(domain, "search.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "aol.") != NULL)
++ {
++ search_part = strstr(path, "&q=");
++ search_part = search_part == NULL ? strstr(path, "?q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "ask.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "yandex.") != NULL)
++ {
++ search_part = strstr(path, "?text=");
++ search_part = search_part == NULL ? strstr(path, "&text=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+6;
++ }
++ else if(strstr(domain, "naver.") != NULL)
++ {
++ search_part = strstr(path, "&query=");
++ search_part = search_part == NULL ? strstr(path, "?query=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+7;
++ }
++ else if(strstr(domain, "daum.") != NULL)
++ {
++ search_part = strstr(path, "&q=");
++ search_part = search_part == NULL ? strstr(path, "?q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "cuil.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "kosmix.") != NULL)
++ {
++ search_part = strstr(path, "/topic/");
++ search_part = search_part == NULL ? search_part : search_part+7;
++ }
++ else if(strstr(domain, "yebol.") != NULL)
++ {
++ search_part = strstr(path, "?key=");
++ search_part = search_part == NULL ? strstr(path, "&key=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+5;
++ }
++ else if(strstr(domain, "sogou.") != NULL)
++ {
++ search_part = strstr(path, "&query=");
++ search_part = search_part == NULL ? strstr(path, "?query=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+7;
++ }
++ else if(strstr(domain, "youdao.") != NULL)
++ {
++ search_part = strstr(path, "?q=");
++ search_part = search_part == NULL ? strstr(path, "&q=") : search_part;
++ search_part = search_part == NULL ? search_part : search_part+3;
++ }
++ else if(strstr(domain, "metacrawler.") != NULL)
++ {
++ search_part = strstr(path, "/ws/results/Web/");
++ search_part = search_part == NULL ? search_part : search_part+16;
++ }
++ else if(strstr(domain, "webcrawler.") != NULL)
++ {
++ search_part = strstr(path, "/ws/results/Web/");
++ search_part = search_part == NULL ? search_part : search_part+16;
++ }
++
++
++ if(search_part != NULL)
++ {
++ int spi, si;
++ char search_key[700];
++ char search[650];
++ queue_node *recent_node = recent_searches->first;
++
++ /*unescape, replacing whitespace with + */
++ si = 0;
++						for(spi=0; si < (int)(sizeof(search)-1) && search_part[spi] != '\0' && search_part[spi] != '&' && search_part[spi] != '/'; spi++) /* bound si so the decode cannot overrun search[] */
++ {
++ int parsed_hex = 0;
++ if( search_part[spi] == '%')
++ {
++ if(search_part[spi+1] != '\0' && search_part[spi+1] != '&' && search_part[spi+1] != '/')
++ {
++ if(search_part[spi+2] != '\0' && search_part[spi+2] != '&' && search_part[spi+2] != '/')
++ {
++ char enc[3];
++ int hex;
++ enc[0] = search_part[spi+1];
++ enc[1] = search_part[spi+2];
++ enc[2] = '\0';
++ if(sscanf(enc, "%x", &hex) > 0)
++ {
++ parsed_hex = 1;
++ search[si] = hex == ' ' || hex == '\t' || hex == '\r' || hex == '\n' ? '+' : (char)hex;
++ spi = spi+2;
++ }
++ }
++ }
++ }
++ if(parsed_hex == 0)
++ {
++ search[si] = search_part[spi];
++ }
++ si++;
++ }
++ search[si] = '\0';
++
++
++
++ sprintf(search_key, STRIP"@%s", NIPQUAD(iph->saddr), search);
++
++
++ /* Often times search engines will initiate a search as you type it in, but these intermediate queries aren't the real search query
++ * So, if the most recent query is a substring of the current one, discard it in favor of this one
++ */
++ if(recent_node != NULL)
++ {
++ if(recent_node->src_ip == iph->saddr)
++ {
++ struct timeval t;
++ do_gettimeofday(&t);
++ if( (recent_node->time).tv_sec + 1 >= t.tv_sec || ((recent_node->time).tv_sec + 5 >= t.tv_sec && within_edit_distance(search, recent_node->value, 2)))
++ {
++ char recent_key[700];
++
++										snprintf(recent_key, sizeof(recent_key), STRIP"@%s", NIPQUAD(recent_node->src_ip), recent_node->value); /* stored values can originate from unbounded setsockopt input */
++ remove_map_element(search_map, recent_key);
++
++ recent_searches->first = recent_node->next;
++ recent_searches->last = recent_searches->first == NULL ? NULL : recent_searches->last;
++ if(recent_searches->first != NULL)
++ {
++ recent_searches->first->previous = NULL;
++ }
++ recent_searches->length = recent_searches->length - 1 ;
++ free(recent_node->value);
++ free(recent_node);
++ }
++ }
++ }
++
++
++
++ if(get_string_map_element(search_map, search_key))
++ {
++ //update time
++ update_queue_node_time( (queue_node*)get_map_element(search_map, search_key), recent_searches );
++ }
++ else
++ {
++ //add
++ add_queue_node(iph->saddr, search, recent_searches, search_map, search_key, max_search_queue_length );
++ }
++ }
++ spin_unlock_bh(&webmon_lock);
++ }
++ }
++ }
++ }
++ }
++
++ /* free skb if we made a copy to linearize it */
++ if(skb_copied == 1)
++ {
++ kfree_skb(linear_skb);
++ }
++
++
++ /* printk("returning %d from webmon\n\n\n", test); */
++ return 0;
++}
++
++
++
++static int checkentry(const struct xt_mtchk_param *par)
++{
++
++ struct ipt_webmon_info *info = (struct ipt_webmon_info*)(par->matchinfo);
++
++
++ spin_lock_bh(&webmon_lock);
++ if(info->ref_count == NULL) /* first instance, we're inserting rule */
++ {
++ info->ref_count = (uint32_t*)kmalloc(sizeof(uint32_t), GFP_ATOMIC);
++ if(info->ref_count == NULL) /* deal with kmalloc failure */
++ {
++ printk("ipt_webmon: kmalloc failure in checkentry!\n");
++			spin_unlock_bh(&webmon_lock); return -ENOMEM; /* must drop the lock before returning; negative errno marks checkentry failure */
++ }
++ *(info->ref_count) = 1;
++
++
++ max_search_queue_length = info->max_searches;
++ max_domain_queue_length = info->max_domains;
++
++
++ }
++ else
++ {
++ *(info->ref_count) = *(info->ref_count) + 1;
++ }
++ spin_unlock_bh(&webmon_lock);
++
++ return 0;
++}
++
++static void destroy( const struct xt_mtdtor_param *par )
++{
++ struct ipt_webmon_info *info = (struct ipt_webmon_info*)(par->matchinfo);
++
++ spin_lock_bh(&webmon_lock);
++ *(info->ref_count) = *(info->ref_count) - 1;
++ if(*(info->ref_count) == 0)
++ {
++ kfree(info->ref_count);
++ }
++ spin_unlock_bh(&webmon_lock);
++
++}
++
++static struct xt_match webmon_match __read_mostly =
++{
++
++ .name = "webmon",
++ .match = &match,
++ .family = AF_INET,
++ .matchsize = sizeof(struct ipt_webmon_info),
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE,
++};
++
++#ifdef CONFIG_PROC_FS
++ struct proc_dir_entry *proc_webmon_recent_domains;
++ struct proc_dir_entry *proc_webmon_recent_searches;
++#endif
++
++
++static int __init init(void)
++{
++/*
++ #ifdef CONFIG_PROC_FS
++ struct proc_dir_entry *proc_webmon_recent_domains;
++ struct proc_dir_entry *proc_webmon_recent_searches;
++ #endif
++*/
++ spin_lock_bh(&webmon_lock);
++
++ recent_domains = (queue*)malloc(sizeof(queue));
++ recent_domains->first = NULL;
++ recent_domains->last = NULL;
++ recent_domains->length = 0;
++ domain_map = initialize_string_map(0);
++
++ recent_searches = (queue*)malloc(sizeof(queue));
++ recent_searches->first = NULL;
++ recent_searches->last = NULL;
++ recent_searches->length = 0;
++ search_map = initialize_string_map(0);
++
++
++
++ #ifdef CONFIG_PROC_FS
++ proc_webmon_recent_domains =proc_create("webmon_recent_domains", 0, NULL, &webmon_proc_domain_fops);
++ proc_webmon_recent_searches =proc_create("webmon_recent_searches", 0, NULL, &webmon_proc_search_fops);
++/* if(proc_webmon_recent_domains)
++ {
++ proc_webmon_recent_domains->proc_fops = &webmon_proc_domain_fops;
++ }
++ if(proc_webmon_recent_searches)
++ {
++ proc_webmon_recent_searches->proc_fops = &webmon_proc_search_fops;
++ }*/
++ #endif
++
++ if (nf_register_sockopt(&ipt_webmon_sockopts) < 0)
++ {
++ printk("ipt_webmon: Can't register sockopts. Aborting\n");
++ spin_unlock_bh(&webmon_lock);
++ return -1;
++ }
++ spin_unlock_bh(&webmon_lock);
++
++ return xt_register_match(&webmon_match);
++}
++
++static void __exit fini(void)
++{
++
++ unsigned long num_destroyed;
++
++ spin_lock_bh(&webmon_lock);
++
++
++ #ifdef CONFIG_PROC_FS
++ proc_remove(proc_webmon_recent_domains);
++ proc_remove(proc_webmon_recent_searches);
++ #endif
++ nf_unregister_sockopt(&ipt_webmon_sockopts);
++ xt_unregister_match(&webmon_match);
++ destroy_map(domain_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++ destroy_map(search_map, DESTROY_MODE_IGNORE_VALUES, &num_destroyed);
++ destroy_queue(recent_domains);
++ destroy_queue(recent_searches);
++
++ spin_unlock_bh(&webmon_lock);
++
++
++}
++
++module_init(init);
++module_exit(fini);
++
++
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/net/ipv4/netfilter/bandwidth_deps/tree_map.h 2015-06-19 03:02:55.365670123 +0800
+@@ -0,0 +1,1093 @@
++/*
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This work 'as-is' we provide.
++ * No warranty, express or implied.
++ * We've done our best,
++ * to debug and test.
++ * Liability for damages denied.
++ *
++ * Permission is granted hereby,
++ * to copy, share, and modify.
++ * Use as is fit,
++ * free or for profit.
++ * On this notice these rights rely.
++ *
++ *
++ *
++ * Note that unlike other portions of Gargoyle this code
++ * does not fall under the GPL, but the rather whimsical
++ * 'Poetic License' above.
++ *
++ * Basically, this library contains a bunch of utilities
++ * that I find useful. I'm sure other libraries exist
++ * that are just as good or better, but I like these tools
++ * because I personally wrote them, so I know their quirks.
++ * (i.e. I know where the bodies are buried). I want to
++ * make sure that I can re-use these utilities for whatever
++ * code I may want to write in the future be it
++ * proprietary or open-source, so I've put them under
++ * a very, very permissive license.
++ *
++ * If you find this code useful, use it. If not, don't.
++ * I really don't care.
++ *
++ */
++
++
++#ifdef __KERNEL__ /* use ifdef: __KERNEL__ is undefined (not 0/1) in userspace builds */
++ #define malloc(foo) kmalloc(foo,GFP_ATOMIC)
++ #define free(foo) kfree(foo)
++ #define printf(format,args...) printk(format,##args)
++
++ /* kernel strdup */
++ static inline char *kernel_strdup(const char *str);
++ static inline char *kernel_strdup(const char *str)
++ {
++ char *tmp;
++ long int s;
++ s=strlen(str) + 1;
++ tmp = kmalloc(s, GFP_ATOMIC);
++ if (tmp != NULL)
++ {
++ memcpy(tmp, str, s);
++ }
++ return tmp;
++ }
++ #define strdup kernel_strdup
++
++#endif
++
++
++
++/* tree_map structs / prototypes */
++typedef struct long_tree_map_node
++{
++ unsigned long key;
++ void* value;
++
++ signed char balance;
++ struct long_tree_map_node* left;
++ struct long_tree_map_node* right;
++} long_map_node;
++
++typedef struct
++{
++ long_map_node* root;
++ unsigned long num_elements;
++
++}long_map;
++
++typedef struct
++{
++ long_map lm;
++ unsigned char store_keys;
++ unsigned long num_elements;
++
++}string_map;
++
++
++
++/* long map functions */
++long_map* initialize_long_map(void);
++void* get_long_map_element(long_map* map, unsigned long key);
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key);
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key);
++void* set_long_map_element(long_map* map, unsigned long key, void* value);
++void* remove_long_map_element(long_map* map, unsigned long key);
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned);
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned);
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value));
++
++/* string map functions */
++string_map* initialize_string_map(unsigned char store_keys);
++void* get_string_map_element(string_map* map, const char* key);
++void* get_string_map_element_with_hashed_key(string_map* map, unsigned long hashed_key);
++void* set_string_map_element(string_map* map, const char* key, void* value);
++void* remove_string_map_element(string_map* map, const char* key);
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned);
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned);
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed);
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value));
++
++
++/*
++ * three different ways to deal with values when data structure is destroyed
++ */
++#define DESTROY_MODE_RETURN_VALUES 20
++#define DESTROY_MODE_FREE_VALUES 21
++#define DESTROY_MODE_IGNORE_VALUES 22
++
++
++/*
++ * for convenience & backwards compatibility alias _string_map_ functions to
++ * _map_ functions since string map is used more often than long map
++ */
++#define initialize_map initialize_string_map
++#define set_map_element set_string_map_element
++#define get_map_element get_string_map_element
++#define remove_map_element remove_string_map_element
++#define get_map_keys get_string_map_keys
++#define get_map_values get_string_map_values
++#define destroy_map destroy_string_map
++
++
++/* internal utility structures/ functions */
++typedef struct stack_node_struct
++{
++ long_map_node** node_ptr;
++ signed char direction;
++ struct stack_node_struct* previous;
++} stack_node;
++
++static void free_stack(stack_node* stack);
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed);
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value));
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value));
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth);
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth);
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op);
++static void rotate_right (long_map_node** parent);
++static void rotate_left (long_map_node** parent);
++
++/* internal for string map */
++typedef struct
++{
++ char* key;
++ void* value;
++} string_map_key_value;
++static unsigned long sdbm_string_hash(const char *key);
++
++
++
++
++/***************************************************
++ * For testing only
++ ***************************************************/
++/*
++void print_list(stack_node *l);
++
++void print_list(stack_node *l)
++{
++ if(l != NULL)
++ {
++ printf(" list key = %ld, dir=%d, \n", (*(l->node_ptr))->key, l->direction);
++ print_list(l->previous);
++ }
++}
++*/
++/******************************************************
++ * End testing Code
++ *******************************************************/
++
++
++
++
++/***************************************************
++ * string_map function definitions
++ ***************************************************/
++
++string_map* initialize_string_map(unsigned char store_keys)
++{
++ string_map* map = (string_map*)malloc(sizeof(string_map));
++ if(map != NULL)
++ {
++ map->store_keys = store_keys;
++ map->lm.root = NULL;
++ map->lm.num_elements = 0;
++ map->num_elements = map->lm.num_elements;
++ }
++ return map;
++}
++
++void* get_string_map_element(string_map* map, const char* key)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++
++ return get_string_map_element_with_hashed_key(map, hashed_key);
++}
++
++void* get_string_map_element_with_hashed_key(string_map* map, unsigned long hashed_key)
++{
++ void* return_value;
++ /* printk("doing lookup for key = %lu\n", hashed_key); */
++ return_value = get_long_map_element( &(map->lm), hashed_key);
++ if(return_value != NULL && map->store_keys)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++void* set_string_map_element(string_map* map, const char* key, void* value)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = NULL;
++ if(map->store_keys)
++ {
++ string_map_key_value* kv = (string_map_key_value*)malloc(sizeof(string_map_key_value));
++ if(kv == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ kv->key = strdup(key);
++ if(kv->key == NULL) /* deal with malloc failure */
++ {
++ free(kv);
++ return NULL;
++ }
++ kv->value = value;
++ return_value = set_long_map_element( &(map->lm), hashed_key, kv);
++ if(return_value != NULL)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ free(r->key);
++ free(r);
++ }
++ }
++ else
++ {
++ return_value = set_long_map_element( &(map->lm), hashed_key, value);
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++void* remove_string_map_element(string_map* map, const char* key)
++{
++ unsigned long hashed_key = sdbm_string_hash(key);
++ void* return_value = remove_long_map_element( &(map->lm), hashed_key);
++
++ if(return_value != NULL && map->store_keys)
++ {
++ string_map_key_value* r = (string_map_key_value*)return_value;
++ return_value = r->value;
++ free(r->key);
++ free(r);
++ }
++ map->num_elements = map->lm.num_elements;
++ return return_value;
++}
++
++char** get_string_map_keys(string_map* map, unsigned long* num_keys_returned)
++{
++ char** str_keys;
++ str_keys = (char**)malloc((map->num_elements+1)*sizeof(char*));
++ if(str_keys == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ str_keys[0] = NULL;
++ *num_keys_returned = 0;
++ if(map->store_keys && map->num_elements > 0)
++ {
++ unsigned long list_length;
++ void** long_values = get_sorted_long_map_values( &(map->lm), &list_length);
++ unsigned long key_index;
++ /*list_length will be 0 on malloc failure in get_sorted_long_map_values, so this code shouldn't seg fault if that happens */
++ for(key_index = 0; key_index < list_length; key_index++)
++ {
++ str_keys[key_index] = strdup( ((string_map_key_value*)(long_values[key_index]))->key);
++ if(str_keys[key_index] == NULL) /* deal with malloc failure */
++ {
++ //just return the incomplete list (hey, it's null terminated...)
++ free(long_values);
++ return str_keys;
++ }
++ *num_keys_returned = *num_keys_returned + 1;
++ }
++ str_keys[list_length] = NULL;
++ free(long_values);
++ }
++ return str_keys;
++}
++
++
++void** get_string_map_values(string_map* map, unsigned long* num_values_returned)
++{
++ void** values = NULL;
++ if(map != NULL)
++ {
++ values = get_sorted_long_map_values ( &(map->lm), num_values_returned );
++ }
++ return values;
++}
++
++
++void** destroy_string_map(string_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = NULL;
++ if(map != NULL)
++ {
++ if(map->store_keys)
++ {
++ void** kvs = destroy_long_map_values( &(map->lm), DESTROY_MODE_RETURN_VALUES, num_destroyed );
++ unsigned long kv_index = 0;
++ for(kv_index=0; kv_index < *num_destroyed; kv_index++)
++ {
++ string_map_key_value* kv = (string_map_key_value*)kvs[kv_index];
++ void* value = kv->value;
++
++ free(kv->key);
++ free(kv);
++ if(destruction_type == DESTROY_MODE_FREE_VALUES)
++ {
++ free(value);
++ }
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ kvs[kv_index] = value;
++ }
++ }
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values = kvs;
++ }
++ else
++ {
++ free(kvs);
++ }
++ }
++ else
++ {
++ return_values = destroy_long_map_values( &(map->lm), destruction_type, num_destroyed );
++ }
++ free(map);
++ }
++ return return_values;
++}
++
++
++
++
++/***************************************************
++ * long_map function definitions
++ ***************************************************/
++
++long_map* initialize_long_map(void)
++{
++ long_map* map = (long_map*)malloc(sizeof(long_map));
++ if(map != NULL) /* test for malloc failure */
++ {
++ map->root = NULL;
++ map->num_elements = 0;
++ }
++ return map;
++}
++
++void* get_long_map_element(long_map* map, unsigned long key)
++{
++ void* value = NULL;
++
++ if(map->root != NULL)
++ {
++ long_map_node* parent_node = map->root;
++ long_map_node* next_node;
++ while( key != parent_node->key && (next_node = (long_map_node *)(key < parent_node->key ? parent_node->left : parent_node->right)) != NULL)
++ {
++ parent_node = next_node;
++ }
++ if(parent_node->key == key)
++ {
++ value = parent_node->value;
++ }
++ }
++ return value;
++}
++
++void* get_smallest_long_map_element(long_map* map, unsigned long* smallest_key)
++{
++ void* value = NULL;
++ if(map->root != NULL)
++ {
++ long_map_node* next_node = map->root;
++ while( next_node->left != NULL)
++ {
++ next_node = next_node->left;
++ }
++ value = next_node->value;
++ *smallest_key = next_node->key;
++ }
++ return value;
++}
++
++void* get_largest_long_map_element(long_map* map, unsigned long* largest_key)
++{
++ void* value = NULL;
++ if(map->root != NULL)
++ {
++ long_map_node* next_node = map->root;
++ while( next_node->right != NULL)
++ {
++ next_node = next_node->right;
++ }
++ value = next_node->value;
++ *largest_key = next_node->key;
++ }
++ return value;
++}
++
++void* remove_smallest_long_map_element(long_map* map, unsigned long* smallest_key)
++{
++ get_smallest_long_map_element(map, smallest_key);
++ return remove_long_map_element(map, *smallest_key);
++}
++
++void* remove_largest_long_map_element(long_map* map, unsigned long* largest_key)
++{
++ get_largest_long_map_element(map, largest_key);
++ return remove_long_map_element(map, *largest_key);
++}
++
++
++/* if replacement performed, returns replaced value, otherwise null */
++void* set_long_map_element(long_map* map, unsigned long key, void* value)
++{
++ stack_node* parent_list = NULL;
++ void* old_value = NULL;
++ int old_value_found = 0;
++
++ long_map_node* parent_node;
++ long_map_node* next_node;
++ stack_node* next_parent;
++ stack_node* previous_parent;
++ signed char new_balance;
++
++
++ long_map_node* new_node = (long_map_node*)malloc(sizeof(long_map_node));
++ if(new_node == NULL)
++ {
++ return NULL;
++ }
++ new_node->value = value;
++ new_node->key = key;
++ new_node->left = NULL;
++ new_node->right = NULL;
++ new_node->balance = 0;
++
++
++
++ if(map->root == NULL)
++ {
++ map->root = new_node;
++ }
++ else
++ {
++ parent_node = map->root;
++
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ free(new_node);
++ return NULL; /* won't insert but won't seg fault */
++ }
++ next_parent->node_ptr = &(map->root);
++ next_parent->previous = parent_list;
++ parent_list = next_parent;
++
++ while( key != parent_node->key && (next_node = (key < parent_node->key ? parent_node->left : parent_node->right) ) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ free(new_node);
++ return NULL;
++ }
++ next_parent->node_ptr = key < parent_node->key ? &(parent_node->left) : &(parent_node->right);
++ next_parent->previous = parent_list;
++ next_parent->previous->direction = key < parent_node->key ? -1 : 1;
++ parent_list = next_parent;
++
++ parent_node = next_node;
++ }
++
++
++ if(key == parent_node->key)
++ {
++ old_value = parent_node->value;
++ old_value_found = 1;
++ parent_node->value = value;
++ free(new_node);
++ /* we merely replaced a node, no need to rebalance */
++ }
++ else
++ {
++ if(key < parent_node->key)
++ {
++ parent_node->left = (void*)new_node;
++ parent_list->direction = -1;
++ }
++ else
++ {
++ parent_node->right = (void*)new_node;
++ parent_list->direction = 1;
++ }
++
++
++ /* we inserted a node, rebalance */
++ previous_parent = parent_list;
++ new_balance = 1; /* initial value is not used, but must not be 0 for initial loop condition */
++
++
++ while(previous_parent != NULL && new_balance != 0)
++ {
++ new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, 1);
++ previous_parent = previous_parent->previous;
++ }
++ }
++ }
++
++ free_stack(parent_list);
++
++ if(old_value_found == 0)
++ {
++ map->num_elements = map->num_elements + 1;
++ }
++
++ return old_value;
++}
++
++
++void* remove_long_map_element(long_map* map, unsigned long key)
++{
++
++ void* value = NULL;
++
++ long_map_node* root_node = map->root;
++ stack_node* parent_list = NULL;
++
++
++ long_map_node* remove_parent;
++ long_map_node* remove_node;
++ long_map_node* next_node;
++
++ long_map_node* replacement;
++ long_map_node* replacement_parent;
++ long_map_node* replacement_next;
++
++ stack_node* next_parent;
++ stack_node* previous_parent;
++ stack_node* replacement_stack_node;
++
++
++ signed char new_balance;
++
++
++
++ if(root_node != NULL)
++ {
++ remove_parent = root_node;
++ remove_node = key < remove_parent->key ? remove_parent->left : remove_parent->right;
++
++ if(remove_node != NULL && key != remove_parent->key)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ return NULL;
++ }
++ next_parent->node_ptr = &(map->root);
++ next_parent->previous = parent_list;
++ parent_list = next_parent;
++ while( key != remove_node->key && (next_node = (key < remove_node->key ? remove_node->left : remove_node->right)) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++ next_parent->node_ptr = key < remove_parent->key ? &(remove_parent->left) : &(remove_parent->right);
++ next_parent->previous = parent_list;
++ next_parent->previous->direction = key < remove_parent->key ? -1 : 1;
++ parent_list = next_parent;
++
++
++ remove_parent = remove_node;
++ remove_node = next_node;
++ }
++ parent_list->direction = key < remove_parent-> key ? -1 : 1;
++ }
++ else
++ {
++ remove_node = remove_parent;
++ }
++
++
++ if(key == remove_node->key)
++ {
++
++ /* find replacement for node we are deleting */
++ if( remove_node->right == NULL )
++ {
++ replacement = remove_node->left;
++ }
++ else if( remove_node->right->left == NULL)
++ {
++
++ replacement = remove_node->right;
++ replacement->left = remove_node->left;
++ replacement->balance = remove_node->balance;
++
++ /* put pointer to replacement node into list for balance update */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = 1; /* replacement is from right */
++ if(remove_node == remove_parent) /* special case for root node */
++ {
++ replacement_stack_node->node_ptr = &(map->root);
++ }
++ else
++ {
++ replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++ }
++ parent_list = replacement_stack_node;
++
++ }
++ else
++ {
++ /* put pointer to replacement node into list for balance update */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = 1; /* we always look for replacement on right */
++ if(remove_node == remove_parent) /* special case for root node */
++ {
++ replacement_stack_node->node_ptr = &(map->root);
++ }
++ else
++ {
++ replacement_stack_node->node_ptr = key < remove_parent-> key ? &(remove_parent->left) : &(remove_parent->right);
++ }
++
++ parent_list = replacement_stack_node;
++
++
++ /*
++ * put pointer to replacement node->right into list for balance update
++ * this node will have to be updated with the proper pointer
++ * after we have identified the replacement
++ */
++ replacement_stack_node = (stack_node*)malloc(sizeof(stack_node));
++ if(replacement_stack_node == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ replacement_stack_node->previous = parent_list;
++ replacement_stack_node->direction = -1; /* we always look for replacement to left of this node */
++ parent_list = replacement_stack_node;
++
++ /* find smallest node on right (large) side of tree */
++ replacement_parent = remove_node->right;
++ replacement = replacement_parent->left;
++
++ while((replacement_next = replacement->left) != NULL)
++ {
++ next_parent = (stack_node*)malloc(sizeof(stack_node));
++ if(next_parent == NULL) /* deal with malloc failure */
++ {
++ /* free previous stack nodes to prevent memory leak */
++ free_stack(parent_list);
++ return NULL;
++ }
++
++ next_parent->node_ptr = &(replacement_parent->left);
++ next_parent->previous = parent_list;
++ next_parent->direction = -1; /* we always go left */
++ parent_list = next_parent;
++
++ replacement_parent = replacement;
++ replacement = replacement_next;
++
++ }
++
++ replacement_parent->left = replacement->right;
++
++ replacement->left = remove_node->left;
++ replacement->right = remove_node->right;
++ replacement->balance = remove_node->balance;
++ replacement_stack_node->node_ptr = &(replacement->right);
++ }
++
++ /* insert replacement at proper location in tree */
++ if(remove_node == remove_parent)
++ {
++ map->root = replacement;
++ }
++ else
++ {
++ remove_parent->left = remove_node == remove_parent->left ? replacement : remove_parent->left;
++ remove_parent->right = remove_node == remove_parent->right ? replacement : remove_parent->right;
++ }
++
++
++ /* rebalance tree */
++ previous_parent = parent_list;
++ new_balance = 0;
++ while(previous_parent != NULL && new_balance == 0)
++ {
++ new_balance = rebalance(previous_parent->node_ptr, previous_parent->direction, -1);
++ previous_parent = previous_parent->previous;
++ }
++
++
++
++
++ /*
++ * since we found a value to remove, decrease number of elements in map
++ * set return value to the deleted node's value and free the node
++ */
++ map->num_elements = map->num_elements - 1;
++ value = remove_node->value;
++ free(remove_node);
++ }
++ }
++
++ free_stack(parent_list);
++
++ return value;
++}
++
++
++/* note: returned keys are dynamically allocated, you need to free them! */
++unsigned long* get_sorted_long_map_keys(long_map* map, unsigned long* num_keys_returned)
++{
++ unsigned long* key_list = (unsigned long*)malloc((map->num_elements)*sizeof(unsigned long));
++ unsigned long next_key_index;
++ if(key_list == NULL)
++ {
++ *num_keys_returned = 0;
++ return NULL;
++ }
++ next_key_index = 0;
++ get_sorted_node_keys(map->root, key_list, &next_key_index, 0);
++
++ *num_keys_returned = map->num_elements;
++
++ return key_list;
++}
++
++
++void** get_sorted_long_map_values(long_map* map, unsigned long* num_values_returned)
++{
++ void** value_list = (void**)malloc((map->num_elements+1)*sizeof(void*));
++ unsigned long next_value_index;
++
++ if(value_list == NULL)
++ {
++ *num_values_returned = 0;
++ return NULL;
++ }
++ next_value_index = 0;
++ get_sorted_node_values(map->root, value_list, &next_value_index, 0);
++ value_list[map->num_elements] = NULL; /* since we're dealing with pointers make list null terminated */
++
++ *num_values_returned = map->num_elements;
++ return value_list;
++
++}
++
++
++
++void** destroy_long_map(long_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = destroy_long_map_values(map, destruction_type, num_destroyed);
++ free(map);
++ return return_values;
++}
++
++
++
++void apply_to_every_long_map_value(long_map* map, void (*apply_func)(unsigned long key, void* value))
++{
++ apply_to_every_long_map_node(map->root, apply_func);
++}
++void apply_to_every_string_map_value(string_map* map, void (*apply_func)(char* key, void* value))
++{
++ apply_to_every_string_map_node( (map->lm).root, map->store_keys, apply_func);
++}
++
++
++/***************************************************
++ * internal utility function definitions
++ ***************************************************/
++static void free_stack(stack_node* stack)
++{
++ while(stack != NULL)
++ {
++ stack_node* prev_node = stack;
++ stack = prev_node->previous;
++ free(prev_node);
++ }
++
++}
++
++static void** destroy_long_map_values(long_map* map, int destruction_type, unsigned long* num_destroyed)
++{
++ void** return_values = NULL;
++ unsigned long return_index = 0;
++
++ *num_destroyed = 0;
++
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values = (void**)malloc((map->num_elements+1)*sizeof(void*));
++ if(return_values == NULL) /* deal with malloc failure */
++ {
++ destruction_type = DESTROY_MODE_IGNORE_VALUES; /* could cause memory leak, but there's no other way to be sure we won't seg fault */
++ }
++ else
++ {
++ return_values[map->num_elements] = NULL;
++ }
++ }
++ while(map->num_elements > 0)
++ {
++ unsigned long smallest_key;
++ void* removed_value = remove_smallest_long_map_element(map, &smallest_key);
++ if(destruction_type == DESTROY_MODE_RETURN_VALUES)
++ {
++ return_values[return_index] = removed_value;
++ }
++ if(destruction_type == DESTROY_MODE_FREE_VALUES)
++ {
++ free(removed_value);
++ }
++ return_index++;
++ *num_destroyed = *num_destroyed + 1;
++ }
++ return return_values;
++}
++
++static void apply_to_every_long_map_node(long_map_node* node, void (*apply_func)(unsigned long key, void* value))
++{
++ if(node != NULL)
++ {
++ apply_to_every_long_map_node(node->left, apply_func);
++
++ apply_func(node->key, node->value);
++
++ apply_to_every_long_map_node(node->right, apply_func);
++ }
++}
++static void apply_to_every_string_map_node(long_map_node* node, unsigned char has_key, void (*apply_func)(char* key, void* value))
++{
++ if(node != NULL)
++ {
++ apply_to_every_string_map_node(node->left, has_key, apply_func);
++
++ if(has_key)
++ {
++ string_map_key_value* kv = (string_map_key_value*)(node->value);
++ apply_func(kv->key, kv->value);
++ }
++ else
++ {
++ apply_func(NULL, node->value);
++ }
++ apply_to_every_string_map_node(node->right, has_key, apply_func);
++ }
++}
++
++
++
++static void get_sorted_node_keys(long_map_node* node, unsigned long* key_list, unsigned long* next_key_index, int depth)
++{
++ if(node != NULL)
++ {
++ get_sorted_node_keys(node->left, key_list, next_key_index, depth+1);
++
++ key_list[ *next_key_index ] = node->key;
++ (*next_key_index)++;
++
++ get_sorted_node_keys(node->right, key_list, next_key_index, depth+1);
++ }
++}
++
++static void get_sorted_node_values(long_map_node* node, void** value_list, unsigned long* next_value_index, int depth)
++{
++ if(node != NULL)
++ {
++ get_sorted_node_values(node->left, value_list, next_value_index, depth+1);
++
++ value_list[ *next_value_index ] = node->value;
++ (*next_value_index)++;
++
++ get_sorted_node_values(node->right, value_list, next_value_index, depth+1);
++ }
++}
++
++
++
++/*
++ * direction = -1 indicates left subtree updated, direction = 1 for right subtree
++ * update_op = -1 indicates delete node, update_op = 1 for insert node
++ */
++static signed char rebalance (long_map_node** n, signed char direction, signed char update_op)
++{
++ /*
++ printf( "original: key = %ld, balance = %d, update_op=%d, direction=%d\n", (*n)->key, (*n)->balance, update_op, direction);
++ */
++
++ (*n)->balance = (*n)->balance + (update_op*direction);
++
++ if( (*n)->balance < -1)
++ {
++ if((*n)->left->balance < 0)
++ {
++ rotate_right(n);
++ (*n)->right->balance = 0;
++ (*n)->balance = 0;
++ }
++ else if((*n)->left->balance == 0)
++ {
++ rotate_right(n);
++ (*n)->right->balance = -1;
++ (*n)->balance = 1;
++ }
++ else if((*n)->left->balance > 0)
++ {
++ rotate_left( &((*n)->left) );
++ rotate_right(n);
++ /*
++ if( (*n)->balance < 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 1;
++ }
++ else if( (*n)->balance == 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 0;
++ }
++ else if( (*n)->balance > 0 )
++ {
++ (*n)->left->balance = -1;
++ (*n)->right->balance = 0;
++ }
++ */
++ (*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++ (*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++ (*n)->balance = 0;
++ }
++ }
++ if( (*n)->balance > 1)
++ {
++ if((*n)->right->balance > 0)
++ {
++ rotate_left(n);
++ (*n)->left->balance = 0;
++ (*n)->balance = 0;
++ }
++ else if ((*n)->right->balance == 0)
++ {
++ rotate_left(n);
++ (*n)->left->balance = 1;
++ (*n)->balance = -1;
++ }
++ else if((*n)->right->balance < 0)
++ {
++ rotate_right( &((*n)->right) );
++ rotate_left(n);
++ /*
++ if( (*n)->balance < 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 1;
++ }
++ else if( (*n)->balance == 0 )
++ {
++ (*n)->left->balance = 0;
++ (*n)->right->balance = 0;
++ }
++ else if( (*n)->balance > 0 )
++ {
++ (*n)->left->balance = -1;
++ (*n)->right->balance = 0;
++ }
++ */
++ (*n)->left->balance = (*n)->balance > 0 ? -1 : 0;
++ (*n)->right->balance = (*n)->balance < 0 ? 1 : 0;
++ (*n)->balance = 0;
++ }
++ }
++
++ /*
++ printf( "key = %ld, balance = %d\n", (*n)->key, (*n)->balance);
++ */
++
++ return (*n)->balance;
++}
++
++
++static void rotate_right (long_map_node** parent)
++{
++ long_map_node* old_parent = *parent;
++ long_map_node* pivot = old_parent->left;
++ old_parent->left = pivot->right;
++ pivot->right = old_parent;
++
++ *parent = pivot;
++}
++
++static void rotate_left (long_map_node** parent)
++{
++ long_map_node* old_parent = *parent;
++ long_map_node* pivot = old_parent->right;
++ old_parent->right = pivot->left;
++ pivot->left = old_parent;
++
++ *parent = pivot;
++}
++
++
++
++/***************************************************************************
++ * This algorithm was created for the sdbm database library (a public-domain
++ * reimplementation of ndbm) and seems to work relatively well in
++ * scrambling bits
++ *
++ *
++ * This code was derived from code found at:
++ * http://www.cse.yorku.ca/~oz/hash.html
++ ***************************************************************************/
++static unsigned long sdbm_string_hash(const char *key)
++{
++ unsigned long hashed_key = 0;
++
++ int index = 0;
++ unsigned int nextch;
++ while(key[index] != '\0')
++ {
++ nextch = key[index];
++ hashed_key = nextch + (hashed_key << 6) + (hashed_key << 16) - hashed_key;
++ index++;
++ }
++ return hashed_key;
++}
++
++
+--- linux.orig/net/ipv4/netfilter/Makefile 2015-06-15 00:19:31.000000000 +0800
++++ linux.new/net/ipv4/netfilter/Makefile 2015-06-19 03:02:55.425667617 +0800
+@@ -53,6 +53,10 @@
+ obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
+
+ # matches
++obj-$(CONFIG_IP_NF_MATCH_BANDWIDTH) += ipt_bandwidth.o
++obj-$(CONFIG_IP_NF_MATCH_TIMERANGE) += ipt_timerange.o
++obj-$(CONFIG_IP_NF_MATCH_WEBMON) += ipt_webmon.o
++obj-$(CONFIG_IP_NF_MATCH_WEBURL) += ipt_weburl.o
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o
+
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/include/linux/netfilter_ipv4/ipt_weburl.h 2015-06-19 03:02:54.757695519 +0800
+@@ -0,0 +1,45 @@
++/* weburl -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_WEBURL_H
++#define _IPT_WEBURL_H
++
++
++#define MAX_TEST_STR 1024
++
++#define WEBURL_CONTAINS_TYPE 1
++#define WEBURL_REGEX_TYPE 2
++#define WEBURL_EXACT_TYPE 3
++#define WEBURL_ALL_PART 4
++#define WEBURL_DOMAIN_PART 5
++#define WEBURL_PATH_PART 6
++
++struct ipt_weburl_info
++{
++ char test_str[MAX_TEST_STR];
++ unsigned char match_type;
++ unsigned char match_part;
++ unsigned char invert;
++};
++#endif /*_IPT_WEBURL_H*/
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/include/linux/netfilter_ipv4/ipt_timerange.h 2015-06-19 03:02:55.289673298 +0800
+@@ -0,0 +1,43 @@
++/* timerange -- An iptables extension to match multiple timeranges within a week
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_TIMERANGE_H
++#define _IPT_TIMERANGE_H
++
++
++#define RANGE_LENGTH 51
++
++#define HOURS 1
++#define WEEKDAYS 2
++#define DAYS_HOURS (HOURS+WEEKDAYS)
++#define WEEKLY_RANGE 4
++
++
++struct ipt_timerange_info
++{
++ long ranges[RANGE_LENGTH];
++ char days[7];
++ char type;
++ unsigned char invert;
++};
++#endif /*_IPT_TIMERANGE_H*/
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/include/linux/netfilter_ipv4/ipt_bandwidth.h 2015-06-19 03:02:55.421667784 +0800
+@@ -0,0 +1,106 @@
++/* bandwidth -- An iptables extension for bandwidth monitoring/control
++ * Can be used to efficiently monitor bandwidth and/or implement bandwidth quotas
++ * Can be queried using the iptbwctl userspace library
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _IPT_BANDWIDTH_H
++#define _IPT_BANDWIDTH_H
++
++/*flags -- first three don't map to parameters the rest do */
++#define BANDWIDTH_INITIALIZED 1
++#define BANDWIDTH_REQUIRES_SUBNET 2
++#define BANDWIDTH_SUBNET 4
++#define BANDWIDTH_CMP 8
++#define BANDWIDTH_CURRENT 16
++#define BANDWIDTH_RESET_INTERVAL 32
++#define BANDWIDTH_RESET_TIME 64
++#define BANDWIDTH_LAST_BACKUP 128
++
++
++/* parameter defs that don't map to flag bits */
++#define BANDWIDTH_TYPE 70
++#define BANDWIDTH_ID 71
++#define BANDWIDTH_GT 72
++#define BANDWIDTH_LT 73
++#define BANDWIDTH_MONITOR 74
++#define BANDWIDTH_CHECK 75
++#define BANDWIDTH_CHECK_NOSWAP 76
++#define BANDWIDTH_CHECK_SWAP 77
++#define BANDWIDTH_NUM_INTERVALS 78
++
++/* possible reset intervals */
++#define BANDWIDTH_MINUTE 80
++#define BANDWIDTH_HOUR 81
++#define BANDWIDTH_DAY 82
++#define BANDWIDTH_WEEK 83
++#define BANDWIDTH_MONTH 84
++#define BANDWIDTH_NEVER 85
++
++/* possible monitoring types */
++#define BANDWIDTH_COMBINED 90
++#define BANDWIDTH_INDIVIDUAL_SRC 91
++#define BANDWIDTH_INDIVIDUAL_DST 92
++#define BANDWIDTH_INDIVIDUAL_LOCAL 93
++#define BANDWIDTH_INDIVIDUAL_REMOTE 94
++
++
++
++/* socket id parameters (for userspace i/o) */
++#define BANDWIDTH_SET 2048
++#define BANDWIDTH_GET 2049
++
++/* max id length */
++#define BANDWIDTH_MAX_ID_LENGTH 50
++
++/* 4 bytes for total number of entries, 100 entries of 12 bytes each, + 1 byte indicating whether all have been dumped */
++#define BANDWIDTH_QUERY_LENGTH 1205
++#define BANDWIDTH_ENTRY_LENGTH 12
++
++
++struct ipt_bandwidth_info
++{
++ char id[BANDWIDTH_MAX_ID_LENGTH];
++ unsigned char type;
++ unsigned char check_type;
++ uint32_t local_subnet;
++ uint32_t local_subnet_mask;
++
++ unsigned char cmp;
++ unsigned char reset_is_constant_interval;
++ time_t reset_interval; //specific fixed type (see above) or interval length in seconds
++ time_t reset_time; //seconds from start of month/week/day/hour/minute to do reset, or start point of interval if it is a constant interval
++ uint64_t bandwidth_cutoff;
++ uint64_t current_bandwidth;
++ time_t next_reset;
++ time_t previous_reset;
++ time_t last_backup_time;
++
++ uint32_t num_intervals_to_save;
++
++
++ unsigned long hashed_id;
++ void* iam;
++ uint64_t* combined_bw;
++ struct ipt_bandwidth_info* non_const_self;
++ unsigned long* ref_count;
++
++
++};
++#endif /*_IPT_BANDWIDTH_H*/
+--- /dev/null 2015-06-18 18:05:56.361705293 +0800
++++ linux.new/include/linux/netfilter_ipv4/ipt_webmon.h 2015-06-19 03:02:55.209676639 +0800
+@@ -0,0 +1,63 @@
++/* webmon -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_WEBMON_H
++#define _IPT_WEBMON_H
++
++
++#define WEBMON_MAX_IPS 256
++#define WEBMON_MAX_IP_RANGES 16
++
++#define WEBMON_EXCLUDE 1
++#define WEBMON_INCLUDE 2
++
++#define WEBMON_MAXDOMAIN 4
++#define WEBMON_MAXSEARCH 8
++
++#define WEBMON_DOMAIN 16
++#define WEBMON_SEARCH 32
++
++
++#define WEBMON_SET 3064
++
++struct ipt_webmon_ip_range
++{
++ uint32_t start;
++ uint32_t end;
++};
++
++struct ipt_webmon_info
++{
++ uint32_t max_domains;
++ uint32_t max_searches;
++ uint32_t exclude_ips[WEBMON_MAX_IPS];
++ struct ipt_webmon_ip_range exclude_ranges[WEBMON_MAX_IP_RANGES];
++ uint32_t num_exclude_ips;
++ uint32_t num_exclude_ranges;
++ unsigned char exclude_type;
++ uint32_t* ref_count;
++
++};
++
++#endif /*_IPT_WEBMON_H*/
Index: package/kernel/linux/modules/netfilter.mk
===================================================================
--- package/kernel/linux/modules/netfilter.mk (revision 46316)
+++ package/kernel/linux/modules/netfilter.mk (working copy)
@@ -368,7 +368,29 @@
$(eval $(call KernelPackage,nf-nathelper-extra))
+define KernelPackage/ipt-imq
+ TITLE:=Intermediate Queueing support
+ KCONFIG:= \
+ CONFIG_IMQ \
+ CONFIG_IMQ_BEHAVIOR_BA=y \
+ CONFIG_IMQ_NUM_DEVS=2 \
+ CONFIG_NETFILTER_XT_TARGET_IMQ
+ FILES:= \
+ $(LINUX_DIR)/drivers/net/imq.$(LINUX_KMOD_SUFFIX) \
+ $(foreach mod,$(IPT_IMQ-m),$(LINUX_DIR)/net/$(mod).$(LINUX_KMOD_SUFFIX))
+ AUTOLOAD:=$(call AutoLoad,46,$(notdir \
+ imq \
+ $(IPT_IMQ-m) \
+ ))
+ $(call AddDepends/ipt)
+endef
+define KernelPackage/ipt-imq/description
+ Kernel support for Intermediate Queueing devices
+endef
+
+$(eval $(call KernelPackage,ipt-imq))
+
define KernelPackage/ipt-ulog
TITLE:=Module for user-space packet logging
KCONFIG:=$(KCONFIG_IPT_ULOG)
@@ -849,3 +871,47 @@
$(eval $(call KernelPackage,nft-nat6))
+
+
+define KernelPackage/ipt-weburl
+ SUBMENU:=$(NF_MENU)
+ TITLE:=weburl
+ KCONFIG:=$(KCONFIG_IPT_WEBURL)
+ FILES:=$(LINUX_DIR)/net/ipv4/netfilter/*weburl*.$(LINUX_KMOD_SUFFIX)
+ AUTOLOAD:=$(call AutoLoad,45,$(notdir $(IPT_WEBURL-m)))
+ DEPENDS:= kmod-ipt-core
+endef
+$(eval $(call KernelPackage,ipt-weburl))
+
+
+define KernelPackage/ipt-webmon
+ SUBMENU:=$(NF_MENU)
+ TITLE:=webmon
+ KCONFIG:=$(KCONFIG_IPT_WEBMON)
+ FILES:=$(LINUX_DIR)/net/ipv4/netfilter/*webmon*.$(LINUX_KMOD_SUFFIX)
+ AUTOLOAD:=$(call AutoLoad,45,$(notdir $(IPT_WEBMON-m)))
+ DEPENDS:= kmod-ipt-core
+endef
+$(eval $(call KernelPackage,ipt-webmon))
+
+
+define KernelPackage/ipt-timerange
+ SUBMENU:=$(NF_MENU)
+ TITLE:=timerange
+ KCONFIG:=$(KCONFIG_IPT_TIMERANGE)
+ FILES:=$(LINUX_DIR)/net/ipv4/netfilter/*timerange*.$(LINUX_KMOD_SUFFIX)
+ AUTOLOAD:=$(call AutoLoad,45,$(notdir $(IPT_TIMERANGE-m)))
+ DEPENDS:= kmod-ipt-core
+endef
+$(eval $(call KernelPackage,ipt-timerange))
+
+
+define KernelPackage/ipt-bandwidth
+ SUBMENU:=$(NF_MENU)
+ TITLE:=bandwidth
+ KCONFIG:=$(KCONFIG_IPT_BANDWIDTH)
+ FILES:=$(LINUX_DIR)/net/ipv4/netfilter/*bandwidth*.$(LINUX_KMOD_SUFFIX)
+ AUTOLOAD:=$(call AutoLoad,45,$(notdir $(IPT_BANDWIDTH-m)))
+ DEPENDS:= kmod-ipt-core
+endef
+$(eval $(call KernelPackage,ipt-bandwidth))
Index: package/network/services/ppp/files/ppp.sh
===================================================================
--- package/network/services/ppp/files/ppp.sh (revision 46316)
+++ package/network/services/ppp/files/ppp.sh (working copy)
@@ -120,6 +120,13 @@
[ -n "$connect" ] || json_get_var connect connect
[ -n "$disconnect" ] || json_get_var disconnect disconnect
+	# By 蝈蝈: preparation for synchronized concurrent PPPoE dialing (sync-dial)
+ [ "$(uci get syncdial.config.enabled)" == "1" ] && {
+ ppp_if_cnt=$(cat /etc/config/network | grep -c "proto 'pppoe'")
+ syncppp_option="syncppp $ppp_if_cnt"
+ shellsync $ppp_if_cnt 10
+ }
+
proto_run_command "$config" /usr/sbin/pppd \
nodetach ipparam "$config" \
ifname "$pppname" \
@@ -137,7 +144,8 @@
ip-down-script /lib/netifd/ppp-down \
ipv6-down-script /lib/netifd/ppp-down \
${mtu:+mtu $mtu mru $mtu} \
- "$@" $pppd_options
+ $syncppp_option \
+ "$@" $pppd_options
}
ppp_generic_teardown() {
Index: package/network/utils/iptables/Makefile
===================================================================
--- package/network/utils/iptables/Makefile (revision 46316)
+++ package/network/utils/iptables/Makefile (working copy)
@@ -128,6 +128,19 @@
endef
+define Package/iptables-mod-imq
+$(call Package/iptables/Module, +kmod-ipt-imq)
+ TITLE:=IMQ support
+endef
+
+define Package/iptables-mod-imq/description
+iptables extension for IMQ support.
+
+ Targets:
+ - IMQ
+
+endef
+
define Package/iptables-mod-ipopt
$(call Package/iptables/Module, +kmod-ipt-ipopt)
TITLE:=IP/Packet option extensions
@@ -546,3 +559,31 @@
$(eval $(call BuildPackage,libip4tc))
$(eval $(call BuildPackage,libip6tc))
$(eval $(call BuildPackage,libxtables))
+$(eval $(call BuildPlugin,iptables-mod-imq,$(IPT_IMQ-m)))
+
+define Package/iptables-mod-weburl
+$(call Package/iptables/Module, +kmod-ipt-weburl)
+ TITLE:=weburl
+endef
+$(eval $(call BuildPlugin,iptables-mod-weburl,$(IPT_WEBURL-m)))
+
+
+define Package/iptables-mod-webmon
+$(call Package/iptables/Module, +kmod-ipt-webmon)
+ TITLE:=webmon
+endef
+$(eval $(call BuildPlugin,iptables-mod-webmon,$(IPT_WEBMON-m)))
+
+
+define Package/iptables-mod-timerange
+$(call Package/iptables/Module, +kmod-ipt-timerange)
+ TITLE:=timerange
+endef
+$(eval $(call BuildPlugin,iptables-mod-timerange,$(IPT_TIMERANGE-m)))
+
+
+define Package/iptables-mod-bandwidth
+$(call Package/iptables/Module, +kmod-ipt-bandwidth)
+ TITLE:=bandwidth
+endef
+$(eval $(call BuildPlugin,iptables-mod-bandwidth,$(IPT_BANDWIDTH-m)))
Index: package/network/utils/iptables/patches/300-imq.patch
===================================================================
--- package/network/utils/iptables/patches/300-imq.patch (revision 0)
+++ package/network/utils/iptables/patches/300-imq.patch (working copy)
@@ -0,0 +1,141 @@
+diff -Naur iptables-1.4.12.1/extensions/libxt_IMQ.c iptables-1.4.12.1-imq/extensions/libxt_IMQ.c
+--- iptables-1.4.12.1/extensions/libxt_IMQ.c 1970-01-01 02:00:00.000000000 +0200
++++ iptables-1.4.12.1-imq/extensions/libxt_IMQ.c 2011-09-30 13:53:21.000000000 +0300
+@@ -0,0 +1,105 @@
++/* Shared library add-on to iptables to add IMQ target support. */
++#include <stdio.h>
++#include <string.h>
++#include <stdlib.h>
++#include <getopt.h>
++
++#include <xtables.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_IMQ.h>
++
++/* Function which prints out usage message. */
++static void IMQ_help(void)
++{
++ printf(
++"IMQ target options:\n"
++" --todev <N> enqueue to imq<N>, defaults to 0\n");
++
++}
++
++static struct option IMQ_opts[] = {
++ { "todev", 1, 0, '1' },
++ { 0 }
++};
++
++/* Initialize the target. */
++static void IMQ_init(struct xt_entry_target *t)
++{
++ struct xt_imq_info *mr = (struct xt_imq_info*)t->data;
++
++ mr->todev = 0;
++}
++
++/* Function which parses command options; returns true if it
++ ate an option */
++static int IMQ_parse(int c, char **argv, int invert, unsigned int *flags,
++ const void *entry, struct xt_entry_target **target)
++{
++ struct xt_imq_info *mr = (struct xt_imq_info*)(*target)->data;
++
++ switch(c) {
++ case '1':
++/* if (xtables_check_inverse(optarg, &invert, NULL, 0, argv))
++ xtables_error(PARAMETER_PROBLEM,
++ "Unexpected `!' after --todev");
++*/
++ mr->todev=atoi(optarg);
++ break;
++
++ default:
++ return 0;
++ }
++ return 1;
++}
++
++/* Prints out the targinfo. */
++static void IMQ_print(const void *ip,
++ const struct xt_entry_target *target,
++ int numeric)
++{
++ struct xt_imq_info *mr = (struct xt_imq_info*)target->data;
++
++ printf("IMQ: todev %u ", mr->todev);
++}
++
++/* Saves the union ipt_targinfo in parsable form to stdout. */
++static void IMQ_save(const void *ip, const struct xt_entry_target *target)
++{
++ struct xt_imq_info *mr = (struct xt_imq_info*)target->data;
++
++ printf(" --todev %u", mr->todev);
++}
++
++static struct xtables_target imq_target = {
++ .name = "IMQ",
++ .version = XTABLES_VERSION,
++ .family = NFPROTO_IPV4,
++ .size = XT_ALIGN(sizeof(struct xt_imq_info)),
++ .userspacesize = XT_ALIGN(sizeof(struct xt_imq_info)),
++ .help = IMQ_help,
++ .init = IMQ_init,
++ .parse = IMQ_parse,
++ .print = IMQ_print,
++ .save = IMQ_save,
++ .extra_opts = IMQ_opts,
++};
++
++static struct xtables_target imq_target6 = {
++ .name = "IMQ",
++ .version = XTABLES_VERSION,
++ .family = NFPROTO_IPV6,
++ .size = XT_ALIGN(sizeof(struct xt_imq_info)),
++ .userspacesize = XT_ALIGN(sizeof(struct xt_imq_info)),
++ .help = IMQ_help,
++ .init = IMQ_init,
++ .parse = IMQ_parse,
++ .print = IMQ_print,
++ .save = IMQ_save,
++ .extra_opts = IMQ_opts,
++};
++
++// void __attribute((constructor)) nf_ext_init(void){
++void _init(void){
++ xtables_register_target(&imq_target);
++ xtables_register_target(&imq_target6);
++}
+diff -Naur iptables-1.4.12.1/extensions/libxt_IMQ.man iptables-1.4.12.1-imq/extensions/libxt_IMQ.man
+--- iptables-1.4.12.1/extensions/libxt_IMQ.man 1970-01-01 02:00:00.000000000 +0200
++++ iptables-1.4.12.1-imq/extensions/libxt_IMQ.man 2011-09-30 13:53:21.000000000 +0300
+@@ -0,0 +1,15 @@
++This target is used to redirect the traffic to the IMQ driver and you can apply
++QoS rules like HTB or CBQ.
++For example you can select only traffic coming in from a specific interface or
++traffic that is going out on a specific interface.
++It also permits capturing the traffic BEFORE NAT in the case of outgoing traffic
++or AFTER NAT in the case of incoming traffic.
++.TP
++\fB\-\-todev\fP \fIvalue\fP
++Set the IMQ interface where to send this traffic
++.TP
++Example:
++.TP
++Redirect incoming traffic from interface eth0 to imq0 and outgoing traffic to imq1:
++iptables \-t mangle \-A FORWARD \-i eth0 \-j IMQ \-\-todev 0
++iptables \-t mangle \-A FORWARD \-o eth0 \-j IMQ \-\-todev 1
+diff -Naur iptables-1.4.12.1/include/linux/netfilter/xt_IMQ.h iptables-1.4.12.1-imq/include/linux/netfilter/xt_IMQ.h
+--- iptables-1.4.12.1/include/linux/netfilter/xt_IMQ.h 1970-01-01 02:00:00.000000000 +0200
++++ iptables-1.4.12.1-imq/include/linux/netfilter/xt_IMQ.h 2011-09-30 13:53:21.000000000 +0300
+@@ -0,0 +1,9 @@
++#ifndef _XT_IMQ_H
++#define _XT_IMQ_H
++
++struct xt_imq_info {
++ unsigned int todev; /* target imq device */
++};
++
++#endif /* _XT_IMQ_H */
++
Index: package/network/utils/iptables/patches/650-custom_netfilter_match_modules.patch
===================================================================
--- package/network/utils/iptables/patches/650-custom_netfilter_match_modules.patch (revision 0)
+++ package/network/utils/iptables/patches/650-custom_netfilter_match_modules.patch (working copy)
@@ -0,0 +1,2824 @@
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/extensions/.weburl-test 2014-12-20 16:55:22.302445944 +0800
+@@ -0,0 +1,2 @@
++#!/bin/sh
++[ -f $KERNEL_DIR/include/linux/netfilter_ipv4/ipt_weburl.h ] && echo weburl
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/extensions/libipt_bandwidth.c 2014-12-20 16:55:22.839446027 +0800
+@@ -0,0 +1,657 @@
++/* bandwidth -- An iptables extension for bandwidth monitoring/control
++ * Can be used to efficiently monitor bandwidth and/or implement bandwidth quotas
++ * Can be queried using the iptbwctl userspace library
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++#include <stdio.h>
++#include <netdb.h>
++#include <string.h>
++#include <stdlib.h>
++#include <getopt.h>
++#include <unistd.h>
++#include <time.h>
++#include <sys/time.h>
++#include <limits.h>
++
++/*
++ * in iptables 1.4.0 and higher, iptables.h includes xtables.h, which
++ * we can use to check whether we need to deal with the new requirements
++ * in pre-processor directives below
++ */
++#include <iptables.h>
++#include <linux/netfilter_ipv4/ipt_bandwidth.h>
++
++#ifdef _XTABLES_H
++ #define iptables_rule_match xtables_rule_match
++ #define iptables_match xtables_match
++ #define iptables_target xtables_target
++ #define ipt_tryload xt_tryload
++#endif
++
++/*
++ * XTABLES_VERSION_CODE is only defined in versions 1.4.1 and later, which
++ * also require the use of xtables_register_match
++ *
++ * Version 1.4.0 uses register_match like previous versions
++ */
++#ifdef XTABLES_VERSION_CODE
++ #define register_match xtables_register_match
++#endif
++
++
++
++int get_minutes_west(void);
++void set_kernel_timezone(void);
++int parse_sub(char* subnet_string, uint32_t* subnet, uint32_t* subnet_mask);
++static unsigned long get_pow(unsigned long base, unsigned long pow);
++static void param_problem_exit_error(char* msg);
++
++
++/* Function which prints out usage message. */
++static void help(void)
++{
++ printf("bandwidth options:\n");
++ printf(" --id [unique identifier for querying bandwidth]\n");
++ printf(" --type [combined|individual_src|individual_dst|individual_local|individual_remote]\n");
++ printf(" --subnet [a.b.c.d/mask] (0 < mask < 32)\n");
++ printf(" --greater_than [BYTES]\n");
++ printf(" --less_than [BYTES]\n");
++ printf(" --current_bandwidth [BYTES]\n");
++ printf(" --reset_interval [minute|hour|day|week|month]\n");
++ printf(" --reset_time [OFFSET IN SECONDS]\n");
++ printf(" --intervals_to_save [NUMBER OF PREVIOUS INTERVALS TO STORE IN MEMORY]\n");
++ printf(" --last_backup_time [UTC SECONDS SINCE 1970]\n");
++ printf(" --check Check another bandwidth rule without incrementing it\n");
++ printf(" --check_with_src_dst_swap Check another bandwidth rule without incrementing it, swapping src & dst ips for check\n");
++}
++
++static struct option opts[] =
++{
++ { .name = "id", .has_arg = 1, .flag = 0, .val = BANDWIDTH_ID },
++ { .name = "type", .has_arg = 1, .flag = 0, .val = BANDWIDTH_TYPE },
++ { .name = "subnet", .has_arg = 1, .flag = 0, .val = BANDWIDTH_SUBNET },
++ { .name = "greater_than", .has_arg = 1, .flag = 0, .val = BANDWIDTH_GT },
++ { .name = "less_than", .has_arg = 1, .flag = 0, .val = BANDWIDTH_LT },
++ { .name = "current_bandwidth", .has_arg = 1, .flag = 0, .val = BANDWIDTH_CURRENT },
++ { .name = "reset_interval", .has_arg = 1, .flag = 0, .val = BANDWIDTH_RESET_INTERVAL },
++ { .name = "reset_time", .has_arg = 1, .flag = 0, .val = BANDWIDTH_RESET_TIME },
++ { .name = "intervals_to_save", .has_arg = 1, .flag = 0, .val = BANDWIDTH_NUM_INTERVALS },
++ { .name = "last_backup_time", .has_arg = 1, .flag = 0, .val = BANDWIDTH_LAST_BACKUP},
++ { .name = "check", .has_arg = 0, .flag = 0, .val = BANDWIDTH_CHECK_NOSWAP },
++ { .name = "check_with_src_dst_swap", .has_arg = 0, .flag = 0, .val = BANDWIDTH_CHECK_SWAP },
++ { .name = 0 }
++};
++
++
++/* Function which parses command options; returns true if it
++ ate an option */
++static int parse( int c,
++ char **argv,
++ int invert,
++ unsigned int *flags,
++#ifdef _XTABLES_H
++ const void *entry,
++#else
++ const struct ipt_entry *entry,
++ unsigned int *nfcache,
++#endif
++ struct ipt_entry_match **match
++ )
++{
++ struct ipt_bandwidth_info *info = (struct ipt_bandwidth_info *)(*match)->data;
++ int valid_arg = 0;
++ long int num_read;
++ uint64_t read_64;
++ time_t read_time;
++
++ /* set defaults first time we get here */
++ if(*flags == 0)
++ {
++ /* generate random id */
++ srand ( time(NULL) );
++ unsigned long id_num = rand();
++ sprintf(info->id, "%lu", id_num);
++
++ info->type = BANDWIDTH_COMBINED;
++ info->check_type = BANDWIDTH_CHECK_NOSWAP;
++ info->local_subnet = 0;
++ info->local_subnet_mask = 0;
++ info->cmp = BANDWIDTH_MONITOR; /* don't test greater/less than, just monitor bandwidth */
++ info->current_bandwidth = 0;
++ info->reset_is_constant_interval = 0;
++ info->reset_interval = BANDWIDTH_NEVER;
++ info->reset_time=0;
++ info->last_backup_time = 0;
++ info->next_reset = 0;
++
++ info->num_intervals_to_save=0;
++
++ info->non_const_self = NULL;
++ info->ref_count = NULL;
++
++ *flags = *flags + BANDWIDTH_INITIALIZED;
++ }
++
++ switch (c)
++ {
++ case BANDWIDTH_ID:
++ if(strlen(optarg) < BANDWIDTH_MAX_ID_LENGTH)
++ {
++ sprintf(info->id, "%s", optarg);
++ valid_arg = 1;
++ }
++ c=0;
++ break;
++ case BANDWIDTH_TYPE:
++ valid_arg = 1;
++ if(strcmp(optarg, "combined") == 0)
++ {
++ info->type = BANDWIDTH_COMBINED;
++ }
++ else if(strcmp(optarg, "individual_src") == 0)
++ {
++ info->type = BANDWIDTH_INDIVIDUAL_SRC;
++ }
++ else if(strcmp(optarg, "individual_dst") == 0)
++ {
++ info->type = BANDWIDTH_INDIVIDUAL_DST;
++ }
++ else if(strcmp(optarg, "individual_local") == 0)
++ {
++ info->type = BANDWIDTH_INDIVIDUAL_LOCAL;
++ *flags = *flags + BANDWIDTH_REQUIRES_SUBNET;
++ }
++ else if(strcmp(optarg, "individual_remote") == 0)
++ {
++ info->type = BANDWIDTH_INDIVIDUAL_REMOTE;
++ *flags = *flags + BANDWIDTH_REQUIRES_SUBNET;
++ }
++ else
++ {
++ valid_arg = 0;
++ }
++
++ c=0;
++ break;
++
++ case BANDWIDTH_SUBNET:
++ valid_arg = parse_sub(optarg, &(info->local_subnet), &(info->local_subnet_mask));
++ break;
++ case BANDWIDTH_LT:
++ num_read = sscanf(argv[optind-1], "%lld", &read_64);
++ if(num_read > 0 && (*flags & BANDWIDTH_CMP) == 0)
++ {
++ info->cmp = BANDWIDTH_LT;
++ info->bandwidth_cutoff = read_64;
++ valid_arg = 1;
++ }
++ c = BANDWIDTH_CMP; //only need one flag for less_than/greater_than
++ break;
++ case BANDWIDTH_GT:
++ num_read = sscanf(argv[optind-1], "%lld", &read_64);
++ if(num_read > 0 && (*flags & BANDWIDTH_CMP) == 0)
++ {
++ info->cmp = BANDWIDTH_GT;
++ info->bandwidth_cutoff = read_64;
++ valid_arg = 1;
++ }
++ c = BANDWIDTH_CMP; //only need one flag for less_than/greater_than
++ break;
++ case BANDWIDTH_CHECK_NOSWAP:
++ if( (*flags & BANDWIDTH_CMP) == 0 )
++ {
++ info->cmp = BANDWIDTH_CHECK;
++ info->check_type = BANDWIDTH_CHECK_NOSWAP;
++ valid_arg = 1;
++ }
++ c = BANDWIDTH_CMP;
++ break;
++ case BANDWIDTH_CHECK_SWAP:
++ if( (*flags & BANDWIDTH_CMP) == 0 )
++ {
++ info->cmp = BANDWIDTH_CHECK;
++ info->check_type = BANDWIDTH_CHECK_SWAP;
++ valid_arg = 1;
++ }
++ c = BANDWIDTH_CMP;
++ break;
++ case BANDWIDTH_CURRENT:
++ num_read = sscanf(argv[optind-1], "%lld", &read_64);
++ if(num_read > 0 )
++ {
++ info->current_bandwidth = read_64;
++ valid_arg = 1;
++ }
++ break;
++ case BANDWIDTH_RESET_INTERVAL:
++ valid_arg = 1;
++ if(strcmp(argv[optind-1],"minute") ==0)
++ {
++ info->reset_interval = BANDWIDTH_MINUTE;
++ info->reset_is_constant_interval = 0;
++ }
++ else if(strcmp(argv[optind-1],"hour") ==0)
++ {
++ info->reset_interval = BANDWIDTH_HOUR;
++ info->reset_is_constant_interval = 0;
++ }
++ else if(strcmp(argv[optind-1],"day") ==0)
++ {
++ info->reset_interval = BANDWIDTH_DAY;
++ info->reset_is_constant_interval = 0;
++ }
++ else if(strcmp(argv[optind-1],"week") ==0)
++ {
++ info->reset_interval = BANDWIDTH_WEEK;
++ info->reset_is_constant_interval = 0;
++ }
++ else if(strcmp(argv[optind-1],"month") ==0)
++ {
++ info->reset_interval = BANDWIDTH_MONTH;
++ info->reset_is_constant_interval = 0;
++ }
++ else if(strcmp(argv[optind-1],"never") ==0)
++ {
++ info->reset_interval = BANDWIDTH_NEVER;
++ }
++ else if(sscanf(argv[optind-1], "%ld", &read_time) > 0)
++ {
++ info->reset_interval = read_time;
++ info->reset_is_constant_interval = 1;
++ }
++ else
++ {
++ valid_arg = 0;
++ }
++ break;
++ case BANDWIDTH_NUM_INTERVALS:
++ if( sscanf(argv[optind-1], "%ld", &num_read) > 0)
++ {
++ info->num_intervals_to_save = num_read;
++ valid_arg=1;
++ }
++ c=0;
++ break;
++ case BANDWIDTH_RESET_TIME:
++ num_read = sscanf(argv[optind-1], "%ld", &read_time);
++ if(num_read > 0 )
++ {
++ info->reset_time = read_time;
++ valid_arg = 1;
++ }
++ break;
++ case BANDWIDTH_LAST_BACKUP:
++ num_read = sscanf(argv[optind-1], "%ld", &read_time);
++ if(num_read > 0 )
++ {
++ info->last_backup_time = read_time;
++ valid_arg = 1;
++ }
++ break;
++ }
++ *flags = *flags + (unsigned int)c;
++
++
++ //if we have both reset_interval & reset_time, check reset_time is in valid range
++ if((*flags & BANDWIDTH_RESET_TIME) == BANDWIDTH_RESET_TIME && (*flags & BANDWIDTH_RESET_INTERVAL) == BANDWIDTH_RESET_INTERVAL)
++ {
++ if( (info->reset_interval == BANDWIDTH_NEVER) ||
++ (info->reset_interval == BANDWIDTH_MONTH && info->reset_time >= 60*60*24*28) ||
++ (info->reset_interval == BANDWIDTH_WEEK && info->reset_time >= 60*60*24*7) ||
++ (info->reset_interval == BANDWIDTH_DAY && info->reset_time >= 60*60*24) ||
++ (info->reset_interval == BANDWIDTH_HOUR && info->reset_time >= 60*60) ||
++ (info->reset_interval == BANDWIDTH_MINUTE && info->reset_time >= 60)
++ )
++ {
++ valid_arg = 0;
++ param_problem_exit_error("Parameter for '--reset_time' is not in valid range");
++ }
++ }
++ if(info->type != BANDWIDTH_COMBINED && (*flags & BANDWIDTH_CURRENT) == BANDWIDTH_CURRENT)
++ {
++ valid_arg = 0;
++ param_problem_exit_error("You may only specify current bandwidth for combined type\n Use user-space library for setting bandwidth for individual types");
++ }
++
++ return valid_arg;
++}
++
++
++
++static void print_bandwidth_args( struct ipt_bandwidth_info* info )
++{
++ if(info->cmp == BANDWIDTH_CHECK)
++ {
++ if(info->check_type == BANDWIDTH_CHECK_NOSWAP)
++ {
++ printf("--check ");
++ }
++ else
++ {
++ printf("--check_with_src_dst_swap ");
++ }
++ }
++ printf("--id %s ", info->id);
++
++
++
++ if(info->cmp != BANDWIDTH_CHECK)
++ {
++ /* determine current time in seconds since epoch, with offset for current timezone */
++ int minuteswest = get_minutes_west();
++ time_t now;
++ time(&now);
++ now = now - (minuteswest*60);
++
++ if(info->type == BANDWIDTH_COMBINED)
++ {
++ printf("--type combined ");
++ }
++ if(info->type == BANDWIDTH_INDIVIDUAL_SRC)
++ {
++ printf("--type individual_src ");
++ }
++ if(info->type == BANDWIDTH_INDIVIDUAL_DST)
++ {
++ printf("--type individual_dst ");
++ }
++ if(info->type == BANDWIDTH_INDIVIDUAL_LOCAL)
++ {
++ printf("--type individual_local ");
++ }
++ if(info->type == BANDWIDTH_INDIVIDUAL_REMOTE)
++ {
++ printf("--type individual_remote ");
++ }
++
++
++ if(info->local_subnet != 0)
++ {
++ unsigned char* sub = (unsigned char*)(&(info->local_subnet));
++ int msk_bits=0;
++ int pow=0;
++ for(pow=0; pow<32; pow++)
++ {
++ uint32_t test = get_pow(2, pow);
++ msk_bits = ( (info->local_subnet_mask & test) == test) ? msk_bits+1 : msk_bits;
++ }
++ printf("--subnet %u.%u.%u.%u/%u ", (unsigned char)sub[0], (unsigned char)sub[1], (unsigned char)sub[2], (unsigned char)sub[3], msk_bits);
++ }
++ if(info->cmp == BANDWIDTH_GT)
++ {
++ printf("--greater_than %lld ", info->bandwidth_cutoff);
++ }
++ if(info->cmp == BANDWIDTH_LT)
++ {
++ printf("--less_than %lld ", info->bandwidth_cutoff);
++ }
++ if (info->type == BANDWIDTH_COMBINED) /* too much data to print for multi types, have to use socket to get/set data */
++ {
++ if( info->reset_interval != BANDWIDTH_NEVER && info->next_reset != 0 && info->next_reset < now)
++ {
++ /*
++ * current bandwidth only gets reset when first packet after reset interval arrives, so output
++ * zero if we're already past interval, but no packets have arrived
++ */
++ printf("--current_bandwidth 0 ");
++ }
++ else
++ {
++ printf("--current_bandwidth %lld ", info->current_bandwidth);
++ }
++ }
++ if(info->reset_is_constant_interval)
++ {
++ printf("--reset_interval %ld ", info->reset_interval);
++ }
++ else
++ {
++ if(info->reset_interval == BANDWIDTH_MINUTE)
++ {
++ printf("--reset_interval minute ");
++ }
++ else if(info->reset_interval == BANDWIDTH_HOUR)
++ {
++ printf("--reset_interval hour ");
++ }
++ else if(info->reset_interval == BANDWIDTH_DAY)
++ {
++ printf("--reset_interval day ");
++ }
++ else if(info->reset_interval == BANDWIDTH_WEEK)
++ {
++ printf("--reset_interval week ");
++ }
++ else if(info->reset_interval == BANDWIDTH_MONTH)
++ {
++ printf("--reset_interval month ");
++ }
++ }
++ if(info->reset_time > 0)
++ {
++ printf("--reset_time %ld ", info->reset_time);
++ }
++ if(info->num_intervals_to_save > 0)
++ {
++ printf("--intervals_to_save %d ", info->num_intervals_to_save);
++ }
++ }
++}
++
++/*
++ * Final check, we can't have reset_time without reset_interval
++ */
++static void final_check(unsigned int flags)
++{
++ if (flags == 0)
++ {
++ param_problem_exit_error("You must specify at least one argument. ");
++ }
++ if( (flags & BANDWIDTH_RESET_INTERVAL) == 0 && (flags & BANDWIDTH_RESET_TIME) != 0)
++ {
++ param_problem_exit_error("You may not specify '--reset_time' without '--reset_interval' ");
++ }
++ if( (flags & BANDWIDTH_REQUIRES_SUBNET) == BANDWIDTH_REQUIRES_SUBNET && (flags & BANDWIDTH_SUBNET) == 0 )
++ {
++ param_problem_exit_error("You must specify a local subnet (--subnet a.b.c.d/mask) to match individual local/remote IPs ");
++ }
++
++ /* update timezone minutes_west in kernel to match userspace*/
++ set_kernel_timezone();
++}
++
++/* Prints out the matchinfo. */
++#ifdef _XTABLES_H
++static void print(const void *ip, const struct xt_entry_match *match, int numeric)
++#else
++static void print(const struct ipt_ip *ip, const struct ipt_entry_match *match, int numeric)
++#endif
++{
++ printf("bandwidth ");
++ struct ipt_bandwidth_info *info = (struct ipt_bandwidth_info *)match->data;
++
++ print_bandwidth_args(info);
++}
++
++/* Saves the union ipt_matchinfo in parsable form to stdout. */
++#ifdef _XTABLES_H
++static void save(const void *ip, const struct xt_entry_match *match)
++#else
++static void save(const struct ipt_ip *ip, const struct ipt_entry_match *match)
++#endif
++{
++ struct ipt_bandwidth_info *info = (struct ipt_bandwidth_info *)match->data;
++ time_t now;
++
++ print_bandwidth_args(info);
++
++ time(&now);
++ printf("--last_backup_time %ld ", now);
++}
++
++static struct iptables_match bandwidth =
++{
++ .next = NULL,
++ .name = "bandwidth",
++ #ifdef XTABLES_VERSION_CODE
++ .version = XTABLES_VERSION,
++ #else
++ .version = IPTABLES_VERSION,
++ #endif
++ .size = XT_ALIGN(sizeof(struct ipt_bandwidth_info)),
++ .userspacesize = XT_ALIGN(sizeof(struct ipt_bandwidth_info)),
++ .help = &help,
++ .parse = &parse,
++ .final_check = &final_check,
++ .print = &print,
++ .save = &save,
++ .extra_opts = opts
++};
++
++void _init(void)
++{
++ register_match(&bandwidth);
++}
++
++static void param_problem_exit_error(char* msg)
++{
++ #ifdef xtables_error
++ xtables_error(PARAMETER_PROBLEM, msg);
++ #else
++ exit_error(PARAMETER_PROBLEM, msg);
++ #endif
++}
++
++/*
++ * implement a simple function to get positive powers of positive integers so we don't have to mess with math.h
++ * all we really need are powers of 2 for calculating netmask
++ * This is only called a couple of times, so speed isn't an issue either
++ */
++static unsigned long get_pow(unsigned long base, unsigned long pow)
++{
++ unsigned long ret = pow == 0 ? 1 : base*get_pow(base, pow-1);
++ return ret;
++}
++
++
++int parse_sub(char* subnet_string, uint32_t* subnet, uint32_t* subnet_mask)
++{
++
++ int valid = 0;
++ unsigned int A,B,C,D,E,F,G,H;
++ int read_int = sscanf(subnet_string, "%u.%u.%u.%u/%u.%u.%u.%u", &A, &B, &C, &D, &E, &F, &G, &H);
++ if(read_int >= 5)
++ {
++ if( A <= 255 && B <= 255 && C <= 255 && D <= 255)
++ {
++ unsigned char* sub = (unsigned char*)(subnet);
++ unsigned char* msk = (unsigned char*)(subnet_mask);
++
++ *( sub ) = (unsigned char)A;
++ *( sub + 1 ) = (unsigned char)B;
++ *( sub + 2 ) = (unsigned char)C;
++ *( sub + 3 ) = (unsigned char)D;
++
++ if(read_int == 5)
++ {
++ unsigned int mask = E;
++ if(mask <= 32)
++ {
++ int msk_index;
++ for(msk_index=0; msk_index*8 < mask; msk_index++)
++ {
++ int bit_index;
++ msk[msk_index] = 0;
++ for(bit_index=0; msk_index*8 + bit_index < mask && bit_index < 8; bit_index++)
++ {
++ msk[msk_index] = msk[msk_index] + get_pow(2, 7-bit_index);
++ }
++ }
++ }
++ valid = 1;
++ }
++ if(read_int == 8)
++ {
++ if( E <= 255 && F <= 255 && G <= 255 && H <= 255)
++ *( msk ) = (unsigned char)E;
++ *( msk + 1 ) = (unsigned char)F;
++ *( msk + 2 ) = (unsigned char)G;
++ *( msk + 3 ) = (unsigned char)H;
++ valid = 1;
++ }
++ }
++ }
++ if(valid)
++ {
++ *subnet = (*subnet & *subnet_mask );
++ }
++ return valid;
++}
++
++
++
++int get_minutes_west(void)
++{
++ time_t now;
++ struct tm* utc_info;
++ struct tm* tz_info;
++ int utc_day;
++ int utc_hour;
++ int utc_minute;
++ int tz_day;
++ int tz_hour;
++ int tz_minute;
++ int minuteswest;
++
++ time(&now);
++ utc_info = gmtime(&now);
++ utc_day = utc_info->tm_mday;
++ utc_hour = utc_info->tm_hour;
++ utc_minute = utc_info->tm_min;
++ tz_info = localtime(&now);
++ tz_day = tz_info->tm_mday;
++ tz_hour = tz_info->tm_hour;
++ tz_minute = tz_info->tm_min;
++
++ utc_day = utc_day < tz_day - 1 ? tz_day + 1 : utc_day;
++ tz_day = tz_day < utc_day - 1 ? utc_day + 1 : tz_day;
++
++ minuteswest = (24*60*utc_day + 60*utc_hour + utc_minute) - (24*60*tz_day + 60*tz_hour + tz_minute) ;
++
++ return minuteswest;
++}
++
++void set_kernel_timezone(void)
++{
++ struct timeval tv;
++ struct timezone old_tz;
++ struct timezone new_tz;
++
++ new_tz.tz_minuteswest = get_minutes_west();;
++ new_tz.tz_dsttime = 0;
++
++ /* Get tv to pass to settimeofday(2) to be sure we avoid hour-sized warp */
++ /* (see gettimeofday(2) man page, or /usr/src/linux/kernel/time.c) */
++ gettimeofday(&tv, &old_tz);
++
++ /* set timezone */
++ settimeofday(&tv, &new_tz);
++}
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/extensions/libipt_webmon.c 2014-12-20 16:55:22.562445985 +0800
+@@ -0,0 +1,700 @@
++/* webmon -- An iptables extension to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2011 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++#include <stdio.h>
++#include <netdb.h>
++#include <string.h>
++#include <stdlib.h>
++#include <getopt.h>
++
++#include <arpa/inet.h>
++
++/*
++ * in iptables 1.4.0 and higher, iptables.h includes xtables.h, which
++ * we can use to check whether we need to deal with the new requirements
++ * in pre-processor directives below
++ */
++#include <iptables.h>
++#include <linux/netfilter_ipv4/ipt_webmon.h>
++
++#ifdef _XTABLES_H
++ #define iptables_rule_match xtables_rule_match
++ #define iptables_match xtables_match
++ #define iptables_target xtables_target
++ #define ipt_tryload xt_tryload
++#endif
++
++/*
++ * XTABLES_VERSION_CODE is only defined in versions 1.4.1 and later, which
++ * also require the use of xtables_register_match
++ *
++ * Version 1.4.0 uses register_match like previous versions
++ */
++#ifdef XTABLES_VERSION_CODE
++ #define register_match xtables_register_match
++#endif
++
++
++#define STRIP "%d.%d.%d.%d"
++#define NIPQUAD(addr) \
++ ((unsigned char *)&addr)[0], \
++ ((unsigned char *)&addr)[1], \
++ ((unsigned char *)&addr)[2], \
++ ((unsigned char *)&addr)[3]
++
++
++
++/* utility functions necessary for module to work across multiple iptables versions */
++static void param_problem_exit_error(char* msg);
++
++
++void parse_ips_and_ranges(char* addr_str, struct ipt_webmon_info *info);
++
++char** split_on_separators(char* line, char* separators, int num_separators, int max_pieces, int include_remainder_at_max);
++char* trim_flanking_whitespace(char* str);
++unsigned char* read_entire_file(FILE* in, unsigned long read_block_size, unsigned long *length);
++
++#define DEFAULT_MAX 300
++
++#define SEARCH_LOAD_FILE 100
++#define DOMAIN_LOAD_FILE 101
++#define CLEAR_SEARCH 102
++#define CLEAR_DOMAIN 103
++
++static char* domain_load_file = NULL;
++static char* search_load_file = NULL;
++static uint32_t global_max_domains = DEFAULT_MAX;
++static uint32_t global_max_searches = DEFAULT_MAX;
++
++/* Function which prints out usage message. */
++static void help(void)
++{
++ printf( "webmon options:\n");
++}
++
++static struct option opts[] =
++{
++ { .name = "exclude_ips", .has_arg = 1, .flag = 0, .val = WEBMON_EXCLUDE },
++ { .name = "include_ips", .has_arg = 1, .flag = 0, .val = WEBMON_INCLUDE },
++ { .name = "max_domains", .has_arg = 1, .flag = 0, .val = WEBMON_MAXDOMAIN },
++ { .name = "max_searches", .has_arg = 1, .flag = 0, .val = WEBMON_MAXSEARCH },
++ { .name = "search_load_file", .has_arg = 1, .flag = 0, .val = SEARCH_LOAD_FILE },
++ { .name = "domain_load_file", .has_arg = 1, .flag = 0, .val = DOMAIN_LOAD_FILE },
++ { .name = "clear_search", .has_arg = 0, .flag = 0, .val = CLEAR_SEARCH },
++ { .name = "clear_domain", .has_arg = 0, .flag = 0, .val = CLEAR_DOMAIN },
++
++ { .name = 0 }
++};
++
++static void webmon_init(
++#ifdef _XTABLES_H
++ struct xt_entry_match *match
++#else
++ struct ipt_entry_match *match, unsigned int *nfcache
++#endif
++ )
++{
++ struct ipt_webmon_info *info = (struct ipt_webmon_info *)match->data;
++ info->max_domains=DEFAULT_MAX;
++ info->max_searches=DEFAULT_MAX;
++ info->num_exclude_ips=0;
++ info->num_exclude_ranges=0;
++ info->exclude_type = WEBMON_EXCLUDE;
++ info->ref_count = NULL;
++}
++
++
++/* Function which parses command options; returns true if it ate an option */
++static int parse( int c,
++ char **argv,
++ int invert,
++ unsigned int *flags,
++#ifdef _XTABLES_H
++ const void *entry,
++#else
++ const struct ipt_entry *entry,
++ unsigned int *nfcache,
++#endif
++ struct ipt_entry_match **match
++ )
++{
++ struct ipt_webmon_info *info = (struct ipt_webmon_info *)(*match)->data;
++ int valid_arg = 1;
++ long max;
++ switch (c)
++ {
++ case WEBMON_EXCLUDE:
++ parse_ips_and_ranges(optarg, info);
++ info->exclude_type = WEBMON_EXCLUDE;
++ break;
++ case WEBMON_INCLUDE:
++ parse_ips_and_ranges(optarg, info);
++ info->exclude_type = WEBMON_INCLUDE;
++ break;
++ case WEBMON_MAXSEARCH:
++ if( sscanf(argv[optind-1], "%ld", &max) == 0)
++ {
++ info->max_searches = DEFAULT_MAX ;
++ valid_arg = 0;
++ }
++ else
++ {
++ info->max_searches = (uint32_t)max;
++ global_max_searches = info->max_searches;
++ }
++ break;
++ case WEBMON_MAXDOMAIN:
++ if( sscanf(argv[optind-1], "%ld", &max) == 0)
++ {
++ info->max_domains = DEFAULT_MAX ;
++ valid_arg = 0;
++ }
++ else
++ {
++ info->max_domains = (uint32_t)max;
++ global_max_domains = info->max_domains;
++ }
++ break;
++ case SEARCH_LOAD_FILE:
++ search_load_file = strdup(optarg);
++ break;
++ case DOMAIN_LOAD_FILE:
++ domain_load_file = strdup(optarg);
++ break;
++ case CLEAR_SEARCH:
++ search_load_file = strdup("/dev/null");
++ break;
++ case CLEAR_DOMAIN:
++ domain_load_file = strdup("/dev/null");
++ break;
++ default:
++ valid_arg = 0;
++ }
++ return valid_arg;
++
++}
++
++
++
++static void print_webmon_args( struct ipt_webmon_info* info )
++{
++ printf("--max_domains %ld ", (unsigned long int)info->max_domains);
++ printf("--max_searches %ld ", (unsigned long int)info->max_searches);
++ if(info->num_exclude_ips > 0 || info->num_exclude_ranges > 0)
++ {
++ int ip_index = 0;
++ char comma[3] = "";
++ printf("--%s ", (info->exclude_type == WEBMON_EXCLUDE ? "exclude_ips" : "include_ips"));
++ for(ip_index=0; ip_index < info->num_exclude_ips; ip_index++)
++ {
++ printf("%s"STRIP, comma, NIPQUAD((info->exclude_ips)[ip_index]) );
++ sprintf(comma, ",");
++ }
++ for(ip_index=0; ip_index < info->num_exclude_ranges; ip_index++)
++ {
++ struct ipt_webmon_ip_range r = (info->exclude_ranges)[ip_index];
++ printf("%s"STRIP"-"STRIP, comma, NIPQUAD(r.start), NIPQUAD(r.end) );
++ sprintf(comma, ",");
++ }
++ printf(" ");
++ }
++}
++
++
++static void do_load(char* file, uint32_t max, unsigned char type)
++{
++ if(file != NULL)
++ {
++ unsigned char* data = NULL;
++ unsigned long data_length = 0;
++ char* file_data = NULL;
++ if(strcmp(file, "/dev/null") != 0)
++ {
++ FILE* in = fopen(file, "r");
++ if(in != NULL)
++ {
++ file_data = (char*)read_entire_file(in, 4096, &data_length);
++ fclose(in);
++ }
++ }
++ if(file_data == NULL)
++ {
++ file_data=strdup("");
++ }
++
++ if(file_data != NULL)
++ {
++ data_length = strlen(file_data) + sizeof(uint32_t)+2;
++ data = (unsigned char*)malloc(data_length);
++ if(data != NULL)
++ {
++ int sockfd = -1;
++ uint32_t* maxp = (uint32_t*)(data+1);
++ data[0] = type;
++ *maxp = max;
++ sprintf( (data+1+sizeof(uint32_t)), "%s", file_data);
++
++ sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
++ if(sockfd >= 0)
++ {
++ setsockopt(sockfd, IPPROTO_IP, WEBMON_SET, data, data_length);
++ close(sockfd);
++ }
++ free(data);
++ }
++ free(file_data);
++ }
++ }
++
++}
++
++
++static void final_check(unsigned int flags)
++{
++ do_load(domain_load_file, global_max_domains, WEBMON_DOMAIN);
++ do_load(search_load_file, global_max_searches, WEBMON_SEARCH);
++}
++
++/* Prints out the matchinfo. */
++#ifdef _XTABLES_H
++static void print(const void *ip, const struct xt_entry_match *match, int numeric)
++#else
++static void print(const struct ipt_ip *ip, const struct ipt_entry_match *match, int numeric)
++#endif
++{
++ printf("WEBMON ");
++ struct ipt_webmon_info *info = (struct ipt_webmon_info *)match->data;
++
++ print_webmon_args(info);
++}
++
++/* Saves the union ipt_matchinfo in parsable form to stdout. */
++#ifdef _XTABLES_H
++static void save(const void *ip, const struct xt_entry_match *match)
++#else
++static void save(const struct ipt_ip *ip, const struct ipt_entry_match *match)
++#endif
++{
++ struct ipt_webmon_info *info = (struct ipt_webmon_info *)match->data;
++ print_webmon_args(info);
++}
++
++static struct iptables_match webmon =
++{
++ .next = NULL,
++ .name = "webmon",
++ #ifdef XTABLES_VERSION_CODE
++ .version = XTABLES_VERSION,
++ #else
++ .version = IPTABLES_VERSION,
++ #endif
++ .size = XT_ALIGN(sizeof(struct ipt_webmon_info)),
++ .userspacesize = XT_ALIGN(sizeof(struct ipt_webmon_info)),
++ .help = &help,
++ .init = &webmon_init,
++ .parse = &parse,
++ .final_check = &final_check,
++ .print = &print,
++ .save = &save,
++ .extra_opts = opts
++};
++
++void _init(void)
++{
++ register_match(&webmon);
++}
++
++
++#ifndef TRUE
++#define TRUE 1
++#endif
++#ifndef FALSE
++#define FALSE 0
++#endif
++
++
++
++
++
++
++
++static void param_problem_exit_error(char* msg)
++{
++ #ifdef xtables_error
++ xtables_error(PARAMETER_PROBLEM, msg);
++ #else
++ exit_error(PARAMETER_PROBLEM, msg);
++ #endif
++}
++
++
++void parse_ips_and_ranges(char* addr_str, struct ipt_webmon_info *info)
++{
++ char** addr_parts = split_on_separators(addr_str, ",", 1, -1, 0);
++
++ info->num_exclude_ips=0;
++ info->num_exclude_ranges = 0;
++
++ int ip_part_index;
++ for(ip_part_index=0; addr_parts[ip_part_index] != NULL; ip_part_index++)
++ {
++ char* next_str = addr_parts[ip_part_index];
++ if(strchr(next_str, '-') != NULL)
++ {
++ char** range_parts = split_on_separators(next_str, "-", 1, 2, 1);
++ char* start = trim_flanking_whitespace(range_parts[0]);
++ char* end = trim_flanking_whitespace(range_parts[1]);
++ int start_ip[4];
++ int end_ip[4];
++ int start_valid = sscanf(start, "%d.%d.%d.%d", start_ip, start_ip+1, start_ip+2, start_ip+3);
++ int end_valid = sscanf(end, "%d.%d.%d.%d", end_ip, end_ip+1, end_ip+2, end_ip+3);
++
++ if(start_valid == 4 && end_valid == 4)
++ {
++ struct ipt_webmon_ip_range r;
++ struct in_addr sip, eip;
++ inet_pton(AF_INET, start, &sip);
++ inet_pton(AF_INET, end, &eip);
++ r.start = (uint32_t)sip.s_addr;
++ r.end = (uint32_t)eip.s_addr;
++
++ if(info->num_exclude_ranges < WEBMON_MAX_IP_RANGES && (unsigned long)ntohl(r.start) < (unsigned long)ntohl(r.end) )
++ {
++ (info->exclude_ranges)[ info->num_exclude_ranges ] = r;
++ info->num_exclude_ranges = info->num_exclude_ranges + 1;
++ }
++ }
++
++ free(start);
++ free(end);
++ free(range_parts);
++ }
++ else if(strchr(next_str, '/') != NULL)
++ {
++ char** range_parts = split_on_separators(next_str, "/", 1, 2, 1);
++ char* start = trim_flanking_whitespace(range_parts[0]);
++ char* end = trim_flanking_whitespace(range_parts[1]);
++ int base_ip[4];
++ int base_valid = sscanf(start, "%d.%d.%d.%d", base_ip, base_ip+1, base_ip+2, base_ip+3);
++ if(base_valid == 4)
++ {
++ int mask_valid = 0;
++ uint32_t mask;
++ if(strchr(end, '.') != NULL)
++ {
++ uint32_t mask_ip[4];
++ int mask_test = sscanf(end, "%d.%d.%d.%d", mask_ip, mask_ip+1, mask_ip+2, mask_ip+3);
++ if(mask_test == 4)
++ {
++ struct in_addr mask_add;
++ inet_pton(AF_INET, end, &mask_add);
++ mask = (uint32_t)mask_add.s_addr;
++ mask_valid = 1;
++ }
++ }
++ else
++ {
++ int mask_bits;
++ if( sscanf(end, "%d", &mask_bits) > 0)
++ {
++ if(mask_bits >=0 && mask_bits <= 32)
++ {
++ uint32_t byte = 0;
++ mask = 0;
++ for(byte=0; byte < 4; byte++)
++ {
++ unsigned char byte_bits = mask_bits > 8 ? 8 : mask_bits;
++ uint32_t byte_mask = 0;
++ mask_bits = mask_bits - byte_bits;
++
++ while(byte_bits > 0)
++ {
++ byte_mask = byte_mask | (256 >> byte_bits);
++ byte_bits--;
++ }
++ mask = mask | ((uint32_t)byte_mask << (byte*8));
++ printf("mask = "STRIP"\n", NIPQUAD(mask));
++ }
++ mask_valid = 1;
++ }
++ }
++ }
++ if(mask_valid)
++ {
++ struct ipt_webmon_ip_range r;
++ struct in_addr bip;
++ inet_pton(AF_INET, start, &bip);
++ r.start = ( ((uint32_t)bip.s_addr) & mask );
++ r.end = ( ((uint32_t)bip.s_addr) | (~mask) );
++ if(info->num_exclude_ranges < WEBMON_MAX_IP_RANGES && ntohl(r.start) <= ntohl(r.end) )
++ {
++ (info->exclude_ranges)[ info->num_exclude_ranges ] = r;
++ info->num_exclude_ranges = info->num_exclude_ranges + 1;
++ }
++ }
++ }
++ free(start);
++ free(end);
++ free(range_parts);
++ }
++ else
++ {
++ int parsed_ip[4];
++ int valid = sscanf(next_str, "%d.%d.%d.%d", parsed_ip, parsed_ip+1, parsed_ip+2, parsed_ip+3);
++ if(valid == 4)
++ {
++ struct in_addr ip;
++ trim_flanking_whitespace(next_str);
++ inet_pton(AF_INET, next_str, &ip);
++
++ if(info->num_exclude_ranges < WEBMON_MAX_IPS)
++ {
++ (info->exclude_ips)[ info->num_exclude_ips ] = (uint32_t)ip.s_addr;
++ info->num_exclude_ips = info->num_exclude_ips + 1;
++ }
++ }
++ }
++ free(next_str);
++ }
++ free(addr_parts);
++
++}
++
++
++
++/*
++ * line_str is the line to be parsed -- it is not modified in any way
++ * max_pieces indicates number of pieces to return, if negative this is determined dynamically
++ * include_remainder_at_max indicates whether the last piece, when max pieces are reached,
++ * should be what it would normally be (0) or the entire remainder of the line (1)
++ * if max_pieces < 0 this parameter is ignored
++ *
++ *
++ * returns all non-separator pieces in a line
++ * result is dynamically allocated, MUST be freed after call-- even if
++ * line is empty (you still get a valid char** pointer to to a NULL char*)
++ */
++char** split_on_separators(char* line_str, char* separators, int num_separators, int max_pieces, int include_remainder_at_max)
++{
++	char** split;
++
++	if(line_str != NULL) /* NULL input yields a list containing only the NULL terminator */
++	{
++		int split_index;
++		int non_separator_found;
++		char* dup_line;
++		char* start;
++
++		if(max_pieces < 0)
++		{
++			/* count number of separator characters in line -- this count + 1 is an upperbound on number of pieces */
++			int separator_count = 0;
++			int line_index;
++			for(line_index = 0; line_str[line_index] != '\0'; line_index++)
++			{
++				int sep_index;
++				int found = 0;
++				for(sep_index =0; found == 0 && sep_index < num_separators; sep_index++)
++				{
++					found = separators[sep_index] == line_str[line_index] ? 1 : 0;
++				}
++				separator_count = separator_count+ found;
++			}
++			max_pieces = separator_count + 1;
++		}
++		split = (char**)malloc((1+max_pieces)*sizeof(char*)); /* +1 slot for the terminating NULL pointer */
++		split_index = 0;
++		split[split_index] = NULL;
++
++
++		dup_line = strdup(line_str); /* work on a private copy -- caller's string is never modified */
++		start = dup_line;
++		non_separator_found = 0;
++		while(non_separator_found == 0) /* advance past any leading separators */
++		{
++			int matches = 0;
++			int sep_index;
++			for(sep_index =0; sep_index < num_separators; sep_index++)
++			{
++				matches = matches == 1 || separators[sep_index] == start[0] ? 1 : 0;
++			}
++			non_separator_found = matches==0 || start[0] == '\0' ? 1 : 0;
++			if(non_separator_found == 0)
++			{
++				start++;
++			}
++		}
++
++		while(start[0] != '\0' && split_index < max_pieces) /* extract pieces until string exhausted or max_pieces reached */
++		{
++			/* find first separator index */
++			int first_separator_index = 0;
++			int separator_found = 0;
++			while( separator_found == 0 )
++			{
++				int sep_index;
++				for(sep_index =0; separator_found == 0 && sep_index < num_separators; sep_index++)
++				{
++					separator_found = separators[sep_index] == start[first_separator_index] || start[first_separator_index] == '\0' ? 1 : 0;
++				}
++				if(separator_found == 0)
++				{
++					first_separator_index++;
++				}
++			}
++
++			/* copy next piece to split array */
++			if(first_separator_index > 0)
++			{
++				char* next_piece = NULL;
++				if(split_index +1 < max_pieces || include_remainder_at_max <= 0)
++				{
++					next_piece = (char*)malloc((first_separator_index+1)*sizeof(char));
++					memcpy(next_piece, start, first_separator_index);
++					next_piece[first_separator_index] = '\0';
++				}
++				else
++				{
++					next_piece = strdup(start); /* last allowed piece: keep the entire remainder */
++				}
++				split[split_index] = next_piece;
++				split[split_index+1] = NULL;
++				split_index++;
++			}
++
++
++			/* find next non-separator index, indicating start of next piece */
++			start = start+ first_separator_index;
++			non_separator_found = 0;
++			while(non_separator_found == 0)
++			{
++				int matches = 0;
++				int sep_index;
++				for(sep_index =0; sep_index < num_separators; sep_index++)
++				{
++					matches = matches == 1 || separators[sep_index] == start[0] ? 1 : 0;
++				}
++				non_separator_found = matches==0 || start[0] == '\0' ? 1 : 0;
++				if(non_separator_found == 0)
++				{
++					start++;
++				}
++			}
++		}
++		free(dup_line);
++	}
++	else
++	{
++		split = (char**)malloc((1)*sizeof(char*)); /* empty result: just the NULL terminator */
++		split[0] = NULL;
++	}
++	return split;
++}
++
++
++
++char* trim_flanking_whitespace(char* str) /* strips leading/trailing whitespace in place; returns str for chaining */
++{
++	int new_start = 0;
++	int new_length = 0;
++
++	char whitespace[5] = { ' ', '\t', '\n', '\r', '\0' };
++	int num_whitespace_chars = 4;
++
++
++	int str_index = 0;
++	int is_whitespace = 1;
++	int test;
++	while( (test = str[str_index]) != '\0' && is_whitespace == 1) /* scan forward past leading whitespace */
++	{
++		int whitespace_index;
++		is_whitespace = 0;
++		for(whitespace_index = 0; whitespace_index < num_whitespace_chars && is_whitespace == 0; whitespace_index++)
++		{
++			is_whitespace = test == whitespace[whitespace_index] ? 1 : 0;
++		}
++		str_index = is_whitespace == 1 ? str_index+1 : str_index;
++	}
++	new_start = str_index;
++
++
++	str_index = strlen(str) - 1;
++	is_whitespace = 1;
++	while( str_index >= new_start && is_whitespace == 1) /* scan backward over trailing whitespace */
++	{
++		int whitespace_index;
++		is_whitespace = 0;
++		for(whitespace_index = 0; whitespace_index < num_whitespace_chars && is_whitespace == 0; whitespace_index++)
++		{
++			is_whitespace = str[str_index] == whitespace[whitespace_index] ? 1 : 0;
++		}
++		str_index = is_whitespace == 1 ? str_index-1 : str_index;
++	}
++	new_length = str[new_start] == '\0' ? 0 : str_index + 1 - new_start;
++
++
++	if(new_start > 0) /* shift trimmed content to the front of the buffer */
++	{
++		for(str_index = 0; str_index < new_length; str_index++)
++		{
++			str[str_index] = str[str_index+new_start];
++		}
++	}
++	str[new_length] = 0;
++	return str;
++}
++
++
++unsigned char* read_entire_file(FILE* in, unsigned long read_block_size, unsigned long *length) /* reads stream to EOF; returns NUL-terminated buffer (caller frees), *length = bytes read */
++{
++	int max_read_size = read_block_size; /* NOTE(review): int narrows unsigned long; could overflow for very large inputs -- confirm sizes used */
++	unsigned char* read_string = (unsigned char*)malloc(max_read_size+1); /* +1 for terminating NUL; malloc result unchecked */
++	unsigned long bytes_read = 0;
++	int end_found = 0;
++	while(end_found == 0)
++	{
++		int nextch = '?';
++		while(nextch != EOF && bytes_read < max_read_size)
++		{
++			nextch = fgetc(in);
++			if(nextch != EOF)
++			{
++				read_string[bytes_read] = (unsigned char)nextch;
++				bytes_read++;
++			}
++		}
++		read_string[bytes_read] = '\0';
++		end_found = (nextch == EOF) ? 1 : 0;
++		if(end_found == 0)
++		{
++			unsigned char *new_str;
++			max_read_size = max_read_size + read_block_size;
++			new_str = (unsigned char*)malloc(max_read_size+1); /* grow manually (no realloc) and copy existing data */
++			memcpy(new_str, read_string, bytes_read);
++			free(read_string);
++			read_string = new_str;
++		}
++	}
++	*length = bytes_read;
++	return read_string;
++}
++
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/extensions/libipt_timerange.c 2014-12-20 16:55:22.737446012 +0800
+@@ -0,0 +1,876 @@
++/* timerange -- An iptables extension to match multiple timeranges within a week
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++#include <stdio.h>
++#include <netdb.h>
++#include <string.h>
++#include <stdlib.h>
++#include <getopt.h>
++#include <ctype.h>
++#include <time.h>
++#include <sys/time.h>
++
++
++/*
++ * in iptables 1.4.0 and higher, iptables.h includes xtables.h, which
++ * we can use to check whether we need to deal with the new requirements
++ * in pre-processor directives below
++ */
++#include <iptables.h>
++#include <linux/netfilter_ipv4/ipt_timerange.h>
++
++#ifdef _XTABLES_H
++ #define iptables_rule_match xtables_rule_match
++ #define iptables_match xtables_match
++ #define iptables_target xtables_target
++ #define ipt_tryload xt_tryload
++#endif
++
++/*
++ * XTABLES_VERSION_CODE is only defined in versions 1.4.1 and later, which
++ * also require the use of xtables_register_match
++ *
++ * Version 1.4.0 uses register_match like previous versions
++ */
++#ifdef XTABLES_VERSION_CODE
++ #define register_match xtables_register_match
++#endif
++
++/* utility functions necessary for module to work across multiple iptables versions */
++static int my_check_inverse(const char option[], int* invert, int *my_optind, int argc);
++static void param_problem_exit_error(char* msg);
++
++
++long* parse_time_ranges(char* time_ranges, unsigned char is_weekly_range);
++void merge_adjacent_time_ranges(long* time_ranges, unsigned char is_weekly_range);
++unsigned long parse_time(char* time_str);
++long* parse_weekdays(char* wd_str);
++
++char** split_on_separators(char* line, char* separators, int num_separators, int max_pieces, int include_remainder_at_max);
++void to_lowercase(char* str);
++char* trim_flanking_whitespace(char* str);
++
++void set_kernel_timezone(void);
++
++/* Function which prints out usage message. */
++static void help(void) /* prints usage for the three mutually-exclusive timerange options */
++{
++	printf( "timerange options:\n --hours [HOURLY RANGES] --weekdays [WEEKDAYS ACTIVE] --weekly_ranges [WEEKLY RANGES]\n");
++}
++
++static struct option opts[] =
++{
++	{ .name = "hours",		.has_arg = 1, .flag = 0, .val = HOURS },	/* daily HH:MM-HH:MM list */
++	{ .name = "weekdays",		.has_arg = 1, .flag = 0, .val = WEEKDAYS },	/* comma-separated day names */
++	{ .name = "weekly_ranges",	.has_arg = 1, .flag = 0, .val = WEEKLY_RANGE },	/* week-spanning ranges */
++	{ .name = 0 } /* terminator */
++};
++
++
++/* Function which parses command options; returns true if it
++ ate an option */
++static int parse(	int c,
++			char **argv,
++			int invert,
++			unsigned int *flags,
++#ifdef _XTABLES_H
++			const void *entry,
++#else
++			const struct ipt_entry *entry,
++			unsigned int *nfcache,
++#endif
++			struct ipt_entry_match **match
++			) /* option parser; returns 1 when the option was consumed */
++{
++	struct ipt_timerange_info *info = (struct ipt_timerange_info *)(*match)->data;
++	int valid_arg = 0;
++	if(*flags == 0) /* only honor "!" before the first recognized option */
++	{
++		my_check_inverse(optarg, &invert, &optind, 0);
++		info->invert = invert ? 1 : 0;
++	}
++
++	long* parsed = NULL;
++	switch (c)
++	{
++		case HOURS:
++			parsed = parse_time_ranges(argv[optind-1], 0); /* argv[optind-1] is the option's argument */
++			if(parsed != NULL && (*flags & HOURS) == 0 && (*flags & WEEKLY_RANGE) == 0)
++			{
++				int range_index = 0;
++				for(range_index = 0; parsed[range_index] != -1; range_index++)
++				{
++					if(range_index > 100) /* NOTE(review): bound guard -- confirm matches sizeof info->ranges in ipt_timerange.h */
++					{
++						return 0;
++					}
++					info->ranges[range_index] = parsed[range_index];
++				}
++				info->ranges[range_index] = -1;
++				free(parsed);
++
++
++				valid_arg = 1;
++				*flags = *flags+ c; /* option vals act as disjoint bits; guards above ensure each is added once */
++				info->type = *flags;
++			}
++			break;
++
++
++		case WEEKDAYS:
++			parsed = parse_weekdays(argv[optind-1]);
++			if(parsed != NULL && (*flags & WEEKDAYS) == 0 && (*flags & WEEKLY_RANGE) == 0)
++			{
++				int day_index;
++				for(day_index=0; day_index < 7; day_index++)
++				{
++					info->days[day_index] = parsed[day_index];
++				}
++				free(parsed);
++
++				valid_arg = 1 ;
++				*flags = *flags + c;
++				info->type = *flags;
++			}
++			break;
++		case WEEKLY_RANGE:
++			parsed = parse_time_ranges(argv[optind-1], 1); /* weekly: seconds since midnight Sunday */
++			if(parsed != NULL && (*flags & HOURS) == 0 && (*flags & WEEKDAYS) == 0 && (*flags & WEEKLY_RANGE) == 0 )
++			{
++				int range_index = 0;
++				for(range_index = 0; parsed[range_index] != -1; range_index++)
++				{
++					if(range_index > 100)
++					{
++						return 0;
++					}
++					info->ranges[range_index] = parsed[range_index];
++
++				}
++				info->ranges[range_index] = -1;
++				free(parsed);
++
++				valid_arg = 1;
++				*flags = *flags+c;
++				info->type = *flags;
++			}
++			break;
++	}
++
++	return valid_arg;
++}
++
++
++
++static void print_timerange_args( struct ipt_timerange_info* info ) /* re-emits the parsed match as CLI args (shared by print & save) */
++{
++	int i;
++
++	if(info->invert == 1)
++	{
++		printf(" ! ");
++	}
++
++	switch(info->type)
++	{
++		case DAYS_HOURS:
++		case HOURS:
++			printf(" --hours ");
++			for(i=0; info->ranges[i] != -1; i++)
++			{
++				printf("%ld", info->ranges[i]);
++				if(info->ranges[i+1] != -1)
++				{
++					if(i % 2 == 0){ printf("-"); } /* even index = range start, so join with '-' */
++					else { printf(","); }
++				}
++			}
++			if(info->type == HOURS) { break; } /* DAYS_HOURS deliberately falls through to also print weekdays */
++		case WEEKDAYS:
++			printf(" --weekdays ");
++			for(i=0; i<7; i++)
++			{
++				printf("%d", info->days[i]);
++				if(i != 6){ printf(","); }
++			}
++			break;
++		case WEEKLY_RANGE:
++			printf(" --weekly_ranges ");
++			for(i=0; info->ranges[i] != -1; i++)
++			{
++				printf("%ld", info->ranges[i]);
++				if(info->ranges[i+1] != -1)
++				{
++					if(i % 2 == 0){ printf("-"); }
++					else { printf(","); }
++				}
++			}
++			break;
++	}
++	printf(" ");
++
++}
++
++/* Final check; at least one of --hours, --weekdays or --weekly_ranges must have been specified. */
++static void final_check(unsigned int flags)
++{
++	if(flags ==0)
++	{
++		param_problem_exit_error("Invalid arguments to time_range");
++	}
++
++	/* update timezone minutes_west in kernel to match userspace*/
++	set_kernel_timezone();
++}
++
++/* Prints out the matchinfo. */
++#ifdef _XTABLES_H
++static void print(const void *ip, const struct xt_entry_match *match, int numeric) /* rule listing (iptables -L) */
++#else
++static void print(const struct ipt_ip *ip, const struct ipt_entry_match *match, int numeric)
++#endif
++{
++	printf("timerange ");
++	struct ipt_timerange_info *info = (struct ipt_timerange_info *)match->data;
++
++	print_timerange_args(info);
++}
++
++/* Saves the union ipt_matchinfo in parsable form to stdout. */
++#ifdef _XTABLES_H
++static void save(const void *ip, const struct xt_entry_match *match) /* output for iptables-save (no match name prefix) */
++#else
++static void save(const struct ipt_ip *ip, const struct ipt_entry_match *match)
++#endif
++{
++	struct ipt_timerange_info *info = (struct ipt_timerange_info *)match->data;
++	print_timerange_args(info);
++}
++
++static struct iptables_match timerange = /* match registration record handed to iptables/xtables */
++{
++	.next		= NULL,
++	.name		= "timerange",
++	#ifdef XTABLES_VERSION_CODE
++		.version = XTABLES_VERSION,
++	#else
++		.version = IPTABLES_VERSION,
++	#endif
++	.size		= XT_ALIGN(sizeof(struct ipt_timerange_info)),
++	.userspacesize	= XT_ALIGN(sizeof(struct ipt_timerange_info)), /* whole struct compared for rule equality */
++	.help		= &help,
++	.parse		= &parse,
++	.final_check	= &final_check,
++	.print		= &print,
++	.save		= &save,
++	.extra_opts	= opts
++};
++
++void _init(void) /* invoked when iptables dlopen()s this extension; register_match is remapped to xtables_register_match on 1.4.1+ */
++{
++	register_match(&timerange);
++}
++
++#ifndef TRUE
++#define TRUE 1
++#endif
++#ifndef FALSE
++#define FALSE 0
++#endif
++static int my_check_inverse(const char option[], int* invert, int *my_optind, int argc) /* version-portable stand-in for iptables' check_inverse(); detects a leading "!" */
++{
++	if (option && strcmp(option, "!") == 0)
++	{
++		if (*invert)
++		{
++			param_problem_exit_error("Multiple `!' flags not allowed");
++		}
++		*invert = TRUE;
++		if (my_optind != NULL)
++		{
++			++*my_optind; /* consume the "!" so the real argument is read next */
++			if (argc && *my_optind > argc) /* argc==0 disables this bounds check (callers here pass 0) */
++			{
++				param_problem_exit_error("no argument following `!'");
++			}
++		}
++		return TRUE;
++	}
++	return FALSE;
++}
++static void param_problem_exit_error(char* msg) /* abort with a parameter error via whichever API this iptables version provides */
++{
++	#ifdef xtables_error
++		xtables_error(PARAMETER_PROBLEM, msg);
++	#else
++		exit_error(PARAMETER_PROBLEM, msg);
++	#endif
++}
++
++/* takes a string of days e.g. "Monday, Tuesday, Friday", and turns into an array of 7 longs
++ * each 0 or 1, one for each weekday starting with sunday, e.g. [0,1,1,0,0,1,0] for our example
++ */
++long* parse_weekdays(char* wd_str)
++{
++	long* weekdays = (long*)malloc(7*sizeof(long));
++	weekdays[0] = weekdays[1] = weekdays[2] = weekdays[3] = weekdays[4] = weekdays[5] = weekdays[6] = 0;
++
++	char** days = split_on_separators(wd_str, ",", 1, -1, 0);
++	int day_index;
++	int found = 0;
++	for(day_index=0; days[day_index] != NULL; day_index++)
++	{
++		char day[4];
++		trim_flanking_whitespace(days[day_index]);
++		memcpy(day, days[day_index], 3); /* NOTE(review): copies 3 bytes even for shorter tokens -- possible 1-2 byte over-read past the NUL */
++		free(days[day_index]);
++		day[3] = '\0'; /* only the first three letters of the day name are compared */
++		to_lowercase(day);
++		if(strcmp(day, "sun") == 0)
++		{
++			weekdays[0] = 1;
++			found = 1;
++		}
++		else if(strcmp(day, "mon") ==0)
++		{
++			weekdays[1] = 1;
++			found = 1;
++		}
++		else if(strcmp(day, "tue") ==0)
++		{
++			weekdays[2] = 1;
++			found = 1;
++		}
++		else if(strcmp(day, "wed") ==0)
++		{
++			weekdays[3] = 1;
++			found = 1;
++		}
++		else if(strcmp(day, "thu") ==0)
++		{
++			weekdays[4] = 1;
++			found = 1;
++		}
++		else if(strcmp(day, "fri") ==0)
++		{
++			weekdays[5] = 1;
++			found = 1;
++		}
++		else if(strcmp(day, "sat") ==0)
++		{
++			weekdays[6] = 1;
++			found = 1;
++		}
++		else if(strcmp(day, "all") ==0)
++		{
++			weekdays[0] = weekdays[1] = weekdays[2] = weekdays[3] = weekdays[4] = weekdays[5] = weekdays[6] = 1;
++			found = 1;
++		}
++	}
++	free(days);
++	if(found == 0) /* no recognizable day name -> signal invalid input with NULL */
++	{
++		free(weekdays);
++		weekdays = NULL;
++	}
++	return weekdays;
++}
++
++
++/* is_weekly_range indicates whether we're parsing hours within a single day or a range over a whole week */
++long* parse_time_ranges(char* time_ranges, unsigned char is_weekly_range)
++{
++	char** pieces = split_on_separators(time_ranges, ",", 1, -1, 0);
++	int num_pieces = 0;
++	for(num_pieces = 0; pieces[num_pieces] != NULL; num_pieces++) {};
++	long *parsed = (long*)malloc( (1+(num_pieces*2)) * sizeof(long)); /* 2 longs per range + terminating -1 */
++
++
++
++	int piece_index = 0;
++	for(piece_index = 0; pieces[piece_index] != NULL; piece_index++)
++	{
++		trim_flanking_whitespace(pieces[piece_index]);
++		char** times=split_on_separators(pieces[piece_index], "-", 1, 2, 0);
++		int time_count = 0;
++		for(time_count = 0; times[time_count] != 0 ; time_count++){}
++		if( time_count == 2 )
++		{
++			unsigned long start = parse_time(trim_flanking_whitespace(times[0]));
++			unsigned long end = parse_time(trim_flanking_whitespace(times[1]));
++			parsed[ piece_index*2 ] = (long)start;
++			parsed[ (piece_index*2)+1 ] = (long)end; /* NOTE(review): if time_count != 2 these two slots stay uninitialized yet remain before the -1 terminator */
++
++			free( times[1] );
++		}
++		if( time_count > 0) { free(times[0]); }
++
++		free(times);
++		free(pieces[piece_index]);
++	}
++	free(pieces);
++	parsed[ (num_pieces*2) ] = -1; // terminated with -1
++
++
++	// make sure there is no overlap -- this will invalidate ranges
++	int range_index = 0;
++	char overlap_found = 0;
++	for(range_index = 0; range_index < num_pieces; range_index++)
++	{
++		// now test for overlap
++		long start1 = parsed[ (range_index*2) ];
++		long end1 = parsed[ (range_index*2)+1 ];
++		end1= end1 < start1 ? end1 + (is_weekly_range ? 7*24*60*60 : 24*60*60) : end1; /* unwrap ranges crossing midnight / end of week for comparison */
++
++		int range_index2 = 0;
++		for(range_index2 = 0; range_index2 < num_pieces; range_index2++)
++		{
++			if(range_index2 != range_index)
++			{
++				long start2 = parsed[ (range_index2*2) ];
++				long end2 = parsed[ (range_index2*2)+1 ];
++				end2= end2 < start2 ? end2 + (is_weekly_range ? 7*24*60*60 : 24*60*60) : end2;
++				overlap_found = overlap_found || (start1 < end2 && end1 > start2 );
++			}
++		}
++	}
++
++	if(!overlap_found)
++	{
++		// sort ranges (selection sort on range start, swapping start/end pairs)
++		int sorted_index = 0;
++		while(parsed[sorted_index] != -1)
++		{
++			int next_start=-1;
++			int next_start_index=-1;
++			int test_index;
++			long tmp1;
++			long tmp2;
++			for(test_index=sorted_index; parsed[test_index] != -1; test_index=test_index+2)
++			{
++				next_start_index = next_start < 0 || next_start > parsed[test_index] ? test_index : next_start_index;
++				next_start = next_start < 0 || next_start > parsed[test_index] ? parsed[test_index] : next_start;
++			}
++			tmp1 = parsed[next_start_index];
++			tmp2 = parsed[next_start_index+1];
++			parsed[next_start_index] = parsed[sorted_index];
++			parsed[next_start_index+1] = parsed[sorted_index+1];
++			parsed[sorted_index] = tmp1;
++			parsed[sorted_index+1] = tmp2;
++			sorted_index = sorted_index + 2;
++		}
++	}
++	else
++	{
++		// de-allocate parsed, set to NULL
++		free(parsed);
++		parsed = NULL;
++	}
++
++	// merge time ranges where end of first = start of second
++	merge_adjacent_time_ranges(parsed, is_weekly_range); /* NOTE(review): parsed may be NULL here (overlap case above) and merge_adjacent_time_ranges dereferences it -- confirm */
++
++
++	// if always active, free & return NULL
++	int max_multiple = is_weekly_range ? 7 : 1;
++	if(parsed[0] == 0 && parsed[1] == max_multiple*24*60*60) /* NOTE(review): also dereferences possibly-NULL parsed */
++	{
++		free(parsed);
++		parsed = NULL;
++	}
++
++
++	//adjust so any range that crosses end of range is split in two
++	int num_range_indices=0;
++	for(num_range_indices=0; parsed[num_range_indices] != -1; num_range_indices++){} /* NOTE(review): dereferences parsed after the free/NULL above -- verify this path is unreachable for valid input */
++
++	long* adjusted_range = (long*)malloc((3+num_range_indices)*sizeof(long));
++	int ar_index = 0;
++	int old_index = 0;
++	if(parsed[num_range_indices-1] < parsed[0]) /* last end wraps before first start -> split the wrapped tail off as [0, end] */
++	{
++		adjusted_range[0] = 0;
++		adjusted_range[1] = parsed[num_range_indices-1];
++		ar_index = ar_index + 2;
++		parsed[num_range_indices-1] = -1;
++	}
++	for(old_index=0; parsed[old_index] != -1; old_index++)
++	{
++		adjusted_range[ar_index] = parsed[old_index];
++		ar_index++;
++	}
++
++	if(ar_index % 2 == 1 ) /* odd count: close the final split range at end of day/week */
++	{
++		adjusted_range[ar_index] = is_weekly_range ? 7*24*60*60 : 24*60*60;
++		ar_index++;
++	}
++	adjusted_range[ar_index] = -1;
++	free(parsed);
++
++	return adjusted_range;
++}
++
++
++
++void merge_adjacent_time_ranges(long* time_ranges, unsigned char is_weekly_range) /* compacts -1-terminated [start,end,...] list in place, fusing ranges that touch */
++{
++	int range_length = 0;
++	while(time_ranges[range_length] != -1){ range_length++; }
++	int* merged_indices = (int*)malloc((range_length+1)*sizeof(int)); /* indices of the boundary values that survive the merge */
++
++	int merged_index=0;
++	int next_index;
++	for(next_index=0; time_ranges[next_index] != -1; next_index++)
++	{
++		if(next_index == 0)
++		{
++			merged_indices[merged_index] = next_index;
++			merged_index++;
++		}
++		else if( time_ranges[next_index+1] == -1 )
++		{
++			merged_indices[merged_index] = next_index;
++			merged_index++;
++		}
++		else if( time_ranges[next_index] != time_ranges[next_index-1] && time_ranges[next_index] != time_ranges[next_index+1] )
++		{
++			merged_indices[merged_index] = next_index; /* keep boundary only when it does not equal a neighbor (i.e. ranges do not touch) */
++			merged_index++;
++		}
++	}
++	merged_indices[merged_index] = -1;
++
++	for(next_index=0; merged_indices[next_index] != -1; next_index++)
++	{
++		time_ranges[next_index] = time_ranges[ merged_indices[next_index] ];
++	}
++	time_ranges[next_index] = -1;
++	free(merged_indices);
++
++}
++
++
++
++
++/*
++ * assumes 24hr time, not am/pm, in format:
++ * (Day of week) hours:minutes:seconds
++ * if day of week is present, returns seconds since midnight on Sunday
++ * otherwise, seconds since midnight
++ */
++unsigned long parse_time(char* time_str)
++{
++	while((*time_str == ' ' || *time_str == '\t') && *time_str != '\0') { time_str++; } /* skip leading blanks */
++
++	int weekday = -1;
++	if(strlen(time_str) > 3) /* first three letters may name a weekday */
++	{
++		char wday_test[4];
++		memcpy(wday_test, time_str, 3);
++		wday_test[3] = '\0';
++		to_lowercase(wday_test);
++		if(strcmp(wday_test, "sun") == 0)
++		{
++			weekday = 0;
++		}
++		else if(strcmp(wday_test, "mon") == 0)
++		{
++			weekday = 1;
++		}
++		else if(strcmp(wday_test, "tue") == 0)
++		{
++			weekday = 2;
++		}
++		else if(strcmp(wday_test, "wed") == 0)
++		{
++			weekday = 3;
++		}
++		else if(strcmp(wday_test, "thu") == 0)
++		{
++			weekday = 4;
++		}
++		else if(strcmp(wday_test, "fri") == 0)
++		{
++			weekday = 5;
++		}
++		else if(strcmp(wday_test, "sat") == 0)
++		{
++			weekday = 6;
++		}
++	}
++
++	if(weekday >= 0)
++	{
++		time_str = time_str + 3;
++		while( (*time_str < 48 || *time_str > 57) && *time_str != '\0') { time_str++; } /* advance to first ASCII digit ('0'..'9') after the day name */
++	}
++
++	char** time_parts=split_on_separators(time_str, ":", 1, -1, 0);
++	unsigned long seconds = weekday < 0 ? 0 : ( ((unsigned long)(weekday))*60*60*24 ); /* weekday offset in seconds since midnight Sunday */
++	unsigned long tmp;
++	unsigned long multiple = 60*60; /* hours, then minutes, then seconds */
++
++	int tp_index = 0;
++	for(tp_index=0; time_parts[tp_index] != NULL; tp_index++)
++	{
++		sscanf(time_parts[tp_index], "%ld", &tmp);
++		seconds = seconds + (tmp*multiple);
++		multiple = (unsigned long)(multiple/60);
++		free(time_parts[tp_index]);
++	}
++	free(time_parts);
++
++	return seconds;
++}
++
++void to_lowercase(char* str) /* lower-cases str in place */
++{
++	int i;
++	for(i = 0; str[i] != '\0'; i++)
++	{
++		str[i] = tolower(str[i]);
++	}
++}
++
++/*
++ * line_str is the line to be parsed -- it is not modified in any way
++ * max_pieces indicates number of pieces to return, if negative this is determined dynamically
++ * include_remainder_at_max indicates whether the last piece, when max pieces are reached,
++ * should be what it would normally be (0) or the entire remainder of the line (1)
++ * if max_pieces < 0 this parameter is ignored
++ *
++ *
++ * returns all non-separator pieces in a line
++ * result is dynamically allocated, MUST be freed after call-- even if
++ * line is empty (you still get a valid char** pointer to to a NULL char*)
++ */
++char** split_on_separators(char* line_str, char* separators, int num_separators, int max_pieces, int include_remainder_at_max)
++{
++	char** split;
++
++	if(line_str != NULL) /* NULL input yields a list containing only the NULL terminator */
++	{
++		int split_index;
++		int non_separator_found;
++		char* dup_line;
++		char* start;
++
++		if(max_pieces < 0)
++		{
++			/* count number of separator characters in line -- this count + 1 is an upperbound on number of pieces */
++			int separator_count = 0;
++			int line_index;
++			for(line_index = 0; line_str[line_index] != '\0'; line_index++)
++			{
++				int sep_index;
++				int found = 0;
++				for(sep_index =0; found == 0 && sep_index < num_separators; sep_index++)
++				{
++					found = separators[sep_index] == line_str[line_index] ? 1 : 0;
++				}
++				separator_count = separator_count+ found;
++			}
++			max_pieces = separator_count + 1;
++		}
++		split = (char**)malloc((1+max_pieces)*sizeof(char*)); /* +1 slot for the terminating NULL pointer */
++		split_index = 0;
++		split[split_index] = NULL;
++
++
++		dup_line = strdup(line_str); /* work on a private copy -- caller's string is never modified */
++		start = dup_line;
++		non_separator_found = 0;
++		while(non_separator_found == 0) /* advance past any leading separators */
++		{
++			int matches = 0;
++			int sep_index;
++			for(sep_index =0; sep_index < num_separators; sep_index++)
++			{
++				matches = matches == 1 || separators[sep_index] == start[0] ? 1 : 0;
++			}
++			non_separator_found = matches==0 || start[0] == '\0' ? 1 : 0;
++			if(non_separator_found == 0)
++			{
++				start++;
++			}
++		}
++
++		while(start[0] != '\0' && split_index < max_pieces) /* extract pieces until string exhausted or max_pieces reached */
++		{
++			/* find first separator index */
++			int first_separator_index = 0;
++			int separator_found = 0;
++			while( separator_found == 0 )
++			{
++				int sep_index;
++				for(sep_index =0; separator_found == 0 && sep_index < num_separators; sep_index++)
++				{
++					separator_found = separators[sep_index] == start[first_separator_index] || start[first_separator_index] == '\0' ? 1 : 0;
++				}
++				if(separator_found == 0)
++				{
++					first_separator_index++;
++				}
++			}
++
++			/* copy next piece to split array */
++			if(first_separator_index > 0)
++			{
++				char* next_piece = NULL;
++				if(split_index +1 < max_pieces || include_remainder_at_max <= 0)
++				{
++					next_piece = (char*)malloc((first_separator_index+1)*sizeof(char));
++					memcpy(next_piece, start, first_separator_index);
++					next_piece[first_separator_index] = '\0';
++				}
++				else
++				{
++					next_piece = strdup(start); /* last allowed piece: keep the entire remainder */
++				}
++				split[split_index] = next_piece;
++				split[split_index+1] = NULL;
++				split_index++;
++			}
++
++
++			/* find next non-separator index, indicating start of next piece */
++			start = start+ first_separator_index;
++			non_separator_found = 0;
++			while(non_separator_found == 0)
++			{
++				int matches = 0;
++				int sep_index;
++				for(sep_index =0; sep_index < num_separators; sep_index++)
++				{
++					matches = matches == 1 || separators[sep_index] == start[0] ? 1 : 0;
++				}
++				non_separator_found = matches==0 || start[0] == '\0' ? 1 : 0;
++				if(non_separator_found == 0)
++				{
++					start++;
++				}
++			}
++		}
++		free(dup_line);
++	}
++	else
++	{
++		split = (char**)malloc((1)*sizeof(char*)); /* empty result: just the NULL terminator */
++		split[0] = NULL;
++	}
++	return split;
++}
++
++
++char* trim_flanking_whitespace(char* str) /* strips leading/trailing whitespace in place; returns str for chaining */
++{
++	int new_start = 0;
++	int new_length = 0;
++
++	char whitespace[5] = { ' ', '\t', '\n', '\r', '\0' };
++	int num_whitespace_chars = 4;
++
++
++	int str_index = 0;
++	int is_whitespace = 1;
++	int test;
++	while( (test = str[str_index]) != '\0' && is_whitespace == 1) /* scan forward past leading whitespace */
++	{
++		int whitespace_index;
++		is_whitespace = 0;
++		for(whitespace_index = 0; whitespace_index < num_whitespace_chars && is_whitespace == 0; whitespace_index++)
++		{
++			is_whitespace = test == whitespace[whitespace_index] ? 1 : 0;
++		}
++		str_index = is_whitespace == 1 ? str_index+1 : str_index;
++	}
++	new_start = str_index;
++
++
++	str_index = strlen(str) - 1;
++	is_whitespace = 1;
++	while( str_index >= new_start && is_whitespace == 1) /* scan backward over trailing whitespace */
++	{
++		int whitespace_index;
++		is_whitespace = 0;
++		for(whitespace_index = 0; whitespace_index < num_whitespace_chars && is_whitespace == 0; whitespace_index++)
++		{
++			is_whitespace = str[str_index] == whitespace[whitespace_index] ? 1 : 0;
++		}
++		str_index = is_whitespace == 1 ? str_index-1 : str_index;
++	}
++	new_length = str[new_start] == '\0' ? 0 : str_index + 1 - new_start;
++
++
++	if(new_start > 0) /* shift trimmed content to the front of the buffer */
++	{
++		for(str_index = 0; str_index < new_length; str_index++)
++		{
++			str[str_index] = str[str_index+new_start];
++		}
++	}
++	str[new_length] = 0;
++	return str;
++}
++
++void set_kernel_timezone(void) /* pushes userspace's UTC offset into the kernel via settimeofday so kernel-side matching can use local time */
++{
++	time_t now;
++	struct tm* utc_info;
++	struct tm* tz_info;
++	int utc_day;
++	int utc_hour;
++	int utc_minute;
++	int tz_day;
++	int tz_hour;
++	int tz_minute;
++	int minuteswest;
++
++	struct timeval tv;
++	struct timezone old_tz;
++	struct timezone new_tz;
++
++	time(&now);
++	utc_info = gmtime(&now);
++	utc_day = utc_info->tm_mday;
++	utc_hour = utc_info->tm_hour;
++	utc_minute = utc_info->tm_min;
++	tz_info = localtime(&now); /* NOTE(review): localtime may clobber the static buffer gmtime returned; values were copied out above, so OK */
++	tz_day = tz_info->tm_mday;
++	tz_hour = tz_info->tm_hour;
++	tz_minute = tz_info->tm_min;
++
++	utc_day = utc_day < tz_day - 1 ? tz_day + 1 : utc_day; /* crude month-boundary normalization -- works because real offsets are < 24h */
++	tz_day = tz_day < utc_day - 1 ? utc_day + 1 : tz_day;
++
++	minuteswest = (24*60*utc_day + 60*utc_hour + utc_minute) - (24*60*tz_day + 60*tz_hour + tz_minute) ;
++	new_tz.tz_minuteswest = minuteswest;
++	new_tz.tz_dsttime = 0;
++
++	/* Get tv to pass to settimeofday(2) to be sure we avoid hour-sized warp */
++	/* (see gettimeofday(2) man page, or /usr/src/linux/kernel/time.c) */
++	gettimeofday(&tv, &old_tz);
++
++	/* set timezone */
++	settimeofday(&tv, &new_tz);
++
++}
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/extensions/.timerange-test 2014-12-20 16:55:22.741446012 +0800
+@@ -0,0 +1,2 @@
++#!/bin/sh
++[ -f $KERNEL_DIR/include/linux/netfilter_ipv4/ipt_timerange.h ] && echo timerange
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/extensions/.bandwidth-test 2014-12-20 16:55:22.843446028 +0800
+@@ -0,0 +1,2 @@
++#!/bin/sh
++[ -f $KERNEL_DIR/include/linux/netfilter_ipv4/ipt_bandwidth.h ] && echo bandwidth
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/extensions/libipt_weburl.c 2014-12-20 16:55:22.298445944 +0800
+@@ -0,0 +1,290 @@
++/* weburl -- An iptables extension to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++#include <stdio.h>
++#include <netdb.h>
++#include <string.h>
++#include <stdlib.h>
++#include <getopt.h>
++
++
++/*
++ * in iptables 1.4.0 and higher, iptables.h includes xtables.h, which
++ * we can use to check whether we need to deal with the new requirements
++ * in pre-processor directives below
++ */
++#include <iptables.h>
++#include <linux/netfilter_ipv4/ipt_weburl.h>
++
++#ifdef _XTABLES_H
++ #define iptables_rule_match xtables_rule_match
++ #define iptables_match xtables_match
++ #define iptables_target xtables_target
++ #define ipt_tryload xt_tryload
++#endif
++
++/*
++ * XTABLES_VERSION_CODE is only defined in versions 1.4.1 and later, which
++ * also require the use of xtables_register_match
++ *
++ * Version 1.4.0 uses register_match like previous versions
++ */
++#ifdef XTABLES_VERSION_CODE
++ #define register_match xtables_register_match
++#endif
++
++
++/* utility functions necessary for module to work across multiple iptables versions */
++static int my_check_inverse(const char option[], int* invert, int *my_optind, int argc);
++static void param_problem_exit_error(char* msg);
++
++
++
++/* Function which prints out usage message. */
++static void help(void) /* prints usage for the weburl match options */
++{
++	printf(	"weburl options:\n  --contains [!] [STRING]\n  --contains_regex [!] [REGEX]\n  --matches_exactly [!] [STRING]\n  --domain_only\n  --path_only\n");
++}
++
++static struct option opts[] =
++{
++ { .name = "contains", .has_arg = 1, .flag = 0, .val = WEBURL_CONTAINS_TYPE }, //string
++ { .name = "contains_regex", .has_arg = 1, .flag = 0, .val = WEBURL_REGEX_TYPE }, //regex
++ { .name = "matches_exactly", .has_arg = 1, .flag = 0, .val = WEBURL_EXACT_TYPE }, //exact string match
++ { .name = "domain_only", .has_arg = 0, .flag = 0, .val = WEBURL_DOMAIN_PART }, //only match domain portion of url
++ { .name = "path_only", .has_arg = 0, .flag = 0, .val = WEBURL_PATH_PART }, //only match path portion of url
++ { .name = 0 }
++};
++
++
++/* Function which parses command options; returns true if it
++ ate an option */
++static int parse( int c,
++ char **argv,
++ int invert,
++ unsigned int *flags,
++#ifdef _XTABLES_H
++ const void *entry,
++#else
++ const struct ipt_entry *entry,
++ unsigned int *nfcache,
++#endif
++ struct ipt_entry_match **match
++ )
++{
++ struct ipt_weburl_info *info = (struct ipt_weburl_info *)(*match)->data;
++ int valid_arg = 0;
++
++ if(*flags < 10)
++ {
++ info->match_part = WEBURL_ALL_PART;
++ }
++
++ switch (c)
++ {
++ case WEBURL_CONTAINS_TYPE:
++ case WEBURL_REGEX_TYPE:
++ case WEBURL_EXACT_TYPE:
++ info->match_type = c;
++
++ //test whether to invert rule
++ my_check_inverse(optarg, &invert, &optind, 0);
++ info->invert = invert ? 1 : 0;
++
++			//test that test string is reasonable length, then copy to info
++ int testlen = strlen(argv[optind-1]);
++ if(testlen > 0 && testlen < MAX_TEST_STR)
++ {
++ strcpy(info->test_str, argv[optind-1]);
++ }
++ else if(testlen >= MAX_TEST_STR)
++ {
++ char err[100];
++ sprintf(err, "Parameter definition is too long, must be less than %d characters", MAX_TEST_STR);
++ param_problem_exit_error(err);
++ }
++ else
++ {
++ param_problem_exit_error("Parameter definition is incomplete");
++ }
++
++ if(*flags % 10 == 1)
++ {
++ param_problem_exit_error("You may only specify one string/pattern to match");
++ }
++ *flags = *flags + 1;
++
++ valid_arg = 1;
++ break;
++
++ case WEBURL_DOMAIN_PART:
++ case WEBURL_PATH_PART:
++ info->match_part = c;
++ if(*flags >= 10)
++ {
++ param_problem_exit_error("You may specify at most one part of the url to match:\n\t--domain_only, --path_only or neither (to match full url)\n");
++ }
++ *flags = *flags+10;
++
++ valid_arg = 1;
++ break;
++ }
++
++ return valid_arg;
++}
++
++
++
++static void print_weburl_args( struct ipt_weburl_info* info )
++{
++ //invert
++ if(info->invert > 0)
++ {
++ printf("! ");
++ }
++ //match type
++ switch (info->match_type)
++ {
++ case WEBURL_CONTAINS_TYPE:
++ printf("--contains ");
++ break;
++ case WEBURL_REGEX_TYPE:
++ printf("--contains_regex ");
++ break;
++ case WEBURL_EXACT_TYPE:
++ printf("--matches_exactly ");
++ break;
++ }
++ //test string
++ printf("%s ", info->test_str);
++
++ //match part
++ switch(info->match_part)
++ {
++ case WEBURL_DOMAIN_PART:
++ printf("--domain_only ");
++ break;
++ case WEBURL_PATH_PART:
++ printf("--path_only ");
++ break;
++ case WEBURL_ALL_PART:
++ //print nothing
++ break;
++ }
++
++}
++
++/* Final check; must have specified a test string with --contains, --contains_regex or --matches_exactly. */
++static void final_check(unsigned int flags)
++{
++ if (flags %10 == 0)
++ {
++ param_problem_exit_error("You must specify '--contains' or '--contains_regex' or '--matches_exactly'");
++ }
++}
++
++/* Prints out the matchinfo. */
++#ifdef _XTABLES_H
++static void print(const void *ip, const struct xt_entry_match *match, int numeric)
++#else
++static void print(const struct ipt_ip *ip, const struct ipt_entry_match *match, int numeric)
++#endif
++{
++ printf("WEBURL ");
++ struct ipt_weburl_info *info = (struct ipt_weburl_info *)match->data;
++
++ print_weburl_args(info);
++}
++
++/* Saves the union ipt_matchinfo in parsable form to stdout. */
++#ifdef _XTABLES_H
++static void save(const void *ip, const struct xt_entry_match *match)
++#else
++static void save(const struct ipt_ip *ip, const struct ipt_entry_match *match)
++#endif
++{
++ struct ipt_weburl_info *info = (struct ipt_weburl_info *)match->data;
++ print_weburl_args(info);
++}
++
++static struct iptables_match weburl =
++{
++ .next = NULL,
++ .name = "weburl",
++ #ifdef XTABLES_VERSION_CODE
++ .version = XTABLES_VERSION,
++ #else
++ .version = IPTABLES_VERSION,
++ #endif
++ .size = XT_ALIGN(sizeof(struct ipt_weburl_info)),
++ .userspacesize = XT_ALIGN(sizeof(struct ipt_weburl_info)),
++ .help = &help,
++ .parse = &parse,
++ .final_check = &final_check,
++ .print = &print,
++ .save = &save,
++ .extra_opts = opts
++};
++
++void _init(void)
++{
++ register_match(&weburl);
++}
++
++
++#ifndef TRUE
++#define TRUE 1
++#endif
++#ifndef FALSE
++#define FALSE 0
++#endif
++static int my_check_inverse(const char option[], int* invert, int *my_optind, int argc)
++{
++ if (option && strcmp(option, "!") == 0)
++ {
++ if (*invert)
++ {
++ param_problem_exit_error("Multiple `!' flags not allowed");
++ }
++ *invert = TRUE;
++ if (my_optind != NULL)
++ {
++ ++*my_optind;
++ if (argc && *my_optind > argc)
++ {
++ param_problem_exit_error("no argument following `!'");
++ }
++ }
++ return TRUE;
++ }
++ return FALSE;
++}
++static void param_problem_exit_error(char* msg)
++{
++ #ifdef xtables_error
++ xtables_error(PARAMETER_PROBLEM, msg);
++ #else
++ exit_error(PARAMETER_PROBLEM, msg);
++ #endif
++}
++
++
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/extensions/.webmon-test 2014-12-20 16:55:22.566445985 +0800
+@@ -0,0 +1,2 @@
++#!/bin/sh
++[ -f $KERNEL_DIR/include/linux/netfilter_ipv4/ipt_webmon.h ] && echo webmon
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/include/linux/netfilter_ipv4/ipt_bandwidth.h 2014-12-20 16:55:22.842446028 +0800
+@@ -0,0 +1,106 @@
++/* bandwidth -- An iptables extension for bandwidth monitoring/control
++ * Can be used to efficiently monitor bandwidth and/or implement bandwidth quotas
++ * Can be queried using the iptbwctl userspace library
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _IPT_BANDWIDTH_H
++#define _IPT_BANDWIDTH_H
++
++/*flags -- first three don't map to parameters the rest do */
++#define BANDWIDTH_INITIALIZED 1
++#define BANDWIDTH_REQUIRES_SUBNET 2
++#define BANDWIDTH_SUBNET 4
++#define BANDWIDTH_CMP 8
++#define BANDWIDTH_CURRENT 16
++#define BANDWIDTH_RESET_INTERVAL 32
++#define BANDWIDTH_RESET_TIME 64
++#define BANDWIDTH_LAST_BACKUP 128
++
++
++/* parameter defs that don't map to flag bits */
++#define BANDWIDTH_TYPE 70
++#define BANDWIDTH_ID 71
++#define BANDWIDTH_GT 72
++#define BANDWIDTH_LT 73
++#define BANDWIDTH_MONITOR 74
++#define BANDWIDTH_CHECK 75
++#define BANDWIDTH_CHECK_NOSWAP 76
++#define BANDWIDTH_CHECK_SWAP 77
++#define BANDWIDTH_NUM_INTERVALS 78
++
++/* possible reset intervals */
++#define BANDWIDTH_MINUTE 80
++#define BANDWIDTH_HOUR 81
++#define BANDWIDTH_DAY 82
++#define BANDWIDTH_WEEK 83
++#define BANDWIDTH_MONTH 84
++#define BANDWIDTH_NEVER 85
++
++/* possible monitoring types */
++#define BANDWIDTH_COMBINED 90
++#define BANDWIDTH_INDIVIDUAL_SRC 91
++#define BANDWIDTH_INDIVIDUAL_DST 92
++#define BANDWIDTH_INDIVIDUAL_LOCAL 93
++#define BANDWIDTH_INDIVIDUAL_REMOTE 94
++
++
++
++/* socket id parameters (for userspace i/o) */
++#define BANDWIDTH_SET 2048
++#define BANDWIDTH_GET 2049
++
++/* max id length */
++#define BANDWIDTH_MAX_ID_LENGTH 50
++
++/* 4 bytes for total number of entries, 100 entries of 12 bytes each, + 1 byte indicating whether all have been dumped */
++#define BANDWIDTH_QUERY_LENGTH 1205
++#define BANDWIDTH_ENTRY_LENGTH 12
++
++
++struct ipt_bandwidth_info
++{
++ char id[BANDWIDTH_MAX_ID_LENGTH];
++ unsigned char type;
++ unsigned char check_type;
++ uint32_t local_subnet;
++ uint32_t local_subnet_mask;
++
++ unsigned char cmp;
++ unsigned char reset_is_constant_interval;
++ time_t reset_interval; //specific fixed type (see above) or interval length in seconds
++ time_t reset_time; //seconds from start of month/week/day/hour/minute to do reset, or start point of interval if it is a constant interval
++ uint64_t bandwidth_cutoff;
++ uint64_t current_bandwidth;
++ time_t next_reset;
++ time_t previous_reset;
++ time_t last_backup_time;
++
++ uint32_t num_intervals_to_save;
++
++
++ unsigned long hashed_id;
++ void* iam;
++ uint64_t* combined_bw;
++ struct ipt_bandwidth_info* non_const_self;
++ unsigned long* ref_count;
++
++
++};
++#endif /*_IPT_BANDWIDTH_H*/
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/include/linux/netfilter_ipv4/ipt_weburl.h 2014-12-20 16:55:22.301445944 +0800
+@@ -0,0 +1,45 @@
++/* weburl -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_WEBURL_H
++#define _IPT_WEBURL_H
++
++
++#define MAX_TEST_STR 1024
++
++#define WEBURL_CONTAINS_TYPE 1
++#define WEBURL_REGEX_TYPE 2
++#define WEBURL_EXACT_TYPE 3
++#define WEBURL_ALL_PART 4
++#define WEBURL_DOMAIN_PART 5
++#define WEBURL_PATH_PART 6
++
++struct ipt_weburl_info
++{
++ char test_str[MAX_TEST_STR];
++ unsigned char match_type;
++ unsigned char match_part;
++ unsigned char invert;
++};
++#endif /*_IPT_WEBURL_H*/
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/include/linux/netfilter_ipv4/ipt_webmon.h 2014-12-20 16:55:22.565445985 +0800
+@@ -0,0 +1,63 @@
++/* webmon -- A netfilter module to match URLs in HTTP requests
++ * This module can match using string match or regular expressions
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2008-2010 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_WEBMON_H
++#define _IPT_WEBMON_H
++
++
++#define WEBMON_MAX_IPS 256
++#define WEBMON_MAX_IP_RANGES 16
++
++#define WEBMON_EXCLUDE 1
++#define WEBMON_INCLUDE 2
++
++#define WEBMON_MAXDOMAIN 4
++#define WEBMON_MAXSEARCH 8
++
++#define WEBMON_DOMAIN 16
++#define WEBMON_SEARCH 32
++
++
++#define WEBMON_SET 3064
++
++struct ipt_webmon_ip_range
++{
++ uint32_t start;
++ uint32_t end;
++};
++
++struct ipt_webmon_info
++{
++ uint32_t max_domains;
++ uint32_t max_searches;
++ uint32_t exclude_ips[WEBMON_MAX_IPS];
++ struct ipt_webmon_ip_range exclude_ranges[WEBMON_MAX_IP_RANGES];
++ uint32_t num_exclude_ips;
++ uint32_t num_exclude_ranges;
++ unsigned char exclude_type;
++ uint32_t* ref_count;
++
++};
++
++#endif /*_IPT_WEBMON_H*/
+--- /dev/null 2014-12-20 09:51:07.835224955 +0800
++++ iptables.new/include/linux/netfilter_ipv4/ipt_timerange.h 2014-12-20 16:55:22.740446012 +0800
+@@ -0,0 +1,43 @@
++/* timerange -- An iptables extension to match multiple timeranges within a week
++ * Originally designed for use with Gargoyle router firmware (gargoyle-router.com)
++ *
++ *
++ * Copyright © 2009 by Eric Bishop <eric@gargoyle-router.com>
++ *
++ * This file is free software: you may copy, redistribute and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation, either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++
++
++
++#ifndef _IPT_TIMERANGE_H
++#define _IPT_TIMERANGE_H
++
++
++#define RANGE_LENGTH 51
++
++#define HOURS 1
++#define WEEKDAYS 2
++#define DAYS_HOURS (HOURS+WEEKDAYS)
++#define WEEKLY_RANGE 4
++
++
++struct ipt_timerange_info
++{
++ long ranges[RANGE_LENGTH];
++ char days[7];
++ char type;
++ unsigned char invert;
++};
++#endif /*_IPT_TIMERANGE_H*/
Index: include/netfilter.mk
===================================================================
--- include/netfilter.mk (revision 46316)
+++ include/netfilter.mk (working copy)
@@ -102,7 +102,11 @@
$(eval $(call nf_add,IPT_FILTER,CONFIG_NETFILTER_XT_MATCH_STRING, $(P_XT)xt_string))
+# imq
+$(eval $(call nf_add,IPT_IMQ,CONFIG_IP_NF_TARGET_IMQ, $(P_V4)ipt_IMQ))
+$(eval $(call nf_add,IPT_IMQ,CONFIG_NETFILTER_XT_TARGET_IMQ, $(P_XT)xt_IMQ))
+
# ipopt
$(eval $(call nf_add,IPT_IPOPT,CONFIG_NETFILTER_XT_MATCH_DSCP, $(P_XT)xt_dscp))
@@ -155,7 +159,7 @@
$(eval $(if $(NF_KMOD),,$(call nf_add,IPT_IPV6,CONFIG_IP6_NF_IPTABLES, ip6t_icmp6)))
-
+$(eval $(call nf_add,IPT_IPV6,CONFIG_IP6_NF_TARGET_IMQ, $(P_V6)ip6t_IMQ))
$(eval $(call nf_add,IPT_IPV6,CONFIG_IP6_NF_TARGET_LOG, $(P_V6)ip6t_LOG))
$(eval $(call nf_add,IPT_IPV6,CONFIG_IP6_NF_TARGET_REJECT, $(P_V6)ip6t_REJECT))
$(eval $(call nf_add,IPT_IPV6,CONFIG_IP6_NF_TARGET_REJECT, $(P_V6)nf_reject_ipv6))
@@ -350,6 +354,7 @@
IPT_BUILTIN += $(IPT_CONNTRACK_EXTRA-y)
IPT_BUILTIN += $(IPT_EXTRA-y)
IPT_BUILTIN += $(IPT_FILTER-y)
+IPT_BUILTIN += $(IPT_IMQ-y)
IPT_BUILTIN += $(IPT_IPOPT-y)
IPT_BUILTIN += $(IPT_IPRANGE-y)
IPT_BUILTIN += $(IPT_CLUSTER-y)
@@ -375,3 +380,23 @@
IPT_BUILTIN += $(EBTABLES_WATCHERS-y)
endif # __inc_netfilter
+
+
+IPT_WEBURL-m :=
+IPT_WEBURL-$(CONFIG_IP_NF_MATCH_WEBURL) += $(P_V4)ipt_weburl
+IPT_BUILTIN += $(IPT_WEBURL-y)
+
+
+IPT_WEBMON-m :=
+IPT_WEBMON-$(CONFIG_IP_NF_MATCH_WEBMON) += $(P_V4)ipt_webmon
+IPT_BUILTIN += $(IPT_WEBMON-y)
+
+
+IPT_TIMERANGE-m :=
+IPT_TIMERANGE-$(CONFIG_IP_NF_MATCH_TIMERANGE) += $(P_V4)ipt_timerange
+IPT_BUILTIN += $(IPT_TIMERANGE-y)
+
+
+IPT_BANDWIDTH-m :=
+IPT_BANDWIDTH-$(CONFIG_IP_NF_MATCH_BANDWIDTH) += $(P_V4)ipt_bandwidth
+IPT_BUILTIN += $(IPT_BANDWIDTH-y)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment