| 1 | --- a/include/linux/netfilter_ipv4/ip_tables.h |
| 2 | +++ b/include/linux/netfilter_ipv4/ip_tables.h |
| 3 | @@ -62,6 +62,7 @@ struct ipt_ip { |
| 4 | #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ |
| 5 | #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ |
| 6 | #define IPT_F_MASK 0x03 /* All possible flag bits mask. */ |
| 7 | +#define IPT_F_NO_DEF_MATCH 0x80 /* Internal: no default match rules present */ |
| 8 | |
| 9 | /* Values for "inv" field in struct ipt_ip. */ |
| 10 | #define IPT_INV_VIA_IN 0x01 /* Invert the sense of IN IFACE. */ |
| 11 | --- a/net/ipv4/netfilter/ip_tables.c |
| 12 | +++ b/net/ipv4/netfilter/ip_tables.c |
| 13 | @@ -88,6 +88,9 @@ ip_packet_match(const struct iphdr *ip, |
| 14 | |
| 15 | #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) |
| 16 | |
| 17 | + if (ipinfo->flags & IPT_F_NO_DEF_MATCH) |
| 18 | + return true; |
| 19 | + |
| 20 | if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, |
| 21 | IPT_INV_SRCIP) |
| 22 | || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, |
| 23 | @@ -138,13 +141,35 @@ ip_packet_match(const struct iphdr *ip, |
| 24 | return false; |
| 25 | } |
| 26 | |
| 27 | +#undef FWINV |
| 28 | return true; |
| 29 | } |
| 30 | |
| 31 | static bool |
| 32 | -ip_checkentry(const struct ipt_ip *ip) |
| 33 | +ip_checkentry(struct ipt_ip *ip) |
| 34 | { |
| 35 | - if (ip->flags & ~IPT_F_MASK) { |
| 36 | +#define FWINV(bool, invflg) ((bool) || (ip->invflags & (invflg))) |
| 37 | + |
| 38 | + if (FWINV(ip->smsk.s_addr, IPT_INV_SRCIP) || |
| 39 | + FWINV(ip->dmsk.s_addr, IPT_INV_DSTIP)) |
| 40 | + goto has_match_rules; |
| 41 | + |
| 42 | + if (FWINV(!!((const unsigned long *)ip->iniface_mask)[0], |
| 43 | + IPT_INV_VIA_IN) || |
| 44 | + FWINV(!!((const unsigned long *)ip->outiface_mask)[0], |
| 45 | + IPT_INV_VIA_OUT)) |
| 46 | + goto has_match_rules; |
| 47 | + |
| 48 | + if (FWINV(ip->proto, IPT_INV_PROTO)) |
| 49 | + goto has_match_rules; |
| 50 | + |
| 51 | + if (FWINV(ip->flags&IPT_F_FRAG, IPT_INV_FRAG)) |
| 52 | + goto has_match_rules; |
| 53 | + |
| 54 | + ip->flags |= IPT_F_NO_DEF_MATCH; |
| 55 | + |
| 56 | +has_match_rules: |
| 57 | + if (ip->flags & ~(IPT_F_MASK|IPT_F_NO_DEF_MATCH)) { |
| 58 | -duprintf("Unknown flag bits set: %08X\n", |
| 58 | -ip->flags & ~IPT_F_MASK); |
| 59 | +duprintf("Unknown flag bits set: %08X\n", |
| 59 | +ip->flags & ~(IPT_F_MASK|IPT_F_NO_DEF_MATCH)); |
| 60 | return false; |
| 61 | @@ -154,6 +179,8 @@ ip_checkentry(const struct ipt_ip *ip) |
| 62 | ip->invflags & ~IPT_INV_MASK); |
| 63 | return false; |
| 64 | } |
| 65 | + |
| 66 | +#undef FWINV |
| 67 | return true; |
| 68 | } |
| 69 | |
| 70 | @@ -196,7 +223,6 @@ static inline bool unconditional(const s |
| 71 | static const struct ipt_ip uncond; |
| 72 | |
| 73 | return memcmp(ip, &uncond, sizeof(uncond)) == 0; |
| 74 | -#undef FWINV |
| 75 | } |
| 76 | |
| 77 | #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ |
| 78 | @@ -321,8 +347,28 @@ ipt_do_table(struct sk_buff *skb, |
| 79 | struct xt_match_param mtpar; |
| 80 | struct xt_target_param tgpar; |
| 81 | |
| 82 | - /* Initialization */ |
| 83 | ip = ip_hdr(skb); |
| 84 | + |
| 85 | + IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
| 86 | + xt_info_rdlock_bh(); |
| 87 | + private = table->private; |
| 88 | + table_base = private->entries[smp_processor_id()]; |
| 89 | + e = get_entry(table_base, private->hook_entry[hook]); |
| 90 | + |
| 91 | + if (e->target_offset <= sizeof(struct ipt_entry) && |
| 92 | + (e->ip.flags & IPT_F_NO_DEF_MATCH)) { |
| 93 | + struct ipt_entry_target *t = ipt_get_target(e); |
| 94 | + if (!t->u.kernel.target->target) { |
| 95 | + int v = ((struct ipt_standard_target *)t)->verdict; |
| 96 | + if ((v < 0) && (v != IPT_RETURN)) { |
| 97 | + ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); |
| 98 | + xt_info_rdunlock_bh(); |
| 99 | + return (unsigned)(-v) - 1; |
| 100 | + } |
| 101 | + } |
| 102 | + } |
| 103 | + |
| 104 | + /* Initialization */ |
| 105 | indev = in ? in->name : nulldevname; |
| 106 | outdev = out ? out->name : nulldevname; |
| 107 | /* We handle fragments by dealing with the first fragment as |
| 108 | @@ -339,13 +385,6 @@ ipt_do_table(struct sk_buff *skb, |
| 109 | mtpar.family = tgpar.family = NFPROTO_IPV4; |
| 110 | mtpar.hooknum = tgpar.hooknum = hook; |
| 111 | |
| 112 | - IP_NF_ASSERT(table->valid_hooks & (1 << hook)); |
| 113 | - xt_info_rdlock_bh(); |
| 114 | - private = table->private; |
| 115 | - table_base = private->entries[smp_processor_id()]; |
| 116 | - |
| 117 | - e = get_entry(table_base, private->hook_entry[hook]); |
| 118 | - |
| 119 | /* For return from builtin chain */ |
| 120 | back = get_entry(table_base, private->underflow[hook]); |
| 121 | |
| 122 | @@ -992,6 +1031,7 @@ copy_entries_to_user(unsigned int total_ |
| 123 | unsigned int i; |
| 124 | const struct ipt_entry_match *m; |
| 125 | const struct ipt_entry_target *t; |
| 126 | + u8 flags; |
| 127 | |
| 128 | e = (struct ipt_entry *)(loc_cpu_entry + off); |
| 129 | if (copy_to_user(userptr + off |
| 130 | @@ -1001,6 +1041,14 @@ copy_entries_to_user(unsigned int total_ |
| 131 | ret = -EFAULT; |
| 132 | goto free_counters; |
| 133 | } |
| 134 | + |
| 135 | + flags = e->ip.flags & ~IPT_F_NO_DEF_MATCH; |
| 136 | + if (copy_to_user(userptr + off |
| 137 | + + offsetof(struct ipt_entry, ip.flags), |
| 138 | + &flags, sizeof(flags)) != 0) { |
| 139 | + ret = -EFAULT; |
| 140 | + goto free_counters; |
| 141 | + } |
| 142 | |
| 143 | for (i = sizeof(struct ipt_entry); |
| 144 | i < e->target_offset; |
| 145 | |