/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/esp.c
 */

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
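
/*
 * Note: ESP_SKB_CB(skb)->tmp keeps the esp_alloc_tmp() buffer reachable
 * from the skb so that the asynchronous completion callbacks below can
 * kfree() it.
 */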

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
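/*
 * Resulting layout of the temporary buffer (a sketch; the pad gaps
 * depend on the algorithm's alignmask and the tfm context alignment):
 *
 *	tmp -> | IV | pad | request + crypto ctx | pad | SG[nfrags] |
 */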
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
{
	unsigned int len;

	len = crypto_aead_ivsize(aead);
	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct sk_buff *trailer;
	void *tmp;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	u8 *iv;
	u8 *tail;
	struct esp_data *esp = x->data;

	/* skb is pure payload to encrypt */
	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;

	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);

	err = skb_cow_data(skb, clen - skb->len + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	tmp = esp_alloc_tmp(aead, nfrags + 1);
	if (!tmp)
		goto error;

	iv = esp_tmp_iv(aead, tmp);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + 1;

	/* Fill the self-describing padding: bytes 1, 2, 3, ... followed by
	 * the pad length and the next header byte (stashed by xfrm at the
	 * mac header position). */
	tail = skb_tail_pointer(trailer);
	do {
		int i;

		for (i = 0; i < clen - skb->len - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
	tail[clen - skb->len - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);
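
	/*
	 * The skb now ends with the ESP trailer; the ICV space is only
	 * reserved here and is written later by the AEAD transform:
	 *
	 *	| 1 | 2 | ... | pad length | next header | ICV (alen) |
	 */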

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);
	sg_init_one(asg, esph, sizeof(*esph));

	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
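	/* The givcrypt layer generates the per-packet IV into
	 * esph->enc_data, using the output sequence number as the seed. */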
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;

	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);

error:
	return err;
}

static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

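	/* The two bytes just in front of the ICV are the pad length and
	 * next header fields of the ESP trailer. */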
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy (IPPROTO_NONE) packets; returning -EINVAL
	 * here makes xfrm discard the packet without reporting an error. */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags;
	int ret = 0;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags + 1);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	iv = esp_tmp_iv(aead, tmp);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + 1;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong; check against other implementations. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
	sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, sizeof(*esph));

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	u32 rem;

	mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);

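	/* ((blksize - 1) & 7) + 1 folds blksize into the range 1..8,
	 * i.e. its residue mod 8, with multiples of 8 mapping to 8. */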
	if (x->props.mode != XFRM_MODE_TUNNEL) {
		u32 padsize = ((blksize - 1) & 7) + 1;
		mtu -= blksize - padsize;
		mtu += min_t(u32, blksize - padsize, rem);
	}

	return mtu - 2;
}

static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		     u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_DEST_UNREACH &&
	    type != ICMPV6_PKT_TOOBIG)
		return;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return;
	printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
	       ntohl(esph->spi), &iph->daddr);
	xfrm_state_put(x);
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}

static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;
	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
		     x->aalg ? x->aalg->alg_name : "digest_null",
		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	esp->aead = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

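	/*
	 * Key blob layout expected by authenc(), built below:
	 *
	 *	| rtattr: CRYPTO_AUTHENC_KEYA_PARAM (enckeylen) | auth key | enc key |
	 */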
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = esp->aead;

	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
				(sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}

static const struct xfrm_type esp6_type =
{
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static const struct inet6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.err_handler	= esp6_err,
	.flags		= INET6_PROTO_NOPOLICY,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ipv6 esp init: can't add protocol\n");
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ipv6 esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n");
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);