Root/
/*
 * iovec manipulation routines.
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Andrew Lunn	:	Errors in iovec copying.
 *	Pedro Roque	:	Added memcpy_fromiovecend and
 *				csum_..._fromiovecend.
 *	Andi Kleen	:	fixed error handling for 2.1
 *	Alexey Kuznetsov:	2.1 optimisations
 *	Andi Kleen	:	Fix csum*fromiovecend for IPv6.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <net/sock.h>
/*
 *	Verify iovec. The caller must ensure that the iovec is big enough
 *	to hold the message iovec.
 *
 *	Save time not doing access_ok. copy_*_user will make this work
 *	in any case.
 */

/**
 * verify_iovec - import a user msghdr's address and iovec into kernel space
 * @m:       message header whose msg_name/msg_iov still point at user memory
 * @iov:     kernel buffer receiving the iovec array; caller guarantees it
 *           holds at least m->msg_iovlen entries
 * @address: kernel buffer receiving the socket address, if one is present
 * @mode:    VERIFY_READ when the address must be copied in from user space
 *
 * Returns the total payload length described by the iovec, clamped so it
 * never exceeds INT_MAX, or a negative errno on failure.
 */
int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
{
	int size, ct, err;

	if (m->msg_namelen) {
		if (mode == VERIFY_READ) {
			void __user *namep;
			namep = (void __user __force *) m->msg_name;
			err = move_addr_to_kernel(namep, m->msg_namelen,
						  address);
			if (err < 0)
				return err;
		}
		/* From here on msg_name refers to the kernel-side copy. */
		if (m->msg_name)
			m->msg_name = address;
	} else {
		m->msg_name = NULL;
	}

	/* NOTE(review): assumes the caller has bounded msg_iovlen (e.g.
	 * against UIO_MAXIOV) so this multiply cannot overflow — confirm. */
	size = m->msg_iovlen * sizeof(struct iovec);
	if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
		return -EFAULT;

	m->msg_iov = iov;
	err = 0;

	for (ct = 0; ct < m->msg_iovlen; ct++) {
		size_t len = iov[ct].iov_len;

		/*
		 * Clamp each segment so the running total (accumulated in
		 * err) can never exceed INT_MAX; the truncated length is
		 * written back so the iovec stays consistent with the
		 * total we return.
		 */
		if (len > INT_MAX - err) {
			len = INT_MAX - err;
			iov[ct].iov_len = len;
		}
		err += len;
	}

	return err;
}
76 | |
77 | /* |
78 | * Copy kernel to iovec. Returns -EFAULT on error. |
79 | */ |
80 | |
81 | int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, |
82 | int offset, int len) |
83 | { |
84 | int copy; |
85 | for (; len > 0; ++iov) { |
86 | /* Skip over the finished iovecs */ |
87 | if (unlikely(offset >= iov->iov_len)) { |
88 | offset -= iov->iov_len; |
89 | continue; |
90 | } |
91 | copy = min_t(unsigned int, iov->iov_len - offset, len); |
92 | if (copy_to_user(iov->iov_base + offset, kdata, copy)) |
93 | return -EFAULT; |
94 | offset = 0; |
95 | kdata += copy; |
96 | len -= copy; |
97 | } |
98 | |
99 | return 0; |
100 | } |
101 | EXPORT_SYMBOL(memcpy_toiovecend); |
102 | |
103 | /* |
104 | * Copy iovec to kernel. Returns -EFAULT on error. |
105 | */ |
106 | |
107 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, |
108 | int offset, int len) |
109 | { |
110 | /* Skip over the finished iovecs */ |
111 | while (offset >= iov->iov_len) { |
112 | offset -= iov->iov_len; |
113 | iov++; |
114 | } |
115 | |
116 | while (len > 0) { |
117 | u8 __user *base = iov->iov_base + offset; |
118 | int copy = min_t(unsigned int, len, iov->iov_len - offset); |
119 | |
120 | offset = 0; |
121 | if (copy_from_user(kdata, base, copy)) |
122 | return -EFAULT; |
123 | len -= copy; |
124 | kdata += copy; |
125 | iov++; |
126 | } |
127 | |
128 | return 0; |
129 | } |
130 | EXPORT_SYMBOL(memcpy_fromiovecend); |
131 | |
/*
 * And now for the all-in-one: copy and checksum from a user iovec
 * directly to a datagram
 * Calls to csum_partial but the last must be in 32 bit chunks
 *
 * ip_build_xmit must ensure that when fragmenting only the last
 * call to this function will be unaligned also.
 *
 * partial_cnt tracks 1-3 bytes that have been copied into the kernel
 * buffer but not yet folded into the checksum; they are completed to a
 * full 32-bit word by leading bytes of the next iovec segment.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				   int offset, unsigned int len, __wsum *csump)
{
	__wsum csum = *csump;
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		offset = 0;

		/* There is a remnant from previous iov. */
		if (partial_cnt) {
			int par_len = 4 - partial_cnt;

			/* iov component is too short ... */
			if (par_len > copy) {
				/* Copy what we have; the word stays open. */
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base += copy;
				partial_cnt += copy;
				len -= copy;
				iov++;
				if (len)
					continue;
				/*
				 * Input exhausted mid-word: checksum the
				 * short tail directly and return.
				 */
				*csump = csum_partial(kdata - partial_cnt,
						      partial_cnt, csum);
				goto out;
			}
			/* Complete the open 4-byte word and checksum it. */
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base += par_len;
			copy -= par_len;
			len -= par_len;
			partial_cnt = 0;
		}

		/*
		 * More data follows in later segments: keep this segment's
		 * checksummed portion a multiple of 4 by copying (but not
		 * yet checksumming) the trailing 1-3 bytes.
		 */
		if (len > copy) {
			partial_cnt = copy % 4;
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						   partial_cnt))
					goto out_fault;
			}
		}

		if (copy) {
			/* Fused copy+checksum of the aligned portion. */
			csum = csum_and_copy_from_user(base, kdata, copy,
						       csum, &err);
			if (err)
				goto out;
		}
		len -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
216 | |
217 | unsigned long iov_pages(const struct iovec *iov, int offset, |
218 | unsigned long nr_segs) |
219 | { |
220 | unsigned long seg, base; |
221 | int pages = 0, len, size; |
222 | |
223 | while (nr_segs && (offset >= iov->iov_len)) { |
224 | offset -= iov->iov_len; |
225 | ++iov; |
226 | --nr_segs; |
227 | } |
228 | |
229 | for (seg = 0; seg < nr_segs; seg++) { |
230 | base = (unsigned long)iov[seg].iov_base + offset; |
231 | len = iov[seg].iov_len - offset; |
232 | size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; |
233 | pages += size; |
234 | offset = 0; |
235 | } |
236 | |
237 | return pages; |
238 | } |
239 | EXPORT_SYMBOL(iov_pages); |
240 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9