Root/
1 | /* |
2 | * iovec manipulation routines. |
3 | * |
4 | * |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License |
7 | * as published by the Free Software Foundation; either version |
8 | * 2 of the License, or (at your option) any later version. |
9 | * |
10 | * Fixes: |
11 | * Andrew Lunn : Errors in iovec copying. |
12 | * Pedro Roque : Added memcpy_fromiovecend and |
13 | * csum_..._fromiovecend. |
14 | * Andi Kleen : fixed error handling for 2.1 |
15 | * Alexey Kuznetsov: 2.1 optimisations |
16 | * Andi Kleen : Fix csum*fromiovecend for IPv6. |
17 | */ |
18 | |
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <net/sock.h>
30 | |
31 | /* |
32 | * Verify iovec. The caller must ensure that the iovec is big enough |
33 | * to hold the message iovec. |
34 | * |
35 | * Save time not doing access_ok. copy_*_user will make this work |
36 | * in any case. |
37 | */ |
38 | |
39 | int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) |
40 | { |
41 | int size, err, ct; |
42 | |
43 | if (m->msg_namelen) { |
44 | if (mode == VERIFY_READ) { |
45 | err = move_addr_to_kernel(m->msg_name, m->msg_namelen, |
46 | address); |
47 | if (err < 0) |
48 | return err; |
49 | } |
50 | m->msg_name = address; |
51 | } else { |
52 | m->msg_name = NULL; |
53 | } |
54 | |
55 | size = m->msg_iovlen * sizeof(struct iovec); |
56 | if (copy_from_user(iov, m->msg_iov, size)) |
57 | return -EFAULT; |
58 | |
59 | m->msg_iov = iov; |
60 | err = 0; |
61 | |
62 | for (ct = 0; ct < m->msg_iovlen; ct++) { |
63 | err += iov[ct].iov_len; |
64 | /* |
65 | * Goal is not to verify user data, but to prevent returning |
66 | * negative value, which is interpreted as errno. |
67 | * Overflow is still possible, but it is harmless. |
68 | */ |
69 | if (err < 0) |
70 | return -EMSGSIZE; |
71 | } |
72 | |
73 | return err; |
74 | } |
75 | |
76 | /* |
77 | * Copy kernel to iovec. Returns -EFAULT on error. |
78 | * |
79 | * Note: this modifies the original iovec. |
80 | */ |
81 | |
82 | int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len) |
83 | { |
84 | while (len > 0) { |
85 | if (iov->iov_len) { |
86 | int copy = min_t(unsigned int, iov->iov_len, len); |
87 | if (copy_to_user(iov->iov_base, kdata, copy)) |
88 | return -EFAULT; |
89 | kdata += copy; |
90 | len -= copy; |
91 | iov->iov_len -= copy; |
92 | iov->iov_base += copy; |
93 | } |
94 | iov++; |
95 | } |
96 | |
97 | return 0; |
98 | } |
99 | |
100 | /* |
101 | * Copy kernel to iovec. Returns -EFAULT on error. |
102 | */ |
103 | |
104 | int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, |
105 | int offset, int len) |
106 | { |
107 | int copy; |
108 | for (; len > 0; ++iov) { |
109 | /* Skip over the finished iovecs */ |
110 | if (unlikely(offset >= iov->iov_len)) { |
111 | offset -= iov->iov_len; |
112 | continue; |
113 | } |
114 | copy = min_t(unsigned int, iov->iov_len - offset, len); |
115 | if (copy_to_user(iov->iov_base + offset, kdata, copy)) |
116 | return -EFAULT; |
117 | offset = 0; |
118 | kdata += copy; |
119 | len -= copy; |
120 | } |
121 | |
122 | return 0; |
123 | } |
124 | |
125 | /* |
126 | * Copy iovec to kernel. Returns -EFAULT on error. |
127 | * |
128 | * Note: this modifies the original iovec. |
129 | */ |
130 | |
131 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) |
132 | { |
133 | while (len > 0) { |
134 | if (iov->iov_len) { |
135 | int copy = min_t(unsigned int, len, iov->iov_len); |
136 | if (copy_from_user(kdata, iov->iov_base, copy)) |
137 | return -EFAULT; |
138 | len -= copy; |
139 | kdata += copy; |
140 | iov->iov_base += copy; |
141 | iov->iov_len -= copy; |
142 | } |
143 | iov++; |
144 | } |
145 | |
146 | return 0; |
147 | } |
148 | |
149 | /* |
150 | * Copy iovec from kernel. Returns -EFAULT on error. |
151 | */ |
152 | |
153 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, |
154 | int offset, int len) |
155 | { |
156 | /* Skip over the finished iovecs */ |
157 | while (offset >= iov->iov_len) { |
158 | offset -= iov->iov_len; |
159 | iov++; |
160 | } |
161 | |
162 | while (len > 0) { |
163 | u8 __user *base = iov->iov_base + offset; |
164 | int copy = min_t(unsigned int, len, iov->iov_len - offset); |
165 | |
166 | offset = 0; |
167 | if (copy_from_user(kdata, base, copy)) |
168 | return -EFAULT; |
169 | len -= copy; |
170 | kdata += copy; |
171 | iov++; |
172 | } |
173 | |
174 | return 0; |
175 | } |
176 | |
177 | /* |
178 | * And now for the all-in-one: copy and checksum from a user iovec |
179 | * directly to a datagram |
180 | * Calls to csum_partial but the last must be in 32 bit chunks |
181 | * |
182 | * ip_build_xmit must ensure that when fragmenting only the last |
183 | * call to this function will be unaligned also. |
184 | */ |
/*
 * Copy @len bytes starting at @offset from the user iovec into @kdata,
 * folding the data into the running checksum *@csump as it goes.
 *
 * csum_partial requires all but the final call to cover a multiple of
 * 4 bytes, so when an iovec element ends mid-word the leftover 1-3
 * bytes ("partial_cnt") are copied raw and stitched together with the
 * start of the next element before being checksummed.
 *
 * Returns 0 on success, -EFAULT on a faulting user access (via
 * copy_from_user or csum_and_copy_from_user's &err).
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				   int offset, unsigned int len, __wsum *csump)
{
	__wsum csum = *csump;
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		/* Only the first element starts mid-buffer. */
		offset = 0;

		/* There is a remnant from previous iov. */
		if (partial_cnt) {
			int par_len = 4 - partial_cnt;

			/*
			 * iov component is too short to even finish the
			 * 4-byte word: copy what there is, extend the
			 * remnant, and move on to the next element.
			 */
			if (par_len > copy) {
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base += copy;
				partial_cnt += copy;
				len -= copy;
				iov++;
				if (len)
					continue;
				/*
				 * Data exhausted mid-word: checksum the
				 * trailing remnant (already copied just
				 * before kdata) as the final, possibly
				 * unaligned, csum_partial call.
				 */
				*csump = csum_partial(kdata - partial_cnt,
						      partial_cnt, csum);
				goto out;
			}
			/*
			 * Complete the 4-byte word with the first bytes
			 * of this element, then checksum the stitched
			 * word (it sits just before kdata).
			 */
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base += par_len;
			copy -= par_len;
			len -= par_len;
			partial_cnt = 0;
		}

		/*
		 * More data follows in later elements, so this element's
		 * tail (copy % 4 bytes) must be deferred: copy it raw now
		 * and checksum it once the word is completed above.
		 */
		if (len > copy) {
			partial_cnt = copy % 4;
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						   partial_cnt))
					goto out_fault;
			}
		}

		/* Word-aligned bulk: copy and checksum in one pass. */
		if (copy) {
			csum = csum_and_copy_from_user(base, kdata, copy,
						       csum, &err);
			if (err)
				goto out;
		}
		len -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}
260 | |
/* Exported for protocol modules (e.g. IPv4/IPv6 datagram paths). */
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
EXPORT_SYMBOL(memcpy_fromiovec);
EXPORT_SYMBOL(memcpy_fromiovecend);
EXPORT_SYMBOL(memcpy_toiovec);
EXPORT_SYMBOL(memcpy_toiovecend);
266 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9