Root/
1 | /* |
2 | * iovec manipulation routines. |
3 | * |
4 | * |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License |
7 | * as published by the Free Software Foundation; either version |
8 | * 2 of the License, or (at your option) any later version. |
9 | * |
10 | * Fixes: |
11 | * Andrew Lunn : Errors in iovec copying. |
12 | * Pedro Roque : Added memcpy_fromiovecend and |
13 | * csum_..._fromiovecend. |
14 | * Andi Kleen : fixed error handling for 2.1 |
15 | * Alexey Kuznetsov: 2.1 optimisations |
16 | * Andi Kleen : Fix csum*fromiovecend for IPv6. |
17 | */ |
18 | |
19 | #include <linux/errno.h> |
20 | #include <linux/module.h> |
21 | #include <linux/kernel.h> |
22 | #include <linux/mm.h> |
23 | #include <linux/net.h> |
24 | #include <linux/in6.h> |
25 | #include <asm/uaccess.h> |
26 | #include <asm/byteorder.h> |
27 | #include <net/checksum.h> |
28 | #include <net/sock.h> |
29 | |
30 | /* |
31 | * Verify iovec. The caller must ensure that the iovec is big enough |
32 | * to hold the message iovec. |
33 | * |
34 | * Save time not doing access_ok. copy_*_user will make this work |
35 | * in any case. |
36 | */ |
37 | |
38 | int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) |
39 | { |
40 | int size, err, ct; |
41 | |
42 | if (m->msg_namelen) { |
43 | if (mode == VERIFY_READ) { |
44 | err = move_addr_to_kernel(m->msg_name, m->msg_namelen, |
45 | address); |
46 | if (err < 0) |
47 | return err; |
48 | } |
49 | m->msg_name = address; |
50 | } else { |
51 | m->msg_name = NULL; |
52 | } |
53 | |
54 | size = m->msg_iovlen * sizeof(struct iovec); |
55 | if (copy_from_user(iov, m->msg_iov, size)) |
56 | return -EFAULT; |
57 | |
58 | m->msg_iov = iov; |
59 | err = 0; |
60 | |
61 | for (ct = 0; ct < m->msg_iovlen; ct++) { |
62 | err += iov[ct].iov_len; |
63 | /* |
64 | * Goal is not to verify user data, but to prevent returning |
65 | * negative value, which is interpreted as errno. |
66 | * Overflow is still possible, but it is harmless. |
67 | */ |
68 | if (err < 0) |
69 | return -EMSGSIZE; |
70 | } |
71 | |
72 | return err; |
73 | } |
74 | |
75 | /* |
76 | * Copy kernel to iovec. Returns -EFAULT on error. |
77 | * |
78 | * Note: this modifies the original iovec. |
79 | */ |
80 | |
81 | int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len) |
82 | { |
83 | while (len > 0) { |
84 | if (iov->iov_len) { |
85 | int copy = min_t(unsigned int, iov->iov_len, len); |
86 | if (copy_to_user(iov->iov_base, kdata, copy)) |
87 | return -EFAULT; |
88 | kdata += copy; |
89 | len -= copy; |
90 | iov->iov_len -= copy; |
91 | iov->iov_base += copy; |
92 | } |
93 | iov++; |
94 | } |
95 | |
96 | return 0; |
97 | } |
98 | |
99 | /* |
100 | * Copy kernel to iovec. Returns -EFAULT on error. |
101 | */ |
102 | |
103 | int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, |
104 | int offset, int len) |
105 | { |
106 | int copy; |
107 | for (; len > 0; ++iov) { |
108 | /* Skip over the finished iovecs */ |
109 | if (unlikely(offset >= iov->iov_len)) { |
110 | offset -= iov->iov_len; |
111 | continue; |
112 | } |
113 | copy = min_t(unsigned int, iov->iov_len - offset, len); |
114 | if (copy_to_user(iov->iov_base + offset, kdata, copy)) |
115 | return -EFAULT; |
116 | offset = 0; |
117 | kdata += copy; |
118 | len -= copy; |
119 | } |
120 | |
121 | return 0; |
122 | } |
123 | |
124 | /* |
125 | * Copy iovec to kernel. Returns -EFAULT on error. |
126 | * |
127 | * Note: this modifies the original iovec. |
128 | */ |
129 | |
130 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) |
131 | { |
132 | while (len > 0) { |
133 | if (iov->iov_len) { |
134 | int copy = min_t(unsigned int, len, iov->iov_len); |
135 | if (copy_from_user(kdata, iov->iov_base, copy)) |
136 | return -EFAULT; |
137 | len -= copy; |
138 | kdata += copy; |
139 | iov->iov_base += copy; |
140 | iov->iov_len -= copy; |
141 | } |
142 | iov++; |
143 | } |
144 | |
145 | return 0; |
146 | } |
147 | |
148 | /* |
149 | * Copy iovec from kernel. Returns -EFAULT on error. |
150 | */ |
151 | |
152 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, |
153 | int offset, int len) |
154 | { |
155 | /* Skip over the finished iovecs */ |
156 | while (offset >= iov->iov_len) { |
157 | offset -= iov->iov_len; |
158 | iov++; |
159 | } |
160 | |
161 | while (len > 0) { |
162 | u8 __user *base = iov->iov_base + offset; |
163 | int copy = min_t(unsigned int, len, iov->iov_len - offset); |
164 | |
165 | offset = 0; |
166 | if (copy_from_user(kdata, base, copy)) |
167 | return -EFAULT; |
168 | len -= copy; |
169 | kdata += copy; |
170 | iov++; |
171 | } |
172 | |
173 | return 0; |
174 | } |
175 | |
176 | /* |
177 | * And now for the all-in-one: copy and checksum from a user iovec |
178 | * directly to a datagram |
179 | * Calls to csum_partial but the last must be in 32 bit chunks |
180 | * |
181 | * ip_build_xmit must ensure that when fragmenting only the last |
182 | * call to this function will be unaligned also. |
183 | */ |
/*
 * Copy data from a user iovec into @kdata, starting @offset bytes into
 * the iovec, folding the copied bytes into the running checksum *csump.
 *
 * Intermediate csum_partial() calls must see 32-bit chunks, so when an
 * iovec segment ends mid-word the sub-word tail is copied but its
 * checksumming is deferred (tracked in partial_cnt) until 4 bytes have
 * accumulated in @kdata, or until the data runs out.
 *
 * Returns 0 on success or a negative errno (-EFAULT) on a faulting user
 * copy; *csump is only updated on success.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				 int offset, unsigned int len, __wsum *csump)
{
	__wsum csum = *csump;
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		/* Only the first segment can start mid-way. */
		offset = 0;

		/* There is a remnant from previous iov. */
		if (partial_cnt) {
			int par_len = 4 - partial_cnt;

			/* iov component is too short ... */
			if (par_len > copy) {
				/* Buffer what we have and try the next segment. */
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base += copy;
				partial_cnt += copy;
				len -= copy;
				iov++;
				if (len)
					continue;
				/* Data exhausted: checksum the buffered tail
				 * (already copied just behind kdata). */
				*csump = csum_partial(kdata - partial_cnt,
							 partial_cnt, csum);
				goto out;
			}
			/* Complete the buffered word and fold it in aligned. */
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base += par_len;
			copy -= par_len;
			len -= par_len;
			partial_cnt = 0;
		}

		if (len > copy) {
			/* Not the last chunk overall: copy the sub-word tail
			 * now but defer its checksumming to the next round. */
			partial_cnt = copy % 4;
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						 partial_cnt))
					goto out_fault;
			}
		}

		if (copy) {
			/* Aligned bulk: copy and checksum in one pass. */
			csum = csum_and_copy_from_user(base, kdata, copy,
							csum, &err);
			if (err)
				goto out;
		}
		len -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}
259 | |
/* Export the iovec helpers for use by protocol and driver modules. */
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
EXPORT_SYMBOL(memcpy_fromiovec);
EXPORT_SYMBOL(memcpy_fromiovecend);
EXPORT_SYMBOL(memcpy_toiovec);
EXPORT_SYMBOL(memcpy_toiovecend);
265 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9