Root/
1 | /* |
2 | * fs/cifs/file.c |
3 | * |
4 | * vfs operations that deal with files |
5 | * |
6 | * Copyright (C) International Business Machines Corp., 2002,2010 |
7 | * Author(s): Steve French (sfrench@us.ibm.com) |
8 | * Jeremy Allison (jra@samba.org) |
9 | * |
10 | * This library is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU Lesser General Public License as published |
12 | * by the Free Software Foundation; either version 2.1 of the License, or |
13 | * (at your option) any later version. |
14 | * |
15 | * This library is distributed in the hope that it will be useful, |
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See |
18 | * the GNU Lesser General Public License for more details. |
19 | * |
20 | * You should have received a copy of the GNU Lesser General Public License |
21 | * along with this library; if not, write to the Free Software |
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
23 | */ |
24 | #include <linux/fs.h> |
25 | #include <linux/backing-dev.h> |
26 | #include <linux/stat.h> |
27 | #include <linux/fcntl.h> |
28 | #include <linux/pagemap.h> |
29 | #include <linux/pagevec.h> |
30 | #include <linux/writeback.h> |
31 | #include <linux/task_io_accounting_ops.h> |
32 | #include <linux/delay.h> |
33 | #include <linux/mount.h> |
34 | #include <linux/slab.h> |
35 | #include <asm/div64.h> |
36 | #include "cifsfs.h" |
37 | #include "cifspdu.h" |
38 | #include "cifsglob.h" |
39 | #include "cifsproto.h" |
40 | #include "cifs_unicode.h" |
41 | #include "cifs_debug.h" |
42 | #include "cifs_fs_sb.h" |
43 | #include "fscache.h" |
44 | |
45 | static inline int cifs_convert_flags(unsigned int flags) |
46 | { |
47 | if ((flags & O_ACCMODE) == O_RDONLY) |
48 | return GENERIC_READ; |
49 | else if ((flags & O_ACCMODE) == O_WRONLY) |
50 | return GENERIC_WRITE; |
51 | else if ((flags & O_ACCMODE) == O_RDWR) { |
52 | /* GENERIC_ALL is too much permission to request |
53 | can cause unnecessary access denied on create */ |
54 | /* return GENERIC_ALL; */ |
55 | return (GENERIC_READ | GENERIC_WRITE); |
56 | } |
57 | |
58 | return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES | |
59 | FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | |
60 | FILE_READ_DATA); |
61 | } |
62 | |
63 | static u32 cifs_posix_convert_flags(unsigned int flags) |
64 | { |
65 | u32 posix_flags = 0; |
66 | |
67 | if ((flags & O_ACCMODE) == O_RDONLY) |
68 | posix_flags = SMB_O_RDONLY; |
69 | else if ((flags & O_ACCMODE) == O_WRONLY) |
70 | posix_flags = SMB_O_WRONLY; |
71 | else if ((flags & O_ACCMODE) == O_RDWR) |
72 | posix_flags = SMB_O_RDWR; |
73 | |
74 | if (flags & O_CREAT) |
75 | posix_flags |= SMB_O_CREAT; |
76 | if (flags & O_EXCL) |
77 | posix_flags |= SMB_O_EXCL; |
78 | if (flags & O_TRUNC) |
79 | posix_flags |= SMB_O_TRUNC; |
80 | /* be safe and imply O_SYNC for O_DSYNC */ |
81 | if (flags & O_DSYNC) |
82 | posix_flags |= SMB_O_SYNC; |
83 | if (flags & O_DIRECTORY) |
84 | posix_flags |= SMB_O_DIRECTORY; |
85 | if (flags & O_NOFOLLOW) |
86 | posix_flags |= SMB_O_NOFOLLOW; |
87 | if (flags & O_DIRECT) |
88 | posix_flags |= SMB_O_DIRECT; |
89 | |
90 | return posix_flags; |
91 | } |
92 | |
93 | static inline int cifs_get_disposition(unsigned int flags) |
94 | { |
95 | if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) |
96 | return FILE_CREATE; |
97 | else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) |
98 | return FILE_OVERWRITE_IF; |
99 | else if ((flags & O_CREAT) == O_CREAT) |
100 | return FILE_OPEN_IF; |
101 | else if ((flags & O_TRUNC) == O_TRUNC) |
102 | return FILE_OVERWRITE; |
103 | else |
104 | return FILE_OPEN; |
105 | } |
106 | |
/*
 * Open a file using the SMB POSIX extensions (CIFSPOSIXCreate) and,
 * when requested, instantiate or refresh the corresponding inode from
 * the FILE_UNIX_BASIC_INFO the server returns.
 *
 * @full_path: server-relative path of the file
 * @pinode:    may be NULL if the caller needs no inode info; otherwise
 *             *pinode == NULL means "allocate a new inode from the reply",
 *             non-NULL means "update this existing inode"
 * @sb:        superblock of the mount
 * @mode:      create mode; the caller's umask is applied below
 * @f_flags:   POSIX open flags, converted to SMB_O_* for the wire
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: network file id (handle) on success
 * @xid:       transaction id used for debugging/accounting
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		struct super_block *sb, int mode, unsigned int f_flags,
		__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type == -1 means the server sent no usable inode data back */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
168 | |
/*
 * Open a file the "NT" (non-POSIX) way via CIFSSMBOpen (or SMBLegacyOpen
 * for pre-NT servers), then refresh the inode from the open response.
 * Used when the POSIX-extensions open is unavailable or has failed.
 *
 * Returns 0 on success or a negative errno; on success *pnetfid and
 * *poplock are filled in by the open call.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *
 *	O_SYNC is a reasonable match to CIFS writethrough flag
 *	and the read write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buf receives FILE_ALL_INFO from the open reply; it is consumed
	   by cifs_get_inode_info() below on the non-unix path */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
238 | |
/*
 * Allocate and initialize the per-open-file cifsFileInfo, take a
 * reference on the dentry and tcon link, add the new entry to the
 * tcon's and inode's open-file lists, and attach it to
 * file->private_data.
 *
 * Returns the new cifsFileInfo, or NULL on allocation failure (in
 * which case no lists or refcounts have been touched).
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	/* initial reference; dropped by cifsFileInfo_put() */
	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}
279 | |
280 | /* |
281 | * Release a reference on the file private data. This may involve closing |
282 | * the filehandle out on the server. Must be called without holding |
283 | * cifs_file_list_lock. |
284 | */ |
285 | void cifsFileInfo_put(struct cifsFileInfo *cifs_file) |
286 | { |
287 | struct inode *inode = cifs_file->dentry->d_inode; |
288 | struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); |
289 | struct cifsInodeInfo *cifsi = CIFS_I(inode); |
290 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
291 | struct cifsLockInfo *li, *tmp; |
292 | |
293 | spin_lock(&cifs_file_list_lock); |
294 | if (--cifs_file->count > 0) { |
295 | spin_unlock(&cifs_file_list_lock); |
296 | return; |
297 | } |
298 | |
299 | /* remove it from the lists */ |
300 | list_del(&cifs_file->flist); |
301 | list_del(&cifs_file->tlist); |
302 | |
303 | if (list_empty(&cifsi->openFileList)) { |
304 | cFYI(1, "closing last open instance for inode %p", |
305 | cifs_file->dentry->d_inode); |
306 | |
307 | /* in strict cache mode we need invalidate mapping on the last |
308 | close because it may cause a error when we open this file |
309 | again and get at least level II oplock */ |
310 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) |
311 | CIFS_I(inode)->invalid_mapping = true; |
312 | |
313 | cifs_set_oplock_level(cifsi, 0); |
314 | } |
315 | spin_unlock(&cifs_file_list_lock); |
316 | |
317 | if (!tcon->need_reconnect && !cifs_file->invalidHandle) { |
318 | int xid, rc; |
319 | |
320 | xid = GetXid(); |
321 | rc = CIFSSMBClose(xid, tcon, cifs_file->netfid); |
322 | FreeXid(xid); |
323 | } |
324 | |
325 | /* Delete any outstanding lock records. We'll lose them when the file |
326 | * is closed anyway. |
327 | */ |
328 | mutex_lock(&cifs_file->lock_mutex); |
329 | list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) { |
330 | list_del(&li->llist); |
331 | kfree(li); |
332 | } |
333 | mutex_unlock(&cifs_file->lock_mutex); |
334 | |
335 | cifs_put_tlink(cifs_file->tlink); |
336 | dput(cifs_file->dentry); |
337 | kfree(cifs_file); |
338 | } |
339 | |
/*
 * VFS ->open handler.  Tries a POSIX-extensions open first when the
 * server advertises support; otherwise (or on certain failures) falls
 * back to the NT-style open.  On success the new cifsFileInfo is
 * attached to file->private_data.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* attempt POSIX open if the server supports the POSIX path
	   operations capability and hasn't previously botched one */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims support but fails the call: stop
			   trying POSIX opens on this tcon from now on */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* undo the server-side open since we cannot track it */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
442 | |
/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */

	/* No locks are reacquired yet; always report success. */
	return 0;
}
453 | |
/*
 * Re-open a file whose server handle has been invalidated (typically
 * after a session reconnect).  Tries the POSIX-extensions open first,
 * then falls back to CIFSSMBOpen.  @can_flush tells us whether it is
 * safe to flush dirty pages and refresh inode info afterwards; callers
 * on the writeback path pass false to avoid deadlocking.
 *
 * Returns 0 on success (handle refreshed) or a negative errno.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* someone else already reopened it while we waited */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
575 | |
576 | int cifs_close(struct inode *inode, struct file *file) |
577 | { |
578 | if (file->private_data != NULL) { |
579 | cifsFileInfo_put(file->private_data); |
580 | file->private_data = NULL; |
581 | } |
582 | |
583 | /* return code from the ->release op is always ignored */ |
584 | return 0; |
585 | } |
586 | |
/*
 * VFS ->release handler for directories: send FindClose for an
 * unfinished search, free any buffered search results, drop the tcon
 * link reference and free the private data.
 *
 * Returns 0; a failed FindClose is deliberately ignored.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		/* only send FindClose if the search did not run to
		   completion and the handle is still valid; mark the handle
		   invalid under the lock so nobody else closes it too */
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* buffer came from either the small or regular SMB
			   buffer pool; release to the matching pool */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
631 | |
/*
 * Record a byte-range lock taken on the server so it can be released
 * later (see the unlock path in cifs_lock and cifsFileInfo_put).
 *
 * Returns 0 on success or -ENOMEM if the record cannot be allocated.
 */
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
			   __u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	/* lock_mutex protects the per-open-file lock list */
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}
647 | |
/*
 * VFS ->lock handler.  Handles F_GETLK queries and F_SETLK/F_SETLKW
 * requests, using POSIX byte-range locks when the server supports the
 * CIFS UNIX extensions (and the mount allows it), and Windows-style
 * LockingAndX requests otherwise.  Windows-style locks that succeed
 * are recorded via store_file_lock() so they can be released later.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	/* translate the VFS lock type into lock/unlock counts and the
	   LockingAndX shared/exclusive type bit */
	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	account for negative length which we can not accept over the
	wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock, posix_lock_type,
					wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		/* probe for a conflicting lock by trying to take the lock:
		   if it succeeds there is no conflict (then undo it) */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */, 0);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */, 0);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				/* exclusive probe failed; retry as a shared
				   lock to distinguish read vs write holders */
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */, 0);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */, 0);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock, posix_lock_type,
				      wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 0, numLock, lockType,
					 wait_flag, 0);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid, li->length,
							li->offset, 1, 0,
							li->type, false, 0);
					if (stored_rc)
						rc = stored_rc;
					else {
						/* server unlock ok; drop the
						   local record */
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	/* mirror the lock locally so the VFS lock state stays coherent */
	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}
848 | |
849 | /* update the file size (if needed) after a write */ |
850 | void |
851 | cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, |
852 | unsigned int bytes_written) |
853 | { |
854 | loff_t end_of_write = offset + bytes_written; |
855 | |
856 | if (end_of_write > cifsi->server_eof) |
857 | cifsi->server_eof = end_of_write; |
858 | } |
859 | |
/*
 * Write @write_size bytes from @write_data to the server at *poffset,
 * looping until everything is sent or an error occurs.  Handles stale
 * handles by reopening (without flushing, to avoid deadlock) and
 * retries on -EAGAIN.  On success *poffset is advanced and the cached
 * inode size / server EOF are updated.
 *
 * Returns the number of bytes written, or a negative errno if nothing
 * was written at all.
 */
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* never send more than the negotiated write size */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			/* report a partial write if some data went out,
			   otherwise propagate the error */
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
938 | |
/*
 * Find an open file instance for this inode that was opened with read
 * access and has a valid handle.  On multiuser mounts, @fsuid_only
 * restricts the search to handles owned by the current fsuid.
 *
 * Returns a referenced cifsFileInfo (caller must cifsFileInfo_put it)
 * or NULL if no suitable instance exists.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
972 | |
/*
 * Find an open handle on this inode that can be used for writing.
 *
 * First pass prefers a handle opened by the calling thread group
 * (pid match); if none is found, a second pass ("any_available")
 * accepts any writable handle. Invalidated handles are reopened on
 * the fly; the file-list spinlock is dropped around the (blocking)
 * reopen and retaken afterwards.
 *
 * Returns a referenced cifsFileInfo (caller must cifsFileInfo_put)
 * or NULL when no writable handle could be found or reopened.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			/* pin the handle before we may drop the list lock */
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
1043 | |
1044 | static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) |
1045 | { |
1046 | struct address_space *mapping = page->mapping; |
1047 | loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; |
1048 | char *write_data; |
1049 | int rc = -EFAULT; |
1050 | int bytes_written = 0; |
1051 | struct inode *inode; |
1052 | struct cifsFileInfo *open_file; |
1053 | |
1054 | if (!mapping || !mapping->host) |
1055 | return -EFAULT; |
1056 | |
1057 | inode = page->mapping->host; |
1058 | |
1059 | offset += (loff_t)from; |
1060 | write_data = kmap(page); |
1061 | write_data += from; |
1062 | |
1063 | if ((to > PAGE_CACHE_SIZE) || (from > to)) { |
1064 | kunmap(page); |
1065 | return -EIO; |
1066 | } |
1067 | |
1068 | /* racing with truncate? */ |
1069 | if (offset > mapping->host->i_size) { |
1070 | kunmap(page); |
1071 | return 0; /* don't care */ |
1072 | } |
1073 | |
1074 | /* check to make sure that we are not extending the file */ |
1075 | if (mapping->host->i_size - offset < (loff_t)to) |
1076 | to = (unsigned)(mapping->host->i_size - offset); |
1077 | |
1078 | open_file = find_writable_file(CIFS_I(mapping->host), false); |
1079 | if (open_file) { |
1080 | bytes_written = cifs_write(open_file, open_file->pid, |
1081 | write_data, to - from, &offset); |
1082 | cifsFileInfo_put(open_file); |
1083 | /* Does mm or vfs already set times? */ |
1084 | inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb); |
1085 | if ((bytes_written > 0) && (offset)) |
1086 | rc = 0; |
1087 | else if (bytes_written < 0) |
1088 | rc = bytes_written; |
1089 | } else { |
1090 | cFYI(1, "No writeable filehandles for inode"); |
1091 | rc = -EIO; |
1092 | } |
1093 | |
1094 | kunmap(page); |
1095 | return rc; |
1096 | } |
1097 | |
/*
 * ->writepages for CIFS: gather runs of contiguous dirty pages (up to
 * wsize worth) and send each run as one asynchronous SMB write via
 * cifs_async_writev(). When wsize is smaller than a page we cannot
 * batch, so fall back to generic_writepages() (one page at a time
 * through ->writepage).
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* at most wsize worth of pages, and no more than remain */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			/* no more dirty pages in range */
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* keep only a locked, contiguous, still-dirty prefix */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);

		/* resend the whole batch while a sync writeback hits -EAGAIN,
		   refreshing the writable handle each time around */
		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
1296 | |
1297 | static int |
1298 | cifs_writepage_locked(struct page *page, struct writeback_control *wbc) |
1299 | { |
1300 | int rc; |
1301 | int xid; |
1302 | |
1303 | xid = GetXid(); |
1304 | /* BB add check for wbc flags */ |
1305 | page_cache_get(page); |
1306 | if (!PageUptodate(page)) |
1307 | cFYI(1, "ppw - page not up to date"); |
1308 | |
1309 | /* |
1310 | * Set the "writeback" flag, and clear "dirty" in the radix tree. |
1311 | * |
1312 | * A writepage() implementation always needs to do either this, |
1313 | * or re-dirty the page with "redirty_page_for_writepage()" in |
1314 | * the case of a failure. |
1315 | * |
1316 | * Just unlocking the page will cause the radix tree tag-bits |
1317 | * to fail to update with the state of the page correctly. |
1318 | */ |
1319 | set_page_writeback(page); |
1320 | retry_write: |
1321 | rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE); |
1322 | if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL) |
1323 | goto retry_write; |
1324 | else if (rc == -EAGAIN) |
1325 | redirty_page_for_writepage(wbc, page); |
1326 | else if (rc != 0) |
1327 | SetPageError(page); |
1328 | else |
1329 | SetPageUptodate(page); |
1330 | end_page_writeback(page); |
1331 | page_cache_release(page); |
1332 | FreeXid(xid); |
1333 | return rc; |
1334 | } |
1335 | |
/* ->writepage entry point: write the locked page, then release the lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return retval;
}
1342 | |
/*
 * ->write_end for CIFS: commit "copied" bytes at "pos" that were staged
 * into "page" by write_begin. If the page is uptodate the data is simply
 * dirtied for later writeback; otherwise the bytes are written through
 * to the server synchronously via cifs_write(). Returns the number of
 * bytes committed (or a negative error from the sync write path).
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* on rwpidforward mounts, reuse the opener's pid for the write */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		page, pos, copied);

	/* NOTE(review): PageChecked is presumably set by write_begin when it
	   skipped reading the page in -- confirm against cifs_write_begin */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		/* partial copy into a non-uptodate page: cannot just dirty it,
		   so push these bytes to the server right away */
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	/* extend the cached file size if this write grew the file */
	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
1403 | |
1404 | int cifs_strict_fsync(struct file *file, int datasync) |
1405 | { |
1406 | int xid; |
1407 | int rc = 0; |
1408 | struct cifs_tcon *tcon; |
1409 | struct cifsFileInfo *smbfile = file->private_data; |
1410 | struct inode *inode = file->f_path.dentry->d_inode; |
1411 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
1412 | |
1413 | xid = GetXid(); |
1414 | |
1415 | cFYI(1, "Sync file - name: %s datasync: 0x%x", |
1416 | file->f_path.dentry->d_name.name, datasync); |
1417 | |
1418 | if (!CIFS_I(inode)->clientCanCacheRead) { |
1419 | rc = cifs_invalidate_mapping(inode); |
1420 | if (rc) { |
1421 | cFYI(1, "rc: %d during invalidate phase", rc); |
1422 | rc = 0; /* don't care about it in fsync */ |
1423 | } |
1424 | } |
1425 | |
1426 | tcon = tlink_tcon(smbfile->tlink); |
1427 | if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) |
1428 | rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); |
1429 | |
1430 | FreeXid(xid); |
1431 | return rc; |
1432 | } |
1433 | |
1434 | int cifs_fsync(struct file *file, int datasync) |
1435 | { |
1436 | int xid; |
1437 | int rc = 0; |
1438 | struct cifs_tcon *tcon; |
1439 | struct cifsFileInfo *smbfile = file->private_data; |
1440 | struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
1441 | |
1442 | xid = GetXid(); |
1443 | |
1444 | cFYI(1, "Sync file - name: %s datasync: 0x%x", |
1445 | file->f_path.dentry->d_name.name, datasync); |
1446 | |
1447 | tcon = tlink_tcon(smbfile->tlink); |
1448 | if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) |
1449 | rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); |
1450 | |
1451 | FreeXid(xid); |
1452 | return rc; |
1453 | } |
1454 | |
1455 | /* |
1456 | * As file closes, flush all cached write data for this inode checking |
1457 | * for write behind errors. |
1458 | */ |
1459 | int cifs_flush(struct file *file, fl_owner_t id) |
1460 | { |
1461 | struct inode *inode = file->f_path.dentry->d_inode; |
1462 | int rc = 0; |
1463 | |
1464 | if (file->f_mode & FMODE_WRITE) |
1465 | rc = filemap_write_and_wait(inode->i_mapping); |
1466 | |
1467 | cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc); |
1468 | |
1469 | return rc; |
1470 | } |
1471 | |
1472 | static int |
1473 | cifs_write_allocate_pages(struct page **pages, unsigned long num_pages) |
1474 | { |
1475 | int rc = 0; |
1476 | unsigned long i; |
1477 | |
1478 | for (i = 0; i < num_pages; i++) { |
1479 | pages[i] = alloc_page(__GFP_HIGHMEM); |
1480 | if (!pages[i]) { |
1481 | /* |
1482 | * save number of pages we have already allocated and |
1483 | * return with ENOMEM error |
1484 | */ |
1485 | num_pages = i; |
1486 | rc = -ENOMEM; |
1487 | goto error; |
1488 | } |
1489 | } |
1490 | |
1491 | return rc; |
1492 | |
1493 | error: |
1494 | for (i = 0; i < num_pages; i++) |
1495 | put_page(pages[i]); |
1496 | return rc; |
1497 | } |
1498 | |
1499 | static inline |
1500 | size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len) |
1501 | { |
1502 | size_t num_pages; |
1503 | size_t clen; |
1504 | |
1505 | clen = min_t(const size_t, len, wsize); |
1506 | num_pages = clen / PAGE_CACHE_SIZE; |
1507 | if (clen % PAGE_CACHE_SIZE) |
1508 | num_pages++; |
1509 | |
1510 | if (cur_len) |
1511 | *cur_len = clen; |
1512 | |
1513 | return num_pages; |
1514 | } |
1515 | |
1516 | static ssize_t |
1517 | cifs_iovec_write(struct file *file, const struct iovec *iov, |
1518 | unsigned long nr_segs, loff_t *poffset) |
1519 | { |
1520 | unsigned int written; |
1521 | unsigned long num_pages, npages, i; |
1522 | size_t copied, len, cur_len; |
1523 | ssize_t total_written = 0; |
1524 | struct kvec *to_send; |
1525 | struct page **pages; |
1526 | struct iov_iter it; |
1527 | struct inode *inode; |
1528 | struct cifsFileInfo *open_file; |
1529 | struct cifs_tcon *pTcon; |
1530 | struct cifs_sb_info *cifs_sb; |
1531 | struct cifs_io_parms io_parms; |
1532 | int xid, rc; |
1533 | __u32 pid; |
1534 | |
1535 | len = iov_length(iov, nr_segs); |
1536 | if (!len) |
1537 | return 0; |
1538 | |
1539 | rc = generic_write_checks(file, poffset, &len, 0); |
1540 | if (rc) |
1541 | return rc; |
1542 | |
1543 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
1544 | num_pages = get_numpages(cifs_sb->wsize, len, &cur_len); |
1545 | |
1546 | pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL); |
1547 | if (!pages) |
1548 | return -ENOMEM; |
1549 | |
1550 | to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL); |
1551 | if (!to_send) { |
1552 | kfree(pages); |
1553 | return -ENOMEM; |
1554 | } |
1555 | |
1556 | rc = cifs_write_allocate_pages(pages, num_pages); |
1557 | if (rc) { |
1558 | kfree(pages); |
1559 | kfree(to_send); |
1560 | return rc; |
1561 | } |
1562 | |
1563 | xid = GetXid(); |
1564 | open_file = file->private_data; |
1565 | |
1566 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) |
1567 | pid = open_file->pid; |
1568 | else |
1569 | pid = current->tgid; |
1570 | |
1571 | pTcon = tlink_tcon(open_file->tlink); |
1572 | inode = file->f_path.dentry->d_inode; |
1573 | |
1574 | iov_iter_init(&it, iov, nr_segs, len, 0); |
1575 | npages = num_pages; |
1576 | |
1577 | do { |
1578 | size_t save_len = cur_len; |
1579 | for (i = 0; i < npages; i++) { |
1580 | copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE); |
1581 | copied = iov_iter_copy_from_user(pages[i], &it, 0, |
1582 | copied); |
1583 | cur_len -= copied; |
1584 | iov_iter_advance(&it, copied); |
1585 | to_send[i+1].iov_base = kmap(pages[i]); |
1586 | to_send[i+1].iov_len = copied; |
1587 | } |
1588 | |
1589 | cur_len = save_len - cur_len; |
1590 | |
1591 | do { |
1592 | if (open_file->invalidHandle) { |
1593 | rc = cifs_reopen_file(open_file, false); |
1594 | if (rc != 0) |
1595 | break; |
1596 | } |
1597 | io_parms.netfid = open_file->netfid; |
1598 | io_parms.pid = pid; |
1599 | io_parms.tcon = pTcon; |
1600 | io_parms.offset = *poffset; |
1601 | io_parms.length = cur_len; |
1602 | rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send, |
1603 | npages, 0); |
1604 | } while (rc == -EAGAIN); |
1605 | |
1606 | for (i = 0; i < npages; i++) |
1607 | kunmap(pages[i]); |
1608 | |
1609 | if (written) { |
1610 | len -= written; |
1611 | total_written += written; |
1612 | cifs_update_eof(CIFS_I(inode), *poffset, written); |
1613 | *poffset += written; |
1614 | } else if (rc < 0) { |
1615 | if (!total_written) |
1616 | total_written = rc; |
1617 | break; |
1618 | } |
1619 | |
1620 | /* get length and number of kvecs of the next write */ |
1621 | npages = get_numpages(cifs_sb->wsize, len, &cur_len); |
1622 | } while (len > 0); |
1623 | |
1624 | if (total_written > 0) { |
1625 | spin_lock(&inode->i_lock); |
1626 | if (*poffset > inode->i_size) |
1627 | i_size_write(inode, *poffset); |
1628 | spin_unlock(&inode->i_lock); |
1629 | } |
1630 | |
1631 | cifs_stats_bytes_written(pTcon, total_written); |
1632 | mark_inode_dirty_sync(inode); |
1633 | |
1634 | for (i = 0; i < num_pages; i++) |
1635 | put_page(pages[i]); |
1636 | kfree(to_send); |
1637 | kfree(pages); |
1638 | FreeXid(xid); |
1639 | return total_written; |
1640 | } |
1641 | |
1642 | ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov, |
1643 | unsigned long nr_segs, loff_t pos) |
1644 | { |
1645 | ssize_t written; |
1646 | struct inode *inode; |
1647 | |
1648 | inode = iocb->ki_filp->f_path.dentry->d_inode; |
1649 | |
1650 | /* |
1651 | * BB - optimize the way when signing is disabled. We can drop this |
1652 | * extra memory-to-memory copying and use iovec buffers for constructing |
1653 | * write request. |
1654 | */ |
1655 | |
1656 | written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos); |
1657 | if (written > 0) { |
1658 | CIFS_I(inode)->invalid_mapping = true; |
1659 | iocb->ki_pos = pos; |
1660 | } |
1661 | |
1662 | return written; |
1663 | } |
1664 | |
1665 | ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov, |
1666 | unsigned long nr_segs, loff_t pos) |
1667 | { |
1668 | struct inode *inode; |
1669 | |
1670 | inode = iocb->ki_filp->f_path.dentry->d_inode; |
1671 | |
1672 | if (CIFS_I(inode)->clientCanCacheAll) |
1673 | return generic_file_aio_write(iocb, iov, nr_segs, pos); |
1674 | |
1675 | /* |
1676 | * In strict cache mode we need to write the data to the server exactly |
1677 | * from the pos to pos+len-1 rather than flush all affected pages |
1678 | * because it may cause a error with mandatory locks on these pages but |
1679 | * not on the region from pos to ppos+len-1. |
1680 | */ |
1681 | |
1682 | return cifs_user_writev(iocb, iov, nr_segs, pos); |
1683 | } |
1684 | |
/*
 * Uncached vectored read: fetch up to rsize bytes per SMB call, copy
 * the response payload directly into the caller's iovec, and release
 * the SMB buffer. Reopens an invalidated handle and retries the call
 * on -EAGAIN. Returns the number of bytes read (advancing *poffset),
 * or a negative error when nothing was read.
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	/* on rwpidforward mounts, reuse the opener's pid for the read */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		rc = -EAGAIN;
		read_data = NULL;

		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				/* skip 4 bytes -- presumably the RFC1002
				   length field -- plus the DataOffset to
				   reach the payload; confirm vs cifspdu.h */
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			/* error or EOF: report error only if nothing read */
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
1775 | |
1776 | ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov, |
1777 | unsigned long nr_segs, loff_t pos) |
1778 | { |
1779 | ssize_t read; |
1780 | |
1781 | read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos); |
1782 | if (read > 0) |
1783 | iocb->ki_pos = pos; |
1784 | |
1785 | return read; |
1786 | } |
1787 | |
1788 | ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov, |
1789 | unsigned long nr_segs, loff_t pos) |
1790 | { |
1791 | struct inode *inode; |
1792 | |
1793 | inode = iocb->ki_filp->f_path.dentry->d_inode; |
1794 | |
1795 | if (CIFS_I(inode)->clientCanCacheRead) |
1796 | return generic_file_aio_read(iocb, iov, nr_segs, pos); |
1797 | |
1798 | /* |
1799 | * In strict cache mode we need to read from the server all the time |
1800 | * if we don't have level II oplock because the server can delay mtime |
1801 | * change - so we can't make a decision about inode invalidating. |
1802 | * And we can also fail with pagereading if there are mandatory locks |
1803 | * on pages affected by this read but not on the region from pos to |
1804 | * pos+len-1. |
1805 | */ |
1806 | |
1807 | return cifs_user_readv(iocb, iov, nr_segs, pos); |
1808 | } |
1809 | |
1810 | static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, |
1811 | loff_t *poffset) |
1812 | { |
1813 | int rc = -EACCES; |
1814 | unsigned int bytes_read = 0; |
1815 | unsigned int total_read; |
1816 | unsigned int current_read_size; |
1817 | struct cifs_sb_info *cifs_sb; |
1818 | struct cifs_tcon *pTcon; |
1819 | int xid; |
1820 | char *current_offset; |
1821 | struct cifsFileInfo *open_file; |
1822 | struct cifs_io_parms io_parms; |
1823 | int buf_type = CIFS_NO_BUFFER; |
1824 | __u32 pid; |
1825 | |
1826 | xid = GetXid(); |
1827 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
1828 | |
1829 | if (file->private_data == NULL) { |
1830 | rc = -EBADF; |
1831 | FreeXid(xid); |
1832 | return rc; |
1833 | } |
1834 | open_file = file->private_data; |
1835 | pTcon = tlink_tcon(open_file->tlink); |
1836 | |
1837 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) |
1838 | pid = open_file->pid; |
1839 | else |
1840 | pid = current->tgid; |
1841 | |
1842 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) |
1843 | cFYI(1, "attempting read on write only file instance"); |
1844 | |
1845 | for (total_read = 0, current_offset = read_data; |
1846 | read_size > total_read; |
1847 | total_read += bytes_read, current_offset += bytes_read) { |
1848 | current_read_size = min_t(const int, read_size - total_read, |
1849 | cifs_sb->rsize); |
1850 | /* For windows me and 9x we do not want to request more |
1851 | than it negotiated since it will refuse the read then */ |
1852 | if ((pTcon->ses) && |
1853 | !(pTcon->ses->capabilities & CAP_LARGE_FILES)) { |
1854 | current_read_size = min_t(const int, current_read_size, |
1855 | pTcon->ses->server->maxBuf - 128); |
1856 | } |
1857 | rc = -EAGAIN; |
1858 | while (rc == -EAGAIN) { |
1859 | if (open_file->invalidHandle) { |
1860 | rc = cifs_reopen_file(open_file, true); |
1861 | if (rc != 0) |
1862 | break; |
1863 | } |
1864 | io_parms.netfid = open_file->netfid; |
1865 | io_parms.pid = pid; |
1866 | io_parms.tcon = pTcon; |
1867 | io_parms.offset = *poffset; |
1868 | io_parms.length = current_read_size; |
1869 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, |
1870 | ¤t_offset, &buf_type); |
1871 | } |
1872 | if (rc || (bytes_read == 0)) { |
1873 | if (total_read) { |
1874 | break; |
1875 | } else { |
1876 | FreeXid(xid); |
1877 | return rc; |
1878 | } |
1879 | } else { |
1880 | cifs_stats_bytes_read(pTcon, total_read); |
1881 | *poffset += bytes_read; |
1882 | } |
1883 | } |
1884 | FreeXid(xid); |
1885 | return total_read; |
1886 | } |
1887 | |
1888 | /* |
1889 | * If the page is mmap'ed into a process' page tables, then we need to make |
1890 | * sure that it doesn't change while being written back. |
1891 | */ |
1892 | static int |
1893 | cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) |
1894 | { |
1895 | struct page *page = vmf->page; |
1896 | |
1897 | lock_page(page); |
1898 | return VM_FAULT_LOCKED; |
1899 | } |
1900 | |
/* vm ops for mmap'ed CIFS files: generic pagecache fault-in, plus
   locking the page on mkwrite so it stays stable during writeback */
static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};
1905 | |
1906 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) |
1907 | { |
1908 | int rc, xid; |
1909 | struct inode *inode = file->f_path.dentry->d_inode; |
1910 | |
1911 | xid = GetXid(); |
1912 | |
1913 | if (!CIFS_I(inode)->clientCanCacheRead) { |
1914 | rc = cifs_invalidate_mapping(inode); |
1915 | if (rc) |
1916 | return rc; |
1917 | } |
1918 | |
1919 | rc = generic_file_mmap(file, vma); |
1920 | if (rc == 0) |
1921 | vma->vm_ops = &cifs_file_vm_ops; |
1922 | FreeXid(xid); |
1923 | return rc; |
1924 | } |
1925 | |
1926 | int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) |
1927 | { |
1928 | int rc, xid; |
1929 | |
1930 | xid = GetXid(); |
1931 | rc = cifs_revalidate_file(file); |
1932 | if (rc) { |
1933 | cFYI(1, "Validation prior to mmap failed, error=%d", rc); |
1934 | FreeXid(xid); |
1935 | return rc; |
1936 | } |
1937 | rc = generic_file_mmap(file, vma); |
1938 | if (rc == 0) |
1939 | vma->vm_ops = &cifs_file_vm_ops; |
1940 | FreeXid(xid); |
1941 | return rc; |
1942 | } |
1943 | |
1944 | |
/*
 * Copy @bytes_read bytes of SMB reply payload in @data into the readahead
 * pages queued on @pages.  Pages are consumed from the list tail (ascending
 * index order): each is inserted into the page cache/LRU, filled (a short
 * final page is zero-padded), marked uptodate, unlocked, and handed to
 * FS-Cache.  A page that cannot be inserted is skipped along with its
 * PAGE_CACHE_SIZE slice of @data.
 */
static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		/* tail of the list holds the lowest (next expected) index */
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache_lru(page, mapping, page->index,
					  GFP_KERNEL)) {
			/* insertion failed: drop our ref and skip this
			   page's portion of the reply data */
			page_cache_release(page);
			cFYI(1, "Add page cache failed");
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}
		/* the page cache now holds its own reference; drop ours */
		page_cache_release(page);

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		data += PAGE_CACHE_SIZE;

		/* add page to FS-Cache */
		cifs_readpage_to_fscache(mapping->host, page);
	}
	return;
}
1992 | |
/*
 * ->readpages() for cifs: fill a batch of readahead pages.  Tries
 * FS-Cache first; on a miss, issues synchronous SMB reads covering runs
 * of contiguous page indexes and copies each reply into the page cache
 * via cifs_copy_cache_pages().  Returns 0 or a negative errno; any pages
 * left on @page_list are cleaned up by the VFS caller.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = GetXid();
	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = tlink_tcon(open_file->tlink);

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		goto read_complete;

	cFYI(DBG2, "rpages: num pages %d", num_pages);
	/* tag reads with the opener's pid when mounted with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		/* list tail holds the lowest-index page of the batch */
		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
				read_size, contig_pages);
		rc = -EAGAIN;
		/* retry loop: -EAGAIN means the session/handle needs to be
		   reestablished before reissuing the read */
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = offset;
			io_parms.length = read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &smb_read_data, &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				/* drop the stale reply buffer before retrying */
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, "Read error in readpages: %d", rc);
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			/* data starts past the 4-byte RFC1001 length field
			   plus the SMB-header-relative DataOffset */
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset));

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* break; */
			}
		} else {
			cFYI(1, "No bytes read (%d) at offset %lld . "
				"Cleaning remaining pages from readahead list",
				bytes_read, offset);
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

	/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

read_complete:
	FreeXid(xid);
	return rc;
}
2149 | |
/*
 * Read one page of the file at *poffset into @page.  Checks FS-Cache
 * first; on a miss, does a synchronous cifs_read() of PAGE_CACHE_SIZE
 * bytes, zero-fills any tail past the amount read, marks the page
 * uptodate, and pushes it to FS-Cache.  Returns 0 on success or a
 * negative errno.  The page stays locked; callers (cifs_readpage,
 * cifs_write_begin) unlock it.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	/* extra reference so the page can't go away while kmapped */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the tail of a short (EOF-straddling) read */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
2193 | |
2194 | static int cifs_readpage(struct file *file, struct page *page) |
2195 | { |
2196 | loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; |
2197 | int rc = -EACCES; |
2198 | int xid; |
2199 | |
2200 | xid = GetXid(); |
2201 | |
2202 | if (file->private_data == NULL) { |
2203 | rc = -EBADF; |
2204 | FreeXid(xid); |
2205 | return rc; |
2206 | } |
2207 | |
2208 | cFYI(1, "readpage %p at offset %d 0x%x\n", |
2209 | page, (int)offset, (int)offset); |
2210 | |
2211 | rc = cifs_readpage_worker(file, page, &offset); |
2212 | |
2213 | unlock_page(page); |
2214 | |
2215 | FreeXid(xid); |
2216 | return rc; |
2217 | } |
2218 | |
2219 | static int is_inode_writable(struct cifsInodeInfo *cifs_inode) |
2220 | { |
2221 | struct cifsFileInfo *open_file; |
2222 | |
2223 | spin_lock(&cifs_file_list_lock); |
2224 | list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { |
2225 | if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { |
2226 | spin_unlock(&cifs_file_list_lock); |
2227 | return 1; |
2228 | } |
2229 | } |
2230 | spin_unlock(&cifs_file_list_lock); |
2231 | return 0; |
2232 | } |
2233 | |
2234 | /* We do not want to update the file size from server for inodes |
2235 | open for write - to avoid races with writepage extending |
2236 | the file - in the future we could consider allowing |
2237 | refreshing the inode only on increases in the file size |
2238 | but this is tricky to do without racing with writebehind |
2239 | page caching in the current Linux kernel design */ |
2240 | bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) |
2241 | { |
2242 | if (!cifsInode) |
2243 | return true; |
2244 | |
2245 | if (is_inode_writable(cifsInode)) { |
2246 | /* This inode is open for write at least once */ |
2247 | struct cifs_sb_info *cifs_sb; |
2248 | |
2249 | cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb); |
2250 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { |
2251 | /* since no page cache to corrupt on directio |
2252 | we can change size safely */ |
2253 | return true; |
2254 | } |
2255 | |
2256 | if (i_size_read(&cifsInode->vfs_inode) < end_of_file) |
2257 | return true; |
2258 | |
2259 | return false; |
2260 | } else |
2261 | return true; |
2262 | } |
2263 | |
/*
 * ->write_begin() for cifs: grab and lock the page that will receive the
 * write at @pos/@len, and decide whether its existing contents must be
 * read from the server first.  On success returns 0 with *pagep set to
 * the locked page; returns -ENOMEM if no page could be obtained.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			/* zero the parts of the page outside [offset,
			   offset+len) that the copy won't overwrite */
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2335 | |
2336 | static int cifs_release_page(struct page *page, gfp_t gfp) |
2337 | { |
2338 | if (PagePrivate(page)) |
2339 | return 0; |
2340 | |
2341 | return cifs_fscache_release_page(page, gfp); |
2342 | } |
2343 | |
2344 | static void cifs_invalidate_page(struct page *page, unsigned long offset) |
2345 | { |
2346 | struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); |
2347 | |
2348 | if (offset == 0) |
2349 | cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); |
2350 | } |
2351 | |
2352 | static int cifs_launder_page(struct page *page) |
2353 | { |
2354 | int rc = 0; |
2355 | loff_t range_start = page_offset(page); |
2356 | loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); |
2357 | struct writeback_control wbc = { |
2358 | .sync_mode = WB_SYNC_ALL, |
2359 | .nr_to_write = 0, |
2360 | .range_start = range_start, |
2361 | .range_end = range_end, |
2362 | }; |
2363 | |
2364 | cFYI(1, "Launder page: %p", page); |
2365 | |
2366 | if (clear_page_dirty_for_io(page)) |
2367 | rc = cifs_writepage_locked(page, &wbc); |
2368 | |
2369 | cifs_fscache_invalidate_page(page, page->mapping->host); |
2370 | return rc; |
2371 | } |
2372 | |
/*
 * Work handler run when the server breaks an oplock on this file:
 * notify the local lease layer, flush dirty pages, drop cached data if
 * read caching was lost, ack the break to the server (unless cancelled
 * after a reconnect), and release the reference taken by
 * cifs_oplock_break_get().
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* propagate the break to local lease holders; the mode
		   reflects the caching level that remains (NOTE(review):
		   confirm oplock-level to lease-mode mapping) */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* read caching lost: wait for writeback to finish
			   and toss cached pages so reads go to the server */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}

	/*
	 * We might have kicked in before is_valid_oplock_break()
	 * finished grabbing reference for us. Make sure it's done by
	 * waiting for cifs_file_list_lock.
	 */
	spin_lock(&cifs_file_list_lock);
	spin_unlock(&cifs_file_list_lock);

	cifs_oplock_break_put(cfile);
}
2418 | |
/* must be called while holding cifs_file_list_lock */
void cifs_oplock_break_get(struct cifsFileInfo *cfile)
{
	/* pin the superblock and the file instance so both survive until
	   the queued cifs_oplock_break() work runs and calls _put() */
	cifs_sb_active(cfile->dentry->d_sb);
	cifsFileInfo_get(cfile);
}
2425 | |
/* release the references taken by cifs_oplock_break_get() */
void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
	/* grab sb before the file ref is dropped: cfile (and its dentry)
	   may be freed by cifsFileInfo_put() */
	struct super_block *sb = cfile->dentry->d_sb;

	cifsFileInfo_put(cfile);
	cifs_sb_deactive(sb);
}
2433 | |
/*
 * Address space operations used when the server buffer is large enough
 * for cifs_readpages (see the comment on cifs_addr_ops_smallbuf below).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
2446 | |
2447 | /* |
2448 | * cifs_readpages requires the server to support a buffer large enough to |
2449 | * contain the header plus one complete page of data. Otherwise, we need |
2450 | * to leave cifs_readpages out of the address space operations. |
2451 | */ |
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* no .readpages: server buffer too small for a full page of data */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
2463 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9