1 | /* |
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. |
4 | * |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ |
18 | #include "xfs.h" |
19 | #include "xfs_fs.h" |
20 | #include "xfs_types.h" |
21 | #include "xfs_bit.h" |
22 | #include "xfs_log.h" |
23 | #include "xfs_inum.h" |
24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" |
28 | #include "xfs_dmapi.h" |
29 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" |
34 | #include "xfs_attr_sf.h" |
35 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" |
37 | #include "xfs_btree.h" |
38 | #include "xfs_ialloc.h" |
39 | #include "xfs_alloc.h" |
40 | #include "xfs_rtalloc.h" |
41 | #include "xfs_bmap.h" |
42 | #include "xfs_error.h" |
43 | #include "xfs_rw.h" |
44 | #include "xfs_quota.h" |
45 | #include "xfs_fsops.h" |
46 | #include "xfs_utils.h" |
47 | #include "xfs_trace.h" |
48 | |
49 | |
50 | STATIC void xfs_unmountfs_wait(xfs_mount_t *); |
51 | |
52 | |
53 | #ifdef HAVE_PERCPU_SB |
54 | STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, |
55 | int); |
56 | STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t, |
57 | int); |
58 | STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t, |
59 | int64_t, int); |
60 | STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); |
61 | |
62 | #else |
63 | |
64 | #define xfs_icsb_balance_counter(mp, a, b) do { } while (0) |
65 | #define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0) |
66 | #define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0) |
67 | |
68 | #endif |
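/*
 * Without per-cpu superblock counters the stubs above compile away and the
 * counters are maintained directly in mp->m_sb under m_sb_lock (see
 * xfs_mod_incore_sb_unlocked() below).
 */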
69 | |
70 | static const struct { |
71 | short offset; |
72 | short type; /* 0 = integer |
73 | * 1 = binary / string (no translation) |
74 | */ |
75 | } xfs_sb_info[] = { |
76 | { offsetof(xfs_sb_t, sb_magicnum), 0 }, |
77 | { offsetof(xfs_sb_t, sb_blocksize), 0 }, |
78 | { offsetof(xfs_sb_t, sb_dblocks), 0 }, |
79 | { offsetof(xfs_sb_t, sb_rblocks), 0 }, |
80 | { offsetof(xfs_sb_t, sb_rextents), 0 }, |
81 | { offsetof(xfs_sb_t, sb_uuid), 1 }, |
82 | { offsetof(xfs_sb_t, sb_logstart), 0 }, |
83 | { offsetof(xfs_sb_t, sb_rootino), 0 }, |
84 | { offsetof(xfs_sb_t, sb_rbmino), 0 }, |
85 | { offsetof(xfs_sb_t, sb_rsumino), 0 }, |
86 | { offsetof(xfs_sb_t, sb_rextsize), 0 }, |
87 | { offsetof(xfs_sb_t, sb_agblocks), 0 }, |
88 | { offsetof(xfs_sb_t, sb_agcount), 0 }, |
89 | { offsetof(xfs_sb_t, sb_rbmblocks), 0 }, |
90 | { offsetof(xfs_sb_t, sb_logblocks), 0 }, |
91 | { offsetof(xfs_sb_t, sb_versionnum), 0 }, |
92 | { offsetof(xfs_sb_t, sb_sectsize), 0 }, |
93 | { offsetof(xfs_sb_t, sb_inodesize), 0 }, |
94 | { offsetof(xfs_sb_t, sb_inopblock), 0 }, |
95 | { offsetof(xfs_sb_t, sb_fname[0]), 1 }, |
96 | { offsetof(xfs_sb_t, sb_blocklog), 0 }, |
97 | { offsetof(xfs_sb_t, sb_sectlog), 0 }, |
98 | { offsetof(xfs_sb_t, sb_inodelog), 0 }, |
99 | { offsetof(xfs_sb_t, sb_inopblog), 0 }, |
100 | { offsetof(xfs_sb_t, sb_agblklog), 0 }, |
101 | { offsetof(xfs_sb_t, sb_rextslog), 0 }, |
102 | { offsetof(xfs_sb_t, sb_inprogress), 0 }, |
103 | { offsetof(xfs_sb_t, sb_imax_pct), 0 }, |
104 | { offsetof(xfs_sb_t, sb_icount), 0 }, |
105 | { offsetof(xfs_sb_t, sb_ifree), 0 }, |
106 | { offsetof(xfs_sb_t, sb_fdblocks), 0 }, |
107 | { offsetof(xfs_sb_t, sb_frextents), 0 }, |
108 | { offsetof(xfs_sb_t, sb_uquotino), 0 }, |
109 | { offsetof(xfs_sb_t, sb_gquotino), 0 }, |
110 | { offsetof(xfs_sb_t, sb_qflags), 0 }, |
111 | { offsetof(xfs_sb_t, sb_flags), 0 }, |
112 | { offsetof(xfs_sb_t, sb_shared_vn), 0 }, |
113 | { offsetof(xfs_sb_t, sb_inoalignmt), 0 }, |
114 | { offsetof(xfs_sb_t, sb_unit), 0 }, |
115 | { offsetof(xfs_sb_t, sb_width), 0 }, |
116 | { offsetof(xfs_sb_t, sb_dirblklog), 0 }, |
117 | { offsetof(xfs_sb_t, sb_logsectlog), 0 }, |
118 | { offsetof(xfs_sb_t, sb_logsectsize),0 }, |
119 | { offsetof(xfs_sb_t, sb_logsunit), 0 }, |
120 | { offsetof(xfs_sb_t, sb_features2), 0 }, |
121 | { offsetof(xfs_sb_t, sb_bad_features2), 0 }, |
122 | { sizeof(xfs_sb_t), 0 } |
123 | }; |
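/*
 * The entries above mirror the field order of xfs_sb_t; the trailing
 * { sizeof(xfs_sb_t), 0 } entry is a sentinel so the byte size of field f
 * can be computed as xfs_sb_info[f + 1].offset - xfs_sb_info[f].offset
 * (see xfs_sb_to_disk() and xfs_mod_sb()).
 */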
124 | |
125 | static DEFINE_MUTEX(xfs_uuid_table_mutex); |
126 | static int xfs_uuid_table_size; |
127 | static uuid_t *xfs_uuid_table; |
128 | |
129 | /* |
130 | * See if the UUID is unique among mounted XFS filesystems. |
131 | * Mount fails if UUID is nil or a FS with the same UUID is already mounted. |
132 | */ |
133 | STATIC int |
134 | xfs_uuid_mount( |
135 | struct xfs_mount *mp) |
136 | { |
137 | uuid_t *uuid = &mp->m_sb.sb_uuid; |
138 | int hole, i; |
139 | |
140 | if (mp->m_flags & XFS_MOUNT_NOUUID) |
141 | return 0; |
142 | |
143 | if (uuid_is_nil(uuid)) { |
144 | cmn_err(CE_WARN, |
145 | "XFS: Filesystem %s has nil UUID - can't mount", |
146 | mp->m_fsname); |
147 | return XFS_ERROR(EINVAL); |
148 | } |
149 | |
150 | mutex_lock(&xfs_uuid_table_mutex); |
151 | for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) { |
152 | if (uuid_is_nil(&xfs_uuid_table[i])) { |
153 | hole = i; |
154 | continue; |
155 | } |
156 | if (uuid_equal(uuid, &xfs_uuid_table[i])) |
157 | goto out_duplicate; |
158 | } |
159 | |
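/* No free (nil) slot was found: grow the table by one entry. */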
160 | if (hole < 0) { |
161 | xfs_uuid_table = kmem_realloc(xfs_uuid_table, |
162 | (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table), |
163 | xfs_uuid_table_size * sizeof(*xfs_uuid_table), |
164 | KM_SLEEP); |
165 | hole = xfs_uuid_table_size++; |
166 | } |
167 | xfs_uuid_table[hole] = *uuid; |
168 | mutex_unlock(&xfs_uuid_table_mutex); |
169 | |
170 | return 0; |
171 | |
172 | out_duplicate: |
173 | mutex_unlock(&xfs_uuid_table_mutex); |
174 | cmn_err(CE_WARN, "XFS: Filesystem %s has duplicate UUID - can't mount", |
175 | mp->m_fsname); |
176 | return XFS_ERROR(EINVAL); |
177 | } |
178 | |
179 | STATIC void |
180 | xfs_uuid_unmount( |
181 | struct xfs_mount *mp) |
182 | { |
183 | uuid_t *uuid = &mp->m_sb.sb_uuid; |
184 | int i; |
185 | |
186 | if (mp->m_flags & XFS_MOUNT_NOUUID) |
187 | return; |
188 | |
189 | mutex_lock(&xfs_uuid_table_mutex); |
190 | for (i = 0; i < xfs_uuid_table_size; i++) { |
191 | if (uuid_is_nil(&xfs_uuid_table[i])) |
192 | continue; |
193 | if (!uuid_equal(uuid, &xfs_uuid_table[i])) |
194 | continue; |
195 | memset(&xfs_uuid_table[i], 0, sizeof(uuid_t)); |
196 | break; |
197 | } |
198 | ASSERT(i < xfs_uuid_table_size); |
199 | mutex_unlock(&xfs_uuid_table_mutex); |
200 | } |
201 | |
202 | |
203 | /* |
204 | * Reference counting access wrappers to the perag structures. |
205 | */ |
206 | struct xfs_perag * |
207 | xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno) |
208 | { |
209 | struct xfs_perag *pag; |
210 | int ref = 0; |
211 | |
212 | spin_lock(&mp->m_perag_lock); |
213 | pag = radix_tree_lookup(&mp->m_perag_tree, agno); |
214 | if (pag) { |
215 | ASSERT(atomic_read(&pag->pag_ref) >= 0); |
216 | /* catch leaks in the positive direction during testing */ |
217 | ASSERT(atomic_read(&pag->pag_ref) < 1000); |
218 | ref = atomic_inc_return(&pag->pag_ref); |
219 | } |
220 | spin_unlock(&mp->m_perag_lock); |
221 | trace_xfs_perag_get(mp, agno, ref, _RET_IP_); |
222 | return pag; |
223 | } |
224 | |
225 | void |
226 | xfs_perag_put(struct xfs_perag *pag) |
227 | { |
228 | int ref; |
229 | |
230 | ASSERT(atomic_read(&pag->pag_ref) > 0); |
231 | ref = atomic_dec_return(&pag->pag_ref); |
232 | trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_); |
233 | } |
234 | |
235 | /* |
236 |  * Free up the per-ag resources associated with the mount structure: |
237 |  * remove each per-ag structure from the radix tree and free it. |
239 | */ |
240 | STATIC void |
241 | xfs_free_perag( |
242 | xfs_mount_t *mp) |
243 | { |
244 | xfs_agnumber_t agno; |
245 | struct xfs_perag *pag; |
246 | |
247 | for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { |
248 | spin_lock(&mp->m_perag_lock); |
249 | pag = radix_tree_delete(&mp->m_perag_tree, agno); |
250 | ASSERT(pag); |
251 | ASSERT(atomic_read(&pag->pag_ref) == 0); |
252 | spin_unlock(&mp->m_perag_lock); |
253 | kmem_free(pag); |
254 | } |
255 | } |
256 | |
257 | /* |
258 | * Check size of device based on the (data/realtime) block count. |
259 | * Note: this check is used by the growfs code as well as mount. |
260 | */ |
261 | int |
262 | xfs_sb_validate_fsb_count( |
263 | xfs_sb_t *sbp, |
264 | __uint64_t nblocks) |
265 | { |
266 | ASSERT(PAGE_SHIFT >= sbp->sb_blocklog); |
267 | ASSERT(sbp->sb_blocklog >= BBSHIFT); |
268 | |
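/* e.g. without XFS_BIG_BLKNOS, UINT_MAX 512-byte sectors is roughly 2 TiB */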
269 | #if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */ |
270 | if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) |
271 | return E2BIG; |
272 | #else /* Limited by UINT_MAX of sectors */ |
273 | if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX) |
274 | return E2BIG; |
275 | #endif |
276 | return 0; |
277 | } |
278 | |
279 | /* |
280 | * Check the validity of the SB found. |
281 | */ |
282 | STATIC int |
283 | xfs_mount_validate_sb( |
284 | xfs_mount_t *mp, |
285 | xfs_sb_t *sbp, |
286 | int flags) |
287 | { |
288 | /* |
289 | * If the log device and data device have the |
290 | * same device number, the log is internal. |
291 | * Consequently, the sb_logstart should be non-zero. If |
292 | * we have a zero sb_logstart in this case, we may be trying to mount |
293 | * a volume filesystem in a non-volume manner. |
294 | */ |
295 | if (sbp->sb_magicnum != XFS_SB_MAGIC) { |
296 | xfs_fs_mount_cmn_err(flags, "bad magic number"); |
297 | return XFS_ERROR(EWRONGFS); |
298 | } |
299 | |
300 | if (!xfs_sb_good_version(sbp)) { |
301 | xfs_fs_mount_cmn_err(flags, "bad version"); |
302 | return XFS_ERROR(EWRONGFS); |
303 | } |
304 | |
305 | if (unlikely( |
306 | sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) { |
307 | xfs_fs_mount_cmn_err(flags, |
308 | "filesystem is marked as having an external log; " |
309 | "specify logdev on the\nmount command line."); |
310 | return XFS_ERROR(EINVAL); |
311 | } |
312 | |
313 | if (unlikely( |
314 | sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) { |
315 | xfs_fs_mount_cmn_err(flags, |
316 | "filesystem is marked as having an internal log; " |
317 | "do not specify logdev on\nthe mount command line."); |
318 | return XFS_ERROR(EINVAL); |
319 | } |
320 | |
321 | /* |
322 | * More sanity checking. These were stolen directly from |
323 | * xfs_repair. |
324 | */ |
325 | if (unlikely( |
326 | sbp->sb_agcount <= 0 || |
327 | sbp->sb_sectsize < XFS_MIN_SECTORSIZE || |
328 | sbp->sb_sectsize > XFS_MAX_SECTORSIZE || |
329 | sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG || |
330 | sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG || |
331 | sbp->sb_sectsize != (1 << sbp->sb_sectlog) || |
332 | sbp->sb_blocksize < XFS_MIN_BLOCKSIZE || |
333 | sbp->sb_blocksize > XFS_MAX_BLOCKSIZE || |
334 | sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || |
335 | sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || |
336 | sbp->sb_blocksize != (1 << sbp->sb_blocklog) || |
337 | sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || |
338 | sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || |
339 | sbp->sb_inodelog < XFS_DINODE_MIN_LOG || |
340 | sbp->sb_inodelog > XFS_DINODE_MAX_LOG || |
341 | sbp->sb_inodesize != (1 << sbp->sb_inodelog) || |
342 | (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) || |
343 | (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || |
344 | (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || |
345 | (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) { |
346 | xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed"); |
347 | return XFS_ERROR(EFSCORRUPTED); |
348 | } |
349 | |
350 | /* |
351 | * Sanity check AG count, size fields against data size field |
352 | */ |
353 | if (unlikely( |
354 | sbp->sb_dblocks == 0 || |
355 | sbp->sb_dblocks > |
356 | (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks || |
357 | sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) * |
358 | sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) { |
359 | xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed"); |
360 | return XFS_ERROR(EFSCORRUPTED); |
361 | } |
362 | |
363 | /* |
364 | * Until this is fixed only page-sized or smaller data blocks work. |
365 | */ |
366 | if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) { |
367 | xfs_fs_mount_cmn_err(flags, |
368 | "file system with blocksize %d bytes", |
369 | sbp->sb_blocksize); |
370 | xfs_fs_mount_cmn_err(flags, |
371 | "only pagesize (%ld) or less will currently work.", |
372 | PAGE_SIZE); |
373 | return XFS_ERROR(ENOSYS); |
374 | } |
375 | |
376 | /* |
377 | * Currently only very few inode sizes are supported. |
378 | */ |
379 | switch (sbp->sb_inodesize) { |
380 | case 256: |
381 | case 512: |
382 | case 1024: |
383 | case 2048: |
384 | break; |
385 | default: |
386 | xfs_fs_mount_cmn_err(flags, |
387 | "inode size of %d bytes not supported", |
388 | sbp->sb_inodesize); |
389 | return XFS_ERROR(ENOSYS); |
390 | } |
391 | |
392 | if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || |
393 | xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { |
394 | xfs_fs_mount_cmn_err(flags, |
395 | "file system too large to be mounted on this system."); |
396 | return XFS_ERROR(E2BIG); |
397 | } |
398 | |
399 | if (unlikely(sbp->sb_inprogress)) { |
400 | xfs_fs_mount_cmn_err(flags, "file system busy"); |
401 | return XFS_ERROR(EFSCORRUPTED); |
402 | } |
403 | |
404 | /* |
405 | * Version 1 directory format has never worked on Linux. |
406 | */ |
407 | if (unlikely(!xfs_sb_version_hasdirv2(sbp))) { |
408 | xfs_fs_mount_cmn_err(flags, |
409 | "file system using version 1 directory format"); |
410 | return XFS_ERROR(ENOSYS); |
411 | } |
412 | |
413 | return 0; |
414 | } |
415 | |
416 | STATIC void |
417 | xfs_initialize_perag_icache( |
418 | xfs_perag_t *pag) |
419 | { |
420 | if (!pag->pag_ici_init) { |
421 | rwlock_init(&pag->pag_ici_lock); |
422 | INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); |
423 | pag->pag_ici_init = 1; |
424 | } |
425 | } |
426 | |
427 | int |
428 | xfs_initialize_perag( |
429 | xfs_mount_t *mp, |
430 | xfs_agnumber_t agcount, |
431 | xfs_agnumber_t *maxagi) |
432 | { |
433 | xfs_agnumber_t index, max_metadata; |
434 | xfs_agnumber_t first_initialised = 0; |
435 | xfs_perag_t *pag; |
436 | xfs_agino_t agino; |
437 | xfs_ino_t ino; |
438 | xfs_sb_t *sbp = &mp->m_sb; |
439 | xfs_ino_t max_inum = XFS_MAXINUMBER_32; |
440 | int error = -ENOMEM; |
441 | |
442 | /* Check to see if the filesystem can overflow 32 bit inodes */ |
443 | agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); |
444 | ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); |
445 | |
446 | /* |
447 | * Walk the current per-ag tree so we don't try to initialise AGs |
448 | * that already exist (growfs case). Allocate and insert all the |
449 | * AGs we don't find ready for initialisation. |
450 | */ |
451 | for (index = 0; index < agcount; index++) { |
452 | pag = xfs_perag_get(mp, index); |
453 | if (pag) { |
454 | xfs_perag_put(pag); |
455 | continue; |
456 | } |
457 | if (!first_initialised) |
458 | first_initialised = index; |
459 | pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL); |
460 | if (!pag) |
461 | goto out_unwind; |
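/*
 * Preload the radix tree so that the insert below, done under
 * m_perag_lock, does not need to allocate memory.
 */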
462 | if (radix_tree_preload(GFP_NOFS)) |
463 | goto out_unwind; |
464 | spin_lock(&mp->m_perag_lock); |
465 | if (radix_tree_insert(&mp->m_perag_tree, index, pag)) { |
466 | BUG(); |
467 | spin_unlock(&mp->m_perag_lock); |
468 | radix_tree_preload_end(); |
469 | error = -EEXIST; |
470 | goto out_unwind; |
471 | } |
472 | pag->pag_agno = index; |
473 | pag->pag_mount = mp; |
474 | spin_unlock(&mp->m_perag_lock); |
475 | radix_tree_preload_end(); |
476 | } |
477 | |
478 | /* Clear the mount flag if no inode can overflow 32 bits |
479 | 	 * on this filesystem, or if specifically requested. |
480 | */ |
481 | if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > max_inum) { |
482 | mp->m_flags |= XFS_MOUNT_32BITINODES; |
483 | } else { |
484 | mp->m_flags &= ~XFS_MOUNT_32BITINODES; |
485 | } |
486 | |
487 | /* If we can overflow then setup the ag headers accordingly */ |
488 | if (mp->m_flags & XFS_MOUNT_32BITINODES) { |
489 | /* Calculate how much should be reserved for inodes to |
490 | * meet the max inode percentage. |
491 | */ |
492 | if (mp->m_maxicount) { |
493 | __uint64_t icount; |
494 | |
495 | icount = sbp->sb_dblocks * sbp->sb_imax_pct; |
496 | do_div(icount, 100); |
497 | icount += sbp->sb_agblocks - 1; |
498 | do_div(icount, sbp->sb_agblocks); |
499 | max_metadata = icount; |
500 | } else { |
501 | max_metadata = agcount; |
502 | } |
503 | for (index = 0; index < agcount; index++) { |
504 | ino = XFS_AGINO_TO_INO(mp, index, agino); |
505 | if (ino > max_inum) { |
506 | index++; |
507 | break; |
508 | } |
509 | |
510 | /* This ag is preferred for inodes */ |
511 | pag = xfs_perag_get(mp, index); |
512 | pag->pagi_inodeok = 1; |
513 | if (index < max_metadata) |
514 | pag->pagf_metadata = 1; |
515 | xfs_initialize_perag_icache(pag); |
516 | xfs_perag_put(pag); |
517 | } |
518 | } else { |
519 | /* Setup default behavior for smaller filesystems */ |
520 | for (index = 0; index < agcount; index++) { |
521 | pag = xfs_perag_get(mp, index); |
522 | pag->pagi_inodeok = 1; |
523 | xfs_initialize_perag_icache(pag); |
524 | xfs_perag_put(pag); |
525 | } |
526 | } |
527 | if (maxagi) |
528 | *maxagi = index; |
529 | return 0; |
530 | |
531 | out_unwind: |
532 | kmem_free(pag); |
533 | for (; index > first_initialised; index--) { |
534 | pag = radix_tree_delete(&mp->m_perag_tree, index); |
535 | kmem_free(pag); |
536 | } |
537 | return error; |
538 | } |
539 | |
540 | void |
541 | xfs_sb_from_disk( |
542 | xfs_sb_t *to, |
543 | xfs_dsb_t *from) |
544 | { |
545 | to->sb_magicnum = be32_to_cpu(from->sb_magicnum); |
546 | to->sb_blocksize = be32_to_cpu(from->sb_blocksize); |
547 | to->sb_dblocks = be64_to_cpu(from->sb_dblocks); |
548 | to->sb_rblocks = be64_to_cpu(from->sb_rblocks); |
549 | to->sb_rextents = be64_to_cpu(from->sb_rextents); |
550 | memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid)); |
551 | to->sb_logstart = be64_to_cpu(from->sb_logstart); |
552 | to->sb_rootino = be64_to_cpu(from->sb_rootino); |
553 | to->sb_rbmino = be64_to_cpu(from->sb_rbmino); |
554 | to->sb_rsumino = be64_to_cpu(from->sb_rsumino); |
555 | to->sb_rextsize = be32_to_cpu(from->sb_rextsize); |
556 | to->sb_agblocks = be32_to_cpu(from->sb_agblocks); |
557 | to->sb_agcount = be32_to_cpu(from->sb_agcount); |
558 | to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks); |
559 | to->sb_logblocks = be32_to_cpu(from->sb_logblocks); |
560 | to->sb_versionnum = be16_to_cpu(from->sb_versionnum); |
561 | to->sb_sectsize = be16_to_cpu(from->sb_sectsize); |
562 | to->sb_inodesize = be16_to_cpu(from->sb_inodesize); |
563 | to->sb_inopblock = be16_to_cpu(from->sb_inopblock); |
564 | memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname)); |
565 | to->sb_blocklog = from->sb_blocklog; |
566 | to->sb_sectlog = from->sb_sectlog; |
567 | to->sb_inodelog = from->sb_inodelog; |
568 | to->sb_inopblog = from->sb_inopblog; |
569 | to->sb_agblklog = from->sb_agblklog; |
570 | to->sb_rextslog = from->sb_rextslog; |
571 | to->sb_inprogress = from->sb_inprogress; |
572 | to->sb_imax_pct = from->sb_imax_pct; |
573 | to->sb_icount = be64_to_cpu(from->sb_icount); |
574 | to->sb_ifree = be64_to_cpu(from->sb_ifree); |
575 | to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks); |
576 | to->sb_frextents = be64_to_cpu(from->sb_frextents); |
577 | to->sb_uquotino = be64_to_cpu(from->sb_uquotino); |
578 | to->sb_gquotino = be64_to_cpu(from->sb_gquotino); |
579 | to->sb_qflags = be16_to_cpu(from->sb_qflags); |
580 | to->sb_flags = from->sb_flags; |
581 | to->sb_shared_vn = from->sb_shared_vn; |
582 | to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt); |
583 | to->sb_unit = be32_to_cpu(from->sb_unit); |
584 | to->sb_width = be32_to_cpu(from->sb_width); |
585 | to->sb_dirblklog = from->sb_dirblklog; |
586 | to->sb_logsectlog = from->sb_logsectlog; |
587 | to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize); |
588 | to->sb_logsunit = be32_to_cpu(from->sb_logsunit); |
589 | to->sb_features2 = be32_to_cpu(from->sb_features2); |
590 | to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2); |
591 | } |
592 | |
593 | /* |
594 |  * Copy the in-core superblock to the on-disk one. |
595 |  * |
596 |  * The fields argument is a mask of superblock fields to copy. |
597 | */ |
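/*
 * For example, fields == (XFS_SB_UNIT | XFS_SB_WIDTH) converts and copies
 * only sb_unit and sb_width; each field's size comes from consecutive
 * xfs_sb_info offsets.
 */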
598 | void |
599 | xfs_sb_to_disk( |
600 | xfs_dsb_t *to, |
601 | xfs_sb_t *from, |
602 | __int64_t fields) |
603 | { |
604 | xfs_caddr_t to_ptr = (xfs_caddr_t)to; |
605 | xfs_caddr_t from_ptr = (xfs_caddr_t)from; |
606 | xfs_sb_field_t f; |
607 | int first; |
608 | int size; |
609 | |
610 | ASSERT(fields); |
611 | if (!fields) |
612 | return; |
613 | |
614 | while (fields) { |
615 | f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); |
616 | first = xfs_sb_info[f].offset; |
617 | size = xfs_sb_info[f + 1].offset - first; |
618 | |
619 | ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1); |
620 | |
621 | if (size == 1 || xfs_sb_info[f].type == 1) { |
622 | memcpy(to_ptr + first, from_ptr + first, size); |
623 | } else { |
624 | switch (size) { |
625 | case 2: |
626 | *(__be16 *)(to_ptr + first) = |
627 | cpu_to_be16(*(__u16 *)(from_ptr + first)); |
628 | break; |
629 | case 4: |
630 | *(__be32 *)(to_ptr + first) = |
631 | cpu_to_be32(*(__u32 *)(from_ptr + first)); |
632 | break; |
633 | case 8: |
634 | *(__be64 *)(to_ptr + first) = |
635 | cpu_to_be64(*(__u64 *)(from_ptr + first)); |
636 | break; |
637 | default: |
638 | ASSERT(0); |
639 | } |
640 | } |
641 | |
642 | fields &= ~(1LL << f); |
643 | } |
644 | } |
645 | |
646 | /* |
647 | * xfs_readsb |
648 | * |
649 | * Does the initial read of the superblock. |
650 | */ |
651 | int |
652 | xfs_readsb(xfs_mount_t *mp, int flags) |
653 | { |
654 | unsigned int sector_size; |
655 | unsigned int extra_flags; |
656 | xfs_buf_t *bp; |
657 | int error; |
658 | |
659 | ASSERT(mp->m_sb_bp == NULL); |
660 | ASSERT(mp->m_ddev_targp != NULL); |
661 | |
662 | /* |
663 | * Allocate a (locked) buffer to hold the superblock. |
664 | * This will be kept around at all times to optimize |
665 | * access to the superblock. |
666 | */ |
667 | sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); |
668 | extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED; |
669 | |
670 | bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size), |
671 | extra_flags); |
672 | if (!bp || XFS_BUF_ISERROR(bp)) { |
673 | xfs_fs_mount_cmn_err(flags, "SB read failed"); |
674 | error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; |
675 | goto fail; |
676 | } |
677 | ASSERT(XFS_BUF_ISBUSY(bp)); |
678 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); |
679 | |
680 | /* |
681 | * Initialize the mount structure from the superblock. |
682 | * But first do some basic consistency checking. |
683 | */ |
684 | xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); |
685 | |
686 | error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags); |
687 | if (error) { |
688 | xfs_fs_mount_cmn_err(flags, "SB validate failed"); |
689 | goto fail; |
690 | } |
691 | |
692 | /* |
693 | * We must be able to do sector-sized and sector-aligned IO. |
694 | */ |
695 | if (sector_size > mp->m_sb.sb_sectsize) { |
696 | xfs_fs_mount_cmn_err(flags, |
697 | "device supports only %u byte sectors (not %u)", |
698 | sector_size, mp->m_sb.sb_sectsize); |
699 | error = ENOSYS; |
700 | goto fail; |
701 | } |
702 | |
703 | /* |
704 | * If device sector size is smaller than the superblock size, |
705 | * re-read the superblock so the buffer is correctly sized. |
706 | */ |
707 | if (sector_size < mp->m_sb.sb_sectsize) { |
708 | XFS_BUF_UNMANAGE(bp); |
709 | xfs_buf_relse(bp); |
710 | sector_size = mp->m_sb.sb_sectsize; |
711 | bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, |
712 | BTOBB(sector_size), extra_flags); |
713 | if (!bp || XFS_BUF_ISERROR(bp)) { |
714 | xfs_fs_mount_cmn_err(flags, "SB re-read failed"); |
715 | error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; |
716 | goto fail; |
717 | } |
718 | ASSERT(XFS_BUF_ISBUSY(bp)); |
719 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); |
720 | } |
721 | |
722 | /* Initialize per-cpu counters */ |
723 | xfs_icsb_reinit_counters(mp); |
724 | |
725 | mp->m_sb_bp = bp; |
726 | xfs_buf_relse(bp); |
727 | ASSERT(XFS_BUF_VALUSEMA(bp) > 0); |
728 | return 0; |
729 | |
730 | fail: |
731 | if (bp) { |
732 | XFS_BUF_UNMANAGE(bp); |
733 | xfs_buf_relse(bp); |
734 | } |
735 | return error; |
736 | } |
737 | |
738 | |
739 | /* |
740 | * xfs_mount_common |
741 | * |
742 | * Mount initialization code establishing various mount |
743 | * fields from the superblock associated with the given |
744 | * mount structure |
745 | */ |
746 | STATIC void |
747 | xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) |
748 | { |
749 | mp->m_agfrotor = mp->m_agirotor = 0; |
750 | spin_lock_init(&mp->m_agirotor_lock); |
751 | mp->m_maxagi = mp->m_sb.sb_agcount; |
752 | mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; |
753 | mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; |
754 | mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT; |
755 | mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1; |
756 | mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog; |
757 | mp->m_blockmask = sbp->sb_blocksize - 1; |
758 | mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; |
759 | mp->m_blockwmask = mp->m_blockwsize - 1; |
760 | |
761 | mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1); |
762 | mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0); |
763 | mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2; |
764 | mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2; |
765 | |
766 | mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); |
767 | mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); |
768 | mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2; |
769 | mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2; |
770 | |
771 | mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1); |
772 | mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0); |
773 | mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2; |
774 | mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2; |
775 | |
776 | mp->m_bsize = XFS_FSB_TO_BB(mp, 1); |
777 | mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK, |
778 | sbp->sb_inopblock); |
779 | mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog; |
780 | } |
781 | |
782 | /* |
783 | * xfs_initialize_perag_data |
784 | * |
785 | * Read in each per-ag structure so we can count up the number of |
786 | * allocated inodes, free inodes and used filesystem blocks as this |
787 | * information is no longer persistent in the superblock. Once we have |
788 | * this information, write it into the in-core superblock structure. |
789 | */ |
790 | STATIC int |
791 | xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) |
792 | { |
793 | xfs_agnumber_t index; |
794 | xfs_perag_t *pag; |
795 | xfs_sb_t *sbp = &mp->m_sb; |
796 | uint64_t ifree = 0; |
797 | uint64_t ialloc = 0; |
798 | uint64_t bfree = 0; |
799 | uint64_t bfreelst = 0; |
800 | uint64_t btree = 0; |
801 | int error; |
802 | |
803 | for (index = 0; index < agcount; index++) { |
804 | /* |
805 | * read the agf, then the agi. This gets us |
806 | * all the information we need and populates the |
807 | * per-ag structures for us. |
808 | */ |
809 | error = xfs_alloc_pagf_init(mp, NULL, index, 0); |
810 | if (error) |
811 | return error; |
812 | |
813 | error = xfs_ialloc_pagi_init(mp, NULL, index); |
814 | if (error) |
815 | return error; |
816 | pag = xfs_perag_get(mp, index); |
817 | ifree += pag->pagi_freecount; |
818 | ialloc += pag->pagi_count; |
819 | bfree += pag->pagf_freeblks; |
820 | bfreelst += pag->pagf_flcount; |
821 | btree += pag->pagf_btreeblks; |
822 | xfs_perag_put(pag); |
823 | } |
824 | /* |
825 | * Overwrite incore superblock counters with just-read data |
826 | */ |
827 | spin_lock(&mp->m_sb_lock); |
828 | sbp->sb_ifree = ifree; |
829 | sbp->sb_icount = ialloc; |
830 | sbp->sb_fdblocks = bfree + bfreelst + btree; |
831 | spin_unlock(&mp->m_sb_lock); |
832 | |
833 | /* Fixup the per-cpu counters as well. */ |
834 | xfs_icsb_reinit_counters(mp); |
835 | |
836 | return 0; |
837 | } |
838 | |
839 | /* |
840 | * Update alignment values based on mount options and sb values |
841 | */ |
842 | STATIC int |
843 | xfs_update_alignment(xfs_mount_t *mp) |
844 | { |
845 | xfs_sb_t *sbp = &(mp->m_sb); |
846 | |
847 | if (mp->m_dalign) { |
848 | /* |
849 | * If stripe unit and stripe width are not multiples |
850 | * of the fs blocksize turn off alignment. |
851 | */ |
852 | if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || |
853 | (BBTOB(mp->m_swidth) & mp->m_blockmask)) { |
854 | if (mp->m_flags & XFS_MOUNT_RETERR) { |
855 | cmn_err(CE_WARN, |
856 | "XFS: alignment check 1 failed"); |
857 | return XFS_ERROR(EINVAL); |
858 | } |
859 | mp->m_dalign = mp->m_swidth = 0; |
860 | } else { |
861 | /* |
862 | * Convert the stripe unit and width to FSBs. |
863 | */ |
864 | mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); |
865 | if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { |
866 | if (mp->m_flags & XFS_MOUNT_RETERR) { |
867 | return XFS_ERROR(EINVAL); |
868 | } |
869 | xfs_fs_cmn_err(CE_WARN, mp, |
870 | "stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)", |
871 | mp->m_dalign, mp->m_swidth, |
872 | sbp->sb_agblocks); |
873 | |
874 | mp->m_dalign = 0; |
875 | mp->m_swidth = 0; |
876 | } else if (mp->m_dalign) { |
877 | mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); |
878 | } else { |
879 | if (mp->m_flags & XFS_MOUNT_RETERR) { |
880 | xfs_fs_cmn_err(CE_WARN, mp, |
881 | "stripe alignment turned off: sunit(%d) less than bsize(%d)", |
882 | mp->m_dalign, |
883 | mp->m_blockmask +1); |
884 | return XFS_ERROR(EINVAL); |
885 | } |
886 | mp->m_swidth = 0; |
887 | } |
888 | } |
889 | |
890 | /* |
891 | * Update superblock with new values |
892 | * and log changes |
893 | */ |
894 | if (xfs_sb_version_hasdalign(sbp)) { |
895 | if (sbp->sb_unit != mp->m_dalign) { |
896 | sbp->sb_unit = mp->m_dalign; |
897 | mp->m_update_flags |= XFS_SB_UNIT; |
898 | } |
899 | if (sbp->sb_width != mp->m_swidth) { |
900 | sbp->sb_width = mp->m_swidth; |
901 | mp->m_update_flags |= XFS_SB_WIDTH; |
902 | } |
903 | } |
904 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && |
905 | xfs_sb_version_hasdalign(&mp->m_sb)) { |
906 | mp->m_dalign = sbp->sb_unit; |
907 | mp->m_swidth = sbp->sb_width; |
908 | } |
909 | |
910 | return 0; |
911 | } |
912 | |
913 | /* |
914 | * Set the maximum inode count for this filesystem |
915 | */ |
916 | STATIC void |
917 | xfs_set_maxicount(xfs_mount_t *mp) |
918 | { |
919 | xfs_sb_t *sbp = &(mp->m_sb); |
920 | __uint64_t icount; |
921 | |
922 | if (sbp->sb_imax_pct) { |
923 | /* |
924 | * Make sure the maximum inode count is a multiple |
925 | * of the units we allocate inodes in. |
926 | */ |
927 | icount = sbp->sb_dblocks * sbp->sb_imax_pct; |
928 | do_div(icount, 100); |
929 | do_div(icount, mp->m_ialloc_blks); |
930 | mp->m_maxicount = (icount * mp->m_ialloc_blks) << |
931 | sbp->sb_inopblog; |
932 | } else { |
933 | mp->m_maxicount = 0; |
934 | } |
935 | } |
936 | |
937 | /* |
938 | * Set the default minimum read and write sizes unless |
939 | * already specified in a mount option. |
940 | * We use smaller I/O sizes when the file system |
941 | * is being used for NFS service (wsync mount option). |
942 | */ |
943 | STATIC void |
944 | xfs_set_rw_sizes(xfs_mount_t *mp) |
945 | { |
946 | xfs_sb_t *sbp = &(mp->m_sb); |
947 | int readio_log, writeio_log; |
948 | |
949 | if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { |
950 | if (mp->m_flags & XFS_MOUNT_WSYNC) { |
951 | readio_log = XFS_WSYNC_READIO_LOG; |
952 | writeio_log = XFS_WSYNC_WRITEIO_LOG; |
953 | } else { |
954 | readio_log = XFS_READIO_LOG_LARGE; |
955 | writeio_log = XFS_WRITEIO_LOG_LARGE; |
956 | } |
957 | } else { |
958 | readio_log = mp->m_readio_log; |
959 | writeio_log = mp->m_writeio_log; |
960 | } |
961 | |
962 | if (sbp->sb_blocklog > readio_log) { |
963 | mp->m_readio_log = sbp->sb_blocklog; |
964 | } else { |
965 | mp->m_readio_log = readio_log; |
966 | } |
967 | mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog); |
968 | if (sbp->sb_blocklog > writeio_log) { |
969 | mp->m_writeio_log = sbp->sb_blocklog; |
970 | } else { |
971 | mp->m_writeio_log = writeio_log; |
972 | } |
973 | mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); |
974 | } |
975 | |
976 | /* |
977 | * Set whether we're using inode alignment. |
978 | */ |
979 | STATIC void |
980 | xfs_set_inoalignment(xfs_mount_t *mp) |
981 | { |
982 | if (xfs_sb_version_hasalign(&mp->m_sb) && |
983 | mp->m_sb.sb_inoalignmt >= |
984 | XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) |
985 | mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1; |
986 | else |
987 | mp->m_inoalign_mask = 0; |
988 | /* |
989 | * If we are using stripe alignment, check whether |
990 | * the stripe unit is a multiple of the inode alignment |
991 | */ |
992 | if (mp->m_dalign && mp->m_inoalign_mask && |
993 | !(mp->m_dalign & mp->m_inoalign_mask)) |
994 | mp->m_sinoalign = mp->m_dalign; |
995 | else |
996 | mp->m_sinoalign = 0; |
997 | } |
998 | |
999 | /* |
1000 | * Check that the data (and log if separate) are an ok size. |
1001 | */ |
1002 | STATIC int |
1003 | xfs_check_sizes(xfs_mount_t *mp) |
1004 | { |
1005 | xfs_buf_t *bp; |
1006 | xfs_daddr_t d; |
1007 | int error; |
1008 | |
1009 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); |
1010 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { |
1011 | cmn_err(CE_WARN, "XFS: size check 1 failed"); |
1012 | return XFS_ERROR(E2BIG); |
1013 | } |
1014 | error = xfs_read_buf(mp, mp->m_ddev_targp, |
1015 | d - XFS_FSS_TO_BB(mp, 1), |
1016 | XFS_FSS_TO_BB(mp, 1), 0, &bp); |
1017 | if (!error) { |
1018 | xfs_buf_relse(bp); |
1019 | } else { |
1020 | cmn_err(CE_WARN, "XFS: size check 2 failed"); |
1021 | if (error == ENOSPC) |
1022 | error = XFS_ERROR(E2BIG); |
1023 | return error; |
1024 | } |
1025 | |
1026 | if (mp->m_logdev_targp != mp->m_ddev_targp) { |
1027 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); |
1028 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { |
1029 | cmn_err(CE_WARN, "XFS: size check 3 failed"); |
1030 | return XFS_ERROR(E2BIG); |
1031 | } |
1032 | error = xfs_read_buf(mp, mp->m_logdev_targp, |
1033 | d - XFS_FSB_TO_BB(mp, 1), |
1034 | XFS_FSB_TO_BB(mp, 1), 0, &bp); |
1035 | if (!error) { |
1036 | xfs_buf_relse(bp); |
1037 | } else { |
1038 | cmn_err(CE_WARN, "XFS: size check 3 failed"); |
1039 | if (error == ENOSPC) |
1040 | error = XFS_ERROR(E2BIG); |
1041 | return error; |
1042 | } |
1043 | } |
1044 | return 0; |
1045 | } |
1046 | |
1047 | /* |
1048 | * Clear the quotaflags in memory and in the superblock. |
1049 | */ |
1050 | int |
1051 | xfs_mount_reset_sbqflags( |
1052 | struct xfs_mount *mp) |
1053 | { |
1054 | int error; |
1055 | struct xfs_trans *tp; |
1056 | |
1057 | mp->m_qflags = 0; |
1058 | |
1059 | /* |
1060 | * It is OK to look at sb_qflags here in mount path, |
1061 | * without m_sb_lock. |
1062 | */ |
1063 | if (mp->m_sb.sb_qflags == 0) |
1064 | return 0; |
1065 | spin_lock(&mp->m_sb_lock); |
1066 | mp->m_sb.sb_qflags = 0; |
1067 | spin_unlock(&mp->m_sb_lock); |
1068 | |
1069 | /* |
1070 | * If the fs is readonly, let the incore superblock run |
1071 | * with quotas off but don't flush the update out to disk |
1072 | */ |
1073 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
1074 | return 0; |
1075 | |
1076 | #ifdef QUOTADEBUG |
1077 | xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes"); |
1078 | #endif |
1079 | |
1080 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); |
1081 | error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, |
1082 | XFS_DEFAULT_LOG_COUNT); |
1083 | if (error) { |
1084 | xfs_trans_cancel(tp, 0); |
1085 | xfs_fs_cmn_err(CE_ALERT, mp, |
1086 | "xfs_mount_reset_sbqflags: Superblock update failed!"); |
1087 | return error; |
1088 | } |
1089 | |
1090 | xfs_mod_sb(tp, XFS_SB_QFLAGS); |
1091 | return xfs_trans_commit(tp, 0); |
1092 | } |
1093 | |
1094 | __uint64_t |
1095 | xfs_default_resblks(xfs_mount_t *mp) |
1096 | { |
1097 | __uint64_t resblks; |
1098 | |
1099 | /* |
1100 | * We default to 5% or 8192 fsbs of space reserved, whichever is |
1101 | * smaller. This is intended to cover concurrent allocation |
1102 | * transactions when we initially hit enospc. These each require a 4 |
1103 | * block reservation. Hence by default we cover roughly 2000 concurrent |
1104 | * allocation reservations. |
1105 | */ |
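/*
 * Example (assumed geometry): a 1 TB filesystem with 4 KiB blocks has
 * ~268 million blocks; 5% of that is far larger than 8192, so the 8192
 * block cap applies, covering roughly 8192 / 4 = 2048 reservations.
 */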
1106 | resblks = mp->m_sb.sb_dblocks; |
1107 | do_div(resblks, 20); |
1108 | resblks = min_t(__uint64_t, resblks, 8192); |
1109 | return resblks; |
1110 | } |
1111 | |
1112 | /* |
1113 | * This function does the following on an initial mount of a file system: |
1114 | * - reads the superblock from disk and init the mount struct |
1115 | * - if we're a 32-bit kernel, do a size check on the superblock |
1116 | * so we don't mount terabyte filesystems |
1117 | * - init mount struct realtime fields |
1118 | * - allocate inode hash table for fs |
1119 | * - init directory manager |
1120 | * - perform recovery and init the log manager |
1121 | */ |
1122 | int |
1123 | xfs_mountfs( |
1124 | xfs_mount_t *mp) |
1125 | { |
1126 | xfs_sb_t *sbp = &(mp->m_sb); |
1127 | xfs_inode_t *rip; |
1128 | __uint64_t resblks; |
1129 | uint quotamount = 0; |
1130 | uint quotaflags = 0; |
1131 | int error = 0; |
1132 | |
1133 | xfs_mount_common(mp, sbp); |
1134 | |
1135 | /* |
1136 | * Check for a mismatched features2 values. Older kernels |
1137 | * read & wrote into the wrong sb offset for sb_features2 |
1138 | * on some platforms due to xfs_sb_t not being 64bit size aligned |
1139 | * when sb_features2 was added, which made older superblock |
1140 | * reading/writing routines swap it as a 64-bit value. |
1141 | * |
1142 | * For backwards compatibility, we make both slots equal. |
1143 | * |
1144 | * If we detect a mismatched field, we OR the set bits into the |
1145 | * existing features2 field in case it has already been modified; we |
1146 | * don't want to lose any features. We then update the bad location |
1147 | * with the ORed value so that older kernels will see any features2 |
1148 | * flags, and mark the two fields as needing updates once the |
1149 | * transaction subsystem is online. |
1150 | */ |
1151 | if (xfs_sb_has_mismatched_features2(sbp)) { |
1152 | cmn_err(CE_WARN, |
1153 | "XFS: correcting sb_features alignment problem"); |
1154 | sbp->sb_features2 |= sbp->sb_bad_features2; |
1155 | sbp->sb_bad_features2 = sbp->sb_features2; |
1156 | mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; |
1157 | |
1158 | /* |
1159 | * Re-check for ATTR2 in case it was found in bad_features2 |
1160 | * slot. |
1161 | */ |
1162 | if (xfs_sb_version_hasattr2(&mp->m_sb) && |
1163 | !(mp->m_flags & XFS_MOUNT_NOATTR2)) |
1164 | mp->m_flags |= XFS_MOUNT_ATTR2; |
1165 | } |
1166 | |
1167 | if (xfs_sb_version_hasattr2(&mp->m_sb) && |
1168 | (mp->m_flags & XFS_MOUNT_NOATTR2)) { |
1169 | xfs_sb_version_removeattr2(&mp->m_sb); |
1170 | mp->m_update_flags |= XFS_SB_FEATURES2; |
1171 | |
1172 | /* update sb_versionnum for the clearing of the morebits */ |
1173 | if (!sbp->sb_features2) |
1174 | mp->m_update_flags |= XFS_SB_VERSIONNUM; |
1175 | } |
1176 | |
1177 | /* |
1178 | 	 * Check if sb_agblocks is aligned at the stripe boundary. |
1179 | 	 * If sb_agblocks is NOT aligned, turn off m_dalign, since allocator |
1180 | 	 * alignment is within an AG and therefore the AG has to be aligned |
1181 | 	 * at the stripe boundary. |
1182 | */ |
1183 | error = xfs_update_alignment(mp); |
1184 | if (error) |
1185 | goto out; |
1186 | |
1187 | xfs_alloc_compute_maxlevels(mp); |
1188 | xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); |
1189 | xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); |
1190 | xfs_ialloc_compute_maxlevels(mp); |
1191 | |
1192 | xfs_set_maxicount(mp); |
1193 | |
1194 | mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog); |
1195 | |
1196 | error = xfs_uuid_mount(mp); |
1197 | if (error) |
1198 | goto out; |
1199 | |
1200 | /* |
1201 | * Set the minimum read and write sizes |
1202 | */ |
1203 | xfs_set_rw_sizes(mp); |
1204 | |
1205 | /* |
1206 | * Set the inode cluster size. |
1207 | * This may still be overridden by the file system |
1208 | * block size if it is larger than the chosen cluster size. |
1209 | */ |
1210 | mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; |
1211 | |
1212 | /* |
1213 | * Set inode alignment fields |
1214 | */ |
1215 | xfs_set_inoalignment(mp); |
1216 | |
1217 | /* |
1218 | * Check that the data (and log if separate) are an ok size. |
1219 | */ |
1220 | error = xfs_check_sizes(mp); |
1221 | if (error) |
1222 | goto out_remove_uuid; |
1223 | |
1224 | /* |
1225 | * Initialize realtime fields in the mount structure |
1226 | */ |
1227 | error = xfs_rtmount_init(mp); |
1228 | if (error) { |
1229 | cmn_err(CE_WARN, "XFS: RT mount failed"); |
1230 | goto out_remove_uuid; |
1231 | } |
1232 | |
1233 | /* |
1234 | * Copies the low order bits of the timestamp and the randomly |
1235 | * set "sequence" number out of a UUID. |
1236 | */ |
1237 | uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid); |
1238 | |
1239 | mp->m_dmevmask = 0; /* not persistent; set after each mount */ |
1240 | |
1241 | xfs_dir_mount(mp); |
1242 | |
1243 | /* |
1244 | * Initialize the attribute manager's entries. |
1245 | */ |
1246 | mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100; |
1247 | |
1248 | /* |
1249 | * Initialize the precomputed transaction reservations values. |
1250 | */ |
1251 | xfs_trans_init(mp); |
1252 | |
1253 | /* |
1254 | * Allocate and initialize the per-ag data. |
1255 | */ |
1256 | spin_lock_init(&mp->m_perag_lock); |
1257 | INIT_RADIX_TREE(&mp->m_perag_tree, GFP_NOFS); |
1258 | error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); |
1259 | if (error) { |
1260 | cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error); |
1261 | goto out_remove_uuid; |
1262 | } |
1263 | |
1264 | if (!sbp->sb_logblocks) { |
1265 | cmn_err(CE_WARN, "XFS: no log defined"); |
1266 | XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp); |
1267 | error = XFS_ERROR(EFSCORRUPTED); |
1268 | goto out_free_perag; |
1269 | } |
1270 | |
1271 | /* |
1272 | * log's mount-time initialization. Perform 1st part recovery if needed |
1273 | */ |
1274 | error = xfs_log_mount(mp, mp->m_logdev_targp, |
1275 | XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), |
1276 | XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); |
1277 | if (error) { |
1278 | cmn_err(CE_WARN, "XFS: log mount failed"); |
1279 | goto out_free_perag; |
1280 | } |
1281 | |
1282 | /* |
1283 | * Now the log is mounted, we know if it was an unclean shutdown or |
1284 | 	 * not. If it was, then the first phase of recovery has completed and we |
1285 | * have consistent AG blocks on disk. We have not recovered EFIs yet, |
1286 | * but they are recovered transactionally in the second recovery phase |
1287 | * later. |
1288 | * |
1289 | * Hence we can safely re-initialise incore superblock counters from |
1290 | * the per-ag data. These may not be correct if the filesystem was not |
1291 | * cleanly unmounted, so we need to wait for recovery to finish before |
1292 | * doing this. |
1293 | * |
1294 | * If the filesystem was cleanly unmounted, then we can trust the |
1295 | * values in the superblock to be correct and we don't need to do |
1296 | * anything here. |
1297 | * |
1298 | * If we are currently making the filesystem, the initialisation will |
1299 | * fail as the perag data is in an undefined state. |
1300 | */ |
1301 | if (xfs_sb_version_haslazysbcount(&mp->m_sb) && |
1302 | !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) && |
1303 | !mp->m_sb.sb_inprogress) { |
1304 | error = xfs_initialize_perag_data(mp, sbp->sb_agcount); |
1305 | if (error) |
1306 | goto out_free_perag; |
1307 | } |
1308 | |
1309 | /* |
1310 | * Get and sanity-check the root inode. |
1311 | * Save the pointer to it in the mount structure. |
1312 | */ |
1313 | error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0); |
1314 | if (error) { |
1315 | cmn_err(CE_WARN, "XFS: failed to read root inode"); |
1316 | goto out_log_dealloc; |
1317 | } |
1318 | |
1319 | ASSERT(rip != NULL); |
1320 | |
1321 | if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) { |
1322 | cmn_err(CE_WARN, "XFS: corrupted root inode"); |
1323 | cmn_err(CE_WARN, "Device %s - root %llu is not a directory", |
1324 | XFS_BUFTARG_NAME(mp->m_ddev_targp), |
1325 | (unsigned long long)rip->i_ino); |
1326 | xfs_iunlock(rip, XFS_ILOCK_EXCL); |
1327 | XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, |
1328 | mp); |
1329 | error = XFS_ERROR(EFSCORRUPTED); |
1330 | goto out_rele_rip; |
1331 | } |
1332 | mp->m_rootip = rip; /* save it */ |
1333 | |
1334 | xfs_iunlock(rip, XFS_ILOCK_EXCL); |
1335 | |
1336 | /* |
1337 | * Initialize realtime inode pointers in the mount structure |
1338 | */ |
1339 | error = xfs_rtmount_inodes(mp); |
1340 | if (error) { |
1341 | /* |
1342 | * Free up the root inode. |
1343 | */ |
1344 | cmn_err(CE_WARN, "XFS: failed to read RT inodes"); |
1345 | goto out_rele_rip; |
1346 | } |
1347 | |
1348 | /* |
1349 | * If this is a read-only mount defer the superblock updates until |
1350 | * the next remount into writeable mode. Otherwise we would never |
1351 | * perform the update e.g. for the root filesystem. |
1352 | */ |
1353 | if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { |
1354 | error = xfs_mount_log_sb(mp, mp->m_update_flags); |
1355 | if (error) { |
1356 | cmn_err(CE_WARN, "XFS: failed to write sb changes"); |
1357 | goto out_rtunmount; |
1358 | } |
1359 | } |
1360 | |
1361 | /* |
1362 | * Initialise the XFS quota management subsystem for this mount |
1363 | */ |
1364 | if (XFS_IS_QUOTA_RUNNING(mp)) { |
1365 | 		error = xfs_qm_newmount(mp, &quotamount, &quotaflags); |
1366 | if (error) |
1367 | goto out_rtunmount; |
1368 | } else { |
1369 | ASSERT(!XFS_IS_QUOTA_ON(mp)); |
1370 | |
1371 | /* |
1372 | * If a file system had quotas running earlier, but decided to |
1373 | * mount without -o uquota/pquota/gquota options, revoke the |
1374 | * quotachecked license. |
1375 | */ |
1376 | if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) { |
1377 | cmn_err(CE_NOTE, |
1378 | "XFS: resetting qflags for filesystem %s", |
1379 | mp->m_fsname); |
1380 | |
1381 | error = xfs_mount_reset_sbqflags(mp); |
1382 | if (error) |
1383 | return error; |
1384 | } |
1385 | } |
1386 | |
1387 | /* |
1388 | * Finish recovering the file system. This part needed to be |
1389 | * delayed until after the root and real-time bitmap inodes |
1390 | * were consistently read in. |
1391 | */ |
1392 | error = xfs_log_mount_finish(mp); |
1393 | if (error) { |
1394 | cmn_err(CE_WARN, "XFS: log mount finish failed"); |
1395 | goto out_rtunmount; |
1396 | } |
1397 | |
1398 | /* |
1399 | * Complete the quota initialisation, post-log-replay component. |
1400 | */ |
1401 | if (quotamount) { |
1402 | ASSERT(mp->m_qflags == 0); |
1403 | mp->m_qflags = quotaflags; |
1404 | |
1405 | xfs_qm_mount_quotas(mp); |
1406 | } |
1407 | |
1408 | #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) |
1409 | if (XFS_IS_QUOTA_ON(mp)) |
1410 | xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on"); |
1411 | else |
1412 | xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on"); |
1413 | #endif |
1414 | |
1415 | /* |
1416 | * Now we are mounted, reserve a small amount of unused space for |
1417 | * privileged transactions. This is needed so that transaction |
1418 | * space required for critical operations can dip into this pool |
1419 | * when at ENOSPC. This is needed for operations like create with |
1420 | * attr, unwritten extent conversion at ENOSPC, etc. Data allocations |
1421 | * are not allowed to use this reserved space. |
1422 | * |
1423 | * This may drive us straight to ENOSPC on mount, but that implies |
1424 | * we were already there on the last unmount. Warn if this occurs. |
1425 | */ |
1426 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { |
1427 | resblks = xfs_default_resblks(mp); |
1428 | error = xfs_reserve_blocks(mp, &resblks, NULL); |
1429 | if (error) |
1430 | cmn_err(CE_WARN, "XFS: Unable to allocate reserve " |
1431 | "blocks. Continuing without a reserve pool."); |
1432 | } |
1433 | |
1434 | return 0; |
1435 | |
1436 | out_rtunmount: |
1437 | xfs_rtunmount_inodes(mp); |
1438 | out_rele_rip: |
1439 | IRELE(rip); |
1440 | out_log_dealloc: |
1441 | xfs_log_unmount(mp); |
1442 | out_free_perag: |
1443 | xfs_free_perag(mp); |
1444 | out_remove_uuid: |
1445 | xfs_uuid_unmount(mp); |
1446 | out: |
1447 | return error; |
1448 | } |
1449 | |
1450 | /* |
1451 |  * This flushes out the inodes, dquots and the superblock, unmounts the |
1452 | * log and makes sure that incore structures are freed. |
1453 | */ |
1454 | void |
1455 | xfs_unmountfs( |
1456 | struct xfs_mount *mp) |
1457 | { |
1458 | __uint64_t resblks; |
1459 | int error; |
1460 | |
1461 | xfs_qm_unmount_quotas(mp); |
1462 | xfs_rtunmount_inodes(mp); |
1463 | IRELE(mp->m_rootip); |
1464 | |
1465 | /* |
1466 | * We can potentially deadlock here if we have an inode cluster |
1467 | 	 * that has been freed but whose buffer is still pinned in memory because |
1468 | 	 * the transaction is still sitting in an iclog. The stale inodes |
1469 | 	 * on that buffer will have their flush locks held until the |
1470 | 	 * transaction hits the disk and the callbacks run. The inode |
1471 | 	 * flush takes the flush lock unconditionally, and with nothing to |
1472 | 	 * push out the iclog we will never get that unlocked. Hence we |
1473 | * need to force the log first. |
1474 | */ |
1475 | xfs_log_force(mp, XFS_LOG_SYNC); |
1476 | |
1477 | /* |
1478 | * Do a delwri reclaim pass first so that as many dirty inodes are |
1479 | 	 * queued up for IO as possible. Then flush the buffers and make a |
1480 | 	 * synchronous pass to ensure all the remaining inodes are reclaimed. |
1481 | * This makes the reclaim process as quick as possible by avoiding |
1482 | * synchronous writeout and blocking on inodes already in the delwri |
1483 | * state as much as possible. |
1484 | */ |
1485 | xfs_reclaim_inodes(mp, 0); |
1486 | XFS_bflush(mp->m_ddev_targp); |
1487 | xfs_reclaim_inodes(mp, SYNC_WAIT); |
1488 | |
1489 | xfs_qm_unmount(mp); |
1490 | |
1491 | /* |
1492 | * Flush out the log synchronously so that we know for sure |
1493 | * that nothing is pinned. This is important because bflush() |
1494 | * will skip pinned buffers. |
1495 | */ |
1496 | xfs_log_force(mp, XFS_LOG_SYNC); |
1497 | |
1498 | xfs_binval(mp->m_ddev_targp); |
1499 | if (mp->m_rtdev_targp) { |
1500 | xfs_binval(mp->m_rtdev_targp); |
1501 | } |
1502 | |
1503 | /* |
1504 | * Unreserve any blocks we have so that when we unmount we don't account |
1505 | * the reserved free space as used. This is really only necessary for |
1506 | * lazy superblock counting because it trusts the incore superblock |
1507 | * counters to be absolutely correct on clean unmount. |
1508 | * |
1509 | * We don't bother correcting this elsewhere for lazy superblock |
1510 | * counting because on mount of an unclean filesystem we reconstruct the |
1511 | * correct counter value and this is irrelevant. |
1512 | * |
1513 | * For non-lazy counter filesystems, this doesn't matter at all because |
1514 | 	 * we only ever apply deltas to the superblock and hence the incore |
1515 | * value does not matter.... |
1516 | */ |
1517 | resblks = 0; |
1518 | error = xfs_reserve_blocks(mp, &resblks, NULL); |
1519 | if (error) |
1520 | cmn_err(CE_WARN, "XFS: Unable to free reserved block pool. " |
1521 | "Freespace may not be correct on next mount."); |
1522 | |
1523 | error = xfs_log_sbcount(mp, 1); |
1524 | if (error) |
1525 | cmn_err(CE_WARN, "XFS: Unable to update superblock counters. " |
1526 | "Freespace may not be correct on next mount."); |
1527 | xfs_unmountfs_writesb(mp); |
1528 | xfs_unmountfs_wait(mp); /* wait for async bufs */ |
1529 | xfs_log_unmount_write(mp); |
1530 | xfs_log_unmount(mp); |
1531 | xfs_uuid_unmount(mp); |
1532 | |
1533 | #if defined(DEBUG) |
1534 | xfs_errortag_clearall(mp, 0); |
1535 | #endif |
1536 | xfs_free_perag(mp); |
1537 | } |
1538 | |
1539 | STATIC void |
1540 | xfs_unmountfs_wait(xfs_mount_t *mp) |
1541 | { |
1542 | if (mp->m_logdev_targp != mp->m_ddev_targp) |
1543 | xfs_wait_buftarg(mp->m_logdev_targp); |
1544 | if (mp->m_rtdev_targp) |
1545 | xfs_wait_buftarg(mp->m_rtdev_targp); |
1546 | xfs_wait_buftarg(mp->m_ddev_targp); |
1547 | } |
1548 | |
1549 | int |
1550 | xfs_fs_writable(xfs_mount_t *mp) |
1551 | { |
1552 | return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) || |
1553 | (mp->m_flags & XFS_MOUNT_RDONLY)); |
1554 | } |
1555 | |
1556 | /* |
1557 | * xfs_log_sbcount |
1558 | * |
1559 | * Called either periodically to keep the on disk superblock values |
1560 | * roughly up to date or from unmount to make sure the values are |
1561 | * correct on a clean unmount. |
1562 | * |
1563 | * Note this code can be called during the process of freezing, so |
1564 | * we may need to use the transaction allocator which does not not |
1565 |  * we may need to use the transaction allocator which does not |
1566 | */ |
1567 | int |
1568 | xfs_log_sbcount( |
1569 | xfs_mount_t *mp, |
1570 | uint sync) |
1571 | { |
1572 | xfs_trans_t *tp; |
1573 | int error; |
1574 | |
1575 | if (!xfs_fs_writable(mp)) |
1576 | return 0; |
1577 | |
1578 | xfs_icsb_sync_counters(mp, 0); |
1579 | |
1580 | /* |
1581 | * we don't need to do this if we are updating the superblock |
1582 | * counters on every modification. |
1583 | */ |
1584 | if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) |
1585 | return 0; |
1586 | |
1587 | tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); |
1588 | error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, |
1589 | XFS_DEFAULT_LOG_COUNT); |
1590 | if (error) { |
1591 | xfs_trans_cancel(tp, 0); |
1592 | return error; |
1593 | } |
1594 | |
1595 | xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS); |
1596 | if (sync) |
1597 | xfs_trans_set_sync(tp); |
1598 | error = xfs_trans_commit(tp, 0); |
1599 | return error; |
1600 | } |
1601 | |
1602 | int |
1603 | xfs_unmountfs_writesb(xfs_mount_t *mp) |
1604 | { |
1605 | xfs_buf_t *sbp; |
1606 | int error = 0; |
1607 | |
1608 | /* |
1609 | * skip superblock write if fs is read-only, or |
1610 | * if we are doing a forced umount. |
1611 | */ |
1612 | if (!((mp->m_flags & XFS_MOUNT_RDONLY) || |
1613 | XFS_FORCED_SHUTDOWN(mp))) { |
1614 | |
1615 | sbp = xfs_getsb(mp, 0); |
1616 | |
1617 | XFS_BUF_UNDONE(sbp); |
1618 | XFS_BUF_UNREAD(sbp); |
1619 | XFS_BUF_UNDELAYWRITE(sbp); |
1620 | XFS_BUF_WRITE(sbp); |
1621 | XFS_BUF_UNASYNC(sbp); |
1622 | ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); |
1623 | xfsbdstrat(mp, sbp); |
1624 | error = xfs_iowait(sbp); |
1625 | if (error) |
1626 | xfs_ioerror_alert("xfs_unmountfs_writesb", |
1627 | mp, sbp, XFS_BUF_ADDR(sbp)); |
1628 | xfs_buf_relse(sbp); |
1629 | } |
1630 | return error; |
1631 | } |
1632 | |
1633 | /* |
1634 | * xfs_mod_sb() can be used to copy arbitrary changes to the |
1635 | * in-core superblock into the superblock buffer to be logged. |
1636 | * It does not provide the higher level of locking that is |
1637 | * needed to protect the in-core superblock from concurrent |
1638 | * access. |
1639 | */ |
1640 | void |
1641 | xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) |
1642 | { |
1643 | xfs_buf_t *bp; |
1644 | int first; |
1645 | int last; |
1646 | xfs_mount_t *mp; |
1647 | xfs_sb_field_t f; |
1648 | |
1649 | ASSERT(fields); |
1650 | if (!fields) |
1651 | return; |
1652 | mp = tp->t_mountp; |
1653 | bp = xfs_trans_getsb(tp, mp, 0); |
1654 | first = sizeof(xfs_sb_t); |
1655 | last = 0; |
1656 | |
1657 | /* translate/copy */ |
1658 | |
1659 | xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields); |
1660 | |
1661 | /* find modified range */ |
1662 | f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields); |
1663 | ASSERT((1LL << f) & XFS_SB_MOD_BITS); |
1664 | last = xfs_sb_info[f + 1].offset - 1; |
1665 | |
1666 | f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); |
1667 | ASSERT((1LL << f) & XFS_SB_MOD_BITS); |
1668 | first = xfs_sb_info[f].offset; |
1669 | |
1670 | xfs_trans_log_buf(tp, bp, first, last); |
1671 | } |
1672 | |
1673 | |
1674 | /* |
1675 | * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply |
1676 | * a delta to a specified field in the in-core superblock. Simply |
1677 | * switch on the field indicated and apply the delta to that field. |
1678 | * Fields are not allowed to dip below zero, so if the delta would |
1679 | * do this, do not apply it and return EINVAL. |
1680 | * |
1681 | * The m_sb_lock must be held when this routine is called. |
1682 | */ |
1683 | STATIC int |
1684 | xfs_mod_incore_sb_unlocked( |
1685 | xfs_mount_t *mp, |
1686 | xfs_sb_field_t field, |
1687 | int64_t delta, |
1688 | int rsvd) |
1689 | { |
1690 | int scounter; /* short counter for 32 bit fields */ |
1691 | long long lcounter; /* long counter for 64 bit fields */ |
1692 | long long res_used, rem; |
1693 | |
1694 | /* |
1695 | * With the in-core superblock spin lock held, switch |
1696 | * on the indicated field. Apply the delta to the |
1697 | * proper field. If the field's value would dip below |
1698 | * 0, then do not apply the delta and return EINVAL. |
1699 | */ |
1700 | switch (field) { |
1701 | case XFS_SBS_ICOUNT: |
1702 | lcounter = (long long)mp->m_sb.sb_icount; |
1703 | lcounter += delta; |
1704 | if (lcounter < 0) { |
1705 | ASSERT(0); |
1706 | return XFS_ERROR(EINVAL); |
1707 | } |
1708 | mp->m_sb.sb_icount = lcounter; |
1709 | return 0; |
1710 | case XFS_SBS_IFREE: |
1711 | lcounter = (long long)mp->m_sb.sb_ifree; |
1712 | lcounter += delta; |
1713 | if (lcounter < 0) { |
1714 | ASSERT(0); |
1715 | return XFS_ERROR(EINVAL); |
1716 | } |
1717 | mp->m_sb.sb_ifree = lcounter; |
1718 | return 0; |
1719 | case XFS_SBS_FDBLOCKS: |
1720 | lcounter = (long long) |
1721 | mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); |
1722 | res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); |
1723 | |
1724 | if (delta > 0) { /* Putting blocks back */ |
1725 | if (res_used > delta) { |
1726 | mp->m_resblks_avail += delta; |
1727 | } else { |
1728 | rem = delta - res_used; |
1729 | mp->m_resblks_avail = mp->m_resblks; |
1730 | lcounter += rem; |
1731 | } |
1732 | } else { /* Taking blocks away */ |
1733 | lcounter += delta; |
1734 | if (lcounter >= 0) { |
1735 | mp->m_sb.sb_fdblocks = lcounter + |
1736 | XFS_ALLOC_SET_ASIDE(mp); |
1737 | return 0; |
1738 | } |
1739 | |
1740 | /* |
1741 | * We are out of blocks, use any available reserved |
1742 | * blocks if we're allowed to. |
1743 | */ |
1744 | if (!rsvd) |
1745 | return XFS_ERROR(ENOSPC); |
1746 | |
1747 | lcounter = (long long)mp->m_resblks_avail + delta; |
1748 | if (lcounter >= 0) { |
1749 | mp->m_resblks_avail = lcounter; |
1750 | return 0; |
1751 | } |
1752 | printk_once(KERN_WARNING |
1753 | "Filesystem \"%s\": reserve blocks depleted! " |
1754 | "Consider increasing reserve pool size.", |
1755 | mp->m_fsname); |
1756 | return XFS_ERROR(ENOSPC); |
1757 | } |
1758 | |
1759 | mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); |
1760 | return 0; |
1761 | case XFS_SBS_FREXTENTS: |
1762 | lcounter = (long long)mp->m_sb.sb_frextents; |
1763 | lcounter += delta; |
1764 | if (lcounter < 0) { |
1765 | return XFS_ERROR(ENOSPC); |
1766 | } |
1767 | mp->m_sb.sb_frextents = lcounter; |
1768 | return 0; |
1769 | case XFS_SBS_DBLOCKS: |
1770 | lcounter = (long long)mp->m_sb.sb_dblocks; |
1771 | lcounter += delta; |
1772 | if (lcounter < 0) { |
1773 | ASSERT(0); |
1774 | return XFS_ERROR(EINVAL); |
1775 | } |
1776 | mp->m_sb.sb_dblocks = lcounter; |
1777 | return 0; |
1778 | case XFS_SBS_AGCOUNT: |
1779 | scounter = mp->m_sb.sb_agcount; |
1780 | scounter += delta; |
1781 | if (scounter < 0) { |
1782 | ASSERT(0); |
1783 | return XFS_ERROR(EINVAL); |
1784 | } |
1785 | mp->m_sb.sb_agcount = scounter; |
1786 | return 0; |
1787 | case XFS_SBS_IMAX_PCT: |
1788 | scounter = mp->m_sb.sb_imax_pct; |
1789 | scounter += delta; |
1790 | if (scounter < 0) { |
1791 | ASSERT(0); |
1792 | return XFS_ERROR(EINVAL); |
1793 | } |
1794 | mp->m_sb.sb_imax_pct = scounter; |
1795 | return 0; |
1796 | case XFS_SBS_REXTSIZE: |
1797 | scounter = mp->m_sb.sb_rextsize; |
1798 | scounter += delta; |
1799 | if (scounter < 0) { |
1800 | ASSERT(0); |
1801 | return XFS_ERROR(EINVAL); |
1802 | } |
1803 | mp->m_sb.sb_rextsize = scounter; |
1804 | return 0; |
1805 | case XFS_SBS_RBMBLOCKS: |
1806 | scounter = mp->m_sb.sb_rbmblocks; |
1807 | scounter += delta; |
1808 | if (scounter < 0) { |
1809 | ASSERT(0); |
1810 | return XFS_ERROR(EINVAL); |
1811 | } |
1812 | mp->m_sb.sb_rbmblocks = scounter; |
1813 | return 0; |
1814 | case XFS_SBS_RBLOCKS: |
1815 | lcounter = (long long)mp->m_sb.sb_rblocks; |
1816 | lcounter += delta; |
1817 | if (lcounter < 0) { |
1818 | ASSERT(0); |
1819 | return XFS_ERROR(EINVAL); |
1820 | } |
1821 | mp->m_sb.sb_rblocks = lcounter; |
1822 | return 0; |
1823 | case XFS_SBS_REXTENTS: |
1824 | lcounter = (long long)mp->m_sb.sb_rextents; |
1825 | lcounter += delta; |
1826 | if (lcounter < 0) { |
1827 | ASSERT(0); |
1828 | return XFS_ERROR(EINVAL); |
1829 | } |
1830 | mp->m_sb.sb_rextents = lcounter; |
1831 | return 0; |
1832 | case XFS_SBS_REXTSLOG: |
1833 | scounter = mp->m_sb.sb_rextslog; |
1834 | scounter += delta; |
1835 | if (scounter < 0) { |
1836 | ASSERT(0); |
1837 | return XFS_ERROR(EINVAL); |
1838 | } |
1839 | mp->m_sb.sb_rextslog = scounter; |
1840 | return 0; |
1841 | default: |
1842 | ASSERT(0); |
1843 | return XFS_ERROR(EINVAL); |
1844 | } |
1845 | } |
1846 | |
1847 | /* |
1848 | * xfs_mod_incore_sb() is used to change a field in the in-core |
1849 | * superblock structure by the specified delta. This modification |
1850 | * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked() |
1851 | * routine to do the work. |
1852 | */ |
1853 | int |
1854 | xfs_mod_incore_sb( |
1855 | xfs_mount_t *mp, |
1856 | xfs_sb_field_t field, |
1857 | int64_t delta, |
1858 | int rsvd) |
1859 | { |
1860 | int status; |
1861 | |
1862 | /* check for per-cpu counters */ |
1863 | switch (field) { |
1864 | #ifdef HAVE_PERCPU_SB |
1865 | case XFS_SBS_ICOUNT: |
1866 | case XFS_SBS_IFREE: |
1867 | case XFS_SBS_FDBLOCKS: |
1868 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { |
1869 | status = xfs_icsb_modify_counters(mp, field, |
1870 | delta, rsvd); |
1871 | break; |
1872 | } |
1873 | /* FALLTHROUGH */ |
1874 | #endif |
1875 | default: |
1876 | spin_lock(&mp->m_sb_lock); |
1877 | status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); |
1878 | spin_unlock(&mp->m_sb_lock); |
1879 | break; |
1880 | } |
1881 | |
1882 | return status; |
1883 | } |
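/*
 * Illustrative only (not in the original source): a hypothetical caller
 * consuming 'nblks' free blocks, allowed to dip into the reserve pool
 * when 'rsvd' is set, applies a negative delta:
 *
 *	error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
 *				  -((int64_t)nblks), rsvd);
 *	if (error)		// ENOSPC: not enough free or reserved blocks
 *		return error;
 *
 * A positive delta returns blocks, refilling the reserve pool first.
 */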
1884 | |
1885 | /* |
1886 | * xfs_mod_incore_sb_batch() is used to change more than one field |
1887 | * in the in-core superblock structure at a time. This modification |
1888 | * is protected by the m_sb_lock. The fields and |
1889 | * changes to those fields are specified in the array of xfs_mod_sb |
1890 | * structures passed in. |
1891 | * |
1892 | * Either all of the specified deltas will be applied or none of |
1893 | * them will. If any modified field dips below 0, then all modifications |
1894 | * will be backed out and EINVAL will be returned. |
1895 | */ |
1896 | int |
1897 | xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) |
1898 | { |
1899 | int status=0; |
1900 | xfs_mod_sb_t *msbp; |
1901 | |
1902 | /* |
1903 | * Loop through the array of mod structures and apply each |
1904 | * individually. If any fail, then back out all those |
1905 | * which have already been applied. Do all of this within |
1906 | * the scope of the m_sb_lock so that all of the changes will |
1907 | * be atomic. |
1908 | */ |
1909 | spin_lock(&mp->m_sb_lock); |
1910 | msbp = &msb[0]; |
1911 | for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { |
1912 | /* |
1913 | * Apply the delta at index n. If it fails, break |
1914 | * from the loop so we'll fall into the undo loop |
1915 | * below. |
1916 | */ |
1917 | switch (msbp->msb_field) { |
1918 | #ifdef HAVE_PERCPU_SB |
1919 | case XFS_SBS_ICOUNT: |
1920 | case XFS_SBS_IFREE: |
1921 | case XFS_SBS_FDBLOCKS: |
1922 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { |
1923 | spin_unlock(&mp->m_sb_lock); |
1924 | status = xfs_icsb_modify_counters(mp, |
1925 | msbp->msb_field, |
1926 | msbp->msb_delta, rsvd); |
1927 | spin_lock(&mp->m_sb_lock); |
1928 | break; |
1929 | } |
1930 | /* FALLTHROUGH */ |
1931 | #endif |
1932 | default: |
1933 | status = xfs_mod_incore_sb_unlocked(mp, |
1934 | msbp->msb_field, |
1935 | msbp->msb_delta, rsvd); |
1936 | break; |
1937 | } |
1938 | |
1939 | if (status != 0) { |
1940 | break; |
1941 | } |
1942 | } |
1943 | |
1944 | /* |
1945 | * If we didn't complete the loop above, then back out |
1946 | * any changes made to the superblock. If you add code |
1947 | * between the loop above and here, make sure that you |
1948 | * preserve the value of status. Loop back until |
1949 | * we step below the beginning of the array. Make sure |
1950 | * we don't touch anything back there. |
1951 | */ |
1952 | if (status != 0) { |
1953 | msbp--; |
1954 | while (msbp >= msb) { |
1955 | switch (msbp->msb_field) { |
1956 | #ifdef HAVE_PERCPU_SB |
1957 | case XFS_SBS_ICOUNT: |
1958 | case XFS_SBS_IFREE: |
1959 | case XFS_SBS_FDBLOCKS: |
1960 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { |
1961 | spin_unlock(&mp->m_sb_lock); |
1962 | status = xfs_icsb_modify_counters(mp, |
1963 | msbp->msb_field, |
1964 | -(msbp->msb_delta), |
1965 | rsvd); |
1966 | spin_lock(&mp->m_sb_lock); |
1967 | break; |
1968 | } |
1969 | /* FALLTHROUGH */ |
1970 | #endif |
1971 | default: |
1972 | status = xfs_mod_incore_sb_unlocked(mp, |
1973 | msbp->msb_field, |
1974 | -(msbp->msb_delta), |
1975 | rsvd); |
1976 | break; |
1977 | } |
1978 | ASSERT(status == 0); |
1979 | msbp--; |
1980 | } |
1981 | } |
1982 | spin_unlock(&mp->m_sb_lock); |
1983 | return status; |
1984 | } |
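/*
 * Illustrative sketch (not in the original source): applying two deltas
 * atomically.  The msb_field/msb_delta member names are taken from the
 * loops above; the caller and its deltas are hypothetical:
 *
 *	xfs_mod_sb_t	msb[2];
 *
 *	msb[0].msb_field = XFS_SBS_ICOUNT;
 *	msb[0].msb_delta = inode_delta;
 *	msb[1].msb_field = XFS_SBS_IFREE;
 *	msb[1].msb_delta = ifree_delta;
 *	error = xfs_mod_incore_sb_batch(mp, msb, 2, 0);
 *
 * Either both deltas are applied or, if one would take its counter below
 * zero, both are backed out and a non-zero status is returned.
 */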
1985 | |
1986 | /* |
1987 | * xfs_getsb() is called to obtain the buffer for the superblock. |
1988 | * The buffer is returned locked and read in from disk. |
1989 | * The buffer should be released with a call to xfs_buf_relse(). |
1990 | * |
1991 | * If the flags parameter is XBF_TRYLOCK, then we'll only return |
1992 | * the superblock buffer if it can be locked without sleeping. |
1993 | * If it can't then we'll return NULL. |
1994 | */ |
1995 | xfs_buf_t * |
1996 | xfs_getsb( |
1997 | xfs_mount_t *mp, |
1998 | int flags) |
1999 | { |
2000 | xfs_buf_t *bp; |
2001 | |
2002 | ASSERT(mp->m_sb_bp != NULL); |
2003 | bp = mp->m_sb_bp; |
2004 | if (flags & XBF_TRYLOCK) { |
2005 | if (!XFS_BUF_CPSEMA(bp)) { |
2006 | return NULL; |
2007 | } |
2008 | } else { |
2009 | XFS_BUF_PSEMA(bp, PRIBIO); |
2010 | } |
2011 | XFS_BUF_HOLD(bp); |
2012 | ASSERT(XFS_BUF_ISDONE(bp)); |
2013 | return bp; |
2014 | } |
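/*
 * Illustrative only (not in the original source): a caller that must not
 * sleep can pass XBF_TRYLOCK and handle a NULL return, e.g.:
 *
 *	bp = xfs_getsb(mp, XBF_TRYLOCK);
 *	if (!bp)
 *		return EAGAIN;		// hypothetical caller policy
 *	// ... read or log the superblock buffer ...
 *	xfs_buf_relse(bp);
 *
 * A blocking caller passes 0 and always gets the locked buffer back.
 */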
2015 | |
2016 | /* |
2017 | * Used to free the superblock along various error paths. |
2018 | */ |
2019 | void |
2020 | xfs_freesb( |
2021 | xfs_mount_t *mp) |
2022 | { |
2023 | xfs_buf_t *bp; |
2024 | |
2025 | /* |
2026 | * Use xfs_getsb() so that the buffer will be locked |
2027 | * when we call xfs_buf_relse(). |
2028 | */ |
2029 | bp = xfs_getsb(mp, 0); |
2030 | XFS_BUF_UNMANAGE(bp); |
2031 | xfs_buf_relse(bp); |
2032 | mp->m_sb_bp = NULL; |
2033 | } |
2034 | |
2035 | /* |
2036 | * Used to log changes to the superblock unit and width fields which could |
2037 | * be altered by the mount options, as well as any potential sb_features2 |
2038 | * fixup. Only the first superblock is updated. |
2039 | */ |
2040 | int |
2041 | xfs_mount_log_sb( |
2042 | xfs_mount_t *mp, |
2043 | __int64_t fields) |
2044 | { |
2045 | xfs_trans_t *tp; |
2046 | int error; |
2047 | |
2048 | ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID | |
2049 | XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 | |
2050 | XFS_SB_VERSIONNUM)); |
2051 | |
2052 | tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT); |
2053 | error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, |
2054 | XFS_DEFAULT_LOG_COUNT); |
2055 | if (error) { |
2056 | xfs_trans_cancel(tp, 0); |
2057 | return error; |
2058 | } |
2059 | xfs_mod_sb(tp, fields); |
2060 | error = xfs_trans_commit(tp, 0); |
2061 | return error; |
2062 | } |
2063 | |
2064 | /* |
2065 | * If the underlying (data/log/rt) device is readonly, there are some |
2066 | * operations that cannot proceed. |
2067 | */ |
2068 | int |
2069 | xfs_dev_is_read_only( |
2070 | struct xfs_mount *mp, |
2071 | char *message) |
2072 | { |
2073 | if (xfs_readonly_buftarg(mp->m_ddev_targp) || |
2074 | xfs_readonly_buftarg(mp->m_logdev_targp) || |
2075 | (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { |
2076 | cmn_err(CE_NOTE, |
2077 | "XFS: %s required on read-only device.", message); |
2078 | cmn_err(CE_NOTE, |
2079 | "XFS: write access unavailable, cannot proceed."); |
2080 | return EROFS; |
2081 | } |
2082 | return 0; |
2083 | } |
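/*
 * Illustrative only (not in the original source): a hypothetical caller
 * about to modify the data device gates itself on this check; the message
 * string below is made up for the example:
 *
 *	error = xfs_dev_is_read_only(mp, "grow");
 *	if (error)
 *		return error;
 */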
2084 | |
2085 | #ifdef HAVE_PERCPU_SB |
2086 | /* |
2087 | * Per-cpu incore superblock counters |
2088 | * |
2089 | * Simple concept, difficult implementation |
2090 | * |
2091 | * Basically, replace the incore superblock counters with a distributed per cpu |
2092 | * counter for contended fields (e.g. free block count). |
2093 | * |
2094 | * Difficulties arise in that the incore sb is used for ENOSPC checking, and |
2095 | * hence needs to be accurately read when we are running low on space. Hence |
2096 | * there is a method to enable and disable the per-cpu counters based on how |
2097 | * much "stuff" is available in them. |
2098 | * |
2099 | * Basically, a counter is enabled if there is enough free resource to justify |
2100 | * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local |
2101 | * ENOSPC), then we disable the counters to synchronise all callers and |
2102 | * re-distribute the available resources. |
2103 | * |
2104 | * If, once we have redistributed the available resources, we still get a failure, |
2105 | * we disable the per-cpu counter and go through the slow path. |
2106 | * |
2107 | * The slow path is the current xfs_mod_incore_sb() function. This means that |
2108 | * when we disable a per-cpu counter, we need to drain its resources back to |
2109 | * the global superblock. We do this after disabling the counter to prevent |
2110 | * more threads from queueing up on the counter. |
2111 | * |
2112 | * Essentially, this means that we still need a lock in the fast path to enable |
2113 | * synchronisation between the global counters and the per-cpu counters. This |
2114 | * is not a problem because the lock will be local to a CPU almost all the time |
2115 | * and have little contention except when we get to ENOSPC conditions. |
2116 | * |
2117 | * Basically, this lock becomes a barrier that enables us to lock out the fast |
2118 | * path while we do things like enabling and disabling counters and |
2119 | * synchronising the counters. |
2120 | * |
2121 | * Locking rules: |
2122 | * |
2123 | * 1. m_sb_lock before picking up per-cpu locks |
2124 | * 2. per-cpu locks always picked up via for_each_online_cpu() order |
2125 | * 3. accurate counter sync requires m_sb_lock + per cpu locks |
2126 | * 4. modifying per-cpu counters requires holding per-cpu lock |
2127 | * 5. modifying global counters requires holding m_sb_lock |
2128 | * 6. enabling or disabling a counter requires holding the m_sb_lock |
2129 | * and _none_ of the per-cpu locks. |
2130 | * |
2131 | * Disabled counters are only ever re-enabled by a balance operation |
2132 | * that results in more free resources per CPU than a given threshold. |
2133 | * To ensure counters don't remain disabled, they are rebalanced when |
2134 | * the global resource goes above a higher threshold (i.e. some hysteresis |
2135 | * is present to prevent thrashing). |
2136 | */ |
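/*
 * Illustrative lock-ordering sketch (not in the original source), matching
 * the rules above and the CPU_DEAD handling below:
 *
 *	xfs_icsb_lock(mp);			// m_icsb_mutex: enable/disable
 *	spin_lock(&mp->m_sb_lock);		// rule 1: before per-cpu locks
 *	xfs_icsb_lock_all_counters(mp);		// rule 2: for_each_online_cpu order
 *	// ... drain or redistribute the counters ...
 *	xfs_icsb_unlock_all_counters(mp);
 *	spin_unlock(&mp->m_sb_lock);
 *	xfs_icsb_unlock(mp);
 */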
2137 | |
2138 | #ifdef CONFIG_HOTPLUG_CPU |
2139 | /* |
2140 | * hot-plug CPU notifier support. |
2141 | * |
2142 | * We need a notifier per filesystem as we need to be able to identify |
2143 | * the filesystem to balance the counters out. This is achieved by |
2144 | * having a notifier block embedded in the xfs_mount_t and doing pointer |
2145 | * magic to get the mount pointer from the notifier block address. |
2146 | */ |
2147 | STATIC int |
2148 | xfs_icsb_cpu_notify( |
2149 | struct notifier_block *nfb, |
2150 | unsigned long action, |
2151 | void *hcpu) |
2152 | { |
2153 | xfs_icsb_cnts_t *cntp; |
2154 | xfs_mount_t *mp; |
2155 | |
2156 | mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); |
2157 | cntp = (xfs_icsb_cnts_t *) |
2158 | per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu); |
2159 | switch (action) { |
2160 | case CPU_UP_PREPARE: |
2161 | case CPU_UP_PREPARE_FROZEN: |
2162 | /* Easy case - initialize the area and locks; the |
2163 | * rebalance at CPU_ONLINE time does everything else for us. */ |
2164 | memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); |
2165 | break; |
2166 | case CPU_ONLINE: |
2167 | case CPU_ONLINE_FROZEN: |
2168 | xfs_icsb_lock(mp); |
2169 | xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); |
2170 | xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); |
2171 | xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); |
2172 | xfs_icsb_unlock(mp); |
2173 | break; |
2174 | case CPU_DEAD: |
2175 | case CPU_DEAD_FROZEN: |
2176 | /* Disable all the counters, then fold the dead cpu's |
2177 | * count into the total on the global superblock and |
2178 | * re-enable the counters. */ |
2179 | xfs_icsb_lock(mp); |
2180 | spin_lock(&mp->m_sb_lock); |
2181 | xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); |
2182 | xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); |
2183 | xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); |
2184 | |
2185 | mp->m_sb.sb_icount += cntp->icsb_icount; |
2186 | mp->m_sb.sb_ifree += cntp->icsb_ifree; |
2187 | mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks; |
2188 | |
2189 | memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); |
2190 | |
2191 | xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0); |
2192 | xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0); |
2193 | xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0); |
2194 | spin_unlock(&mp->m_sb_lock); |
2195 | xfs_icsb_unlock(mp); |
2196 | break; |
2197 | } |
2198 | |
2199 | return NOTIFY_OK; |
2200 | } |
2201 | #endif /* CONFIG_HOTPLUG_CPU */ |
2202 | |
2203 | int |
2204 | xfs_icsb_init_counters( |
2205 | xfs_mount_t *mp) |
2206 | { |
2207 | xfs_icsb_cnts_t *cntp; |
2208 | int i; |
2209 | |
2210 | mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t); |
2211 | if (mp->m_sb_cnts == NULL) |
2212 | return -ENOMEM; |
2213 | |
2214 | #ifdef CONFIG_HOTPLUG_CPU |
2215 | mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; |
2216 | mp->m_icsb_notifier.priority = 0; |
2217 | register_hotcpu_notifier(&mp->m_icsb_notifier); |
2218 | #endif /* CONFIG_HOTPLUG_CPU */ |
2219 | |
2220 | for_each_online_cpu(i) { |
2221 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
2222 | memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); |
2223 | } |
2224 | |
2225 | mutex_init(&mp->m_icsb_mutex); |
2226 | |
2227 | /* |
2228 | * start with all counters disabled so that the |
2229 | * initial balance kicks us off correctly |
2230 | */ |
2231 | mp->m_icsb_counters = -1; |
2232 | return 0; |
2233 | } |
2234 | |
2235 | void |
2236 | xfs_icsb_reinit_counters( |
2237 | xfs_mount_t *mp) |
2238 | { |
2239 | xfs_icsb_lock(mp); |
2240 | /* |
2241 | * start with all counters disabled so that the |
2242 | * initial balance kicks us off correctly |
2243 | */ |
2244 | mp->m_icsb_counters = -1; |
2245 | xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); |
2246 | xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); |
2247 | xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); |
2248 | xfs_icsb_unlock(mp); |
2249 | } |
2250 | |
2251 | void |
2252 | xfs_icsb_destroy_counters( |
2253 | xfs_mount_t *mp) |
2254 | { |
2255 | if (mp->m_sb_cnts) { |
2256 | unregister_hotcpu_notifier(&mp->m_icsb_notifier); |
2257 | free_percpu(mp->m_sb_cnts); |
2258 | } |
2259 | mutex_destroy(&mp->m_icsb_mutex); |
2260 | } |
2261 | |
2262 | STATIC void |
2263 | xfs_icsb_lock_cntr( |
2264 | xfs_icsb_cnts_t *icsbp) |
2265 | { |
2266 | while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) { |
2267 | ndelay(1000); |
2268 | } |
2269 | } |
2270 | |
2271 | STATIC void |
2272 | xfs_icsb_unlock_cntr( |
2273 | xfs_icsb_cnts_t *icsbp) |
2274 | { |
2275 | clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags); |
2276 | } |
2277 | |
2278 | |
2279 | STATIC void |
2280 | xfs_icsb_lock_all_counters( |
2281 | xfs_mount_t *mp) |
2282 | { |
2283 | xfs_icsb_cnts_t *cntp; |
2284 | int i; |
2285 | |
2286 | for_each_online_cpu(i) { |
2287 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
2288 | xfs_icsb_lock_cntr(cntp); |
2289 | } |
2290 | } |
2291 | |
2292 | STATIC void |
2293 | xfs_icsb_unlock_all_counters( |
2294 | xfs_mount_t *mp) |
2295 | { |
2296 | xfs_icsb_cnts_t *cntp; |
2297 | int i; |
2298 | |
2299 | for_each_online_cpu(i) { |
2300 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
2301 | xfs_icsb_unlock_cntr(cntp); |
2302 | } |
2303 | } |
2304 | |
2305 | STATIC void |
2306 | xfs_icsb_count( |
2307 | xfs_mount_t *mp, |
2308 | xfs_icsb_cnts_t *cnt, |
2309 | int flags) |
2310 | { |
2311 | xfs_icsb_cnts_t *cntp; |
2312 | int i; |
2313 | |
2314 | memset(cnt, 0, sizeof(xfs_icsb_cnts_t)); |
2315 | |
2316 | if (!(flags & XFS_ICSB_LAZY_COUNT)) |
2317 | xfs_icsb_lock_all_counters(mp); |
2318 | |
2319 | for_each_online_cpu(i) { |
2320 | cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); |
2321 | cnt->icsb_icount += cntp->icsb_icount; |
2322 | cnt->icsb_ifree += cntp->icsb_ifree; |
2323 | cnt->icsb_fdblocks += cntp->icsb_fdblocks; |
2324 | } |
2325 | |
2326 | if (!(flags & XFS_ICSB_LAZY_COUNT)) |
2327 | xfs_icsb_unlock_all_counters(mp); |
2328 | } |
2329 | |
2330 | STATIC int |
2331 | xfs_icsb_counter_disabled( |
2332 | xfs_mount_t *mp, |
2333 | xfs_sb_field_t field) |
2334 | { |
2335 | ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); |
2336 | return test_bit(field, &mp->m_icsb_counters); |
2337 | } |
2338 | |
2339 | STATIC void |
2340 | xfs_icsb_disable_counter( |
2341 | xfs_mount_t *mp, |
2342 | xfs_sb_field_t field) |
2343 | { |
2344 | xfs_icsb_cnts_t cnt; |
2345 | |
2346 | ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); |
2347 | |
2348 | /* |
2349 | * If we are already disabled, then there is nothing to do |
2350 | * here. We check before locking all the counters to avoid |
2351 | * the expensive lock operation when being called in the |
2352 | * slow path and the counter is already disabled. This is |
2353 | * safe because the only time we set or clear this state is under |
2354 | * the m_icsb_mutex. |
2355 | */ |
2356 | if (xfs_icsb_counter_disabled(mp, field)) |
2357 | return; |
2358 | |
2359 | xfs_icsb_lock_all_counters(mp); |
2360 | if (!test_and_set_bit(field, &mp->m_icsb_counters)) { |
2361 | /* drain back to superblock */ |
2362 | |
2363 | xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT); |
2364 | switch(field) { |
2365 | case XFS_SBS_ICOUNT: |
2366 | mp->m_sb.sb_icount = cnt.icsb_icount; |
2367 | break; |
2368 | case XFS_SBS_IFREE: |
2369 | mp->m_sb.sb_ifree = cnt.icsb_ifree; |
2370 | break; |
2371 | case XFS_SBS_FDBLOCKS: |
2372 | mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; |
2373 | break; |
2374 | default: |
2375 | BUG(); |
2376 | } |
2377 | } |
2378 | |
2379 | xfs_icsb_unlock_all_counters(mp); |
2380 | } |
2381 | |
2382 | STATIC void |
2383 | xfs_icsb_enable_counter( |
2384 | xfs_mount_t *mp, |
2385 | xfs_sb_field_t field, |
2386 | uint64_t count, |
2387 | uint64_t resid) |
2388 | { |
2389 | xfs_icsb_cnts_t *cntp; |
2390 | int i; |
2391 | |
2392 | ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); |
2393 | |
2394 | xfs_icsb_lock_all_counters(mp); |
2395 | for_each_online_cpu(i) { |
2396 | cntp = per_cpu_ptr(mp->m_sb_cnts, i); |
2397 | switch (field) { |
2398 | case XFS_SBS_ICOUNT: |
2399 | cntp->icsb_icount = count + resid; |
2400 | break; |
2401 | case XFS_SBS_IFREE: |
2402 | cntp->icsb_ifree = count + resid; |
2403 | break; |
2404 | case XFS_SBS_FDBLOCKS: |
2405 | cntp->icsb_fdblocks = count + resid; |
2406 | break; |
2407 | default: |
2408 | BUG(); |
2409 | break; |
2410 | } |
2411 | resid = 0; |
2412 | } |
2413 | clear_bit(field, &mp->m_icsb_counters); |
2414 | xfs_icsb_unlock_all_counters(mp); |
2415 | } |
2416 | |
2417 | void |
2418 | xfs_icsb_sync_counters_locked( |
2419 | xfs_mount_t *mp, |
2420 | int flags) |
2421 | { |
2422 | xfs_icsb_cnts_t cnt; |
2423 | |
2424 | xfs_icsb_count(mp, &cnt, flags); |
2425 | |
2426 | if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT)) |
2427 | mp->m_sb.sb_icount = cnt.icsb_icount; |
2428 | if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) |
2429 | mp->m_sb.sb_ifree = cnt.icsb_ifree; |
2430 | if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) |
2431 | mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; |
2432 | } |
2433 | |
2434 | /* |
2435 | * Accurate update of per-cpu counters to incore superblock |
2436 | */ |
2437 | void |
2438 | xfs_icsb_sync_counters( |
2439 | xfs_mount_t *mp, |
2440 | int flags) |
2441 | { |
2442 | spin_lock(&mp->m_sb_lock); |
2443 | xfs_icsb_sync_counters_locked(mp, flags); |
2444 | spin_unlock(&mp->m_sb_lock); |
2445 | } |
2446 | |
2447 | /* |
2448 | * Balance and enable/disable counters as necessary. |
2449 | * |
2450 | * Thresholds for re-enabling counters are somewhat magic. Inode counts are |
2451 | * chosen to be the same number as a single on-disk allocation chunk per CPU, |
2452 | * and free blocks is something far enough from zero that we don't thrash when |
2453 | * we get near ENOSPC. We also need to supply a minimum we require per cpu to |
2454 | * prevent looping endlessly when xfs_alloc_space asks for more than will |
2455 | * be distributed to a single CPU but each CPU has enough blocks to be |
2456 | * re-enabled. |
2457 | * |
2458 | * Note that we can be called when counters are already disabled. |
2459 | * xfs_icsb_disable_counter() optimises the counter locking in this case to |
2460 | * prevent locking every per-cpu counter needlessly. |
2461 | */ |
2462 | |
2463 | #define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64 |
2464 | #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \ |
2465 | (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp)) |
2466 | STATIC void |
2467 | xfs_icsb_balance_counter_locked( |
2468 | xfs_mount_t *mp, |
2469 | xfs_sb_field_t field, |
2470 | int min_per_cpu) |
2471 | { |
2472 | uint64_t count, resid; |
2473 | int weight = num_online_cpus(); |
2474 | uint64_t min = (uint64_t)min_per_cpu; |
2475 | |
2476 | /* disable counter and sync counter */ |
2477 | xfs_icsb_disable_counter(mp, field); |
2478 | |
2479 | /* update counters - first CPU gets residual */ |
2480 | switch (field) { |
2481 | case XFS_SBS_ICOUNT: |
2482 | count = mp->m_sb.sb_icount; |
2483 | resid = do_div(count, weight); |
2484 | if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) |
2485 | return; |
2486 | break; |
2487 | case XFS_SBS_IFREE: |
2488 | count = mp->m_sb.sb_ifree; |
2489 | resid = do_div(count, weight); |
2490 | if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) |
2491 | return; |
2492 | break; |
2493 | case XFS_SBS_FDBLOCKS: |
2494 | count = mp->m_sb.sb_fdblocks; |
2495 | resid = do_div(count, weight); |
2496 | if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp))) |
2497 | return; |
2498 | break; |
2499 | default: |
2500 | BUG(); |
2501 | count = resid = 0; /* quiet, gcc */ |
2502 | break; |
2503 | } |
2504 | |
2505 | xfs_icsb_enable_counter(mp, field, count, resid); |
2506 | } |
2507 | |
2508 | STATIC void |
2509 | xfs_icsb_balance_counter( |
2510 | xfs_mount_t *mp, |
2511 | xfs_sb_field_t fields, |
2512 | int min_per_cpu) |
2513 | { |
2514 | spin_lock(&mp->m_sb_lock); |
2515 | xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu); |
2516 | spin_unlock(&mp->m_sb_lock); |
2517 | } |
2518 | |
2519 | STATIC int |
2520 | xfs_icsb_modify_counters( |
2521 | xfs_mount_t *mp, |
2522 | xfs_sb_field_t field, |
2523 | int64_t delta, |
2524 | int rsvd) |
2525 | { |
2526 | xfs_icsb_cnts_t *icsbp; |
2527 | long long lcounter; /* long counter for 64 bit fields */ |
2528 | int ret = 0; |
2529 | |
2530 | might_sleep(); |
2531 | again: |
2532 | preempt_disable(); |
2533 | icsbp = this_cpu_ptr(mp->m_sb_cnts); |
2534 | |
2535 | /* |
2536 | * if the counter is disabled, go to slow path |
2537 | */ |
2538 | if (unlikely(xfs_icsb_counter_disabled(mp, field))) |
2539 | goto slow_path; |
2540 | xfs_icsb_lock_cntr(icsbp); |
2541 | if (unlikely(xfs_icsb_counter_disabled(mp, field))) { |
2542 | xfs_icsb_unlock_cntr(icsbp); |
2543 | goto slow_path; |
2544 | } |
2545 | |
2546 | switch (field) { |
2547 | case XFS_SBS_ICOUNT: |
2548 | lcounter = icsbp->icsb_icount; |
2549 | lcounter += delta; |
2550 | if (unlikely(lcounter < 0)) |
2551 | goto balance_counter; |
2552 | icsbp->icsb_icount = lcounter; |
2553 | break; |
2554 | |
2555 | case XFS_SBS_IFREE: |
2556 | lcounter = icsbp->icsb_ifree; |
2557 | lcounter += delta; |
2558 | if (unlikely(lcounter < 0)) |
2559 | goto balance_counter; |
2560 | icsbp->icsb_ifree = lcounter; |
2561 | break; |
2562 | |
2563 | case XFS_SBS_FDBLOCKS: |
2564 | BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0); |
2565 | |
2566 | lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); |
2567 | lcounter += delta; |
2568 | if (unlikely(lcounter < 0)) |
2569 | goto balance_counter; |
2570 | icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); |
2571 | break; |
2572 | default: |
2573 | BUG(); |
2574 | break; |
2575 | } |
2576 | xfs_icsb_unlock_cntr(icsbp); |
2577 | preempt_enable(); |
2578 | return 0; |
2579 | |
2580 | slow_path: |
2581 | preempt_enable(); |
2582 | |
2583 | /* |
2584 | * serialise with a mutex so we don't burn lots of cpu on |
2585 | * the superblock lock. We still need to hold the superblock |
2586 | * lock, however, when we modify the global structures. |
2587 | */ |
2588 | xfs_icsb_lock(mp); |
2589 | |
2590 | /* |
2591 | * Now running atomically. |
2592 | * |
2593 | * If the counter is enabled, someone has beaten us to rebalancing. |
2594 | * Drop the lock and try again in the fast path.... |
2595 | */ |
2596 | if (!(xfs_icsb_counter_disabled(mp, field))) { |
2597 | xfs_icsb_unlock(mp); |
2598 | goto again; |
2599 | } |
2600 | |
2601 | /* |
2602 | * The counter is currently disabled. Because we are |
2603 | * running atomically here, we know a rebalance cannot |
2604 | * be in progress. Hence we can go straight to operating |
2605 | * on the global superblock. We do not call xfs_mod_incore_sb() |
2606 | * here even though we need to get the m_sb_lock. Doing so |
2607 | * will cause us to re-enter this function and deadlock. |
2608 | * Hence we get the m_sb_lock ourselves and then call |
2609 | * xfs_mod_incore_sb_unlocked() as the unlocked path operates |
2610 | * directly on the global counters. |
2611 | */ |
2612 | spin_lock(&mp->m_sb_lock); |
2613 | ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); |
2614 | spin_unlock(&mp->m_sb_lock); |
2615 | |
2616 | /* |
2617 | * Now that we've modified the global superblock, we |
2618 | * may be able to re-enable the distributed counters |
2619 | * (e.g. lots of space just got freed). After that |
2620 | * we are done. |
2621 | */ |
2622 | if (ret != ENOSPC) |
2623 | xfs_icsb_balance_counter(mp, field, 0); |
2624 | xfs_icsb_unlock(mp); |
2625 | return ret; |
2626 | |
2627 | balance_counter: |
2628 | xfs_icsb_unlock_cntr(icsbp); |
2629 | preempt_enable(); |
2630 | |
2631 | /* |
2632 | * We may have multiple threads here if multiple per-cpu |
2633 | * counters run dry at the same time. This will mean we can |
2634 | * do more balances than strictly necessary but it is not |
2635 | * the common slowpath case. |
2636 | */ |
2637 | xfs_icsb_lock(mp); |
2638 | |
2639 | /* |
2640 | * running atomically. |
2641 | * |
2642 | * This will leave the counter in the correct state for future |
2643 | * accesses. After the rebalance, we simply try again and our retry |
2644 | * will either succeed through the fast path or slow path without |
2645 | * another balance operation being required. |
2646 | */ |
2647 | xfs_icsb_balance_counter(mp, field, delta); |
2648 | xfs_icsb_unlock(mp); |
2649 | goto again; |
2650 | } |
2651 | |
2652 | #endif |
2653 |