1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
16 #include "xfs_iwalk.h"
17 #include "xfs_quota.h"
19 #include "xfs_bmap_util.h"
20 #include "xfs_trans.h"
21 #include "xfs_trans_space.h"
23 #include "xfs_trace.h"
24 #include "xfs_icache.h"
25 #include "xfs_error.h"
28 * The global quota manager. There is only one of these for the entire
29 * system, _not_ one per file system. XQM keeps track of the overall
30 * quota functionality, including maintaining the freelist and hash
33 STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
34 STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
36 STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
37 STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
39 * We use the batch lookup interface to iterate over the dquots as it
40 * currently is the only interface into the radix tree code that allows
41 * fuzzy lookups instead of exact matches. Holding the lock over multiple
42 * operations is fine as all callers run either during mount/umount
45 #define XFS_DQ_LOOKUP_BATCH 32
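/*
 * The walker below repeatedly grabs up to XFS_DQ_LOOKUP_BATCH dquots with a
 * single radix_tree_gang_lookup() under qi_tree_lock, runs the caller's
 * @execute callback on each one, and restarts the next lookup just past the
 * highest dquot id seen so far.
 */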
51 int (*execute)(struct xfs_dquot *dqp, void *data),
54 struct xfs_quotainfo *qi = mp->m_quotainfo;
55 struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
67 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
71 mutex_lock(&qi->qi_tree_lock);
72 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
73 next_index, XFS_DQ_LOOKUP_BATCH);
75 mutex_unlock(&qi->qi_tree_lock);
79 for (i = 0; i < nr_found; i++) {
80 struct xfs_dquot *dqp = batch[i];
82 next_index = dqp->q_id + 1;
84 error = execute(batch[i], data);
85 if (error == -EAGAIN) {
89 if (error && last_error != -EFSCORRUPTED)
93 mutex_unlock(&qi->qi_tree_lock);
95 /* bail out if the filesystem is corrupted. */
96 if (last_error == -EFSCORRUPTED) {
100 /* we're done if id overflows back to zero */
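/*
 * Illustrative sketch only (not part of the original file): the shape of an
 * @execute callback for xfs_qm_dquot_walk().  Returning 0 continues the walk,
 * -EAGAIN asks the walker to skip the current dquot, and any other error is
 * recorded as last_error, with -EFSCORRUPTED aborting the walk immediately.
 * The callback name and counting behaviour here are hypothetical.
 */
STATIC int
xfs_qm_count_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	unsigned long		*count = data;

	(*count)++;
	return 0;
}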
115 * Purge a dquot from all tracking data structures and free it.
119 struct xfs_dquot *dqp,
122 struct xfs_mount *mp = dqp->q_mount;
123 struct xfs_quotainfo *qi = mp->m_quotainfo;
127 if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
130 dqp->q_flags |= XFS_DQFLAG_FREEING;
135 * If we are turning this type of quota off, we don't care
136 * about the dirty metadata sitting in this dquot. OTOH, if
137 * we're unmounting, we do care, so we flush it and wait.
139 if (XFS_DQ_IS_DIRTY(dqp)) {
140 struct xfs_buf *bp = NULL;
143 * We don't care about getting disk errors here. We need
144 * to purge this dquot anyway, so we go ahead regardless.
146 error = xfs_qm_dqflush(dqp, &bp);
148 error = xfs_bwrite(bp);
150 } else if (error == -EAGAIN) {
151 dqp->q_flags &= ~XFS_DQFLAG_FREEING;
157 ASSERT(atomic_read(&dqp->q_pincount) == 0);
158 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
159 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
164 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
168 * We move dquots to the freelist as soon as their reference count
169 * hits zero, so it really should be on the freelist here.
171 ASSERT(!list_empty(&dqp->q_lru));
172 list_lru_del(&qi->qi_lru, &dqp->q_lru);
173 XFS_STATS_DEC(mp, xs_qm_dquot_unused);
175 xfs_qm_dqdestroy(dqp);
184 * Purge the dquot cache.
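 * The @flags argument selects which quota types to purge: XFS_QMOPT_UQUOTA,
 * XFS_QMOPT_GQUOTA and/or XFS_QMOPT_PQUOTA, or XFS_QMOPT_QUOTALL for all of
 * them.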
188 struct xfs_mount *mp,
191 if (flags & XFS_QMOPT_UQUOTA)
192 xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
193 if (flags & XFS_QMOPT_GQUOTA)
194 xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
195 if (flags & XFS_QMOPT_PQUOTA)
196 xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
200 * Just destroy the quotainfo structure.
204 struct xfs_mount *mp)
206 if (mp->m_quotainfo) {
207 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
208 xfs_qm_destroy_quotainfo(mp);
213 * Called from the vfsops layer.
216 xfs_qm_unmount_quotas(
220 * Release the dquots that the root inode, et al. might be holding,
221 * before we flush quotas and blow away the quotainfo structure.
223 ASSERT(mp->m_rootip);
224 xfs_qm_dqdetach(mp->m_rootip);
226 xfs_qm_dqdetach(mp->m_rbmip);
228 xfs_qm_dqdetach(mp->m_rsumip);
231 * Release the quota inodes.
233 if (mp->m_quotainfo) {
234 if (mp->m_quotainfo->qi_uquotaip) {
235 xfs_irele(mp->m_quotainfo->qi_uquotaip);
236 mp->m_quotainfo->qi_uquotaip = NULL;
238 if (mp->m_quotainfo->qi_gquotaip) {
239 xfs_irele(mp->m_quotainfo->qi_gquotaip);
240 mp->m_quotainfo->qi_gquotaip = NULL;
242 if (mp->m_quotainfo->qi_pquotaip) {
243 xfs_irele(mp->m_quotainfo->qi_pquotaip);
244 mp->m_quotainfo->qi_pquotaip = NULL;
251 struct xfs_inode *ip,
255 struct xfs_dquot **IO_idqpp)
257 struct xfs_dquot *dqp;
260 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
264 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
265 * or &i_gdquot. This made the code look weird, but made the logic a lot
270 trace_xfs_dqattach_found(dqp);
275 * Find the dquot from somewhere. This bumps the reference count of the
276 * dquot and returns it locked. This can return ENOENT if the dquot didn't
277 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
278 * turned off suddenly.
280 error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
284 trace_xfs_dqattach_get(dqp);
287 * dqget may have dropped and re-acquired the ilock, but it guarantees
288 * that the dquot returned is the one that should go in the inode.
296 xfs_qm_need_dqattach(
297 struct xfs_inode *ip)
299 struct xfs_mount *mp = ip->i_mount;
301 if (!XFS_IS_QUOTA_RUNNING(mp))
303 if (!XFS_IS_QUOTA_ON(mp))
305 if (!XFS_NOT_DQATTACHED(mp, ip))
307 if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
313 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
315 * If @doalloc is true, the dquot(s) will be allocated if needed.
316 * Inode may get unlocked and relocked in here, and the caller must deal with
320 xfs_qm_dqattach_locked(
324 xfs_mount_t *mp = ip->i_mount;
327 if (!xfs_qm_need_dqattach(ip))
330 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
332 if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
333 error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)),
334 XFS_DQTYPE_USER, doalloc, &ip->i_udquot);
337 ASSERT(ip->i_udquot);
340 if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
341 error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)),
342 XFS_DQTYPE_GROUP, doalloc, &ip->i_gdquot);
345 ASSERT(ip->i_gdquot);
348 if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
349 error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQTYPE_PROJ,
350 doalloc, &ip->i_pdquot);
353 ASSERT(ip->i_pdquot);
358 * Don't worry about the dquots that we may have attached before any
359 * error - they'll get detached later if that has not already been done.
361 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
367 struct xfs_inode *ip)
371 if (!xfs_qm_need_dqattach(ip))
374 xfs_ilock(ip, XFS_ILOCK_EXCL);
375 error = xfs_qm_dqattach_locked(ip, false);
376 xfs_iunlock(ip, XFS_ILOCK_EXCL);
382 * Release dquots (and their references) if any.
383 * The inode should be locked EXCL except when this is called by
390 if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
393 trace_xfs_dquot_dqdetach(ip);
395 ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
397 xfs_qm_dqrele(ip->i_udquot);
401 xfs_qm_dqrele(ip->i_gdquot);
405 xfs_qm_dqrele(ip->i_pdquot);
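/*
 * State carried through one pass of the dquot shrinker: dirty dquots that get
 * flushed have their buffers queued on @buffers, while dquots that are ready
 * to be freed are moved onto @dispose.
 */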
410 struct xfs_qm_isolate {
411 struct list_head buffers;
412 struct list_head dispose;
415 static enum lru_status
416 xfs_qm_dquot_isolate(
417 struct list_head *item,
418 struct list_lru_one *lru,
419 spinlock_t *lru_lock,
421 __releases(lru_lock) __acquires(lru_lock)
423 struct xfs_dquot *dqp = container_of(item,
424 struct xfs_dquot, q_lru);
425 struct xfs_qm_isolate *isol = arg;
427 if (!xfs_dqlock_nowait(dqp))
431 * This dquot has acquired a reference in the meantime; remove it from
432 * the freelist and try again.
436 XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
438 trace_xfs_dqreclaim_want(dqp);
439 list_lru_isolate(lru, &dqp->q_lru);
440 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
445 * If the dquot is dirty, flush it. If it's already being flushed, just
446 * skip it so there is time for the IO to complete before we try to
447 * reclaim it again on the next LRU pass.
449 if (!xfs_dqflock_nowait(dqp)) {
454 if (XFS_DQ_IS_DIRTY(dqp)) {
455 struct xfs_buf *bp = NULL;
458 trace_xfs_dqreclaim_dirty(dqp);
460 /* we have to drop the LRU lock to flush the dquot */
461 spin_unlock(lru_lock);
463 error = xfs_qm_dqflush(dqp, &bp);
465 goto out_unlock_dirty;
467 xfs_buf_delwri_queue(bp, &isol->buffers);
469 goto out_unlock_dirty;
474 * Prevent lookups now that we are past the point of no return.
476 dqp->q_flags |= XFS_DQFLAG_FREEING;
479 ASSERT(dqp->q_nrefs == 0);
480 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
481 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
482 trace_xfs_dqreclaim_done(dqp);
483 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
487 trace_xfs_dqreclaim_busy(dqp);
488 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
492 trace_xfs_dqreclaim_busy(dqp);
493 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
501 struct shrinker *shrink,
502 struct shrink_control *sc)
504 struct xfs_quotainfo *qi = container_of(shrink,
505 struct xfs_quotainfo, qi_shrinker);
506 struct xfs_qm_isolate isol;
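/*
 * Avoid reclaim recursion: only scan when the allocation context allows
 * blocking filesystem activity (__GFP_FS together with direct reclaim).
 */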
510 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
513 INIT_LIST_HEAD(&isol.buffers);
514 INIT_LIST_HEAD(&isol.dispose);
516 freed = list_lru_shrink_walk(&qi->qi_lru, sc,
517 xfs_qm_dquot_isolate, &isol);
519 error = xfs_buf_delwri_submit(&isol.buffers);
521 xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
523 while (!list_empty(&isol.dispose)) {
524 struct xfs_dquot *dqp;
526 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
527 list_del_init(&dqp->q_lru);
528 xfs_qm_dqfree_one(dqp);
536 struct shrinker *shrink,
537 struct shrink_control *sc)
539 struct xfs_quotainfo *qi = container_of(shrink,
540 struct xfs_quotainfo, qi_shrinker);
542 return list_lru_shrink_count(&qi->qi_lru, sc);
547 struct xfs_mount *mp,
549 struct xfs_quotainfo *qinf)
551 struct xfs_dquot *dqp;
552 struct xfs_def_quota *defq;
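/*
 * Read the id-0 dquot for this quota type without attaching it to any tree or
 * list; its limits seed the per-type defaults below and it is then destroyed.
 */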
555 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
559 defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
562 * Timers and warnings have already been set; let's just set the
563 * default limits for this quota type
565 defq->blk.hard = dqp->q_blk.hardlimit;
566 defq->blk.soft = dqp->q_blk.softlimit;
567 defq->ino.hard = dqp->q_ino.hardlimit;
568 defq->ino.soft = dqp->q_ino.softlimit;
569 defq->rtb.hard = dqp->q_rtb.hardlimit;
570 defq->rtb.soft = dqp->q_rtb.softlimit;
571 xfs_qm_dqdestroy(dqp);
574 /* Initialize quota time limits from the root dquot. */
576 xfs_qm_init_timelimits(
577 struct xfs_mount *mp,
580 struct xfs_quotainfo *qinf = mp->m_quotainfo;
581 struct xfs_def_quota *defq;
582 struct xfs_dquot *dqp;
585 defq = xfs_get_defquota(qinf, type);
587 defq->blk.time = XFS_QM_BTIMELIMIT;
588 defq->ino.time = XFS_QM_ITIMELIMIT;
589 defq->rtb.time = XFS_QM_RTBTIMELIMIT;
590 defq->blk.warn = XFS_QM_BWARNLIMIT;
591 defq->ino.warn = XFS_QM_IWARNLIMIT;
592 defq->rtb.warn = XFS_QM_RTBWARNLIMIT;
595 * We try to get the limits from the superuser's limits fields.
596 * This is quite hacky, but it is standard quota practice.
598 * Since we may not have done a quotacheck by this point, just read
599 * the dquot without attaching it to any hashtables or lists.
601 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
606 * The warnings and timers set the grace period given to
607 * a user or group before they can no longer perform any
608 * more writes. If it is zero, a default is used.
610 if (dqp->q_blk.timer)
611 defq->blk.time = dqp->q_blk.timer;
612 if (dqp->q_ino.timer)
613 defq->ino.time = dqp->q_ino.timer;
614 if (dqp->q_rtb.timer)
615 defq->rtb.time = dqp->q_rtb.timer;
616 if (dqp->q_blk.warnings)
617 defq->blk.warn = dqp->q_blk.warnings;
618 if (dqp->q_ino.warnings)
619 defq->ino.warn = dqp->q_ino.warnings;
620 if (dqp->q_rtb.warnings)
621 defq->rtb.warn = dqp->q_rtb.warnings;
623 xfs_qm_dqdestroy(dqp);
627 * This initializes all the quota information that's kept in the
631 xfs_qm_init_quotainfo(
632 struct xfs_mount *mp)
634 struct xfs_quotainfo *qinf;
637 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
639 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
641 error = list_lru_init(&qinf->qi_lru);
646 * See if the quota inodes are set up, and if not, allocate them,
647 * and change the superblock accordingly.
649 error = xfs_qm_init_quotainos(mp);
653 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
654 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
655 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
656 mutex_init(&qinf->qi_tree_lock);
658 /* mutex used to serialize quotaoffs */
659 mutex_init(&qinf->qi_quotaofflock);
661 /* Precalc some constants */
662 qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
663 qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
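/* Carry forward any quotacheck-completed state recorded in the superblock. */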
665 mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
667 xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
668 xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
669 xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
671 if (XFS_IS_UQUOTA_RUNNING(mp))
672 xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
673 if (XFS_IS_GQUOTA_RUNNING(mp))
674 xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
675 if (XFS_IS_PQUOTA_RUNNING(mp))
676 xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
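/*
 * Register a shrinker so that unused dquots parked on the qi_lru list can be
 * reclaimed when the system comes under memory pressure.
 */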
678 qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
679 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
680 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
681 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
683 error = register_shrinker(&qinf->qi_shrinker);
690 mutex_destroy(&qinf->qi_quotaofflock);
691 mutex_destroy(&qinf->qi_tree_lock);
692 xfs_qm_destroy_quotainos(qinf);
694 list_lru_destroy(&qinf->qi_lru);
697 mp->m_quotainfo = NULL;
702 * Gets called when unmounting a filesystem or when all quotas get
704 * This purges the quota inodes, destroys locks and frees itself.
707 xfs_qm_destroy_quotainfo(
708 struct xfs_mount *mp)
710 struct xfs_quotainfo *qi;
712 qi = mp->m_quotainfo;
715 unregister_shrinker(&qi->qi_shrinker);
716 list_lru_destroy(&qi->qi_lru);
717 xfs_qm_destroy_quotainos(qi);
718 mutex_destroy(&qi->qi_tree_lock);
719 mutex_destroy(&qi->qi_quotaofflock);
721 mp->m_quotainfo = NULL;
725 * Create an inode and return with a reference already taken, but unlocked.
726 * This is how we create quota inodes.
736 bool need_alloc = true;
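/*
 * need_alloc stays true unless an existing group/project quota inode can be
 * reused further down, in which case the transaction needs no inode-create
 * space reservation.
 */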
740 * With a superblock that doesn't have a separate pquotino, we
741 * share an inode between gquota and pquota. If the on-disk
742 * superblock has GQUOTA and the filesystem is now mounted
743 * with PQUOTA, just use sb_gquotino for sb_pquotino and
746 if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
747 (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
748 xfs_ino_t ino = NULLFSINO;
750 if ((flags & XFS_QMOPT_PQUOTA) &&
751 (mp->m_sb.sb_gquotino != NULLFSINO)) {
752 ino = mp->m_sb.sb_gquotino;
753 if (XFS_IS_CORRUPT(mp,
754 mp->m_sb.sb_pquotino != NULLFSINO))
755 return -EFSCORRUPTED;
756 } else if ((flags & XFS_QMOPT_GQUOTA) &&
757 (mp->m_sb.sb_pquotino != NULLFSINO)) {
758 ino = mp->m_sb.sb_pquotino;
759 if (XFS_IS_CORRUPT(mp,
760 mp->m_sb.sb_gquotino != NULLFSINO))
761 return -EFSCORRUPTED;
763 if (ino != NULLFSINO) {
764 error = xfs_iget(mp, NULL, ino, 0, 0, ip);
767 mp->m_sb.sb_gquotino = NULLFSINO;
768 mp->m_sb.sb_pquotino = NULLFSINO;
773 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
774 need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
780 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip);
782 xfs_trans_cancel(tp);
788 * Make the changes in the superblock, and log those too.
789 * sbfields arg may contain fields other than *QUOTINO;
790 * VERSIONNUM for example.
792 spin_lock(&mp->m_sb_lock);
793 if (flags & XFS_QMOPT_SBVERSION) {
794 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
796 xfs_sb_version_addquota(&mp->m_sb);
797 mp->m_sb.sb_uquotino = NULLFSINO;
798 mp->m_sb.sb_gquotino = NULLFSINO;
799 mp->m_sb.sb_pquotino = NULLFSINO;
801 /* qflags will get updated fully _after_ quotacheck */
802 mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
804 if (flags & XFS_QMOPT_UQUOTA)
805 mp->m_sb.sb_uquotino = (*ip)->i_ino;
806 else if (flags & XFS_QMOPT_GQUOTA)
807 mp->m_sb.sb_gquotino = (*ip)->i_ino;
809 mp->m_sb.sb_pquotino = (*ip)->i_ino;
810 spin_unlock(&mp->m_sb_lock);
813 error = xfs_trans_commit(tp);
815 ASSERT(XFS_FORCED_SHUTDOWN(mp));
816 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
819 xfs_finish_inode_setup(*ip);
825 xfs_qm_reset_dqcounts(
826 struct xfs_mount *mp,
831 struct xfs_dqblk *dqb;
834 trace_xfs_reset_dqcounts(bp, _RET_IP_);
837 * Reset all counters and timers. They'll be
838 * started afresh by xfs_qm_quotacheck.
841 j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
843 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
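/* Walk every dquot record in this buffer, repairing and resetting it. */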
846 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
847 struct xfs_disk_dquot *ddq;
849 ddq = (struct xfs_disk_dquot *)&dqb[j];
852 * Do a sanity check, and if needed, repair the dqblk. Don't
853 * output any warnings because it's perfectly possible to
854 * find uninitialised dquot blks. See comment in
857 if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
858 (dqb[j].dd_diskdq.d_flags & XFS_DQTYPE_REC_MASK) != type)
859 xfs_dqblk_repair(mp, &dqb[j], id + j, type);
862 * Reset the type in case we are reusing the group quota file for
863 * project quotas or vice versa
871 * dquot id 0 stores the default grace period and the maximum
872 * warning limit that were set by the administrator, so we
873 * should not reset them.
875 if (ddq->d_id != 0) {
884 if (xfs_sb_version_hascrc(&mp->m_sb)) {
885 xfs_update_cksum((char *)&dqb[j],
886 sizeof(struct xfs_dqblk),
893 xfs_qm_reset_dqcounts_all(
894 struct xfs_mount *mp,
897 xfs_filblks_t blkcnt,
899 struct list_head *buffer_list)
907 * Blkcnt arg can be a very big number, and might even be
908 * larger than the log itself. So, we have to break it up into
909 * manageable-sized transactions.
910 * Note that we don't start a permanent transaction here; we might
911 * not be able to get a log reservation for the whole thing up front,
912 * and we don't really care to either, because we just discard
913 * everything if we were to crash in the middle of this loop.
916 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
917 XFS_FSB_TO_DADDR(mp, bno),
918 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
922 * CRC and validation errors will return -EFSCORRUPTED here. If
923 * this occurs, re-read without CRC validation so that we can
924 * repair the damage via xfs_qm_reset_dqcounts(). This process
925 * will leave a trace in the log indicating corruption has
928 if (error == -EFSCORRUPTED) {
929 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
930 XFS_FSB_TO_DADDR(mp, bno),
931 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
939 * A corrupt buffer might not have a verifier attached, so
940 * make sure we have the correct one attached before writeback
943 bp->b_ops = &xfs_dquot_buf_ops;
944 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
945 xfs_buf_delwri_queue(bp, buffer_list);
948 /* go to the next block. */
950 firstid += mp->m_quotainfo->qi_dqperchunk;
957 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
958 * counters for every chunk of dquots that we find.
961 xfs_qm_reset_dqcounts_buf(
962 struct xfs_mount *mp,
963 struct xfs_inode *qip,
965 struct list_head *buffer_list)
967 struct xfs_bmbt_irec *map;
968 int i, nmaps; /* number of map entries */
969 int error; /* return value */
970 xfs_fileoff_t lblkno;
971 xfs_filblks_t maxlblkcnt;
973 xfs_fsblock_t rablkno;
974 xfs_filblks_t rablkcnt;
978 * This looks racy, but we can't keep an inode lock across a
979 * trans_reserve. But this gets called during quotacheck, and that
980 * happens only at mount time, which is single-threaded.
982 if (qip->i_d.di_nblocks == 0)
985 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
988 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
992 nmaps = XFS_DQITER_MAP_SIZE;
994 * We aren't changing the inode itself. Just changing
995 * some of its data. No new blocks are added here, and
996 * the inode is never added to the transaction.
998 lock_mode = xfs_ilock_data_map_shared(qip);
999 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1001 xfs_iunlock(qip, lock_mode);
1005 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1006 for (i = 0; i < nmaps; i++) {
1007 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1008 ASSERT(map[i].br_blockcount);
1011 lblkno += map[i].br_blockcount;
1013 if (map[i].br_startblock == HOLESTARTBLOCK)
1016 firstid = (xfs_dqid_t) map[i].br_startoff *
1017 mp->m_quotainfo->qi_dqperchunk;
1019 * Do a read-ahead on the next extent.
1021 if ((i+1 < nmaps) &&
1022 (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1023 rablkcnt = map[i+1].br_blockcount;
1024 rablkno = map[i+1].br_startblock;
1025 while (rablkcnt--) {
1026 xfs_buf_readahead(mp->m_ddev_targp,
1027 XFS_FSB_TO_DADDR(mp, rablkno),
1028 mp->m_quotainfo->qi_dqchunklen,
1029 &xfs_dquot_buf_ops);
1034 * Iterate through all the blocks in the extent and
1035 * reset the counters of all the dquots inside them.
1037 error = xfs_qm_reset_dqcounts_all(mp, firstid,
1038 map[i].br_startblock,
1039 map[i].br_blockcount,
1044 } while (nmaps > 0);
1052 * Called by dqusage_adjust in doing a quotacheck.
1054 * Given the inode and a dquot id, this updates both the incore dquot as well
1055 * as the buffer copy. This is so that once the quotacheck is done, we can
1056 * just log all the buffers, as opposed to logging numerous updates to
1057 * individual dquots.
1060 xfs_qm_quotacheck_dqadjust(
1061 struct xfs_inode *ip,
1066 struct xfs_mount *mp = ip->i_mount;
1067 struct xfs_dquot *dqp;
1071 id = xfs_qm_id_for_quotatype(ip, type);
1072 error = xfs_qm_dqget(mp, id, type, true, &dqp);
1075 * Shouldn't be able to turn off quotas here.
1077 ASSERT(error != -ESRCH);
1078 ASSERT(error != -ENOENT);
1082 trace_xfs_dqadjust(dqp);
1085 * Adjust the inode count and the block count to reflect this inode's
1089 dqp->q_ino.reserved++;
1091 dqp->q_blk.count += nblks;
1092 dqp->q_blk.reserved += nblks;
1095 dqp->q_rtb.count += rtblks;
1096 dqp->q_rtb.reserved += rtblks;
1100 * Set default limits, adjust timers (since we changed usages)
1102 * There are no timers for the default values set in the root dquot.
1105 xfs_qm_adjust_dqlimits(dqp);
1106 xfs_qm_adjust_dqtimers(dqp);
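/* Mark the dquot dirty so the post-quotacheck flush pass writes it back. */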
1109 dqp->q_flags |= XFS_DQFLAG_DIRTY;
1115 * callback routine supplied to xfs_iwalk_threaded(). Given an inumber, find its
1116 * dquots and update them to account for resources taken by that inode.
1120 xfs_qm_dqusage_adjust(
1121 struct xfs_mount *mp,
1122 struct xfs_trans *tp,
1126 struct xfs_inode *ip;
1128 xfs_filblks_t rtblks = 0; /* total rt blks */
1131 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1134 * rootino must have its resources accounted for, not so with the quota
1137 if (xfs_is_quota_inode(&mp->m_sb, ino))
1141 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1142 * at mount time and therefore nobody will be racing chown/chproj.
1144 error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1145 if (error == -EINVAL || error == -ENOENT)
1150 ASSERT(ip->i_delayed_blks == 0);
1152 if (XFS_IS_REALTIME_INODE(ip)) {
1153 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1155 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1156 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1161 xfs_bmap_count_leaves(ifp, &rtblks);
1164 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1167 * Add the (disk blocks and inode) resources occupied by this
1168 * inode to its dquots. We do this adjustment in the incore dquot,
1169 * and also copy the changes to its buffer.
1170 * We don't care about putting these changes in a transaction
1171 * envelope because if we crash in the middle of a 'quotacheck'
1172 * we have to start from the beginning anyway.
1173 * Once we're done, we'll log all the dquot bufs.
1175 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1176 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1178 if (XFS_IS_UQUOTA_ON(mp)) {
1179 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1185 if (XFS_IS_GQUOTA_ON(mp)) {
1186 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1192 if (XFS_IS_PQUOTA_ON(mp)) {
1193 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1206 struct xfs_dquot *dqp,
1209 struct xfs_mount *mp = dqp->q_mount;
1210 struct list_head *buffer_list = data;
1211 struct xfs_buf *bp = NULL;
1215 if (dqp->q_flags & XFS_DQFLAG_FREEING)
1217 if (!XFS_DQ_IS_DIRTY(dqp))
1221 * The only way the dquot is already flush locked by the time quotacheck
1222 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1223 * it for the final time. Quotacheck collects all dquot bufs in the
1224 * local delwri queue before dquots are dirtied, so reclaim can't have
1225 * possibly queued it for I/O. The only way out is to push the buffer to
1226 * cycle the flush lock.
1228 if (!xfs_dqflock_nowait(dqp)) {
1229 /* buf is pinned in-core by delwri list */
1230 bp = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1231 mp->m_quotainfo->qi_dqchunklen, 0);
1238 xfs_buf_delwri_pushbuf(bp, buffer_list);
1245 error = xfs_qm_dqflush(dqp, &bp);
1249 xfs_buf_delwri_queue(bp, buffer_list);
1257 * Walk through all the filesystem inodes and construct a consistent view
1258 * of the disk quota world. If the quotacheck fails, disable quotas.
1266 LIST_HEAD(buffer_list);
1267 struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1268 struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1269 struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1273 ASSERT(uip || gip || pip);
1274 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1276 xfs_notice(mp, "Quotacheck needed: Please wait.");
1279 * First we go through all the dquots on disk, USR and GRP/PRJ, and reset
1280 * their counters to zero. We need a clean slate.
1281 * We don't log our changes till later.
1284 error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1288 flags |= XFS_UQUOTA_CHKD;
1292 error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1296 flags |= XFS_GQUOTA_CHKD;
1300 error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1304 flags |= XFS_PQUOTA_CHKD;
1307 error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1313 * We've made all the changes that we need to make incore. Flush them
1314 * down to disk buffers if everything was updated successfully.
1316 if (XFS_IS_UQUOTA_ON(mp)) {
1317 error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1320 if (XFS_IS_GQUOTA_ON(mp)) {
1321 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1326 if (XFS_IS_PQUOTA_ON(mp)) {
1327 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1333 error2 = xfs_buf_delwri_submit(&buffer_list);
1338 * We can get this error if we couldn't do a dquot allocation inside
1339 * xfs_qm_dqusage_adjust (via the inode walk). We don't care about the
1340 * dirty dquots that might be cached; we just want to get rid of them
1341 * and turn quotaoff. The dquots won't be attached to any of the inodes
1342 * at this point (because we intentionally didn't in dqget_noattach).
1345 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1350 * If one type of quota is off, then it will lose its
1351 * quotachecked status, since we won't be doing accounting for
1352 * that type anymore.
1354 mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1355 mp->m_qflags |= flags;
1358 xfs_buf_delwri_cancel(&buffer_list);
1362 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1365 * We must turn off quotas.
1367 ASSERT(mp->m_quotainfo != NULL);
1368 xfs_qm_destroy_quotainfo(mp);
1369 if (xfs_mount_reset_sbqflags(mp)) {
1371 "Quotacheck: Failed to reset quota flags.");
1374 xfs_notice(mp, "Quotacheck: Done.");
1379 * This is called from xfs_mountfs to start quotas and initialize all
1380 * necessary data structures like quotainfo. This is also responsible for
1381 * running a quotacheck as necessary. We are guaranteed that the superblock
1382 * is consistently read in at this point.
1384 * If we fail here, the mount will continue with quota turned off. We don't
1385 * need to indicate success or failure at all.
1388 xfs_qm_mount_quotas(
1389 struct xfs_mount *mp)
1395 * If quotas on realtime volumes are not supported, we disable
1396 * quotas immediately.
1398 if (mp->m_sb.sb_rextents) {
1399 xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1404 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1407 * Allocate the quotainfo structure inside the mount struct, and
1408 * create quota inode(s), and change/rev the superblock if necessary.
1410 error = xfs_qm_init_quotainfo(mp);
1413 * We must turn off quotas.
1415 ASSERT(mp->m_quotainfo == NULL);
1420 * If any of the quotas are not consistent, do a quotacheck.
1422 if (XFS_QM_NEED_QUOTACHECK(mp)) {
1423 error = xfs_qm_quotacheck(mp);
1425 /* Quotacheck failed and disabled quotas. */
1430 * If one type of quota is off, then it will lose its
1431 * quotachecked status, since we won't be doing accounting for
1432 * that type anymore.
1434 if (!XFS_IS_UQUOTA_ON(mp))
1435 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1436 if (!XFS_IS_GQUOTA_ON(mp))
1437 mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1438 if (!XFS_IS_PQUOTA_ON(mp))
1439 mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1443 * We actually don't have to acquire the m_sb_lock at all.
1444 * This can only be called from mount, and that's single threaded. XXX
1446 spin_lock(&mp->m_sb_lock);
1447 sbf = mp->m_sb.sb_qflags;
1448 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1449 spin_unlock(&mp->m_sb_lock);
1451 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1452 if (xfs_sync_sb(mp, false)) {
1454 * We could only have been turning quotas off.
1455 * We aren't in very good shape actually because
1456 * the incore structures are convinced that quotas are
1457 * off, but the on-disk superblock doesn't know that!
1459 ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
1460 xfs_alert(mp, "%s: Superblock update failed!",
1466 xfs_warn(mp, "Failed to initialize disk quotas.");
1472 * This is called after the superblock has been read in and we're ready to
1473 * iget the quota inodes.
1476 xfs_qm_init_quotainos(
1479 struct xfs_inode *uip = NULL;
1480 struct xfs_inode *gip = NULL;
1481 struct xfs_inode *pip = NULL;
1485 ASSERT(mp->m_quotainfo);
1488 * Get the uquota, gquota and pquota inodes
1490 if (xfs_sb_version_hasquota(&mp->m_sb)) {
1491 if (XFS_IS_UQUOTA_ON(mp) &&
1492 mp->m_sb.sb_uquotino != NULLFSINO) {
1493 ASSERT(mp->m_sb.sb_uquotino > 0);
1494 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1499 if (XFS_IS_GQUOTA_ON(mp) &&
1500 mp->m_sb.sb_gquotino != NULLFSINO) {
1501 ASSERT(mp->m_sb.sb_gquotino > 0);
1502 error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1507 if (XFS_IS_PQUOTA_ON(mp) &&
1508 mp->m_sb.sb_pquotino != NULLFSINO) {
1509 ASSERT(mp->m_sb.sb_pquotino > 0);
1510 error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1516 flags |= XFS_QMOPT_SBVERSION;
1520 * Create the three inodes, if they don't exist already. The changes
1521 * made above will get added to a transaction and logged in one of
1522 * the qino_alloc calls below. If the device is readonly,
1523 * temporarily switch to read-write to do this.
1525 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1526 error = xfs_qm_qino_alloc(mp, &uip,
1527 flags | XFS_QMOPT_UQUOTA);
1531 flags &= ~XFS_QMOPT_SBVERSION;
1533 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1534 error = xfs_qm_qino_alloc(mp, &gip,
1535 flags | XFS_QMOPT_GQUOTA);
1539 flags &= ~XFS_QMOPT_SBVERSION;
1541 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1542 error = xfs_qm_qino_alloc(mp, &pip,
1543 flags | XFS_QMOPT_PQUOTA);
1548 mp->m_quotainfo->qi_uquotaip = uip;
1549 mp->m_quotainfo->qi_gquotaip = gip;
1550 mp->m_quotainfo->qi_pquotaip = pip;
1565 xfs_qm_destroy_quotainos(
1566 struct xfs_quotainfo *qi)
1568 if (qi->qi_uquotaip) {
1569 xfs_irele(qi->qi_uquotaip);
1570 qi->qi_uquotaip = NULL; /* paranoia */
1572 if (qi->qi_gquotaip) {
1573 xfs_irele(qi->qi_gquotaip);
1574 qi->qi_gquotaip = NULL;
1576 if (qi->qi_pquotaip) {
1577 xfs_irele(qi->qi_pquotaip);
1578 qi->qi_pquotaip = NULL;
1584 struct xfs_dquot *dqp)
1586 struct xfs_mount *mp = dqp->q_mount;
1587 struct xfs_quotainfo *qi = mp->m_quotainfo;
1589 mutex_lock(&qi->qi_tree_lock);
1590 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1593 mutex_unlock(&qi->qi_tree_lock);
1595 xfs_qm_dqdestroy(dqp);
1598 /* --------------- utility functions for vnodeops ---------------- */
1602 * Given an inode, a uid, gid and prid, make sure that we have
1603 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1604 * quotas by creating this file.
1605 * This also attaches dquot(s) to the given inode after locking it,
1606 * and returns the dquots corresponding to the uid, gid and/or prid.
1608 * in : inode (unlocked)
1609 * out : udquot, gdquot and pdquot with references taken and unlocked
1613 struct xfs_inode *ip,
1618 struct xfs_dquot **O_udqpp,
1619 struct xfs_dquot **O_gdqpp,
1620 struct xfs_dquot **O_pdqpp)
1622 struct xfs_mount *mp = ip->i_mount;
1623 struct inode *inode = VFS_I(ip);
1624 struct user_namespace *user_ns = inode->i_sb->s_user_ns;
1625 struct xfs_dquot *uq = NULL;
1626 struct xfs_dquot *gq = NULL;
1627 struct xfs_dquot *pq = NULL;
1631 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1634 lockflags = XFS_ILOCK_EXCL;
1635 xfs_ilock(ip, lockflags);
1637 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1641 * Attach the dquot(s) to this inode, doing a dquot allocation
1642 * if necessary. The dquot(s) will not be locked.
1644 if (XFS_NOT_DQATTACHED(mp, ip)) {
1645 error = xfs_qm_dqattach_locked(ip, true);
1647 xfs_iunlock(ip, lockflags);
1652 if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1653 if (!uid_eq(inode->i_uid, uid)) {
1655 * What we need is the dquot that has this uid, and
1656 * if we send the inode to dqget, the uid of the inode
1657 * takes priority over what's sent in the uid argument.
1658 * We must unlock inode here before calling dqget if
1659 * we're not sending the inode, because otherwise
1660 * we'll deadlock by doing trans_reserve while
1663 xfs_iunlock(ip, lockflags);
1664 error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1665 XFS_DQTYPE_USER, true, &uq);
1667 ASSERT(error != -ENOENT);
1671 * Get the ilock in the right order.
1674 lockflags = XFS_ILOCK_SHARED;
1675 xfs_ilock(ip, lockflags);
1678 * Take an extra reference, because we'll return
1681 ASSERT(ip->i_udquot);
1682 uq = xfs_qm_dqhold(ip->i_udquot);
1685 if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1686 if (!gid_eq(inode->i_gid, gid)) {
1687 xfs_iunlock(ip, lockflags);
1688 error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1689 XFS_DQTYPE_GROUP, true, &gq);
1691 ASSERT(error != -ENOENT);
1695 lockflags = XFS_ILOCK_SHARED;
1696 xfs_ilock(ip, lockflags);
1698 ASSERT(ip->i_gdquot);
1699 gq = xfs_qm_dqhold(ip->i_gdquot);
1702 if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1703 if (ip->i_d.di_projid != prid) {
1704 xfs_iunlock(ip, lockflags);
1705 error = xfs_qm_dqget(mp, (xfs_dqid_t)prid,
1706 XFS_DQTYPE_PROJ, true, &pq);
1708 ASSERT(error != -ENOENT);
1712 lockflags = XFS_ILOCK_SHARED;
1713 xfs_ilock(ip, lockflags);
1715 ASSERT(ip->i_pdquot);
1716 pq = xfs_qm_dqhold(ip->i_pdquot);
1719 trace_xfs_dquot_dqalloc(ip);
1721 xfs_iunlock(ip, lockflags);
1743 * Actually transfer ownership, and do dquot modifications.
1744 * These were already reserved.
1748 struct xfs_trans *tp,
1749 struct xfs_inode *ip,
1750 struct xfs_dquot **IO_olddq,
1751 struct xfs_dquot *newdq)
1753 struct xfs_dquot *prevdq;
1754 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1755 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1758 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1759 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1764 ASSERT(prevdq != newdq);
1766 xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1767 xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1769 /* the sparkling new dquot */
1770 xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1771 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1774 * Take an extra reference, because the inode is going to keep
1775 * this dquot pointer even after the trans_commit.
1777 *IO_olddq = xfs_qm_dqhold(newdq);
1783 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1786 xfs_qm_vop_chown_reserve(
1787 struct xfs_trans *tp,
1788 struct xfs_inode *ip,
1789 struct xfs_dquot *udqp,
1790 struct xfs_dquot *gdqp,
1791 struct xfs_dquot *pdqp,
1794 struct xfs_mount *mp = ip->i_mount;
1796 unsigned int blkflags;
1797 struct xfs_dquot *udq_unres = NULL;
1798 struct xfs_dquot *gdq_unres = NULL;
1799 struct xfs_dquot *pdq_unres = NULL;
1800 struct xfs_dquot *udq_delblks = NULL;
1801 struct xfs_dquot *gdq_delblks = NULL;
1802 struct xfs_dquot *pdq_delblks = NULL;
1806 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1807 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1809 delblks = ip->i_delayed_blks;
1810 blkflags = XFS_IS_REALTIME_INODE(ip) ?
1811 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1813 if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1814 i_uid_read(VFS_I(ip)) != udqp->q_id) {
1817 * If there are delayed allocation blocks, then we have to
1818 * unreserve those from the old dquot, and add them to the
1822 ASSERT(ip->i_udquot);
1823 udq_unres = ip->i_udquot;
1826 if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
1827 i_gid_read(VFS_I(ip)) != gdqp->q_id) {
1830 ASSERT(ip->i_gdquot);
1831 gdq_unres = ip->i_gdquot;
1835 if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
1836 ip->i_d.di_projid != pdqp->q_id) {
1839 ASSERT(ip->i_pdquot);
1840 pdq_unres = ip->i_pdquot;
1844 error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
1845 udq_delblks, gdq_delblks, pdq_delblks,
1846 ip->i_d.di_nblocks, 1, flags | blkflags);
1851 * Do the delayed blks reservations/unreservations now. Since these
1852 * are done without the help of a transaction, if a reservation fails,
1853 * its previous reservations won't be automatically undone by the trans
1854 * code. So we have to do it manually here.
1858 * Do the reservations first. Unreservation can't fail.
1860 ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
1861 ASSERT(udq_unres || gdq_unres || pdq_unres);
1862 error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1863 udq_delblks, gdq_delblks, pdq_delblks,
1864 (xfs_qcnt_t)delblks, 0, flags | blkflags);
1867 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
1868 udq_unres, gdq_unres, pdq_unres,
1869 -((xfs_qcnt_t)delblks), 0, blkflags);
1876 xfs_qm_vop_rename_dqattach(
1877 struct xfs_inode **i_tab)
1879 struct xfs_mount *mp = i_tab[0]->i_mount;
1882 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1885 for (i = 0; (i < 4 && i_tab[i]); i++) {
1886 struct xfs_inode *ip = i_tab[i];
1890 * Watch out for duplicate entries in the table.
1892 if (i == 0 || ip != i_tab[i-1]) {
1893 if (XFS_NOT_DQATTACHED(mp, ip)) {
1894 error = xfs_qm_dqattach(ip);
1904 xfs_qm_vop_create_dqattach(
1905 struct xfs_trans *tp,
1906 struct xfs_inode *ip,
1907 struct xfs_dquot *udqp,
1908 struct xfs_dquot *gdqp,
1909 struct xfs_dquot *pdqp)
1911 struct xfs_mount *mp = tp->t_mountp;
1913 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1916 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1918 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1919 ASSERT(ip->i_udquot == NULL);
1920 ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1922 ip->i_udquot = xfs_qm_dqhold(udqp);
1923 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
1925 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1926 ASSERT(ip->i_gdquot == NULL);
1927 ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1929 ip->i_gdquot = xfs_qm_dqhold(gdqp);
1930 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
1932 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1933 ASSERT(ip->i_pdquot == NULL);
1934 ASSERT(ip->i_d.di_projid == pdqp->q_id);
1936 ip->i_pdquot = xfs_qm_dqhold(pdqp);
1937 xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);