1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
10 #include "xfs_shared.h"
11 #include "xfs_format.h"
12 #include "xfs_log_format.h"
13 #include "xfs_trans_resv.h"
15 #include "xfs_mount.h"
16 #include "xfs_inode.h"
17 #include "xfs_trans.h"
18 #include "xfs_quota.h"
20 #include "xfs_icache.h"
22 STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
23 STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
/*
 * NOTE(review): interior lines of this excerpt appear to be elided (the
 * embedded line numbers jump and several comment/brace delimiters are
 * missing) -- verify control flow against the complete file.
 */
/*
27 * Turn off quota accounting and/or enforcement for all udquots and/or
28 * gdquots. Called only at unmount time.
30 * This assumes that there are no dquots of this file system cached
31 * incore, and modifies the ondisk dquot directly. Therefore, for example,
32 * it is an error to call this twice, without purging the cache.
 */
35 xfs_qm_scall_quotaoff(
39 struct xfs_quotainfo *q = mp->m_quotainfo;
42 uint inactivate_flags;
43 xfs_qoff_logitem_t *qoffstart;
/*
46 * No file system can have quotas enabled on disk but not in core.
47 * Note that quota utilities (like quotaoff) _expect_
48 * errno == -EEXIST here.
 */
50 if ((mp->m_qflags & flags) == 0)
54 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/*
57 * We don't want to deal with two quotaoffs messing up each other,
58 * so we're going to serialize it. quotaoff isn't exactly a performance
60 * If quotaoff, then we must be dealing with the root filesystem.
 */
63 mutex_lock(&q->qi_quotaofflock);
/*
66 * If we're just turning off quota enforcement, change mp and go.
 */
68 if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
69 mp->m_qflags &= ~(flags);
71 spin_lock(&mp->m_sb_lock);
72 mp->m_sb.sb_qflags = mp->m_qflags;
73 spin_unlock(&mp->m_sb_lock);
74 mutex_unlock(&q->qi_quotaofflock);
76 /* XXX what to do if error ? Revert back to old vals incore ? */
77 return xfs_sync_sb(mp, false);
/*
83 * If accounting is off, we must turn enforcement off, clear the
84 * quota 'CHKD' certificate to make it known that we have to
85 * do a quotacheck the next time this quota is turned on.
 */
/* Map each accounting flag to its dquot type and ACTIVE bit. */
87 if (flags & XFS_UQUOTA_ACCT) {
88 dqtype |= XFS_QMOPT_UQUOTA;
89 flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
90 inactivate_flags |= XFS_UQUOTA_ACTIVE;
92 if (flags & XFS_GQUOTA_ACCT) {
93 dqtype |= XFS_QMOPT_GQUOTA;
94 flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
95 inactivate_flags |= XFS_GQUOTA_ACTIVE;
97 if (flags & XFS_PQUOTA_ACCT) {
98 dqtype |= XFS_QMOPT_PQUOTA;
99 flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
100 inactivate_flags |= XFS_PQUOTA_ACTIVE;
/*
104 * Nothing to do? Don't complain. This happens when we're just
105 * turning off quota enforcement.
 */
107 if ((mp->m_qflags & flags) == 0)
/*
111 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
112 * and synchronously. If we fail to write, we should abort the
113 * operation as it cannot be recovered safely if we crash.
 */
115 error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
/*
120 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
121 * to take care of the race between dqget and quotaoff. We don't take
122 * any special locks to reset these bits. All processes need to check
123 * these bits *after* taking inode lock(s) to see if the particular
124 * quota type is in the process of being turned off. If *ACTIVE, it is
125 * guaranteed that all dquot structures and all quotainode ptrs will all
126 * stay valid as long as that inode is kept locked.
128 * There is no turning back after this.
 */
130 mp->m_qflags &= ~inactivate_flags;
/*
133 * Give back all the dquot reference(s) held by inodes.
134 * Here we go thru every single incore inode in this file system, and
135 * do a dqrele on the i_udquot/i_gdquot that it may have.
136 * Essentially, as long as somebody has an inode locked, this guarantees
137 * that quotas will not be turned off. This is handy because in a
138 * transaction once we lock the inode(s) and check for quotaon, we can
139 * depend on the quota inodes (and other things) being valid as long as
140 * we keep the lock(s).
 */
142 xfs_qm_dqrele_all_inodes(mp, flags);
/*
145 * Next we make the changes in the quota flag in the mount struct.
146 * This isn't protected by a particular lock directly, because we
147 * don't want to take a mrlock every time we depend on quotas being on.
 */
149 mp->m_qflags &= ~flags;
/*
152 * Go through all the dquots of this file system and purge them,
153 * according to what was turned off.
 */
155 xfs_qm_dqpurge_all(mp, dqtype);
/*
158 * Transactions that had started before ACTIVE state bit was cleared
159 * could have logged many dquots, so they'd have higher LSNs than
160 * the first QUOTAOFF log record does. If we happen to crash when
161 * the tail of the log has gone past the QUOTAOFF record, but
162 * before the last dquot modification, those dquots __will__
163 * recover, and that's not good.
165 * So, we have QUOTAOFF start and end logitems; the start
166 * logitem won't get overwritten until the end logitem appears...
 */
168 error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
170 /* We're screwed now. Shutdown is the only option. */
171 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
/*
176 * If all quotas are completely turned off, close shop.
 */
178 if (mp->m_qflags == 0) {
179 mutex_unlock(&q->qi_quotaofflock);
180 xfs_qm_destroy_quotainfo(mp);
/*
185 * Release our quotainode references if we don't need them anymore.
 */
187 if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
188 xfs_irele(q->qi_uquotaip);
189 q->qi_uquotaip = NULL;
191 if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
192 xfs_irele(q->qi_gquotaip);
193 q->qi_gquotaip = NULL;
195 if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
196 xfs_irele(q->qi_pquotaip);
197 q->qi_pquotaip = NULL;
201 mutex_unlock(&q->qi_quotaofflock);
/*
 * Truncate the data fork of one quota inode (given by inode number) back
 * to zero extents. NOTE(review): error-return branches and closing braces
 * appear elided from this excerpt; verify against the complete file.
 */
206 xfs_qm_scall_trunc_qfile(
207 struct xfs_mount *mp,
210 struct xfs_inode *ip;
211 struct xfs_trans *tp;
/* NULLFSINO means no quota inode of this type exists. */
214 if (ino == NULLFSINO)
217 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
221 xfs_ilock(ip, XFS_IOLOCK_EXCL);
223 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
/* Transaction allocation failed: drop the IOLOCK before bailing out. */
225 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
229 xfs_ilock(ip, XFS_ILOCK_EXCL);
230 xfs_trans_ijoin(tp, ip, 0);
233 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/* Remove all data-fork extents from offset 0 onward. */
235 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
/* Truncation failed: back out the transaction. */
237 xfs_trans_cancel(tp);
241 ASSERT(ip->i_d.di_nextents == 0);
243 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
244 error = xfs_trans_commit(tp);
247 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
/*
 * Truncate the on-disk quota file for each quota type requested in the
 * flags argument. Flag sets that name no valid quota type (or a
 * filesystem without the quota feature) are rejected with a debug
 * message. NOTE(review): the error-propagation lines between the per-type
 * calls appear elided from this excerpt.
 */
254 xfs_qm_scall_trunc_qfiles(
260 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
261 (flags & ~XFS_DQ_ALLTYPES)) {
262 xfs_debug(mp, "%s: flags=%x m_qflags=%x",
263 __func__, flags, mp->m_qflags);
267 if (flags & XFS_DQ_USER) {
268 error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
272 if (flags & XFS_DQ_GROUP) {
273 error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
277 if (flags & XFS_DQ_PROJ)
278 error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
/*
 * NOTE(review): interior lines (early returns, closing braces) appear
 * elided from this excerpt; verify against the complete file.
 */
/*
284 * Switch on (a given) quota enforcement for a filesystem. This takes
285 * effect immediately.
286 * (Switching on quota accounting must be done at mount time.)
 */
289 xfs_qm_scall_quotaon(
296 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/*
298 * Switching on quota accounting must be done at mount time.
 */
300 flags &= ~(XFS_ALL_QUOTA_ACCT);
303 xfs_debug(mp, "%s: zero flags, m_qflags=%x",
304 __func__, mp->m_qflags);
/*
309 * Can't enforce without accounting. We check the superblock
310 * qflags here instead of m_qflags because rootfs can have
311 * quota acct on ondisk without m_qflags' knowing.
 */
313 if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
314 (flags & XFS_UQUOTA_ENFD)) ||
315 ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
316 (flags & XFS_GQUOTA_ENFD)) ||
317 ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
318 (flags & XFS_PQUOTA_ENFD))) {
320 "%s: Can't enforce without acct, flags=%x sbflags=%x",
321 __func__, flags, mp->m_sb.sb_qflags);
/*
325 * If everything's up to-date incore, then don't waste time.
 */
327 if ((mp->m_qflags & flags) == flags)
/*
331 * Change sb_qflags on disk but not incore mp->qflags
332 * if this is the root filesystem.
 */
334 spin_lock(&mp->m_sb_lock);
335 qf = mp->m_sb.sb_qflags;
336 mp->m_sb.sb_qflags = qf | flags;
337 spin_unlock(&mp->m_sb_lock);
/*
340 * There's nothing to change if it's the same.
 */
342 if ((qf & flags) == flags)
345 error = xfs_sync_sb(mp, false);
/*
349 * If we aren't trying to switch on quota enforcement, we are done.
 */
/* Accounting state on disk and incore must agree before enforcing. */
351 if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
352 (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
353 ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
354 (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
355 ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
356 (mp->m_qflags & XFS_GQUOTA_ACCT)))
359 if (! XFS_IS_QUOTA_RUNNING(mp))
/*
363 * Switch on quota enforcement in core.
 */
365 mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
366 mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
367 mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
/* Field-mask bits this setter understands: limits, timers, warnings. */
372 #define XFS_QC_MASK \
373 (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
/*
 * NOTE(review): interior lines (early returns, closing braces, else
 * branches) appear elided from this excerpt; verify against the complete
 * file.
 */
/*
376 * Adjust quota limits, and start/stop timers accordingly.
 */
379 xfs_qm_scall_setqlim(
380 struct xfs_mount *mp,
383 struct qc_dqblk *newlim)
385 struct xfs_quotainfo *q = mp->m_quotainfo;
386 struct xfs_disk_dquot *ddq;
387 struct xfs_dquot *dqp;
388 struct xfs_trans *tp;
389 struct xfs_def_quota *defq;
391 xfs_qcnt_t hard, soft;
/* Reject unknown field bits; nothing to do if no known bit is set. */
393 if (newlim->d_fieldmask & ~XFS_QC_MASK)
395 if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
/*
399 * We don't want to race with a quotaoff so take the quotaoff lock.
400 * We don't hold an inode lock, so there's nothing else to stop
401 * a quotaoff from happening.
 */
403 mutex_lock(&q->qi_quotaofflock);
/*
406 * Get the dquot (locked) before we start, as we need to do a
407 * transaction to allocate it if it doesn't exist. Once we have the
408 * dquot, unlock it so we can start the next transaction safely. We hold
409 * a reference to the dquot, so it's safe to do this unlock/lock without
410 * it being reclaimed in the mean time.
 */
412 error = xfs_qm_dqget(mp, id, type, true, &dqp);
414 ASSERT(error != -ENOENT);
418 defq = xfs_get_defquota(dqp, q);
421 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
426 xfs_trans_dqjoin(tp, dqp);
/*
430 * Make sure that hardlimits are >= soft limits before changing.
 */
/* Blocks: new value from caller (bytes -> FSBs) or keep on-disk value. */
432 hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
433 (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
434 be64_to_cpu(ddq->d_blk_hardlimit);
435 soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
436 (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
437 be64_to_cpu(ddq->d_blk_softlimit);
438 if (hard == 0 || hard >= soft) {
439 ddq->d_blk_hardlimit = cpu_to_be64(hard);
440 ddq->d_blk_softlimit = cpu_to_be64(soft);
441 xfs_dquot_set_prealloc_limits(dqp);
443 defq->bhardlimit = hard;
444 defq->bsoftlimit = soft;
447 xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
/* Realtime blocks: same pattern as regular blocks above. */
449 hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
450 (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
451 be64_to_cpu(ddq->d_rtb_hardlimit);
452 soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
453 (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
454 be64_to_cpu(ddq->d_rtb_softlimit);
455 if (hard == 0 || hard >= soft) {
456 ddq->d_rtb_hardlimit = cpu_to_be64(hard);
457 ddq->d_rtb_softlimit = cpu_to_be64(soft);
459 defq->rtbhardlimit = hard;
460 defq->rtbsoftlimit = soft;
463 xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
/* Inodes: counts are used directly, no unit conversion. */
466 hard = (newlim->d_fieldmask & QC_INO_HARD) ?
467 (xfs_qcnt_t) newlim->d_ino_hardlimit :
468 be64_to_cpu(ddq->d_ino_hardlimit);
469 soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
470 (xfs_qcnt_t) newlim->d_ino_softlimit :
471 be64_to_cpu(ddq->d_ino_softlimit);
472 if (hard == 0 || hard >= soft) {
473 ddq->d_ino_hardlimit = cpu_to_be64(hard);
474 ddq->d_ino_softlimit = cpu_to_be64(soft);
476 defq->ihardlimit = hard;
477 defq->isoftlimit = soft;
480 xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
/*
484 * Update warnings counter(s) if requested
 */
486 if (newlim->d_fieldmask & QC_SPC_WARNS)
487 ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
488 if (newlim->d_fieldmask & QC_INO_WARNS)
489 ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
490 if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
491 ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
/*
495 * Timelimits for the super user set the relative time
496 * the other users can be over quota for this file system.
497 * If it is zero a default is used. Ditto for the default
498 * soft and hard limit values (already done, above), and
 */
501 if (newlim->d_fieldmask & QC_SPC_TIMER) {
502 q->qi_btimelimit = newlim->d_spc_timer;
503 ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
505 if (newlim->d_fieldmask & QC_INO_TIMER) {
506 q->qi_itimelimit = newlim->d_ino_timer;
507 ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
509 if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
510 q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
511 ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
513 if (newlim->d_fieldmask & QC_SPC_WARNS)
514 q->qi_bwarnlimit = newlim->d_spc_warns;
515 if (newlim->d_fieldmask & QC_INO_WARNS)
516 q->qi_iwarnlimit = newlim->d_ino_warns;
517 if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
518 q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
/*
521 * If the user is now over quota, start the timelimit.
522 * The user will not be 'warned'.
523 * Note that we keep the timers ticking, whether enforcement
524 * is on or off. We don't really want to bother with iterating
525 * over all ondisk dquots and turning the timers on/off.
 */
527 xfs_qm_adjust_dqtimers(mp, ddq);
529 dqp->dq_flags |= XFS_DQ_DIRTY;
530 xfs_trans_log_dquot(tp, dqp);
532 error = xfs_trans_commit(tp);
537 mutex_unlock(&q->qi_quotaofflock);
/*
 * Log the QUOTAOFF "end" item that pairs with the "start" item in
 * @startqoff, committed synchronously. NOTE(review): error-return lines
 * appear elided from this excerpt.
 */
542 xfs_qm_log_quotaoff_end(
544 xfs_qoff_logitem_t *startqoff,
549 xfs_qoff_logitem_t *qoffi;
551 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
555 qoffi = xfs_trans_get_qoff_item(tp, startqoff,
556 flags & XFS_ALL_QUOTA_ACCT);
557 xfs_trans_log_quotaoff_item(tp, qoffi);
/*
560 * We have to make sure that the transaction is secure on disk before we
561 * return and actually stop quota accounting. So, make it synchronous.
562 * We don't care about quotoff's performance.
 */
564 xfs_trans_set_sync(tp);
565 return xfs_trans_commit(tp);
/*
 * NOTE(review): the opening line(s) of this function -- presumably
 * xfs_qm_log_quotaoff, per the forward declaration near the top of the
 * file -- appear elided from this excerpt. It logs the QUOTAOFF "start"
 * item, updates sb_qflags under the sb lock, and commits synchronously.
 */
572 xfs_qoff_logitem_t **qoffstartp,
577 xfs_qoff_logitem_t *qoffi;
581 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
585 qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
586 xfs_trans_log_quotaoff_item(tp, qoffi);
588 spin_lock(&mp->m_sb_lock);
589 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
590 spin_unlock(&mp->m_sb_lock);
/*
595 * We have to make sure that the transaction is secure on disk before we
596 * return and actually stop quota accounting. So, make it synchronous.
597 * We don't care about quotoff's performance.
 */
599 xfs_trans_set_sync(tp);
600 error = xfs_trans_commit(tp);
609 /* Fill out the quota context. */
/*
 * Convert an xfs_dquot's on-disk core plus incore reservation counters
 * into the VFS qc_dqblk representation (block limits/usage converted from
 * filesystem blocks to bytes). NOTE(review): closing braces appear elided
 * from this excerpt.
 */
611 xfs_qm_scall_getquota_fill_qc(
612 struct xfs_mount *mp,
614 const struct xfs_dquot *dqp,
615 struct qc_dqblk *dst)
617 memset(dst, 0, sizeof(*dst));
618 dst->d_spc_hardlimit =
619 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
620 dst->d_spc_softlimit =
621 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
622 dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
623 dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
/* Usage comes from the incore reserved counts, not q_core. */
624 dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
625 dst->d_ino_count = dqp->q_res_icount;
626 dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
627 dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
628 dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
629 dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
630 dst->d_rt_spc_hardlimit =
631 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
632 dst->d_rt_spc_softlimit =
633 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
634 dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
635 dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
636 dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
/*
639 * Internally, we don't reset all the timers when quota enforcement
640 * gets turned off. No need to confuse the user level code,
641 * so return zeroes in that case.
 */
643 if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
644 dqp->q_core.d_flags == XFS_DQ_USER) ||
645 (!XFS_IS_GQUOTA_ENFORCED(mp) &&
646 dqp->q_core.d_flags == XFS_DQ_GROUP) ||
647 (!XFS_IS_PQUOTA_ENFORCED(mp) &&
648 dqp->q_core.d_flags == XFS_DQ_PROJ)) {
649 dst->d_spc_timer = 0;
650 dst->d_ino_timer = 0;
651 dst->d_rt_spc_timer = 0;
/* Sanity checks: an enforced, over-soft-limit dquot should have a timer. */
655 if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
656 (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
657 (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
658 dqp->q_core.d_id != 0) {
659 if ((dst->d_space > dst->d_spc_softlimit) &&
660 (dst->d_spc_softlimit > 0)) {
661 ASSERT(dst->d_spc_timer != 0);
663 if ((dst->d_ino_count > dst->d_ino_softlimit) &&
664 (dst->d_ino_softlimit > 0)) {
665 ASSERT(dst->d_ino_timer != 0);
671 /* Return the quota information for the dquot matching id. */
/*
 * NOTE(review): error-return lines and the dqput/cleanup tail appear
 * elided from this excerpt.
 */
673 xfs_qm_scall_getquota(
674 struct xfs_mount *mp,
677 struct qc_dqblk *dst)
679 struct xfs_dquot *dqp;
/*
683 * Try to get the dquot. We don't want it allocated on disk, so don't
684 * set doalloc. If it doesn't exist, we'll get ENOENT back.
 */
686 error = xfs_qm_dqget(mp, id, type, false, &dqp);
/*
691 * If everything's NULL, this dquot doesn't quite exist as far as
692 * our utility programs are concerned.
 */
694 if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
699 xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
/*
707 * Return the quota information for the first initialized dquot whose id
708 * is at least as high as id.
 */
/*
 * NOTE(review): error-return lines and the dqput/cleanup tail appear
 * elided from this excerpt.
 */
711 xfs_qm_scall_getquota_next(
712 struct xfs_mount *mp,
715 struct qc_dqblk *dst)
717 struct xfs_dquot *dqp;
720 error = xfs_qm_dqget_next(mp, *id, type, &dqp);
724 /* Fill in the ID we actually read from disk */
725 *id = be32_to_cpu(dqp->q_core.d_id);
727 xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);
/*
 * NOTE(review): the opening line(s) of this function -- presumably
 * xfs_dqrele_inode, the per-inode callback passed to
 * xfs_inode_ag_iterator_flags() below -- appear elided from this excerpt.
 * Under XFS_ILOCK_EXCL it releases the inode's udquot/gdquot/pdquot
 * references selected by the flags argument.
 */
735 struct xfs_inode *ip,
739 /* skip quota inodes */
740 if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
741 ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
742 ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
743 ASSERT(ip->i_udquot == NULL);
744 ASSERT(ip->i_gdquot == NULL);
745 ASSERT(ip->i_pdquot == NULL);
749 xfs_ilock(ip, XFS_ILOCK_EXCL);
750 if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
751 xfs_qm_dqrele(ip->i_udquot);
754 if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
755 xfs_qm_dqrele(ip->i_gdquot);
758 if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
759 xfs_qm_dqrele(ip->i_pdquot);
762 xfs_iunlock(ip, XFS_ILOCK_EXCL);
/*
768 * Go thru all the inodes in the file system, releasing their dquots.
770 * Note that the mount structure gets modified to indicate that quotas are off
771 * AFTER this, in the case of quotaoff.
 */
774 xfs_qm_dqrele_all_inodes(
775 struct xfs_mount *mp,
/* Walks every AG's incore inodes, applying xfs_dqrele_inode to each. */
778 ASSERT(mp->m_quotainfo);
779 xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
780 XFS_AGITER_INEW_WAIT);