// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"

STATIC void     xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(dqp->q_logitem.qli_dquot == dqp);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanking new transaction
 */
void
xfs_trans_dup_dqinfo(
        struct xfs_trans        *otp,
        struct xfs_trans        *ntp)
{
        struct xfs_dqtrx        *oq, *nq;
        int                     i, j;
        struct xfs_dqtrx        *oqa, *nqa;
        uint64_t                blk_res_used;

        if (!otp->t_dqinfo)
                return;

        xfs_trans_alloc_dqinfo(ntp);

        /*
         * Because the quota blk reservation is carried forward,
         * it is also necessary to carry forward the DQ_DIRTY flag.
         */
        if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
                ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                oqa = otp->t_dqinfo->dqs[j];
                nqa = ntp->t_dqinfo->dqs[j];
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        blk_res_used = 0;

                        if (oqa[i].qt_dquot == NULL)
                                break;
                        oq = &oqa[i];
                        nq = &nqa[i];

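                        /*
                         * Keep just enough of the old transaction's block
                         * reservation to cover the blocks it has already
                         * used (a positive bcount delta); the rest is
                         * handed to the new transaction below.
                         */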
                        if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
                                blk_res_used = oq->qt_bcount_delta;

                        nq->qt_dquot = oq->qt_dquot;
                        nq->qt_bcount_delta = nq->qt_icount_delta = 0;
                        nq->qt_rtbcount_delta = 0;

                        /*
                         * Transfer whatever is left of the reservations.
                         */
                        nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
                        oq->qt_blk_res = blk_res_used;

                        nq->qt_rtblk_res = oq->qt_rtblk_res -
                                oq->qt_rtblk_res_used;
                        oq->qt_rtblk_res = oq->qt_rtblk_res_used;

                        nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
                        oq->qt_ino_res = oq->qt_ino_res_used;

                }
        }
}

/*
 * Wrap around mod_dquot to account for user, group and project quotas.
 */
void
xfs_trans_mod_dquot_byino(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            field,
        int64_t         delta)
{
        xfs_mount_t     *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) ||
            !XFS_IS_QUOTA_ON(mp) ||
            xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
                return;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
        if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
        if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

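/*
 * Find the dqtrx slot in this transaction that tracks the given dquot:
 * either the slot already assigned to it, or the first free slot of the
 * matching quota type.  Returns NULL if the dquot type is not recognized
 * or every slot of that type is taken by some other dquot.
 */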
STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp)
{
        int                     i;
        struct xfs_dqtrx        *qa;

        if (XFS_QM_ISUDQ(dqp))
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
        else if (XFS_QM_ISGDQ(dqp))
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
        else if (XFS_QM_ISPDQ(dqp))
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
        else
                return NULL;

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                if (qa[i].qt_dquot == NULL ||
                    qa[i].qt_dquot == dqp)
                        return &qa[i];
        }

        return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
        struct xfs_trans        *tp,
        struct xfs_dquot        *dqp,
        uint                    field,
        int64_t                 delta)
{
        struct xfs_dqtrx        *qtrx;

        ASSERT(tp);
        ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
        qtrx = NULL;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);
        /*
         * Find either the first free slot or the slot that belongs
         * to this dquot.
         */
        qtrx = xfs_trans_get_dqtrx(tp, dqp);
        ASSERT(qtrx);
        if (qtrx->qt_dquot == NULL)
                qtrx->qt_dquot = dqp;

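        /*
         * The qt_*_res fields below accumulate blocks/inodes reserved up
         * front, while the *_delta fields accumulate actual usage changes;
         * both are folded back into the dquot at commit time by
         * xfs_trans_apply_dquot_deltas().
         */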
        switch (field) {

                /*
                 * regular disk blk reservation
                 */
              case XFS_TRANS_DQ_RES_BLKS:
                qtrx->qt_blk_res += delta;
                break;

                /*
                 * inode reservation
                 */
              case XFS_TRANS_DQ_RES_INOS:
                qtrx->qt_ino_res += delta;
                break;

                /*
                 * disk blocks used.
                 */
              case XFS_TRANS_DQ_BCOUNT:
                qtrx->qt_bcount_delta += delta;
                break;

              case XFS_TRANS_DQ_DELBCOUNT:
                qtrx->qt_delbcnt_delta += delta;
                break;

                /*
                 * Inode Count
                 */
              case XFS_TRANS_DQ_ICOUNT:
                if (qtrx->qt_ino_res && delta > 0) {
                        qtrx->qt_ino_res_used += delta;
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                }
                qtrx->qt_icount_delta += delta;
                break;

                /*
                 * rtblk reservation
                 */
              case XFS_TRANS_DQ_RES_RTBLKS:
                qtrx->qt_rtblk_res += delta;
                break;

                /*
                 * rtblk count
                 */
              case XFS_TRANS_DQ_RTBCOUNT:
                if (qtrx->qt_rtblk_res && delta > 0) {
                        qtrx->qt_rtblk_res_used += delta;
                        ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
                }
                qtrx->qt_rtbcount_delta += delta;
                break;

              case XFS_TRANS_DQ_DELRTBCOUNT:
                qtrx->qt_delrtb_delta += delta;
                break;

              default:
                ASSERT(0);
        }
        tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}


/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that the
 * highest number of dquots of one type - usr, grp and prj - involved in a
 * transaction is 3 so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
        struct xfs_trans        *tp,
        struct xfs_dqtrx        *q)
{
        ASSERT(q[0].qt_dquot != NULL);
        if (q[1].qt_dquot == NULL) {
                xfs_dqlock(q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
        } else {
                ASSERT(XFS_QM_TRANS_MAXDQS == 2);
                xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[1].qt_dquot);
        }
}


/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
        struct xfs_trans        *tp)
{
        int                     i, j;
        struct xfs_dquot        *dqp;
        struct xfs_dqtrx        *qtrx, *qa;
        struct xfs_disk_dquot   *d;
        int64_t                 totalbdelta;
        int64_t                 totalrtbdelta;

        if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        ASSERT(tp->t_dqinfo);
        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];
                if (qa[0].qt_dquot == NULL)
                        continue;

                /*
                 * Lock all of the dquots and join them to the transaction.
                 */
                xfs_trans_dqlockedjoin(tp, qa);

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * The array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;

                        ASSERT(XFS_DQ_IS_LOCKED(dqp));

                        /*
                         * adjust the actual number of blocks used
                         */
                        d = &dqp->q_core;

                        /*
                         * The issue here is - sometimes we don't make a blkquota
                         * reservation intentionally to be fair to users
                         * (when the amount is small). On the other hand,
                         * delayed allocs do make reservations, but that's
                         * outside of a transaction, so we have no
                         * idea how much was really reserved.
                         * So, here we've accumulated delayed allocation blks and
                         * non-delay blks. The assumption is that the
                         * delayed ones are always reserved (outside of a
                         * transaction), and the others may or may not have
                         * quota reservations.
                         */
                        totalbdelta = qtrx->qt_bcount_delta +
                                qtrx->qt_delbcnt_delta;
                        totalrtbdelta = qtrx->qt_rtbcount_delta +
                                qtrx->qt_delrtb_delta;
#ifdef DEBUG
                        if (totalbdelta < 0)
                                ASSERT(be64_to_cpu(d->d_bcount) >=
                                       -totalbdelta);

                        if (totalrtbdelta < 0)
                                ASSERT(be64_to_cpu(d->d_rtbcount) >=
                                       -totalrtbdelta);

                        if (qtrx->qt_icount_delta < 0)
                                ASSERT(be64_to_cpu(d->d_icount) >=
                                       -qtrx->qt_icount_delta);
#endif
                        if (totalbdelta)
                                be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

                        if (qtrx->qt_icount_delta)
                                be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

                        if (totalrtbdelta)
                                be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

                        /*
                         * Get any default limits in use.
                         * Start/reset the timer(s) if needed.
                         */
                        if (d->d_id) {
                                xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
                                xfs_qm_adjust_dqtimers(tp->t_mountp, d);
                        }

                        dqp->dq_flags |= XFS_DQ_DIRTY;
                        /*
                         * add this to the list of items to get logged
                         */
                        xfs_trans_log_dquot(tp, dqp);
                        /*
                         * Take off what's left of the original reservation.
                         * In case of delayed allocations, there's no
                         * reservation that a transaction structure knows of.
                         */
                        if (qtrx->qt_blk_res != 0) {
                                uint64_t        blk_res_used = 0;

                                if (qtrx->qt_bcount_delta > 0)
                                        blk_res_used = qtrx->qt_bcount_delta;

                                if (qtrx->qt_blk_res != blk_res_used) {
                                        if (qtrx->qt_blk_res > blk_res_used)
                                                dqp->q_res_bcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_blk_res -
                                                         blk_res_used);
                                        else
                                                dqp->q_res_bcount -= (xfs_qcnt_t)
                                                        (blk_res_used -
                                                         qtrx->qt_blk_res);
                                }
                        } else {
                                /*
                                 * These blks were never reserved, either inside
                                 * a transaction or outside one (in a delayed
                                 * allocation). Also, this isn't always a
                                 * negative number since we sometimes
                                 * deliberately skip quota reservations.
                                 */
                                if (qtrx->qt_bcount_delta) {
                                        dqp->q_res_bcount +=
                                              (xfs_qcnt_t)qtrx->qt_bcount_delta;
                                }
                        }
                        /*
                         * Adjust the RT reservation.
                         */
                        if (qtrx->qt_rtblk_res != 0) {
                                if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
                                        if (qtrx->qt_rtblk_res >
                                            qtrx->qt_rtblk_res_used)
                                               dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                                       (qtrx->qt_rtblk_res -
                                                        qtrx->qt_rtblk_res_used);
                                        else
                                               dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                                       (qtrx->qt_rtblk_res_used -
                                                        qtrx->qt_rtblk_res);
                                }
                        } else {
                                if (qtrx->qt_rtbcount_delta)
                                        dqp->q_res_rtbcount +=
                                            (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
                        }

                        /*
                         * Adjust the inode reservation.
                         */
                        if (qtrx->qt_ino_res != 0) {
                                ASSERT(qtrx->qt_ino_res >=
                                       qtrx->qt_ino_res_used);
                                if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
                                        dqp->q_res_icount -= (xfs_qcnt_t)
                                                (qtrx->qt_ino_res -
                                                 qtrx->qt_ino_res_used);
                        } else {
                                if (qtrx->qt_icount_delta)
                                        dqp->q_res_icount +=
                                            (xfs_qcnt_t)qtrx->qt_icount_delta;
                        }

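                        /*
                         * The incore reservation counters must never drop
                         * below the on-disk usage they are supposed to
                         * cover.
                         */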
                        ASSERT(dqp->q_res_bcount >=
                                be64_to_cpu(dqp->q_core.d_bcount));
                        ASSERT(dqp->q_res_icount >=
                                be64_to_cpu(dqp->q_core.d_icount));
                        ASSERT(dqp->q_res_rtbcount >=
                                be64_to_cpu(dqp->q_core.d_rtbcount));
                }
        }
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
        xfs_trans_t             *tp)
{
        int                     i, j;
        xfs_dquot_t             *dqp;
        struct xfs_dqtrx        *qtrx, *qa;
        bool                    locked;

        if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * We assume that the array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;
                        /*
                         * Unreserve the original reservation. We don't care
                         * about the number of blocks used field, or deltas.
                         * Also we don't bother to zero the fields.
                         */
                        locked = false;
                        if (qtrx->qt_blk_res) {
                                xfs_dqlock(dqp);
                                locked = true;
                                dqp->q_res_bcount -=
                                        (xfs_qcnt_t)qtrx->qt_blk_res;
                        }
                        if (qtrx->qt_ino_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_res_icount -=
                                        (xfs_qcnt_t)qtrx->qt_ino_res;
                        }

                        if (qtrx->qt_rtblk_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_res_rtbcount -=
                                        (xfs_qcnt_t)qtrx->qt_rtblk_res;
                        }
                        if (locked)
                                xfs_dqunlock(dqp);

                }
        }
}

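/*
 * Send a netlink quota warning to userspace, mapping the XFS dquot type
 * onto the corresponding VFS quota type.
 */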
STATIC void
xfs_quota_warn(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        int                     type)
{
        enum quota_type qtype;

        if (dqp->dq_flags & XFS_DQ_PROJ)
                qtype = PRJQUOTA;
        else if (dqp->dq_flags & XFS_DQ_USER)
                qtype = USRQUOTA;
        else
                qtype = GRPQUOTA;

        quota_send_warning(make_kqid(&init_user_ns, qtype,
                                     be32_to_cpu(dqp->q_core.d_id)),
                           mp->m_super->s_dev, type);
}

/*
 * This reserves disk blocks and inodes against a dquot. The dquot is
 * locked and unlocked internally; flags indicate whether the blk
 * reservation is for RT or regular blocks.
 * Sending in the XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
        xfs_trans_t     *tp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *dqp,
        int64_t         nblks,
        long            ninos,
        uint            flags)
{
        xfs_qcnt_t      hardlimit;
        xfs_qcnt_t      softlimit;
        time_t          timer;
        xfs_qwarncnt_t  warns;
        xfs_qwarncnt_t  warnlimit;
        xfs_qcnt_t      total_count;
        xfs_qcnt_t      *resbcountp;
        xfs_quotainfo_t *q = mp->m_quotainfo;
        struct xfs_def_quota    *defq;


        xfs_dqlock(dqp);

        defq = xfs_get_defquota(dqp, q);

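        /*
         * Pick up the limits to enforce below: the dquot's own limits when
         * they are set, otherwise the defaults for this mount.
         */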
        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
                if (!hardlimit)
                        hardlimit = defq->bhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
                if (!softlimit)
                        softlimit = defq->bsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_btimer);
                warns = be16_to_cpu(dqp->q_core.d_bwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
                resbcountp = &dqp->q_res_bcount;
        } else {
                ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
                hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
                if (!hardlimit)
                        hardlimit = defq->rtbhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
                if (!softlimit)
                        softlimit = defq->rtbsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
                warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
                resbcountp = &dqp->q_res_rtbcount;
        }

        if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
            dqp->q_core.d_id &&
            ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
             (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
             (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
                if (nblks > 0) {
                        /*
                         * dquot is locked already. See if we'd go over the
                         * hardlimit or exceed the timelimit if we allocate
                         * nblks.
                         */
                        total_count = *resbcountp + nblks;
                        if (hardlimit && total_count > hardlimit) {
                                xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
                                goto error_return;
                        }
                        if (softlimit && total_count > softlimit) {
                                if ((timer != 0 && get_seconds() > timer) ||
                                    (warns != 0 && warns >= warnlimit)) {
                                        xfs_quota_warn(mp, dqp,
                                                       QUOTA_NL_BSOFTLONGWARN);
                                        goto error_return;
                                }

                                xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
                        }
                }
                if (ninos > 0) {
                        total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
                        timer = be32_to_cpu(dqp->q_core.d_itimer);
                        warns = be16_to_cpu(dqp->q_core.d_iwarns);
                        warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
                        hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
                        if (!hardlimit)
                                hardlimit = defq->ihardlimit;
                        softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
                        if (!softlimit)
                                softlimit = defq->isoftlimit;

                        if (hardlimit && total_count > hardlimit) {
                                xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
                                goto error_return;
                        }
                        if (softlimit && total_count > softlimit) {
                                if ((timer != 0 && get_seconds() > timer) ||
                                    (warns != 0 && warns >= warnlimit)) {
                                        xfs_quota_warn(mp, dqp,
                                                       QUOTA_NL_ISOFTLONGWARN);
                                        goto error_return;
                                }
                                xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
                        }
                }
        }

        /*
         * Change the reservation, but not the actual usage.
         * Note that q_res_bcount = q_core.d_bcount + resv
         */
        (*resbcountp) += (xfs_qcnt_t)nblks;
        if (ninos != 0)
                dqp->q_res_icount += (xfs_qcnt_t)ninos;

        /*
         * note the reservation amt in the trans struct too,
         * so that the transaction knows how much was reserved by
         * it against this particular dquot.
         * We don't do this when we are reserving for a delayed allocation,
         * because we don't have the luxury of a transaction envelope then.
         */
        if (tp) {
                ASSERT(tp->t_dqinfo);
                ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
                if (nblks != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            flags & XFS_QMOPT_RESBLK_MASK,
                                            nblks);
                if (ninos != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            XFS_TRANS_DQ_RES_INOS,
                                            ninos);
        }
        ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
        ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
        ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

        xfs_dqunlock(dqp);
        return 0;

error_return:
        xfs_dqunlock(dqp);
        if (flags & XFS_QMOPT_ENOSPC)
                return -ENOSPC;
        return -EDQUOT;
}


/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *         XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *         XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *         XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
        struct xfs_trans        *tp,
        struct xfs_mount        *mp,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp,
        struct xfs_dquot        *pdqp,
        int64_t                 nblks,
        long                    ninos,
        uint                    flags)
{
        int             error;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        if (tp && tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

        if (udqp) {
                error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
                                        (flags & ~XFS_QMOPT_ENOSPC));
                if (error)
                        return error;
        }

        if (gdqp) {
                error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_usr;
        }

        if (pdqp) {
                error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_grp;
        }

        /*
         * Didn't change anything critical, so, no need to log
         */
        return 0;

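/*
 * The unwind paths below re-run the reservation with a negated count and
 * XFS_QMOPT_FORCE_RES set, so backing out a prior reservation cannot
 * itself fail the limit checks.
 */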
unwind_grp:
        flags |= XFS_QMOPT_FORCE_RES;
        if (gdqp)
                xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
        flags |= XFS_QMOPT_FORCE_RES;
        if (udqp)
                xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
        return error;
}


/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int64_t                 nblks,
        long                    ninos,
        uint                    flags)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
        if (XFS_IS_PQUOTA_ON(mp))
                flags |= XFS_QMOPT_ENOSPC;

        ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_RTBLKS ||
               (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_BLKS);

        /*
         * Reserve nblks against these dquots, with trans as the mediator.
         */
        return xfs_trans_reserve_quota_bydquots(tp, mp,
                                                ip->i_udquot, ip->i_gdquot,
                                                ip->i_pdquot,
                                                nblks, ninos, flags);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
xfs_qoff_logitem_t *
xfs_trans_get_qoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *startqoff,
        uint                    flags)
{
        xfs_qoff_logitem_t      *q;

        ASSERT(tp != NULL);

        q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
        ASSERT(q != NULL);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &q->qql_item);
        return q;
}


/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *qlp)
{
        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

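/*
 * Allocate the per-transaction dquot bookkeeping structure from its
 * dedicated zone; it is released again by xfs_trans_free_dqinfo() below.
 */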
STATIC void
xfs_trans_alloc_dqinfo(
        xfs_trans_t     *tp)
{
        tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0);
}

void
xfs_trans_free_dqinfo(
        xfs_trans_t     *tp)
{
        if (!tp->t_dqinfo)
                return;
        kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
        tp->t_dqinfo = NULL;
}