// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_error.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty.  In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty.  However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim.  Hence the
 * XFS_TRANS_DIRTY flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction.
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		}
	}
}

/*
 * Wrapper around xfs_trans_mod_dquot() that accounts the change against
 * the inode's attached user, group and project dquots.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

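/*
 * Locate the dqtrx slot that tracks the given dquot, based on the dquot's
 * type (user, group or project).  Returns the matching slot if the dquot
 * is already tracked, otherwise the first free slot, or NULL if the type
 * is unrecognized or all slots are taken.
 */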
STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	if (XFS_QM_ISUDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
	else if (XFS_QM_ISGDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
	else if (XFS_QM_ISPDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
	else
		return NULL;

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	switch (field) {
	/*
	 * regular disk blk reservation
	 */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/*
	 * inode reservation
	 */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/*
	 * disk blocks used.
	 */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/*
	 * Inode Count
	 */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/*
	 * rtblk reservation
	 */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/*
	 * rtblk count
	 */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}
	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}

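/*
 * Illustrative sketch (not taken from any real caller): a transaction that
 * reserves ten blocks against a dquot and later accounts two of them as
 * actually allocated would make these two calls:
 *
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 10);
 *	...
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 2);
 *
 * The deltas only accumulate in the dqtrx slot here; they are applied to
 * the incore dquot at commit time by xfs_trans_apply_dquot_deltas().
 */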

/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that
 * at most two dquots of one type (usr, grp or prj) can be involved in a
 * transaction, so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquots to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * The dquots are left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	struct xfs_disk_dquot	*d;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;

			/*
			 * The issue here is that sometimes we intentionally
			 * skip the blkquota reservation to be fair to users
			 * (when the amount is small).  Delayed allocs, on the
			 * other hand, do make reservations, but outside of a
			 * transaction, so we have no idea how much was really
			 * reserved.  Hence we accumulate delayed allocation
			 * blks and non-delay blks separately here, on the
			 * assumption that the delayed ones are always reserved
			 * (outside of a transaction) and the others may or
			 * may not have quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(be64_to_cpu(d->d_bcount) >=
				       -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(be64_to_cpu(d->d_rtbcount) >=
				       -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(be64_to_cpu(d->d_icount) >=
				       -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

			if (qtrx->qt_icount_delta)
				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

			if (totalrtbdelta)
				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (d->d_id) {
				xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
				xfs_qm_adjust_dqtimers(tp->t_mountp, d);
			}

			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				uint64_t	blk_res_used = 0;

				if (qtrx->qt_bcount_delta > 0)
					blk_res_used = qtrx->qt_bcount_delta;

				if (qtrx->qt_blk_res != blk_res_used) {
					if (qtrx->qt_blk_res > blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either inside
				 * a transaction or outside one (in a delayed
				 * allocation).  Also, this isn't always a
				 * negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
						(xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
			/*
			 * Adjust the RT reservation.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res -
							 qtrx->qt_rtblk_res_used);
					else
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res_used -
							 qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
						(xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}

			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
						(xfs_qcnt_t)qtrx->qt_icount_delta;
			}

			ASSERT(dqp->q_res_bcount >=
			       be64_to_cpu(dqp->q_core.d_bcount));
			ASSERT(dqp->q_res_icount >=
			       be64_to_cpu(dqp->q_core.d_icount));
			ASSERT(dqp->q_res_rtbcount >=
			       be64_to_cpu(dqp->q_core.d_rtbcount));
		}
	}
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted.  If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation.  We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);
		}
	}
}

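/*
 * Send a netlink quota warning for this dquot, translating the XFS dquot
 * type into the VFS quota type that quota_send_warning() expects.
 */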
STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	if (dqp->dq_flags & XFS_DQ_PROJ)
		qtype = PRJQUOTA;
	else if (dqp->dq_flags & XFS_DQ_USER)
		qtype = USRQUOTA;
	else
		qtype = GRPQUOTA;

	quota_send_warning(make_kqid(&init_user_ns, qtype,
				     be32_to_cpu(dqp->q_core.d_id)),
			   mp->m_super->s_dev, type);
}

/*
 * This reserves disk blocks and inodes against a dquot.  The dquot is
 * locked and unlocked internally, so the caller must not hold it locked.
 * Flags indicate whether the blk reservation is for RT or regular blocks.
 * Sending in the XFS_QMOPT_FORCE_RES flag skips the quota limit checks.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	xfs_qcnt_t		hardlimit;
	xfs_qcnt_t		softlimit;
	time64_t		timer;
	xfs_qwarncnt_t		warns;
	xfs_qwarncnt_t		warnlimit;
	xfs_qcnt_t		total_count;
	xfs_qcnt_t		*resbcountp;
	xfs_quotainfo_t		*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(dqp, q);

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = defq->bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = defq->bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer) +
			((u64)dqp->q_core.d_btimer_high << 32);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = defq->rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = defq->rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer) +
			((u64)dqp->q_core.d_rtbtimer_high << 32);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
		resbcountp = &dqp->q_res_rtbcount;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
	     (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
		if (nblks > 0) {
			/*
			 * dquot is locked already.  See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			total_count = *resbcountp + nblks;
			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 &&
				     ktime_get_real_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_BSOFTLONGWARN);
					goto error_return;
				}

				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
			}
		}
		if (ninos > 0) {
			total_count = dqp->q_res_icount + ninos;
			timer = be32_to_cpu(dqp->q_core.d_itimer) +
				((u64)dqp->q_core.d_itimer_high << 32);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = defq->ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = defq->isoftlimit;

			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 &&
				     ktime_get_real_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_ISOFTLONGWARN);
					goto error_return;
				}
				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}

	if (XFS_IS_CORRUPT(mp,
			   dqp->q_res_bcount < be64_to_cpu(dqp->q_core.d_bcount)) ||
	    XFS_IS_CORRUPT(mp,
			   dqp->q_res_rtbcount < be64_to_cpu(dqp->q_core.d_rtbcount)) ||
	    XFS_IS_CORRUPT(mp,
			   dqp->q_res_icount < be64_to_cpu(dqp->q_core.d_icount)))
		goto error_corrupt;

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (flags & XFS_QMOPT_ENOSPC)
		return -ENOSPC;
	return -EDQUOT;
error_corrupt:
	xfs_dqunlock(dqp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return -EFSCORRUPTED;
}

/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement.  Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by the caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
					 (flags & ~XFS_QMOPT_ENOSPC));
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos,
					 (flags & ~XFS_QMOPT_ENOSPC));
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so no need to log.
	 */
	return 0;

unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}

/*
 * Lock the dquots and change the reservations if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode passed in must be locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;
	if (XFS_IS_PQUOTA_ON(mp))
		flags |= XFS_QMOPT_ENOSPC;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
	       XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
	       XFS_TRANS_DQ_RES_BLKS);

	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						ip->i_pdquot,
						nblks, ninos, flags);
}

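/*
 * Example (hypothetical caller, for illustration only): an allocation path
 * would typically reserve data blocks against the inode's dquots before
 * mapping them, and cancel the transaction on failure:
 *
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
 *					      XFS_TRANS_DQ_RES_BLKS);
 *	if (error)
 *		goto out_trans_cancel;
 *
 * Cancelling the dirty transaction then backs the reservation out again
 * via xfs_trans_unreserve_and_mod_dquots() above.
 */
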
/*
 * This routine is called to allocate a quotaoff log item.
 */
struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*startqoff,
	uint			flags)
{
	struct xfs_qoff_logitem	*q;

	ASSERT(tp != NULL);

	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	ASSERT(q != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &q->qql_item);
	return q;
}

/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	struct xfs_trans	*tp,
	struct xfs_qoff_logitem	*qlp)
{
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

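/*
 * Allocate the per-transaction quota bookkeeping structure (the per-type
 * dqtrx arrays) from the dqtrx zone.
 */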
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0);
}

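/*
 * Free the per-transaction quota bookkeeping, if any was allocated.
 */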
void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}