 			/* If quota was reenabled in the meantime, we have
 			 * nothing to do */
 			if (!sb_has_quota_enabled(sb, cnt)) {
-				mutex_lock(&toputinode[cnt]->i_mutex);
+				mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
 				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
 				  S_NOATIME | S_NOQUOTA);
 				truncate_inode_pages(&toputinode[cnt]->i_data, 0);
 static void quota_sync_sb(struct super_block *sb, int type)
 {
 	int cnt;
-	struct inode *discard[MAXQUOTAS];
 	sb->s_qcop->quota_sync(sb, type);
 	/* This is not very clever (and fast) but currently I don't know about
 	 * any other simple way of getting quota data to disk and we must get
 	 * them there for userspace to be visible... */
 	if (sb->s_op->sync_fs)
 		sb->s_op->sync_fs(sb, 1);
 	sync_blockdev(sb->s_bdev);
-	/* Now when everything is written we can discard the pagecache so
-	 * that userspace sees the changes. We need i_mutex and so we could
-	 * not do it inside dqonoff_mutex. Moreover we need to be carefull
-	 * about races with quotaoff() (that is the reason why we have own
-	 * reference to inode). */
+	/*
+	 * Now when everything is written we can discard the pagecache so
+	 * that userspace sees the changes.
+	 */
 	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		discard[cnt] = NULL;
 		if (type != -1 && cnt != type)
 			continue;
 		if (!sb_has_quota_enabled(sb, cnt))
 			continue;
+		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
+		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
+		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
 	}
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (discard[cnt]) {
-			mutex_lock(&discard[cnt]->i_mutex);
-			truncate_inode_pages(&discard[cnt]->i_data, 0);
-			mutex_unlock(&discard[cnt]->i_mutex);
-			iput(discard[cnt]);
-		}
-	}
 }
void sync_dquots(struct super_block *sb, int type)
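
For readers unfamiliar with the annotation used above: mutex_lock_nested() takes an explicit lockdep subclass, and I_MUTEX_QUOTA marks an i_mutex acquisition as belonging to the quota-file class, so taking a quota file's i_mutex while dqonoff_mutex is held is not flagged as a potential deadlock. That is also why the igrab()/iput() reference dance can go away: the truncate now runs while dqonoff_mutex still protects the quota file. A minimal sketch of the pattern follows; the helper name and standalone framing are illustrative only, not part of the patch.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

/*
 * Illustrative sketch: discard the pagecache of one quota file while the
 * caller already holds dqonoff_mutex.  The I_MUTEX_QUOTA subclass tells
 * lockdep that this i_mutex belongs to the quota-file lock class.
 */
static void quota_discard_pagecache(struct inode *inode)
{
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
	truncate_inode_pages(&inode->i_data, 0);
	mutex_unlock(&inode->i_mutex);
}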