blk_trace_shutdown(q);
+ bdi_destroy(&q->backing_dev_info);
kmem_cache_free(requestq_cachep, q);
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
+ int err;
q = kmem_cache_alloc_node(requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
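+ /* set up and initialise the queue's bdi before any other setup; if bdi_init() fails the queue is unusable, so free it and fail the allocation */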
+ q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+ q->backing_dev_info.unplug_io_data = q;
+ err = bdi_init(&q->backing_dev_info);
+ if (err) {
+ kmem_cache_free(requestq_cachep, q);
+ return NULL;
+ }
+
init_timer(&q->unplug_timer);
kobject_set_name(&q->kobj, "%s", "queue");
q->kobj.ktype = &queue_ktype;
kobject_init(&q->kobj);
- q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
- q->backing_dev_info.unplug_io_data = q;
-
mutex_init(&q->sysfs_lock);
return q;
blk_cleanup_queue(rd_queue[i]);
}
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
+
+ bdi_destroy(&rd_file_backing_dev_info);
+ bdi_destroy(&rd_backing_dev_info);
}
static int __init rd_init(void)
{
int i;
- int err = -ENOMEM;
+ int err;
+
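+ /* the ramdisk driver uses two bdis; bring both up before any queues are created, unwinding the first if the second fails */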
+ err = bdi_init(&rd_backing_dev_info);
+ if (err)
+ goto out2;
+
+ err = bdi_init(&rd_file_backing_dev_info);
+ if (err) {
+ bdi_destroy(&rd_backing_dev_info);
+ goto out2;
+ }
+
+ err = -ENOMEM;
if (rd_blocksize > PAGE_SIZE || rd_blocksize < 512 ||
(rd_blocksize & (rd_blocksize-1))) {
put_disk(rd_disks[i]);
blk_cleanup_queue(rd_queue[i]);
}
+ bdi_destroy(&rd_backing_dev_info);
+ bdi_destroy(&rd_file_backing_dev_info);
+out2:
return err;
}
static int __init chr_dev_init(void)
{
int i;
+ int err;
+
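+ /* the bdi backing /dev/zero mappings must be ready before the device nodes are registered */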
+ err = bdi_init(&zero_bdi);
+ if (err)
+ return err;
if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
printk("unable to get major %d for memory devs\n", MEM_MAJOR);
void __init chrdev_init(void)
{
cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
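+ /* chrdev_init() returns void and the bdi_init() stub (see backing-dev.h) cannot fail, so the return value is deliberately ignored */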
+ bdi_init(&directly_mappable_cdev_bdi);
}
extern struct inode * configfs_new_inode(mode_t mode, struct configfs_dirent *);
extern int configfs_create(struct dentry *, int mode, int (*init)(struct inode *));
+extern int configfs_inode_init(void);
+extern void configfs_inode_exit(void);
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_make_dirent(struct configfs_dirent *,
mutex_unlock(&dir->d_inode->i_mutex);
}
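+
+ /* init/exit hooks for the configfs bdi, called from configfs_init() and configfs_exit() */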
+int __init configfs_inode_init(void)
+{
+ return bdi_init(&configfs_backing_dev_info);
+}
+
+void __exit configfs_inode_exit(void)
+{
+ bdi_destroy(&configfs_backing_dev_info);
+}
subsystem_unregister(&config_subsys);
kmem_cache_destroy(configfs_dir_cachep);
configfs_dir_cachep = NULL;
+ goto out;
}
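+
+ /* configfs is fully registered by this point; unwind all of it if the bdi cannot be initialised */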
+ err = configfs_inode_init();
+ if (err) {
+ unregister_filesystem(&configfs_fs_type);
+ subsystem_unregister(&config_subsys);
+ kmem_cache_destroy(configfs_dir_cachep);
+ configfs_dir_cachep = NULL;
+ }
out:
return err;
}
subsystem_unregister(&config_subsys);
kmem_cache_destroy(configfs_dir_cachep);
configfs_dir_cachep = NULL;
+ configfs_inode_exit();
}
MODULE_AUTHOR("Oracle");
static struct fuse_conn *new_conn(void)
{
struct fuse_conn *fc;
+ int err;
fc = kzalloc(sizeof(*fc), GFP_KERNEL);
if (fc) {
atomic_set(&fc->num_waiting, 0);
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
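+ /* a connection without a working bdi is unusable; free it and return NULL */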
+ err = bdi_init(&fc->bdi);
+ if (err) {
+ kfree(fc);
+ fc = NULL;
+ goto out;
+ }
fc->reqctr = 0;
fc->blocked = 1;
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
}
+out:
return fc;
}
if (fc->destroy_req)
fuse_request_free(fc->destroy_req);
mutex_destroy(&fc->inst_mutex);
+ bdi_destroy(&fc->bdi);
kfree(fc);
}
}
int error;
struct vfsmount *vfsmount;
+ error = bdi_init(&hugetlbfs_backing_dev_info);
+ if (error)
+ return error;
+
hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
sizeof(struct hugetlbfs_inode_info),
0, 0, init_once);
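+ /* error is still 0 from bdi_init() here; set it so the out2 path reports the failure */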
+ error = -ENOMEM;
if (hugetlbfs_inode_cachep == NULL)
- return -ENOMEM;
+ goto out2;
error = register_filesystem(&hugetlbfs_fs_type);
if (error)
out:
if (error)
kmem_cache_destroy(hugetlbfs_inode_cachep);
+ out2:
+ bdi_destroy(&hugetlbfs_backing_dev_info);
return error;
}
{
kmem_cache_destroy(hugetlbfs_inode_cachep);
unregister_filesystem(&hugetlbfs_fs_type);
+ bdi_destroy(&hugetlbfs_backing_dev_info);
}
module_init(init_hugetlbfs_fs)
if (server->rsize > NFS_MAX_FILE_IO_SIZE)
server->rsize = NFS_MAX_FILE_IO_SIZE;
server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
if (server->wsize > max_rpc_payload)
goto out_error;
nfs_server_set_fsinfo(server, &fsinfo);
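+ /* abort server setup if its backing_dev_info cannot be initialised */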
+ error = bdi_init(&server->backing_dev_info);
+ if (error)
+ goto out_error;
+
/* Get some general file system info */
if (server->namelen == 0) {
nfs_put_client(server->nfs_client);
nfs_free_iostats(server->io_stats);
+ bdi_destroy(&server->backing_dev_info);
kfree(server);
nfs_release_automount_timer();
dprintk("<-- nfs_free_server()\n");
dlmfs_print_version();
+ status = bdi_init(&dlmfs_backing_dev_info);
+ if (status)
+ return status;
+
dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
sizeof(struct dlmfs_inode_private),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
dlmfs_init_once);
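+ /* status is still 0 from bdi_init() here; set it so the bail path sees the failure */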
+ status = -ENOMEM;
if (!dlmfs_inode_cache)
- return -ENOMEM;
+ goto bail;
cleanup_inode = 1;
user_dlm_worker = create_singlethread_workqueue("user_dlm");
kmem_cache_destroy(dlmfs_inode_cache);
if (cleanup_worker)
destroy_workqueue(user_dlm_worker);
+ bdi_destroy(&dlmfs_backing_dev_info);
} else
printk("OCFS2 User DLM kernel interface loaded\n");
return status;
destroy_workqueue(user_dlm_worker);
kmem_cache_destroy(dlmfs_inode_cache);
+
+ bdi_destroy(&dlmfs_backing_dev_info);
}
MODULE_AUTHOR("Oracle");
int __init init_rootfs(void)
{
- return register_filesystem(&rootfs_fs_type);
+ int err;
+
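+ /* rootfs is backed by the ramfs bdi, so that must come up first */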
+ err = bdi_init(&ramfs_backing_dev_info);
+ if (err)
+ return err;
+
+ err = register_filesystem(&rootfs_fs_type);
+ if (err)
+ bdi_destroy(&ramfs_backing_dev_info);
+
+ return err;
}
MODULE_LICENSE("GPL");
.setattr = sysfs_setattr,
};
+int __init sysfs_inode_init(void)
+{
+ return bdi_init(&sysfs_backing_dev_info);
+}
+
int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
{
struct inode * inode = dentry->d_inode;
if (!sysfs_dir_cachep)
goto out;
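+ /* bring up the sysfs bdi before the filesystem can be registered and mounted */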
+ err = sysfs_inode_init();
+ if (err)
+ goto out_err;
+
err = register_filesystem(&sysfs_fs_type);
if (!err) {
sysfs_mount = kern_mount(&sysfs_fs_type);
struct inode *sysfs_get_inode(struct sysfs_dirent *sd);
int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
+int sysfs_inode_init(void);
/*
* file.c
void *unplug_io_data;
};
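+ /*
+ * No-ops for now: this patch only wires up the bdi_init()/bdi_destroy()
+ * call sites, so that per-bdi state added later has a single place to
+ * be set up and torn down.
+ */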
+static inline int bdi_init(struct backing_dev_info *bdi)
+{
+ return 0;
+}
+
+static inline void bdi_destroy(struct backing_dev_info *bdi)
+{
+}
/*
* Flags in backing_dev_info::capability
node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
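+ /* one-time init for the global default_backing_dev_info, run at subsys initcall time */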
+static int __init readahead_init(void)
+{
+ return bdi_init(&default_backing_dev_info);
+}
+subsys_initcall(readahead_init);
+
/*
* Submit IO for the read-ahead request in file_ra_state.
*/
{
int error;
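+ /* the shmem bdi comes up first, so it is torn down last on the error paths (out3/out4) */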
+ error = bdi_init(&shmem_backing_dev_info);
+ if (error)
+ goto out4;
+
error = init_inodecache();
if (error)
goto out3;
out2:
destroy_inodecache();
out3:
+ bdi_destroy(&shmem_backing_dev_info);
+out4:
shm_mnt = ERR_PTR(error);
return error;
}
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
+#include <linux/backing-dev.h>
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
{
unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
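+ /* one-time init for the bdi behind swapper_space; the no-op bdi_init() cannot fail, so its return value is ignored */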
+#ifdef CONFIG_SWAP
+ bdi_init(swapper_space.backing_dev_info);
+#endif
+
/* Use a smaller cluster for small-memory machines */
if (megs < 16)
page_cluster = 2;