obj->dev_addr = DMA_ERROR_CODE;
- mapping = file_inode(obj->obj.filp)->i_mapping;
+ mapping = obj->obj.filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
if (sg_alloc_table(sgt, count, GFP_KERNEL))
goto free_sgt;
- mapping = file_inode(dobj->obj.filp)->i_mapping;
+ mapping = dobj->obj.filp->f_mapping;
for_each_sg(sgt->sgl, sg, count, i) {
struct page *page;
int i, npages;
/* This is the shared memory object that backs the GEM resource */
- mapping = file_inode(obj->filp)->i_mapping;
+ mapping = obj->filp->f_mapping;
/* We already BUG_ON() for non-page-aligned sizes in
 * drm_gem_object_init(), so we should never hit this unless
 * driver author is doing something really wrong:
 */
/*
 * Our buffers are kept pinned, so allocating them
 * from the MOVABLE zone is a really bad idea, and
 * conflicts with CMA. See comments above new_inode()
 * why this is required _and_ expected if you're
 * going to pin these pages.
 */
- mapping = file_inode(obj->filp)->i_mapping;
+ mapping = obj->filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
}
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
- struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
struct sg_table *st;
struct scatterlist *sg;
obj->dirty = 0;
if (obj->dirty) {
- struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
int i;
if (obj->base.filp == NULL)
return;
- mapping = file_inode(obj->base.filp)->i_mapping,
+ mapping = obj->base.filp->f_mapping,
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
/* Get the list of pages out of our struct file.  They'll be pinned
 * at this point until we release them.
 *
* Fail silently without starting the shrinker
*/
- mapping = file_inode(obj->base.filp)->i_mapping;
+ mapping = obj->base.filp->f_mapping;
gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
gfp |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl;
mask |= __GFP_DMA32;
}
- mapping = file_inode(obj->base.filp)->i_mapping;
+ mapping = obj->base.filp->f_mapping;
mapping_set_gfp_mask(mapping, mask);
i915_gem_object_init(obj, &i915_gem_object_ops);
if (ret)
goto err_free;
- mapping = file_inode(obj->filp)->i_mapping;
+ mapping = obj->filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
}
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
- swap_space = file_inode(swap_storage)->i_mapping;
+ swap_space = swap_storage->f_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = shmem_read_mapping_page(swap_space, i);
} else
swap_storage = persistent_swap_storage;
- swap_space = file_inode(swap_storage)->i_mapping;
+ swap_space = swap_storage->f_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
- struct inode *inode = file_inode(file)->i_mapping->host;
+ struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
- struct inode *inode = file_inode(file)->i_mapping->host;
+ struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
struct page *page;
for (;;) {
- page = read_cache_page(file_inode(desc->file)->i_mapping,
+ page = read_cache_page(desc->file->f_mapping,
desc->page_index, (filler_t *)nfs_readdir_filler, desc);
if (IS_ERR(page) || grab_page(page))
break;
static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file)->i_mapping->host;
+ struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
get_block_t *get_block;
address = address & huge_page_mask(h);
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
- mapping = file_inode(vma->vm_file)->i_mapping;
+ mapping = vma->vm_file->f_mapping;
/*
* Take the mapping lock for the duration of the table walk. As