From d973249e26002e461a6ea3da47213570447f1cc6 Mon Sep 17 00:00:00 2001
From: Jiyu Yang
Date: Tue, 18 Jun 2019 15:03:48 +0800
Subject: [PATCH] staging: revert SWPL-9254 fixed issue [1/1]

PD#SWPL-9788

Problem:
UI sliding is not smooth.

Solution:
Revert the two SWPL-9254 commits.

Verify:
Verified on franklin/galilei.

This reverts commit 4c0482c80ece66a40796c7b476a15640ed1eadfa.
This reverts commit b78959e21bea51b0ab1882b7c55a35c5d4f7fff1.

Change-Id: Ifc9ac43e69abb578795ac3f6f9b2f324cbf590f3
Signed-off-by: Jiyu Yang
---
 drivers/staging/android/ion/ion.c      | 171 +++++++++------------------------
 drivers/staging/android/ion/ion_priv.h |   1 -
 2 files changed, 44 insertions(+), 128 deletions(-)

diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 8b9b0ca..e19f66e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -166,7 +166,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	buffer->dev = dev;
 	buffer->size = len;
 	INIT_LIST_HEAD(&buffer->vmas);
-	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
 	/*
 	 * this will set up dma addresses for the sglist -- it is not
@@ -803,115 +802,24 @@ void ion_client_destroy(struct ion_client *client)
 }
 EXPORT_SYMBOL(ion_client_destroy);
 
-static struct sg_table *dup_sg_table(struct sg_table *table)
-{
-	struct sg_table *new_table;
-	int ret, i;
-	struct scatterlist *sg, *new_sg;
-
-	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
-
-	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
-	if (ret) {
-		kfree(new_table);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	new_sg = new_table->sgl;
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		memcpy(new_sg, sg, sizeof(*sg));
-		sg->dma_address = 0;
-		new_sg = sg_next(new_sg);
-	}
-
-	return new_table;
-}
-
-static void free_duped_table(struct sg_table *table)
-{
-	sg_free_table(table);
-	kfree(table);
-}
-
-struct ion_dma_buf_attachment {
-	struct device *dev;
-	struct sg_table *table;
-	struct list_head list;
-};
-
-static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
-			      struct dma_buf_attachment *attachment)
-{
-	struct ion_dma_buf_attachment *a;
-	struct sg_table *table;
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	a = kzalloc(sizeof(*a), GFP_KERNEL);
-	if (!a)
-		return -ENOMEM;
-
-	table = dup_sg_table(buffer->sg_table);
-	if (IS_ERR(table)) {
-		kfree(a);
-		return -ENOMEM;
-	}
-
-	a->table = table;
-	a->dev = dev;
-	INIT_LIST_HEAD(&a->list);
-
-	attachment->priv = a;
-
-	mutex_lock(&buffer->lock);
-	list_add(&a->list, &buffer->attachments);
-	mutex_unlock(&buffer->lock);
-
-	return 0;
-}
-
-static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
-				struct dma_buf_attachment *attachment)
-{
-	struct ion_dma_buf_attachment *a = attachment->priv;
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	free_duped_table(a->table);
-	mutex_lock(&buffer->lock);
-	list_del(&a->list);
-	mutex_unlock(&buffer->lock);
-
-	kfree(a);
-}
-
 
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction direction);
 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
 					enum dma_data_direction direction)
 {
-	struct ion_dma_buf_attachment *a = attachment->priv;
-	struct sg_table *table;
-	int ret;
-
-	table = a->table;
-
-	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
-			direction)){
-		ret = -ENOMEM;
-		goto err;
-	}
-	return table;
+	struct dma_buf *dmabuf = attachment->dmabuf;
+	struct ion_buffer *buffer = dmabuf->priv;
 
-err:
-	free_duped_table(table);
-	return ERR_PTR(ret);
+	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+	return buffer->sg_table;
 }
 
 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 			      struct sg_table *table,
 			      enum dma_data_direction direction)
 {
-	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
 }
 
 void ion_pages_sync_for_device(struct device *dev, struct page *page,
@@ -935,6 +843,36 @@ struct ion_vma_list {
 	struct vm_area_struct *vma;
 };
 
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction dir)
+{
+	struct ion_vma_list *vma_list;
+	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	int i;
+
+	if (!ion_buffer_fault_user_mappings(buffer))
+		return;
+
+	mutex_lock(&buffer->lock);
+	for (i = 0; i < pages; i++) {
+		struct page *page = buffer->pages[i];
+
+		if (ion_buffer_page_is_dirty(page))
+			ion_pages_sync_for_device(dev, ion_buffer_page(page),
+						  PAGE_SIZE, dir);
+
+		ion_buffer_page_clean(buffer->pages + i);
+	}
+	list_for_each_entry(vma_list, &buffer->vmas, list) {
+		struct vm_area_struct *vma = vma_list->vma;
+
+		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+			       NULL);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ion_buffer *buffer = vma->vm_private_data;
@@ -1049,45 +987,26 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 {
 	struct ion_buffer *buffer = dmabuf->priv;
 	void *vaddr;
-	struct ion_dma_buf_attachment *a;
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
 	}
-
 
 	mutex_lock(&buffer->lock);
-	list_for_each_entry(a, &buffer->attachments, list) {
-		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
-				    DMA_BIDIRECTIONAL);
-	}
+	vaddr = ion_buffer_kmap_get(buffer);
 	mutex_unlock(&buffer->lock);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(vaddr);
 }
 
 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 				      enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	struct ion_dma_buf_attachment *a;
-
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
 
 	mutex_lock(&buffer->lock);
-	list_for_each_entry(a, &buffer->attachments, list) {
-		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
-				       DMA_BIDIRECTIONAL);
-	}
+	ion_buffer_kmap_put(buffer);
 	mutex_unlock(&buffer->lock);
 
 	return 0;
@@ -1098,8 +1017,6 @@ static struct dma_buf_ops dma_buf_ops = {
 	.unmap_dma_buf = ion_unmap_dma_buf,
 	.mmap = ion_mmap,
 	.release = ion_dma_buf_release,
-	.attach = ion_dma_buf_attach,
-	.detach = ion_dma_buf_detatch,
 	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
 	.end_cpu_access = ion_dma_buf_end_cpu_access,
 	.kmap_atomic = ion_dma_buf_kmap,
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 0bac7a4..57a52d9 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -74,7 +74,6 @@ struct ion_buffer {
 	struct sg_table *sg_table;
 	struct page **pages;
 	struct list_head vmas;
-	struct list_head attachments;
 	/* used to track orphaned buffers */
 	int handle_count;
 	char task_comm[TASK_COMM_LEN];
-- 
2.7.4