From: Alexey Skidanov
Date: Tue, 9 Oct 2018 19:08:37 +0000 (+0300)
Subject: staging: android: ion: Add per-heap counters
X-Git-Tag: v5.15~7300^2~331
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4073536c927421a8908490cf22ce912cb97d7f53;p=platform%2Fkernel%2Flinux-starfive.git

staging: android: ion: Add per-heap counters

Heap statistics have been removed and currently even basic statistics
are missing.

This patch creates a per-heap debugfs directory under /sys/kernel/debug/
and adds the following counters:
- the number of allocated buffers;
- the number of allocated bytes;
- the number of allocated bytes watermark.

Signed-off-by: Alexey Skidanov
Acked-by: Laura Abbott
Signed-off-by: Greg Kroah-Hartman
---

diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 9907332..6fd8979 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -95,6 +95,13 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		goto err1;
 	}
 
+	spin_lock(&heap->stat_lock);
+	heap->num_of_buffers++;
+	heap->num_of_alloc_bytes += len;
+	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
+		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
+	spin_unlock(&heap->stat_lock);
+
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
 	mutex_lock(&dev->buffer_lock);
@@ -117,6 +124,11 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 	}
 	buffer->heap->ops->free(buffer);
+	spin_lock(&buffer->heap->stat_lock);
+	buffer->heap->num_of_buffers--;
+	buffer->heap->num_of_alloc_bytes -= buffer->size;
+	spin_unlock(&buffer->heap->stat_lock);
+
 	kfree(buffer);
 }
 
@@ -528,12 +540,15 @@ void ion_device_add_heap(struct ion_heap *heap)
 {
 	struct ion_device *dev = internal_dev;
 	int ret;
+	struct dentry *heap_root;
+	char debug_name[64];
 
 	if (!heap->ops->allocate || !heap->ops->free)
 		pr_err("%s: can not add heap with invalid ops struct.\n",
 		       __func__);
 
 	spin_lock_init(&heap->free_lock);
+	spin_lock_init(&heap->stat_lock);
 	heap->free_list_size = 0;
 
 	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
@@ -546,6 +561,33 @@ void ion_device_add_heap(struct ion_heap *heap)
 	}
 
 	heap->dev = dev;
+	heap->num_of_buffers = 0;
+	heap->num_of_alloc_bytes = 0;
+	heap->alloc_bytes_wm = 0;
+
+	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
+	debugfs_create_u64("num_of_buffers",
+			   0444, heap_root,
+			   &heap->num_of_buffers);
+	debugfs_create_u64("num_of_alloc_bytes",
+			   0444,
+			   heap_root,
+			   &heap->num_of_alloc_bytes);
+	debugfs_create_u64("alloc_bytes_wm",
+			   0444,
+			   heap_root,
+			   &heap->alloc_bytes_wm);
+
+	if (heap->shrinker.count_objects &&
+	    heap->shrinker.scan_objects) {
+		snprintf(debug_name, 64, "%s_shrink", heap->name);
+		debugfs_create_file(debug_name,
+				    0644,
+				    heap_root,
+				    heap,
+				    &debug_shrink_fops);
+	}
+
 	down_write(&dev->lock);
 	heap->id = heap_id++;
 	/*
@@ -555,14 +597,6 @@ void ion_device_add_heap(struct ion_heap *heap)
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
 
-	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
-		char debug_name[64];
-
-		snprintf(debug_name, 64, "%s_shrink", heap->name);
-		debugfs_create_file(debug_name, 0644, dev->debug_root,
-				    heap, &debug_shrink_fops);
-	}
-
 	dev->heap_cnt++;
 	up_write(&dev->lock);
 }
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index c006fc1..47b594c 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -157,6 +157,9 @@ struct ion_heap_ops {
  * @lock:		protects the free list
  * @waitqueue:		queue to wait on from deferred free thread
  * @task:		task struct of deferred free thread
+ * @num_of_buffers:	the number of currently allocated buffers
+ * @num_of_alloc_bytes:	the number of allocated bytes
+ * @alloc_bytes_wm:	the number of allocated bytes watermark
  *
  * Represents a pool of memory from which buffers can be made. In some
  * systems the only heap is regular system memory allocated via vmalloc.
@@ -177,6 +180,12 @@ struct ion_heap {
 	spinlock_t free_lock;
 	wait_queue_head_t waitqueue;
 	struct task_struct *task;
+	u64 num_of_buffers;
+	u64 num_of_alloc_bytes;
+	u64 alloc_bytes_wm;
+
+	/* protect heap statistics */
+	spinlock_t stat_lock;
 };
 
 /**
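
For reference, once a heap has been registered the new counters can be read straight from debugfs; they are plain decimal u64 values, so a simple cat of the files is enough. The program below is a minimal userspace sketch (not part of the patch), assuming debugfs is mounted at /sys/kernel/debug, that dev->debug_root above corresponds to the "ion" directory there, and that a heap named "system" exists; the per-heap directory takes whatever name was passed to debugfs_create_dir() as heap->name.

/*
 * Minimal sketch: read the per-heap ION counters added by this patch.
 * Assumptions (not guaranteed by the patch itself): debugfs mounted at
 * /sys/kernel/debug, debug root named "ion", heap named "system".
 */
#include <stdio.h>

static unsigned long long read_counter(const char *heap, const char *counter)
{
	char path[256];
	unsigned long long val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/debug/ion/%s/%s",
		 heap, counter);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%llu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	const char *heap = "system";	/* assumed heap name */

	printf("num_of_buffers:     %llu\n",
	       read_counter(heap, "num_of_buffers"));
	printf("num_of_alloc_bytes: %llu\n",
	       read_counter(heap, "num_of_alloc_bytes"));
	printf("alloc_bytes_wm:     %llu\n",
	       read_counter(heap, "alloc_bytes_wm"));
	return 0;
}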