1 /*
2  * Support for Medfield PNW Camera Imaging ISP subsystem.
3  *
4  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5  *
6  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  *
18  */
19 /*
20  * This file contains functions for buffer object structure management
21  */
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/gfp.h>          /* for GFP_ATOMIC */
25 #include <linux/mm.h>
26 #include <linux/mm_types.h>
27 #include <linux/hugetlb.h>
28 #include <linux/highmem.h>
29 #include <linux/slab.h>         /* for kmalloc */
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/string.h>
33 #include <linux/list.h>
34 #include <linux/errno.h>
35 #include <linux/io.h>
36 #include <asm/current.h>
37 #include <linux/sched/signal.h>
38 #include <linux/file.h>
39
40 #include <asm/set_memory.h>
41
42 #include "atomisp_internal.h"
43 #include "hmm/hmm_common.h"
44 #include "hmm/hmm_pool.h"
45 #include "hmm/hmm_bo.h"
46
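/*
 * Helpers converting between an allocation order and a page count:
 * order_to_nr(order) yields 2^order pages, and nr_to_order_bottom(nr)
 * rounds down, e.g. nr_to_order_bottom(6) == 2 (a 4-page block), so a
 * request is satisfied with successively smaller blocks.
 */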
47 static unsigned int order_to_nr(unsigned int order)
48 {
49         return 1U << order;
50 }
51
52 static unsigned int nr_to_order_bottom(unsigned int nr)
53 {
54         return fls(nr) - 1;
55 }
56
57 static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
58 {
59         struct hmm_buffer_object *bo;
60
61         bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
62         if (!bo)
63                 dev_err(atomisp_dev, "%s: failed!\n", __func__);
64
65         return bo;
66 }
67
68 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
69                                         unsigned int pgnr)
70 {
71         check_bodev_null_return(bdev, -EINVAL);
72         var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
73                         "hmm_bo_device not inited yet.\n");
74         /* prevent zero size buffer object */
75         if (pgnr == 0) {
76                 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
77                 return -EINVAL;
78         }
79
80         memset(bo, 0, sizeof(*bo));
81         mutex_init(&bo->mutex);
82
83         /* init the bo->list HEAD as an element of entire_bo_list */
84         INIT_LIST_HEAD(&bo->list);
85
86         bo->bdev = bdev;
87         bo->vmap_addr = NULL;
88         bo->status = HMM_BO_FREE;
89         bo->start = bdev->start;
90         bo->pgnr = pgnr;
91         bo->end = bo->start + pgnr_to_size(pgnr);
92         bo->prev = NULL;
93         bo->next = NULL;
94
95         return 0;
96 }
97
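/*
 * Best-fit lookup in the free rbtree.  The tree is keyed by pgnr and
 * buffer objects of equal size hang off the tree node in a next/prev
 * list (only the tree node itself has prev == NULL).  The search
 * returns a bo whose pgnr equals the request, or the smallest free bo
 * larger than the request, and takes it off the tree/list; it returns
 * NULL when nothing large enough is free.
 */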
98 static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
99                                 struct rb_node *node, unsigned int pgnr)
100 {
101         struct hmm_buffer_object *this, *ret_bo, *temp_bo;
102
103         this = rb_entry(node, struct hmm_buffer_object, node);
104         if (this->pgnr == pgnr ||
105                 (this->pgnr > pgnr && this->node.rb_left == NULL)) {
106                 goto remove_bo_and_return;
107         } else {
108                 if (this->pgnr < pgnr) {
109                         if (!this->node.rb_right)
110                                 return NULL;
111                         ret_bo = __bo_search_and_remove_from_free_rbtree(
112                                 this->node.rb_right, pgnr);
113                 } else {
114                         ret_bo = __bo_search_and_remove_from_free_rbtree(
115                                 this->node.rb_left, pgnr);
116                 }
117                 if (!ret_bo) {
118                         if (this->pgnr > pgnr)
119                                 goto remove_bo_and_return;
120                         else
121                                 return NULL;
122                 }
123                 return ret_bo;
124         }
125
126 remove_bo_and_return:
127         /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
128          * 1. check if 'this->next' is NULL:
129          *      yes: erase 'this' node and rebalance rbtree, return 'this'.
130          */
131         if (this->next == NULL) {
132                 rb_erase(&this->node, &this->bdev->free_rbtree);
133                 return this;
134         }
135         /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
136          * 2. check if 'this->next->next' is NULL:
137          *      yes: change the related 'next/prev' pointer,
138          *              return 'this->next' but the rbtree stays unchanged.
139          */
140         temp_bo = this->next;
141         this->next = temp_bo->next;
142         if (temp_bo->next)
143                 temp_bo->next->prev = this;
144         temp_bo->next = NULL;
145         temp_bo->prev = NULL;
146         return temp_bo;
147 }
148
149 static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
150                                                         ia_css_ptr start)
151 {
152         struct rb_node *n = root->rb_node;
153         struct hmm_buffer_object *bo;
154
155         do {
156                 bo = rb_entry(n, struct hmm_buffer_object, node);
157
158                 if (bo->start > start) {
159                         if (n->rb_left == NULL)
160                                 return NULL;
161                         n = n->rb_left;
162                 } else if (bo->start < start) {
163                         if (n->rb_right == NULL)
164                                 return NULL;
165                         n = n->rb_right;
166                 } else {
167                         return bo;
168                 }
169         } while (n);
170
171         return NULL;
172 }
173
174 static struct hmm_buffer_object *__bo_search_by_addr_in_range(
175                 struct rb_root *root, unsigned int start)
176 {
177         struct rb_node *n = root->rb_node;
178         struct hmm_buffer_object *bo;
179
180         do {
181                 bo = rb_entry(n, struct hmm_buffer_object, node);
182
183                 if (bo->start > start) {
184                         if (n->rb_left == NULL)
185                                 return NULL;
186                         n = n->rb_left;
187                 } else {
188                         if (bo->end > start)
189                                 return bo;
190                         if (n->rb_right == NULL)
191                                 return NULL;
192                         n = n->rb_right;
193                 }
194         } while (n);
195
196         return NULL;
197 }
198
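/*
 * Insert a bo into the free rbtree, keyed by pgnr.  If a node of the
 * same size already exists, the bo is chained into that node's
 * next/prev list instead of becoming a new tree node; either way its
 * status is set to HMM_BO_FREE.
 */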
199 static void __bo_insert_to_free_rbtree(struct rb_root *root,
200                                         struct hmm_buffer_object *bo)
201 {
202         struct rb_node **new = &(root->rb_node);
203         struct rb_node *parent = NULL;
204         struct hmm_buffer_object *this;
205         unsigned int pgnr = bo->pgnr;
206
207         while (*new) {
208                 parent = *new;
209                 this = container_of(*new, struct hmm_buffer_object, node);
210
211                 if (pgnr < this->pgnr) {
212                         new = &((*new)->rb_left);
213                 } else if (pgnr > this->pgnr) {
214                         new = &((*new)->rb_right);
215                 } else {
216                         bo->prev = this;
217                         bo->next = this->next;
218                         if (this->next)
219                                 this->next->prev = bo;
220                         this->next = bo;
221                         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
222                         return;
223                 }
224         }
225
226         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
227
228         rb_link_node(&bo->node, parent, new);
229         rb_insert_color(&bo->node, root);
230 }
231
232 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
233                                         struct hmm_buffer_object *bo)
234 {
235         struct rb_node **new = &(root->rb_node);
236         struct rb_node *parent = NULL;
237         struct hmm_buffer_object *this;
238         unsigned int start = bo->start;
239
240         while (*new) {
241                 parent = *new;
242                 this = container_of(*new, struct hmm_buffer_object, node);
243
244                 if (start < this->start)
245                         new = &((*new)->rb_left);
246                 else
247                         new = &((*new)->rb_right);
248         }
249
250         kref_init(&bo->kref);
251         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
252
253         rb_link_node(&bo->node, parent, new);
254         rb_insert_color(&bo->node, root);
255 }
256
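/*
 * Split a larger free bo: carve the first pgnr pages off into a freshly
 * allocated bo (returned to the caller, which marks it allocated) and
 * shrink the original bo to the remaining range.  The new bo is linked
 * into entire_bo_list just before the original, keeping address order.
 */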
257 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
258                                         struct hmm_buffer_object *bo,
259                                         unsigned int pgnr)
260 {
261         struct hmm_buffer_object *new_bo;
262         unsigned long flags;
263         int ret;
264
265         new_bo = __bo_alloc(bdev->bo_cache);
266         if (!new_bo) {
267                 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
268                 return NULL;
269         }
270         ret = __bo_init(bdev, new_bo, pgnr);
271         if (ret) {
272                 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
273                 kmem_cache_free(bdev->bo_cache, new_bo);
274                 return NULL;
275         }
276
277         new_bo->start = bo->start;
278         new_bo->end = new_bo->start + pgnr_to_size(pgnr);
279         bo->start = new_bo->end;
280         bo->pgnr = bo->pgnr - pgnr;
281
282         spin_lock_irqsave(&bdev->list_lock, flags);
283         list_add_tail(&new_bo->list, &bo->list);
284         spin_unlock_irqrestore(&bdev->list_lock, flags);
285
286         return new_bo;
287 }
288
289 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
290 {
291         struct hmm_bo_device *bdev = bo->bdev;
292         /* There are 4 situations when we take off a known bo from free rbtree:
293          * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
294          *      with no linked list after it; to take this bo off, just
295          *      erase it and rebalance the free rbtree
296          */
297         if (bo->prev == NULL && bo->next == NULL) {
298                 rb_erase(&bo->node, &bdev->free_rbtree);
299         /* 2. when bo->prev == NULL && bo->next != NULL, bo is an rbtree node
300          *      with a linked list after it; to take this bo off, erase it
301          *      first, then insert bo->next into the free rbtree and
302          *      rebalance the free rbtree
303          */
304         } else if (bo->prev == NULL && bo->next != NULL) {
305                 bo->next->prev = NULL;
306                 rb_erase(&bo->node, &bdev->free_rbtree);
307                 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
308                 bo->next = NULL;
309         /* 3. when bo->prev != NULL && bo->next == NULL, bo is not an rbtree
310          *      node; it is the last element of the linked list after the
311          *      rbtree node. To take this bo off, just clear the "prev/next"
312          *      pointers; the free rbtree stays unchanged
313          */
314         } else if (bo->prev != NULL && bo->next == NULL) {
315                 bo->prev->next = NULL;
316                 bo->prev = NULL;
317         /* 4. when bo->prev != NULL && bo->next != NULL, bo is not an rbtree
318          *      node; it is in the middle of the linked list after the rbtree
319          *      node. To take this bo off, relink its neighbours' "prev/next"
320          *      pointers and clear bo's own; the free rbtree stays unchanged
321          */
322         } else {
323                 bo->next->prev = bo->prev;
324                 bo->prev->next = bo->next;
325                 bo->next = NULL;
326                 bo->prev = NULL;
327         }
328 }
329
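/*
 * Merge bo into next_bo, the buffer object that immediately follows it
 * in ISP address space: next_bo is grown downwards to cover both
 * ranges, while bo is unlinked from entire_bo_list and returned to the
 * slab cache.
 */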
330 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
331                                         struct hmm_buffer_object *next_bo)
332 {
333         struct hmm_bo_device *bdev;
334         unsigned long flags;
335
336         bdev = bo->bdev;
337         next_bo->start = bo->start;
338         next_bo->pgnr = next_bo->pgnr + bo->pgnr;
339
340         spin_lock_irqsave(&bdev->list_lock, flags);
341         list_del(&bo->list);
342         spin_unlock_irqrestore(&bdev->list_lock, flags);
343
344         kmem_cache_free(bo->bdev->bo_cache, bo);
345
346         return next_bo;
347 }
348
349 /*
350  * hmm_bo_device functions.
351  */
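/*
 * Illustrative lifecycle of a buffer object (a minimal sketch; "my_bdev",
 * "my_mmu_client", "vstart", "vsize" and "pgnr" below are placeholder
 * names, not identifiers defined by this driver):
 *
 *   hmm_bo_device_init(&my_bdev, &my_mmu_client, vstart, vsize);
 *   bo = hmm_bo_alloc(&my_bdev, pgnr);
 *   hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
 *   hmm_bo_bind(bo);
 *   ...
 *   hmm_bo_unbind(bo);
 *   hmm_bo_free_pages(bo);
 *   hmm_bo_unref(bo);
 *   hmm_bo_device_exit(&my_bdev);
 */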
352 int hmm_bo_device_init(struct hmm_bo_device *bdev,
353                                 struct isp_mmu_client *mmu_driver,
354                                 unsigned int vaddr_start,
355                                 unsigned int size)
356 {
357         struct hmm_buffer_object *bo;
358         unsigned long flags;
359         int ret;
360
361         check_bodev_null_return(bdev, -EINVAL);
362
363         ret = isp_mmu_init(&bdev->mmu, mmu_driver);
364         if (ret) {
365                 dev_err(atomisp_dev, "isp_mmu_init failed.\n");
366                 return ret;
367         }
368
369         bdev->start = vaddr_start;
370         bdev->pgnr = size_to_pgnr_ceil(size);
371         bdev->size = pgnr_to_size(bdev->pgnr);
372
373         spin_lock_init(&bdev->list_lock);
374         mutex_init(&bdev->rbtree_mutex);
375
376         bdev->flag = HMM_BO_DEVICE_INITED;
377
378         INIT_LIST_HEAD(&bdev->entire_bo_list);
379         bdev->allocated_rbtree = RB_ROOT;
380         bdev->free_rbtree = RB_ROOT;
381
382         bdev->bo_cache = kmem_cache_create("bo_cache",
383                                 sizeof(struct hmm_buffer_object), 0, 0, NULL);
384         if (!bdev->bo_cache) {
385                 dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
386                 isp_mmu_exit(&bdev->mmu);
387                 return -ENOMEM;
388         }
389
390         bo = __bo_alloc(bdev->bo_cache);
391         if (!bo) {
392                 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
393                 isp_mmu_exit(&bdev->mmu);
394                 return -ENOMEM;
395         }
396
397         ret = __bo_init(bdev, bo, bdev->pgnr);
398         if (ret) {
399                 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
400                 kmem_cache_free(bdev->bo_cache, bo);
401                 isp_mmu_exit(&bdev->mmu);
402                 return -EINVAL;
403         }
404
405         spin_lock_irqsave(&bdev->list_lock, flags);
406         list_add_tail(&bo->list, &bdev->entire_bo_list);
407         spin_unlock_irqrestore(&bdev->list_lock, flags);
408
409         __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
410
411         return 0;
412 }
413
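/*
 * Allocate pgnr pages worth of ISP virtual address space.  A best-fit
 * bo is taken from the free rbtree; if it is larger than requested it
 * is split and the front part is returned.  The returned bo is placed
 * in the allocated rbtree.  Note that no physical pages are allocated
 * here; see hmm_bo_alloc_pages().
 */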
414 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
415                                         unsigned int pgnr)
416 {
417         struct hmm_buffer_object *bo, *new_bo;
418         struct rb_root *root = &bdev->free_rbtree;
419
420         check_bodev_null_return(bdev, NULL);
421         var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
422                         "hmm_bo_device not inited yet.\n");
423
424         if (pgnr == 0) {
425                 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
426                 return NULL;
427         }
428
429         mutex_lock(&bdev->rbtree_mutex);
430         bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
431         if (!bo) {
432                 mutex_unlock(&bdev->rbtree_mutex);
433                 dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed\n",
434                         __func__);
435                 return NULL;
436         }
437
438         if (bo->pgnr > pgnr) {
439                 new_bo = __bo_break_up(bdev, bo, pgnr);
440                 if (!new_bo) {
441                         mutex_unlock(&bdev->rbtree_mutex);
442                         dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
443                                 __func__);
444                         return NULL;
445                 }
446
447                 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
448                 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
449
450                 mutex_unlock(&bdev->rbtree_mutex);
451                 return new_bo;
452         }
453
454         __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
455
456         mutex_unlock(&bdev->rbtree_mutex);
457         return bo;
458 }
459
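/*
 * Return a bo's virtual range to the free rbtree, merging it with free
 * neighbours in entire_bo_list when their ranges are adjacent.
 * Normally reached via hmm_bo_unref() once the kref drops to zero.
 */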
460 void hmm_bo_release(struct hmm_buffer_object *bo)
461 {
462         struct hmm_bo_device *bdev = bo->bdev;
463         struct hmm_buffer_object *next_bo, *prev_bo;
464
465         mutex_lock(&bdev->rbtree_mutex);
466
467         /*
468          * FIX ME:
469          *
470          * how to destroy the bo when it is still MMAPED?
471          *
472          * ideally, this will not happen, as hmm_bo_release
473          * will only be called when kref reaches 0, and in mmap
474          * operation hmm_bo_ref will eventually be called.
475          * so, if this happens, something has gone wrong.
476          */
477         if (bo->status & HMM_BO_MMAPED) {
478                 mutex_unlock(&bdev->rbtree_mutex);
479                 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
480                 return;
481         }
482
483         if (bo->status & HMM_BO_BINDED) {
484                 dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
485                 hmm_bo_unbind(bo);
486         }
487
488         if (bo->status & HMM_BO_PAGE_ALLOCED) {
489                 dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
490                 hmm_bo_free_pages(bo);
491         }
492         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
493                 dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
494                 hmm_bo_vunmap(bo);
495         }
496
497         rb_erase(&bo->node, &bdev->allocated_rbtree);
498
499         prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
500         next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
501
502         if (bo->list.prev != &bdev->entire_bo_list &&
503                 prev_bo->end == bo->start &&
504                 (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
505                 __bo_take_off_handling(prev_bo);
506                 bo = __bo_merge(prev_bo, bo);
507         }
508
509         if (bo->list.next != &bdev->entire_bo_list &&
510                 next_bo->start == bo->end &&
511                 (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
512                 __bo_take_off_handling(next_bo);
513                 bo = __bo_merge(bo, next_bo);
514         }
515
516         __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
517
518         mutex_unlock(&bdev->rbtree_mutex);
519         return;
520 }
521
522 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
523 {
524         struct hmm_buffer_object *bo;
525         unsigned long flags;
526
527         dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
528
529         check_bodev_null_return_void(bdev);
530
531         /*
532          * release all allocated bos even if they are in use;
533          * all bos will be merged back into one big bo
534          */
535         while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
536                 hmm_bo_release(
537                         rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
538
539         dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
540                 __func__);
541
542         /* free all bos to release all ISP virtual memory */
543         while (!list_empty(&bdev->entire_bo_list)) {
544                 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
545
546                 spin_lock_irqsave(&bdev->list_lock, flags);
547                 list_del(&bo->list);
548                 spin_unlock_irqrestore(&bdev->list_lock, flags);
549
550                 kmem_cache_free(bdev->bo_cache, bo);
551         }
552
553         dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
554
555         kmem_cache_destroy(bdev->bo_cache);
556
557         isp_mmu_exit(&bdev->mmu);
558 }
559
560 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
561 {
562         check_bodev_null_return(bdev, -EINVAL);
563
564         return bdev->flag == HMM_BO_DEVICE_INITED;
565 }
566
567 int hmm_bo_allocated(struct hmm_buffer_object *bo)
568 {
569         check_bo_null_return(bo, 0);
570
571         return bo->status & HMM_BO_ALLOCED;
572 }
573
574 struct hmm_buffer_object *hmm_bo_device_search_start(
575         struct hmm_bo_device *bdev, ia_css_ptr vaddr)
576 {
577         struct hmm_buffer_object *bo;
578
579         check_bodev_null_return(bdev, NULL);
580
581         mutex_lock(&bdev->rbtree_mutex);
582         bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
583         if (!bo) {
584                 mutex_unlock(&bdev->rbtree_mutex);
585                 dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
586                         __func__, vaddr);
587                 return NULL;
588         }
589         mutex_unlock(&bdev->rbtree_mutex);
590
591         return bo;
592 }
593
594 struct hmm_buffer_object *hmm_bo_device_search_in_range(
595         struct hmm_bo_device *bdev, unsigned int vaddr)
596 {
597         struct hmm_buffer_object *bo;
598
599         check_bodev_null_return(bdev, NULL);
600
601         mutex_lock(&bdev->rbtree_mutex);
602         bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
603         if (!bo) {
604                 mutex_unlock(&bdev->rbtree_mutex);
605                 dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
606                         __func__, vaddr);
607                 return NULL;
608         }
609         mutex_unlock(&bdev->rbtree_mutex);
610
611         return bo;
612 }
613
614 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
615         struct hmm_bo_device *bdev, const void *vaddr)
616 {
617         struct list_head *pos;
618         struct hmm_buffer_object *bo;
619         unsigned long flags;
620
621         check_bodev_null_return(bdev, NULL);
622
623         spin_lock_irqsave(&bdev->list_lock, flags);
624         list_for_each(pos, &bdev->entire_bo_list) {
625                 bo = list_to_hmm_bo(pos);
626                 /* skip bo which has no vm_node allocated */
627                 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
628                         continue;
629                 if (bo->vmap_addr == vaddr)
630                         goto found;
631         }
632         spin_unlock_irqrestore(&bdev->list_lock, flags);
633         return NULL;
634 found:
635         spin_unlock_irqrestore(&bdev->list_lock, flags);
636         return bo;
637
638 }
639
640
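/*
 * Private (ISP-only) pages can come from three places: the dynamic pool
 * (recycled system pages), the reserved pool (pages set aside for
 * atomisp at init time), or alloc_pages() directly.
 * free_private_bo_pages() returns each page to its origin based on
 * page_obj[i].type, while alloc_private_pages() tries the pools first
 * and falls back to alloc_pages().
 */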
641 static void free_private_bo_pages(struct hmm_buffer_object *bo,
642                                 struct hmm_pool *dypool,
643                                 struct hmm_pool *repool,
644                                 int free_pgnr)
645 {
646         int i, ret;
647
648         for (i = 0; i < free_pgnr; i++) {
649                 switch (bo->page_obj[i].type) {
650                 case HMM_PAGE_TYPE_RESERVED:
651                         if (repool->pops
652                             && repool->pops->pool_free_pages) {
653                                 repool->pops->pool_free_pages(repool->pool_info,
654                                                         &bo->page_obj[i]);
655                                 hmm_mem_stat.res_cnt--;
656                         }
657                         break;
658                 /*
659                  * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
660                  * memory, so when freeing them, they should be put into the
661                  * dynamic pool.
662                  */
663                 case HMM_PAGE_TYPE_DYNAMIC:
664                 case HMM_PAGE_TYPE_GENERAL:
665                         if (dypool->pops
666                             && dypool->pops->pool_inited
667                             && dypool->pops->pool_inited(dypool->pool_info)) {
668                                 if (dypool->pops->pool_free_pages)
669                                         dypool->pops->pool_free_pages(
670                                                               dypool->pool_info,
671                                                               &bo->page_obj[i]);
672                                 break;
673                         }
674
675                         /*
676                          * if dynamic memory pool doesn't exist, need to free
677                          * pages to system directly.
678                          */
679                 default:
680                         ret = set_pages_wb(bo->page_obj[i].page, 1);
681                         if (ret)
682                                 dev_err(atomisp_dev,
683                                                 "set page to WB err ...ret = %d\n",
684                                                         ret);
685                         /*
686                         W/A: set_pages_wb seldom returns -EFAULT, indicating
687                         that the page address is not in the valid range
688                         (0xffff880000000000~0xffffc7ffffffffff); __free_pages()
689                         would then panic. It is not known why the page address
690                         becomes invalid; it may be memory corruption by low memory.
691                         */
692                         if (!ret) {
693                                 __free_pages(bo->page_obj[i].page, 0);
694                                 hmm_mem_stat.sys_size--;
695                         }
696                         break;
697                 }
698         }
699
700         return;
701 }
702
703 /* Allocate pages which will be used only by the ISP */
704 static int alloc_private_pages(struct hmm_buffer_object *bo,
705                                 int from_highmem,
706                                 bool cached,
707                                 struct hmm_pool *dypool,
708                                 struct hmm_pool *repool)
709 {
710         int ret;
711         unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
712         struct page *pages;
713         gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
714         int i, j;
715         int failure_number = 0;
716         bool reduce_order = false;
717         bool lack_mem = true;
718
719         if (from_highmem)
720                 gfp |= __GFP_HIGHMEM;
721
722         pgnr = bo->pgnr;
723
724         bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
725                                 GFP_KERNEL);
726         if (unlikely(!bo->page_obj))
727                 return -ENOMEM;
728
729         i = 0;
730         alloc_pgnr = 0;
731
732         /*
733          * get physical pages from dynamic pages pool.
734          */
735         if (dypool->pops && dypool->pops->pool_alloc_pages) {
736                 alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
737                                                         bo->page_obj, pgnr,
738                                                         cached);
739                 hmm_mem_stat.dyc_size -= alloc_pgnr;
740
741                 if (alloc_pgnr == pgnr)
742                         return 0;
743         }
744
745         pgnr -= alloc_pgnr;
746         i += alloc_pgnr;
747
748         /*
749          * get physical pages from reserved pages pool for atomisp.
750          */
751         if (repool->pops && repool->pops->pool_alloc_pages) {
752                 alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
753                                                         &bo->page_obj[i], pgnr,
754                                                         cached);
755                 hmm_mem_stat.res_cnt += alloc_pgnr;
756                 if (alloc_pgnr == pgnr)
757                         return 0;
758         }
759
760         pgnr -= alloc_pgnr;
761         i += alloc_pgnr;
762
763         while (pgnr) {
764                 order = nr_to_order_bottom(pgnr);
765                 /*
766                  * if we are short of memory, set order to 0
767                  * every time.
768                  */
769                 if (lack_mem)
770                         order = HMM_MIN_ORDER;
771                 else if (order > HMM_MAX_ORDER)
772                         order = HMM_MAX_ORDER;
773 retry:
774                 /*
775                  * When order > HMM_MIN_ORDER, for performance reasons we don't
776                  * want alloc_pages() to sleep. In case it fails and falls back
777                  * to HMM_MIN_ORDER or in case the requested order is originally
778                  * the minimum value, we can allow alloc_pages() to sleep for
779                  * robustness purpose.
780                  *
781                  * REVISIT: why __GFP_FS is necessary?
782                  */
783                 if (order == HMM_MIN_ORDER) {
784                         gfp &= ~GFP_NOWAIT;
785                         gfp |= __GFP_RECLAIM | __GFP_FS;
786                 }
787
788                 pages = alloc_pages(gfp, order);
789                 if (unlikely(!pages)) {
790                         /*
791                          * in the low memory case, if page allocation fails,
792                          * we retry with order=0 to see whether that can
793                          * succeed; if order=0 fails too, that means there is
794                          * no memory left.
795                          */
796                         if (order == HMM_MIN_ORDER) {
797                                 dev_err(atomisp_dev,
798                                         "%s: cannot allocate pages\n",
799                                          __func__);
800                                 goto cleanup;
801                         }
802                         order = HMM_MIN_ORDER;
803                         failure_number++;
804                         reduce_order = true;
805                         /*
806                          * if allocation fails twice in a row, assume we are
807                          * short of memory now.
808                          */
809                         if (failure_number == 2) {
810                                 lack_mem = true;
811                                 failure_number = 0;
812                         }
813                         goto retry;
814                 } else {
815                         blk_pgnr = order_to_nr(order);
816
817                         if (!cached) {
818                                 /*
819                                  * set memory to uncacheable -- UC_MINUS
820                                  */
821                                 ret = set_pages_uc(pages, blk_pgnr);
822                                 if (ret) {
823                                         dev_err(atomisp_dev,
824                                                      "set page uncacheable"
825                                                         " failed.\n");
826
827                                         __free_pages(pages, order);
828
829                                         goto cleanup;
830                                 }
831                         }
832
833                         for (j = 0; j < blk_pgnr; j++) {
834                                 bo->page_obj[i].page = pages + j;
835                                 bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
836                         }
837
838                         pgnr -= blk_pgnr;
839                         hmm_mem_stat.sys_size += blk_pgnr;
840
841                         /*
842                          * if order is not reduced this time, clear
843                          * failure_number.
844                          */
845                         if (reduce_order)
846                                 reduce_order = false;
847                         else
848                                 failure_number = 0;
849                 }
850         }
851
852         return 0;
853 cleanup:
854         alloc_pgnr = i;
855         free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
856
857         kfree(bo->page_obj);
858
859         return -ENOMEM;
860 }
861
862 static void free_private_pages(struct hmm_buffer_object *bo,
863                                 struct hmm_pool *dypool,
864                                 struct hmm_pool *repool)
865 {
866         free_private_bo_pages(bo, dypool, repool, bo->pgnr);
867
868         kfree(bo->page_obj);
869 }
870
871 /*
872  * Hacked from kernel function __get_user_pages in mm/memory.c
873  *
874  * Handles buffers allocated by another kernel-space driver and mmapped into
875  * user space; this function ignores the VM_PFNMAP and VM_IO flags in the VMA.
876  *
877  * Gets the physical pages backing a user-space virtual address into the page list.
878  */
879 static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
880                               unsigned long start, int nr_pages,
881                               unsigned int gup_flags, struct page **pages,
882                               struct vm_area_struct **vmas)
883 {
884         int i, ret;
885         unsigned long vm_flags;
886
887         if (nr_pages <= 0)
888                 return 0;
889
890         VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
891
892         /*
893          * Require read or write permissions.
894          * If FOLL_FORCE is set, we only require the "MAY" flags.
895          */
896         vm_flags  = (gup_flags & FOLL_WRITE) ?
897                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
898         vm_flags &= (gup_flags & FOLL_FORCE) ?
899                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
900         i = 0;
901
902         do {
903                 struct vm_area_struct *vma;
904
905                 vma = find_vma(mm, start);
906                 if (!vma) {
907                         dev_err(atomisp_dev, "find_vma failed\n");
908                         return i ? : -EFAULT;
909                 }
910
911                 if (is_vm_hugetlb_page(vma)) {
912                         /*
913                         i = follow_hugetlb_page(mm, vma, pages, vmas,
914                                         &start, &nr_pages, i, gup_flags);
915                         */
916                         continue;
917                 }
918
919                 do {
920                         struct page *page;
921                         unsigned long pfn;
922
923                         /*
924                          * If we have a pending SIGKILL, don't keep faulting
925                          * pages and potentially allocating memory.
926                          */
927                         if (unlikely(fatal_signal_pending(current))) {
928                                 dev_err(atomisp_dev,
929                                         "fatal_signal_pending in %s\n",
930                                         __func__);
931                                 return i ? i : -ERESTARTSYS;
932                         }
933
934                         ret = follow_pfn(vma, start, &pfn);
935                         if (ret) {
936                                 dev_err(atomisp_dev, "follow_pfn() failed\n");
937                                 return i ? : -EFAULT;
938                         }
939
940                         page = pfn_to_page(pfn);
941                         if (IS_ERR(page))
942                                 return i ? i : PTR_ERR(page);
943                         if (pages) {
944                                 pages[i] = page;
945                                 get_page(page);
946                                 flush_anon_page(vma, page, start);
947                                 flush_dcache_page(page);
948                         }
949                         if (vmas)
950                                 vmas[i] = vma;
951                         i++;
952                         start += PAGE_SIZE;
953                         nr_pages--;
954                 } while (nr_pages && start < vma->vm_end);
955         } while (nr_pages);
956
957         return i;
958 }
959
960 static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
961                      unsigned long start, int nr_pages, int write, int force,
962                      struct page **pages, struct vm_area_struct **vmas)
963 {
964         int flags = FOLL_TOUCH;
965
966         if (pages)
967                 flags |= FOLL_GET;
968         if (write)
969                 flags |= FOLL_WRITE;
970         if (force)
971                 flags |= FOLL_FORCE;
972
973         return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
974 }
975
976 /*
977  * Convert user space virtual address into pages list
978  */
979 static int alloc_user_pages(struct hmm_buffer_object *bo,
980                               void *userptr, bool cached)
981 {
982         int page_nr;
983         int i;
984         struct vm_area_struct *vma;
985         struct page **pages;
986
987         pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
988         if (unlikely(!pages))
989                 return -ENOMEM;
990
991         bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
992                 GFP_KERNEL);
993         if (unlikely(!bo->page_obj)) {
994                 kfree(pages);
995                 return -ENOMEM;
996         }
997
998         mutex_unlock(&bo->mutex);
999         down_read(&current->mm->mmap_sem);
1000         vma = find_vma(current->mm, (unsigned long)userptr);
1001         up_read(&current->mm->mmap_sem);
1002         if (vma == NULL) {
1003                 dev_err(atomisp_dev, "find_vma failed\n");
1004                 kfree(bo->page_obj);
1005                 kfree(pages);
1006                 mutex_lock(&bo->mutex);
1007                 return -EFAULT;
1008         }
1009         mutex_lock(&bo->mutex);
1010         /*
1011          * Handle frame buffers allocated by another kernel-space driver
1012          * and mapped into user space
1013          */
1014         if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1015                 page_nr = get_pfnmap_pages(current, current->mm,
1016                                            (unsigned long)userptr,
1017                                            (int)(bo->pgnr), 1, 0,
1018                                            pages, NULL);
1019                 bo->mem_type = HMM_BO_MEM_TYPE_PFN;
1020         } else {
1021                 /* Handle frame buffers allocated in user space */
1022                 mutex_unlock(&bo->mutex);
1023                 page_nr = get_user_pages_fast((unsigned long)userptr,
1024                                          (int)(bo->pgnr), 1, pages);
1025                 mutex_lock(&bo->mutex);
1026                 bo->mem_type = HMM_BO_MEM_TYPE_USER;
1027         }
1028
1029         /* can be written by caller, not forced */
1030         if (page_nr != bo->pgnr) {
1031                 dev_err(atomisp_dev,
1032                                 "get_user_pages err: bo->pgnr = %d, "
1033                                 "pgnr actually pinned = %d.\n",
1034                                 bo->pgnr, page_nr);
1035                 goto out_of_mem;
1036         }
1037
1038         for (i = 0; i < bo->pgnr; i++) {
1039                 bo->page_obj[i].page = pages[i];
1040                 bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
1041         }
1042         hmm_mem_stat.usr_size += bo->pgnr;
1043         kfree(pages);
1044
1045         return 0;
1046
1047 out_of_mem:
1048         for (i = 0; i < page_nr; i++)
1049                 put_page(pages[i]);
1050         kfree(pages);
1051         kfree(bo->page_obj);
1052
1053         return -ENOMEM;
1054 }
1055
1056 static void free_user_pages(struct hmm_buffer_object *bo)
1057 {
1058         int i;
1059
1060         for (i = 0; i < bo->pgnr; i++)
1061                 put_page(bo->page_obj[i].page);
1062         hmm_mem_stat.usr_size -= bo->pgnr;
1063
1064         kfree(bo->page_obj);
1065 }
1066
1067 /*
1068  * allocate/free physical pages for the bo.
1069  *
1070  * type indicates where the pages come from. currently we have 3 types
1071  * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
1072  *
1073  * from_highmem is only valid when type is HMM_BO_PRIVATE, it will
1074  * try to alloc memory from highmem if from_highmem is set.
1075  *
1076  * userptr is only valid when type is HMM_BO_USER, it indicates
1077  * the start address from user space task.
1078  *
1079  * from_highmem and userptr will both be ignored when type is
1080  * HMM_BO_SHARE.
1081  */
1082 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
1083                        enum hmm_bo_type type, int from_highmem,
1084                        void *userptr, bool cached)
1085 {
1086         int ret = -EINVAL;
1087
1088         check_bo_null_return(bo, -EINVAL);
1089
1090         mutex_lock(&bo->mutex);
1091         check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1092
1093         /*
1094          * TO DO:
1095          * add HMM_BO_SHARE type; HMM_BO_USER is already handled below
1096          */
1097         if (type == HMM_BO_PRIVATE) {
1098                 ret = alloc_private_pages(bo, from_highmem,
1099                                 cached, &dynamic_pool, &reserved_pool);
1100         } else if (type == HMM_BO_USER) {
1101                 ret = alloc_user_pages(bo, userptr, cached);
1102         } else {
1103                 dev_err(atomisp_dev, "invalid buffer type.\n");
1104                 ret = -EINVAL;
1105         }
1106         if (ret)
1107                 goto alloc_err;
1108
1109         bo->type = type;
1110
1111         bo->status |= HMM_BO_PAGE_ALLOCED;
1112
1113         mutex_unlock(&bo->mutex);
1114
1115         return 0;
1116
1117 alloc_err:
1118         mutex_unlock(&bo->mutex);
1119         dev_err(atomisp_dev, "alloc pages err...\n");
1120         return ret;
1121 status_err:
1122         mutex_unlock(&bo->mutex);
1123         dev_err(atomisp_dev,
1124                         "buffer object already has pages allocated.\n");
1125         return -EINVAL;
1126 }
1127
1128 /*
1129  * free physical pages of the bo.
1130  */
1131 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1132 {
1133         check_bo_null_return_void(bo);
1134
1135         mutex_lock(&bo->mutex);
1136
1137         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1138
1139         /* clear the flag anyway. */
1140         bo->status &= (~HMM_BO_PAGE_ALLOCED);
1141
1142         if (bo->type == HMM_BO_PRIVATE)
1143                 free_private_pages(bo, &dynamic_pool, &reserved_pool);
1144         else if (bo->type == HMM_BO_USER)
1145                 free_user_pages(bo);
1146         else
1147                 dev_err(atomisp_dev, "invalid buffer type.\n");
1148         mutex_unlock(&bo->mutex);
1149
1150         return;
1151
1152 status_err2:
1153         mutex_unlock(&bo->mutex);
1154         dev_err(atomisp_dev,
1155                         "buffer object has no pages allocated yet.\n");
1156 }
1157
1158 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1159 {
1160         check_bo_null_return(bo, 0);
1161
1162         return bo->status & HMM_BO_PAGE_ALLOCED;
1163 }
1164
1165 /*
1166  * get physical page info of the bo.
1167  */
1168 int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1169                          struct hmm_page_object **page_obj, int *pgnr)
1170 {
1171         check_bo_null_return(bo, -EINVAL);
1172
1173         mutex_lock(&bo->mutex);
1174
1175         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1176
1177         *page_obj = bo->page_obj;
1178         *pgnr = bo->pgnr;
1179
1180         mutex_unlock(&bo->mutex);
1181
1182         return 0;
1183
1184 status_err:
1185         dev_err(atomisp_dev,
1186                         "buffer object has no pages allocated yet.\n");
1187         mutex_unlock(&bo->mutex);
1188         return -EINVAL;
1189 }
1190
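/*
 * Binding programs the ISP MMU: every page of the bo is mapped at its
 * ISP virtual address (bo->start .. bo->end) via isp_mmu_map(), and the
 * corresponding TLB range is flushed.  A bo needs both its virtual
 * range (hmm_bo_alloc) and its backing pages (hmm_bo_alloc_pages)
 * before it can be bound.
 */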
1191 /*
1192  * bind the physical pages to a virtual address space.
1193  */
1194 int hmm_bo_bind(struct hmm_buffer_object *bo)
1195 {
1196         int ret;
1197         unsigned int virt;
1198         struct hmm_bo_device *bdev;
1199         unsigned int i;
1200
1201         check_bo_null_return(bo, -EINVAL);
1202
1203         mutex_lock(&bo->mutex);
1204
1205         check_bo_status_yes_goto(bo,
1206                                    HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1207                                    status_err1);
1208
1209         check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1210
1211         bdev = bo->bdev;
1212
1213         virt = bo->start;
1214
1215         for (i = 0; i < bo->pgnr; i++) {
1216                 ret =
1217                     isp_mmu_map(&bdev->mmu, virt,
1218                                 page_to_phys(bo->page_obj[i].page), 1);
1219                 if (ret)
1220                         goto map_err;
1221                 virt += (1 << PAGE_SHIFT);
1222         }
1223
1224         /*
1225          * flush the TLB here.
1226          *
1227          * theoretically, we do not need to flush the TLB as we did not
1228          * change any existing address mappings, but Silicon Hive's MMU
1229          * really has a bug here. I guess when fetching PTEs (page table
1230          * entries) into the TLB, the MMU fetches additional INVALID PTEs
1231          * automatically for performance reasons. E.g., we only set up 1
1232          * page address mapping, i.e. update 1 PTE, but the MMU fetches 4
1233          * PTEs at a time, so the additional 3 PTEs are invalid.
1234          */
1235         if (bo->start != 0x0)
1236                 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1237                                                 (bo->pgnr << PAGE_SHIFT));
1238
1239         bo->status |= HMM_BO_BINDED;
1240
1241         mutex_unlock(&bo->mutex);
1242
1243         return 0;
1244
1245 map_err:
1246         /* unbind the physical pages with related virtual address space */
1247         virt = bo->start;
1248         for ( ; i > 0; i--) {
1249                 isp_mmu_unmap(&bdev->mmu, virt, 1);
1250                 virt += pgnr_to_size(1);
1251         }
1252
1253         mutex_unlock(&bo->mutex);
1254         dev_err(atomisp_dev,
1255                         "setup MMU address mapping failed.\n");
1256         return ret;
1257
1258 status_err2:
1259         mutex_unlock(&bo->mutex);
1260         dev_err(atomisp_dev, "buffer object already bound.\n");
1261         return -EINVAL;
1262 status_err1:
1263         mutex_unlock(&bo->mutex);
1264         dev_err(atomisp_dev,
1265                      "buffer object vm_node or page not allocated.\n");
1266         return -EINVAL;
1267 }
1268
1269 /*
1270  * unbind the physical pages with related virtual address space.
1271  */
1272 void hmm_bo_unbind(struct hmm_buffer_object *bo)
1273 {
1274         unsigned int virt;
1275         struct hmm_bo_device *bdev;
1276         unsigned int i;
1277
1278         check_bo_null_return_void(bo);
1279
1280         mutex_lock(&bo->mutex);
1281
1282         check_bo_status_yes_goto(bo,
1283                                    HMM_BO_PAGE_ALLOCED |
1284                                    HMM_BO_ALLOCED |
1285                                    HMM_BO_BINDED, status_err);
1286
1287         bdev = bo->bdev;
1288
1289         virt = bo->start;
1290
1291         for (i = 0; i < bo->pgnr; i++) {
1292                 isp_mmu_unmap(&bdev->mmu, virt, 1);
1293                 virt += pgnr_to_size(1);
1294         }
1295
1296         /*
1297          * flush TLB as the address mapping has been removed and
1298          * related TLBs should be invalidated.
1299          */
1300         isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1301                                 (bo->pgnr << PAGE_SHIFT));
1302
1303         bo->status &= (~HMM_BO_BINDED);
1304
1305         mutex_unlock(&bo->mutex);
1306
1307         return;
1308
1309 status_err:
1310         mutex_unlock(&bo->mutex);
1311         dev_err(atomisp_dev,
1312                      "buffer vm or pages not allocated or not bound yet.\n");
1313 }
1314
1315 int hmm_bo_binded(struct hmm_buffer_object *bo)
1316 {
1317         int ret;
1318
1319         check_bo_null_return(bo, 0);
1320
1321         mutex_lock(&bo->mutex);
1322
1323         ret = bo->status & HMM_BO_BINDED;
1324
1325         mutex_unlock(&bo->mutex);
1326
1327         return ret;
1328 }
1329
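/*
 * Map the bo's pages into a contiguous kernel virtual address with
 * vmap().  The mapping is cached (PAGE_KERNEL) or uncached
 * (PAGE_KERNEL_NOCACHE) depending on 'cached'; requesting the other
 * caching mode on an already-mapped bo re-vmaps it.  Use
 * hmm_bo_flush_vmap() after CPU writes to a cached mapping and
 * hmm_bo_vunmap() to tear the mapping down.
 */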
1330 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1331 {
1332         struct page **pages;
1333         int i;
1334
1335         check_bo_null_return(bo, NULL);
1336
1337         mutex_lock(&bo->mutex);
1338         if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1339             ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1340                 mutex_unlock(&bo->mutex);
1341                 return bo->vmap_addr;
1342         }
1343
1344         /* cached status needs to be changed, so vunmap first */
1345         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1346                 vunmap(bo->vmap_addr);
1347                 bo->vmap_addr = NULL;
1348                 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1349         }
1350
1351         pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
1352         if (unlikely(!pages)) {
1353                 mutex_unlock(&bo->mutex);
1354                 return NULL;
1355         }
1356
1357         for (i = 0; i < bo->pgnr; i++)
1358                 pages[i] = bo->page_obj[i].page;
1359
1360         bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1361                 cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1362         if (unlikely(!bo->vmap_addr)) {
1363                 kfree(pages);
1364                 mutex_unlock(&bo->mutex);
1365                 dev_err(atomisp_dev, "vmap failed...\n");
1366                 return NULL;
1367         }
1368         bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1369
1370         kfree(pages);
1371
1372         mutex_unlock(&bo->mutex);
1373         return bo->vmap_addr;
1374 }
1375
1376 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1377 {
1378         check_bo_null_return_void(bo);
1379
1380         mutex_lock(&bo->mutex);
1381         if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1382                 mutex_unlock(&bo->mutex);
1383                 return;
1384         }
1385
1386         clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1387         mutex_unlock(&bo->mutex);
1388 }
1389
1390 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1391 {
1392         check_bo_null_return_void(bo);
1393
1394         mutex_lock(&bo->mutex);
1395         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1396                 vunmap(bo->vmap_addr);
1397                 bo->vmap_addr = NULL;
1398                 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1399         }
1400
1401         mutex_unlock(&bo->mutex);
1402         return;
1403 }
1404
1405 void hmm_bo_ref(struct hmm_buffer_object *bo)
1406 {
1407         check_bo_null_return_void(bo);
1408
1409         kref_get(&bo->kref);
1410 }
1411
1412 static void kref_hmm_bo_release(struct kref *kref)
1413 {
1414         if (!kref)
1415                 return;
1416
1417         hmm_bo_release(kref_to_hmm_bo(kref));
1418 }
1419
1420 void hmm_bo_unref(struct hmm_buffer_object *bo)
1421 {
1422         check_bo_null_return_void(bo);
1423
1424         kref_put(&bo->kref, kref_hmm_bo_release);
1425 }
1426
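/*
 * vm_ops open/close keep bo->mmap_count and the bo kref in step with
 * the number of userspace VMAs referencing the bo; HMM_BO_MMAPED is
 * cleared when the last mapping disappears.
 */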
1427 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1428 {
1429         struct hmm_buffer_object *bo =
1430             (struct hmm_buffer_object *)vma->vm_private_data;
1431
1432         check_bo_null_return_void(bo);
1433
1434         hmm_bo_ref(bo);
1435
1436         mutex_lock(&bo->mutex);
1437
1438         bo->status |= HMM_BO_MMAPED;
1439
1440         bo->mmap_count++;
1441
1442         mutex_unlock(&bo->mutex);
1443 }
1444
1445 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1446 {
1447         struct hmm_buffer_object *bo =
1448             (struct hmm_buffer_object *)vma->vm_private_data;
1449
1450         check_bo_null_return_void(bo);
1451
1452         hmm_bo_unref(bo);
1453
1454         mutex_lock(&bo->mutex);
1455
1456         bo->mmap_count--;
1457
1458         if (!bo->mmap_count) {
1459                 bo->status &= (~HMM_BO_MMAPED);
1460                 vma->vm_private_data = NULL;
1461         }
1462
1463         mutex_unlock(&bo->mutex);
1464 }
1465
1466 static const struct vm_operations_struct hmm_bo_vm_ops = {
1467         .open = hmm_bo_vm_open,
1468         .close = hmm_bo_vm_close,
1469 };
1470
1471 /*
1472  * mmap the bo to user space.
1473  */
1474 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1475 {
1476         unsigned int start, end;
1477         unsigned int virt;
1478         unsigned int pgnr, i;
1479         unsigned int pfn;
1480
1481         check_bo_null_return(bo, -EINVAL);
1482
1483         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1484
1485         pgnr = bo->pgnr;
1486         start = vma->vm_start;
1487         end = vma->vm_end;
1488
1489         /*
1490          * check the vma's virtual address space size and the buffer
1491          * object's size; they must be the same.
1492          */
1493         if ((start + pgnr_to_size(pgnr)) != end) {
1494                 dev_warn(atomisp_dev,
1495                              "vma's address space size not equal"
1496                              " to buffer object's size");
1497                 return -EINVAL;
1498         }
1499
1500         virt = vma->vm_start;
1501         for (i = 0; i < pgnr; i++) {
1502                 pfn = page_to_pfn(bo->page_obj[i].page);
1503                 if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1504                         dev_warn(atomisp_dev,
1505                                         "remap_pfn_range failed:"
1506                                         " virt = 0x%x, pfn = 0x%x,"
1507                                         " mapped_pgnr = %d\n", virt, pfn, 1);
1508                         return -EINVAL;
1509                 }
1510                 virt += PAGE_SIZE;
1511         }
1512
1513         vma->vm_private_data = bo;
1514
1515         vma->vm_ops = &hmm_bo_vm_ops;
1516         vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
1517
1518         /*
1519          * call hmm_bo_vm_open explicitly.
1520          */
1521         hmm_bo_vm_open(vma);
1522
1523         return 0;
1524
1525 status_err:
1526         dev_err(atomisp_dev, "buffer page not allocated yet.\n");
1527         return -EINVAL;
1528 }