/*
 * Virtual Contiguous Memory core
 * Copyright (c) 2010 by Samsung Electronics.
 * Written by Michal Nazarewicz (m.nazarewicz@samsung.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 *
 * See Documentation/virtual-contiguous-memory.txt for details.
 */

#include <linux/vcm-drv.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>

/******************************** Devices API *******************************/

void vcm_destroy(struct vcm *vcm)
{
	if (WARN_ON(atomic_read(&vcm->activations)))
		vcm->driver->deactivate(vcm);

	if (vcm->driver->cleanup)
		vcm->driver->cleanup(vcm);
	else
		kfree(vcm);
}
EXPORT_SYMBOL_GPL(vcm_destroy);

static int __must_check
__vcm_alloc_and_reserve(struct vcm *vcm, resource_size_t size,
			struct vcm_phys **phys, unsigned alloc_flags,
			struct vcm_res **res, unsigned res_flags)
{
	int ret;

	if (WARN_ON(!vcm) || !size) {
		ret = -EINVAL;
		goto error;
	}

	size = PAGE_ALIGN(size);

	if (vcm->driver->alloc) {
		ret = vcm->driver->alloc(vcm, size,
					 phys, alloc_flags, res, res_flags);
		if (ret)
			goto error;
		return 0;
	} else if ((res && !vcm->driver->res) || (phys && !vcm->driver->phys)) {
		ret = -EOPNOTSUPP;
		goto error;
	}

	if (res) {
		*res = vcm->driver->res(vcm, size, res_flags);
		if (IS_ERR(*res)) {
			ret = PTR_ERR(*res);
			goto error;
		}
		(*res)->bound_size = 0;
		(*res)->vcm = vcm;
#ifdef CONFIG_VCM_RES_REFCNT
		atomic_set(&(*res)->refcnt, 1);
#endif
	}

	if (phys) {
		*phys = vcm->driver->phys(vcm, size, alloc_flags);
		if (WARN_ON(!IS_ERR(*phys) && !(*phys)->free))
			*phys = ERR_PTR(-EINVAL);
		if (IS_ERR(*phys)) {
			ret = PTR_ERR(*phys);
			goto error_res;
		}
		atomic_set(&(*phys)->bindings, 0);
	}

	return 0;

error_res:
	if (res)
		vcm_unreserve(*res);
error:
	if (res)
		*res = ERR_PTR(ret);
	if (phys)
		*phys = ERR_PTR(ret);
	return ret;
}

struct vcm_res *__must_check
vcm_make_binding(struct vcm *vcm, resource_size_t size,
		 unsigned alloc_flags, unsigned res_flags)
{
	struct vcm_phys *phys;
	struct vcm_res *res;
	int ret;

	if (WARN_ON(!vcm || !size || (size & (PAGE_SIZE - 1))))
		return ERR_PTR(-EINVAL);
	else if (vcm->driver->alloc || !vcm->driver->map) {
		ret = __vcm_alloc_and_reserve(vcm, size, &phys, alloc_flags,
					      &res, res_flags);
		if (ret)
			return ERR_PTR(ret);

		ret = vcm_bind(res, phys);
		if (!ret)
			return res;

		if (vcm->driver->unreserve)
			vcm->driver->unreserve(res);
		vcm_free(phys);
		return ERR_PTR(ret);
	} else {
		ret = __vcm_alloc_and_reserve(vcm, size, &phys, alloc_flags,
					      NULL, 0);
		if (ret)
			return ERR_PTR(ret);

		res = vcm_map(vcm, phys, res_flags);
		if (IS_ERR(res))
			vcm_free(phys);
		return res;
	}
}
EXPORT_SYMBOL_GPL(vcm_make_binding);
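
/*
 * Example (illustrative sketch, not part of this file): the one-call
 * path for a driver that simply needs memory visible to its device.
 * The "dev_vcm" context below is a hypothetical, already-initialised
 * and activated VCM; error handling is abbreviated.
 *
 *	struct vcm_res *res;
 *
 *	res = vcm_make_binding(dev_vcm, 1 << 20, 0, 0);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 *	(the device may now use [res->start, res->start + (1 << 20)))
 *
 *	vcm_destroy_binding(res);
 */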

struct vcm_phys *__must_check
vcm_alloc(struct vcm *vcm, resource_size_t size, unsigned flags)
{
	struct vcm_phys *phys;
	int ret =
		__vcm_alloc_and_reserve(vcm, size, &phys, flags, NULL, 0);

	return ret ? ERR_PTR(ret) : phys;
}
EXPORT_SYMBOL_GPL(vcm_alloc);

struct vcm_res *__must_check
vcm_reserve(struct vcm *vcm, resource_size_t size, unsigned flags)
{
	struct vcm_res *res;
	int ret =
		__vcm_alloc_and_reserve(vcm, size, NULL, 0, &res, flags);

	return ret ? ERR_PTR(ret) : res;
}
EXPORT_SYMBOL_GPL(vcm_reserve);

#ifdef CONFIG_VCM_RES_REFCNT
int __must_check vcm_ref_reserve(struct vcm_res *res)
{
	if (WARN_ON(!res) || (atomic_inc_return(&res->refcnt) < 2))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(vcm_ref_reserve);
#endif

struct vcm_res *__must_check
vcm_map(struct vcm *vcm, struct vcm_phys *phys, unsigned flags)
{
	struct vcm_res *res;
	int ret;

	if (WARN_ON(!vcm || !phys))
		return ERR_PTR(-EINVAL);

	if (vcm->driver->map) {
		res = vcm->driver->map(vcm, phys, flags);
		if (!IS_ERR(res)) {
			atomic_inc(&phys->bindings);
			res->phys = phys;
			res->bound_size = phys->size;
			res->vcm = vcm;
#ifdef CONFIG_VCM_RES_REFCNT
			atomic_set(&res->refcnt, 1);
#endif
		}
		return res;
	}

	res = vcm_reserve(vcm, phys->size, flags);
	if (IS_ERR(res))
		return res;

	ret = vcm_bind(res, phys);
	if (!ret)
		return res;

	vcm_unreserve(res);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vcm_map);
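
/*
 * Example (illustrative): one physical allocation mapped into two
 * address spaces, e.g. a buffer shared by two devices.  "vcm_a" and
 * "vcm_b" are hypothetical VCM contexts; error handling is omitted.
 *
 *	struct vcm_phys *phys = vcm_alloc(vcm_a, size, 0);
 *	struct vcm_res *ra = vcm_map(vcm_a, phys, 0);	(bindings == 1)
 *	struct vcm_res *rb = vcm_map(vcm_b, phys, 0);	(bindings == 2)
 *	...
 *	vcm_unmap(rb);
 *	vcm_unmap(ra);
 *	vcm_free(phys);		(allowed only once bindings drops to 0)
 */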

void vcm_unreserve(struct vcm_res *res)
{
	if (!WARN_ON(!res)) {
#ifdef CONFIG_VCM_RES_REFCNT
		if (!atomic_dec_and_test(&res->refcnt))
			return;
#endif
		if (WARN_ON(res->phys))
			vcm_unbind(res);
		if (!WARN_ON_ONCE(!res->vcm->driver->unreserve))
			res->vcm->driver->unreserve(res);
	}
}
EXPORT_SYMBOL_GPL(vcm_unreserve);

void vcm_free(struct vcm_phys *phys)
{
	if (!WARN_ON(!phys || atomic_read(&phys->bindings)))
		phys->free(phys);
}
EXPORT_SYMBOL_GPL(vcm_free);

int __must_check vcm_bind(struct vcm_res *res, struct vcm_phys *phys)
{
	int ret;

	if (WARN_ON(!res || !phys))
		return -EINVAL;

	if (res->phys == phys)
		return -EALREADY;

	if (res->phys)
		return -EADDRINUSE;

	if (phys->size > res->res_size)
		return -ENOSPC;

	if (!res->vcm->driver->bind)
		return -EOPNOTSUPP;

	ret = res->vcm->driver->bind(res, phys);
	if (!ret) {
		atomic_inc(&phys->bindings);
		res->phys = phys;
		res->bound_size = phys->size;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vcm_bind);
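
/*
 * Example (illustrative): the same effect as vcm_make_binding() built
 * from the separate primitives, which lets the reservation outlive the
 * memory bound into it.  "dev_vcm" is hypothetical; error handling is
 * abbreviated.
 *
 *	struct vcm_res *res = vcm_reserve(dev_vcm, 1 << 20, 0);
 *	struct vcm_phys *phys = vcm_alloc(dev_vcm, 1 << 20, 0);
 *	int ret = vcm_bind(res, phys);
 *	if (ret)
 *		(unwind with vcm_free() and vcm_unreserve())
 *	...
 *	vcm_free(vcm_unbind(res));	(res stays valid for rebinding)
 *	vcm_unreserve(res);
 */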

struct vcm_phys *vcm_unbind(struct vcm_res *res)
{
	struct vcm_phys *phys = NULL;
	if (!WARN_ON(!res || !res->phys)) {
		phys = res->phys;
		if (res->vcm->driver->unbind)
			res->vcm->driver->unbind(res);
		WARN_ON(!atomic_add_unless(&phys->bindings, -1, 0));
		res->phys = NULL;
		res->bound_size = 0;
	}
	return phys;
}
EXPORT_SYMBOL_GPL(vcm_unbind);

void vcm_unmap(struct vcm_res *res)
{
#ifdef CONFIG_VCM_RES_REFCNT
	if (atomic_read(&res->refcnt) > 1) {
		atomic_dec(&res->refcnt);
		return;
	}
#endif
	vcm_unbind(res);
	vcm_unreserve(res);
}
EXPORT_SYMBOL_GPL(vcm_unmap);

void vcm_destroy_binding(struct vcm_res *res)
{
	if (!WARN_ON(!res)) {
		struct vcm_phys *phys;
#ifdef CONFIG_VCM_RES_REFCNT
		if (atomic_read(&res->refcnt) > 1) {
			atomic_dec(&res->refcnt);
			return;
		}
#endif
		phys = vcm_unbind(res);
		if (phys)
			vcm_free(phys);
		vcm_unreserve(res);
	}
}
EXPORT_SYMBOL_GPL(vcm_destroy_binding);

int __must_check vcm_activate(struct vcm *vcm)
{
	if (WARN_ON(!vcm))
		return -EINVAL;
	else if (atomic_inc_return(&vcm->activations) != 1
	      || !vcm->driver->activate)
		return 0;
	else
		return vcm->driver->activate(vcm);
}
EXPORT_SYMBOL_GPL(vcm_activate);

void vcm_deactivate(struct vcm *vcm)
{
	if (!WARN_ON(!vcm || !atomic_read(&vcm->activations))
	 && atomic_dec_and_test(&vcm->activations)
	 && vcm->driver->deactivate)
		vcm->driver->deactivate(vcm);
}
EXPORT_SYMBOL_GPL(vcm_deactivate);
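
/*
 * Example (illustrative): activations nest, so independent users can
 * bracket their use of the same address space without coordinating:
 *
 *	ret = vcm_activate(vcm);	(0 -> 1: driver->activate runs)
 *	ret = vcm_activate(vcm);	(1 -> 2: no driver call)
 *	vcm_deactivate(vcm);		(2 -> 1: still active)
 *	vcm_deactivate(vcm);		(1 -> 0: driver->deactivate runs)
 */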

/****************************** VCM VMM driver ******************************/

static void vcm_vmm_cleanup(struct vcm *vcm)
{
	/* This should never be called. vcm_vmm is a static object. */
}

static struct vcm_phys *
vcm_vmm_phys(struct vcm *vcm, resource_size_t size, unsigned flags)
{
	static const unsigned char orders[] = { 0 };
	return vcm_phys_alloc(size, flags, orders);
}

static void vcm_vmm_unreserve(struct vcm_res *res)
{
	kfree(res);
}

static struct vcm_res *vcm_vmm_map(struct vcm *vcm, struct vcm_phys *phys,
				   unsigned flags)
{
	/*
	 * Original implementation written by Cho KyongHo
	 * (pullip.cho@samsung.com). Later rewritten by mina86.
	 */
	struct vcm_phys_part *part;
	struct page **pages, **p;
	struct vcm_res *res;
	unsigned i;
	int ret;

	pages = kzalloc((phys->size >> PAGE_SHIFT) * sizeof *pages, GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	p = pages;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		ret = -ENOMEM;
		goto error_pages;
	}

	part = phys->parts;
	i = phys->count;
	do {
		unsigned j = part->size >> PAGE_SHIFT;
		struct page *page = part->page;
		do {
			*p++ = page++;
		} while (--j);
	} while (++part, --i);

	res->start = (dma_addr_t)vmap(pages, p - pages, VM_ALLOC, PAGE_KERNEL);
	if (!res->start) {
		ret = -ENOMEM;
		goto error_res;
	}

	res->res_size = phys->size;
	kfree(pages);
	return res;

error_res:
	kfree(res);
error_pages:
	kfree(pages);
	return ERR_PTR(ret);
}

static void vcm_vmm_unbind(struct vcm_res *res)
{
	vunmap((void *)res->start);
}

static int vcm_vmm_activate(struct vcm *vcm)
{
	/* no operation, all bindings are immediately active */
	return 0;
}

static void vcm_vmm_deactivate(struct vcm *vcm)
{
	/*
	 * no operation, all bindings are immediately active and
	 * cannot be deactivated unless unbound.
	 */
}

struct vcm vcm_vmm[1] = { {
	.size = ~(resource_size_t)0,
	/* prevent activate/deactivate from being called */
	.activations = ATOMIC_INIT(1),
	.driver = &(const struct vcm_driver) {
		.cleanup	= vcm_vmm_cleanup,
		.map		= vcm_vmm_map,
		.phys		= vcm_vmm_phys,
		.unbind		= vcm_vmm_unbind,
		.unreserve	= vcm_vmm_unreserve,
		.activate	= vcm_vmm_activate,
		.deactivate	= vcm_vmm_deactivate,
	},
} };
EXPORT_SYMBOL_GPL(vcm_vmm);
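
/*
 * Example (illustrative): vcm_vmm makes res->start usable as a kernel
 * virtual address, since its map operation is backed by vmap().  Error
 * handling is omitted.
 *
 *	struct vcm_phys *phys = vcm_alloc(vcm_vmm, 4 << PAGE_SHIFT, 0);
 *	struct vcm_res *res = vcm_map(vcm_vmm, phys, 0);
 *	void *va = (void *)res->start;
 *
 *	memset(va, 0, 4 << PAGE_SHIFT);
 *
 *	vcm_unmap(res);
 *	vcm_free(phys);
 */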

/****************************** VCM Drivers API *****************************/

struct vcm *__must_check vcm_init(struct vcm *vcm)
{
	if (WARN_ON(!vcm || !vcm->size
		 || ((vcm->start | vcm->size) & ~PAGE_MASK)
		 || !vcm->driver || !vcm->driver->unreserve))
		return ERR_PTR(-EINVAL);

	atomic_set(&vcm->activations, 0);

	return vcm;
}
EXPORT_SYMBOL_GPL(vcm_init);

/*************************** Hardware MMU wrapper ***************************/

#ifdef CONFIG_VCM_MMU

struct vcm_mmu_res {
	struct vcm_res		res;
	struct list_head	bound;
};

static void vcm_mmu_cleanup(struct vcm *vcm)
{
	struct vcm_mmu *mmu = container_of(vcm, struct vcm_mmu, vcm);
	WARN_ON(spin_is_locked(&mmu->lock) || !list_empty(&mmu->bound_res));
	gen_pool_destroy(mmu->pool);
	if (mmu->driver->cleanup)
		mmu->driver->cleanup(vcm);
	else
		kfree(mmu);
}

static struct vcm_res *
vcm_mmu_res(struct vcm *vcm, resource_size_t size, unsigned flags)
{
	struct vcm_mmu *mmu = container_of(vcm, struct vcm_mmu, vcm);
	const unsigned char *orders;
	struct vcm_mmu_res *res;
	dma_addr_t addr;
	unsigned order;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	order = ffs(size) - PAGE_SHIFT - 1;
	for (orders = mmu->driver->orders; *orders > order; ++orders)
		/* nop */;
	order = *orders + PAGE_SHIFT;

	addr = gen_pool_alloc_aligned(mmu->pool, size, order);
	if (!addr) {
		kfree(res);
		return ERR_PTR(-ENOSPC);
	}

	INIT_LIST_HEAD(&res->bound);
	res->res.start = addr;
	res->res.res_size = size;

	return &res->res;
}

static struct vcm_phys *
vcm_mmu_phys(struct vcm *vcm, resource_size_t size, unsigned flags)
{
	return vcm_phys_alloc(size, flags,
			      container_of(vcm, struct vcm_mmu,
					   vcm)->driver->orders);
}

static int __must_check
__vcm_mmu_activate(struct vcm_res *res, struct vcm_phys *phys)
{
	struct vcm_mmu *mmu = container_of(res->vcm, struct vcm_mmu, vcm);
	if (mmu->driver->activate)
		return mmu->driver->activate(res, phys);

	return vcm_phys_walk(res->start, phys, mmu->driver->orders,
			     mmu->driver->activate_page,
			     mmu->driver->deactivate_page, res->vcm);
}

static void __vcm_mmu_deactivate(struct vcm_res *res, struct vcm_phys *phys)
{
	struct vcm_mmu *mmu = container_of(res->vcm, struct vcm_mmu, vcm);
	if (mmu->driver->deactivate) {
		mmu->driver->deactivate(res, phys);
		return;
	}

	vcm_phys_walk(res->start, phys, mmu->driver->orders,
		      mmu->driver->deactivate_page, NULL, res->vcm);
}

static int vcm_mmu_bind(struct vcm_res *_res, struct vcm_phys *phys)
{
	struct vcm_mmu_res *res = container_of(_res, struct vcm_mmu_res, res);
	struct vcm_mmu *mmu = container_of(_res->vcm, struct vcm_mmu, vcm);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);
	if (mmu->activated) {
		ret = __vcm_mmu_activate(_res, phys);
		if (ret)
			goto done;
	}

	list_add_tail(&res->bound, &mmu->bound_res);
	ret = 0;
done:
	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void vcm_mmu_unbind(struct vcm_res *_res)
{
	struct vcm_mmu_res *res = container_of(_res, struct vcm_mmu_res, res);
	struct vcm_mmu *mmu = container_of(_res->vcm, struct vcm_mmu, vcm);
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);
	if (mmu->activated)
		__vcm_mmu_deactivate(_res, _res->phys);
	list_del_init(&res->bound);
	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void vcm_mmu_unreserve(struct vcm_res *res)
{
	struct vcm_mmu *mmu = container_of(res->vcm, struct vcm_mmu, vcm);
	gen_pool_free(mmu->pool, res->start, res->res_size);
	kfree(container_of(res, struct vcm_mmu_res, res));
}

static int vcm_mmu_activate(struct vcm *vcm)
{
	struct vcm_mmu *mmu = container_of(vcm, struct vcm_mmu, vcm);
	struct vcm_mmu_res *r, *rr;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&mmu->lock, flags);

	list_for_each_entry(r, &mmu->bound_res, bound) {
		ret = __vcm_mmu_activate(&r->res, r->res.phys);
		if (!ret)
			continue;

		/* Roll back the bindings activated so far */
		list_for_each_entry(rr, &mmu->bound_res, bound) {
			if (rr == r)
				break;
			__vcm_mmu_deactivate(&rr->res, rr->res.phys);
		}
		goto done;
	}

	mmu->activated = 1;
done:
	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void vcm_mmu_deactivate(struct vcm *vcm)
{
	struct vcm_mmu *mmu = container_of(vcm, struct vcm_mmu, vcm);
	struct vcm_mmu_res *r;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	mmu->activated = 0;

	list_for_each_entry(r, &mmu->bound_res, bound)
		__vcm_mmu_deactivate(&r->res, r->res.phys);

	spin_unlock_irqrestore(&mmu->lock, flags);
}

struct vcm *__must_check vcm_mmu_init(struct vcm_mmu *mmu)
{
	static const struct vcm_driver driver = {
		.cleanup	= vcm_mmu_cleanup,
		.res		= vcm_mmu_res,
		.phys		= vcm_mmu_phys,
		.bind		= vcm_mmu_bind,
		.unbind		= vcm_mmu_unbind,
		.unreserve	= vcm_mmu_unreserve,
		.activate	= vcm_mmu_activate,
		.deactivate	= vcm_mmu_deactivate,
	};

	struct vcm *vcm;
	int ret;

	if (WARN_ON(!mmu || !mmu->driver ||
		    !(mmu->driver->activate ||
		      (mmu->driver->activate_page &&
		       mmu->driver->deactivate_page)) ||
		    !(mmu->driver->deactivate ||
		      mmu->driver->deactivate_page)))
		return ERR_PTR(-EINVAL);

	mmu->vcm.driver = &driver;
	vcm = vcm_init(&mmu->vcm);
	if (IS_ERR(vcm))
		return vcm;

	mmu->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!mmu->pool)
		return ERR_PTR(-ENOMEM);

	ret = gen_pool_add(mmu->pool, mmu->vcm.start, mmu->vcm.size, -1);
	if (ret) {
		gen_pool_destroy(mmu->pool);
		return ERR_PTR(ret);
	}

	vcm->driver = &driver;
	INIT_LIST_HEAD(&mmu->bound_res);
	spin_lock_init(&mmu->lock);

	return vcm;
}
EXPORT_SYMBOL_GPL(vcm_mmu_init);
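
/*
 * Example (illustrative sketch): a minimal IOMMU driver built on this
 * wrapper.  It only supplies per-page callbacks; the wrapper drives
 * them through vcm_phys_walk().  The my_iommu_* accessors are
 * hypothetical and the vcm_mmu_driver field layout is assumed to
 * match linux/vcm-drv.h.
 *
 *	static int my_set_pte(dma_addr_t vaddr, dma_addr_t paddr,
 *			      unsigned order, void *vcm)
 *	{
 *		return my_iommu_set_pte(vaddr, paddr, order);
 *	}
 *
 *	static int my_clear_pte(dma_addr_t vaddr, dma_addr_t paddr,
 *				unsigned order, void *vcm)
 *	{
 *		return my_iommu_clear_pte(vaddr, order);
 *	}
 *
 *	(orders: 1 MiB, 64 KiB and 4 KiB mappings with 4 KiB pages)
 *	static const unsigned char my_orders[] = { 8, 4, 0 };
 *
 *	static const struct vcm_mmu_driver my_driver = {
 *		.orders		 = my_orders,
 *		.activate_page	 = my_set_pte,
 *		.deactivate_page = my_clear_pte,
 *	};
 *
 *	static struct vcm_mmu my_mmu = {
 *		.vcm	= { .start = 0, .size = 1 << 28 },
 *		.driver	= &my_driver,
 *	};
 *
 *	struct vcm *vcm = vcm_mmu_init(&my_mmu);
 */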

#endif


/**************************** One-to-One wrapper ****************************/

#ifdef CONFIG_VCM_O2O

static void vcm_o2o_cleanup(struct vcm *vcm)
{
	struct vcm_o2o *o2o = container_of(vcm, struct vcm_o2o, vcm);
	if (o2o->driver->cleanup)
		o2o->driver->cleanup(vcm);
	else
		kfree(o2o);
}

static struct vcm_phys *
vcm_o2o_phys(struct vcm *vcm, resource_size_t size, unsigned flags)
{
	struct vcm_o2o *o2o = container_of(vcm, struct vcm_o2o, vcm);
	struct vcm_phys *phys;

	phys = o2o->driver->phys(vcm, size, flags);
	if (!IS_ERR(phys) &&
	    WARN_ON(!phys->free || !phys->parts->size ||
		    phys->parts->size < size ||
		    ((phys->parts->start | phys->parts->size) &
		     ~PAGE_MASK))) {
		if (phys->free)
			phys->free(phys);
		return ERR_PTR(-EINVAL);
	}

	return phys;
}

static struct vcm_res *
vcm_o2o_map(struct vcm *vcm, struct vcm_phys *phys, unsigned flags)
{
	struct vcm_res *res;

	if (phys->count != 1)
		return ERR_PTR(-EOPNOTSUPP);

	if (!phys->parts->size
	 || ((phys->parts->start | phys->parts->size) & ~PAGE_MASK))
		return ERR_PTR(-EINVAL);

	res = kmalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->start    = phys->parts->start;
	res->res_size = phys->parts->size;

	return res;
}

static int vcm_o2o_bind(struct vcm_res *res, struct vcm_phys *phys)
{
	if (phys->count != 1)
		return -EOPNOTSUPP;

	if (!phys->parts->size
	 || ((phys->parts->start | phys->parts->size) & ~PAGE_MASK))
		return -EINVAL;

	if (res->start != phys->parts->start)
		return -EOPNOTSUPP;

	return 0;
}

struct vcm *__must_check vcm_o2o_init(struct vcm_o2o *o2o)
{
	static const struct vcm_driver driver = {
		.cleanup	= vcm_o2o_cleanup,
		.phys		= vcm_o2o_phys,
		.map		= vcm_o2o_map,
		.bind		= vcm_o2o_bind,
		.unreserve	= (void (*)(struct vcm_res *))kfree,
	};

	if (WARN_ON(!o2o || !o2o->driver || !o2o->driver->phys))
		return ERR_PTR(-EINVAL);

	o2o->vcm.driver = &driver;
	return vcm_init(&o2o->vcm);
}
EXPORT_SYMBOL_GPL(vcm_o2o_init);
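
/*
 * Example (illustrative sketch): a one-to-one driver only has to
 * provide a phys() operation returning a single physically contiguous
 * part.  The my_carveout_* helpers are hypothetical, and the struct
 * vcm_o2o/vcm_o2o_driver layout is assumed to match linux/vcm-drv.h.
 *
 *	static void my_phys_free(struct vcm_phys *phys)
 *	{
 *		my_carveout_free(phys->parts->start, phys->parts->size);
 *		kfree(phys);
 *	}
 *
 *	static struct vcm_phys *
 *	my_phys(struct vcm *vcm, resource_size_t size, unsigned flags)
 *	{
 *		struct vcm_phys *phys;
 *
 *		phys = kzalloc(sizeof *phys + sizeof *phys->parts,
 *			       GFP_KERNEL);
 *		if (!phys)
 *			return ERR_PTR(-ENOMEM);
 *
 *		phys->free = my_phys_free;
 *		phys->count = 1;
 *		phys->size = size;
 *		phys->parts->start = my_carveout_alloc(size);
 *		phys->parts->size = size;
 *		return phys;
 *	}
 *
 *	static struct vcm_o2o my_o2o = {
 *		.vcm	= { .start = 0, .size = 1 << 24 },
 *		.driver	= &(const struct vcm_o2o_driver) {
 *			.phys = my_phys,
 *		},
 *	};
 *
 *	struct vcm *vcm = vcm_o2o_init(&my_o2o);
 */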

#endif


/************************ Physical memory management ************************/

#ifdef CONFIG_VCM_PHYS

struct vcm_phys_list {
	struct vcm_phys_list	*next;
	unsigned		count;
	struct vcm_phys_part	parts[31];
};

static struct vcm_phys_list *__must_check
vcm_phys_alloc_list_order(struct vcm_phys_list *last, resource_size_t *pages,
			  unsigned flags, unsigned order, unsigned *total,
			  gfp_t gfp)
{
	unsigned count;

	count = *pages >> order;

	/*
	 * When we allocate big chunks, the system may have to reclaim
	 * memory, which makes the system slow.  So for orders above
	 * zero, the allocation neither waits nor warns on failure.
	 */
	if (order)
		gfp |= (GFP_NOWAIT | __GFP_NOWARN);

	do {
		struct page *page = alloc_pages(gfp, order);

		if (!page)
			/*
			 * If allocation failed we may still
			 * try to continue allocating smaller
			 * pages.
			 */
			break;

		if (last->count == ARRAY_SIZE(last->parts)) {
			struct vcm_phys_list *l;
			l = kmalloc(sizeof *l, GFP_KERNEL);
			if (!l)
				return NULL;

			l->next  = NULL;
			l->count = 0;
			last->next = l;
			last = l;
		}

		last->parts[last->count].start = page_to_phys(page);
		last->parts[last->count].size  = (1 << order) << PAGE_SHIFT;
		last->parts[last->count].page  = page;
		++last->count;
		++*total;
		*pages -= 1 << order;
	} while (--count);

	return last;
}

static unsigned __must_check
vcm_phys_alloc_list(struct vcm_phys_list *first,
		    resource_size_t size, unsigned flags,
		    const unsigned char *orders, gfp_t gfp)
{
	struct vcm_phys_list *last = first;
	unsigned total_parts = 0;
	resource_size_t pages;

	/*
	 * We are trying to allocate as large pages as possible, but
	 * not larger than the pages that the MMU driver that called
	 * us supports (ie. the ones provided by orders).  This makes
	 * it possible to map the region using the fewest possible
	 * number of entries.
	 */
	pages = size >> PAGE_SHIFT;
	do {
		while (!(pages >> *orders))
			++orders;

		last = vcm_phys_alloc_list_order(last, &pages, flags, *orders,
						 &total_parts, gfp);
		if (!last)
			return 0;
	} while (*orders++ && pages);

	if (pages)
		return 0;

	return total_parts;
}

static void vcm_phys_free_parts(struct vcm_phys_part *parts, unsigned count)
{
	do {
		__free_pages(parts->page, ffs(parts->size) - 1 - PAGE_SHIFT);
	} while (++parts, --count);
}

static void vcm_phys_free(struct vcm_phys *phys)
{
	vcm_phys_free_parts(phys->parts, phys->count);
	kfree(phys);
}

struct vcm_phys *__must_check
__vcm_phys_alloc_coherent(resource_size_t size, unsigned flags,
			  const unsigned char *orders, gfp_t gfp)
{
	struct vcm_phys *phys;

	/* the physical memory must reside in the lowmem */
	phys = __vcm_phys_alloc(size, flags, orders, GFP_DMA32);

	return phys;
}
EXPORT_SYMBOL_GPL(__vcm_phys_alloc_coherent);

struct vcm_phys *__must_check
__vcm_phys_alloc(resource_size_t size, unsigned flags,
		 const unsigned char *orders, gfp_t gfp)
{
	struct vcm_phys_list *lst, *n;
	struct vcm_phys_part *out;
	struct vcm_phys *phys;
	unsigned count;

	if (WARN_ON((size & (PAGE_SIZE - 1)) || !size || !orders))
		return ERR_PTR(-EINVAL);

	lst = kmalloc(sizeof *lst, GFP_KERNEL);
	if (!lst)
		return ERR_PTR(-ENOMEM);

	lst->next  = NULL;
	lst->count = 0;

	count = vcm_phys_alloc_list(lst, size, flags, orders, gfp);
	if (!count)
		goto error;

	phys = kmalloc(sizeof *phys + count * sizeof *phys->parts, GFP_KERNEL);
	if (!phys)
		goto error;

	phys->free  = vcm_phys_free;
	phys->count = count;
	phys->size  = size;

	out = phys->parts;
	do {
		memcpy(out, lst->parts, lst->count * sizeof *out);
		out += lst->count;

		n = lst->next;
		kfree(lst);
		lst = n;
	} while (lst);

	return phys;

error:
	do {
		vcm_phys_free_parts(lst->parts, lst->count);

		n = lst->next;
		kfree(lst);
		lst = n;
	} while (lst);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(__vcm_phys_alloc);
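
/*
 * Example (illustrative): with 4 KiB pages, an orders array of
 * { 8, 4, 0 } requests 1 MiB chunks first, then 64 KiB, then single
 * pages.  Assuming the allocations succeed, a 1.25 MiB request comes
 * back as one 1 MiB part plus four 64 KiB parts:
 *
 *	static const unsigned char my_orders[] = { 8, 4, 0 };
 *	struct vcm_phys *phys;
 *
 *	phys = __vcm_phys_alloc((1 << 20) + (256 << 10), 0,
 *				my_orders, GFP_KERNEL);
 */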

static inline bool is_of_order(dma_addr_t size, unsigned order)
{
	return !(size & (((dma_addr_t)PAGE_SIZE << order) - 1));
}

static int __must_check
__vcm_phys_walk_part(dma_addr_t vaddr, const struct vcm_phys_part *part,
		     const unsigned char *orders,
		     int (*callback)(dma_addr_t vaddr, dma_addr_t paddr,
				     unsigned order, void *priv), void *priv,
		     unsigned *limit)
{
	resource_size_t size = part->size;
	dma_addr_t paddr = part->start;
	resource_size_t ps;

	while (!is_of_order(vaddr, *orders))
		++orders;
	while (!is_of_order(paddr, *orders))
		++orders;

	ps = PAGE_SIZE << *orders;
	for (; *limit && size; --*limit) {
		int ret;

		while (ps > size)
			ps = PAGE_SIZE << *++orders;

		ret = callback(vaddr, paddr, *orders, priv);
		if (ret)
			return ret;

		ps = PAGE_SIZE << *orders;
		vaddr += ps;
		paddr += ps;
		size  -= ps;
	}

	return 0;
}

int vcm_phys_walk(dma_addr_t _vaddr, const struct vcm_phys *phys,
		  const unsigned char *orders,
		  int (*callback)(dma_addr_t vaddr, dma_addr_t paddr,
				  unsigned order, void *arg),
		  int (*recovery)(dma_addr_t vaddr, dma_addr_t paddr,
				  unsigned order, void *arg),
		  void *priv)
{
	unsigned limit = ~0;
	int r = 0;

	if (WARN_ON(!phys || ((_vaddr | phys->size) & (PAGE_SIZE - 1)) ||
		    !phys->size || !orders || !callback))
		return -EINVAL;

	for (;;) {
		const struct vcm_phys_part *part = phys->parts;
		unsigned count = phys->count;
		dma_addr_t vaddr = _vaddr;
		int ret = 0;

		for (; count && limit; --count, ++part) {
			ret = __vcm_phys_walk_part(vaddr, part, orders,
						   callback, priv, &limit);
			if (ret)
				break;

			vaddr += part->size;
		}

		if (r)
			/* We passed error recovery */
			return r;

		/*
		 * Either the operation succeeded or we were not provided
		 * with a recovery callback -- return.
		 */
		if (!ret || !recovery)
			return ret;

		/* Switch to recovery */
		limit = ~0 - limit;
		callback = recovery;
		r = ret;
	}
}
EXPORT_SYMBOL_GPL(vcm_phys_walk);
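
/*
 * Example (illustrative): programming page table entries for a bound
 * reservation, with a recovery callback that undoes the entries the
 * main callback managed to set before failing.  The my_iommu_*
 * helpers and my_dev are hypothetical.
 *
 *	static int my_set(dma_addr_t vaddr, dma_addr_t paddr,
 *			  unsigned order, void *priv)
 *	{
 *		return my_iommu_set_pte(priv, vaddr, paddr, order);
 *	}
 *
 *	static int my_clear(dma_addr_t vaddr, dma_addr_t paddr,
 *			    unsigned order, void *priv)
 *	{
 *		return my_iommu_clear_pte(priv, vaddr, order);
 *	}
 *
 *	ret = vcm_phys_walk(res->start, res->phys, my_orders,
 *			    my_set, my_clear, my_dev);
 */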