kernel/linux-2.6.36.git: mm/vcm.c
1 /*
2  * Virtual Contiguous Memory core
3  * Copyright (c) 2010 by Samsung Electronics.
4  * Written by Michal Nazarewicz (m.nazarewicz@samsung.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation; either version 2 of the
9  * License, or (at your option) any later version.
10  */
11
12 /*
13  * See Documentation/virtual-contiguous-memory.txt for details.
14  */
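/*
 * Minimal usage sketch (illustrative only, not taken from the document
 * referenced above; how the struct vcm is obtained depends on the
 * platform, the names marked "hypothetical" are made up, and error
 * handling is omitted for brevity):
 *
 *      struct vcm *vcm = my_platform_get_vcm();        // hypothetical
 *      struct vcm_res *res;
 *      struct vcm_phys *phys;
 *
 *      res  = vcm_reserve(vcm, 1 << 20, 0);    // reserve virtual space
 *      phys = vcm_alloc(vcm, 1 << 20, 0);      // allocate physical memory
 *      if (vcm_bind(res, phys) == 0 && vcm_activate(vcm) == 0) {
 *              // res->start is now the device-visible address of the buffer
 *      }
 *
 *      vcm_deactivate(vcm);
 *      vcm_free(vcm_unbind(res));
 *      vcm_unreserve(res);
 *
 * vcm_make_binding() collapses the reserve, alloc and bind steps into a
 * single call, and vcm_map() builds a reservation around an existing
 * physical allocation.
 */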
15
16 #include <linux/vcm-drv.h>
17 #include <linux/module.h>
18 #include <linux/mm.h>
19 #include <linux/vmalloc.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 #include <linux/genalloc.h>
23
24 #include <linux/dma-mapping.h>
25 #include <asm/cacheflush.h>
26 #include <asm/outercache.h>
27
28 /******************************** Devices API *******************************/
29
30 void vcm_destroy(struct vcm *vcm)
31 {
32         if (WARN_ON(atomic_read(&vcm->activations)) && vcm->driver->deactivate)
33                 vcm->driver->deactivate(vcm);
34
35         if (vcm->driver->cleanup)
36                 vcm->driver->cleanup(vcm);
37         else
38                 kfree(vcm);
39 }
40 EXPORT_SYMBOL_GPL(vcm_destroy);
41
42 static void
43 __vcm_alloc_and_reserve(struct vcm *vcm, resource_size_t size,
44                         struct vcm_phys **phys, unsigned alloc_flags,
45                         struct vcm_res **res, unsigned res_flags)
46 {
47         int ret, alloc = 0;
48
49         if (WARN_ON(!vcm) || !size) {
50                 ret = -EINVAL;
51                 goto error;
52         }
53
54         size = PAGE_ALIGN(size);
55
56         if (vcm->driver->alloc) {
57                 ret = vcm->driver->alloc(vcm, size,
58                                          phys, alloc_flags, res, res_flags);
59                 if (ret)
60                         goto error;
61                 alloc = 1;
62         } else if ((res && !vcm->driver->res) || (phys && !vcm->driver->phys)) {
63                 ret = -EOPNOTSUPP;
64                 goto error;
65         }
66
67         if (res) {
68                 if (!alloc) {
69                         *res = vcm->driver->res(vcm, size, res_flags);
70                         if (IS_ERR(*res)) {
71                                 ret = PTR_ERR(*res);
72                                 goto error;
73                         }
74                 }
75                 (*res)->bound_size = 0;
76                 (*res)->vcm = vcm;
77                 (*res)->phys = NULL;
78 #ifdef CONFIG_VCM_RES_REFCNT
79                 atomic_set(&(*res)->refcnt, 1);
80 #endif
81         }
82
83         if (phys) {
84                 if (!alloc) {
85                         *phys = vcm->driver->phys(vcm, size, alloc_flags);
86                         if (!IS_ERR(*phys) && WARN_ON(!(*phys)->free))
87                                 *phys = ERR_PTR(-EINVAL);
88                         if (IS_ERR(*phys)) {
89                                 ret = PTR_ERR(*phys);
90                                 goto error;
91                         }
92                 }
93                 atomic_set(&(*phys)->bindings, 0);
94         }
95
96         return;
97
98 error:
99         if (phys)
100                 *phys = ERR_PTR(ret);
101         if (res) {
102                 if (!IS_ERR_OR_NULL(*res))
103                         vcm_unreserve(*res);
104                 *res = ERR_PTR(ret);
105         }
106 }
107
108 struct vcm_res *__must_check
109 vcm_make_binding(struct vcm *vcm, resource_size_t size,
110                  unsigned alloc_flags, unsigned res_flags)
111 {
112         struct vcm_phys *phys;
113         struct vcm_res *res;
114
115         if (WARN_ON(!vcm || !size || (size & (PAGE_SIZE - 1))))
116                 return ERR_PTR(-EINVAL);
117         else if (vcm->driver->alloc || !vcm->driver->map) {
118                 int ret;
119
120                 __vcm_alloc_and_reserve(vcm, size, &phys, alloc_flags,
121                                         &res, res_flags);
122
123                 if (IS_ERR(res))
124                         return res;
125
126                 ret = vcm_bind(res, phys);
127                 if (!ret)
128                         return res;
129
130                 if (vcm->driver->unreserve)
131                         vcm->driver->unreserve(res);
132                 phys->free(phys);
133                 return ERR_PTR(ret);
134         } else {
135                 __vcm_alloc_and_reserve(vcm, size, &phys, alloc_flags,
136                                         NULL, 0);
137
138                 if (IS_ERR(phys))
139                         return ERR_CAST(phys);
140
141                 res = vcm_map(vcm, phys, res_flags);
142                 if (IS_ERR(res))
143                         phys->free(phys);
144
145                 return res;
146         }
147 }
148 EXPORT_SYMBOL_GPL(vcm_make_binding);
149
150 struct vcm_phys *__must_check
151 vcm_alloc(struct vcm *vcm, resource_size_t size, unsigned flags)
152 {
153         struct vcm_phys *phys;
154
155         __vcm_alloc_and_reserve(vcm, size, &phys, flags, NULL, 0);
156
157         return phys;
158 }
159 EXPORT_SYMBOL_GPL(vcm_alloc);
160
161 struct vcm_res *__must_check
162 vcm_reserve(struct vcm *vcm, resource_size_t size, unsigned flags)
163 {
164         struct vcm_res *res;
165
166         __vcm_alloc_and_reserve(vcm, size, NULL, 0, &res, flags);
167
168         return res;
169 }
170 EXPORT_SYMBOL_GPL(vcm_reserve);
171
172 #ifdef CONFIG_VCM_RES_REFCNT
173 int __must_check vcm_ref_reserve(struct vcm_res *res)
174 {
175         if (WARN_ON(!res) || (atomic_inc_return(&res->refcnt) < 2))
176                 return -EINVAL;
177         return 0;
178 }
179 EXPORT_SYMBOL_GPL(vcm_ref_reserve);
180 #endif
181
182 struct vcm_res *__must_check
183 vcm_map(struct vcm *vcm, struct vcm_phys *phys, unsigned flags)
184 {
185         struct vcm_res *res;
186         int ret;
187
188         if (WARN_ON(!vcm || !phys))
189                 return ERR_PTR(-EINVAL);
190
191         if (vcm->driver->map) {
192                 res = vcm->driver->map(vcm, phys, flags);
193                 if (!IS_ERR(res)) {
194                         atomic_inc(&phys->bindings);
195                         res->phys       = phys;
196                         res->bound_size = phys->size;
197                         res->vcm        = vcm;
198 #ifdef CONFIG_VCM_RES_REFCNT
199                         atomic_set(&res->refcnt, 1);
200 #endif
201                 }
202                 return res;
203         }
204
205         res = vcm_reserve(vcm, phys->size, flags);
206         if (IS_ERR(res))
207                 return res;
208
209         ret = vcm_bind(res, phys);
210         if (!ret)
211                 return res;
212
213         vcm_unreserve(res);
214         return ERR_PTR(ret);
215 }
216 EXPORT_SYMBOL_GPL(vcm_map);
217
218 void vcm_unreserve(struct vcm_res *res)
219 {
220         if (!WARN_ON(!res)) {
221 #ifdef CONFIG_VCM_RES_REFCNT
222                 if (!atomic_dec_and_test(&res->refcnt))
223                         return;
224 #endif
225                 if (WARN_ON(res->phys))
226                         vcm_unbind(res);
227                 if (!WARN_ON_ONCE(!res->vcm->driver->unreserve))
228                         res->vcm->driver->unreserve(res);
229         }
230 }
231 EXPORT_SYMBOL_GPL(vcm_unreserve);
232
233 void vcm_free(struct vcm_phys *phys)
234 {
235         if (!WARN_ON(!phys || atomic_read(&phys->bindings)))
236                 phys->free(phys);
237 }
238 EXPORT_SYMBOL_GPL(vcm_free);
239
240 int  __must_check vcm_bind(struct vcm_res *res, struct vcm_phys *phys)
241 {
242         int ret;
243
244         if (WARN_ON(!res || !phys))
245                 return -EINVAL;
246
247         if (res->phys == phys)
248                 return -EALREADY;
249
250         if (res->phys)
251                 return -EADDRINUSE;
252
253         if (phys->size > res->res_size)
254                 return -ENOSPC;
255
256         if (!res->vcm->driver->bind)
257                 return -EOPNOTSUPP;
258
259         ret = res->vcm->driver->bind(res, phys);
260         if (ret >= 0) {
261                 atomic_inc(&phys->bindings);
262                 res->phys = phys;
263                 res->bound_size = phys->size;
264         }
265         return ret;
266 }
267 EXPORT_SYMBOL_GPL(vcm_bind);
268
269 struct vcm_phys *vcm_unbind(struct vcm_res *res)
270 {
271         struct vcm_phys *phys = NULL;
272         if (!WARN_ON(!res || !res->phys)) {
273                 phys = res->phys;
274                 if (res->vcm->driver->unbind)
275                         res->vcm->driver->unbind(res);
276                 WARN_ON(!atomic_add_unless(&phys->bindings, -1, 0));
277                 res->phys = NULL;
278                 res->bound_size = 0;
279         }
280         return phys;
281 }
282 EXPORT_SYMBOL_GPL(vcm_unbind);
283
284 void vcm_unmap(struct vcm_res *res)
285 {
286 #ifdef CONFIG_VCM_RES_REFCNT
287         if (atomic_read(&res->refcnt) > 1) {
288                 atomic_dec(&res->refcnt);
289                 return;
290         }
291 #endif
292         vcm_unbind(res);
293         vcm_unreserve(res);
294 }
295 EXPORT_SYMBOL_GPL(vcm_unmap);
296
297 void vcm_destroy_binding(struct vcm_res *res)
298 {
299         if (!WARN_ON(!res)) {
300                 struct vcm_phys *phys;
301 #ifdef CONFIG_VCM_RES_REFCNT
302                 if (atomic_read(&res->refcnt) > 1) {
303                         atomic_dec(&res->refcnt);
304                         return;
305                 }
306 #endif
307                 phys = vcm_unbind(res);
308                 if (phys)
309                         vcm_free(phys);
310                 vcm_unreserve(res);
311         }
312 }
313 EXPORT_SYMBOL_GPL(vcm_destroy_binding);
314
315 int  __must_check vcm_activate(struct vcm *vcm)
316 {
317         if (WARN_ON(!vcm))
318                 return -EINVAL;
319         else if (atomic_inc_return(&vcm->activations) != 1
320               || !vcm->driver->activate)
321                 return 0;
322         else
323                 return vcm->driver->activate(vcm);
324 }
325 EXPORT_SYMBOL_GPL(vcm_activate);
326
327 void vcm_deactivate(struct vcm *vcm)
328 {
329         if (!WARN_ON(!vcm || !atomic_read(&vcm->activations))
330          && atomic_dec_and_test(&vcm->activations)
331          && vcm->driver->deactivate)
332                 vcm->driver->deactivate(vcm);
333 }
334 EXPORT_SYMBOL_GPL(vcm_deactivate);
335
336
337 /****************************** VCM VMM driver ******************************/
338
339 static void vcm_vmm_cleanup(struct vcm *vcm)
340 {
341         /* This should never be called.  vcm_vmm is a static object. */
342         BUG_ON(1);
343 }
344
345 static struct vcm_phys *
346 vcm_vmm_phys(struct vcm *vcm, resource_size_t size, unsigned flags)
347 {
348         static const unsigned char orders[] = { 0 };
349         return vcm_phys_alloc(size, flags, orders);
350 }
351
352 static void vcm_vmm_unreserve(struct vcm_res *res)
353 {
354         kfree(res);
355 }
356
357 static struct vcm_res *vcm_vmm_map(struct vcm *vcm, struct vcm_phys *phys,
358                                    unsigned flags)
359 {
360         /*
361          * Original implementation written by Cho KyongHo
362          * (pullip.cho@samsung.com).  Later rewritten by mina86.
363          */
364         struct vcm_phys_part *part;
365         struct page **pages, **p;
366         struct vcm_res *res;
367         int ret = -ENOMEM;
368         unsigned i;
369
370         pages = kzalloc((phys->size >> PAGE_SHIFT) * sizeof *pages, GFP_KERNEL);
371         if (!pages)
372                 return ERR_PTR(-ENOMEM);
373         p = pages;
374
375         res = kzalloc(sizeof *res, GFP_KERNEL);
376         if (!res)
377                 goto error_pages;
378
379         i    = phys->count;
380         part = phys->parts;
381         do {
382                 unsigned j = part->size >> PAGE_SHIFT;
383                 struct page *page = part->page;
384                 if (!page)
385                         goto error_notsupp;
386                 do {
387                         *p++ = page++;
388                 } while (--j);
389         } while (++part, --i);
390
391         res->start = (dma_addr_t)vmap(pages, p - pages, VM_ALLOC, PAGE_KERNEL);
392         if (!res->start)
393                 goto error_res;
394
395         kfree(pages);
396         res->res_size = phys->size;
397         return res;
398
399 error_notsupp:
400         ret = -EOPNOTSUPP;
401 error_res:
402         kfree(res);
403 error_pages:
404         kfree(pages);
405         return ERR_PTR(ret);
406 }
407
408 static void vcm_vmm_unbind(struct vcm_res *res)
409 {
410         vunmap((void *)res->start);
411         res->phys = NULL;
412 }
413
414 static int vcm_vmm_activate(struct vcm *vcm)
415 {
416         /* no operation, all bindings are immediately active */
417         return 0;
418 }
419
420 static void vcm_vmm_deactivate(struct vcm *vcm)
421 {
422         /*
423          * no operation, all bindings are immediately active and
424          * cannot be deactivated unless unbound.
425          */
426 }
427
428 struct vcm vcm_vmm[1] = { {
429         .start       = 0,
430         .size        = ~(resource_size_t)0,
431         /* prevent activate/deactivate from being called */
432         .activations = ATOMIC_INIT(1),
433         .driver      = &(const struct vcm_driver) {
434                 .cleanup        = vcm_vmm_cleanup,
435                 .map            = vcm_vmm_map,
436                 .phys           = vcm_vmm_phys,
437                 .unbind         = vcm_vmm_unbind,
438                 .unreserve      = vcm_vmm_unreserve,
439                 .activate       = vcm_vmm_activate,
440                 .deactivate     = vcm_vmm_deactivate,
441         }
442 } };
443 EXPORT_SYMBOL_GPL(vcm_vmm);
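
/*
 * Illustrative sketch (not part of the original sources): vcm_vmm is a
 * ready-made context backed by vmalloc space, so a physical allocation
 * can be made CPU-visible with a plain vcm_map().  No activation step
 * is needed -- bindings are live as soon as they are created (error
 * handling omitted):
 *
 *      struct vcm_phys *phys = vcm_alloc(vcm_vmm, 1 << 20, 0);
 *      struct vcm_res *res = vcm_map(vcm_vmm, phys, 0);
 *
 *      // res->start holds the vmap()ed kernel virtual address
 *
 *      vcm_unmap(res);         // vunmap()s and drops the reservation
 *      vcm_free(phys);
 */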
444
445
446 /****************************** VCM Drivers API *****************************/
447
448 struct vcm *__must_check vcm_init(struct vcm *vcm)
449 {
450         if (WARN_ON(!vcm || !vcm->size
451                  || ((vcm->start | vcm->size) & ~PAGE_MASK)
452                  || !vcm->driver || !vcm->driver->unreserve))
453                 return ERR_PTR(-EINVAL);
454
455         atomic_set(&vcm->activations, 0);
456
457         return vcm;
458 }
459 EXPORT_SYMBOL_GPL(vcm_init);
460
461
462 /*************************** Hardware MMU wrapper ***************************/
463
464 #ifdef CONFIG_VCM_MMU
465
466 struct vcm_mmu_res {
467         struct vcm_res                  res;
468         struct list_head                bound;
469 };
470
471 static void vcm_mmu_cleanup(struct vcm *vcm)
472 {
473         struct vcm_mmu *mmu = container_of(vcm, struct vcm_mmu, vcm);
474         WARN_ON(spin_is_locked(&mmu->lock) || !list_empty(&mmu->bound_res));
475         gen_pool_destroy(mmu->pool);
476         if (mmu->driver->cleanup)
477                 mmu->driver->cleanup(vcm);
478         else
479                 kfree(mmu);
480 }
481
482 static struct vcm_res *
483 vcm_mmu_res(struct vcm *vcm, resource_size_t size, unsigned flags)
484 {
485         struct vcm_mmu *mmu = container_of(vcm, struct vcm_mmu, vcm);
486         const unsigned char *orders;
487         struct vcm_mmu_res *res;
488         dma_addr_t addr;
489         unsigned order;
490
491         res = kzalloc(sizeof *res, GFP_KERNEL);
492         if (!res)
493                 return ERR_PTR(-ENOMEM);
494
495         order = ffs(size) - PAGE_SHIFT - 1;
496         for (orders = mmu->driver->orders; *orders > order; ++orders)
497                 /* nop */;
498         order = *orders + PAGE_SHIFT;
499
500         addr = gen_pool_alloc_aligned(mmu->pool, size, order);
501         if (!addr) {
502                 kfree(res);
503                 return ERR_PTR(-ENOSPC);
504         }
505
506         INIT_LIST_HEAD(&res->bound);
507         res->res.start = addr;
508         res->res.res_size = size;
509
510         return &res->res;
511 }
512
513 static struct vcm_phys *
514 vcm_mmu_phys(struct vcm *vcm, resource_size_t size, unsigned flags)
515 {
516         return vcm_phys_alloc(size, flags,
517                               container_of(vcm, struct vcm_mmu,
518                                            vcm)->driver->orders);
519 }
520
521 static int __must_check
522 __vcm_mmu_activate(struct vcm_res *res, struct vcm_phys *phys)
523 {
524         struct vcm_mmu *mmu = container_of(res->vcm, struct vcm_mmu, vcm);
525         if (mmu->driver->activate)
526                 return mmu->driver->activate(res, phys);
527
528         return vcm_phys_walk(res->start, phys, mmu->driver->orders,
529                              mmu->driver->activate_page,
530                              mmu->driver->deactivate_page, res->vcm);
531 }
532
533 static void __vcm_mmu_deactivate(struct vcm_res *res, struct vcm_phys *phys)
534 {
535         struct vcm_mmu *mmu = container_of(res->vcm, struct vcm_mmu, vcm);
536         if (mmu->driver->deactivate)
537                 return mmu->driver->deactivate(res, phys);
538
539         vcm_phys_walk(res->start, phys, mmu->driver->orders,
540                       mmu->driver->deactivate_page, NULL, res->vcm);
541 }
542
543 static int vcm_mmu_bind(struct vcm_res *_res, struct vcm_phys *phys)
544 {
545         struct vcm_mmu_res *res = container_of(_res, struct vcm_mmu_res, res);
546         struct vcm_mmu *mmu = container_of(_res->vcm, struct vcm_mmu, vcm);
547         unsigned long flags;
548         int ret;
549
550         spin_lock_irqsave(&mmu->lock, flags);
551         if (mmu->activated) {
552                 ret = __vcm_mmu_activate(_res, phys);
553                 if (ret < 0)
554                         goto done;
555         }
556         list_add_tail(&res->bound, &mmu->bound_res);
557         ret = 0;
558 done:
559         spin_unlock_irqrestore(&mmu->lock, flags);
560
561         return ret;
562 }
563
564 static void vcm_mmu_unbind(struct vcm_res *_res)
565 {
566         struct vcm_mmu_res *res = container_of(_res, struct vcm_mmu_res, res);
567         struct vcm_mmu *mmu = container_of(_res->vcm, struct vcm_mmu, vcm);
568         unsigned long flags;
569
570         spin_lock_irqsave(&mmu->lock, flags);
571         if (mmu->activated)
572                 __vcm_mmu_deactivate(_res, _res->phys);
573         list_del_init(&res->bound);
574         spin_unlock_irqrestore(&mmu->lock, flags);
575 }
576
577 static void vcm_mmu_unreserve(struct vcm_res *res)
578 {
579         struct vcm_mmu *mmu = container_of(res->vcm, struct vcm_mmu, vcm);
580         gen_pool_free(mmu->pool, res->start, res->res_size);
581         kfree(res);
582 }
583
584 static int vcm_mmu_activate(struct vcm *vcm)
585 {
586         struct vcm_mmu *mmu = container_of(vcm, struct vcm_mmu, vcm);
587         struct vcm_mmu_res *r, *rr;
588         unsigned long flags;
589         int ret;
590
591         spin_lock_irqsave(&mmu->lock, flags);
592
593         list_for_each_entry(r, &mmu->bound_res, bound) {
594                 ret = __vcm_mmu_activate(&r->res, r->res.phys);
595                 if (ret >= 0)
596                         continue;
597
598                 list_for_each_entry(rr, &mmu->bound_res, bound) {
599                         if (r == rr)
600                                 goto done;
601                         __vcm_mmu_deactivate(&rr->res, rr->res.phys);
602                 }
603         }
604
605         mmu->activated = 1;
606         ret = 0;
607
608 done:
609         spin_unlock_irqrestore(&mmu->lock, flags);
610
611         return ret;
612 }
613
614 static void vcm_mmu_deactivate(struct vcm *vcm)
615 {
616         struct vcm_mmu *mmu = container_of(vcm, struct vcm_mmu, vcm);
617         struct vcm_mmu_res *r;
618         unsigned long flags;
619
620         spin_lock_irqsave(&mmu->lock, flags);
621
622         mmu->activated = 0;
623
624         list_for_each_entry(r, &mmu->bound_res, bound)
625                 __vcm_mmu_deactivate(&r->res, r->res.phys);
626
627         spin_unlock_irqrestore(&mmu->lock, flags);
628 }
629
630 struct vcm *__must_check vcm_mmu_init(struct vcm_mmu *mmu)
631 {
632         static const struct vcm_driver driver = {
633                 .cleanup        = vcm_mmu_cleanup,
634                 .res            = vcm_mmu_res,
635                 .phys           = vcm_mmu_phys,
636                 .bind           = vcm_mmu_bind,
637                 .unbind         = vcm_mmu_unbind,
638                 .unreserve      = vcm_mmu_unreserve,
639                 .activate       = vcm_mmu_activate,
640                 .deactivate     = vcm_mmu_deactivate,
641         };
642
643         struct vcm *vcm;
644         int ret;
645
646         if (WARN_ON(!mmu || !mmu->driver ||
647                     !(mmu->driver->activate ||
648                       (mmu->driver->activate_page &&
649                        mmu->driver->deactivate_page)) ||
650                     !(mmu->driver->deactivate ||
651                       mmu->driver->deactivate_page)))
652                 return ERR_PTR(-EINVAL);
653
654         mmu->vcm.driver = &driver;
655         vcm = vcm_init(&mmu->vcm);
656         if (IS_ERR(vcm))
657                 return vcm;
658
659         mmu->pool = gen_pool_create(PAGE_SHIFT, -1);
660         if (!mmu->pool)
661                 return ERR_PTR(-ENOMEM);
662
663         ret = gen_pool_add(mmu->pool, mmu->vcm.start, mmu->vcm.size, -1);
664         if (ret) {
665                 gen_pool_destroy(mmu->pool);
666                 return ERR_PTR(ret);
667         }
668
669         vcm->driver     = &driver;
670         INIT_LIST_HEAD(&mmu->bound_res);
671         spin_lock_init(&mmu->lock);
672
673         return &mmu->vcm;
674 }
675 EXPORT_SYMBOL_GPL(vcm_mmu_init);
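
/*
 * Hedged configuration sketch: a hardware MMU driver embeds a struct
 * vcm_mmu, fills in its address range and operation table and passes it
 * to vcm_mmu_init().  The tag of the operations structure and all
 * "my_*" names below are assumptions made for illustration; only fields
 * actually used by this file are shown:
 *
 *      static const unsigned char my_orders[] = { 8, 4, 0 };
 *
 *      static const struct vcm_mmu_driver my_mmu_ops = {      // tag assumed
 *              .orders          = my_orders,
 *              .activate_page   = my_activate_page,           // hypothetical
 *              .deactivate_page = my_deactivate_page,         // hypothetical
 *      };
 *
 *      static struct vcm_mmu my_mmu = {
 *              .vcm    = { .start = 0x20000000, .size = 256 << 20 },
 *              .driver = &my_mmu_ops,
 *      };
 *
 *      struct vcm *vcm = vcm_mmu_init(&my_mmu);
 */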
676
677 #endif
678
679
680 /**************************** One-to-One wrapper ****************************/
681
682 #ifdef CONFIG_VCM_O2O
683
684 static void vcm_o2o_cleanup(struct vcm *vcm)
685 {
686         struct vcm_o2o *o2o = container_of(vcm, struct vcm_o2o, vcm);
687         if (o2o->driver->cleanup)
688                 o2o->driver->cleanup(vcm);
689         else
690                 kfree(o2o);
691 }
692
693 static struct vcm_phys *
694 vcm_o2o_phys(struct vcm *vcm, resource_size_t size, unsigned flags)
695 {
696         struct vcm_o2o *o2o = container_of(vcm, struct vcm_o2o, vcm);
697         struct vcm_phys *phys;
698
699         phys = o2o->driver->phys(vcm, size, flags);
700         if (!IS_ERR(phys) &&
701             WARN_ON(!phys->free || !phys->parts->size ||
702                     phys->parts->size < size ||
703                     ((phys->parts->start | phys->parts->size) &
704                      ~PAGE_MASK))) {
705                 if (phys->free)
706                         phys->free(phys);
707                 return ERR_PTR(-EINVAL);
708         }
709
710         return phys;
711 }
712
713 static struct vcm_res *
714 vcm_o2o_map(struct vcm *vcm, struct vcm_phys *phys, unsigned flags)
715 {
716         struct vcm_res *res;
717
718         if (phys->count != 1)
719                 return ERR_PTR(-EOPNOTSUPP);
720
721         if (!phys->parts->size
722          || ((phys->parts->start | phys->parts->size) & ~PAGE_MASK))
723                 return ERR_PTR(-EINVAL);
724
725         res = kmalloc(sizeof *res, GFP_KERNEL);
726         if (!res)
727                 return ERR_PTR(-ENOMEM);
728
729         res->start    = phys->parts->start;
730         res->res_size = phys->parts->size;
731         return res;
732 }
733
734 static int vcm_o2o_bind(struct vcm_res *res, struct vcm_phys *phys)
735 {
736         if (phys->count != 1)
737                 return -EOPNOTSUPP;
738
739         if (!phys->parts->size
740          || ((phys->parts->start | phys->parts->size) & ~PAGE_MASK))
741                 return -EINVAL;
742
743         if (res->start != phys->parts->start)
744                 return -EOPNOTSUPP;
745
746         return 0;
747 }
748
749 struct vcm *__must_check vcm_o2o_init(struct vcm_o2o *o2o)
750 {
751         static const struct vcm_driver driver = {
752                 .cleanup        = vcm_o2o_cleanup,
753                 .phys           = vcm_o2o_phys,
754                 .map            = vcm_o2o_map,
755                 .bind           = vcm_o2o_bind,
756                 .unreserve      = (void (*)(struct vcm_res *))kfree,
757         };
758
759         if (WARN_ON(!o2o || !o2o->driver || !o2o->driver->phys))
760                 return ERR_PTR(-EINVAL);
761
762         o2o->vcm.driver = &driver;
763         return vcm_init(&o2o->vcm);
764 }
765 EXPORT_SYMBOL_GPL(vcm_o2o_init);
766
767 #endif
768
769
770 /************************ Physical memory management ************************/
771
772 #ifdef CONFIG_VCM_PHYS
773
774 struct vcm_phys_list {
775         struct vcm_phys_list    *next;
776         unsigned                count;
777         struct vcm_phys_part    parts[31];
778 };
779
780 static struct vcm_phys_list *__must_check
781 vcm_phys_alloc_list_order(struct vcm_phys_list *last, resource_size_t *pages,
782                           unsigned flags, unsigned order, unsigned *total,
783                           gfp_t gfp)
784 {
785         unsigned count;
786
787         count   = *pages >> order;
788
789         /* Allocating big chunks may trigger memory reclaim and slow the
790          * system down, so allocations above order 0 neither wait nor warn.
791          */
792         if (order > 0)
793                 gfp |= (GFP_NOWAIT | __GFP_NOWARN);
794
795         do {
796                 struct page *page = alloc_pages(gfp, order);
797
798                 if (!page)
799                         /*
800                          * If allocation failed we may still
801                          * try to continue allocating smaller
802                          * pages.
803                          */
804                         break;
805
806                 if (last->count == ARRAY_SIZE(last->parts)) {
807                         struct vcm_phys_list *l;
808                         l = kmalloc(sizeof *l, GFP_KERNEL);
809                         if (!l)
810                                 return NULL;
811
812                         l->next = NULL;
813                         l->count = 0;
814                         last->next = l;
815                         last = l;
816                 }
817
818                 last->parts[last->count].start = page_to_phys(page);
819                 last->parts[last->count].size  = (1 << order) << PAGE_SHIFT;
820                 last->parts[last->count].page  = page;
821                 ++last->count;
822                 ++*total;
823                 *pages -= 1 << order;
824         } while (--count);
825
826         return last;
827 }
828
829 static unsigned __must_check
830 vcm_phys_alloc_list(struct vcm_phys_list *first,
831                     resource_size_t size, unsigned flags,
832                     const unsigned char *orders, gfp_t gfp)
833 {
834         struct vcm_phys_list *last = first;
835         unsigned total_parts = 0;
836         resource_size_t pages;
837
838         /*
839          * We try to allocate pages as large as possible, but not
840          * larger than the pages supported by the MMU driver that
841          * called us (i.e. the ones listed in orders).  This makes it
842          * possible to map the region using the fewest possible
843          * number of entries.
844          */
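        /*
         * Worked example (added for illustration, assuming every
         * alloc_pages() call succeeds): with orders = { 8, 4, 0 } and
         * size = 2 MiB + 24 KiB, i.e. 518 pages, the loop below first
         * carves out two order-8 chunks (2 * 256 pages), skips order 4
         * because only 6 pages remain (6 >> 4 == 0) and finishes with
         * six order-0 pages -- eight parts in total.
         */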
845         pages = size >> PAGE_SHIFT;
846         do {
847                 while (!(pages >> *orders))
848                         ++orders;
849
850                 last = vcm_phys_alloc_list_order(last, &pages, flags, *orders,
851                                                  &total_parts, gfp);
852                 if (!last)
853                         return 0;
854
855         } while (*orders++ && pages);
856
857         if (pages)
858                 return 0;
859
860         return total_parts;
861 }
862
863 static void vcm_phys_free_parts(struct vcm_phys_part *parts, unsigned count)
864 {
865         do {
866                 __free_pages(parts->page, ffs(parts->size) - 1 - PAGE_SHIFT);
867         } while (++parts, --count);
868 }
869
870 static void vcm_phys_free(struct vcm_phys *phys)
871 {
872         vcm_phys_free_parts(phys->parts, phys->count);
873         kfree(phys);
874 }
875
876 struct vcm_phys *__must_check
877 __vcm_phys_alloc_coherent(resource_size_t size, unsigned flags,
878                  const unsigned char *orders, gfp_t gfp)
879 {
880         struct vcm_phys *phys;
881
882         /*
883          * The physical memory must reside in lowmem, so the caller's
884          * gfp mask is overridden with GFP_DMA32.
885          */
886         phys = __vcm_phys_alloc(size, flags, orders, GFP_DMA32);
887
888         return phys;
889 }
890 EXPORT_SYMBOL_GPL(__vcm_phys_alloc_coherent);
891
892 struct vcm_phys *__must_check
893 __vcm_phys_alloc(resource_size_t size, unsigned flags,
894                  const unsigned char *orders, gfp_t gfp)
895 {
896         struct vcm_phys_list *lst, *n;
897         struct vcm_phys_part *out;
898         struct vcm_phys *phys;
899         unsigned count;
900
901         if (WARN_ON((size & (PAGE_SIZE - 1)) || !size || !orders))
902                 return ERR_PTR(-EINVAL);
903
904         lst = kmalloc(sizeof *lst, GFP_KERNEL);
905         if (!lst)
906                 return ERR_PTR(-ENOMEM);
907
908         lst->next = NULL;
909         lst->count = 0;
910
911         count = vcm_phys_alloc_list(lst, size, flags, orders, gfp);
912         if (!count)
913                 goto error;
914
915         phys = kmalloc(sizeof *phys + count * sizeof *phys->parts, GFP_KERNEL);
916         if (!phys)
917                 goto error;
918
919         phys->free  = vcm_phys_free;
920         phys->count = count;
921         phys->size  = size;
922
923         out = phys->parts;
924         do {
925                 memcpy(out, lst->parts, lst->count * sizeof *out);
926                 out += lst->count;
927
928                 n = lst->next;
929                 kfree(lst);
930                 lst = n;
931         } while (lst);
932
933         return phys;
934
935 error:
936         do {
937                 vcm_phys_free_parts(lst->parts, lst->count);
938
939                 n = lst->next;
940                 kfree(lst);
941                 lst = n;
942         } while (lst);
943
944         return ERR_PTR(-ENOMEM);
945 }
946 EXPORT_SYMBOL_GPL(__vcm_phys_alloc);
947
948 static inline bool is_of_order(dma_addr_t size, unsigned order)
949 {
950         return !(size & (((dma_addr_t)PAGE_SIZE << order) - 1));
951 }
952
953 static int
954 __vcm_phys_walk_part(dma_addr_t vaddr, const struct vcm_phys_part *part,
955                      const unsigned char *orders,
956                      int (*callback)(dma_addr_t vaddr, dma_addr_t paddr,
957                                      unsigned order, void *priv), void *priv,
958                      unsigned *limit)
959 {
960         resource_size_t size = part->size;
961         dma_addr_t paddr = part->start;
962         resource_size_t ps;
963
964         while (!is_of_order(vaddr, *orders))
965                 ++orders;
966         while (!is_of_order(paddr, *orders))
967                 ++orders;
968
969         ps = PAGE_SIZE << *orders;
970         for (; *limit && size; --*limit) {
971                 int ret;
972
973                 while (ps > size)
974                         ps = PAGE_SIZE << *++orders;
975
976                 ret = callback(vaddr, paddr, *orders, priv);
977                 if (ret < 0)
978                         return ret;
979
980                 ps = PAGE_SIZE << *orders;
981                 vaddr += ps;
982                 paddr += ps;
983                 size  -= ps;
984         }
985
986         return 0;
987 }
988
989 int vcm_phys_walk(dma_addr_t _vaddr, const struct vcm_phys *phys,
990                   const unsigned char *orders,
991                   int (*callback)(dma_addr_t vaddr, dma_addr_t paddr,
992                                   unsigned order, void *arg),
993                   int (*recovery)(dma_addr_t vaddr, dma_addr_t paddr,
994                                   unsigned order, void *arg),
995                   void *priv)
996 {
997         unsigned limit = ~0;
998         int r = 0;
999
1000         if (WARN_ON(!phys || ((_vaddr | phys->size) & (PAGE_SIZE - 1)) ||
1001                     !phys->size || !orders || !callback))
1002                 return -EINVAL;
1003
1004         for (;;) {
1005                 const struct vcm_phys_part *part = phys->parts;
1006                 unsigned count = phys->count;
1007                 dma_addr_t vaddr = _vaddr;
1008                 int ret = 0;
1009
1010                 for (; count && limit; --count, ++part) {
1011                         ret = __vcm_phys_walk_part(vaddr, part, orders,
1012                                                    callback, priv, &limit);
1013                         if (ret)
1014                                 break;
1015
1016                         vaddr += part->size;
1017                 }
1018
1019                 if (r)
1020                         /* We have already run the error recovery pass */
1021                         return r;
1022
1023                 /*
1024                  * Either the operation succeeded or we were not provided
1025                  * with a recovery callback -- return.
1026                  */
1027                 if (!ret || !recovery)
1028                         return ret;
1029
1030                 /* Switch to recovery */
1031                 limit = ~0 - limit;
1032                 callback = recovery;
1033                 r = ret;
1034         }
1035 }
1036 EXPORT_SYMBOL_GPL(vcm_phys_walk);
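
/*
 * Illustration only (not part of the original file): callbacks passed
 * to vcm_phys_walk() see each chunk at the largest order that both
 * addresses are aligned to.  An IOMMU driver's activate_page hook could
 * look roughly like this (all names are hypothetical):
 *
 *      static int my_activate_page(dma_addr_t vaddr, dma_addr_t paddr,
 *                                  unsigned order, void *priv)
 *      {
 *              // install an entry covering PAGE_SIZE << order bytes
 *              // that maps vaddr to paddr in the IOMMU passed via priv
 *              return 0;       // a negative return triggers the recovery walk
 *      }
 */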
1037
1038 #endif