/* arch/arm/plat-s5p/s5p-vcm.c (kernel/linux-2.6.36.git, Tizen 1.0 source upload) */

#include <asm/pgtable.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cma.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <plat/s5p-vcm.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <linux/err.h>
#include <linux/bitops.h>
#include <linux/genalloc.h>

#define PG_FLAG_MASK 0x3
#define PG_LV1_SECTION_FLAG 0x2
#define PG_LV1_PAGE_FLAG 0x1
#define PG_LV2_SPAGE_FLAG 0x2
#define PG_LV2_LPAGE_FLAG 0x1
#define PG_FAULT_FLAG 0

#define PG_LV1_LV2BASE_MASK 0xFFFFFC00
#define PG_LV1_SECTION_ADDR 0xFFF00000

#define LPAGESHIFT 16
#define SPAGESHIFT PAGE_SHIFT
#define SECTIONSHIFT 20
#define KBSHIFT 10

#define SECTIONSIZE (1 << SECTIONSHIFT)
#define LPAGESIZE (1 << LPAGESHIFT)
#define SPAGESIZE (1 << SPAGESHIFT)

#define SECTIONMASK (SECTIONSIZE - 1)
#define LPAGEMASK (LPAGESIZE - 1)
#define SPAGEMASK (SPAGESIZE - 1)

#define MAXSECTIONS (1 << (32 - SECTIONSHIFT))
#define LV1ENTRIES MAXSECTIONS
#define LV1TABLESIZE (LV1ENTRIES * sizeof(int))
#define LV2ENTRIES (1 << (SECTIONSHIFT - SPAGESHIFT))
#define LV2TABLESIZE (LV2ENTRIES * sizeof(int))

#define LV1OFF_FROM_VADDR(addr) (addr >> SECTIONSHIFT)
#define LV2OFF_FROM_VADDR(addr) ((addr & 0xFF000) >> SPAGESHIFT)
#define LV2OFF_FROM_VADDR_LPAGE(addr) ((addr & 0xF0000) >> SPAGESHIFT)

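/*
 * Illustrative note (added for this write-up, not in the original source): how
 * the macros above decompose a 32-bit virtual address into page-table offsets.
 * The address 0x2034A000 is an arbitrary example value.
 *
 *   LV1OFF_FROM_VADDR(0x2034A000)       == 0x203  -> index into the 4096-entry
 *                                                    first-level table (1MB sections)
 *   LV2OFF_FROM_VADDR(0x2034A000)       == 0x4A   -> index into the 256-entry
 *                                                    second-level table (4KB pages)
 *   LV2OFF_FROM_VADDR_LPAGE(0x2034A000) == 0x40   -> first of the 16 identical
 *                                                    entries used for a 64KB large page
 */
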
/* slab cache for 2nd level page tables */
static struct kmem_cache *l2pgtbl_cachep;

/* function pointer to vcm_mmu_activate() defined in mm/vcm.c */
static int (*vcm_mmu_activate)(struct vcm *vcm);

/* S5PV310/S5PC210's implementation of VCM context */
struct s5p_vcm_mmu {
        const struct s5p_vcm_driver *driver;
        atomic_t refcnt;
        enum vcm_dev_id id;
        struct vcm_mmu mmu;
};

/* The singleton object that maintains all vcm contexts. */
struct s5p_vcm_unified {
        struct s5p_vcm_mmu *vcms[VCM_DEV_NUM];
        struct gen_pool *pool;
        unsigned long *pgd;
} shared_vcm;

/*** Conversion Functions on vcm contexts ************************************/

static inline struct s5p_vcm_mmu *downcast_vcm(struct vcm *vcm)
{
        return container_of(
                        container_of(vcm, struct vcm_mmu, vcm),
                        struct s5p_vcm_mmu,
                        mmu
                        );
}

static inline enum vcm_dev_id vcmid_from_s5p_vcm(struct s5p_vcm_mmu *s5p_vcm)
{
        int i;

        for (i = 0; i < VCM_DEV_NUM; i++)
                if (shared_vcm.vcms[i] == s5p_vcm)
                        return (enum vcm_dev_id)i;

        return VCM_DEV_NONE;
}

static inline enum vcm_dev_id find_s5p_vcm_mmu_id(struct vcm *vcm)
{
        return vcmid_from_s5p_vcm(downcast_vcm(vcm));
}

static inline struct s5p_vcm_mmu *find_s5p_vcm_mmu(struct vcm *vcm)
{
        enum vcm_dev_id id;

        id = find_s5p_vcm_mmu_id(vcm);

        return (id == VCM_DEV_NONE) ? NULL : shared_vcm.vcms[id];
}

/*** SYSMMU handling **********************************************************/

static inline void sysmmu_turn_on(struct s5p_vcm_mmu *s5p_vcm)
{
        unsigned long ptbase = 0;

        if (s5p_vcm->mmu.activated)
                ptbase = virt_to_phys(shared_vcm.pgd);

        sysmmu_on(vcmid_to_smmuid(s5p_vcm->id), ptbase);
        if (s5p_vcm->id == VCM_DEV_MFC)
                sysmmu_on(SYSMMU_MFC_R, ptbase);
}

static inline void sysmmu_turn_off(struct s5p_vcm_mmu *s5p_vcm)
{
        sysmmu_off(vcmid_to_smmuid(s5p_vcm->id));
        if (s5p_vcm->id == VCM_DEV_MFC)
                sysmmu_off(SYSMMU_MFC_R);
}

static inline void invalidate_tlb(struct s5p_vcm_mmu *s5p_vcm)
{
        if (s5p_vcm->driver && s5p_vcm->driver->tlb_invalidator)
                s5p_vcm->driver->tlb_invalidator(s5p_vcm->id);
}

static inline void set_pagetable_base(struct s5p_vcm_mmu *s5p_vcm)
{
        if (s5p_vcm->driver && s5p_vcm->driver->pgd_base_specifier)
                s5p_vcm->driver->pgd_base_specifier(
                                find_s5p_vcm_mmu_id(&s5p_vcm->mmu.vcm),
                                virt_to_phys(shared_vcm.pgd));
}

static void default_tlb_invalidator(enum vcm_dev_id id)
{
        /* the caller must ensure that the sysmmu for id is turned on */
        sysmmu_tlb_invalidate(vcmid_to_smmuid(id));
        if (id == VCM_DEV_MFC)
                sysmmu_tlb_invalidate(SYSMMU_MFC_R);
}

static void default_pgd_base_specifier(enum vcm_dev_id id, unsigned long base)
{
        /* the caller must ensure that the sysmmu for id is turned on */
        sysmmu_set_tablebase_pgd(vcmid_to_smmuid(id), base);
        if (id == VCM_DEV_MFC)
                sysmmu_set_tablebase_pgd(SYSMMU_MFC_R, base);
}

static const struct s5p_vcm_driver default_s5p_vcm_driver = {
        .tlb_invalidator = &default_tlb_invalidator,
        .pgd_base_specifier = &default_pgd_base_specifier
};

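/*
 * Illustrative sketch (not part of the original driver, guarded out): a device
 * driver may pass its own struct s5p_vcm_driver to vcm_create_unified() instead
 * of relying on default_s5p_vcm_driver above. The callback names below
 * (my_dev_tlb_invalidator, my_dev_pgd_base_specifier) are hypothetical; only
 * the two callback signatures are taken from the defaults above.
 */
#if 0
static void my_dev_tlb_invalidator(enum vcm_dev_id id)
{
        /* device-specific TLB maintenance; the sysmmu for id must be on */
        sysmmu_tlb_invalidate(vcmid_to_smmuid(id));
}

static void my_dev_pgd_base_specifier(enum vcm_dev_id id, unsigned long base)
{
        /* device-specific way of programming the page table base */
        sysmmu_set_tablebase_pgd(vcmid_to_smmuid(id), base);
}

static const struct s5p_vcm_driver my_dev_vcm_driver = {
        .tlb_invalidator = &my_dev_tlb_invalidator,
        .pgd_base_specifier = &my_dev_pgd_base_specifier,
};
#endif
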
/*** Page Table updating functions *******************************************/

static inline void s5p_mmu_cacheflush_contig(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),
                                virt_to_phys(vaend));
}

static void s5p_remove_2nd_mapping(unsigned long *base, resource_size_t *vaddr,
                resource_size_t *vsize)
{
        unsigned long *entry;
        resource_size_t wsize;
        unsigned long *flush_start;

        BUG_ON((*vaddr & SPAGEMASK) || (*vsize & SPAGEMASK));

        wsize = SECTIONSIZE - (*vaddr & SECTIONMASK);
        if (*vsize < wsize)
                wsize = *vsize;

        entry = base + LV2OFF_FROM_VADDR(*vaddr);
        flush_start = entry;

        *vsize -= wsize;
        *vaddr += wsize;

        /* This works for both small pages and large pages */
        while (wsize) {
                *entry = 0;
                wsize -= SPAGESIZE;
                entry++;
        }

        s5p_mmu_cacheflush_contig(flush_start, entry);
}

static void s5p_remove_mapping(unsigned long *pgd, resource_size_t vaddr,
                        resource_size_t vsize)
{
        unsigned long *flush_start;
        unsigned long *flush_end;

        flush_start = pgd + LV1OFF_FROM_VADDR(vaddr);
        flush_end = flush_start + ((vsize + SECTIONSIZE) >> SECTIONSHIFT);

        while (vsize) {
                unsigned long *first_entry;
                unsigned long *second_pgtable;
                unsigned long diff;

                first_entry = pgd + LV1OFF_FROM_VADDR(vaddr);

                switch (*first_entry & PG_FLAG_MASK) {
                case PG_LV1_PAGE_FLAG:
                        second_pgtable =
                                phys_to_virt(*first_entry & PG_LV1_LV2BASE_MASK);
                        s5p_remove_2nd_mapping(second_pgtable, &vaddr, &vsize);
                        break;
                case PG_LV1_SECTION_FLAG:
                        BUG_ON(vsize < SECTIONSIZE);
                        BUG_ON(vaddr & SECTIONMASK);

                        *first_entry = 0;
                        vsize -= SECTIONSIZE;
                        vaddr += SECTIONSIZE;
                        break;
                default:
                        /* Invalid and illegal entry. Clear it anyway */
                        WARN_ON(1);
                        *first_entry = 0;
                        diff = SECTIONSIZE - (vaddr & SECTIONMASK);
                        vaddr = vaddr + diff;
                        if (vsize > diff)
                                vsize -= diff;
                        else
                                vsize = 0;
                        break;
                }
        }
        s5p_mmu_cacheflush_contig(flush_start, flush_end);
}

static int s5p_write_2nd_table(unsigned long *base, resource_size_t *vaddr,
                resource_size_t vend, struct vcm_phys_part **_parts_cur,
                resource_size_t mapped, struct vcm_phys_part *parts_end)
{
        unsigned long *entry;
        unsigned long *flush_start;
        struct vcm_phys_part *parts_cur = *_parts_cur;
        struct vcm_phys_part chunk = {0};

        BUG_ON(*vaddr & SPAGEMASK);

        if ((vend - (*vaddr & ~SECTIONMASK)) > SECTIONSIZE)
                vend = (*vaddr & ~SECTIONMASK) + SECTIONSIZE;

        entry = base + LV2OFF_FROM_VADDR(*vaddr);

        flush_start = entry;

        /* If 'mapped' is zero, i.e. chunk.start equals parts_cur->start, the
         * parts_cur chunk has never been mapped. */
        chunk.start = parts_cur->start + mapped;
        chunk.size = parts_cur->size - mapped;
        /* This routine never touches a page table entry that is not a 'fault
         * mapping'; it returns an error code immediately, no matter what the
         * entry contains. An entry is a 'fault mapping' if its least
         * significant 2 bits are 0b00, which means no mapping exists.
         *
         * vaddr contains the address of the page following the last page
         * written: this is important to continue mapping or to recover
         * from an error. */
        while ((parts_cur != parts_end) && (*vaddr < vend)) {
                unsigned long update_size;

                if (chunk.size == 0) {
                        chunk.start = parts_cur->start;
                        chunk.size = parts_cur->size;
                }

                /* Report an error if the size of a chunk to map is
                 * smaller than the smallest page size. */
                if (chunk.size < SPAGESIZE)
                        return -EBADR;

                if ((*entry & PG_FLAG_MASK) != PG_FAULT_FLAG)
                        return -EADDRINUSE;

                if (((*vaddr & LPAGEMASK) == 0)
                                && ((chunk.start & LPAGEMASK) == 0)
                                && ((vend - *vaddr) >= LPAGESIZE)
                                && (chunk.size >= LPAGESIZE)) {
                        int i;

                        for (i = 0; i < (1 << (LPAGESHIFT - SPAGESHIFT)); i++) {
                                if ((*entry & PG_FLAG_MASK) != PG_FAULT_FLAG)
                                        return -EADDRINUSE;
                                *entry = chunk.start | PG_LV2_LPAGE_FLAG;
                                entry++;
                                *vaddr += SPAGESIZE;
                        }
                        update_size = LPAGESIZE;
                } else if (((*vaddr & SPAGEMASK) == 0)
                                && ((vend - *vaddr) >= SPAGESIZE)
                                && (chunk.size >= SPAGESIZE)
                                && ((chunk.start & SPAGEMASK) == 0)) {
                        *entry = chunk.start | PG_LV2_SPAGE_FLAG;
                        entry++;
                        update_size = SPAGESIZE;
                        *vaddr += update_size;
                } else {
                        return -EBADR;
                }

                chunk.size -= update_size;
                if (chunk.size == 0)
                        parts_cur++;
                else
                        chunk.start += update_size;
        }

        *_parts_cur = parts_cur;

        s5p_mmu_cacheflush_contig(flush_start, entry);

        /* The return value here must be non-negative. */
        /* Returning a value larger than 0 means the current chunk is not
         * exhausted yet. */
        return (int)(chunk.size >> SPAGESHIFT);
}
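
/*
 * Worked example (added for illustration, not in the original source): suppose
 * s5p_write_2nd_table() is asked to map a 72KB physical chunk that starts at a
 * 64KB-aligned address, with *vaddr also 64KB aligned. The loop above first
 * writes one 64KB large page, i.e. 16 identical entries tagged
 * PG_LV2_LPAGE_FLAG, then falls back to two 4KB small pages tagged
 * PG_LV2_SPAGE_FLAG for the remaining 8KB. If any of those entries is not a
 * fault mapping, it bails out with -EADDRINUSE and the caller unwinds through
 * s5p_remove_mapping().
 */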

static inline int lv2_pgtable_empty(unsigned long *pte_start)
{
        unsigned long *pte_end = pte_start + LV2ENTRIES;

        while ((pte_start != pte_end) && (*pte_start == 0))
                pte_start++;

        return (pte_start == pte_end) ? -1 : 0; /* true : false */
}

static int s5p_write_mapping(unsigned long *pgd, resource_size_t vaddr,
                        resource_size_t vsize, struct vcm_phys_part *parts,
                        unsigned int num_chunks)
{
        unsigned long *first_entry;
        resource_size_t vcur = vaddr;
        resource_size_t vend = vaddr + vsize;
        struct vcm_phys_part *parts_end;
        struct vcm_phys_part chunk = {0};
        int i;
        int ret = 0;
        unsigned long *second_pgtable = NULL;

        if (WARN_ON((vaddr | vsize) & SPAGEMASK))
                return -EFAULT;

        for (i = 0; i < num_chunks; i++)
                if (WARN_ON((parts[i].start | parts[i].size) & SPAGEMASK))
                        return -EFAULT;
        /* ASSUMPTION: the sum of the physical chunks is not smaller than vsize */

        parts_end = parts + num_chunks;

        first_entry = pgd + LV1OFF_FROM_VADDR(vaddr);

        /* Physical memory chunks in the array 'parts'
         * must be aligned to 1MB, 64KB or 4KB. */
        /* No local variables are allowed in the 'while' loop below
         * because of the many 'goto's! */
        while ((parts != parts_end) && (vcur < vend)) {
                if (chunk.size == 0) {
                        chunk.start = parts->start;
                        chunk.size = parts->size;
                }

                switch (*first_entry & PG_FLAG_MASK) {
                case PG_LV1_SECTION_FLAG:
                        /* mapping already exists */
                        ret = -EADDRINUSE;
                        goto fail;

                case PG_LV1_PAGE_FLAG:
                        second_pgtable = phys_to_virt(
                                        *first_entry & PG_LV1_LV2BASE_MASK);

                        if ((((vcur | chunk.start) & SECTIONMASK) != 0)
                                        || (chunk.size < SECTIONSIZE)
                                        || ((vend - vcur) < SECTIONSIZE)) {
                                goto page_mapping;
                        }
                        /* else */
                        if (lv2_pgtable_empty(second_pgtable)) {
                                kmem_cache_free(l2pgtbl_cachep, second_pgtable);
                                *first_entry = 0;
                                goto section_mapping;
                        }
                        /* else */
                        ret = -EADDRINUSE;
                        goto fail;

                case PG_FAULT_FLAG:
                        if ((((vcur | chunk.start) & SECTIONMASK) == 0)
                                        && ((vend - vcur) >= SECTIONSIZE)
                                        && (chunk.size >= SECTIONSIZE))
                                goto section_mapping;
                        else
                                goto new_page_mapping;
                }

                BUG();
                continue; /* guard: never reach here! */

section_mapping:
                do {
                        *first_entry = (chunk.start & ~SECTIONMASK)
                                        | PG_LV1_SECTION_FLAG;
                        first_entry++;
                        chunk.start += SECTIONSIZE;
                        chunk.size -= SECTIONSIZE;
                        vcur += SECTIONSIZE;
                } while ((chunk.size >= SECTIONSIZE)
                                && ((vend - vcur) >= SECTIONSIZE)
                                && (*first_entry == 0));

                if (chunk.size == 0)
                        parts++;
                continue;

new_page_mapping:
                second_pgtable = kmem_cache_zalloc(l2pgtbl_cachep,
                                                        GFP_ATOMIC | GFP_IOFS);
                if (!second_pgtable) {
                        ret = -ENOMEM;
                        goto fail;
                }
                BUG_ON((unsigned long)second_pgtable & ~PG_LV1_LV2BASE_MASK);
                *first_entry = virt_to_phys(second_pgtable) | PG_LV1_PAGE_FLAG;

page_mapping:
                ret = s5p_write_2nd_table(second_pgtable, &vcur, vend,
                                &parts, parts->size - chunk.size, parts_end);
                if (ret < 0)
                        goto fail;
                /* ret is the number of unmapped small pages remaining in the
                 * current chunk; resume the chunk at its unmapped remainder */
                chunk.size = (resource_size_t)(ret << SPAGESHIFT);
                chunk.start = parts->start + (parts->size - chunk.size);

                first_entry++;
        }

        s5p_mmu_cacheflush_contig(pgd + LV1OFF_FROM_VADDR(vaddr), first_entry);

fail:
        if (IS_ERR_VALUE(ret))
                s5p_remove_mapping(pgd, vaddr, vcur - vaddr);
        return ret;
}
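
/*
 * Illustrative sketch (added for this write-up, guarded out): how the internal
 * s5p_write_mapping() helper is driven. The physical layout below is made up;
 * in the driver the call is issued from s5p_mmu_activate() with the parts
 * array coming from struct vcm_phys.
 */
#if 0
static int example_map_two_chunks(resource_size_t vstart)
{
        /* two physical chunks: one 1MB section-aligned, one 64KB chunk */
        struct vcm_phys_part parts[2] = {
                { .start = 0x40000000, .size = SECTIONSIZE },
                { .start = 0x45010000, .size = LPAGESIZE },
        };

        /* vstart and all chunks must be at least 4KB aligned */
        return s5p_write_mapping(shared_vcm.pgd, vstart,
                                 SECTIONSIZE + LPAGESIZE, parts, 2);
}
#endif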

/*** VCM and VCM-MMU drivers **************************************************/

static void s5p_mmu_cleanup(struct vcm *vcm)
{
        enum vcm_dev_id id;

        id = find_s5p_vcm_mmu_id(vcm);
        /* warn and bail out if no VCM context is registered for vcm */
        if (!WARN_ON(id == VCM_DEV_NONE) &&
                        atomic_dec_and_test(&shared_vcm.vcms[id]->refcnt)) {
                kfree(shared_vcm.vcms[id]);
                shared_vcm.vcms[id] = NULL;
        }
}

static int s5p_mmu_activate(struct vcm_res *res, struct vcm_phys *phys)
{
        /* We don't need to check res and phys because vcm_bind() in mm/vcm.c
         * has already checked them.
         */
        return s5p_write_mapping(shared_vcm.pgd, res->start, phys->size,
                                        phys->parts, phys->count);
}

static void s5p_mmu_deactivate(struct vcm_res *res, struct vcm_phys *phys)
{
        enum vcm_dev_id id;
        struct s5p_vcm_mmu *s5p_mmu;

        if (WARN_ON(!res || !phys))
                return;

        id = find_s5p_vcm_mmu_id(res->vcm);
        if (id == VCM_DEV_NONE)
                return;

        s5p_mmu = shared_vcm.vcms[id];
        if (!s5p_mmu)
                return;

        s5p_remove_mapping(shared_vcm.pgd, res->start, res->bound_size);

        invalidate_tlb(s5p_mmu);
}

/* This is exactly the same as vcm_mmu_activate() in mm/vcm.c. We have to
 * include the TLB invalidation in the critical section of mmu->mutex.
 * That is why we copied exactly the same code.
 */
static int s5p_vcm_mmu_activate(struct vcm *vcm)
{
        struct s5p_vcm_mmu *s5p_mmu;
        enum vcm_dev_id id;
        int ret;

        id = find_s5p_vcm_mmu_id(vcm);
        if (id == VCM_DEV_NONE)
                return -EINVAL;

        s5p_mmu = shared_vcm.vcms[id];
        if (!s5p_mmu)
                return -EINVAL;

        /* pointer to vcm_mmu_activate in mm/vcm.c */
        ret = vcm_mmu_activate(vcm);
        if (ret)
                return ret;

        set_pagetable_base(s5p_mmu);

        return 0;
}

static struct vcm_phys *s5p_vcm_mmu_phys(struct vcm *vcm, resource_size_t size,
                                        unsigned flags)
{
        struct vcm_phys *phys = NULL;
        struct s5p_vcm_mmu *list;

        if (WARN_ON(!vcm || !size))
                return ERR_PTR(-EINVAL);

        list = downcast_vcm(vcm);

        if (list->driver->phys_alloc) {
                dma_addr_t phys_addr;

                phys_addr = list->driver->phys_alloc(size, flags);

                if (!IS_ERR_VALUE(phys_addr)) {
                        phys = kmalloc(sizeof(*phys) + sizeof(*phys->parts),
                                        GFP_KERNEL);
                        if (phys == NULL)
                                return ERR_PTR(-ENOMEM);
                        phys->count = 1;
                        phys->size = size;
                        phys->free = list->driver->phys_free;
                        phys->parts[0].start = phys_addr;
                        phys->parts[0].size = size;
                }
        } else {
                phys = vcm_phys_alloc(size, 0,
                        container_of(vcm, struct vcm_mmu, vcm)->driver->orders);
        }
        return phys;
}

/* preferred allocation orders: 1MB sections, 64KB large pages, 4KB small pages */
static const unsigned char s5p_alloc_orders[] = {
        20 - PAGE_SHIFT,
        16 - PAGE_SHIFT,
        12 - PAGE_SHIFT
};

static struct vcm_mmu_driver s5p_vcm_mmu_driver = {
        .orders = s5p_alloc_orders,
        .cleanup = &s5p_mmu_cleanup,
        .activate = &s5p_mmu_activate,
        .deactivate = &s5p_mmu_deactivate
};

/*** External interface of s5p-vcm *******************************************/

static int __init s5p_vcm_init(void)
{
        int ret;

        shared_vcm.pgd = (unsigned long *)__get_free_pages(
                        GFP_KERNEL | __GFP_ZERO,
                        fls(LV1TABLESIZE / PAGE_SIZE) - 1); /* order 2: 16KB */
        if (shared_vcm.pgd == NULL)
                return -ENOMEM;

        shared_vcm.pool = gen_pool_create(SECTIONSHIFT, -1);
        if (shared_vcm.pool == NULL)
                return -ENOMEM;

        ret = gen_pool_add(shared_vcm.pool, 0x100000, 0xFFE00000, -1);
        if (ret)
                return ret;

        /* each second-level table is LV2TABLESIZE (1KB) and 1KB aligned */
        l2pgtbl_cachep = kmem_cache_create("VCM_L2_PGTable",
                        LV2TABLESIZE, LV2TABLESIZE, 0, NULL);
        if (!l2pgtbl_cachep)
                return -ENOMEM;

        return 0;
}
subsys_initcall(s5p_vcm_init);

struct vcm *__must_check
vcm_create_unified(resource_size_t size, enum vcm_dev_id id,
                                        const struct s5p_vcm_driver *driver)
{
        static struct vcm_driver vcm_driver;
        struct s5p_vcm_mmu *s5p_vcm_mmu = NULL;

        BUG_ON(!shared_vcm.pgd);

        if (size & SPAGEMASK)
                return ERR_PTR(-EINVAL);

        WARN_ON(size & SECTIONMASK);
        if (WARN_ON(id >= VCM_DEV_NUM))
                return ERR_PTR(-EINVAL);

        if (shared_vcm.vcms[id]) {
                atomic_inc(&shared_vcm.vcms[id]->refcnt);
                return &shared_vcm.vcms[id]->mmu.vcm;
        }

        s5p_vcm_mmu = kzalloc(sizeof(*s5p_vcm_mmu), GFP_KERNEL);
        if (!s5p_vcm_mmu)
                return ERR_PTR(-ENOMEM);

        s5p_vcm_mmu->mmu.vcm.size = (size + SECTIONSIZE - 1) & ~SECTIONMASK;
        s5p_vcm_mmu->mmu.vcm.start = gen_pool_alloc(shared_vcm.pool,
                                                s5p_vcm_mmu->mmu.vcm.size);
        s5p_vcm_mmu->mmu.driver = &s5p_vcm_mmu_driver;
        if (&s5p_vcm_mmu->mmu.vcm != vcm_mmu_init(&s5p_vcm_mmu->mmu)) {
                kfree(s5p_vcm_mmu);
                return ERR_PTR(-EBADR);
        }

        memcpy(&vcm_driver, s5p_vcm_mmu->mmu.vcm.driver,
                                                sizeof(struct vcm_driver));
        vcm_driver.phys = &s5p_vcm_mmu_phys;
        vcm_mmu_activate = vcm_driver.activate;
        vcm_driver.activate = &s5p_vcm_mmu_activate;
        s5p_vcm_mmu->mmu.vcm.driver = &vcm_driver;

        s5p_vcm_mmu->driver = (driver) ? driver : &default_s5p_vcm_driver;
        s5p_vcm_mmu->id = id;

        shared_vcm.vcms[id] = s5p_vcm_mmu;
        atomic_set(&shared_vcm.vcms[id]->refcnt, 1);

        return &(shared_vcm.vcms[id]->mmu.vcm);
}
EXPORT_SYMBOL(vcm_create_unified);
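
/*
 * Illustrative sketch (not part of the original source, guarded out): a typical
 * way a device driver could obtain a VCM context from this file. The device id
 * VCM_DEV_MFC and the 256MB size are example values; error handling is reduced
 * to the minimum.
 */
#if 0
static struct vcm *example_vcm;

static int example_setup_vcm(void)
{
        /* a NULL driver selects default_s5p_vcm_driver (direct sysmmu control) */
        example_vcm = vcm_create_unified(256 * SECTIONSIZE, VCM_DEV_MFC, NULL);
        if (IS_ERR(example_vcm))
                return PTR_ERR(example_vcm);

        /* enable the System MMU of the device with the shared page table */
        s5p_vcm_turn_on(example_vcm);
        return 0;
}
#endif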

/* You will use this function when you want to make your peripheral device
 * refer to the given reservation and you don't know whether you may give the
 * address of the reservation to your peripheral device.
 * You can hand the given reservation to your peripheral device unless this
 * function returns S5PVCM_RES_NOT_IN_VCM.
 * If the given reservation does not belong to the given VCM context but to the
 * same virtual address space, you can also use the reservation, but you have
 * to reflect the mapping of the reservation in the other VCM context with a
 * TLB invalidation.
 */
enum S5PVCM_RESCHECK vcm_reservation_in_vcm(struct vcm *vcm,
                                                        struct vcm_res *res)
{
        enum vcm_dev_id id;

        if (WARN_ON(!vcm || !res))
                return S5PVCM_RES_NOT_IN_VCM;

        if (res->vcm == vcm)
                return S5PVCM_RES_IN_VCM;

        id = find_s5p_vcm_mmu_id(res->vcm);
        if (id == VCM_DEV_NONE)
                return S5PVCM_RES_NOT_IN_VCM;

        id = find_s5p_vcm_mmu_id(vcm);
        invalidate_tlb(shared_vcm.vcms[id]);

        return S5PVCM_RES_IN_ADDRSPACE;
}
EXPORT_SYMBOL(vcm_reservation_in_vcm);
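
/*
 * Illustrative sketch (added here, guarded out): acting on the return value of
 * vcm_reservation_in_vcm() before handing a reservation to a device. The
 * variables dev_vcm and res are assumed to come from the caller.
 */
#if 0
static int example_check_reservation(struct vcm *dev_vcm, struct vcm_res *res)
{
        switch (vcm_reservation_in_vcm(dev_vcm, res)) {
        case S5PVCM_RES_IN_VCM:
                /* res belongs to dev_vcm; use res->start directly */
                return 0;
        case S5PVCM_RES_IN_ADDRSPACE:
                /* same address space; the TLB has just been invalidated */
                return 0;
        case S5PVCM_RES_NOT_IN_VCM:
        default:
                /* the device must not use this reservation */
                return -EINVAL;
        }
}
#endif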

struct vcm *vcm_find_vcm(enum vcm_dev_id id)
{
        if ((id < VCM_DEV_NUM) && (id > VCM_DEV_NONE))
                return &(shared_vcm.vcms[id]->mmu.vcm);

        return NULL;
}
EXPORT_SYMBOL(vcm_find_vcm);

void vcm_set_pgtable_base(enum vcm_dev_id id)
{
        if ((id > VCM_DEV_NONE) && (id < VCM_DEV_NUM))
                set_pagetable_base(shared_vcm.vcms[id]);
}
EXPORT_SYMBOL(vcm_set_pgtable_base);

void s5p_vcm_turn_on(struct vcm *vcm)
{
        struct s5p_vcm_mmu *s5p_vcm;

        s5p_vcm = downcast_vcm(vcm);
        sysmmu_turn_on(s5p_vcm);
}
EXPORT_SYMBOL(s5p_vcm_turn_on);

void s5p_vcm_turn_off(struct vcm *vcm)
{
        struct s5p_vcm_mmu *s5p_vcm;

        s5p_vcm = downcast_vcm(vcm);
        sysmmu_turn_off(s5p_vcm);
}
EXPORT_SYMBOL(s5p_vcm_turn_off);