#include <asm/pgtable.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <plat/s5p-vcm.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <linux/err.h>
#include <linux/bitops.h>
#include <linux/genalloc.h>

#define PG_FLAG_MASK 0x3
#define PG_LV1_SECTION_FLAG 0x2
#define PG_LV1_PAGE_FLAG 0x1
#define PG_LV2_SPAGE_FLAG 0x2
#define PG_LV2_LPAGE_FLAG 0x1
#define PG_FAULT_FLAG 0

#define PG_LV1_LV2BASE_MASK 0xFFFFFC00
#define PG_LV1_SECTION_ADDR 0xFFF00000
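
/*
 * These values match the ARM short-descriptor (ARMv7 VMSA) page table format:
 * in a first-level entry the low 2 bits select a 1MB section (0b10) or a
 * pointer to a second-level table (0b01); in a second-level entry they select
 * a 4KB small page (0b10) or a 64KB large page (0b01). 0b00 always denotes a
 * fault, i.e. no mapping.
 */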

#define SPAGESHIFT PAGE_SHIFT
#define SECTIONSHIFT 20
#define LPAGESHIFT 16	/* 64KB ARM large page */

#define SECTIONSIZE (1 << SECTIONSHIFT)
#define LPAGESIZE (1 << LPAGESHIFT)
#define SPAGESIZE (1 << SPAGESHIFT)

#define SECTIONMASK (SECTIONSIZE - 1)
#define LPAGEMASK (LPAGESIZE - 1)
#define SPAGEMASK (SPAGESIZE - 1)

#define MAXSECTIONS (1 << (32 - SECTIONSHIFT))
#define LV1ENTRIES MAXSECTIONS
#define LV1TABLESIZE (LV1ENTRIES * sizeof(int))
#define LV2ENTRIES (1 << (SECTIONSHIFT - SPAGESHIFT))
#define LV2TABLESIZE (LV2ENTRIES * sizeof(int))

#define LV1OFF_FROM_VADDR(addr) (addr >> SECTIONSHIFT)
#define LV2OFF_FROM_VADDR(addr) ((addr & 0xFF000) >> SPAGESHIFT)
#define LV2OFF_FROM_VADDR_LPAGE(addr) ((addr & 0xF0000) >> SPAGESHIFT)
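
/*
 * Worked example: for vaddr 0x23456000, LV1OFF_FROM_VADDR() is 0x234 (index
 * of the containing 1MB section in the first-level table) and
 * LV2OFF_FROM_VADDR() is 0x56 (index of the 4KB page within that section).
 * For a 64KB large page the low four index bits are dropped, so
 * LV2OFF_FROM_VADDR_LPAGE() is 0x50.
 */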

/* slab cache for 2nd level page tables */
static struct kmem_cache *l2pgtbl_cachep;

/* function pointer to vcm_mmu_activate() defined in mm/vcm.c */
static int (*vcm_mmu_activate)(struct vcm *vcm);

/* S5PV310/S5PC210's implementation of a VCM context */
struct s5p_vcm_mmu {
	struct vcm_mmu mmu;
	const struct s5p_vcm_driver *driver;
	enum vcm_dev_id id;
	atomic_t refcnt;
};

/* The singleton object that maintains all vcm contexts. */
static struct s5p_vcm_unified {
	struct s5p_vcm_mmu *vcms[VCM_DEV_NUM];
	struct gen_pool *pool;
	unsigned long *pgd;	/* first-level page table shared by all contexts */
} shared_vcm;

/*** Conversion Functions on vcm contexts ************************************/

static inline struct s5p_vcm_mmu *downcast_vcm(struct vcm *vcm)
{
	return container_of(
			container_of(vcm, struct vcm_mmu, vcm),
			struct s5p_vcm_mmu, mmu);
}

static inline enum vcm_dev_id vcmid_from_s5p_vcm(struct s5p_vcm_mmu *s5p_vcm)
{
	int i;

	for (i = 0; i < VCM_DEV_NUM; i++)
		if (shared_vcm.vcms[i] == s5p_vcm)
			return (enum vcm_dev_id)i;

	return VCM_DEV_NONE;
}

static inline enum vcm_dev_id find_s5p_vcm_mmu_id(struct vcm *vcm)
{
	return vcmid_from_s5p_vcm(downcast_vcm(vcm));
}

static inline struct s5p_vcm_mmu *find_s5p_vcm_mmu(struct vcm *vcm)
{
	enum vcm_dev_id id;

	id = find_s5p_vcm_mmu_id(vcm);

	return (id == VCM_DEV_NONE) ? NULL : shared_vcm.vcms[id];
}

/*** SYSMMU handling **********************************************************/

static inline void sysmmu_turn_on(struct s5p_vcm_mmu *s5p_vcm)
{
	unsigned long ptbase = 0;

	if (s5p_vcm->mmu.activated)
		ptbase = virt_to_phys(shared_vcm.pgd);

	sysmmu_on(vcmid_to_smmuid(s5p_vcm->id), ptbase);
	if (s5p_vcm->id == VCM_DEV_MFC)
		sysmmu_on(SYSMMU_MFC_R, ptbase);
}

static inline void sysmmu_turn_off(struct s5p_vcm_mmu *s5p_vcm)
{
	sysmmu_off(vcmid_to_smmuid(s5p_vcm->id));
	if (s5p_vcm->id == VCM_DEV_MFC)
		sysmmu_off(SYSMMU_MFC_R);
}

static inline void invalidate_tlb(struct s5p_vcm_mmu *s5p_vcm)
{
	if (s5p_vcm->driver && s5p_vcm->driver->tlb_invalidator)
		s5p_vcm->driver->tlb_invalidator(s5p_vcm->id);
}

static inline void set_pagetable_base(struct s5p_vcm_mmu *s5p_vcm)
{
	if (s5p_vcm->driver && s5p_vcm->driver->pgd_base_specifier)
		s5p_vcm->driver->pgd_base_specifier(
				find_s5p_vcm_mmu_id(&s5p_vcm->mmu.vcm),
				virt_to_phys(shared_vcm.pgd));
}

static void default_tlb_invalidator(enum vcm_dev_id id)
{
	/* the caller must ensure that the sysmmu for id is turned on */
	sysmmu_tlb_invalidate(vcmid_to_smmuid(id));
	if (id == VCM_DEV_MFC)
		sysmmu_tlb_invalidate(SYSMMU_MFC_R);
}

static void default_pgd_base_specifier(enum vcm_dev_id id, unsigned long base)
{
	/* the caller must ensure that the sysmmu for id is turned on */
	sysmmu_set_tablebase_pgd(vcmid_to_smmuid(id), base);
	if (id == VCM_DEV_MFC)
		sysmmu_set_tablebase_pgd(SYSMMU_MFC_R, base);
}

static const struct s5p_vcm_driver default_s5p_vcm_driver = {
	.tlb_invalidator = &default_tlb_invalidator,
	.pgd_base_specifier = &default_pgd_base_specifier
};

/*** Page Table updating functions *******************************************/

static inline void s5p_mmu_cacheflush_contig(void *vastart, void *vaend)
{
	/* clean the CPU caches so that the SysMMU page table walker, which
	 * reads page tables from DRAM, sees the updated entries */
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
}

static void s5p_remove_2nd_mapping(unsigned long *base, resource_size_t *vaddr,
				resource_size_t *vsize)
{
	unsigned long *entry;
	resource_size_t wsize;
	unsigned long *flush_start;

	BUG_ON((*vaddr & SPAGEMASK) || (*vsize & SPAGEMASK));

	/* number of bytes to clear within the current 1MB section */
	wsize = SECTIONSIZE - (*vaddr & SECTIONMASK);
	if (wsize > *vsize)
		wsize = *vsize;

	entry = base + LV2OFF_FROM_VADDR(*vaddr);
	flush_start = entry;
	*vaddr += wsize;
	*vsize -= wsize;

	/* This works on both small pages and large pages */
	while (wsize > 0) {
		*entry++ = 0;
		wsize -= SPAGESIZE;
	}

	s5p_mmu_cacheflush_contig(flush_start, entry);
}

static void s5p_remove_mapping(unsigned long *pgd, resource_size_t vaddr,
				resource_size_t vsize)
{
	unsigned long *flush_start;
	unsigned long *flush_end;

	flush_start = pgd + LV1OFF_FROM_VADDR(vaddr);
	flush_end = flush_start + ((vsize + SECTIONSIZE) >> SECTIONSHIFT);

	while (vsize > 0) {
		unsigned long *first_entry;
		unsigned long *second_pgtable;
		resource_size_t diff;

		first_entry = pgd + LV1OFF_FROM_VADDR(vaddr);

		switch (*first_entry & PG_FLAG_MASK) {
		case PG_LV1_PAGE_FLAG:
			second_pgtable =
				phys_to_virt(*first_entry & PG_LV1_LV2BASE_MASK);
			s5p_remove_2nd_mapping(second_pgtable, &vaddr, &vsize);
			break;
		case PG_LV1_SECTION_FLAG:
			BUG_ON(vsize < SECTIONSIZE);
			BUG_ON(vaddr & SECTIONMASK);

			*first_entry = 0;
			vsize -= SECTIONSIZE;
			vaddr += SECTIONSIZE;
			break;
		default:
			/* Invalid and illegal entry. Clear it anyway */
			*first_entry = 0;

			diff = SECTIONSIZE - (vaddr & SECTIONMASK);
			vaddr = vaddr + diff;
			vsize -= (diff > vsize) ? vsize : diff;
			break;
		}
	}

	s5p_mmu_cacheflush_contig(flush_start, flush_end);
}

static int s5p_write_2nd_table(unsigned long *base, resource_size_t *vaddr,
		resource_size_t vend, struct vcm_phys_part **_parts_cur,
		resource_size_t mapped, struct vcm_phys_part *parts_end)
{
	unsigned long *entry;
	unsigned long *flush_start;
	struct vcm_phys_part *parts_cur = *_parts_cur;
	struct vcm_phys_part chunk = {0};

	BUG_ON(*vaddr & SPAGEMASK);

	/* this routine only writes entries within the current 1MB section */
	if ((vend - (*vaddr & ~SECTIONMASK)) > SECTIONSIZE)
		vend = (*vaddr & ~SECTIONMASK) + SECTIONSIZE;

	entry = base + LV2OFF_FROM_VADDR(*vaddr);
	flush_start = entry;

	/* 'mapped' is the number of bytes of *_parts_cur that have already
	 * been written into the page table; start from the remainder of the
	 * current chunk. */
	chunk.start = parts_cur->start + mapped;
	chunk.size = parts_cur->size - mapped;

	/* This routine never touches a page table entry, and thus returns an
	 * error code immediately, if the entry is not a 'fault mapping', no
	 * matter what the entry contains. An entry holds a 'fault mapping' if
	 * its least significant 2 bits are 0b00, which means no mapping
	 * exists.
	 *
	 * vaddr contains the address of the page following the last page
	 * written: this is important to continue mapping or to recover the
	 * mapping when an error occurs. */
	while ((parts_cur != parts_end) && (*vaddr < vend)) {
		unsigned long update_size;

		if (chunk.size == 0) {
			chunk.start = parts_cur->start;
			chunk.size = parts_cur->size;
		}

		/* Reports an error if the size of a chunk to map is
		 * smaller than the smallest page size. */
		if (chunk.size < SPAGESIZE)
			return -EBADR;

		if ((*entry & PG_FLAG_MASK) != PG_FAULT_FLAG)
			return -EADDRINUSE;

		if (((*vaddr & LPAGEMASK) == 0)
				&& ((chunk.start & LPAGEMASK) == 0)
				&& ((vend - *vaddr) >= LPAGESIZE)
				&& (chunk.size >= LPAGESIZE)) {
			int i;

			/* a 64KB large page is written as 16 identical
			 * consecutive entries in the 2nd level table */
			for (i = 0; i < (1 << (LPAGESHIFT - SPAGESHIFT)); i++) {
				if ((*entry & PG_FLAG_MASK) != PG_FAULT_FLAG)
					return -EADDRINUSE;
				*entry = chunk.start | PG_LV2_LPAGE_FLAG;
				entry++;
			}

			update_size = LPAGESIZE;
		} else if (((*vaddr & SPAGEMASK) == 0)
				&& ((vend - *vaddr) >= SPAGESIZE)
				&& (chunk.size >= SPAGESIZE)
				&& ((chunk.start & SPAGEMASK) == 0)) {
			*entry = chunk.start | PG_LV2_SPAGE_FLAG;
			entry++;
			update_size = SPAGESIZE;
		} else {
			return -EBADR;
		}

		*vaddr += update_size;
		chunk.size -= update_size;
		if (chunk.size == 0)
			parts_cur++;
		else
			chunk.start += update_size;
	}

	*_parts_cur = parts_cur;

	s5p_mmu_cacheflush_contig(flush_start, entry);

	/* Return value here must be a positive number including zero. */
	/* Returning larger than 0 means the current chunk isn't exhausted. */
	return (int)(chunk.size >> SPAGESHIFT);
}

static inline int lv2_pgtable_empty(unsigned long *pte_start)
{
	unsigned long *pte_end = pte_start + LV2ENTRIES;

	while ((pte_start != pte_end) && (*pte_start == 0))
		pte_start++;

	return (pte_start == pte_end) ? -1 : 0; /* true : false */
}

static int s5p_write_mapping(unsigned long *pgd, resource_size_t vaddr,
		resource_size_t vsize, struct vcm_phys_part *parts,
		unsigned int num_chunks)
{
	unsigned long *first_entry;
	resource_size_t vcur = vaddr;
	resource_size_t vend = vaddr + vsize;
	struct vcm_phys_part *parts_end;
	struct vcm_phys_part chunk = {0};
	unsigned int i;
	int ret = 0;
	unsigned long *second_pgtable = NULL;

	if (WARN_ON((vaddr | vsize) & SPAGEMASK))
		return -EINVAL;

	for (i = 0; i < num_chunks; i++)
		if (WARN_ON((parts[i].start | parts[i].size) & SPAGEMASK))
			return -EINVAL;

	/* ASSUMPTION: Sum of physical chunks is not smaller than vsize */

	parts_end = parts + num_chunks;

	first_entry = pgd + LV1OFF_FROM_VADDR(vaddr);

	/* Physical memory chunks in array 'parts'
	 * must be aligned by 1M, 64K or 4K */
	/* NO local variable is allowed in the below 'while' clause
	 * because of so many 'goto's! */
	while ((parts != parts_end) && (vcur < vend)) {
		if (chunk.size == 0) {
			chunk.start = parts->start;
			chunk.size = parts->size;
		}

		switch (*first_entry & PG_FLAG_MASK) {
		case PG_LV1_SECTION_FLAG:
			/* mapping already exists */
			ret = -EADDRINUSE;
			goto finish;
		case PG_LV1_PAGE_FLAG:
			second_pgtable = phys_to_virt(
					*first_entry & PG_LV1_LV2BASE_MASK);

			if ((((vcur | chunk.start) & SECTIONMASK) != 0)
					|| (chunk.size < SECTIONSIZE)
					|| ((vend - vcur) < SECTIONSIZE))
				goto write_pgtable;

			if (lv2_pgtable_empty(second_pgtable)) {
				kmem_cache_free(l2pgtbl_cachep, second_pgtable);
				goto section_mapping;
			}

			goto write_pgtable;
		case PG_FAULT_FLAG:
			if ((((vcur | chunk.start) & SECTIONMASK) == 0)
					&& ((vend - vcur) >= SECTIONSIZE)
					&& (chunk.size >= SECTIONSIZE))
				goto section_mapping;

			goto new_page_mapping;
		default:
			/* 0b11 is not a valid descriptor type */
			ret = -EBADR;
			goto finish;
		}

		continue; /* guard: never reach here! */

section_mapping:
		do {
			*first_entry = (chunk.start & ~SECTIONMASK)
						| PG_LV1_SECTION_FLAG;
			first_entry++;
			vcur += SECTIONSIZE;
			chunk.start += SECTIONSIZE;
			chunk.size -= SECTIONSIZE;
		} while ((chunk.size >= SECTIONSIZE)
				&& ((vend - vcur) >= SECTIONSIZE)
				&& (*first_entry == 0));

		if (chunk.size == 0)
			parts++;
		continue;

new_page_mapping:
		second_pgtable = kmem_cache_zalloc(l2pgtbl_cachep,
						GFP_ATOMIC | GFP_IOFS);
		if (!second_pgtable) {
			ret = -ENOMEM;
			goto finish;
		}

		BUG_ON((unsigned long)second_pgtable & ~PG_LV1_LV2BASE_MASK);
		*first_entry = virt_to_phys(second_pgtable) | PG_LV1_PAGE_FLAG;

write_pgtable:
		ret = s5p_write_2nd_table(second_pgtable, &vcur, vend,
				&parts, parts->size - chunk.size, parts_end);
		if (IS_ERR_VALUE(ret))
			goto finish;

		/* ret is the adjustment of the current chunk */
		chunk.size = (resource_size_t)(ret << SPAGESHIFT);
		chunk.start = parts->start + chunk.size;

		first_entry++;
	}

finish:
	s5p_mmu_cacheflush_contig(pgd + LV1OFF_FROM_VADDR(vaddr), first_entry);

	if (IS_ERR_VALUE(ret))
		s5p_remove_mapping(pgd, vaddr, vcur - vaddr);

	return ret;
}

/*** VCM and VCM-MMU drivers **************************************************/

static void s5p_mmu_cleanup(struct vcm *vcm)
{
	enum vcm_dev_id id;

	id = find_s5p_vcm_mmu_id(vcm);
	if (!WARN_ON(id == VCM_DEV_NONE) &&
			atomic_dec_and_test(&shared_vcm.vcms[id]->refcnt)) {
		kfree(shared_vcm.vcms[id]);
		shared_vcm.vcms[id] = NULL;
	}
}

static int s5p_mmu_activate(struct vcm_res *res, struct vcm_phys *phys)
{
	/* We don't need to check res and phys because vcm_bind() in mm/vcm.c
	 * has already checked them.
	 */
	return s5p_write_mapping(shared_vcm.pgd, res->start, phys->size,
				phys->parts, phys->count);
}

static void s5p_mmu_deactivate(struct vcm_res *res, struct vcm_phys *phys)
{
	enum vcm_dev_id id;
	struct s5p_vcm_mmu *s5p_mmu;

	if (WARN_ON(!res || !phys))
		return;

	id = find_s5p_vcm_mmu_id(res->vcm);
	if (id == VCM_DEV_NONE)
		return;

	s5p_mmu = shared_vcm.vcms[id];

	s5p_remove_mapping(shared_vcm.pgd, res->start, res->bound_size);

	invalidate_tlb(s5p_mmu);
}

/* This is exactly the same as vcm_mmu_activate() in mm/vcm.c. We have to
 * include TLB invalidation in the critical section of mmu->mutex. That is why
 * we have copied exactly the same code.
 */
static int s5p_vcm_mmu_activate(struct vcm *vcm)
{
	struct s5p_vcm_mmu *s5p_mmu;
	enum vcm_dev_id id;
	int ret;

	id = find_s5p_vcm_mmu_id(vcm);
	if (id == VCM_DEV_NONE)
		return -EINVAL;

	s5p_mmu = shared_vcm.vcms[id];

	/* pointer to vcm_mmu_activate in mm/vcm.c */
	ret = vcm_mmu_activate(vcm);
	if (IS_ERR_VALUE(ret))
		return ret;

	set_pagetable_base(s5p_mmu);

	return ret;
}

static struct vcm_phys *s5p_vcm_mmu_phys(struct vcm *vcm, resource_size_t size,
						unsigned flags)
{
	struct vcm_phys *phys = NULL;
	struct s5p_vcm_mmu *list;

	if (WARN_ON(!vcm || !size))
		return ERR_PTR(-EINVAL);

	list = downcast_vcm(vcm);

	if (list->driver->phys_alloc) {
		dma_addr_t phys_addr;

		phys_addr = list->driver->phys_alloc(size, flags);

		if (!IS_ERR_VALUE(phys_addr)) {
			phys = kmalloc(sizeof(*phys) + sizeof(*phys->parts),
					GFP_KERNEL);
			if (!phys)
				return ERR_PTR(-ENOMEM);

			phys->count = 1;
			phys->size = size;
			phys->free = list->driver->phys_free;
			phys->parts[0].start = phys_addr;
			phys->parts[0].size = size;
		}
	} else {
		phys = vcm_phys_alloc(size, 0,
			container_of(vcm, struct vcm_mmu, vcm)->driver->orders);
	}

	return phys;
}

static const unsigned char s5p_alloc_orders[] = {
	SECTIONSHIFT - PAGE_SHIFT,
	LPAGESHIFT - PAGE_SHIFT,
	SPAGESHIFT - PAGE_SHIFT
};

static struct vcm_mmu_driver s5p_vcm_mmu_driver = {
	.orders = s5p_alloc_orders,
	.cleanup = &s5p_mmu_cleanup,
	.activate = &s5p_mmu_activate,
	.deactivate = &s5p_mmu_deactivate
};

/*** External interface of s5p-vcm *******************************************/

static int __init s5p_vcm_init(void)
{
	int ret;

	shared_vcm.pgd = (unsigned long *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				fls(LV1TABLESIZE / PAGE_SIZE) - 1); /* 2 */
	if (shared_vcm.pgd == NULL)
		return -ENOMEM;

	shared_vcm.pool = gen_pool_create(SECTIONSHIFT, -1);
	if (shared_vcm.pool == NULL)
		return -ENOMEM;

	ret = gen_pool_add(shared_vcm.pool, 0x100000, 0xFFE00000, -1);
	if (ret)
		return ret;

	l2pgtbl_cachep = kmem_cache_create("VCM_L2_PGTable",
				1024, 1024, 0, NULL);
	if (!l2pgtbl_cachep)
		return -ENOMEM;

	return 0;
}
subsys_initcall(s5p_vcm_init);
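
/*
 * s5p_vcm_init() runs as a subsys initcall so that the shared first-level
 * page table, the virtual address pool and the second-level table cache are
 * ready before any device driver calls vcm_create_unified().
 */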

struct vcm *__must_check
vcm_create_unified(resource_size_t size, enum vcm_dev_id id,
			const struct s5p_vcm_driver *driver)
{
	static struct vcm_driver vcm_driver;
	struct s5p_vcm_mmu *s5p_vcm_mmu = NULL;

	BUG_ON(!shared_vcm.pgd);

	if (size & SPAGEMASK)
		return ERR_PTR(-EINVAL);

	WARN_ON(size & SECTIONMASK);
	if (WARN_ON(id >= VCM_DEV_NUM))
		return ERR_PTR(-EINVAL);

	if (shared_vcm.vcms[id]) {
		atomic_inc(&shared_vcm.vcms[id]->refcnt);
		return &shared_vcm.vcms[id]->mmu.vcm;
	}

	s5p_vcm_mmu = kzalloc(sizeof *s5p_vcm_mmu, GFP_KERNEL);
	if (!s5p_vcm_mmu)
		return ERR_PTR(-ENOMEM);

	s5p_vcm_mmu->mmu.vcm.size = (size + SECTIONSIZE - 1) & (~SECTIONMASK);
	s5p_vcm_mmu->mmu.vcm.start = gen_pool_alloc(shared_vcm.pool,
						s5p_vcm_mmu->mmu.vcm.size);
	s5p_vcm_mmu->mmu.driver = &s5p_vcm_mmu_driver;
	if (&s5p_vcm_mmu->mmu.vcm != vcm_mmu_init(&s5p_vcm_mmu->mmu)) {
		kfree(s5p_vcm_mmu);
		return ERR_PTR(-EBADR);
	}

	memcpy(&vcm_driver, s5p_vcm_mmu->mmu.vcm.driver,
			sizeof(struct vcm_driver));
	vcm_driver.phys = &s5p_vcm_mmu_phys;
	vcm_mmu_activate = vcm_driver.activate;
	vcm_driver.activate = &s5p_vcm_mmu_activate;
	s5p_vcm_mmu->mmu.vcm.driver = &vcm_driver;

	s5p_vcm_mmu->driver = (driver) ? driver : &default_s5p_vcm_driver;
	s5p_vcm_mmu->id = id;

	shared_vcm.vcms[id] = s5p_vcm_mmu;
	atomic_set(&shared_vcm.vcms[id]->refcnt, 1);

	return &(shared_vcm.vcms[id]->mmu.vcm);
}
EXPORT_SYMBOL(vcm_create_unified);
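
/*
 * Illustrative usage (not part of this file): a client driver typically
 * creates or reuses the shared context for its device ID and then activates
 * it through the VCM core. The size, ID and vcm_activate() call below are
 * only an assumed example of how the framework is meant to be used:
 *
 *	struct vcm *vcm = vcm_create_unified(SZ_64M, VCM_DEV_MFC, NULL);
 *
 *	if (!IS_ERR(vcm))
 *		vcm_activate(vcm);
 *
 * Passing NULL as 'driver' selects default_s5p_vcm_driver.
 */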

/* You will use this function when you want to make your peripheral device
 * refer to the given reservation and you don't know whether you may give the
 * address of the reservation to your peripheral device.
 * Thus, you can specify the given reservation to your peripheral device unless
 * this function returns S5PVCM_RES_NOT_IN_VCM.
 * If the given reservation does not belong to the given VCM context but to the
 * same virtual address space, you can also use the reservation. But you have
 * to reflect the mapping of the reservation in the other VCM context with TLB
 * invalidation of the given context, which this function performs before
 * returning S5PVCM_RES_IN_ADDRSPACE.
 */
enum S5PVCM_RESCHECK vcm_reservation_in_vcm(struct vcm *vcm,
						struct vcm_res *res)
{
	enum vcm_dev_id id;

	if (WARN_ON(!vcm || !res))
		return S5PVCM_RES_NOT_IN_VCM;

	if (res->vcm == vcm)
		return S5PVCM_RES_IN_VCM;

	id = find_s5p_vcm_mmu_id(res->vcm);
	if (id == VCM_DEV_NONE)
		return S5PVCM_RES_NOT_IN_VCM;

	id = find_s5p_vcm_mmu_id(vcm);
	invalidate_tlb(shared_vcm.vcms[id]);

	return S5PVCM_RES_IN_ADDRSPACE;
}
EXPORT_SYMBOL(vcm_reservation_in_vcm);
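
/*
 * Illustrative usage (assumed, not from this file): before programming a
 * reservation's device virtual address into a peripheral, a caller could
 * check whether that address is visible to the peripheral's VCM context.
 * 'my_vcm', 'res', 'regs' and REG_BASE_ADDR are hypothetical names:
 *
 *	if (vcm_reservation_in_vcm(my_vcm, res) != S5PVCM_RES_NOT_IN_VCM)
 *		writel(res->start, regs + REG_BASE_ADDR);
 *
 * Both S5PVCM_RES_IN_VCM and S5PVCM_RES_IN_ADDRSPACE mean the address may be
 * used; in the latter case this function has already invalidated the TLB of
 * the given context.
 */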

struct vcm *vcm_find_vcm(enum vcm_dev_id id)
{
	if ((id < VCM_DEV_NUM) && (id > VCM_DEV_NONE))
		return &(shared_vcm.vcms[id]->mmu.vcm);

	return NULL;
}
EXPORT_SYMBOL(vcm_find_vcm);

void vcm_set_pgtable_base(enum vcm_dev_id id)
{
	if ((id > VCM_DEV_NONE) && (id < VCM_DEV_NUM))
		set_pagetable_base(shared_vcm.vcms[id]);
}
EXPORT_SYMBOL(vcm_set_pgtable_base);

void s5p_vcm_turn_on(struct vcm *vcm)
{
	struct s5p_vcm_mmu *s5p_vcm;

	s5p_vcm = downcast_vcm(vcm);
	sysmmu_turn_on(s5p_vcm);
}
EXPORT_SYMBOL(s5p_vcm_turn_on);

void s5p_vcm_turn_off(struct vcm *vcm)
{
	struct s5p_vcm_mmu *s5p_vcm;

	s5p_vcm = downcast_vcm(vcm);
	sysmmu_turn_off(s5p_vcm);
}
EXPORT_SYMBOL(s5p_vcm_turn_off);