/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

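/*
 * Added note (not in the original source): a quick worked example of
 * the constants above. On 64-bit, a crst (region/segment) table is
 * 16K, i.e. four pages, so ALLOC_ORDER is 2, and a 4K page holds two
 * 2K page tables, so two fragment bits (FRAG_MASK 0x03) track its
 * occupancy. On 31-bit, a segment table is 8K (order 1) and a 4K page
 * holds four 1K page tables, hence four fragment bits (FRAG_MASK 0x0f).
 */
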
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	return 0;
}
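
/*
 * Added note: crst_table_upgrade() climbs a fixed ladder of address
 * space limits, one level per pass through the repeat loop:
 *
 *	2G (1UL << 31, segment table top level)
 *	  -> 4T (1UL << 42, region-3 top level)
 *	  -> 8P (1UL << 53, region-2 top level)
 *
 * so raising a 2G address space to the 8P limit allocates two new
 * top-level tables, each linked above the old one via pgd_populate()
 * under page_table_lock.
 */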

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
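
/*
 * Added usage sketch (illustrative only; the typical caller is the
 * KVM host code, and error handling is elided):
 *
 *	struct gmap *gmap = gmap_alloc(mm);	// guest address space
 *	gmap_map_segment(gmap, from, to, len);	// back it with mm memory
 *	gmap_enable(gmap);			// make it the primary space
 *	...					// run guest; gmap_fault()
 *						// resolves misses on demand
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */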

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	spin_unlock(&gmap->mm->page_table_lock);
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	spin_lock(&gmap->mm->page_table_lock);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
	unsigned long *table;

	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return ERR_PTR(-EFAULT);
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);
	return table;
}
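
/*
 * Added note: worked example for the shifts above. Each level indexes
 * 11 bits of the guest address, 2048 entries per table: region-1 uses
 * bits 63..53, region-2 bits 52..42, region-3 bits 41..31 and the
 * segment table bits 30..20; a segment covers 1M, so (address >> 20)
 * selects the segment (pmd) entry for the final lookup.
 */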

/**
 * __gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, vmaddr, segment;
	struct gmap_pgtable *mp;
	struct page *page;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return PTR_ERR(segment_ptr);
	/* Convert the gmap address to an mm address. */
	segment = *segment_ptr;
	if (!(segment & _SEGMENT_ENTRY_INV)) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @address: guest address
 * @gmap: pointer to guest mapping meta data structure
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(address, gmap);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

static int gmap_connect_pgtable(unsigned long segment,
				unsigned long *segment_ptr,
				struct gmap *gmap)
{
	unsigned long vmaddr;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct mm_struct *mm;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	mm = gmap->mm;
	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
	vma = find_vma(mm, vmaddr);
	if (!vma || vma->vm_start > vmaddr)
		return -EFAULT;
	/* Walk the parent mm page table */
	pgd = pgd_offset(mm, vmaddr);
	pud = pud_alloc(mm, pgd, vmaddr);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc(mm, pud, vmaddr);
	if (!pmd)
		return -ENOMEM;
	if (!pmd_present(*pmd) &&
	    __pte_alloc(mm, vma, pmd, vmaddr))
		return -ENOMEM;
	/* pmd now points to a valid segment table entry. */
	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
	if (!rmap)
		return -ENOMEM;
	/* Link gmap segment table entry location to page table. */
	page = pmd_page(*pmd);
	mp = (struct gmap_pgtable *) page->index;
	rmap->entry = segment_ptr;
	spin_lock(&mm->page_table_lock);
	if (*segment_ptr == segment) {
		list_add(&rmap->list, &mp->mapper);
		/* Set gmap segment table entry to page table. */
		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
		rmap = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	kfree(rmap);
	return 0;
}

static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

/*
 * this function is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *segment_ptr, segment;
	struct gmap_pgtable *mp;
	struct page *page;
	int rc;

	current->thread.gmap_addr = address;
	segment_ptr = gmap_table_walk(address, gmap);
	if (IS_ERR(segment_ptr))
		return -EFAULT;
	/* Convert the gmap address to an mm address. */
	while (1) {
		segment = *segment_ptr;
		if (!(segment & _SEGMENT_ENTRY_INV)) {
			/* Page table is present */
			page = pfn_to_page(segment >> PAGE_SHIFT);
			mp = (struct gmap_pgtable *) page->index;
			return mp->vmaddr | (address & ~PMD_MASK);
		}
		if (!(segment & _SEGMENT_ENTRY_RO))
			/* Nothing mapped in the gmap address space. */
			break;
		rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
		if (rc)
			return rc;
	}
	return -EFAULT;
}

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
					   unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
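
/*
 * Added note: atomic_xor_bits() is a small lock-free helper; the
 * cmpxchg loop retries until the XOR applies to an unchanged value,
 * so concurrent callers flipping different fragment bits in
 * page->_mapcount cannot lose each other's updates.
 */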

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
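
/*
 * Added note: page->_mapcount is used as a fragment bitmap here, not
 * as a mapcount. On 64-bit, bits 0/1 mark the lower/upper 2K half as
 * allocated and bits 4/5 mark a half as pending RCU free; a fresh
 * page starts with _mapcount == 1 (first half handed out). The
 * "mask | (mask >> 4)" above folds the pending-free bits in so those
 * fragments still count as busy, and a page with all FRAG_MASK bits
 * set is full and is dropped from pgtable_list.
 */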

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_disconnect_pgtable(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_disconnect_pgtable(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}
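
/*
 * Added note: the low bits of the pointer handed to tlb_remove_table()
 * double as a type tag, which works because all tables are at least
 * 2K aligned. For example, with 64-bit FRAG_MASK 0x03:
 *
 *	table | 0x00  -> crst table, freed with free_pages(ALLOC_ORDER)
 *	table | 0x10  -> lower 2K fragment pending RCU free
 *	table | 0x20  -> upper 2K fragment pending RCU free
 *	table | 0x03  -> pgste page table, page_table_free_pgste()
 */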

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		__tlb_flush_mm(tlb->mm);
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
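
/*
 * Added note on the flow above: tlb_remove_table() queues pointers in
 * a per-gather batch; tlb_table_flush() flushes the TLB and hands the
 * batch to call_rcu_sched(), whose callback tlb_remove_table_rcu()
 * frees every entry via __tlb_remove_table(). If no batch page can be
 * allocated, tlb_remove_table_one() falls back to synchronizing with
 * an IPI instead of an RCU grace period.
 */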

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;
	struct page *page;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		page = follow_page(vma, addr, FOLL_SPLIT);
	}
}

void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
		vma = vma->vm_next;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
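
/*
 * Added note: FOLL_SPLIT makes follow_page() split any transparent
 * huge page it encounters, so after thp_split_mm() the mm is backed
 * entirely by normal 4K ptes, and VM_NOHUGEPAGE keeps khugepaged from
 * collapsing them again; both are prerequisites for the pgste page
 * table layout set up below.
 */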

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie */
	if (s390_user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	/* make sure that both mms have a correct rss state */
	sync_mm_rss(tsk->mm);
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	mm->def_flags |= VM_NOHUGEPAGE;
#endif

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
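
/*
 * Added usage sketch (illustrative; the expected caller is the KVM
 * host code at VM creation time):
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;	// still multi-threaded or using AIO
 *
 * Because the whole mm is swapped out underneath the task, the call
 * must be made while the process is single threaded, before any vcpu
 * runs.
 */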

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/*
	 * No need to flush the TLB: on s390 the reference bits live in
	 * the storage key, never in the TLB.
	 */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	ptep++;
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	return pgtable;
}
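
/*
 * Added note: the deposited tables form a list threaded through the
 * pgtable pages themselves, e.g.
 *
 *	pgtable_trans_huge_deposit(mm, pgtable);   // when a huge pmd
 *						   // is installed
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm); // when it is split
 *						   // or zapped
 *
 * The withdraw path re-initializes the first two ptes to empty
 * because the embedded list_head overwrote them while the table was
 * parked.
 */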
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */