/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
                           u32 addr, u32 size, u32 pitch, u32 flags)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        int i = reg - drm->tile.reg;
        struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
        struct nvkm_fb_tile *tile = &fb->tile.region[i];

        nouveau_fence_unref(&reg->fence);

        if (tile->pitch)
                nvkm_fb_tile_fini(fb, i, tile);

        if (pitch)
                nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

        nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_drm_tile *tile = &drm->tile.reg[i];

        spin_lock(&drm->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_done(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&drm->tile.lock);
        return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
                        struct dma_fence *fence)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (tile) {
                spin_lock(&drm->tile.lock);
                tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
                tile->used = false;
                spin_unlock(&drm->tile.lock);
        }
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
                   u32 size, u32 pitch, u32 zeta)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
        struct nouveau_drm_tile *tile, *found = NULL;
        int i;

        for (i = 0; i < fb->tile.regions; i++) {
                tile = nv10_bo_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && fb->tile.region[i].pitch) {
                        /* Kill an unused tile region. */
                        nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_bo_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
        return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        WARN_ON(nvbo->pin_refcnt > 0);
        nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

        /*
         * If nouveau_bo_new() allocated this buffer, the GEM object was never
         * initialized, so don't attempt to release it.
         */
        if (bo->base.dev)
                drm_gem_object_release(&bo->base);

        kfree(nvbo);
}

static inline u64
roundup_64(u64 x, u32 y)
{
        x += y - 1;
        do_div(x, y);
        return x * y;
}
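
/*
 * Worked example (illustrative values, not from the original source):
 * roundup_64(1000, 256) computes x = 1000 + 255 = 1255, do_div(x, 256)
 * leaves the quotient 4 in x (do_div() divides in place and returns the
 * remainder), and 4 * 256 = 1024 is returned.  do_div() is used instead
 * of the C '/' operator so the u64 division also builds on 32-bit
 * platforms, which lack a native 64-bit divide instruction.
 */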

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
                       int *align, u64 *size)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nvif_device *device = &drm->client.device;

        if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
                if (nvbo->mode) {
                        if (device->info.chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup_64(*size, 64 * nvbo->mode);

                        } else if (device->info.chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup_64(*size, 64 * nvbo->mode);

                        } else if (device->info.chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup_64(*size, 64 * nvbo->mode);

                        } else if (device->info.chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup_64(*size, 32 * nvbo->mode);
                        }
                }
        } else {
                *size = roundup_64(*size, (1 << nvbo->page));
                *align = max((1 << nvbo->page), *align);
        }

        *size = roundup_64(*size, PAGE_SIZE);
}
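
/*
 * Illustration with hypothetical numbers: on a chipset >= 0x40 with a
 * tiled buffer (nvbo->mode == 256) and a requested size of 100000 bytes,
 * the code above yields *align = 65536 and
 * *size = roundup_64(100000, 64 * 256) = 114688, which the final
 * roundup_64(*size, PAGE_SIZE) leaves unchanged on 4 KiB-page systems.
 */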

struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
                 u32 tile_mode, u32 tile_flags)
{
        struct nouveau_drm *drm = cli->drm;
        struct nouveau_bo *nvbo;
        struct nvif_mmu *mmu = &cli->mmu;
        struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
        int i, pi = -1;

        if (!*size) {
                NV_WARN(drm, "skipped size %016llx\n", *size);
                return ERR_PTR(-EINVAL);
        }

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
        nvbo->bo.bdev = &drm->ttm.bdev;

        /* This is confusing, and doesn't actually mean we want an uncached
         * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
         * into in nouveau_gem_new().
         */
        if (flags & TTM_PL_FLAG_UNCACHED) {
                /* Determine if we can get a cache-coherent map, forcing
                 * uncached mapping if we can't.
                 */
                if (!nouveau_drm_use_coherent_gpu_mapping(drm))
                        nvbo->force_coherent = true;
        }

        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
                nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
                if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
                        kfree(nvbo);
                        return ERR_PTR(-EINVAL);
                }

                nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
        } else
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                nvbo->kind = (tile_flags & 0x00007f00) >> 8;
                nvbo->comp = (tile_flags & 0x00030000) >> 16;
                if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
                        kfree(nvbo);
                        return ERR_PTR(-EINVAL);
                }
        } else {
                nvbo->zeta = (tile_flags & 0x00000007);
        }
        nvbo->mode = tile_mode;
        nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

        /* Determine the desirable target GPU page size for the buffer. */
        for (i = 0; i < vmm->page_nr; i++) {
                /* Because we cannot currently allow VMM maps to fail
                 * during buffer migration, we need to determine page
                 * size for the buffer up-front, and pre-allocate its
                 * page tables.
                 *
                 * Skip page sizes that can't support needed domains.
                 */
                if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
                    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
                        continue;
                if ((flags & TTM_PL_FLAG_TT) &&
                    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
                        continue;

                /* Select this page size if it's the first that supports
                 * the potential memory domains, or when it's compatible
                 * with the requested compression settings.
                 */
                if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
                        pi = i;

                /* Stop once the buffer is larger than the current page size. */
                if (*size >= 1ULL << vmm->page[i].shift)
                        break;
        }

        if (WARN_ON(pi < 0)) {
                kfree(nvbo);
                return ERR_PTR(-EINVAL);
        }

        /* Disable compression if suitable settings couldn't be found. */
        if (nvbo->comp && !vmm->page[pi].comp) {
                if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
                        nvbo->kind = mmu->kind[nvbo->kind];
                nvbo->comp = 0;
        }
        nvbo->page = vmm->page[pi].shift;

        nouveau_bo_fixup_align(nvbo, flags, align, size);

        return nvbo;
}

int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
                struct sg_table *sg, struct dma_resv *robj)
{
        int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
        size_t acc_size;
        int ret;

        acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));

        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, flags, 0);

        ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
                          &nvbo->placement, align >> PAGE_SHIFT, false,
                          acc_size, sg, robj, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        return 0;
}

int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
               uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
               struct sg_table *sg, struct dma_resv *robj,
               struct nouveau_bo **pnvbo)
{
        struct nouveau_bo *nvbo;
        int ret;

        nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
                                tile_flags);
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);

        ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
        if (ret)
                return ret;

        *pnvbo = nvbo;
        return 0;
}
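
/*
 * Minimal usage sketch (illustrative; error handling abbreviated, and
 * "cli" assumed to be a valid struct nouveau_cli): allocating a 64 KiB
 * buffer that may live in VRAM:
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret = nouveau_bo_new(cli, 65536, 0, TTM_PL_FLAG_VRAM,
 *                               0, 0, NULL, NULL, &nvbo);
 *      if (ret)
 *              return ret;
 *
 * Note that nouveau_bo_alloc() may round both size and align up (see
 * nouveau_bo_fixup_align()) before nouveau_bo_init() creates the
 * underlying TTM object.
 */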

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
        unsigned i, fpfn, lpfn;

        if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
            nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
            nvbo->bo.mem.num_pages < vram_pages / 4) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                if (nvbo->zeta) {
                        fpfn = vram_pages / 2;
                        lpfn = ~0;
                } else {
                        fpfn = 0;
                        lpfn = vram_pages / 2;
                }
                for (i = 0; i < nvbo->placement.num_placement; ++i) {
                        nvbo->placements[i].fpfn = fpfn;
                        nvbo->placements[i].lpfn = lpfn;
                }
                for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
                        nvbo->busy_placements[i].fpfn = fpfn;
                        nvbo->busy_placements[i].lpfn = lpfn;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
                                                 TTM_PL_MASK_CACHING) |
                         (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        bool force = false, evict = false;
        int ret;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            memtype == TTM_PL_FLAG_VRAM && contig) {
                if (!nvbo->contig) {
                        nvbo->contig = true;
                        force = true;
                        evict = true;
                }
        }

        if (nvbo->pin_refcnt) {
                if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
                        NV_ERROR(drm, "bo %p pinned elsewhere: "
                                      "0x%08x vs 0x%08x\n", bo,
                                 1 << bo->mem.mem_type, memtype);
                        ret = -EBUSY;
                }
                nvbo->pin_refcnt++;
                goto out;
        }

        if (evict) {
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
                ret = nouveau_bo_validate(nvbo, false, false);
                if (ret)
                        goto out;
        }

        nvbo->pin_refcnt++;
        nouveau_bo_placement_set(nvbo, memtype, 0);

        /* drop pin_refcnt temporarily, so we don't trip the assertion
         * in nouveau_bo_move() that makes sure we're not trying to
         * move a pinned buffer
         */
        nvbo->pin_refcnt--;
        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret)
                goto out;
        nvbo->pin_refcnt++;

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                drm->gem.vram_available -= bo->mem.size;
                break;
        case TTM_PL_TT:
                drm->gem.gart_available -= bo->mem.size;
                break;
        default:
                break;
        }

out:
        if (force && ret)
                nvbo->contig = false;
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, ref;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (ret)
                return ret;

        ref = --nvbo->pin_refcnt;
        WARN_ON_ONCE(ref < 0);
        if (ref)
                goto out;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        drm->gem.vram_available += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        drm->gem.gart_available += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

out:
        ttm_bo_unreserve(bo);
        return ret;
}
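
/*
 * Usage sketch (illustrative): pinning a buffer into contiguous VRAM,
 * e.g. for scanout, and releasing it afterwards:
 *
 *      ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
 *      if (ret)
 *              return ret;
 *      ...
 *      nouveau_bo_unpin(nvbo);
 *
 * Pins nest through pin_refcnt; the buffer only becomes evictable again
 * once every nouveau_bo_pin() has been balanced by a nouveau_bo_unpin().
 */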

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (!nvbo)
                return;

        ttm_bo_kunmap(&nvbo->kmap);
}

void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_device(drm->dev->dev,
                                           ttm_dma->dma_address[i],
                                           PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
        int i;

        if (!ttm_dma)
                return;

        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
                return;

        for (i = 0; i < ttm_dma->ttm.num_pages; i++)
                dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
                                        PAGE_SIZE, DMA_FROM_DEVICE);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_gpu)
{
        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
        if (ret)
                return ret;

        nouveau_bo_sync_for_device(nvbo);

        return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

        mem += index;

        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}
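
/*
 * Illustrative CPU-access pattern: the accessors above require a prior
 * nouveau_bo_map(), and non-coherent buffers should be bracketed by the
 * sync helpers so the DMA API can maintain cache coherency:
 *
 *      ret = nouveau_bo_map(nvbo);
 *      if (ret)
 *              return ret;
 *      nouveau_bo_sync_for_cpu(nvbo);
 *      nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);   (word index 0)
 *      nouveau_bo_sync_for_device(nvbo);
 *      nouveau_bo_unmap(nvbo);
 *
 * nouveau_bo_rd32()/_wr32() take a 32-bit word index rather than a byte
 * offset, and transparently switch to I/O accessors when the kmap is
 * iomem.
 */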

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

        if (drm->agp.bridge) {
                return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
        }
#endif

        return nouveau_sgdma_create_ttm(bo, page_flags);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
                     struct ttm_resource *reg)
{
        struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
        struct nouveau_mem *new_mem = nouveau_mem(reg);
        struct nvif_vmm *vmm = &drm->client.vmm.vmm;
        int ret;

        ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
                           old_mem->mem.size, &old_mem->vma[0]);
        if (ret)
                return ret;

        ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
                           new_mem->mem.size, &old_mem->vma[1]);
        if (ret)
                goto done;

        ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
        if (ret)
                goto done;

        ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
        if (ret) {
                nvif_vmm_put(vmm, &old_mem->vma[1]);
                nvif_vmm_put(vmm, &old_mem->vma[0]);
        }
        /* Propagate any error so the caller falls back to a CPU copy
         * instead of proceeding with unmapped transfer VMAs.
         */
        return ret;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_gpu, struct ttm_resource *new_reg)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_channel *chan = drm->ttm.chan;
        struct nouveau_cli *cli = (void *)chan->user.client;
        struct nouveau_fence *fence;
        int ret;

        /* create temporary vmas for the transfer and attach them to the
         * old nvkm_mem node, these will get cleaned up after ttm has
         * destroyed the ttm_resource
         */
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_move_prep(drm, bo, new_reg);
                if (ret)
                        return ret;
        }

        mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
        ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
        if (ret == 0) {
                ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
                if (ret == 0) {
                        ret = nouveau_fence_new(chan, false, &fence);
                        if (ret == 0) {
                                ret = ttm_bo_move_accel_cleanup(bo,
                                                                &fence->base,
                                                                evict,
                                                                new_reg);
                                nouveau_fence_unref(&fence);
                        }
                }
        }
        mutex_unlock(&cli->mutex);
        return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
        static const struct _method_table {
                const char *name;
                int engine;
                s32 oclass;
                int (*exec)(struct nouveau_channel *,
                            struct ttm_buffer_object *,
                            struct ttm_resource *, struct ttm_resource *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
                {  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
                { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
                { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
                {  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
                {  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
                {  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
                {},
        };
        const struct _method_table *mthd = _methods;
        const char *name = "CPU";
        int ret;

        do {
                struct nouveau_channel *chan;

                if (mthd->engine)
                        chan = drm->cechan;
                else
                        chan = drm->channel;
                if (chan == NULL)
                        continue;

                ret = nvif_object_ctor(&chan->user, "ttmBoMove",
                                       mthd->oclass | (mthd->engine << 16),
                                       mthd->oclass, NULL, 0,
                                       &drm->ttm.copy);
                if (ret == 0) {
                        ret = mthd->init(chan, drm->ttm.copy.handle);
                        if (ret) {
                                nvif_object_dtor(&drm->ttm.copy);
                                continue;
                        }

                        drm->ttm.move = mthd->exec;
                        drm->ttm.chan = chan;
                        name = mthd->name;
                        break;
                }
        } while ((++mthd)->exec);

        NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_gpu, struct ttm_resource *new_reg)
{
        struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
        struct ttm_place placement_memtype = {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
        };
        struct ttm_placement placement;
        struct ttm_resource tmp_reg;
        int ret;

        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_reg = *new_reg;
        tmp_reg.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
        ttm_resource_free(bo, &tmp_reg);
        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_gpu, struct ttm_resource *new_reg)
{
        struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
        struct ttm_place placement_memtype = {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
        };
        struct ttm_placement placement;
        struct ttm_resource tmp_reg;
        int ret;

        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_reg = *new_reg;
        tmp_reg.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);

out:
        ttm_resource_free(bo, &tmp_reg);
        return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
                     struct ttm_resource *new_reg)
{
        struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma;

        /* ttm can now (stupidly) pass the driver bos it didn't create... */
        if (bo->destroy != nouveau_bo_del_ttm)
                return;

        if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
            mem->mem.page == nvbo->page) {
                list_for_each_entry(vma, &nvbo->vma_list, head) {
                        nouveau_vma_map(vma, mem);
                }
        } else {
                list_for_each_entry(vma, &nvbo->vma_list, head) {
                        WARN_ON(ttm_bo_wait(bo, false, false));
                        nouveau_vma_unmap(vma);
                }
        }

        if (new_reg) {
                if (new_reg->mm_node)
                        nvbo->offset = (new_reg->start << PAGE_SHIFT);
                else
                        nvbo->offset = 0;
        }
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
                   struct nouveau_drm_tile **new_tile)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 offset = new_reg->start << PAGE_SHIFT;

        *new_tile = NULL;
        if (new_reg->mem_type != TTM_PL_VRAM)
                return 0;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
                *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
                                               nvbo->mode, nvbo->zeta);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_drm_tile *new_tile,
                      struct nouveau_drm_tile **old_tile)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
        struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);

        nv10_bo_put_tile_region(dev, *old_tile, fence);
        *old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
                struct ttm_operation_ctx *ctx,
                struct ttm_resource *new_reg)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_resource *old_reg = &bo->mem;
        struct nouveau_drm_tile *new_tile = NULL;
        int ret = 0;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        if (nvbo->pin_refcnt)
                NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
                if (ret)
                        return ret;
        }

        /* Fake bo copy. */
        if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_reg;
                new_reg->mm_node = NULL;
                goto out;
        }

        /* Hardware assisted copy. */
        if (drm->ttm.move) {
                if (new_reg->mem_type == TTM_PL_SYSTEM)
                        ret = nouveau_bo_move_flipd(bo, evict,
                                                    ctx->interruptible,
                                                    ctx->no_wait_gpu, new_reg);
                else if (old_reg->mem_type == TTM_PL_SYSTEM)
                        ret = nouveau_bo_move_flips(bo, evict,
                                                    ctx->interruptible,
                                                    ctx->no_wait_gpu, new_reg);
                else
                        ret = nouveau_bo_move_m2mf(bo, evict,
                                                   ctx->interruptible,
                                                   ctx->no_wait_gpu, new_reg);
                if (!ret)
                        goto out;
        }

        /* Fallback to software copy. */
        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret == 0)
                ret = ttm_bo_move_memcpy(bo, ctx, new_reg);

out:
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                if (ret)
                        nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
                else
                        nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
        }

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
                                          filp->private_data);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nouveau_mem *mem = nouveau_mem(reg);

        reg->bus.addr = NULL;
        reg->bus.offset = 0;
        reg->bus.size = reg->num_pages << PAGE_SHIFT;
        reg->bus.base = 0;
        reg->bus.is_iomem = false;

        switch (reg->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
                if (drm->agp.bridge) {
                        reg->bus.offset = reg->start << PAGE_SHIFT;
                        reg->bus.base = drm->agp.base;
                        reg->bus.is_iomem = !drm->agp.cma;
                }
#endif
                if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
                        /* untiled */
                        break;
                fallthrough;    /* tiled memory */
        case TTM_PL_VRAM:
                reg->bus.offset = reg->start << PAGE_SHIFT;
                reg->bus.base = device->func->resource_addr(device, 1);
                reg->bus.is_iomem = true;
                if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
                        union {
                                struct nv50_mem_map_v0 nv50;
                                struct gf100_mem_map_v0 gf100;
                        } args;
                        u64 handle, length;
                        u32 argc = 0;
                        int ret;

                        switch (mem->mem.object.oclass) {
                        case NVIF_CLASS_MEM_NV50:
                                args.nv50.version = 0;
                                args.nv50.ro = 0;
                                args.nv50.kind = mem->kind;
                                args.nv50.comp = mem->comp;
                                argc = sizeof(args.nv50);
                                break;
                        case NVIF_CLASS_MEM_GF100:
                                args.gf100.version = 0;
                                args.gf100.ro = 0;
                                args.gf100.kind = mem->kind;
                                argc = sizeof(args.gf100);
                                break;
                        default:
                                WARN_ON(1);
                                break;
                        }

                        ret = nvif_object_map_handle(&mem->mem.object,
                                                     &args, argc,
                                                     &handle, &length);
                        if (ret != 1) {
                                if (WARN_ON(ret == 0))
                                        return -EINVAL;
                                return ret;
                        }

                        reg->bus.base = 0;
                        reg->bus.offset = handle;
                }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nouveau_mem *mem = nouveau_mem(reg);

        if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
                switch (reg->mem_type) {
                case TTM_PL_TT:
                        if (mem->kind)
                                nvif_object_unmap_handle(&mem->mem.object);
                        break;
                case TTM_PL_VRAM:
                        nvif_object_unmap_handle(&mem->mem.object);
                        break;
                default:
                        break;
                }
        }
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
        int i, ret;

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
                    !nvbo->kind)
                        return 0;

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

                        ret = nouveau_bo_validate(nvbo, false, false);
                        if (ret)
                                return ret;
                }
                return 0;
        }

        /* make sure bo is in mappable vram */
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
            bo->mem.start + bo->mem.num_pages < mappable)
                return 0;

        for (i = 0; i < nvbo->placement.num_placement; ++i) {
                nvbo->placements[i].fpfn = 0;
                nvbo->placements[i].lpfn = mappable;
        }

        for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
                nvbo->busy_placements[i].fpfn = 0;
                nvbo->busy_placements[i].lpfn = mappable;
        }

        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct device *dev;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (ttm->state != tt_unpopulated)
                return 0;

        if (slave && ttm->sg) {
                /* make userspace faulting work */
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 ttm_dma->dma_address, ttm->num_pages);
                ttm->state = tt_unbound;
                return 0;
        }

        drm = nouveau_bdev(ttm->bdev);
        dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                return ttm_agp_tt_populate(ttm, ctx);
        }
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        if (swiotlb_nr_tbl()) {
                return ttm_dma_populate((void *)ttm, dev, ctx);
        }
#endif
        return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct device *dev;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (slave)
                return;

        drm = nouveau_bdev(ttm->bdev);
        dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                ttm_agp_tt_unpopulate(ttm);
                return;
        }
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        if (swiotlb_nr_tbl()) {
                ttm_dma_unpopulate((void *)ttm, dev);
                return;
        }
#endif

        ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
        struct dma_resv *resv = nvbo->bo.base.resv;

        if (exclusive)
                dma_resv_add_excl_fence(resv, &fence->base);
        else if (fence)
                dma_resv_add_shared_fence(resv, &fence->base);
}
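
/*
 * Usage sketch (illustrative): after submitting a GPU job that writes
 * "nvbo", attach its fence exclusively so later users wait for the
 * write to finish; a read-only job would pass "false" to add a shared
 * fence instead:
 *
 *      nouveau_bo_fence(nvbo, fence, true);
 */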

struct ttm_bo_driver nouveau_bo_driver = {
        .ttm_tt_create = &nouveau_ttm_tt_create,
        .ttm_tt_populate = &nouveau_ttm_tt_populate,
        .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = nouveau_bo_evict_flags,
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};