// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_relocs;
	u64 total;
	void *mem;

	if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		num_unpins += num_cmdbufs;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

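/*
 * Illustrative sketch (not part of the original file): the single
 * kzalloc() above is carved into sub-arrays in the same order the
 * sizes were summed. For num_cmdbufs = 2 and num_relocs = 1 with the
 * firewall disabled (num_unpins = 3), the layout is:
 *
 *	[struct host1x_job]
 *	[1 x struct host1x_reloc]		<- job->relocs
 *	[3 x struct host1x_job_unpin_data]	<- job->unpins
 *	[2 x struct host1x_job_gather]		<- job->gathers
 *	[3 x dma_addr_t]			<- job->addr_phys
 *
 * job->reloc_addr_phys points at addr_phys[0] and job->gather_addr_phys
 * at addr_phys[num_relocs], so relocation and gather DMA addresses
 * share the one addr_phys array.
 */
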
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

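/*
 * Usage sketch (illustrative; "channel" and "cmdbuf" are hypothetical
 * caller-owned objects, not defined in this file):
 *
 *	struct host1x_job *job;
 *
 *	job = host1x_job_alloc(channel, 1, 0);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	host1x_job_add_gather(job, cmdbuf, 128, 0);
 *
 * Note that host1x_job_add_gather() performs no bounds checking, so
 * callers must not add more gathers than the num_cmdbufs they passed
 * to host1x_job_alloc().
 */
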
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	struct host1x_client *client = job->client;
	struct device *dev = client->dev;
	struct host1x_job_gather *g;
	struct iommu_domain *domain;
	unsigned int i;
	int err;

	domain = iommu_get_domain_for_dev(dev);
	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the client device is not attached to an IOMMU, the
		 * physical address of the buffer object can be used.
		 *
		 * Similarly, when an IOMMU domain is shared between all
		 * host1x clients, the IOVA is already available, so no
		 * need to map the buffer object again.
		 *
		 * XXX Note that this isn't always safe to do because it
		 * relies on an assumption that no cache maintenance is
		 * needed on the buffer objects.
		 */
		if (!domain || client->group)
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			unsigned long mask = HOST1X_RELOC_READ |
					     HOST1X_RELOC_WRITE;
			enum dma_data_direction dir;

			switch (reloc->flags & mask) {
			case HOST1X_RELOC_READ:
				dir = DMA_TO_DEVICE;
				break;

			case HOST1X_RELOC_WRITE:
				dir = DMA_FROM_DEVICE;
				break;

			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
				dir = DMA_BIDIRECTIONAL;
				break;

			default:
				err = -EINVAL;
				goto unpin;
			}

			err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
			if (!err) {
				err = -ENOMEM;
				goto unpin;
			}

			job->unpins[job->num_unpins].dev = dev;
			job->unpins[job->num_unpins].dir = dir;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	/*
	 * We will copy the gather buffer contents later, so there is no
	 * need to hold and pin them here.
	 */
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
		return 0;

	for (i = 0; i < job->num_gathers; i++) {
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		dma_addr_t *phys;
		unsigned int j;

		g = &job->gathers[i];
		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the host1x is not attached to an IOMMU, there is no need
		 * to map the buffer object for the host1x, since the physical
		 * address can simply be used.
		 */
		if (!iommu_get_domain_for_dev(host->dev))
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(host->dev, g->bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto put;
		}

		if (host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto put;
			}

			err = iommu_map_sg(host->domain,
					   iova_dma_addr(&host->iova, alloc),
					   sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto put;
			}

			job->unpins[job->num_unpins].size = gather_size;
			phys_addr = iova_dma_addr(&host->iova, alloc);
		} else if (sgt) {
			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
					 DMA_TO_DEVICE);
			if (!err) {
				err = -ENOMEM;
				goto put;
			}

			job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
			job->unpins[job->num_unpins].dev = host->dev;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->gather_addr_phys[i] = phys_addr;

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

put:
	host1x_bo_put(g->bo);
unpin:
	host1x_job_unpin(job);
	return err;
}

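/*
 * Client-side sketch (hypothetical buffer names, not from this file):
 * a relocation entry consumed by the loop above would be filled in as
 *
 *	job->relocs[i] = (struct host1x_reloc) {
 *		.cmdbuf = { .bo = cmdbuf, .offset = 4 * sizeof(u32) },
 *		.target = { .bo = framebuffer, .offset = 0 },
 *		.shift = 0,
 *		.flags = HOST1X_RELOC_READ,
 *	};
 *
 * HOST1X_RELOC_READ maps the target DMA_TO_DEVICE, HOST1X_RELOC_WRITE
 * maps it DMA_FROM_DEVICE and both flags together map it
 * DMA_BIDIRECTIONAL, matching the switch statement above.
 */
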
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	void *cmdbuf_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
						g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (!cmdbuf_addr) {
			cmdbuf_addr = host1x_bo_mmap(cmdbuf);

			if (unlikely(!cmdbuf_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_addr)
		host1x_bo_munmap(cmdbuf, cmdbuf_addr);

	return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

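/*
 * Worked example: the firewall walks the command stream in 32-bit
 * words, so the offset passed in here is a word index. A relocation
 * recorded at byte offset 16 of the command buffer therefore matches
 * a firewall offset of 4, because check_reloc() scales the word index
 * by sizeof(u32) before comparing.
 */
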
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}

		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0: /* SETCLASS */
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1: /* INCR */
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2: /* NONINCR */
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3: /* MASK */
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			/* single-word opcodes, no data words to check */
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

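/*
 * Worked example (illustrative): the word 0x10010002 decodes as
 * opcode = 0x10010002 >> 28 = 1 (INCR), reg = (0x10010002 >> 16) &
 * 0xfff = 0x001 and count = 0x10010002 & 0xffff = 2, i.e. "write two
 * consecutive registers starting at 0x001". check_incr() then calls
 * check_register() for registers 0x001 and 0x002 and consumes two
 * data words from the stream.
 */
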
static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from a higher-priority pool first,
	 * as waiting for the allocation here is a major performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, try the generic-blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gathers base if firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

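/*
 * Lifecycle sketch (illustrative, error handling elided;
 * host1x_job_submit() is provided elsewhere by the host1x core):
 *
 *	job = host1x_job_alloc(channel, num_cmdbufs, num_relocs);
 *	host1x_job_add_gather(job, bo, words, offset);
 *	err = host1x_job_pin(job, client->dev);
 *	err = host1x_job_submit(job);
 *	host1x_job_put(job);
 *
 * On success the job is unpinned from the completion path, so the
 * submitter normally does not call host1x_job_unpin() directly.
 */
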
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];
		struct device *dev = unpin->dev ?: host->dev;
		struct sg_table *sgt = unpin->sgt;

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				  iova_pfn(&host->iova, job->addr_phys[i]));
		}

		if (unpin->dev && sgt)
			dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
				     unpin->dir);

		host1x_bo_unpin(dev, unpin->bo, sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}