/*
 * drivers/gpu/iommu/iommu.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
16 #include"../sprd_iommu_common.h"
18 #ifdef CONFIG_HAS_EARLYSUSPEND
19 static void sprd_iommu_gsp_early_suspend(struct early_suspend *es)
23 struct sprd_iommu_dev *dev = container_of(es,
24 struct sprd_iommu_dev, early_suspend);
26 mutex_lock(&dev->mutex_map);
27 pr_info("%s, map_count:%d\n", __func__, dev->map_count);
28 if (dev->map_count > 0)
29 err = sprd_iommu_backup(dev);
30 mutex_unlock(&dev->mutex_map);
33 static void sprd_iommu_gsp_late_resume(struct early_suspend *es)
37 struct sprd_iommu_dev *dev = container_of(es,
38 struct sprd_iommu_dev, early_suspend);
40 mutex_lock(&dev->mutex_map);
41 pr_info("%s, map_count:%d\n", __func__, dev->map_count);
42 if (dev->map_count > 0)
43 err = sprd_iommu_restore(dev);
44 mutex_unlock(&dev->mutex_map);
48 void sprd_iommu_gsp_clk_enable(struct sprd_iommu_dev *dev)
50 pr_debug("%s\n", __func__);
51 #ifndef CONFIG_SC_FPGA
52 clk_prepare_enable(dev->mmu_mclock);
53 clk_prepare_enable(dev->mmu_clock);
57 void sprd_iommu_gsp_clk_disable(struct sprd_iommu_dev *dev)
59 pr_debug("%s\n", __func__);
60 #ifndef CONFIG_SC_FPGA
61 clk_disable_unprepare(dev->mmu_clock);
62 clk_disable_unprepare(dev->mmu_mclock);
/* Per-client open hook: nothing to set up for the GSP IOMMU. */
void sprd_iommu_gsp_open(struct sprd_iommu_dev *dev)
{
}
/* Per-client release hook: nothing to tear down for the GSP IOMMU. */
void sprd_iommu_gsp_release(struct sprd_iommu_dev *dev)
{
}
74 int sprd_iommu_gsp_init(struct sprd_iommu_dev *dev,
75 struct sprd_iommu_init_data *data)
78 #ifndef CONFIG_SC_FPGA
79 struct device_node *np;
81 np = dev->misc_dev.this_device->of_node;
85 dev->mmu_mclock = of_clk_get(np, 0);
86 dev->mmu_clock = of_clk_get(np, 2);
88 if (IS_ERR(dev->mmu_clock) || IS_ERR(dev->mmu_mclock)) {
89 pr_info("%s, can't get clock:%p, %p\n", __func__,
90 dev->mmu_clock, dev->mmu_mclock);
94 #ifdef CONFIG_HAS_EARLYSUSPEND
95 dev->early_suspend.suspend = sprd_iommu_gsp_early_suspend;
96 dev->early_suspend.resume = sprd_iommu_gsp_late_resume;
97 dev->early_suspend.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING;
98 register_early_suspend(&dev->early_suspend);
101 sprd_iommu_gsp_clk_enable(dev);
102 err = sprd_iommu_init(dev, data);
103 sprd_iommu_gsp_clk_disable(dev);
107 #ifndef CONFIG_SC_FPGA
110 clk_put(dev->mmu_clock);
113 clk_put(dev->mmu_mclock);
/*
 * Driver teardown: run the common IOMMU exit with clocks temporarily
 * enabled so register accesses are safe.
 *
 * NOTE(review): the early_suspend handler registered in init is never
 * unregistered here, and the clock references are not put — confirm
 * whether teardown order elsewhere makes that safe.
 */
int sprd_iommu_gsp_exit(struct sprd_iommu_dev *dev)
{
	int err;

	sprd_iommu_gsp_clk_enable(dev);
	err = sprd_iommu_exit(dev);
	sprd_iommu_gsp_clk_disable(dev);

	return err;
}
130 unsigned long sprd_iommu_gsp_iova_alloc(struct sprd_iommu_dev *dev,
133 return sprd_iommu_iova_alloc(dev, iova_length);
136 void sprd_iommu_gsp_iova_free(struct sprd_iommu_dev *dev, unsigned long iova,
139 sprd_iommu_iova_free(dev, iova, iova_length);
142 int sprd_iommu_gsp_iova_map(struct sprd_iommu_dev *dev, unsigned long iova,
143 size_t iova_length, struct sg_table *table)
147 mutex_lock(&dev->mutex_map);
148 sprd_iommu_gsp_clk_enable(dev);
149 if (0 == dev->map_count)
150 sprd_iommu_enable(dev);
153 err = sprd_iommu_iova_map(dev, iova, iova_length, table);
154 sprd_iommu_gsp_clk_disable(dev);
155 mutex_unlock(&dev->mutex_map);
160 int sprd_iommu_gsp_iova_unmap(struct sprd_iommu_dev *dev, unsigned long iova,
165 mutex_lock(&dev->mutex_map);
166 sprd_iommu_gsp_clk_enable(dev);
167 err = sprd_iommu_iova_unmap(dev, iova, iova_length);
170 if (0 == dev->map_count)
171 sprd_iommu_disable(dev);
173 sprd_iommu_gsp_clk_disable(dev);
174 mutex_unlock(&dev->mutex_map);
179 int sprd_iommu_gsp_backup(struct sprd_iommu_dev *dev)
181 #ifdef CONFIG_HAS_EARLYSUSPEND
186 mutex_lock(&dev->mutex_map);
187 pr_info("%s, map_count:%d\n", __func__, dev->map_count);
188 if (dev->map_count > 0) {
189 sprd_iommu_gsp_clk_enable(dev);
190 err = sprd_iommu_backup(dev);
191 sprd_iommu_gsp_clk_disable(dev);
193 mutex_unlock(&dev->mutex_map);
199 int sprd_iommu_gsp_restore(struct sprd_iommu_dev *dev)
201 #ifdef CONFIG_HAS_EARLYSUSPEND
206 mutex_lock(&dev->mutex_map);
207 pr_info("%s, map_count:%d\n", __func__, dev->map_count);
208 if (dev->map_count > 0) {
209 sprd_iommu_gsp_clk_enable(dev);
210 err = sprd_iommu_restore(dev);
211 sprd_iommu_gsp_clk_disable(dev);
213 mutex_unlock(&dev->mutex_map);
219 int sprd_iommu_gsp_dump(struct sprd_iommu_dev *dev, unsigned long iova,
222 return sprd_iommu_dump(dev, iova, iova_length);
225 struct sprd_iommu_ops iommu_gsp_ops = {
226 .init = sprd_iommu_gsp_init,
227 .exit = sprd_iommu_gsp_exit,
228 .iova_alloc = sprd_iommu_gsp_iova_alloc,
229 .iova_free = sprd_iommu_gsp_iova_free,
230 .iova_map = sprd_iommu_gsp_iova_map,
231 .iova_unmap = sprd_iommu_gsp_iova_unmap,
232 .backup = sprd_iommu_gsp_backup,
233 .restore = sprd_iommu_gsp_restore,
234 .enable = sprd_iommu_gsp_clk_enable,
235 .disable = sprd_iommu_gsp_clk_disable,
236 .dump = sprd_iommu_gsp_dump,
237 .open = sprd_iommu_gsp_open,
238 .release = sprd_iommu_gsp_release,