/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "exynos_drm.h"

#include <linux/cma.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
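
/*
 * lowlevel_buffer_allocate() - back a GEM buffer with physically contiguous
 * memory (CMA when CONFIG_CMA is set, otherwise dma_alloc_writecombine()),
 * build its scatter-gather table and page array, and record the kernel
 * virtual address and DMA address in @buf.
 */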
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	dma_addr_t start_addr;
	unsigned int npages, i = 0;
	struct scatterlist *sgl;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* This helper only handles physically contiguous buffers. */
	if (IS_NONCONTIG_BUFFER(flags)) {
		DRM_DEBUG_KMS("unsupported allocation type.\n");
		return -EINVAL;
	}

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	/* Pick the largest mapping unit that the buffer size allows. */
	if (buf->size >= SZ_1M) {
		npages = buf->size >> SECTION_SHIFT;
		buf->page_size = SECTION_SIZE;
	} else if (buf->size >= SZ_64K) {
		npages = buf->size >> 16;
		buf->page_size = SZ_64K;
	} else {
		npages = buf->size >> PAGE_SHIFT;
		buf->page_size = PAGE_SIZE;
	}

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		return -ENOMEM;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		kfree(buf->sgt);
		buf->sgt = NULL;
		return -ENOMEM;
	}

#ifdef CONFIG_CMA
	buf->dma_addr = cma_alloc(dev->dev, "drm", buf->size,
					buf->page_size);
	if (IS_ERR((void *)buf->dma_addr)) {
		DRM_DEBUG_KMS("cma_alloc of size %lu failed\n", buf->size);
		ret = -ENOMEM;
		goto err1;
	}

	buf->kvaddr = phys_to_virt(buf->dma_addr);
#else
	/* TODO: align the size to the selected page size (page or section). */

	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
			&buf->dma_addr, GFP_KERNEL);
	if (!buf->kvaddr) {
		DRM_ERROR("failed to allocate buffer.\n");
		ret = -ENOMEM;
		goto err1;
	}
#endif

	/* buf->pages is an array of page pointers, one per mapping unit. */
	buf->pages = kzalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!buf->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err2;
	}

	/* Fill the sg table with one entry per mapping unit. */
	sgl = buf->sgt->sgl;
	start_addr = buf->dma_addr;

	while (i < npages) {
		buf->pages[i] = phys_to_page(start_addr);
		sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
		sg_dma_address(sgl) = start_addr;
		start_addr += buf->page_size;
		sgl = sg_next(sgl);
		i++;
	}

	DRM_INFO("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->kvaddr,
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err2:
#ifdef CONFIG_CMA
	cma_free(buf->dma_addr);
#else
	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
			buf->dma_addr);
#endif
	buf->dma_addr = 0;
err1:
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	return ret;
}
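
/*
 * lowlevel_buffer_deallocate() - undo lowlevel_buffer_allocate(): free the
 * sg table and page array and return the contiguous memory, unless the
 * buffer is shared or nothing was allocated.
 */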
static void lowlevel_buffer_deallocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	/*
	 * The buffer is being shared, so it will be released by its
	 * original owner; skip freeing it here.
	 */
	if (buf->shared || atomic_read(&buf->shared_refcount))
		return;

	/*
	 * Only physically contiguous memory is released here;
	 * non-contiguous memory is released by the exynos GEM framework.
	 */
	if (IS_NONCONTIG_BUFFER(flags)) {
		DRM_DEBUG_KMS("unsupported allocation type.\n");
		return;
	}

	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

	kfree(buf->pages);
	buf->pages = NULL;

#ifdef CONFIG_CMA
	cma_free(buf->dma_addr);
#else
	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
				buf->dma_addr);
#endif
	buf->dma_addr = 0;
}
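
/*
 * exynos_drm_init_buf() - allocate and initialize a struct
 * exynos_drm_gem_buf of the requested size; the backing memory itself is
 * allocated later by exynos_drm_alloc_buf().
 */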
struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
						unsigned int size)
{
	struct exynos_drm_gem_buf *buffer;

	DRM_DEBUG_KMS("%s.\n", __FILE__);
	DRM_DEBUG_KMS("desired size = 0x%x\n", size);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		return NULL;
	}

	buffer->size = size;
	return buffer;
}
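
/*
 * exynos_drm_fini_buf() - free a buffer object created by
 * exynos_drm_init_buf().
 */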
void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	kfree(buffer);
}
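
/*
 * exynos_drm_alloc_buf() - allocate the backing memory for @buf according
 * to the given @flags; thin wrapper around lowlevel_buffer_allocate().
 */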
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{
	/*
	 * Allocate the memory region and store its kernel virtual address
	 * and DMA address in the buffer object.
	 */
	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
		return -ENOMEM;

	return 0;
}
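
/*
 * exynos_drm_free_buf() - release the backing memory of @buffer; thin
 * wrapper around lowlevel_buffer_deallocate().
 */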
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Buffer Management Module");
MODULE_LICENSE("GPL");