2 * Virtual Contiguous Memory driver API header
3 * Copyright (c) 2010 by Samsung Electronics.
4 * Written by Michal Nazarewicz (m.nazarewicz@samsung.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License or (at your option) any later version of the license.
13 * See Documentation/virtual-contiguous-memory.txt for details.
16 #ifndef __LINUX_VCM_DRV_H
17 #define __LINUX_VCM_DRV_H
19 #include <linux/vcm.h>
20 #include <linux/list.h>
21 #include <linux/mutex.h>
22 #include <linux/gfp.h>
24 #include <asm/atomic.h>
27 * struct vcm_driver - the MMU driver operations.
28 * @cleanup: called when a vcm object is destroyed; if omitted,
29 * kfree() will be used.
30 * @alloc: callback function for allocating physical memory and
31 * reserving virtual address space; XXX FIXME: document;
32 * if set, @res and @alloc are ignored.
33 * @res: creates a reservation of virtual address space; XXX FIXME:
34 * document; if @alloc is provided this is ignored.
35 * @phys: allocates a physical memory; XXX FIXME: document; if @alloc
36 * is provided this is ignored.
37 * @unreserve: destroys a virtual address space reservation created by @alloc;
39 * @map: reserves address space and binds a physical memory to it.
40 * @bind: binds a physical memory to a reserved address space.
41 * @unbind: unbinds a physical memory from reserved address space.
42 * @activate: activates the context making all bindings active; once
43 * the context has been activated, this callback is not
44 * called again until context is deactivated and
45 * activated again (so if user calls vcm_activate()
46 * several times only the first call in sequence will
47 * invoke this callback).
48 * @deactivate: deactivates the context making all bindings inactive;
49 * a call to this callback always accompanies a call to the
53 void (*cleanup)(struct vcm *vcm);
55 int (*alloc)(struct vcm *vcm, resource_size_t size,
56 struct vcm_phys **phys, unsigned alloc_flags,
57 struct vcm_res **res, unsigned res_flags);
58 struct vcm_res *(*res)(struct vcm *vcm, resource_size_t size,
60 struct vcm_phys *(*phys)(struct vcm *vcm, resource_size_t size,
63 void (*unreserve)(struct vcm_res *res);
65 struct vcm_res *(*map)(struct vcm *vcm, struct vcm_phys *phys,
67 int (*bind)(struct vcm_res *res, struct vcm_phys *phys);
68 void (*unbind)(struct vcm_res *res);
70 int (*activate)(struct vcm *vcm);
71 void (*deactivate)(struct vcm *vcm);
75 * struct vcm_phys - representation of allocated physical memory.
76 * @count: number of contiguous parts the memory consists of; if this
77 * equals one the whole memory block is physically contiguous;
79 * @size: total size of the allocated memory; read only.
80 * @free: callback function called when memory is freed; internal.
81 * @bindings: how many virtual address space reservations this memory has
82 * been bound to; internal.
83 * @parts: array of @count parts describing each physically contiguous
84 * memory block that the whole area consists of; each element
85 * describes part's physical starting address in bytes
86 * (@parts->start), its size in bytes (@parts->size) and
87 * (optionally) pointer to the first struct page (@parts->page);
94 void (*free)(struct vcm_phys *phys);
97 struct vcm_phys_part {
100 resource_size_t size;
105 * vcm_init() - initialises VCM context structure.
106 * @vcm: the VCM context to initialise.
108 * This function initialises the vcm structure created by a MMU driver
109 * when setting things up. It sets up all fields of the vcm structure
110 * except for @vcm->start, @vcm->size and @vcm->driver which are
111 * validated by this function. If they have invalid value function
112 * produces warning and returns an error-pointer. If everything is
113 * fine, @vcm is returned.
115 struct vcm *__must_check vcm_init(struct vcm *vcm);
117 #ifdef CONFIG_VCM_MMU
122 * struct vcm_mmu_driver - a driver used for real MMUs.
123 * @orders: array of orders of pages supported by the MMU sorted from
124 * the largest to the smallest. The last element is always
125 * zero (which means 4K page).
126 * @cleanup: Function called when the VCM context is destroyed;
127 * optional, if not provided, kfree() is used.
128 * @activate: callback function for activating a single mapping; its
129 * role is to set up the MMU so that reserved address space
130 * donated by res will point to physical memory donated by
131 * phys; called under spinlock with IRQs disabled - cannot
132 * sleep; required unless @activate_page and @deactivate_page
134 * @deactivate: this reverses the effect of @activate; called under spinlock
135 * with IRQs disabled - cannot sleep; required unless
136 * @deactivate_page is provided.
137 * @activate_page: callback function for activating a single page; it is
138 * ignored if @activate is provided; it's given a single
139 * page such that its order (given as third argument) is
140 * one of the supported orders specified in @orders;
141 * called under spinlock with IRQs disabled - cannot
142 * sleep; required unless @activate is provided.
143 * @deactivate_page: this reverses the effect of the @activate_page
144 * callback; called under spinlock with IRQs disabled
145 * - cannot sleep; required unless @activate and
146 * @deactivate are both provided.
148 struct vcm_mmu_driver {
149 const unsigned char *orders;
151 void (*cleanup)(struct vcm *vcm);
152 int (*activate)(struct vcm_res *res, struct vcm_phys *phys);
153 void (*deactivate)(struct vcm_res *res, struct vcm_phys *phys);
154 int (*activate_page)(dma_addr_t vaddr, dma_addr_t paddr,
155 unsigned order, void *vcm);
156 int (*deactivate_page)(dma_addr_t vaddr, dma_addr_t paddr,
157 unsigned order, void *vcm);
161 * struct vcm_mmu - VCM MMU context
163 * @driver: VCM MMU driver's operations.
164 * @pool: virtual address space allocator; internal.
165 * @bound_res: list of bound reservations; internal.
166 * @lock: protects @bound_res and calls to activate/deactivate
167 * operations; internal.
168 * @activated: whether VCM context has been activated; internal.
172 const struct vcm_mmu_driver *driver;
174 struct gen_pool *pool;
175 struct list_head bound_res;
176 /* Protects operations on bound_res list. */
182 * vcm_mmu_init() - initialises a VCM context for a real MMU.
183 * @mmu: the vcm_mmu context to initialise.
185 * This function initialises the vcm_mmu structure created by a MMU
186 * driver when setting things up. It sets up all fields of the
187 * structure except for @mmu->vcm.start, @mmu->vcm.size and
188 * @mmu->driver which are validated by this function. If they have
189 * invalid value function produces warning and returns an
190 * error-pointer. On any other error, an error-pointer is returned as
191 * well. If everything is fine, address of @mmu->vcm is returned.
193 struct vcm *__must_check vcm_mmu_init(struct vcm_mmu *mmu);
197 #ifdef CONFIG_VCM_O2O
200 * struct vcm_o2o_driver - VCM One-to-One driver
201 * @cleanup: cleans up the VCM context; if not specified, kfree() is used.
202 * @phys: allocates a physical contiguous memory block; this is used in
203 * the same way &struct vcm_driver's phys is used except that it must
204 * provide a contiguous block (ie. exactly one part); required.
206 struct vcm_o2o_driver {
207 void (*cleanup)(struct vcm *vcm);
208 struct vcm_phys *(*phys)(struct vcm *vcm, resource_size_t size,
213 * struct vcm_o2o - VCM One-to-One context
215 * @driver: VCM One-to-One driver's operations.
219 const struct vcm_o2o_driver *driver;
223 * vcm_o2o_init() - initialises a VCM context for a one-to-one context.
224 * @o2o: the vcm_o2o context to initialise.
226 * This function initialises the vcm_o2o structure created by a O2O
227 * driver when setting things up. It sets up all fields of the
228 * structure except for @o2o->vcm.start, @o2o->vcm.size and
229 * @o2o->driver which are validated by this function. If they have
230 * invalid value function produces warning and returns an
231 * error-pointer. On any other error, an error-pointer is returned as
232 * well. If everything is fine, address of @o2o->vcm is returned.
234 struct vcm *__must_check vcm_o2o_init(struct vcm_o2o *o2o);
238 #ifdef CONFIG_VCM_PHYS
241 * __vcm_phys_alloc() - allocates physical discontiguous space
242 * @size: size of the block to allocate.
243 * @flags: additional allocation flags; XXX FIXME: document
244 * @orders: array of orders of pages supported by the MMU sorted from
245 * the largest to the smallest. The last element is always
246 * zero (which means 4K page).
247 * @gfp: the gfp flags for pages to allocate.
249 * This function tries to allocate a physical discontiguous space in
250 * such a way that it allocates the largest possible blocks from the
251 * sizes donated by the @orders array. So if @orders is { 8, 0 }
252 * (which means 1MiB and 4KiB pages are to be used) and requested
253 * @size is 2MiB and 12KiB the function will try to allocate two 1MiB
254 * pages and three 4KiB pages (in that order). If big page cannot be
255 * allocated the function will still try to allocate more smaller
258 struct vcm_phys *__must_check
259 __vcm_phys_alloc(resource_size_t size, unsigned flags,
260 const unsigned char *orders, gfp_t gfp);
263 * __vcm_phys_alloc_coherent() - allocates coherent physical discontiguous space
264 * @size: size of the block to allocate.
265 * @flags: additional allocation flags; XXX FIXME: document
266 * @orders: array of orders of pages supported by the MMU sorted from
267 * the largest to the smallest. The last element is always
268 * zero (which means 4K page).
269 * @gfp: the gfp flags for pages to allocate.
271 * Everything is the same as __vcm_phys_alloc() except that this function invalidates
272 * all H/W cache lines as soon as it allocates physical memory.
274 struct vcm_phys *__must_check
275 __vcm_phys_alloc_coherent(resource_size_t size, unsigned flags,
276 const unsigned char *orders, gfp_t gfp);
279 * vcm_phys_alloc_raw() - allocates physical discontiguous space
280 * @size: size of the block to allocate.
281 * @flags: additional allocation flags; XXX FIXME: document
282 * @orders: array of orders of pages supported by the MMU sorted from
283 * the largest to the smallest. The last element is always
284 * zero (which means 4K page).
286 * This function tries to allocate a physical discontiguous space in
287 * such a way that it allocates the largest possible blocks from the
288 * sizes donated by the @orders array. So if @orders is { 8, 0 }
289 * (which means 1MiB and 4KiB pages are to be used) and requested
290 * @size is 2MiB and 12KiB the function will try to allocate two 1MiB
291 * pages and three 4KiB pages (in that order). If big page cannot be
292 * allocated the function will still try to allocate more smaller
295 static inline struct vcm_phys *__must_check
296 vcm_phys_alloc_raw(resource_size_t size, unsigned flags,
297 const unsigned char *orders) {
298 return __vcm_phys_alloc(size, flags, orders, GFP_DMA32);
302 * vcm_phys_alloc() - allocates coherent physical discontiguous space
303 * @size: size of the block to allocate.
304 * @flags: additional allocation flags; XXX FIXME: document
305 * @orders: array of orders of pages supported by the MMU sorted from
306 * the largest to the smallest. The last element is always
307 * zero (which means 4K page).
309 * This function is exactly the same as vcm_phys_alloc_raw() except that it
310 * guarantees that the allocated page frames are not cached by either H/W inner and
313 static inline struct vcm_phys *__must_check
314 vcm_phys_alloc(resource_size_t size, unsigned flags,
315 const unsigned char *orders) {
316 return __vcm_phys_alloc_coherent(size, flags, orders, GFP_DMA32);
320 * vcm_phys_walk() - helper function for mapping physical pages
321 * @vaddr: virtual address to map/unmap physical space to/from
322 * @phys: physical space
323 * @orders: array of orders of pages supported by the MMU sorted from
324 * the largest to the smallest. The last element is always
325 * zero (which means 4K page).
326 * @callback: function called for each page.
327 * @recovery: function called for each page when @callback returns
328 * negative number; if it also returns negative number
329 * function terminates; may be NULL.
330 * @priv: private data for the callbacks.
332 * This function walks through @phys trying to match the largest possible
333 * page size donated by @orders. For each such page @callback is
334 * called. If @callback returns negative number the function calls
335 * @recovery for each page for which @callback was called successfully.
337 * So, for instance, if we have a physical memory which consist of
338 * 1MiB part and 8KiB part and @orders is { 8, 0 } (which means 1MiB
339 * and 4KiB pages are to be used), @callback will be called first with
340 * 1MiB page and then two times with 4KiB page. This is of course
341 * provided that @vaddr has correct alignment.
343 * The idea is for hardware MMU drivers to call this function and
344 * provide a callbacks for mapping/unmapping a single page. The
345 * function divides the region into pages that the MMU can handle.
347 * If @callback at one point returns a negative number this is the
348 * return value of the function; otherwise zero is returned.
350 int vcm_phys_walk(dma_addr_t vaddr, const struct vcm_phys *phys,
351 const unsigned char *orders,
352 int (*callback)(dma_addr_t vaddr, dma_addr_t paddr,
353 unsigned order, void *priv),
354 int (*recovery)(dma_addr_t vaddr, dma_addr_t paddr,
355 unsigned order, void *priv),