/*
 * Copyright 2010 Dave Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <pipe/p_compiler.h>
#include <pipe/p_screen.h>
#include <pipebuffer/pb_bufmgr.h>
#include "state_tracker/drm_driver.h"
#include "r600_priv.h"
#include "r600d.h"
#include "drm.h"
#include "radeon_drm.h"
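
/* Allocate a new buffer object.  The pipe_resource "binding" and "usage"
 * flags decide which GEM domains the buffer may live in and whether it is
 * eligible for recycling through the bomgr sub-allocator.
 */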
struct r600_bo *r600_bo(struct radeon *radeon,
			unsigned size, unsigned alignment,
			unsigned binding, unsigned usage)
{
	struct r600_bo *bo;
	struct radeon_bo *rbo;
	uint32_t initial_domain, domains;

	/* Staging resources participate in transfers and blits only
	 * and are used for uploads and downloads from regular
	 * resources.  We generate them internally for some transfers.
	 */
	if (usage == PIPE_USAGE_STAGING)
		domains = RADEON_GEM_DOMAIN_CPU | RADEON_GEM_DOMAIN_GTT;
	else
		domains = (RADEON_GEM_DOMAIN_CPU |
			   RADEON_GEM_DOMAIN_GTT |
			   RADEON_GEM_DOMAIN_VRAM);
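
	/* Constant, vertex and index buffers tend to be small and short
	 * lived, so try the fenced sub-allocator first and fall back to a
	 * fresh kernel allocation if it cannot service the request.
	 */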
	if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
		bo = r600_bomgr_bo_create(radeon->bomgr, size, alignment, *radeon->cfence);
		if (bo != NULL) {
			bo->domains = domains;
			return bo;
		}
	}
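
	/* Choose where the kernel should initially place the buffer:
	 * CPU-accessible GTT for data that is rewritten often, VRAM
	 * otherwise.
	 */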
	switch (usage) {
	case PIPE_USAGE_DYNAMIC:
	case PIPE_USAGE_STREAM:
	case PIPE_USAGE_STAGING:
		initial_domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_STATIC:
	case PIPE_USAGE_IMMUTABLE:
	default:
		initial_domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	}

	rbo = radeon_bo(radeon, 0, size, alignment, initial_domain);
	if (rbo == NULL) {
		return NULL;
	}

	bo = calloc(1, sizeof(struct r600_bo));
	if (bo == NULL) {
		radeon_bo_reference(radeon, &rbo, NULL);
		return NULL;
	}
	bo->size = size;
	bo->alignment = alignment;
	bo->domains = domains;
	bo->bo = rbo;
	if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
		r600_bomgr_bo_init(radeon->bomgr, bo);
	}

	pipe_reference_init(&bo->reference, 1);
	return bo;
}
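
/* Wrap an existing kernel buffer object given its GEM handle (used for
 * buffers shared with other processes, e.g. scanout surfaces) and derive
 * the array mode from the tiling flags the kernel has recorded for it.
 */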
struct r600_bo *r600_bo_handle(struct radeon *radeon,
			       unsigned handle, unsigned *array_mode)
{
	struct r600_bo *bo = calloc(1, sizeof(struct r600_bo));
	struct radeon_bo *rbo;

	if (bo == NULL)
		return NULL;

	rbo = bo->bo = radeon_bo(radeon, handle, 0, 0, 0);
	if (rbo == NULL) {
		free(bo);
		return NULL;
	}

	bo->size = rbo->size;
	bo->domains = (RADEON_GEM_DOMAIN_CPU |
		       RADEON_GEM_DOMAIN_GTT |
		       RADEON_GEM_DOMAIN_VRAM);

	pipe_reference_init(&bo->reference, 1);

	radeon_bo_get_tiling_flags(radeon, rbo, &bo->tiling_flags, &bo->kernel_pitch);
	if (array_mode) {
		if (bo->tiling_flags) {
			if (bo->tiling_flags & RADEON_TILING_MACRO)
				*array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
			else if (bo->tiling_flags & RADEON_TILING_MICRO)
				*array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
		} else {
			*array_mode = 0;
		}
	}
	return bo;
}
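
/* Map a buffer for CPU access and return a pointer to its contents.
 * PB_USAGE_UNSYNCHRONIZED maps without waiting for the GPU,
 * PB_USAGE_DONTBLOCK returns NULL instead of stalling, and the default
 * path flushes pending commands and waits for the buffer to go idle.
 */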
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx)
{
	struct pipe_context *pctx = ctx;

	if (usage & PB_USAGE_UNSYNCHRONIZED) {
		radeon_bo_map(radeon, bo->bo);
		return (uint8_t *) bo->bo->data + bo->offset;
	}

	/* The bo is still referenced by an unflushed command stream:
	 * submit that work first, or the wait below could never finish.
	 */
	if (p_atomic_read(&bo->bo->reference.count) > 1) {
		if (usage & PB_USAGE_DONTBLOCK) {
			return NULL;
		}
		if (pctx) {
			pctx->flush(pctx, NULL);
		}
	}

	if (usage & PB_USAGE_DONTBLOCK) {
		uint32_t domain;

		if (radeon_bo_busy(radeon, bo->bo, &domain))
			return NULL;
		if (radeon_bo_map(radeon, bo->bo)) {
			return NULL;
		}
		goto out;
	}

	radeon_bo_map(radeon, bo->bo);
	if (radeon_bo_wait(radeon, bo->bo)) {
		radeon_bo_unmap(radeon, bo->bo);
		return NULL;
	}

out:
	return (uint8_t *) bo->bo->data + bo->offset;
}

void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo)
{
	radeon_bo_unmap(radeon, bo->bo);
}

void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo)
{
	if (bo->manager_id) {
		if (!r600_bomgr_bo_destroy(radeon->bomgr, bo)) {
			/* destroy is delayed by the buffer manager */
			return;
		}
	}

	radeon_bo_reference(radeon, &bo->bo, NULL);
	free(bo);
}
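
/* Export a buffer for use by another process or API: a KMS handle passes
 * the raw GEM handle through, while a shared handle is the global flink
 * name obtained from the kernel.
 */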
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *bo,
				  unsigned stride, struct winsys_handle *whandle)
{
	whandle->stride = stride;
	switch (whandle->type) {
	case DRM_API_HANDLE_TYPE_KMS:
		whandle->handle = bo->bo->handle;
		break;
	case DRM_API_HANDLE_TYPE_SHARED:
		if (radeon_bo_get_name(radeon, bo->bo, &whandle->handle))
			return FALSE;
		break;
	default:
		return FALSE;
	}

	return TRUE;
}