src/gallium/drivers/nvfx/nvfx_transfer.c (profile/ivi/osmesa.git)
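/*
 * Transfer (CPU map/unmap) paths for the nvfx Gallium driver.
 *
 * Buffers are mapped directly through their CPU-side data pointer and only a
 * dirty byte range is recorded for a later upload; textures go through
 * util_staging, either mapping the underlying BO in place or using a staging
 * copy that is copied back when the transfer is destroyed.
 */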
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_staging.h"
#include "nvfx_context.h"
#include "nvfx_screen.h"
#include "nvfx_state.h"
#include "nvfx_resource.h"
#include "nvfx_transfer.h"

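/*
 * Wraps util_staging_transfer with the byte offset of the requested box
 * inside the BO that actually gets mapped, plus a map count so nested
 * map/unmap calls only map and unmap that BO once.
 */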
struct nvfx_staging_transfer
{
        struct util_staging_transfer base;

        unsigned offset;
        unsigned map_count;
};

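/*
 * Create a transfer. PIPE_TRANSFER_DONTBLOCK (without
 * PIPE_TRANSFER_UNSYNCHRONIZED) fails early if the underlying BO is still
 * busy. Buffer transfers are a plain pipe_transfer whose data pointer goes
 * straight into the buffer's CPU copy; miptree transfers use util_staging,
 * mapping the resource in place when it is linear and not resident in GPU
 * memory, and going through a staging copy otherwise.
 */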
struct pipe_transfer *
nvfx_transfer_new(struct pipe_context *pipe,
                  struct pipe_resource *pt,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box)
{
        if((usage & (PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_DONTBLOCK)) == PIPE_TRANSFER_DONTBLOCK)
        {
                struct nouveau_bo* bo = ((struct nvfx_resource*)pt)->bo;
                if(bo && nouveau_bo_busy(bo, NOUVEAU_BO_WR))
                        return NULL;
        }

        if(pt->target == PIPE_BUFFER)
        {
                // it would be nice if we could avoid all this ridiculous overhead...
                struct pipe_transfer* tx;
                struct nvfx_buffer* buffer = nvfx_buffer(pt);

                tx = CALLOC_STRUCT(pipe_transfer);
                if (!tx)
                        return NULL;

                pipe_resource_reference(&tx->resource, pt);
                tx->level = level;
                tx->usage = usage;
                tx->box = *box;

                tx->layer_stride = tx->stride = util_format_get_stride(pt->format, box->width);
                tx->data = buffer->data + util_format_get_stride(pt->format, box->x);

                return tx;
        }
        else
        {
                struct nvfx_staging_transfer* tx;
                boolean direct = !nvfx_resource_on_gpu(pt) && (pt->flags & NVFX_RESOURCE_FLAG_LINEAR);

                tx = CALLOC_STRUCT(nvfx_staging_transfer);
                if(!tx)
                        return NULL;

                util_staging_transfer_init(pipe, pt, level, usage, box, direct, &tx->base);

                if(direct)
                {
                        tx->base.base.stride = nvfx_subresource_pitch(pt, level);
                        tx->base.base.layer_stride = tx->base.base.stride * u_minify(pt->height0, level);
                        tx->offset = nvfx_subresource_offset(pt, box->z, level, box->z)
                                + util_format_get_2d_size(pt->format, tx->base.base.stride, box->y)
                                + util_format_get_stride(pt->format, box->x);
                }
                else
                {
                        tx->base.base.stride = nvfx_subresource_pitch(tx->base.staging_resource, 0);
                        tx->base.base.layer_stride = tx->base.base.stride * tx->base.staging_resource->height0;
                        tx->offset = 0;
                }

                assert(tx->base.base.stride);

                return &tx->base.base;
        }
}

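/*
 * Record that [begin, begin + size) of the buffer's CPU copy has been
 * written. The dirty window grows to cover every write since the last
 * flush, and bytes_to_draw_until_static is refreshed: it counts how many
 * bytes still have to be drawn from this buffer before it is again
 * considered worth keeping as a static (GPU-resident) copy.
 */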
static void nvfx_buffer_dirty_interval(struct nvfx_buffer* buffer, unsigned begin, unsigned size, boolean unsynchronized)
{
        struct nvfx_screen* screen = nvfx_screen(buffer->base.base.screen);
        buffer->last_update_static = buffer->bytes_to_draw_until_static < 0;
        if(buffer->dirty_begin == buffer->dirty_end)
        {
                buffer->dirty_begin = begin;
                buffer->dirty_end = begin + size;
                buffer->dirty_unsynchronized = unsynchronized;
        }
        else
        {
                buffer->dirty_begin = MIN2(buffer->dirty_begin, begin);
                buffer->dirty_end = MAX2(buffer->dirty_end, begin + size);
                buffer->dirty_unsynchronized &= unsynchronized;
        }

        if(unsynchronized)
        {
                // TODO: revisit this, it doesn't seem quite right
                //printf("UNSYNC UPDATE %p %u %u\n", buffer, begin, size);
                buffer->bytes_to_draw_until_static += size * screen->static_reuse_threshold;
        }
        else
                buffer->bytes_to_draw_until_static = buffer->size * screen->static_reuse_threshold;
}

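/*
 * For buffers mapped with PIPE_TRANSFER_FLUSH_EXPLICIT, mark only the
 * explicitly flushed sub-range as dirty. Staging texture transfers need no
 * handling here; their copy-back happens when the transfer is destroyed.
 */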
static void nvfx_transfer_flush_region( struct pipe_context *pipe,
                                      struct pipe_transfer *ptx,
                                      const struct pipe_box *box)
{
        if(ptx->resource->target == PIPE_BUFFER && (ptx->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
        {
                struct nvfx_buffer* buffer = nvfx_buffer(ptx->resource);
                nvfx_buffer_dirty_interval(buffer,
                                (uint8_t*)ptx->data - buffer->data + util_format_get_stride(buffer->base.base.format, box->x),
                                util_format_get_stride(buffer->base.base.format, box->width),
                                !!(ptx->usage & PIPE_TRANSFER_UNSYNCHRONIZED));
        }
}

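/*
 * Destroy a transfer. A buffer mapped for write without FLUSH_EXPLICIT gets
 * its whole mapped range marked dirty now. Staging texture transfers are
 * torn down by util_staging (copying back to the real resource if needed),
 * and the channel is fired so that copy reaches the GPU.
 */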
static void
nvfx_transfer_destroy(struct pipe_context *pipe, struct pipe_transfer *ptx)
{
        if(ptx->resource->target == PIPE_BUFFER)
        {
                struct nvfx_buffer* buffer = nvfx_buffer(ptx->resource);
                if((ptx->usage & (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT)) == PIPE_TRANSFER_WRITE)
                        nvfx_buffer_dirty_interval(buffer,
                                (uint8_t*)ptx->data - buffer->data,
                                ptx->stride,
                                !!(ptx->usage & PIPE_TRANSFER_UNSYNCHRONIZED));
                pipe_resource_reference(&ptx->resource, NULL);
                FREE(ptx);
        }
        else
        {
                struct nouveau_channel* chan = nvfx_context(pipe)->screen->base.channel;
                util_staging_transfer_destroy(pipe, ptx);

                FIRE_RING(chan);
        }
}

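/*
 * Map a transfer. Buffer transfers were created with data already pointing
 * into the buffer's CPU copy. Texture transfers map the direct or staging
 * BO lazily on first use and keep a map count.
 */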
void *
nvfx_transfer_map(struct pipe_context *pipe, struct pipe_transfer *ptx)
{
        if(ptx->resource->target == PIPE_BUFFER)
                return ptx->data;
        else
        {
                struct nvfx_staging_transfer *tx = (struct nvfx_staging_transfer *)ptx;
                if(!ptx->data)
                {
                        struct nvfx_miptree *mt = (struct nvfx_miptree *)tx->base.staging_resource;
                        uint8_t *map = nouveau_screen_bo_map(pipe->screen, mt->base.bo, nouveau_screen_transfer_flags(ptx->usage));
                        ptx->data = map + tx->offset;
                }

                ++tx->map_count;
                return ptx->data;
        }
}

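/*
 * Unmap a transfer. Buffers have nothing to do; texture transfers drop a
 * map reference and unmap the BO once the last reference is gone.
 */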
void
nvfx_transfer_unmap(struct pipe_context *pipe, struct pipe_transfer *ptx)
{
        if(ptx->resource->target != PIPE_BUFFER)
        {
                struct nvfx_staging_transfer *tx = (struct nvfx_staging_transfer *)ptx;
                struct nvfx_miptree *mt = (struct nvfx_miptree *)tx->base.staging_resource;

                if(!--tx->map_count)
                {
                        nouveau_screen_bo_unmap(pipe->screen, mt->base.bo);
                        ptx->data = NULL;
                }
        }
}

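/*
 * transfer_inline_write fast path: buffer data is copied straight into the
 * CPU copy and the written range is marked dirty; everything else falls
 * back to the generic u_default_transfer_inline_write helper.
 */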
static void nvfx_transfer_inline_write( struct pipe_context *pipe,
                                      struct pipe_resource *pr,
                                      unsigned level,
                                      unsigned usage,
                                      const struct pipe_box *box,
                                      const void *data,
                                      unsigned stride,
                                      unsigned slice_stride)
{
        if(pr->target != PIPE_BUFFER)
        {
                u_default_transfer_inline_write(pipe, pr, level, usage, box, data, stride, slice_stride);
        }
        else
        {
                struct nvfx_buffer* buffer = nvfx_buffer(pr);
                unsigned begin = util_format_get_stride(pr->format, box->x);
                unsigned size = util_format_get_stride(pr->format, box->width);
                memcpy(buffer->data + begin, data, size);
                nvfx_buffer_dirty_interval(buffer, begin, size,
                                !!(usage & PIPE_TRANSFER_UNSYNCHRONIZED));
        }
}

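/*
 * Plug the transfer entry points into the pipe_context vtable.
 */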
void
nvfx_init_transfer_functions(struct pipe_context *pipe)
{
        pipe->get_transfer = nvfx_transfer_new;
        pipe->transfer_map = nvfx_transfer_map;
        pipe->transfer_flush_region = nvfx_transfer_flush_region;
        pipe->transfer_unmap = nvfx_transfer_unmap;
        pipe->transfer_destroy = nvfx_transfer_destroy;
        pipe->transfer_inline_write = nvfx_transfer_inline_write;
}