}
+ brw_upload_init(&brw->upload, brw->bufmgr, 65536);
+
brw_init_state(brw);
intelInitExtensions(ctx);
uint32_t n_b_counter_regs;
};
+/**
+ * Generic streaming-upload helper state.
+ *
+ * Batches small allocations into a shared BO to reduce memory footprint
+ * (see brw_upload_space()).  Invariant: bo and map are both NULL or both
+ * non-NULL (asserted in brw_upload_finish()/brw_upload_space()).
+ */
+struct brw_uploader {
+ struct brw_bufmgr *bufmgr; /* allocator used by brw_bo_alloc() for upload BOs */
+ struct brw_bo *bo; /* current upload BO, or NULL when none is open */
+ void *map; /* CPU mapping of bo (NULL iff bo is NULL) */
+ uint32_t next_offset; /* first unused byte within bo */
+ unsigned default_size; /* minimum size for a newly allocated upload BO */
+};
+
/**
* brw_context is derived from gl_context.
*/
struct intel_batchbuffer batch;
- struct {
- struct brw_bo *bo;
- void *map;
- uint32_t next_offset;
- } upload;
+ struct brw_uploader upload;
/**
* Set if rendering has occurred to the drawable's front buffer.
goto emit;
}
- buf = intel_upload_space(brw, bufsz, 64,
- &brw->curbe.curbe_bo, &brw->curbe.curbe_offset);
+ buf = brw_upload_space(&brw->upload, bufsz, 64,
+ &brw->curbe.curbe_bo, &brw->curbe.curbe_offset);
STATIC_ASSERT(sizeof(gl_constant_value) == sizeof(float));
* to replicate it out.
*/
if (src_stride == 0) {
- intel_upload_data(brw, element->glarray->Ptr,
- element->glarray->_ElementSize,
- element->glarray->_ElementSize,
- &buffer->bo, &buffer->offset);
+ brw_upload_data(&brw->upload, element->glarray->Ptr,
+ element->glarray->_ElementSize,
+ element->glarray->_ElementSize,
+ &buffer->bo, &buffer->offset);
buffer->stride = 0;
buffer->size = element->glarray->_ElementSize;
const unsigned char *src = element->glarray->Ptr + min * src_stride;
int count = max - min + 1;
GLuint size = count * dst_stride;
- uint8_t *dst = intel_upload_space(brw, size, dst_stride,
- &buffer->bo, &buffer->offset);
+ uint8_t *dst = brw_upload_space(&brw->upload, size, dst_stride,
+ &buffer->bo, &buffer->offset);
/* The GL 4.5 spec says:
* "If any enabled array’s buffer binding is zero when DrawArrays or
/* For non-indirect draws, upload gl_BaseVertex. */
if ((vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) &&
brw->draw.draw_params_bo == NULL) {
- intel_upload_data(brw, &brw->draw.params, sizeof(brw->draw.params), 4,
- &brw->draw.draw_params_bo,
- &brw->draw.draw_params_offset);
+ brw_upload_data(&brw->upload,
+ &brw->draw.params, sizeof(brw->draw.params), 4,
+ &brw->draw.draw_params_bo,
+ &brw->draw.draw_params_offset);
}
if (vs_prog_data->uses_drawid) {
- intel_upload_data(brw, &brw->draw.gl_drawid, sizeof(brw->draw.gl_drawid), 4,
- &brw->draw.draw_id_bo,
- &brw->draw.draw_id_offset);
+ brw_upload_data(&brw->upload,
+ &brw->draw.gl_drawid, sizeof(brw->draw.gl_drawid), 4,
+ &brw->draw.draw_id_bo,
+ &brw->draw.draw_id_offset);
}
}
if (!_mesa_is_bufferobj(bufferobj)) {
/* Get new bufferobj, offset:
*/
- intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
- &brw->ib.bo, &offset);
+ brw_upload_data(&brw->upload, index_buffer->ptr, ib_size, ib_type_size,
+ &brw->ib.bo, &offset);
brw->ib.size = brw->ib.bo->size;
} else {
offset = (GLuint) (unsigned long) index_buffer->ptr;
if (brw->compute.num_work_groups_bo == NULL) {
bo = NULL;
- intel_upload_data(brw,
- (void *)brw->compute.num_work_groups,
- 3 * sizeof(GLuint),
- sizeof(GLuint),
- &bo,
- &bo_offset);
+ brw_upload_data(&brw->upload,
+ (void *)brw->compute.num_work_groups,
+ 3 * sizeof(GLuint),
+ sizeof(GLuint),
+ &bo,
+ &bo_offset);
} else {
bo = brw->compute.num_work_groups_bo;
bo_offset = brw->compute.num_work_groups_offset;
const int size = prog_data->nr_params * sizeof(gl_constant_value);
gl_constant_value *param;
if (devinfo->gen >= 8 || devinfo->is_haswell) {
- param = intel_upload_space(brw, size, 32,
- &stage_state->push_const_bo,
- &stage_state->push_const_offset);
+ param = brw_upload_space(&brw->upload, size, 32,
+ &stage_state->push_const_bo,
+ &stage_state->push_const_offset);
} else {
param = brw_state_batch(brw, size, 32,
&stage_state->push_const_offset);
uint32_t size = prog_data->nr_pull_params * 4;
struct brw_bo *const_bo = NULL;
uint32_t const_offset;
- gl_constant_value *constants = intel_upload_space(brw, size, 64,
- &const_bo, &const_offset);
+ gl_constant_value *constants = brw_upload_space(&brw->upload, size, 64,
+ &const_bo, &const_offset);
STATIC_ASSERT(sizeof(gl_constant_value) == sizeof(float));
assert(!brw->batch.no_wrap);
brw_finish_batch(brw);
- intel_upload_finish(brw);
+ brw_upload_finish(&brw->upload);
finish_growing_bos(&brw->batch.batch);
finish_growing_bos(&brw->batch.state);
uint32_t size,
bool write);
-void intel_upload_data(struct brw_context *brw,
- const void *data,
+/**
+ * Copy \p size bytes of \p data into the streaming-upload BO.
+ * See brw_upload_space() for the out_bo/out_offset contract.
+ */
+void brw_upload_data(struct brw_uploader *upload,
+ const void *data,
+ uint32_t size,
+ uint32_t alignment,
+ struct brw_bo **out_bo,
+ uint32_t *out_offset);
+
+/**
+ * Reserve \p size bytes (aligned to \p alignment) in the upload BO and
+ * return a CPU pointer to the reserved space; the pointer is only valid
+ * until brw_upload_finish().
+ */
+void *brw_upload_space(struct brw_uploader *upload,
uint32_t size,
uint32_t alignment,
struct brw_bo **out_bo,
uint32_t *out_offset);
-void *intel_upload_space(struct brw_context *brw,
- uint32_t size,
- uint32_t alignment,
- struct brw_bo **out_bo,
- uint32_t *out_offset);
-
-void intel_upload_finish(struct brw_context *brw);
+/** Unmap and release the current upload BO, if any. */
+void brw_upload_finish(struct brw_uploader *upload);
+
+/** Initialize \p upload; \p default_size is the minimum BO allocation size. */
+void brw_upload_init(struct brw_uploader *upload,
+ struct brw_bufmgr *bufmgr,
+ unsigned default_size);
/* Hook the bufferobject implementation into mesa:
*/
*/
#include "main/imports.h"
-#include "main/mtypes.h"
#include "main/macros.h"
-#include "main/bufferobj.h"
-
+#include "brw_bufmgr.h"
#include "brw_context.h"
-#include "intel_blit.h"
#include "intel_buffer_objects.h"
-#include "intel_batchbuffer.h"
-#include "intel_fbo.h"
-#include "intel_mipmap_tree.h"
-
-#include "brw_context.h"
-
-#define INTEL_UPLOAD_SIZE (64*1024)
+/**
+ * Unmap and drop our reference to the current upload BO, resetting the
+ * uploader to the empty state.  Safe to call when no BO is open.
+ */
void
-intel_upload_finish(struct brw_context *brw)
+brw_upload_finish(struct brw_uploader *upload)
{
- assert((brw->upload.bo == NULL) == (brw->upload.map == NULL));
- if (!brw->upload.bo)
+ /* bo and map must be set/cleared together. */
+ assert((upload->bo == NULL) == (upload->map == NULL));
+ if (!upload->bo)
return;
- brw_bo_unmap(brw->upload.bo);
- brw_bo_unreference(brw->upload.bo);
- brw->upload.bo = NULL;
- brw->upload.map = NULL;
- brw->upload.next_offset = 0;
+ brw_bo_unmap(upload->bo);
+ brw_bo_unreference(upload->bo);
+ /* Reset so the next brw_upload_space() call allocates a fresh BO. */
+ upload->bo = NULL;
+ upload->map = NULL;
+ upload->next_offset = 0;
}
/**
* In most cases, streamed data (for GPU state structures, for example) is
* uploaded through brw_state_batch(), since that interface allows relocations
* from the streamed space returned to other BOs. However, that interface has
- * the restriction that the amount of space allocated has to be "small" (see
- * estimated_max_prim_size in brw_draw.c).
+ * the restriction that the amount of space allocated has to be "small".
*
* This interface, on the other hand, is able to handle arbitrary sized
* allocation requests, though it will batch small allocations into the same
* BO for efficiency and reduced memory footprint.
*
- * \note The returned pointer is valid only until intel_upload_finish(), which
- * will happen at batch flush or the next
- * intel_upload_space()/intel_upload_data().
+ * \note The returned pointer is valid only until brw_upload_finish().
*
* \param out_bo Pointer to a BO, which must point to a valid BO or NULL on
* entry, and will have a reference to the new BO containing the state on
* \param out_offset Offset within the buffer object that the data will land.
*/
void *
-intel_upload_space(struct brw_context *brw,
- uint32_t size,
- uint32_t alignment,
- struct brw_bo **out_bo,
- uint32_t *out_offset)
+brw_upload_space(struct brw_uploader *upload,
+ uint32_t size,
+ uint32_t alignment,
+ struct brw_bo **out_bo,
+ uint32_t *out_offset)
{
uint32_t offset;
- offset = ALIGN_NPOT(brw->upload.next_offset, alignment);
- if (brw->upload.bo && offset + size > brw->upload.bo->size) {
- intel_upload_finish(brw);
+ /* Roll over to a fresh BO when the aligned request won't fit in the
+ * current one (offset 0 satisfies any alignment).
+ */
+ offset = ALIGN_NPOT(upload->next_offset, alignment);
+ if (upload->bo && offset + size > upload->bo->size) {
+ brw_upload_finish(upload);
offset = 0;
}
- assert((brw->upload.bo == NULL) == (brw->upload.map == NULL));
- if (!brw->upload.bo) {
- brw->upload.bo = brw_bo_alloc(brw->bufmgr, "streamed data",
- MAX2(INTEL_UPLOAD_SIZE, size), 4096);
- brw->upload.map = brw_bo_map(brw, brw->upload.bo, MAP_READ | MAP_WRITE);
+ assert((upload->bo == NULL) == (upload->map == NULL));
+ if (!upload->bo) {
+ /* Allocate at least default_size so small requests share one BO. */
+ upload->bo = brw_bo_alloc(upload->bufmgr, "streamed data",
+ MAX2(upload->default_size, size), 4096);
+ /* NOTE(review): the context argument changed from brw to NULL here —
+ * confirm brw_bo_map tolerates a NULL context on this path.
+ */
+ upload->map = brw_bo_map(NULL, upload->bo, MAP_READ | MAP_WRITE);
}
- brw->upload.next_offset = offset + size;
+ upload->next_offset = offset + size;
*out_offset = offset;
- if (*out_bo != brw->upload.bo) {
+ /* Swap the caller's BO reference for a reference on the upload BO. */
+ if (*out_bo != upload->bo) {
brw_bo_unreference(*out_bo);
- *out_bo = brw->upload.bo;
- brw_bo_reference(brw->upload.bo);
+ *out_bo = upload->bo;
+ brw_bo_reference(upload->bo);
}
- return brw->upload.map + offset;
+ return upload->map + offset;
}
/**
* References to this memory should not be retained across batch flushes.
*/
+/*
+ * Convenience wrapper over brw_upload_space(): reserve space in the upload
+ * BO and memcpy \p data into it.
+ */
void
-intel_upload_data(struct brw_context *brw,
- const void *data,
- uint32_t size,
- uint32_t alignment,
- struct brw_bo **out_bo,
- uint32_t *out_offset)
+brw_upload_data(struct brw_uploader *upload,
+ const void *data,
+ uint32_t size,
+ uint32_t alignment,
+ struct brw_bo **out_bo,
+ uint32_t *out_offset)
{
- void *dst = intel_upload_space(brw, size, alignment, out_bo, out_offset);
+ void *dst = brw_upload_space(upload, size, alignment, out_bo, out_offset);
memcpy(dst, data, size);
}
+
+/*
+ * Initialize an uploader to the empty state.  No BO is allocated until the
+ * first brw_upload_space()/brw_upload_data() call.
+ */
+void
+brw_upload_init(struct brw_uploader *upload,
+ struct brw_bufmgr *bufmgr,
+ unsigned default_size)
+{
+ upload->bufmgr = bufmgr;
+ upload->bo = NULL;
+ upload->map = NULL;
+ upload->next_offset = 0;
+ upload->default_size = default_size;
+}