if (array->vbo) {
yagl_gles_buffer_release(array->vbo);
array->vbo = NULL;
+ array->vbo_local_name = 0;
array->offset = 0;
} else {
array->target_data = 0;
if (array->vbo) {
yagl_sharegroup_reap_object(array->ctx->base.sg, &array->vbo->base);
array->vbo = NULL;
+ array->vbo_local_name = 0;
array->offset = 0;
} else {
array->target_data = 0;
GLboolean normalized,
GLsizei stride,
struct yagl_gles_buffer *vbo,
+ yagl_object_name vbo_local_name,
GLint offset)
{
if (!yagl_get_el_size(type, &array->el_size)) {
}
array->vbo = vbo;
+ array->vbo_local_name = vbo_local_name;
array->offset = offset;
return true;
bool enabled;
struct yagl_gles_buffer *vbo;
+ yagl_object_name vbo_local_name;
union
{
GLboolean normalized,
GLsizei stride,
struct yagl_gles_buffer *vbo,
+ yagl_object_name vbo_local_name,
GLint offset);
/*
#include <GLES2/gl2.h>
#include "yagl_gles2_validate.h"
+
+bool yagl_gles2_get_array_param_count(GLenum pname, int *count)
+{
+ switch (pname) {
+ case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING: *count = 1; break;
+ case GL_VERTEX_ATTRIB_ARRAY_ENABLED: *count = 1; break;
+ case GL_VERTEX_ATTRIB_ARRAY_SIZE: *count = 1; break;
+ case GL_VERTEX_ATTRIB_ARRAY_STRIDE: *count = 1; break;
+ case GL_VERTEX_ATTRIB_ARRAY_TYPE: *count = 1; break;
+ case GL_VERTEX_ATTRIB_ARRAY_NORMALIZED: *count = 1; break;
+ case GL_CURRENT_VERTEX_ATTRIB: *count = 4; break;
+ default: return false;
+ }
+ return true;
+}
#include "yagl_types.h"
+bool yagl_gles2_get_array_param_count(GLenum pname, int *count);
+
#endif
#include "yagl_gles2_context.h"
#include "yagl_gles2_shader.h"
#include "yagl_gles2_program.h"
+#include "yagl_gles2_validate.h"
#include "yagl_egl_interface.h"
#include "yagl_tls.h"
#include "yagl_log.h"
/*
 * Host-side glGetVertexAttribiv: answers per-array queries from the
 * shadow vertex array state kept in 'array', and forwards anything
 * else (e.g. GL_CURRENT_VERTEX_ATTRIB) to the host GL driver.
 * 'params_' is a guest address; results are copied back to the guest
 * with yagl_mem_put_GLint, one GLint per element.
 */
void yagl_host_glGetVertexAttribiv(GLuint index,
GLenum pname,
- target_ulong /* GLint* */ params)
+ target_ulong /* GLint* */ params_)
{
- YAGL_UNIMPLEMENTED(glGetVertexAttribiv);
+ struct yagl_gles_array *array = NULL;
+ int i, count = 0;
+ GLint *params = NULL;
+
+ YAGL_GET_CTX(glGetVertexAttribiv);
+
/* Out-of-range attribute index -> GL_INVALID_VALUE, per GLES spec. */
+ array = yagl_gles_context_get_array(&ctx->base, index);
+
+ if (!array) {
+ YAGL_SET_ERR(GL_INVALID_VALUE);
+ goto out;
+ }
+
/* Unknown pname -> GL_INVALID_ENUM; also yields the element count. */
+ if (!yagl_gles2_get_array_param_count(pname, &count)) {
+ YAGL_SET_ERR(GL_INVALID_ENUM);
+ goto out;
+ }
+
/* Zero-filled scratch buffer sized by the validated element count. */
+ params = g_malloc0(count * sizeof(*params));
+
+ switch (pname) {
+ case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING:
/* Guest-visible buffer name, tracked alongside the host VBO object. */
+ params[0] = array->vbo_local_name;
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_ENABLED:
+ params[0] = array->enabled;
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_SIZE:
+ params[0] = array->size;
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_STRIDE:
+ params[0] = array->stride;
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_TYPE:
+ params[0] = array->type;
+ break;
+ case GL_VERTEX_ATTRIB_ARRAY_NORMALIZED:
+ params[0] = array->normalized;
+ break;
+ default:
/* Not shadowed locally (GL_CURRENT_VERTEX_ATTRIB, count == 4):
 * ask the real driver to fill the buffer. */
+ ctx->driver_ps->GetVertexAttribiv(ctx->driver_ps, index, pname, params);
+ break;
+ }
+
/* NULL guest pointer is tolerated: compute but don't write back. */
+ if (params_) {
+ for (i = 0; i < count; ++i) {
+ yagl_mem_put_GLint(gles2_api_ts->ts,
+ params_ + (i * sizeof(*params)),
+ params[i]);
+ }
+ }
+
+out:
/* g_free(NULL) is a no-op, so the error paths need no guard. */
+ g_free(params);
}
void yagl_host_glGetVertexAttribPointerv(GLuint index,
normalized,
stride,
ctx->base.vbo,
+ ctx->base.vbo_local_name,
ptr)) {
YAGL_SET_ERR(GL_INVALID_VALUE);
}
target_ulong /* GLfloat* */ params);
void yagl_host_glGetVertexAttribiv(GLuint index,
GLenum pname,
- target_ulong /* GLint* */ params);
+ target_ulong /* GLint* */ params_);
void yagl_host_glGetVertexAttribPointerv(GLuint index,
GLenum pname,
target_ulong /* GLvoid** */ pointer);
/*
 * Per-user state of the YaGL device. One slot per activated guest user.
 */
struct yagl_user
{
- target_phys_addr_t buff_pa;
/* Host mapping of the guest marshal buffer; non-NULL while the user
 * is activated (established once at activation, replacing the old
 * scheme of remapping the guest physical address on every operation). */
+ uint8_t *buff;
yagl_pid process_id;
yagl_tid thread_id;
};
{
yagl_pid target_pid;
yagl_tid target_tid;
- target_phys_addr_t buff_len = YAGL_BUFF_SIZE;
+ target_phys_addr_t buff_len = YAGL_MARSHAL_SIZE;
uint8_t *buff = NULL, *tmp = NULL;
YAGL_LOG_FUNC_ENTER_NPT(yagl_device_operate,
user_index,
(uint32_t)buff_pa);
- if (buff_pa && s->users[user_index].buff_pa) {
+ if (buff_pa && s->users[user_index].buff) {
YAGL_LOG_CRITICAL("user %d is already activated", user_index);
goto out;
}
- if (!buff_pa && !s->users[user_index].buff_pa) {
+ if (!buff_pa && !s->users[user_index].buff) {
YAGL_LOG_CRITICAL("user %d is not activated", user_index);
goto out;
}
buff = cpu_physical_memory_map(buff_pa, &buff_len, false);
- if (!buff || (buff_len != YAGL_BUFF_SIZE)) {
+ if (!buff || (buff_len != YAGL_MARSHAL_SIZE)) {
YAGL_LOG_CRITICAL("cpu_physical_memory_map(read) failed for user %d, buff_ptr = 0x%X",
user_index,
(uint32_t)buff_pa);
target_tid,
buff,
s->in_buff)) {
- cpu_physical_memory_unmap(buff,
- YAGL_BUFF_SIZE,
- 0,
- YAGL_BUFF_SIZE);
-
- buff = cpu_physical_memory_map(buff_pa, &buff_len, true);
-
- if (!buff || (buff_len != YAGL_BUFF_SIZE)) {
- YAGL_LOG_CRITICAL("cpu_physical_memory_map(write) failed for user %d, buff_ptr = 0x%X",
- user_index,
- (uint32_t)buff_pa);
- goto out;
- }
memcpy(buff, s->in_buff, YAGL_MARSHAL_MAX_RESPONSE);
- s->users[user_index].buff_pa = buff_pa;
+ s->users[user_index].buff = buff;
s->users[user_index].process_id = target_pid;
s->users[user_index].thread_id = target_tid;
+ buff = NULL;
+
YAGL_LOG_INFO("user %d activated", user_index);
}
} else {
s->users[user_index].process_id,
s->users[user_index].thread_id);
+ cpu_physical_memory_unmap(s->users[user_index].buff,
+ YAGL_MARSHAL_SIZE,
+ 0,
+ YAGL_MARSHAL_SIZE);
+
memset(&s->users[user_index], 0, sizeof(s->users[user_index]));
YAGL_LOG_INFO("user %d deactivated", user_index);
out:
if (buff) {
cpu_physical_memory_unmap(buff,
- YAGL_BUFF_SIZE,
+ YAGL_MARSHAL_SIZE,
0,
- YAGL_BUFF_SIZE);
+ YAGL_MARSHAL_SIZE);
}
YAGL_LOG_FUNC_EXIT(NULL);
/*
 * Dispatches one marshalled call batch for an activated user.
 * The guest buffer is now mapped persistently at activation time
 * (users[i].buff), so the per-trigger map/unmap of the guest
 * physical address has been removed.
 */
static void yagl_device_trigger(YaGLState *s, int user_index)
{
- target_phys_addr_t buff_len = YAGL_BUFF_SIZE;
- uint8_t *buff = NULL;
-
YAGL_LOG_FUNC_ENTER_NPT(yagl_device_trigger, "%d", user_index);
/* A NULL mapping means the user was never activated; bail out. */
- if (!s->users[user_index].buff_pa) {
+ if (!s->users[user_index].buff) {
YAGL_LOG_CRITICAL("user %d not activated", user_index);
goto out;
}
- buff = cpu_physical_memory_map(s->users[user_index].buff_pa,
- &buff_len,
- false);
-
- if (!buff || (buff_len != YAGL_BUFF_SIZE)) {
- YAGL_LOG_CRITICAL("cpu_physical_memory_map(read) failed for user %d, buff_ptr = 0x%X",
- user_index,
- (uint32_t)s->users[user_index].buff_pa);
- goto out;
- }
-
/* Run the batch: input read from the persistent guest mapping,
 * intermediate response staged in s->in_buff. */
yagl_server_dispatch(s->ss,
s->users[user_index].process_id,
s->users[user_index].thread_id,
- buff,
+ s->users[user_index].buff,
s->in_buff);
/* Copy the response back into the guest-visible buffer. */
- memcpy(buff, s->in_buff, YAGL_MARSHAL_MAX_RESPONSE);
+ memcpy(s->users[user_index].buff, s->in_buff, YAGL_MARSHAL_MAX_RESPONSE);
out:
/* No unmap here anymore: the mapping lives until deactivation. */
- if (buff) {
- cpu_physical_memory_unmap(buff,
- YAGL_BUFF_SIZE,
- 0,
- YAGL_BUFF_SIZE);
- }
-
YAGL_LOG_FUNC_EXIT(NULL);
}
#define YAGL_MARSHAL_MAX_RESPONSE (8 * 2)
/*
- * Max batch buffer size.
+ * Max marshal buffer size.
*/
-#define YAGL_BUFF_SIZE 0x1000
+#define YAGL_MARSHAL_SIZE 0x1000
static __inline int yagl_marshal_skip(uint8_t** buff)
{
struct yagl_api_ps *api_ps;
yagl_api_func func;
- if (current_buff >= (ts->current_out_buff + YAGL_BUFF_SIZE)) {
+ if (current_buff >= (ts->current_out_buff + YAGL_MARSHAL_SIZE)) {
YAGL_LOG_CRITICAL("batch passes the end of buffer, protocol error");
ret = false;
/*
* Whenever protocol changes be sure to bump this.
*/
-#define YAGL_VERSION 3
+#define YAGL_VERSION 4
#endif