AC_LANG_POP([Objective C])])
])
-AC_INIT([evas], [v_ver.beta2], [enlightenment-devel@lists.sourceforge.net])
+AC_INIT([evas], [v_ver.beta3], [enlightenment-devel@lists.sourceforge.net])
AC_PREREQ([2.52])
AC_CONFIG_SRCDIR([configure.ac])
AC_CONFIG_MACRO_DIR([m4])
## NEON
build_cpu_neon="no"
case $host_cpu in
- armv7*)
+ arm*)
build_cpu_neon="yes"
;;
esac
AC_MSG_CHECKING(whether to build neon code)
AC_ARG_ENABLE(cpu-neon,
- AC_HELP_STRING([--enable-cpu-neon], [enable neon code - with gcc you will need these CFLAGS for it to begin to work, and even then your gcc may have broken or non-existent support: -mcpu=cortex-a8 -mfloat-abi=softfp -mfpu=neon]),
+ AC_HELP_STRING([--enable-cpu-neon], [build neon code, the code will be
+ checked at run time to see if the CPU supports it]),
[
if test "x$enableval" = "xyes" ; then
- AC_TRY_COMPILE([#include <arm_neon.h>],
- [asm volatile ("vqadd.u8 d0, d1, d0\n")],
+ AC_TRY_COMPILE([],
+ [asm volatile (".fpu neon\nvqadd.u8 d0, d1, d0\n")],
[
AC_MSG_RESULT(yes)
AC_DEFINE(BUILD_NEON, 1, [Build NEON Code])
evas_image_loader_[]$1[]_cflags=""
evas_image_loader_[]$1[]_libs=""
-PKG_CHECK_MODULES([SVG], [librsvg-2.0 >= 2.14.0],
- [have_dep="yes" requirement="librsvg-2.0"],
+PKG_CHECK_MODULES([SVG], [librsvg-2.0 >= 2.14.0
+ cairo >= 1.0.0],
+ [have_dep="yes" requirement="librsvg-2.0 cairo"],
[have_svg="no"]
)
void **rows; /**< an array of pointers (size depends on format) pointing to left edge of each scanline */
};
+/* magic version number to know what the native surf struct looks like */
+#define EVAS_NATIVE_SURFACE_VERSION 2
+
+typedef enum _Evas_Native_Surface_Type
+{
+ EVAS_NATIVE_SURFACE_NONE,
+ EVAS_NATIVE_SURFACE_X11,
+ EVAS_NATIVE_SURFACE_OPENGL
+} Evas_Native_Surface_Type;
+
struct _Evas_Native_Surface
{
+ int version;
+ Evas_Native_Surface_Type type;
union {
- struct {
- void *visual;
- unsigned long pixmap;
- } x11;
- struct { /* padding data for future use - have space for 8 pointers */
- void *d[8];
- } padding;
+ struct {
+ void *visual; /**< visual of the pixmap to use (Visual) */
+ unsigned long pixmap; /**< pixmap id to use (Pixmap) */
+ } x11;
+ struct {
+ unsigned int texture_id; /**< opengl texture id to use from glGenTextures() */
+ unsigned int framebuffer_id; /**< 0 if not a FBO, FBO id otherwise from glGenFramebuffers() */
+ unsigned int internal_format; /**< same as 'internalFormat' for glTexImage2D() */
+ unsigned int format; /**< same as 'format' for glTexImage2D() */
+ unsigned int x, y, w, h; /**< region inside the texture to use (image size is assumed as texture size, with 0, 0 being the top-left and co-ordinates working down to the right and bottom being positive) */
+ } opengl;
} data;
};
{
Evas_Coord x1, x2, y1, y2;
const Evas_Map_Point *p, *p_end;
- int ch;
+ Eina_Bool ch = EINA_FALSE;
if (!obj->cur.map) return;
+ if (obj->prev.map)
+ {
+ if (obj->prev.map->count == obj->cur.map->count)
+ {
+ const Evas_Map_Point *p2;
+
+ p = obj->cur.map->points;
+ p_end = p + obj->cur.map->count;
+        p2 = obj->prev.map->points;
+
+ for (; p < p_end; p++, p2++)
+ if (p->a != p2->a ||
+ p->r != p2->r ||
+ p->g != p2->g ||
+ p->b != p2->b)
+ {
+ ch = 1;
+ break;
+ }
+
+ }
+ else
+ {
+ ch = 1;
+ }
+ }
+
p = obj->cur.map->points;
p_end = p + obj->cur.map->count;
x1 = p->x;
if (p->y < y1) y1 = p->y;
if (p->y > y2) y2 = p->y;
}
- ch = 0;
if (obj->cur.map->normal_geometry.x != x1) ch = 1;
if (obj->cur.map->normal_geometry.y != y1) ch = 1;
if (obj->cur.map->normal_geometry.w != (x2 - x1)) ch = 1;
return;
MAGIC_CHECK_END();
if (!obj->layer->evas->engine.func->image_native_set) return;
+ if ((surf) &&
+ ((surf->version < 2) ||
+ (surf->version > EVAS_NATIVE_SURFACE_VERSION))) return;
o->engine_data =
obj->layer->evas->engine.func->image_native_set(obj->layer->evas->engine.data.output,
o->engine_data,
_evas_textblock_node_format_last_at_off(const Evas_Object_Textblock_Node_Format *n)
{
const Evas_Object_Textblock_Node_Format *nnode;
+ const Evas_Object_Textblock_Node_Text *tnode;
if (!n) return NULL;
nnode = n;
+ tnode = n->text_node;
do
{
n = nnode;
nnode = _NODE_FORMAT(EINA_INLIST_GET(nnode)->next);
}
- while (nnode && (nnode->offset == 0));
+ while (nnode && (nnode->text_node == tnode) && (nnode->offset == 0));
return (Evas_Object_Textblock_Node_Format *) n;
}
uint32_t *tmp = (void *)37;
#define AP "evas_common_copy_rev_pixels_neon_"
asm volatile (
+ ".fpu neon \n\t"
// Can we do 32 byte?
"andS %[tmp], %[d], $0x1f \n\t"
"beq "AP"quadstart \n\t"
e = dst + len;
#define AP "evas_common_copy_pixels_neon_"
asm volatile (
+ ".fpu neon \n\t"
// Can we do 32 byte?
"andS %[tmp], %[d], $0x1f \n\t"
"beq "AP"quadstart \n\t"
} else {
#define AP "convert_rgba32_rot_90_"
asm volatile (
+ ".fpu neon \n\t"
" mov %[s1], %[src] \n\t"
" add %[s1], %[h],lsl #2 \n\t"
" sub %[s1], #8 \n\t"
//#if defined(__ARM_ARCH__) && (__ARM_ARCH__ >= 70)
#ifdef BUILD_NEON
asm volatile (
+ ".fpu neon \n\t"
"vqadd.u8 d0, d1, d0\n"
);
#endif
if (getenv("EVAS_CPU_NO_VIS"))
cpu_feature_mask &= ~CPU_FEATURE_VIS;
#endif /* __SPARC__ */
-#if defined(__ARM_ARCH__) && (__ARM_ARCH__ >= 70)
+#if defined(__ARM_ARCH__)
#ifdef BUILD_NEON
cpu_feature_mask |= CPU_FEATURE_NEON *
evas_common_cpu_feature_test(evas_common_cpu_neon_test);
EAPI void
evas_common_image_image_all_unload(void)
{
- evas_common_rgba_image_scalecache_flush();
+ evas_common_rgba_image_scalecache_dump();
evas_cache_image_unload_all(eci);
}
}
EAPI void
+evas_common_rgba_image_scalecache_dump(void)
+{
+#ifdef SCALECACHE
+ int t;
+ LKL(cache_lock);
+ t = max_cache_size;
+ max_cache_size = 0;
+ _cache_prune(NULL, 0);
+ max_cache_size = t;
+ LKU(cache_lock);
+#endif
+}
+
+EAPI void
evas_common_rgba_image_scalecache_flush(void)
{
#ifdef SCALECACHE
DATA32 *e, *tmp = 0;
#define AP "B_C_DP"
asm volatile (
+ ".fpu neon \n\t"
"vdup.u32 q6, %[c] \n\t"
"vmov.i8 q5, #1 \n\t"
"vmvn.u8 q7,q6 \n\t"
"vld1.32 d0[0], [%[d]] \n\t"
// Only touch d1
"vmull.u8 q0, d0, d14 \n\t"
- "vshrn.u16 d0, q0, #8 \n\t"
+ "vqrshrn.u16 d0, q0, #8 \n\t"
"vadd.u8 d0, d12, d0 \n\t"
"vst1.32 d0[0], [%[d]] \n\t"
AP "dualloopint: \n\t"
"vldr.32 d0, [%[d]] \n\t"
"vmull.u8 q1, d0, d14 \n\t"
- "vshrn.u16 d0, q1, #8 \n\t"
+ "vqrshrn.u16 d0, q1, #8 \n\t"
"vqadd.u8 d0, d0, d12 \n\t"
"vstm %[d]!, {d0} \n\t"
"vmull.u8 q4, d2, d14 \n\t"
"vmull.u8 q5, d3, d15 \n\t"
- "vshrn.u16 d0, q2, #8 \n\t"
- "vshrn.u16 d1, q3, #8 \n\t"
- "vshrn.u16 d2, q4, #8 \n\t"
- "vshrn.u16 d3, q5, #8 \n\t"
+ "vqrshrn.u16 d0, q2, #8 \n\t"
+ "vqrshrn.u16 d1, q3, #8 \n\t"
+ "vqrshrn.u16 d2, q4, #8 \n\t"
+ "vqrshrn.u16 d3, q5, #8 \n\t"
"vqadd.u8 q0, q6, q0 \n\t"
"vqadd.u8 q1, q6, q1 \n\t"
AP "dualloop2int: \n\t"
"vldr.64 d0, [%[d]] \n\t"
"vmull.u8 q1, d0, d14 \n\t"
- "vshrn.u16 d0, q1, #8 \n\t"
+ "vqrshrn.u16 d0, q1, #8 \n\t"
"vqadd.u8 d0, d0, d12 \n\t"
"vstr.64 d0, [%[d]] \n\t"
AP "singleloop2: \n\t"
"vld1.32 d0[0], [%[d]] \n\t"
"vmull.u8 q1, d0, d14 \n\t"
- "vshrn.u16 d0, q1, #8 \n\t"
+ "vqrshrn.u16 d0, q1, #8 \n\t"
"vqadd.u8 d0, d0, d12 \n\t"
"vst1.32 d0[0], [%[d]] \n\t"
#define AP "blend_mas_c_dp_"
asm volatile (
+ ".fpu neon \n\t"
" vdup.i32 q15, %[c] \n\t"
" vmov.i8 q14, #1 \n\t"
" vld1.32 d4[0], [%[d]] \n\t"
" vdup.u8 d0, d0[0] \n\t"
" vmull.u8 q4, d0, d30 \n\t"
- " vshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
" vmvn.u16 d14, d12 \n\t"
" vshr.u32 d16, d14, #24 \n\t"
" vmul.u32 d16, d16, d28 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
" vqadd.u8 d0, d0, d12 \n\t"
" vst1.32 d0[0], [%[d]]! \n\t"
" vmovl.u8 q0, d0 \n\t"
" vmul.u32 q0, q14 \n\t"
" vmull.u8 q4, d0, d30 \n\t"
- " vshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
" vmvn.u16 d14, d12 \n\t"
" vshr.u32 d16, d14, #24 \n\t"
" vmul.u32 d16, d16, d28 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
" vqadd.u8 q0, q0, q6 \n\t"
" vstm %[d]!, {d0} \n\t"
" vmull.u8 q5, d1, d31 \n\t"
// Shorten
- " vshrn.u16 d12, q4, #8 \n\t"
- " vshrn.u16 d13, q5, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d13, q5, #8 \n\t"
// extract negated alpha
" vmvn.u16 q7, q6 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
" vmull.u8 q8, d17, d5 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
- " vshrn.u16 d1, q8, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d1, q8, #8 \n\t"
// Add
" vqadd.u8 q0, q0, q6 \n\t"
" vmovl.u8 q0, d0 \n\t"
" vmul.u32 q0, q14 \n\t"
" vmull.u8 q4, d0, d30 \n\t"
- " vshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
" vmvn.u16 d14, d12 \n\t"
" vshr.u32 d16, d14, #24 \n\t"
" vmul.u32 d16, d16, d28 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
" vqadd.u8 q0, q0, q6 \n\t"
" vstm %[d]!, {d0} \n\t"
" vld1.32 d4[0], [%[d]] \n\t"
" vdup.u8 d0, d0[0] \n\t"
" vmull.u8 q4, d0, d30 \n\t"
- " vshrn.u16 d12, q4, #8 \n\t"
+ " vqrshrn.u16 d12, q4, #8 \n\t"
" vmvn.u16 d14, d12 \n\t"
" vshr.u32 d16, d14, #24 \n\t"
" vmul.u32 d16, d16, d28 \n\t"
" vmull.u8 q7, d16, d4 \n\t"
- " vshrn.u16 d0, q7, #8 \n\t"
+ " vqrshrn.u16 d0, q7, #8 \n\t"
" vqadd.u8 q0, q0, q6 \n\t"
" vst1.32 d0[0], [%[d]]! \n\t"
#define AP "_blend_mas_can_dp_neon_"
asm volatile (
+ ".fpu neon \n\t"
"vdup.u32 q9, %[c] \n\t"
"vmov.i8 q15, #1 \n\t"
"vmov.i8 q14, #0 \n\t"
_op_blend_p_c_dp_neon(DATA32 *s, DATA8 *m __UNUSED__, DATA32 c, DATA32 *d, int l) {
#define AP "blend_p_c_dp_"
asm volatile (
+ ".fpu neon \n\t"
// Load 'c'
- "vdup.u32 q7, %[c] \n\t"
- "vmov.i8 q6, #1 \n\t"
+ "vdup.u32 q7, %[c] \n\t"
+ "vmov.i8 q6, #1 \n\t"
// Choose a loop
"andS %[tmp], %[d], $0xf \n\t"
// Mulitply s * c (= sc)
"vmull.u8 q4, d0,d14 \n\t"
// sc in d8
- "vshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
// sca in d9
"vmvn.u32 d6, d4 \n\t"
/* d * alpha */
"vmull.u8 q4, d6, d2 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
"vqadd.u8 d2, d0, d4 \n\t"
// Mulitply s * c (= sc)
"vmull.u8 q4, d0,d14 \n\t"
// sc in d8
- "vshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
// sca in d9
"vmvn.u32 d6, d4 \n\t"
/* d * alpha */
"vmull.u8 q4, d6, d2 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
"vqadd.u8 d2, d0, d4 \n\t"
"vmull.u8 q5, d1,d14 \n\t"
// Get sc & sc alpha
- "vshrn.u16 d4, q4, #8 \n\t"
- "vshrn.u16 d5, q5, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d5, q5, #8 \n\t"
// sc is now in q2, 8bpp
// Shift out, then spread alpha for q2
"vmvn.u32 q3, q2 \n\t"
"vmull.u8 q4, d6,d2 \n\t"
"vmull.u8 q5, d7,d3 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
- "vshrn.u16 d1, q5, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d1, q5, #8 \n\t"
"vqadd.u8 q1, q0, q2 \n\t"
// Mulitply s * c (= sc)
"vmull.u8 q4, d0,d14 \n\t"
// sc in d8
- "vshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
// sca in d9
// XXX: I can probably squash one of these 3
/* d * alpha */
"vmull.u8 q4, d6, d2 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
"vqadd.u8 d2, d0, d4 \n\t"
// Mulitply s * c (= sc)
"vmull.u8 q4, d0,d14 \n\t"
// sc in d8
- "vshrn.u16 d4, q4, #8 \n\t"
+ "vqrshrn.u16 d4, q4, #8 \n\t"
// sca in d6
"vmvn.u32 d6, d4 \n\t"
/* d * alpha */
"vmull.u8 q4, d6, d2 \n\t"
- "vshrn.u16 d0, q4, #8 \n\t"
+ "vqrshrn.u16 d0, q4, #8 \n\t"
"vqadd.u8 d2, d0, d4 \n\t"
#define AP "_op_blend_pan_caa_dp_"
DATA32 *e = d + l, *tmp = (void*)73;
asm volatile (
+ ".fpu neon \n\t"
/* Set up 'c' */
"vdup.u8 d14, %[c] \n\t"
"vmov.i8 d15, #1 \n\t"
static void
_op_blend_pas_dp_mmx(DATA32 *s, DATA8 *m __UNUSED__, DATA32 c __UNUSED__, DATA32 *d, int l) {
+ _op_blend_p_dp_mmx(s, m, c, d, l);
+ return;
DATA32 *e = d + l;
pxor_r2r(mm0, mm0);
MOV_A2R(ALPHA_256, mm6)
_op_blend_p_dp_neon(DATA32 *s, DATA8 *m, DATA32 c, DATA32 *d, int l) {
#define AP "blend_p_dp_"
asm volatile (
+ ".fpu neon \n\t"
//** init
- "vmov.i8 q8, $0x1 \n\t"
+ "vmov.i8 q8, $0x1 \n\t"
AP "loopchoose: \n\t"
// If aligned already - straight to octs
- "andS %[tmp], %[d],$0x1f \n\t"
- "beq "AP"octloops \n\t"
+ "andS %[tmp], %[d],$0x1f \n\t"
+ "beq "AP"octloops \n\t"
- "andS %[tmp], %[d],$0xf \n\t"
- "beq "AP"quadloops \n\t"
+ "andS %[tmp], %[d],$0xf \n\t"
+ "beq "AP"quadloops \n\t"
- "andS %[tmp], %[d],$0x4 \n\t"
- "beq "AP"dualloop \n\t"
+ "andS %[tmp], %[d],$0x4 \n\t"
+ "beq "AP"dualloop \n\t"
// Only ever executes once, fall through to dual
AP "singleloop: \n\t"
"vmul.u32 d8, d16, d8 \n\t"
"vmull.u8 q6, d4,d8 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to 's'
"vqadd.u8 q2, q4,q0 \n\t"
"vmul.u32 d8, d16, d8 \n\t"
"vmull.u8 q6, d4,d8 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to 's'
"vqadd.u8 d4, d8,d0 \n\t"
"vstr d4, [%[d]] \n\t"
"vmull.u8 q2, d5,d9 \n\t"
// Shift & narrow it
- "vshrn.u16 d8, q6, #8 \n\t"
- "vshrn.u16 d9, q2, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d9, q2, #8 \n\t"
// Add to s
"vqadd.u8 q2, q4,q0 \n\t"
"cmp %[tmp], #32 \n\t"
"ble "AP"loopout \n\t"
- "sub %[tmp],%[e],#64 \n\t"
+ "sub %[tmp],%[e],#64 \n\t"
AP "octloopint:\n\t"
"cmp %[tmp], %[d]\n\t"
// Shift & narrow it
- "vshrn.u16 d8, q6, #8 \n\t"
- "vshrn.u16 d9, q2, #8 \n\t"
- "vshrn.u16 d10, q7, #8 \n\t"
- "vshrn.u16 d11, q3, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d9, q2, #8 \n\t"
+ "vqrshrn.u16 d10, q7, #8 \n\t"
+ "vqrshrn.u16 d11, q3, #8 \n\t"
// Add to s
"bhi "AP"octloopint\n\t"
AP "loopout: \n\t"
-//"sub %[tmp], %[d], #4\n\t"
-//"vmov.i16 d0, $0xff00 \n\t"
-//"vst1.32 d0[0], [%[tmp]] \n\t"
-
- "cmp %[d], %[e]\n\t"
- "beq "AP"done\n\t"
+ "cmp %[d], %[e] \n\t"
+ "beq "AP"done \n\t"
"sub %[tmp],%[e], %[d] \n\t"
"cmp %[tmp],$0x04 \n\t"
"ble "AP"singleloop2 \n\t"
"vmul.u32 d8, d16, d8 \n\t"
"vmull.u8 q6, d4,d8 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to 's'
"vqadd.u8 d4, d8,d0 \n\t"
"add %[d], #8 \n\t"
"cmp %[tmp], %[d] \n\t"
- "bhi "AP"dualloop2int \n\t"
+ "bhi "AP"dualloop2int \n\t"
// Single ??
"cmp %[e], %[d] \n\t"
"vmul.u32 d8, d8, d16 \n\t"
"vmull.u8 q6, d8,d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to 's'
"vqadd.u8 d0, d0,d8 \n\t"
"vst1.32 d0[0], [%[d]] \n\t"
#define AP "blend_pas_dp_"
DATA32 *e = d + l,*tmp = e + 32,*pl=(void*)912;
asm volatile (
+ ".fpu neon \n\t"
"vmov.i8 q8, #1 \n\t"
AP"loopchoose: \n\t"
// If aliged - go as fast we can
- "andS %[tmp], %[d], #31 \n\t"
+ "andS %[tmp], %[d], #31 \n\t"
"beq "AP"quadstart \n\t"
// See if we can at least do our double loop
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to s
"vqadd.u8 d0, d0,d8 \n\t"
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to s
"vqadd.u8 d0, d0,d8 \n\t"
"vshr.u32 q5, q5,$0x18 \n\t"
// Prepare to preload
- "add %[pl], %[s], #32\n\t"
+ "add %[pl], %[s], #32 \n\t"
// Mulitply into all fields
"vmul.u32 q4, q4, q8 \n\t"
"vmul.u32 q5, q5, q8 \n\t"
- "pld [%[pl]]\n\t"
+ "pld [%[pl]] \n\t"
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
"vmull.u8 q2, d9, d5 \n\t"
"vmull.u8 q3, d11, d7 \n\t"
- "add %[pl], %[d], #32\n\t"
+ "add %[pl], %[d], #32 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
- "vshrn.u16 d10, q7, #8 \n\t"
- "vshrn.u16 d9, q2, #8 \n\t"
- "vshrn.u16 d11, q3, #8 \n\t"
- "pld [%[pl]]\n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d10, q7, #8 \n\t"
+ "vqrshrn.u16 d9, q2, #8 \n\t"
+ "vqrshrn.u16 d11, q3, #8 \n\t"
+ "pld [%[pl]] \n\t"
"cmp %[tmp], %[pl] \n\t"
// Add to s
"cmp %[tmp],$0x04 \n\t"
"beq "AP"singleloop2 \n\t"
- "sub %[tmp],%[e],$0x7 \n\t"
+ "sub %[tmp],%[e],$0x7 \n\t"
AP"dualloop2: \n\t"
- "vldm %[s]!, {d0) \n\t"
- "vldm %[d], {d4} \n\t"
+ "vldm %[s]!, {d0) \n\t"
+ "vldm %[d], {d4} \n\t"
// Subtract from 255 (ie negate) and extract alpha channel
"vmvn.u8 d8, d0 \n\t"
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to s
"vqadd.u8 d0, d0,d8 \n\t"
"bhi "AP"dualloop2 \n\t"
// Single ??
- "cmp %[e], %[d] \n\t"
- "beq "AP"done \n\t"
+ "cmp %[e], %[d] \n\t"
+ "beq "AP"done \n\t"
AP "singleloop2: \n\t"
"vld1.32 d0[0], [%[s]] \n\t"
// Multiply out
"vmull.u8 q6, d8, d4 \n\t"
- "vshrn.u16 d8, q6, #8 \n\t"
+ "vqrshrn.u16 d8, q6, #8 \n\t"
// Add to s
"vqadd.u8 d0, d0,d8 \n\t"
#define AP "COPY_C_DP_"
uint32_t *e = d + l,*tmp;
asm volatile (
+ ".fpu neon \n\t"
"vdup.i32 q0, %[c] \n\t"
uint32_t *e;
e = d + l;
//#ifdef NEON_INSTRINSICS_OK
-#if 1
+#if 0
// odd this is faster than the below asm... :(
e -= 15;
uint32x4_t col1, col2, col3, col4;
s3 = s + 8;
s4 = s + 12;
asm volatile (
+ ".fpu neon \n\t"
"asmloop2:\n\t"
"cmp %[e], %[d]\n\t"
"vld1.32 {d16-d17}, [%[s]]!\n\t"
g += ((gg - g) * yap) >> 8;
b += ((bb - b) * yap) >> 8;
}
- *pbuf++ = ARGB_JOIN(a >> 4, r >> 4, g >> 4, b >> 4);
+ *pbuf++ = ARGB_JOIN(((a + (1 << 3)) >> 4),
+ ((r + (1 << 3)) >> 4),
+ ((g + (1 << 3)) >> 4),
+ ((b + (1 << 3)) >> 4));
xp++; xapp++;
}
g += ((gg - g) * yap) >> 8;
b += ((bb - b) * yap) >> 8;
}
- *pbuf++ = ARGB_JOIN(0xff, r >> 4, g >> 4, b >> 4);
+ *pbuf++ = ARGB_JOIN(0xff,
+ ((r + (1 << 3)) >> 4),
+ ((g + (1 << 3)) >> 4),
+ ((b + (1 << 3)) >> 4));
xp++; xapp++;
}
}
g += ((gg - g) * yap) >> 8;
b += ((bb - b) * yap) >> 8;
}
- *pbuf++ = ARGB_JOIN(0xff, r >> 4, g >> 4, b >> 4);
+ *pbuf++ = ARGB_JOIN(0xff,
+ ((r + (1 << 3)) >> 4),
+ ((g + (1 << 3)) >> 4),
+ ((b + (1 << 3)) >> 4));
xp++; xapp++;
}
g += (gx * j) >> 14;
b += (bx * j) >> 14;
}
- *pbuf++ = ARGB_JOIN(a >> 5, r >> 5, g >> 5, b >> 5);
+ *pbuf++ = ARGB_JOIN(((a + (1 << 4)) >> 5),
+ ((r + (1 << 4)) >> 5),
+ ((g + (1 << 4)) >> 5),
+ ((b + (1 << 4)) >> 5));
xp++; xapp++;
}
g += (gx * j) >> 14;
b += (bx * j) >> 14;
}
- *pbuf++ = ARGB_JOIN(0xff, r >> 5, g >> 5, b >> 5);
+ *pbuf++ = ARGB_JOIN(0xff,
+ ((r + (1 << 4)) >> 5),
+ ((g + (1 << 4)) >> 5),
+ ((b + (1 << 4)) >> 5));
xp++; xapp++;
}
}
g += (gx * j) >> 14;
b += (bx * j) >> 14;
}
- *pbuf++ = ARGB_JOIN(0xff, r >> 5, g >> 5, b >> 5);
+ *pbuf++ = ARGB_JOIN(0xff,
+ ((r + (1 << 4)) >> 5),
+ ((g + (1 << 4)) >> 5),
+ ((b + (1 << 4)) >> 5));
xp++; xapp++;
}
g += ((gg - g) * xap) >> 8;
b += ((bb - b) * xap) >> 8;
}
- *pbuf++ = ARGB_JOIN(a >> 4, r >> 4, g >> 4, b >> 4);
+ *pbuf++ = ARGB_JOIN(((a + (1 << 3)) >> 4),
+ ((r + (1 << 3)) >> 4),
+ ((g + (1 << 3)) >> 4),
+ ((b + (1 << 3)) >> 4));
xp++; xapp++;
}
g += ((gg - g) * xap) >> 8;
b += ((bb - b) * xap) >> 8;
}
- *pbuf++ = ARGB_JOIN(0xff, r >> 4, g >> 4, b >> 4);
+ *pbuf++ = ARGB_JOIN(0xff,
+ ((r + (1 << 3)) >> 4),
+ ((g + (1 << 3)) >> 4),
+ ((b + (1 << 3)) >> 4));
xp++; xapp++;
}
}
g += ((gg - g) * xap) >> 8;
b += ((bb - b) * xap) >> 8;
}
- *pbuf++ = ARGB_JOIN(0xff, r >> 4, g >> 4, b >> 4);
+ *pbuf++ = ARGB_JOIN(0xff,
+ ((r + (1 << 3)) >> 4),
+ ((g + (1 << 3)) >> 4),
+ ((b + (1 << 3)) >> 4));
xp++; xapp++;
}
+#include <math.h>
#include <evas_common_soft16.h>
#include "evas_soft16_scanline_fill.c"
-#include <math.h>
typedef struct _RGBA_Edge RGBA_Edge;
typedef struct _RGBA_Vertex RGBA_Vertex;
/*****************************************************************************/
-//#if defined(__ARM_ARCH__) && (__ARM_ARCH__ >= 70)
-#ifdef BUILD_NEON
-# include <arm_neon.h>
-#endif
-//#endif
-
#ifdef __cplusplus
}
#endif
re = (Render_Engine *)data;
/* get the upate rect surface - return engine data as dummy */
if (!re->win->draw.redraw) return NULL;
+ eng_window_use(NULL);
+ eng_window_use(re->win);
if (!_re_wincheck(re)) return NULL;
evas_gl_common_context_flush(re->win->gl_context);
- eng_window_use(re->win);
evas_gl_common_context_newframe(re->win->gl_context);
if (x) *x = re->win->draw.x1;
if (y) *y = re->win->draw.y1;
Evas_GL_Image *im = image;
Native *n = im->native.data;
+ if (n->ns.type == EVAS_NATIVE_SURFACE_X11)
+ {
#if defined (GLES_VARIETY_S3C6410) || defined (GLES_VARIETY_SGX)
- if (n->egl_surface)
- {
- if (glsym_glEGLImageTargetTexture2DOES)
- {
- glsym_glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, n->egl_surface);
- if (eglGetError() != EGL_SUCCESS)
- ERR("glEGLImageTargetTexture2DOES() failed.");
- }
- else
- ERR("Try glEGLImageTargetTexture2DOES on EGL with no support");
- }
+ if (n->egl_surface)
+ {
+ if (glsym_glEGLImageTargetTexture2DOES)
+ {
+ glsym_glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, n->egl_surface);
+ if (eglGetError() != EGL_SUCCESS)
+ ERR("glEGLImageTargetTexture2DOES() failed.");
+ }
+ else
+ ERR("Try glEGLImageTargetTexture2DOES on EGL with no support");
+ }
#else
# ifdef GLX_BIND_TO_TEXTURE_TARGETS_EXT
- Render_Engine *re = data;
-
- if (glsym_glXBindTexImage)
- {
- glsym_glXBindTexImage(re->win->disp, n->glx_pixmap,
- GLX_FRONT_LEFT_EXT, NULL);
- GLERR(__FUNCTION__, __FILE__, __LINE__, "");
- }
- else
- ERR("Try glXBindTexImage on GLX with no support");
+ Render_Engine *re = data;
+
+ if (glsym_glXBindTexImage)
+ {
+ glsym_glXBindTexImage(re->win->disp, n->glx_pixmap,
+ GLX_FRONT_LEFT_EXT, NULL);
+ GLERR(__FUNCTION__, __FILE__, __LINE__, "");
+ }
+ else
+ ERR("Try glXBindTexImage on GLX with no support");
# endif
#endif
+ }
+ else if (n->ns.type == EVAS_NATIVE_SURFACE_OPENGL)
+ {
+ // FIXME: implement
+ }
}
static void
_native_unbind_cb(void *data, void *image)
{
- Evas_GL_Image *im = image;
-
+ Evas_GL_Image *im = image;
+ Native *n = im->native.data;
+
+ if (n->ns.type == EVAS_NATIVE_SURFACE_X11)
+ {
#if defined (GLES_VARIETY_S3C6410) || defined (GLES_VARIETY_SGX)
- // nothing
+ // nothing
#else
# ifdef GLX_BIND_TO_TEXTURE_TARGETS_EXT
- Render_Engine *re = data;
- Native *n = im->native.data;
-
- if (glsym_glXReleaseTexImage)
- {
- glsym_glXReleaseTexImage(re->win->disp, n->glx_pixmap,
- GLX_FRONT_LEFT_EXT);
- GLERR(__FUNCTION__, __FILE__, __LINE__, "");
- }
- else
- ERR("Try glXReleaseTexImage on GLX with no support");
+ Render_Engine *re = data;
+
+ if (glsym_glXReleaseTexImage)
+ {
+ glsym_glXReleaseTexImage(re->win->disp, n->glx_pixmap,
+ GLX_FRONT_LEFT_EXT);
+ GLERR(__FUNCTION__, __FILE__, __LINE__, "");
+ }
+ else
+ ERR("Try glXReleaseTexImage on GLX with no support");
# endif
#endif
+ }
+ else if (n->ns.type == EVAS_NATIVE_SURFACE_OPENGL)
+ {
+ // FIXME: implement
+ }
}
static void
_native_free_cb(void *data, void *image)
{
- Render_Engine *re = data;
- Evas_GL_Image *im = image;
- Native *n = im->native.data;
- uint32_t pmid;
+ Render_Engine *re = data;
+ Evas_GL_Image *im = image;
+ Native *n = im->native.data;
+ uint32_t pmid;
- pmid = n->pixmap;
- eina_hash_del(re->win->gl_context->shared->native_hash, &pmid, im);
+ if (n->ns.type == EVAS_NATIVE_SURFACE_X11)
+ {
+ pmid = n->pixmap;
+ eina_hash_del(re->win->gl_context->shared->native_hash, &pmid, im);
#if defined (GLES_VARIETY_S3C6410) || defined (GLES_VARIETY_SGX)
- if (n->egl_surface)
- {
- if (glsym_eglDestroyImage)
- {
- glsym_eglDestroyImage(re->win->egl_disp,
- n->egl_surface);
- if (eglGetError() != EGL_SUCCESS)
- ERR("eglDestroyImage() failed.");
- }
- else
- ERR("Try eglDestroyImage on EGL with no support");
- }
+ if (n->egl_surface)
+ {
+ if (glsym_eglDestroyImage)
+ {
+ glsym_eglDestroyImage(re->win->egl_disp,
+ n->egl_surface);
+ if (eglGetError() != EGL_SUCCESS)
+ ERR("eglDestroyImage() failed.");
+ }
+ else
+ ERR("Try eglDestroyImage on EGL with no support");
+ }
#else
# ifdef GLX_BIND_TO_TEXTURE_TARGETS_EXT
- if (n->glx_pixmap)
- {
- if (im->native.loose)
- {
- if (glsym_glXReleaseTexImage)
- {
+ if (n->glx_pixmap)
+ {
+ if (im->native.loose)
+ {
+ if (glsym_glXReleaseTexImage)
+ {
glsym_glXReleaseTexImage(re->win->disp, n->glx_pixmap,
GLX_FRONT_LEFT_EXT);
GLERR(__FUNCTION__, __FILE__, __LINE__, "");
- }
- else
- ERR("Try glXReleaseTexImage on GLX with no support");
- }
- if (glsym_glXDestroyPixmap)
- {
- glsym_glXDestroyPixmap(re->win->disp, n->glx_pixmap);
- GLERR(__FUNCTION__, __FILE__, __LINE__, "");
- }
- else
- ERR("Try glXDestroyPixmap on GLX with no support");
- n->glx_pixmap = 0;
- }
+ }
+ else
+ ERR("Try glXReleaseTexImage on GLX with no support");
+ }
+ if (glsym_glXDestroyPixmap)
+ {
+ glsym_glXDestroyPixmap(re->win->disp, n->glx_pixmap);
+ GLERR(__FUNCTION__, __FILE__, __LINE__, "");
+ }
+ else
+ ERR("Try glXDestroyPixmap on GLX with no support");
+ n->glx_pixmap = 0;
+ }
# endif
#endif
- im->native.data = NULL;
- im->native.func.data = NULL;
- im->native.func.bind = NULL;
- im->native.func.unbind = NULL;
- im->native.func.free = NULL;
- free(n);
+ }
+ else if (n->ns.type == EVAS_NATIVE_SURFACE_OPENGL)
+ {
+ // FIXME: implement
+ }
+ im->native.data = NULL;
+ im->native.func.data = NULL;
+ im->native.func.bind = NULL;
+ im->native.func.unbind = NULL;
+ im->native.func.free = NULL;
+ free(n);
}
static void *
eng_image_native_set(void *data, void *image, void *native)
{
- Render_Engine *re = (Render_Engine *)data;
- Evas_Native_Surface *ns = native;
- Evas_GL_Image *im = image, *im2 = NULL;
- Visual *vis = NULL;
- Pixmap pm = 0;
- Native *n = NULL;
- uint32_t pmid;
-
- if (!im) return NULL;
-
- if (ns)
- {
- vis = ns->data.x11.visual;
- pm = ns->data.x11.pixmap;
- if (im->native.data)
- {
- Evas_Native_Surface *ens = im->native.data;
- if ((ens->data.x11.visual == vis) && (ens->data.x11.pixmap == pm))
+ Render_Engine *re = (Render_Engine *)data;
+ Evas_Native_Surface *ns = native;
+ Evas_GL_Image *im = image, *im2 = NULL;
+ Visual *vis = NULL;
+ Pixmap pm = 0;
+ Native *n = NULL;
+ uint32_t pmid;
+
+ if (!im) return NULL;
+
+ if (ns)
+ {
+ if (ns->type == EVAS_NATIVE_SURFACE_X11)
+ {
+ vis = ns->data.x11.visual;
+ pm = ns->data.x11.pixmap;
+ if (im->native.data)
+ {
+ Evas_Native_Surface *ens = im->native.data;
+ if ((ens->data.x11.visual == vis) &&
+ (ens->data.x11.pixmap == pm))
return im;
- }
- }
- if ((!ns) && (!im->native.data)) return im;
-
- eng_window_use(re->win);
-
- if (im->native.data)
- {
- if (im->native.func.free)
- im->native.func.free(im->native.func.data, im);
- evas_gl_common_image_native_disable(im);
- }
-
- pmid = pm;
- im2 = eina_hash_find(re->win->gl_context->shared->native_hash, &pmid);
- if (im2 == im) return im;
- if (im2)
- {
- n = im2->native.data;
- if (n)
- {
- im2->references++;
- evas_gl_common_image_free(im);
- return im2;
- }
- }
- im2 = evas_gl_common_image_new_from_data(re->win->gl_context,
- im->w, im->h, NULL, im->alpha,
- EVAS_COLORSPACE_ARGB8888);
- evas_gl_common_image_free(im);
- im = im2;
+ }
+ }
+ else if (ns->type == EVAS_NATIVE_SURFACE_OPENGL)
+ {
+ // FIXME: implement
+ }
+ }
+ if ((!ns) && (!im->native.data)) return im;
+
+ eng_window_use(re->win);
+
+ if (im->native.data)
+ {
+ if (im->native.func.free)
+ im->native.func.free(im->native.func.data, im);
+ evas_gl_common_image_native_disable(im);
+ }
+
+ if (!ns) return im;
+
+ if (ns->type == EVAS_NATIVE_SURFACE_X11)
+ {
+ pmid = pm;
+ im2 = eina_hash_find(re->win->gl_context->shared->native_hash, &pmid);
+ if (im2 == im) return im;
+ if (im2)
+ {
+ n = im2->native.data;
+ if (n)
+ {
+ im2->references++;
+ evas_gl_common_image_free(im);
+ return im2;
+ }
+ }
+ }
+ else if (ns->type == EVAS_NATIVE_SURFACE_OPENGL)
+ {
+ // FIXME: implement
+ }
+ im2 = evas_gl_common_image_new_from_data(re->win->gl_context,
+ im->w, im->h, NULL, im->alpha,
+ EVAS_COLORSPACE_ARGB8888);
+ evas_gl_common_image_free(im);
+ im = im2;
+ if (ns->type == EVAS_NATIVE_SURFACE_X11)
+ {
#if defined (GLES_VARIETY_S3C6410) || defined (GLES_VARIETY_SGX)
- if (native)
- {
- n = calloc(1, sizeof(Native));
- if (n)
- {
- EGLConfig egl_config;
- int config_attrs[20];
- int num_config, i = 0;
-
- eina_hash_add(re->win->gl_context->shared->native_hash, &pmid, im);
-
- config_attrs[i++] = EGL_RED_SIZE;
- config_attrs[i++] = 8;
- config_attrs[i++] = EGL_GREEN_SIZE;
- config_attrs[i++] = 8;
- config_attrs[i++] = EGL_BLUE_SIZE;
- config_attrs[i++] = 8;
- config_attrs[i++] = EGL_ALPHA_SIZE;
- config_attrs[i++] = 8;
- config_attrs[i++] = EGL_DEPTH_SIZE;
- config_attrs[i++] = 0;
- config_attrs[i++] = EGL_STENCIL_SIZE;
- config_attrs[i++] = 0;
- config_attrs[i++] = EGL_RENDERABLE_TYPE;
- config_attrs[i++] = EGL_OPENGL_ES2_BIT;
- config_attrs[i++] = EGL_SURFACE_TYPE;
- config_attrs[i++] = EGL_PIXMAP_BIT;
- config_attrs[i++] = EGL_NONE;
-
- if (!eglChooseConfig(re->win->egl_disp, config_attrs,
- &egl_config, 1, &num_config))
- ERR("eglChooseConfig() failed for pixmap 0x%x, num_config = %i", (unsigned int)pm, num_config);
- n->pixmap = pm;
- n->visual = vis;
- if (glsym_eglCreateImage)
+ if (native)
+ {
+ n = calloc(1, sizeof(Native));
+ if (n)
+ {
+ EGLConfig egl_config;
+ int config_attrs[20];
+ int num_config, i = 0;
+
+ eina_hash_add(re->win->gl_context->shared->native_hash, &pmid, im);
+
+ config_attrs[i++] = EGL_RED_SIZE;
+ config_attrs[i++] = 8;
+ config_attrs[i++] = EGL_GREEN_SIZE;
+ config_attrs[i++] = 8;
+ config_attrs[i++] = EGL_BLUE_SIZE;
+ config_attrs[i++] = 8;
+ config_attrs[i++] = EGL_ALPHA_SIZE;
+ config_attrs[i++] = 8;
+ config_attrs[i++] = EGL_DEPTH_SIZE;
+ config_attrs[i++] = 0;
+ config_attrs[i++] = EGL_STENCIL_SIZE;
+ config_attrs[i++] = 0;
+ config_attrs[i++] = EGL_RENDERABLE_TYPE;
+ config_attrs[i++] = EGL_OPENGL_ES2_BIT;
+ config_attrs[i++] = EGL_SURFACE_TYPE;
+ config_attrs[i++] = EGL_PIXMAP_BIT;
+ config_attrs[i++] = EGL_NONE;
+
+ if (!eglChooseConfig(re->win->egl_disp, config_attrs,
+ &egl_config, 1, &num_config))
+ ERR("eglChooseConfig() failed for pixmap 0x%x, num_config = %i", (unsigned int)pm, num_config);
+ memcpy(&(n->ns), ns, sizeof(Evas_Native_Surface));
+ n->pixmap = pm;
+ n->visual = vis;
+ if (glsym_eglCreateImage)
n->egl_surface = glsym_eglCreateImage(re->win->egl_disp,
EGL_NO_CONTEXT,
EGL_NATIVE_PIXMAP_KHR,
(void *)pm,
NULL);
- else
- ERR("Try eglCreateImage on EGL with no support");
- if (!n->egl_surface)
- ERR("eglCreatePixmapSurface() for 0x%x failed", (unsigned int)pm);
- im->native.yinvert = 1;
- im->native.loose = 0;
- im->native.data = n;
- im->native.func.data = re;
- im->native.func.bind = _native_bind_cb;
- im->native.func.unbind = _native_unbind_cb;
- im->native.func.free = _native_free_cb;
- im->native.target = GL_TEXTURE_2D;
- im->native.mipmap = 0;
- evas_gl_common_image_native_enable(im);
- }
- }
+ else
+ ERR("Try eglCreateImage on EGL with no support");
+ if (!n->egl_surface)
+ ERR("eglCreatePixmapSurface() for 0x%x failed", (unsigned int)pm);
+ im->native.yinvert = 1;
+ im->native.loose = 0;
+ im->native.data = n;
+ im->native.func.data = re;
+ im->native.func.bind = _native_bind_cb;
+ im->native.func.unbind = _native_unbind_cb;
+ im->native.func.free = _native_free_cb;
+ im->native.target = GL_TEXTURE_2D;
+ im->native.mipmap = 0;
+ evas_gl_common_image_native_enable(im);
+ }
+ }
#else
# ifdef GLX_BIND_TO_TEXTURE_TARGETS_EXT
- if (native)
- {
- int dummy;
- unsigned int w, h, depth = 32, border;
- Window wdummy;
-
- // fixme: round trip :(
- XGetGeometry(re->win->disp, pm, &wdummy, &dummy, &dummy,
- &w, &h, &border, &depth);
- n = calloc(1, sizeof(Native));
- if (n)
- {
- int pixmap_att[20];
- unsigned int target = 0;
- unsigned int i = 0;
-
- eina_hash_add(re->win->gl_context->shared->native_hash, &pmid, im);
- if ((re->win->depth_cfg[depth].tex_target &
- GLX_TEXTURE_2D_BIT_EXT)
-// && (1) // we assume npo2 for now
- // size is pow2 || mnpo2 supported
- )
+ if (native)
+ {
+ int dummy;
+ unsigned int w, h, depth = 32, border;
+ Window wdummy;
+
+ // fixme: round trip :(
+ XGetGeometry(re->win->disp, pm, &wdummy, &dummy, &dummy,
+ &w, &h, &border, &depth);
+ n = calloc(1, sizeof(Native));
+ if (n)
+ {
+ int pixmap_att[20];
+ unsigned int target = 0;
+ unsigned int i = 0;
+
+ eina_hash_add(re->win->gl_context->shared->native_hash, &pmid, im);
+ if ((re->win->depth_cfg[depth].tex_target &
+ GLX_TEXTURE_2D_BIT_EXT)
+ // && (1) // we assume npo2 for now
+ // size is pow2 || mnpo2 supported
+ )
target = GLX_TEXTURE_2D_EXT;
- else if ((re->win->depth_cfg[depth].tex_target &
- GLX_TEXTURE_RECTANGLE_BIT_EXT))
- {
+ else if ((re->win->depth_cfg[depth].tex_target &
+ GLX_TEXTURE_RECTANGLE_BIT_EXT))
+ {
ERR("rect!!! (not handled)");
target = GLX_TEXTURE_RECTANGLE_EXT;
- }
- if (!target)
- {
+ }
+ if (!target)
+ {
ERR("broken text-from-pixmap");
if (!(re->win->depth_cfg[depth].tex_target &
GLX_TEXTURE_2D_BIT_EXT))
- target = GLX_TEXTURE_RECTANGLE_EXT;
+ target = GLX_TEXTURE_RECTANGLE_EXT;
else if (!(re->win->depth_cfg[depth].tex_target &
GLX_TEXTURE_RECTANGLE_BIT_EXT))
- target = GLX_TEXTURE_2D_EXT;
- }
-
-
- pixmap_att[i++] = GLX_TEXTURE_FORMAT_EXT;
- pixmap_att[i++] = re->win->depth_cfg[depth].tex_format;
- pixmap_att[i++] = GLX_MIPMAP_TEXTURE_EXT;
- pixmap_att[i++] = re->win->depth_cfg[depth].mipmap;
-
- if (target)
- {
+ target = GLX_TEXTURE_2D_EXT;
+ }
+
+
+ pixmap_att[i++] = GLX_TEXTURE_FORMAT_EXT;
+ pixmap_att[i++] = re->win->depth_cfg[depth].tex_format;
+ pixmap_att[i++] = GLX_MIPMAP_TEXTURE_EXT;
+ pixmap_att[i++] = re->win->depth_cfg[depth].mipmap;
+
+ if (target)
+ {
pixmap_att[i++] = GLX_TEXTURE_TARGET_EXT;
pixmap_att[i++] = target;
- }
-
- pixmap_att[i++] = 0;
-
- memcpy(&(n->ns), ns, sizeof(Evas_Native_Surface));
- n->pixmap = pm;
- n->visual = vis;
- n->fbc = re->win->depth_cfg[depth].fbc;
- if (glsym_glXCreatePixmap)
+ }
+
+ pixmap_att[i++] = 0;
+
+ memcpy(&(n->ns), ns, sizeof(Evas_Native_Surface));
+ n->pixmap = pm;
+ n->visual = vis;
+ n->fbc = re->win->depth_cfg[depth].fbc;
+ if (glsym_glXCreatePixmap)
n->glx_pixmap = glsym_glXCreatePixmap(re->win->disp,
n->fbc,
n->pixmap,
pixmap_att);
- else
- ERR("Try glXCreatePixmap on GLX with no support");
- if (n->glx_pixmap)
- {
+ else
+ ERR("Try glXCreatePixmap on GLX with no support");
+ if (n->glx_pixmap)
+ {
// printf("%p: new native texture for %x | %4i x %4i @ %2i = %p\n",
// n, pm, w, h, depth, n->glx_pixmap);
if (!target)
{
- ERR("no target :(");
- if (glsym_glXQueryDrawable)
- glsym_glXQueryDrawable(re->win->disp,
- n->pixmap,
- GLX_TEXTURE_TARGET_EXT,
- &target);
+ ERR("no target :(");
+ if (glsym_glXQueryDrawable)
+ glsym_glXQueryDrawable(re->win->disp,
+ n->pixmap,
+ GLX_TEXTURE_TARGET_EXT,
+ &target);
}
if (target == GLX_TEXTURE_2D_EXT)
{
- im->native.target = GL_TEXTURE_2D;
- im->native.mipmap = re->win->depth_cfg[depth].mipmap;
+ im->native.target = GL_TEXTURE_2D;
+ im->native.mipmap = re->win->depth_cfg[depth].mipmap;
}
# ifdef GL_TEXTURE_RECTANGLE_ARB
else if (target == GLX_TEXTURE_RECTANGLE_EXT)
{
- im->native.target = GL_TEXTURE_RECTANGLE_ARB;
- im->native.mipmap = 0;
+ im->native.target = GL_TEXTURE_RECTANGLE_ARB;
+ im->native.mipmap = 0;
}
# endif
else
{
- im->native.target = GL_TEXTURE_2D;
- im->native.mipmap = 0;
- ERR("still unknown target");
+ im->native.target = GL_TEXTURE_2D;
+ im->native.mipmap = 0;
+ ERR("still unknown target");
}
- }
- else
- ERR("GLX Pixmap create fail");
- im->native.yinvert = re->win->depth_cfg[depth].yinvert;
- im->native.loose = re->win->detected.loose_binding;
- im->native.data = n;
- im->native.func.data = re;
- im->native.func.bind = _native_bind_cb;
- im->native.func.unbind = _native_unbind_cb;
- im->native.func.free = _native_free_cb;
-
- evas_gl_common_image_native_enable(im);
- }
- }
+ }
+ else
+ ERR("GLX Pixmap create fail");
+ im->native.yinvert = re->win->depth_cfg[depth].yinvert;
+ im->native.loose = re->win->detected.loose_binding;
+ im->native.data = n;
+ im->native.func.data = re;
+ im->native.func.bind = _native_bind_cb;
+ im->native.func.unbind = _native_unbind_cb;
+ im->native.func.free = _native_free_cb;
+
+ evas_gl_common_image_native_enable(im);
+ }
+ }
# endif
#endif
+ }
+ else if (ns->type == EVAS_NATIVE_SURFACE_OPENGL)
+ {
+ // FIXME: implement
+ }
return im;
}
if (_evas_gl_x11_window)
evas_gl_common_context_flush(_evas_gl_x11_window->gl_context);
_evas_gl_x11_window = gw;
+ if (gw)
+ {
// EGL / GLES
#if defined (GLES_VARIETY_S3C6410) || defined (GLES_VARIETY_SGX)
- if (gw->egl_surface[0] != EGL_NO_SURFACE)
- {
- if (eglMakeCurrent(gw->egl_disp,
- gw->egl_surface[0],
- gw->egl_surface[0],
- gw->egl_context[0]) == EGL_FALSE)
- {
- ERR("eglMakeCurrent() failed!");
- }
- }
+ if (gw->egl_surface[0] != EGL_NO_SURFACE)
+ {
+ if (eglMakeCurrent(gw->egl_disp,
+ gw->egl_surface[0],
+ gw->egl_surface[0],
+ gw->egl_context[0]) == EGL_FALSE)
+ {
+ ERR("eglMakeCurrent() failed!");
+ }
+ }
// GLX
#else
- if (gw->glxwin)
- {
- if (!glXMakeContextCurrent(gw->disp, gw->glxwin, gw->glxwin,
- gw->context))
- {
- ERR("glXMakeContextCurrent(%p, %p, %p, %p)", (void *)gw->disp, (void *)gw->win, (void *)gw->win, (void *)gw->context);
- }
- }
- else
- {
- if (!glXMakeCurrent(gw->disp, gw->win, gw->context))
- {
- ERR("glXMakeCurrent(%p, 0x%x, %p) failed", gw->disp, (unsigned int)gw->win, (void *)gw->context);
- }
- }
+ if (gw->glxwin)
+ {
+ if (!glXMakeContextCurrent(gw->disp, gw->glxwin, gw->glxwin,
+ gw->context))
+ {
+ ERR("glXMakeContextCurrent(%p, %p, %p, %p)", (void *)gw->disp, (void *)gw->win, (void *)gw->win, (void *)gw->context);
+ }
+ }
+ else
+ {
+ if (!glXMakeCurrent(gw->disp, gw->win, gw->context))
+ {
+ ERR("glXMakeCurrent(%p, 0x%x, %p) failed", gw->disp, (unsigned int)gw->win, (void *)gw->context);
+ }
+ }
#endif
+ }
}
- evas_gl_common_context_use(gw->gl_context);
+ if (gw) evas_gl_common_context_use(gw->gl_context);
}
void