From: Cedric Bail
Date: Thu, 10 Oct 2013 08:44:24 +0000 (+0900)
Subject: evas: Use Eina_Spinlock for Evas_Scalecache, Evas_Async_Events and Image_Entry.
X-Git-Tag: submit/devel/efl/20131029.075644~102
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6dcffec491f2281dd8649f1196f576fa715507f3;p=platform%2Fupstream%2Fefl.git

evas: Use Eina_Spinlock for Evas_Scalecache, Evas_Async_Events and Image_Entry.
---
diff --git a/ChangeLog b/ChangeLog
index 9113fc1..4109be5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -4,6 +4,7 @@
 	use Eina_Spinlock in Eina_Log, replace Eina_Lock by Eina_Spinlock in
 	Eina_Stringshare, Eina_Chained_Mempool.
 	* Eet: replace Eina_Lock by Eina_Spinlock in Eet_Dictionnary.
+	* Evas: replace Eina_Lock by Eina_Spinlock in Evas_ScaleCache, Evas_Async_Events and Image_Entry.
 
 2013-10-10  Carsten Haitzler (The Rasterman)
 
diff --git a/NEWS b/NEWS
index 64bb665..f8895cc 100644
--- a/NEWS
+++ b/NEWS
@@ -224,6 +224,7 @@ Improvements:
     - Add neon assembly for upscaling and map routines
     - Use mmap/munmap for image data allocation on system that have mmap.
     - Add iterator for walking child of smart objects, table and a box.
+    - Use Eina_Spinlock for Evas_ScaleCache, Evas_Async_Events and Image_Entry.
 * Ecore_Con:
     - Rebase dns.c against upstream
     - URL support now dynamically loads libcurl at runtime via eina_module.
diff --git a/src/lib/evas/cache/evas_cache_image.c b/src/lib/evas/cache/evas_cache_image.c
index d8ed655..2e2e277 100644
--- a/src/lib/evas/cache/evas_cache_image.c
+++ b/src/lib/evas/cache/evas_cache_image.c
@@ -25,7 +25,7 @@ struct _Evas_Cache_Preload
    Image_Entry *ie;
 };
 
-static LK(engine_lock);
+static SLK(engine_lock);
 static LK(wakeup);
 static int _evas_cache_mutex_init = 0;
 
@@ -199,9 +199,9 @@ _evas_cache_image_entry_delete(Evas_Cache_Image *cache, Image_Entry *ie)
    ie->cache = NULL;
 
    cache->func.surface_delete(ie);
 
-   LKD(ie->lock);
-   LKD(ie->lock_cancel);
-   LKD(ie->lock_task);
+   SLKD(ie->lock);
+   SLKD(ie->lock_cancel);
+   SLKD(ie->lock_task);
 
    cache->func.dealloc(ie);
 }
@@ -271,9 +271,9 @@ _evas_cache_image_entry_new(Evas_Cache_Image *cache,
    if (tstamp) ie->tstamp = *tstamp;
    else memset(&ie->tstamp, 0, sizeof(Image_Timestamp));
 
-   LKI(ie->lock);
-   LKI(ie->lock_cancel);
-   LKI(ie->lock_task);
+   SLKI(ie->lock);
+   SLKI(ie->lock_cancel);
+   SLKI(ie->lock_task);
 
    if (lo) ie->load_opts = *lo;
    if (ie->file || ie->f)
@@ -315,9 +315,9 @@ _evas_cache_image_entry_surface_alloc(Evas_Cache_Image *cache,
 {
    int wmin = w > 0 ? w : 1;
    int hmin = h > 0 ? h : 1;
 
-   LKL(engine_lock);
+   SLKL(engine_lock);
    _evas_cache_image_entry_surface_alloc__locked(cache, ie, wmin, hmin);
-   LKU(engine_lock);
+   SLKU(engine_lock);
 }
 
 static void
@@ -331,7 +331,7 @@ _evas_cache_image_async_heavy(void *data)
 
    current = data;
 
-   LKL(current->lock);
+   SLKL(current->lock);
    pchannel = current->channel;
    current->channel++;
    cache = current->cache;
@@ -352,7 +352,7 @@ _evas_cache_image_async_heavy(void *data)
           {
              current->flags.loaded = 1;
 
-             LKL(current->lock_task);
+             SLKL(current->lock_task);
              EINA_LIST_FREE(current->tasks, task)
                {
                   if (task != &dummy_task)
@@ -361,12 +361,12 @@ _evas_cache_image_async_heavy(void *data)
                        free(task);
                     }
                }
-             LKU(current->lock_task);
+             SLKU(current->lock_task);
           }
      }
    current->channel = pchannel;
    // check the unload cancel flag
-   LKL(current->lock_cancel);
+   SLKL(current->lock_cancel);
    if (current->flags.unload_cancel)
      {
         current->flags.unload_cancel = EINA_FALSE;
@@ -374,8 +374,8 @@ _evas_cache_image_async_heavy(void *data)
         current->flags.loaded = 0;
         current->flags.preload_done = 0;
      }
-   LKU(current->lock_cancel);
-   LKU(current->lock);
+   SLKU(current->lock_cancel);
+   SLKU(current->lock);
 }
 
 static void
@@ -460,9 +460,9 @@ _evas_cache_image_entry_preload_add(Image_Entry *ie, const Eo *target,
    ie->targets = (Evas_Cache_Target *)
       eina_inlist_append(EINA_INLIST_GET(ie->targets), EINA_INLIST_GET(tg));
 
-   LKL(ie->lock_task);
+   SLKL(ie->lock_task);
    ie->tasks = eina_list_append(ie->tasks, task);
-   LKU(ie->lock_task);
+   SLKU(ie->lock_task);
 
    if (!ie->preload)
      {
@@ -485,7 +485,7 @@ _evas_cache_image_entry_preload_remove(Image_Entry *ie, const Eo *target)
 
    if (target)
      {
-        LKL(ie->lock_task);
+        SLKL(ie->lock_task);
         l = ie->tasks;
         EINA_INLIST_FOREACH(ie->targets, tg)
           {
@@ -499,7 +499,7 @@ _evas_cache_image_entry_preload_remove(Image_Entry *ie, const Eo *target)
                       task = eina_list_data_get(l);
                       ie->tasks = eina_list_remove_list(ie->tasks, l);
                       if (task != &dummy_task) free(task);
-                      LKU(ie->lock_task);
+                      SLKU(ie->lock_task);
 
                       free(tg);
                       break;
@@ -507,7 +507,7 @@ _evas_cache_image_entry_preload_remove(Image_Entry *ie, const Eo *target)
 
              l = eina_list_next(l);
           }
-        LKU(ie->lock_task);
+        SLKU(ie->lock_task);
      }
    else
      {
@@ -520,10 +520,10 @@ _evas_cache_image_entry_preload_remove(Image_Entry *ie, const Eo *target)
              free(tg);
           }
 
-        LKL(ie->lock_task);
+        SLKL(ie->lock_task);
         EINA_LIST_FREE(ie->tasks, task)
           if (task != &dummy_task) free(task);
-        LKU(ie->lock_task);
+        SLKU(ie->lock_task);
      }
 
    if ((!ie->targets) && (ie->preload) && (!ie->flags.pending))
@@ -565,7 +565,7 @@ evas_cache_image_init(const Evas_Cache_Image_Func *cb)
 
    if (_evas_cache_mutex_init++ == 0)
      {
-        LKI(engine_lock);
+        SLKI(engine_lock);
         LKI(wakeup);
         eina_condition_new(&cond_wakeup, &wakeup);
      }
@@ -649,7 +649,7 @@ evas_cache_image_shutdown(Evas_Cache_Image *cache)
    if (--_evas_cache_mutex_init == 0)
      {
         eina_condition_free(&cond_wakeup);
-        LKD(engine_lock);
+        SLKD(engine_lock);
         LKD(wakeup);
      }
 }
@@ -1170,11 +1170,11 @@ evas_cache_image_load_data(Image_Entry *im)
 
    if ((im->flags.loaded) && (!im->animated.animated)) return error;
 
-   LKL(im->lock);
+   SLKL(im->lock);
    im->flags.in_progress = EINA_TRUE;
    error = im->cache->func.load(im);
    im->flags.in_progress = EINA_FALSE;
-   LKU(im->lock);
+   SLKU(im->lock);
 
    im->flags.loaded = 1;
    if (im->cache->func.debug) im->cache->func.debug("load", im);
@@ -1193,23 +1193,23 @@ evas_cache_image_unload_data(Image_Entry *im)
    if (im->flags.in_progress) return;
    evas_cache_image_preload_cancel(im, NULL);
 
-   LKL(im->lock_cancel);
-   if (LKT(im->lock) == EINA_FALSE) /* can't get image lock - busy async load */
+   SLKL(im->lock_cancel);
+   if (SLKT(im->lock) == EINA_FALSE) /* can't get image lock - busy async load */
      {
         im->flags.unload_cancel = EINA_TRUE;
-        LKU(im->lock_cancel);
+        SLKU(im->lock_cancel);
         return;
      }
-   LKU(im->lock_cancel);
+   SLKU(im->lock_cancel);
 
    if ((!im->flags.loaded) || (!im->file && !im->f) || (!im->info.module) || (im->flags.dirty))
      {
-        LKU(im->lock);
+        SLKU(im->lock);
        return;
      }
    im->cache->func.destructor(im);
-   LKU(im->lock);
+   SLKU(im->lock);
    //FIXME: imagedataunload - inform owners
 }
 
diff --git a/src/lib/evas/canvas/evas_async_events.c b/src/lib/evas/canvas/evas_async_events.c
index dd3c593..0990ec8 100644
--- a/src/lib/evas/canvas/evas_async_events.c
+++ b/src/lib/evas/canvas/evas_async_events.c
@@ -37,7 +37,7 @@ static Eina_Condition _thread_feedback_cond;
 static int _thread_loop = 0;
 
-static Eina_Lock _thread_id_lock;
+static Eina_Spinlock _thread_id_lock;
 static int _thread_id = -1;
 static int _thread_id_max = 0;
 static int _thread_id_update = 0;
@@ -46,7 +46,7 @@ static int _fd_write = -1;
 static int _fd_read = -1;
 static pid_t _fd_pid = 0;
 
-static Eina_Lock async_lock;
+static Eina_Spinlock async_lock;
 static Eina_Inarray async_queue;
 static Evas_Event_Async *async_queue_cache = NULL;
 static unsigned int async_queue_cache_max = 0;
@@ -99,7 +99,7 @@ evas_async_events_init(void)
    fcntl(_fd_read, F_SETFL, O_NONBLOCK);
 #endif
 
-   eina_lock_new(&async_lock);
+   eina_spinlock_new(&async_lock);
    eina_inarray_step_set(&async_queue, sizeof (Eina_Inarray), sizeof (Evas_Event_Async), 16);
 
    eina_lock_new(&_thread_mutex);
@@ -108,7 +108,7 @@ evas_async_events_init(void)
    eina_lock_new(&_thread_feedback_mutex);
    eina_condition_new(&_thread_feedback_cond, &_thread_feedback_mutex);
 
-   eina_lock_new(&_thread_id_lock);
+   eina_spinlock_new(&_thread_id_lock);
 
    return _init_evas_event;
 }
@@ -123,9 +123,9 @@ evas_async_events_shutdown(void)
    eina_lock_free(&_thread_mutex);
    eina_condition_free(&_thread_feedback_cond);
    eina_lock_free(&_thread_feedback_mutex);
-   eina_lock_free(&_thread_id_lock);
+   eina_spinlock_free(&_thread_id_lock);
 
-   eina_lock_free(&async_lock);
+   eina_spinlock_free(&async_lock);
    eina_inarray_flush(&async_queue);
    free(async_queue_cache);
 
@@ -180,7 +180,7 @@ _evas_async_events_process_single(void)
    unsigned int len, max;
    int nr;
 
-   eina_lock_take(&async_lock);
+   eina_spinlock_take(&async_lock);
 
    ev = async_queue.members;
    async_queue.members = async_queue_cache;
@@ -193,7 +193,7 @@ _evas_async_events_process_single(void)
    len = async_queue.len;
    async_queue.len = 0;
 
-   eina_lock_release(&async_lock);
+   eina_spinlock_release(&async_lock);
 
    DBG("Evas async events queue length: %u", len);
    nr = len;
@@ -269,13 +269,13 @@ evas_async_events_put(const void *target, Evas_Callback_Type type, void *event_i
 
    _evas_async_events_fork_handle();
 
-   eina_lock_take(&async_lock);
+   eina_spinlock_take(&async_lock);
 
    count = async_queue.len;
    ev = eina_inarray_grow(&async_queue, 1);
    if (!ev)
      {
-        eina_lock_release(&async_lock);
+        eina_spinlock_release(&async_lock);
         return EINA_FALSE;
      }
 
@@ -284,7 +284,7 @@ evas_async_events_put(const void *target, Evas_Callback_Type type, void *event_i
    ev->type = type;
    ev->event_info = event_info;
 
-   eina_lock_release(&async_lock);
+   eina_spinlock_release(&async_lock);
 
    if (count == 0)
      {
@@ -365,14 +365,14 @@ evas_thread_main_loop_begin(void)
    order = malloc(sizeof (Evas_Safe_Call));
    if (!order) return -1;
 
-   eina_lock_take(&_thread_id_lock);
+   eina_spinlock_take(&_thread_id_lock);
    order->current_id = ++_thread_id_max;
    if (order->current_id < 0)
      {
        _thread_id_max = 0;
        order->current_id = ++_thread_id_max;
      }
-   eina_lock_release(&_thread_id_lock);
+   eina_spinlock_release(&_thread_id_lock);
 
    eina_lock_new(&order->m);
    eina_condition_new(&order->c, &order->m);
diff --git a/src/lib/evas/common/evas_image_scalecache.c b/src/lib/evas/common/evas_image_scalecache.c
index 490d3c8..fc90761 100644
--- a/src/lib/evas/common/evas_image_scalecache.c
+++ b/src/lib/evas/common/evas_image_scalecache.c
@@ -57,7 +57,7 @@ struct _Scaleitem
 
 #ifdef SCALECACHE
 static unsigned long long use_counter = 0;
 
-static LK(cache_lock);
+static SLK(cache_lock);
 static Eina_Inlist *cache_list = NULL;
 static unsigned int cache_size = 0;
 static int init = 0;
@@ -121,7 +121,7 @@ evas_common_scalecache_init(void)
    init++;
    if (init > 1) return;
    use_counter = 0;
-   LKI(cache_lock);
+   SLKI(cache_lock);
    s = getenv("EVAS_SCALECACHE_SIZE");
    if (s) max_cache_size = atoi(s) * 1024;
    s = getenv("EVAS_SCALECACHE_MAX_DIMENSION");
@@ -141,7 +141,7 @@ evas_common_scalecache_shutdown(void)
 #ifdef SCALECACHE
    init--;
    if (init ==0)
-     LKD(cache_lock);
+     SLKD(cache_lock);
 #endif
 }
 
@@ -151,7 +151,7 @@ evas_common_rgba_image_scalecache_init(Image_Entry *ie)
 #ifdef SCALECACHE
    RGBA_Image *im = (RGBA_Image *)ie;
    // NOTE: this conflicts with evas image cache init and del of lock
-   LKI(im->cache.lock);
+   SLKI(im->cache.lock);
 #endif
 }
 
@@ -162,7 +162,7 @@ evas_common_rgba_image_scalecache_shutdown(Image_Entry *ie)
    RGBA_Image *im = (RGBA_Image *)ie;
    evas_common_rgba_image_scalecache_dirty(ie);
    // NOTE: this conflicts with evas image cache init and del of lock
-   LKD(im->cache.lock);
+   SLKD(im->cache.lock);
 #endif
 }
 
@@ -172,7 +172,7 @@ evas_common_rgba_image_scalecache_dirty(Image_Entry *ie)
 #ifdef SCALECACHE
    RGBA_Image *im = (RGBA_Image *)ie;
 
-   LKL(im->cache.lock);
+   SLKL(im->cache.lock);
    while (im->cache.list)
      {
         Scaleitem *sci = im->cache.list->data;
@@ -180,7 +180,7 @@ evas_common_rgba_image_scalecache_dirty(Image_Entry *ie)
         im->cache.list = eina_list_remove(im->cache.list, sci);
         if ((sci->im) && (sci->im->cache_entry.references == 0))
           {
-             LKL(cache_lock);
+             SLKL(cache_lock);
              evas_common_rgba_image_free(&sci->im->cache_entry);
              sci->im = NULL;
@@ -191,7 +191,7 @@ evas_common_rgba_image_scalecache_dirty(Image_Entry *ie)
              cache_size -= sci->size_adjust;
 
              cache_list = eina_inlist_remove(cache_list, (Eina_Inlist *)sci);
-             LKU(cache_lock);
+             SLKU(cache_lock);
           }
 
         if (!sci->im)
@@ -199,7 +199,7 @@ evas_common_rgba_image_scalecache_dirty(Image_Entry *ie)
      }
    eina_hash_free(im->cache.hash);
    im->cache.hash = NULL;
-   LKU(im->cache.lock);
+   SLKU(im->cache.lock);
 #endif
 }
 
@@ -208,13 +208,13 @@ evas_common_rgba_image_scalecache_orig_use(Image_Entry *ie)
 {
 #ifdef SCALECACHE
    RGBA_Image *im = (RGBA_Image *)ie;
-   LKL(im->cache.lock);
+   SLKL(im->cache.lock);
    use_counter++;
    // FIXME: if orig not loaded, reload
    // FIXME: mark orig with current used counter
    im->cache.orig_usage++;
    im->cache.usage_count = use_counter;
-   LKU(im->cache.lock);
+   SLKU(im->cache.lock);
 #endif
 }
 
@@ -226,12 +226,12 @@ evas_common_rgba_image_scalecache_usage_get(Image_Entry *ie)
    int size = 0;
    Eina_List *l;
    Scaleitem *sci;
-   LKL(im->cache.lock);
+   SLKL(im->cache.lock);
    EINA_LIST_FOREACH(im->cache.list, l, sci)
      {
        if (sci->im) size += sci->key.dst_w * sci->key.dst_h * 4;
      }
-   LKU(im->cache.lock);
+   SLKU(im->cache.lock);
    return size;
 #else
    return 0;
@@ -247,7 +247,7 @@ evas_common_rgba_image_scalecache_items_ref(Image_Entry *ie, Eina_Array *ret)
    Eina_List *l;
    Scaleitem *sci;
 
-   LKL(im->cache.lock);
+   SLKL(im->cache.lock);
    EINA_LIST_FOREACH(im->cache.list, l, sci)
      {
        if (sci->im)
@@ -258,7 +258,7 @@ evas_common_rgba_image_scalecache_items_ref(Image_Entry *ie, Eina_Array *ret)
             eina_array_push(ret, scie);
          }
      }
-   LKU(im->cache.lock);
+   SLKU(im->cache.lock);
 #endif
 }
 
@@ -435,13 +435,13 @@ EAPI void
 evas_common_rgba_image_scalecache_size_set(unsigned int size)
 {
 #ifdef SCALECACHE
-   LKL(cache_lock);
+   SLKL(cache_lock);
    if (size != max_cache_size)
      {
        max_cache_size = size;
        _cache_prune(NULL, 1);
      }
-   LKU(cache_lock);
+   SLKU(cache_lock);
 #endif
 }
 
@@ -450,9 +450,9 @@ evas_common_rgba_image_scalecache_size_get(void)
 {
 #ifdef SCALECACHE
    int t;
-   LKL(cache_lock);
+   SLKL(cache_lock);
    t = max_cache_size;
-   LKU(cache_lock);
+   SLKU(cache_lock);
    return t;
 #else
    return 0;
@@ -463,9 +463,9 @@ EAPI void
 evas_common_rgba_image_scalecache_prune(void)
 {
 #ifdef SCALECACHE
-   LKL(cache_lock);
+   SLKL(cache_lock);
    _cache_prune(NULL, 0);
-   LKU(cache_lock);
+   SLKU(cache_lock);
 #endif
 }
 
@@ -474,12 +474,12 @@ evas_common_rgba_image_scalecache_dump(void)
 {
 #ifdef SCALECACHE
    int t;
-   LKL(cache_lock);
+   SLKL(cache_lock);
    t = max_cache_size;
    max_cache_size = 0;
    _cache_prune(NULL, 0);
    max_cache_size = t;
-   LKU(cache_lock);
+   SLKU(cache_lock);
 #endif
 }
 
@@ -488,12 +488,12 @@ evas_common_rgba_image_scalecache_flush(void)
 {
 #ifdef SCALECACHE
    int t;
-   LKL(cache_lock);
+   SLKL(cache_lock);
    t = max_cache_size;
    max_cache_size = 0;
    _cache_prune(NULL, 1);
    max_cache_size = t;
-   LKU(cache_lock);
+   SLKU(cache_lock);
 #endif
 }
 
@@ -513,11 +513,11 @@ evas_common_rgba_image_scalecache_prepare(Image_Entry *ie, RGBA_Image *dst EINA_
    if (!im->image.data) return;
    if ((dst_region_w == 0) || (dst_region_h == 0) ||
        (src_region_w == 0) || (src_region_h == 0)) return;
-   // was having major lock issues here - LKL was deadlocking. what was
+   // was having major lock issues here - SLKL was deadlocking. what was
    // going on? it may have been an eina treads badness but this will stay here
    // for now for debug
 #if 1
-   ret = LKT(im->cache.lock);
+   ret = SLKT(im->cache.lock);
    if (ret == EINA_FALSE) /* can't get image lock */
      {
         useconds_t slp = 1, slpt = 0;
@@ -531,7 +531,7 @@ evas_common_rgba_image_scalecache_prepare(Image_Entry *ie, RGBA_Image *dst EINA_
 #endif
              slpt += slp;
              slp++;
-             ret = LKT(im->cache.lock);
+             ret = SLKT(im->cache.lock);
              if (ret == EINA_LOCK_DEADLOCK)
                {
                   printf("WARNING: DEADLOCK on image %p (%s)\n", im, ie->file);
@@ -546,7 +546,7 @@ evas_common_rgba_image_scalecache_prepare(Image_Entry *ie, RGBA_Image *dst EINA_
           {
              printf("WARNING: lock still there after %i usec\n", slpt);
              printf("WARNING: stucklock on image %p (%s)\n", im, ie->file);
-             LKDBG(im->cache.lock);
+             /* SLKDBG(im->cache.lock); */
           }
      }
    else if (ret == EINA_LOCK_DEADLOCK)
@@ -555,14 +555,14 @@ evas_common_rgba_image_scalecache_prepare(Image_Entry *ie, RGBA_Image *dst EINA_
      }
    else locked = 1;
 #endif
-   if (!locked) { LKL(im->cache.lock); locked = 1; }
+   if (!locked) { SLKL(im->cache.lock); locked = 1; }
    use_counter++;
    if ((src_region_w == dst_region_w) && (src_region_h == dst_region_h))
      {
        // 1:1 scale.
        im->cache.orig_usage++;
        im->cache.usage_count = use_counter;
-       if (locked) LKU(im->cache.lock);
+       if (locked) SLKU(im->cache.lock);
        return;
      }
    if ((!im->cache_entry.flags.alpha) && (!smooth))
@@ -571,17 +571,17 @@ evas_common_rgba_image_scalecache_prepare(Image_Entry *ie, RGBA_Image *dst EINA_
        // or in some cases faster not cached
        im->cache.orig_usage++;
        im->cache.usage_count = use_counter;
-       if (locked) LKU(im->cache.lock);
+       if (locked) SLKU(im->cache.lock);
        return;
      }
-   LKL(cache_lock);
+   SLKL(cache_lock);
    sci = _sci_find(im, dc, smooth,
                    src_region_x, src_region_y, src_region_w, src_region_h,
                    dst_region_w, dst_region_h);
    if (!sci)
      {
-        LKU(cache_lock);
-        if (locked) LKU(im->cache.lock);
+        SLKU(cache_lock);
+        if (locked) SLKU(im->cache.lock);
        return;
      }
//   INF("%10i | %4i %4i %4ix%4i -> %4i %4i %4ix%4i | %i",
@@ -609,7 +609,7 @@ evas_common_rgba_image_scalecache_prepare(Image_Entry *ie, RGBA_Image *dst EINA_
      }
    sci->usage++;
    sci->usage_count = use_counter;
-   LKU(cache_lock);
+   SLKU(cache_lock);
    if (sci->usage > im->cache.newest_usage)
      im->cache.newest_usage = sci->usage;
//   INF("newset? %p %i > %i", im,
@@ -618,7 +618,7 @@ evas_common_rgba_image_scalecache_prepare(Image_Entry *ie, RGBA_Image *dst EINA_
    if (sci->usage_count > im->cache.newest_usage_count)
      im->cache.newest_usage_count = sci->usage_count;
//   INF("  -------------- used %8i#, %8i@", (int)sci->usage, (int)sci->usage_count);
-   if (locked) LKU(im->cache.lock);
+   if (locked) SLKU(im->cache.lock);
 #endif
 }
 
@@ -682,11 +682,11 @@ evas_common_rgba_image_scalecache_do_cbs(Image_Entry *ie, RGBA_Image *dst,
          }
        return EINA_FALSE;
      }
-   LKL(cache_lock);
+   SLKL(cache_lock);
    sci = _sci_find(im, dc, smooth,
                    src_region_x, src_region_y, src_region_w, src_region_h,
                    dst_region_w, dst_region_h);
-   LKU(cache_lock);
+   SLKU(cache_lock);
    if (!sci)
      {
        if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
@@ -718,7 +718,7 @@ evas_common_rgba_image_scalecache_do_cbs(Image_Entry *ie, RGBA_Image *dst,
          }
        return EINA_FALSE;
      }
-   LKL(im->cache.lock);
+   SLKL(im->cache.lock);
    if (sci->populate_me)
      {
        int size, osize, used;
@@ -768,7 +768,7 @@ evas_common_rgba_image_scalecache_do_cbs(Image_Entry *ie, RGBA_Image *dst,
          {
            static RGBA_Draw_Context *ct = NULL;
 
-           LKL(cache_lock);
+           SLKL(cache_lock);
            im->cache.orig_usage++;
            im->cache.usage_count = use_counter;
            im->cache.populate_count--;
@@ -780,7 +780,7 @@ evas_common_rgba_image_scalecache_do_cbs(Image_Entry *ie, RGBA_Image *dst,
                 ct = evas_common_draw_context_new();
                 evas_common_draw_context_set_render_op(ct, _EVAS_RENDER_COPY);
              }
-           LKU(im->cache.lock);
+           SLKU(im->cache.lock);
            if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
              {
 #ifdef EVAS_CSERVE2
@@ -790,7 +790,7 @@ evas_common_rgba_image_scalecache_do_cbs(Image_Entry *ie, RGBA_Image *dst,
 #endif
                  evas_cache_image_load_data(&im->cache_entry);
              }
-           LKL(im->cache.lock);
+           SLKL(im->cache.lock);
            evas_common_image_colorspace_normalize(im);
            if (im->image.data)
              {
@@ -845,7 +845,7 @@ evas_common_rgba_image_scalecache_do_cbs(Image_Entry *ie, RGBA_Image *dst,
//                    sci->dst_w * sci->dst_h * 4, sci->flop,
//                    sci->dst_w, sci->dst_h);
                cache_list = eina_inlist_append(cache_list, (Eina_Inlist *)sci);
-               LKU(cache_lock);
+               SLKU(cache_lock);
               didpop = 1;
             }
        }
@@ -853,17 +853,17 @@ evas_common_rgba_image_scalecache_do_cbs(Image_Entry *ie, RGBA_Image *dst,
      {
         if (!didpop)
           {
-             LKL(cache_lock);
+             SLKL(cache_lock);
              cache_list = eina_inlist_remove(cache_list, (Eina_Inlist *)sci);
              cache_list = eina_inlist_append(cache_list, (Eina_Inlist *)sci);
-             LKU(cache_lock);
+             SLKU(cache_lock);
           }
         else
           {
             if (sci->flop >= FLOP_DEL) sci->flop -= FLOP_DEL;
          }
//        INF("use cached!");
-        LKU(im->cache.lock);
+        SLKU(im->cache.lock);
         ret |= cb_sample(sci->im, dst, dc,
                          0, 0,
                          dst_region_w, dst_region_h,
@@ -909,7 +909,7 @@ evas_common_rgba_image_scalecache_do_cbs(Image_Entry *ie, RGBA_Image *dst,
    }
   else
     {
-      LKU(im->cache.lock);
+      SLKU(im->cache.lock);
       if (im->cache_entry.space == EVAS_COLORSPACE_ARGB8888)
         {
 #ifdef EVAS_CSERVE2
diff --git a/src/lib/evas/include/evas_common_private.h b/src/lib/evas/include/evas_common_private.h
index c07f6e6..8d8b9f6 100644
--- a/src/lib/evas/include/evas_common_private.h
+++ b/src/lib/evas/include/evas_common_private.h
@@ -156,6 +156,13 @@ extern EAPI int _evas_log_dom_global;
 # define __ARM_ARCH__ 73
 #endif
 
+#define SLK(x) Eina_Spinlock x
+#define SLKI(x) eina_spinlock_new(&(x))
+#define SLKD(x) eina_spinlock_free(&(x))
+#define SLKL(x) eina_spinlock_take(&(x))
+#define SLKT(x) eina_spinlock_take_try(&(x))
+#define SLKU(x) eina_spinlock_release(&(x))
+
 #define LK(x) Eina_Lock x
 #define LKI(x) eina_lock_new(&(x))
 #define LKD(x) eina_lock_free(&(x))
@@ -600,9 +607,9 @@ struct _Image_Entry
       Evas_Image_Load_Func *loader;
    } info;
 
-   LK(lock);
-   LK(lock_cancel);
-   LK(lock_task);
+   SLK(lock);
+   SLK(lock_cancel);
+   SLK(lock_task);
 
    /* for animation feature */
    Evas_Image_Animated animated;
@@ -796,7 +803,7 @@ struct _RGBA_Image
    } image;
 
    struct {
-      LK(lock);
+      SLK(lock);
       Eina_List *list;
      Eina_Hash *hash;
      unsigned long long orig_usage;
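
Usage sketch (illustrative, not part of the commit): the SLK() wrappers added to
src/lib/evas/include/evas_common_private.h map one-to-one onto the Eina spinlock
API, mirroring the existing LK() mutex wrappers. Below is a minimal,
self-contained example of the pattern the patch applies - declare, init,
take/release around a short critical section, free - assuming only the
eina_spinlock_* calls shown above; the counter name and helper functions are
hypothetical.

    #include <Eina.h>

    #define SLK(x)  Eina_Spinlock x             /* declare */
    #define SLKI(x) eina_spinlock_new(&(x))     /* init */
    #define SLKD(x) eina_spinlock_free(&(x))    /* destroy */
    #define SLKL(x) eina_spinlock_take(&(x))    /* lock (spin, no sleep) */
    #define SLKU(x) eina_spinlock_release(&(x)) /* unlock */

    static SLK(counter_lock);                   /* Eina_Spinlock counter_lock; */
    static unsigned long long counter = 0;

    static void
    counter_init(void)
    {
       eina_init();
       SLKI(counter_lock);
    }

    static void
    counter_bump(void)
    {
       SLKL(counter_lock);   /* spinlocks never sleep: keep this section short */
       counter++;
       SLKU(counter_lock);
    }

    static void
    counter_shutdown(void)
    {
       SLKD(counter_lock);
       eina_shutdown();
    }

The point of the change is exactly this shape of critical section: the guarded
work is a handful of counter/list/flag updates, so spinning briefly is cheaper
than the sleep/wake cost of a full Eina_Lock mutex.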
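A second sketch (also illustrative, not from the commit) of the SLKT() try-lock
pattern used in evas_cache_image_unload_data(): if the spinlock cannot be taken
because an async load currently holds it, record a cancel request instead of
blocking. The names work_lock, work_cancel and work_try_unload are hypothetical;
comparing the result of eina_spinlock_take_try() against EINA_FALSE follows the
comparison used in the patch itself.

    static Eina_Spinlock work_lock;             /* i.e. SLK(work_lock) */
    static Eina_Bool work_cancel = EINA_FALSE;

    static void
    work_try_unload(void)
    {
       if (eina_spinlock_take_try(&work_lock) == EINA_FALSE)
         {
            /* lock busy - another thread is loading; ask it to undo later */
            work_cancel = EINA_TRUE;
            return;
         }
       /* got the lock: safe to do the unload work here */
       eina_spinlock_release(&work_lock);
    }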