/*
 * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_l2_cache.h"
#include "mali_hw_core.h"
#include "mali_scheduler.h"
#include "mali_pm_domain.h"

/**
 * Size of the Mali L2 cache registers in bytes
 */
#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30

/**
 * Mali L2 cache register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register
 */
typedef enum mali_l2_cache_register {
        MALI400_L2_CACHE_REGISTER_SIZE         = 0x0004,
        MALI400_L2_CACHE_REGISTER_STATUS       = 0x0008,
        /* unused                              = 0x000C */
        MALI400_L2_CACHE_REGISTER_COMMAND      = 0x0010, /**< Misc cache commands, e.g. clear */
        MALI400_L2_CACHE_REGISTER_CLEAR_PAGE   = 0x0014,
        MALI400_L2_CACHE_REGISTER_MAX_READS    = 0x0018, /**< Limit of outstanding read requests */
        MALI400_L2_CACHE_REGISTER_ENABLE       = 0x001C, /**< Enable misc cache features */
        MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
        MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
        MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
        MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
} mali_l2_cache_register;

/**
 * Mali L2 cache commands
 * These are the commands that can be sent to the Mali L2 cache unit
 */
typedef enum mali_l2_cache_command {
        MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
        /* Read HW TRM carefully before adding/using other commands than the clear above */
} mali_l2_cache_command;

/**
 * Mali L2 cache enable flags
 * These are the values that can be written to the enable register of the Mali L2 cache unit
 */
typedef enum mali_l2_cache_enable {
        MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
        MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
        MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
} mali_l2_cache_enable;

/**
 * Mali L2 cache status bits
 */
typedef enum mali_l2_cache_status {
        MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
        MALI400_L2_CACHE_STATUS_DATA_BUSY    = 0x02, /**< L2 cache is busy handling data requests */
} mali_l2_cache_status;

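/* Default cap on outstanding read requests from the L2 cache (0x1C == 28).
 * This value is written to the MAX_READS register on reset and can be
 * overridden through the mali_l2_max_reads variable below. */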
#define MALI400_L2_MAX_READS_DEFAULT 0x1C

static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
static u32 mali_global_num_l2_cache_cores = 0;

int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;


/* Local helper functions */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);

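/* Helpers for taking and releasing the per-cache counter and command locks.
 * When MALI_UPPER_HALF_SCHEDULING is enabled the locks are presumably also
 * taken from interrupt (upper half) context, so the IRQ-disabling spinlock
 * variants are used; otherwise plain spinlocks are sufficient. */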
static void mali_l2_cache_counter_lock(struct mali_l2_cache_core *cache)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
        _mali_osk_spinlock_irq_lock(cache->counter_lock);
#else
        _mali_osk_spinlock_lock(cache->counter_lock);
#endif
}

static void mali_l2_cache_counter_unlock(struct mali_l2_cache_core *cache)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
        _mali_osk_spinlock_irq_unlock(cache->counter_lock);
#else
        _mali_osk_spinlock_unlock(cache->counter_lock);
#endif
}

static void mali_l2_cache_command_lock(struct mali_l2_cache_core *cache)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
        _mali_osk_spinlock_irq_lock(cache->command_lock);
#else
        _mali_osk_spinlock_lock(cache->command_lock);
#endif
}

static void mali_l2_cache_command_unlock(struct mali_l2_cache_core *cache)
{
#ifdef MALI_UPPER_HALF_SCHEDULING
        _mali_osk_spinlock_irq_unlock(cache->command_lock);
#else
        _mali_osk_spinlock_unlock(cache->command_lock);
#endif
}

struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
{
        struct mali_l2_cache_core *cache = NULL;

        MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));

        if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
                MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
                return NULL;
        }

        cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
        if (NULL != cache) {
                cache->core_id = mali_global_num_l2_cache_cores;
                cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
                cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
                cache->pm_domain = NULL;
                cache->mali_l2_status = MALI_L2_NORMAL;
                if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
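                        /* The SIZE register packs four log2-encoded fields, decoded by the
                         * debug print below:
                         *   bits  [7:0]  log2(cache line size in bytes)
                         *   bits [15:8]  log2(associativity)
                         *   bits [23:16] log2(total cache size in bytes)
                         *   bits [31:24] log2(external bus width in bits) */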
                        MALI_DEBUG_CODE(u32 cache_size = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_SIZE));
                        MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
                                             resource->description,
                                             1 << (((cache_size >> 16) & 0xff) - 10),
                                             1 << ((cache_size >> 8) & 0xff),
                                             1 << (cache_size & 0xff),
                                             1 << ((cache_size >> 24) & 0xff)));

#ifdef MALI_UPPER_HALF_SCHEDULING
                        cache->command_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
#else
                        cache->command_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
#endif
                        if (NULL != cache->command_lock) {
#ifdef MALI_UPPER_HALF_SCHEDULING
                                cache->counter_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
#else
                                cache->counter_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
#endif
                                if (NULL != cache->counter_lock) {
                                        mali_l2_cache_reset(cache);

                                        cache->last_invalidated_id = 0;

                                        mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
                                        mali_global_num_l2_cache_cores++;

                                        return cache;
                                } else {
                                        MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
                                }
#ifdef MALI_UPPER_HALF_SCHEDULING
                                _mali_osk_spinlock_irq_term(cache->command_lock);
#else
                                _mali_osk_spinlock_term(cache->command_lock);
#endif
                        } else {
                                MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
                        }

                        mali_hw_core_delete(&cache->hw_core);
                }

                _mali_osk_free(cache);
        } else {
                MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
        }

        return NULL;
}

void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
{
        u32 i;

        /* reset to defaults */
        mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
        mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);

#ifdef MALI_UPPER_HALF_SCHEDULING
        _mali_osk_spinlock_irq_term(cache->counter_lock);
        _mali_osk_spinlock_irq_term(cache->command_lock);
#else
        _mali_osk_spinlock_term(cache->command_lock);
        _mali_osk_spinlock_term(cache->counter_lock);
#endif

        mali_hw_core_delete(&cache->hw_core);

        for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
                if (mali_global_l2_cache_cores[i] == cache) {
                        mali_global_l2_cache_cores[i] = NULL;
                        mali_global_num_l2_cache_cores--;

                        if (i != mali_global_num_l2_cache_cores) {
                                /* We removed an L2 cache from the middle of the array -- move the last
                                 * L2 cache to the current position to close the gap */
                                mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
                                mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
                        }

                        break;
                }
        }

        _mali_osk_free(cache);
}

u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
{
        return cache->core_id;
}

static void mali_l2_cache_core_set_counter_internal(struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
{
        u32 value = 0; /* disabled src */
        u32 reg_offset = 0;
        mali_bool core_is_on;

        MALI_DEBUG_ASSERT_POINTER(cache);

        core_is_on = mali_l2_cache_lock_power_state(cache);

        mali_l2_cache_counter_lock(cache);

        switch (source_id) {
        case 0:
                cache->counter_src0 = counter;
                reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
                break;

        case 1:
                cache->counter_src1 = counter;
                reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
                break;

        default:
                MALI_DEBUG_ASSERT(0);
                break;
        }

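        /* While the L2 is paused, only the software copy of the counter source is
         * updated here; the hardware register is reprogrammed when the pause ends,
         * by mali_l2_cache_reset_counters_all() via mali_l2_cache_pause_all(). */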
        if (MALI_L2_PAUSE == cache->mali_l2_status) {
                mali_l2_cache_counter_unlock(cache);
                mali_l2_cache_unlock_power_state(cache);
                return;
        }

        if (MALI_HW_CORE_NO_COUNTER != counter) {
                value = counter;
        }

        if (MALI_TRUE == core_is_on) {
                mali_hw_core_register_write(&cache->hw_core, reg_offset, value);
        }

        mali_l2_cache_counter_unlock(cache);
        mali_l2_cache_unlock_power_state(cache);
}

void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
{
        mali_l2_cache_core_set_counter_internal(cache, 0, counter);
}

void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
{
        mali_l2_cache_core_set_counter_internal(cache, 1, counter);
}

u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
{
        return cache->counter_src0;
}

u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
{
        return cache->counter_src1;
}

void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
{
        MALI_DEBUG_ASSERT(NULL != src0);
        MALI_DEBUG_ASSERT(NULL != value0);
        MALI_DEBUG_ASSERT(NULL != src1);
        MALI_DEBUG_ASSERT(NULL != value1);

        /* Caller must hold the PM lock and know that we are powered on */

        mali_l2_cache_counter_lock(cache);

        if (MALI_L2_PAUSE == cache->mali_l2_status) {
                mali_l2_cache_counter_unlock(cache);

                return;
        }

        *src0 = cache->counter_src0;
        *src1 = cache->counter_src1;

        if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
                *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
        }

        if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
                *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
        }

        mali_l2_cache_counter_unlock(cache);
}

static void mali_l2_cache_reset_counters_all(void)
{
        int i;
        u32 value;
        struct mali_l2_cache_core *cache;
        u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();

        for (i = 0; i < num_cores; i++) {
                cache = mali_l2_cache_core_get_glob_l2_core(i);
                if (MALI_TRUE == mali_l2_cache_lock_power_state(cache)) {
                        mali_l2_cache_counter_lock(cache);

                        if (MALI_L2_PAUSE == cache->mali_l2_status) {
                                mali_l2_cache_counter_unlock(cache);
                                mali_l2_cache_unlock_power_state(cache);
                                return;
                        }

                        /* Reset performance counters */
                        if (MALI_HW_CORE_NO_COUNTER == cache->counter_src0) {
                                value = 0;
                        } else {
                                value = cache->counter_src0;
                        }
                        mali_hw_core_register_write(&cache->hw_core,
                                                    MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);

                        if (MALI_HW_CORE_NO_COUNTER == cache->counter_src1) {
                                value = 0;
                        } else {
                                value = cache->counter_src1;
                        }
                        mali_hw_core_register_write(&cache->hw_core,
                                                    MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);

                        mali_l2_cache_counter_unlock(cache);
                }

                mali_l2_cache_unlock_power_state(cache);
        }
}


struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
{
        if (mali_global_num_l2_cache_cores > index) {
                return mali_global_l2_cache_cores[index];
        }

        return NULL;
}

u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
{
        return mali_global_num_l2_cache_cores;
}

void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
        /* Invalidate cache (just to keep it in a known state at startup) */
        mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);

        mali_l2_cache_counter_lock(cache);

        if (MALI_L2_PAUSE == cache->mali_l2_status) {
                mali_l2_cache_counter_unlock(cache);

                return;
        }

        /* Enable cache */
        mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
        mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);

        /* Restart any performance counters (if enabled) */
        if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
                mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
        }

        if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
                mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
        }

        mali_l2_cache_counter_unlock(cache);
}

void mali_l2_cache_reset_all(void)
{
        int i;
        u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();

        for (i = 0; i < num_cores; i++) {
                mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
        }
}

void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
{
        MALI_DEBUG_ASSERT_POINTER(cache);

        if (NULL != cache) {
                cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
                mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
        }
}

mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
{
        MALI_DEBUG_ASSERT_POINTER(cache);

        if (NULL != cache) {
                /* If the last cache invalidation was done by a job with a higher id we
                 * don't have to flush. Since user space will store jobs w/ their
                 * corresponding memory in sequence (first job #0, then job #1, ...),
                 * we don't have to flush for job n-1 if job n has already invalidated
                 * the cache since we know for sure that job n-1's memory was already
                 * written when job n was started. */
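                /* Example: if the cache was last invalidated for job id 7
                 * (last_invalidated_id == 7), a request carrying id 5 is skipped,
                 * while a request carrying id 8 triggers a new clear. */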
                if (((s32)id) <= ((s32)cache->last_invalidated_id)) {
                        return MALI_FALSE;
                } else {
                        cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
                }

                mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
        }
        return MALI_TRUE;
}

void mali_l2_cache_invalidate_all(void)
{
        u32 i;
        for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
                /* additional check */
                if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
                        _mali_osk_errcode_t ret;
                        mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_cache_order();
                        ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
                        if (_MALI_OSK_ERR_OK != ret) {
                                MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
                        }
                }
                mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
        }
}

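/* Invalidate the given pages on all L2 cache cores. One CLEAR_PAGE command is
 * issued per page per core, and commands are only sent to cores whose power
 * domain reports them as powered on. */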
void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
{
        u32 i;
        for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
                /* additional check */
                if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
                        u32 j;
                        for (j = 0; j < num_pages; j++) {
                                _mali_osk_errcode_t ret;
                                ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
                                if (_MALI_OSK_ERR_OK != ret) {
                                        MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
                                }
                        }
                }
                mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
        }
}

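/* Lock the power state of the PM domain this cache belongs to. The return
 * value indicates whether the cache is currently powered on; callers only
 * touch the cache's registers when it is MALI_TRUE and always release the
 * domain again with mali_l2_cache_unlock_power_state(). */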
mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
{
        return mali_pm_domain_lock_state(cache->pm_domain);
}

void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
{
        mali_pm_domain_unlock_state(cache->pm_domain);
}

/* -------- local helper functions below -------- */


static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
        int i = 0;
        const int loop_count = 100000;

        /*
         * Grab lock in order to send commands to the L2 cache in a serialized fashion.
         * The L2 cache will ignore commands if it is busy.
         */
        mali_l2_cache_command_lock(cache);

        if (MALI_L2_PAUSE == cache->mali_l2_status) {
                mali_l2_cache_command_unlock(cache);
                MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for L2 to come back\n"));

                MALI_ERROR(_MALI_OSK_ERR_BUSY);
        }

        /* First, wait for L2 cache command handler to go idle */

        for (i = 0; i < loop_count; i++) {
                if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
                        break;
                }
        }

        if (i == loop_count) {
                mali_l2_cache_command_unlock(cache);
                MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
                MALI_ERROR(_MALI_OSK_ERR_FAULT);
        }

        /* then issue the command */
        mali_hw_core_register_write(&cache->hw_core, reg, val);

        mali_l2_cache_command_unlock(cache);

        MALI_SUCCESS;
}

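/* Pause or resume all L2 cache cores. While paused, the other entry points in
 * this file only update software state and skip hardware register accesses.
 * On resume, every cache is invalidated and the performance counter sources
 * are written back to hardware so that SW and HW state agree again. */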
void mali_l2_cache_pause_all(mali_bool pause)
{
        int i;
        struct mali_l2_cache_core *cache;
        u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
        mali_l2_power_status status = MALI_L2_NORMAL;

        if (pause) {
                status = MALI_L2_PAUSE;
        }

        for (i = 0; i < num_cores; i++) {
                cache = mali_l2_cache_core_get_glob_l2_core(i);
                if (NULL != cache) {
                        cache->mali_l2_status = status;

                        /* Take and release the counter and command locks to
                         * ensure there are no active threads that didn't get
                         * the status flag update.
                         *
                         * The locks will also ensure the necessary memory
                         * barriers are done on SMP systems.
                         */
                        mali_l2_cache_counter_lock(cache);
                        mali_l2_cache_counter_unlock(cache);

                        mali_l2_cache_command_lock(cache);
                        mali_l2_cache_command_unlock(cache);
                }
        }

        /* Resume from pause: invalidate the caches here so that any cache
         * operations lost during the pause period do not leave the SW state
         * inconsistent with the actual L2 cache state.
         */
        if (!pause) {
                mali_l2_cache_invalidate_all();
                mali_l2_cache_reset_counters_all();
        }
}