* @desc Provides controller functionalities to kill procs/apps
*/
+#include <malloc.h>
#include <string.h>
#include <limits.h>
#include <vconf.h>
return RESOURCED_ERROR_NONE;
}
+/**
+ * @brief Broadcast the current memory pressure level via the eventsystem.
+ *
+ * Maps the internal MEM_LEVEL_* state onto the public SYS_EVENT_LOW_MEMORY
+ * event values (NORMAL / SOFT_WARNING / HARD_WARNING) and sends it in a
+ * bundle. Unknown levels are logged and ignored.
+ *
+ * @param lv one of the MEM_LEVEL_* states
+ */
+static void lowmem_memory_level_send_system_event(int lv)
+{
+	bundle *b;
+	const char *bundle_str;
+
+	switch (lv) {
+	case MEM_LEVEL_HIGH:
+	case MEM_LEVEL_MEDIUM:
+	case MEM_LEVEL_LOW:
+		bundle_str = EVT_VAL_MEMORY_NORMAL;
+		break;
+	case MEM_LEVEL_CRITICAL:
+		bundle_str = EVT_VAL_MEMORY_SOFT_WARNING;
+		break;
+	case MEM_LEVEL_OOM:
+		bundle_str = EVT_VAL_MEMORY_HARD_WARNING;
+		break;
+	default:
+		_E("Invalid state");
+		return;
+	}
+
+	b = bundle_create();
+	if (!b) {
+		_E("Failed to create bundle");
+		return;
+	}
+
+	/* NOTE(review): bundle_add_str() result is not checked -- best effort */
+	bundle_add_str(b, EVT_KEY_LOW_MEMORY, bundle_str);
+	eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
+	bundle_free(b);
+}
+
static void swap_activate_act(void)
{
int ret, status;
}
}
-int lowmem_control_handler(void *data)
+static int lowmem_control_handler(void *data)
{
struct lowmem_control_data *lowmem_data = (struct lowmem_control_data *)data;
return RESOURCED_ERROR_NONE;
}
+/**
+ * @brief Completion callback for the OOM-level reclaim request.
+ *
+ * When the low-memory-killer reports LOWMEM_RECLAIM_DONE, re-arm the OOM
+ * popup; in all cases request a transition back to MEM_LEVEL_HIGH.
+ *
+ * @param ctl the handled request; status is filled in by LMK
+ */
+static void medium_cb(struct lowmem_control *ctl)
+{
+	if (ctl->status == LOWMEM_RECLAIM_DONE)
+		lowmem_set_oom_popup(false);
+	lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
+}
+
+
+
/* lowmem actions */
static int high_mem_act(void *data)
{
return RESOURCED_ERROR_NONE;
}
+/**
+ * @brief Action handler for the MEM_LEVEL_OOM state.
+ *
+ * Announces the hard-warning level (system event + vconf), pauses the
+ * freezer cgroup when enabled, and queues an LMK victim-reclaim request
+ * if available memory is still below the root memcg's leave threshold.
+ *
+ * NOTE(review): unlike the former lmk_act(), there is no "reclaim worker
+ * already running" guard here -- presumably the MEM_LEVEL_OOM governor
+ * calls lowmem_worker_is_running(); confirm against the governor code.
+ *
+ * @param data unused
+ * @return RESOURCED_ERROR_NONE always
+ */
+static int oom_mem_act(void *data)
+{
+	unsigned int available_mb;
+	int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
+
+	if (vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status) != 0)
+		_D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
+
+	lowmem_memory_level_send_system_event(MEM_LEVEL_OOM);
+	if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
+		if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
+			resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
+				(void *)CGROUP_FREEZER_PAUSED);
+		vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
+			VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
+	}
+	available_mb = proc_get_mem_available();
+
+	lowmem_change_lowmem_state(MEM_LEVEL_OOM);
+
+	if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
+		/* NOTE(review): return value ignored -- request is best effort */
+		lowmem_queue_new_request(OOM_IN_DEPTH, OOM_SCORE_LOW,
+			get_root_memcg_info()->threshold_leave_mb,
+			lowmem_get_num_max_victims(), medium_cb);
+	}
+
+	resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT,
+		(void *)SWAP_COMPACT_MEM_LEVEL_OOM);
+
+	/**
+	 * Flush resourced's own heap back to the kernel: resourced can hold
+	 * many fast bins and sqlite3 cache memory.
+	 */
+	malloc_trim(0);
+
+	return RESOURCED_ERROR_NONE;
+}
static int lowmem_controller_initialize(void *data)
lowmem_initialize_controller_ops_action(MEM_LEVEL_MEDIUM, medium_mem_act);
lowmem_initialize_controller_ops_action(MEM_LEVEL_LOW, low_mem_act);
lowmem_initialize_controller_ops_action(MEM_LEVEL_CRITICAL, critical_mem_act);
+ lowmem_initialize_controller_ops_action(MEM_LEVEL_OOM, oom_mem_act);
register_notifier(RESOURCED_NOTIFIER_MEM_CONTROL, lowmem_control_handler);
#define LMW_LOOP_WAIT_TIMEOUT_MSEC OOM_TIMER_INTERVAL_SEC*(G_USEC_PER_SEC)
#define LMW_RETRY_WAIT_TIMEOUT_MSEC (G_USEC_PER_SEC)
-struct lowmem_control {
- /*
- * For each queued request the following properties
- * are required with two exceptions:
- * - status is being set by LMK
- * - callback is optional
- */
- /* Processing flags*/
- unsigned int flags;
- /* Indictator for OOM score of targeted processes */
- enum oom_score score;
-
- /* Desired size to be restored - level to be reached (MB)*/
- unsigned int size_mb;
- /* Max number of processes to be considered */
- unsigned int count;
- /* Memory reclaim status */
- int status;
- /*
- * Optional - if set, will be triggered by LMK once the request
- * is handled.
- */
- void (*callback) (struct lowmem_control *);
-};
-
struct lowmem_worker {
pthread_t worker_thread;
GAsyncQueue *queue;
#define LOWMEM_WORKER_ACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 1)
#define LOWMEM_WORKER_DEACTIVATE(_lmw) g_atomic_int_set(&(_lmw)->active, 0)
-#define LOWMEM_WORKER_IS_RUNNING(_lmw) g_atomic_int_get(&(_lmw)->running)
#define LOWMEM_WORKER_RUN(_lmw) g_atomic_int_set(&(_lmw)->running, 1)
#define LOWMEM_WORKER_IDLE(_lmw) g_atomic_int_set(&(_lmw)->running, 0)
#define LOWMEM_NEW_REQUEST() g_slice_new0(struct lowmem_control)
-
#define LOWMEM_DESTROY_REQUEST(_ctl) \
g_slice_free(typeof(*(_ctl)), _ctl); \
(c)->callback = __cb; \
}
+/**
+ * @brief Report whether the LMK reclaim worker is currently processing.
+ *
+ * Reads the worker's atomic 'running' flag, so it is safe to call from
+ * any thread.
+ *
+ * @return non-zero while the worker loop is running, 0 when idle
+ */
+int lowmem_worker_is_running(void)
+{
+	return g_atomic_int_get(&(lmw.running));
+}
+
static void lowmem_queue_request(struct lowmem_worker *lmw,
struct lowmem_control *ctl)
{
/*-------------------------------------------------*/
-/* low memory action function for cgroup */
-/* low memory action function */
-static void lmk_act(void);
-
struct lowmem_controller_ops {
int (*governor)(void *data);
int (*action)(void *data);
case MEM_LEVEL_MEDIUM:
case MEM_LEVEL_LOW:
case MEM_LEVEL_CRITICAL:
+ case MEM_LEVEL_OOM:
lowmem_actions[mem_state].governor = governor;
return;
default:
case MEM_LEVEL_HIGH:
case MEM_LEVEL_MEDIUM:
case MEM_LEVEL_CRITICAL:
+ case MEM_LEVEL_OOM:
lowmem_actions[mem_state].action = action;
return;
default:
static size_t cur_mem_state = MEM_LEVEL_HIGH;
static int num_max_victims = MAX_MEMORY_CGROUP_VICTIMS;
+/**
+ * @brief Accessor for the configured maximum number of LMK victims.
+ * @return current num_max_victims value
+ */
+int lowmem_get_num_max_victims(void)
+{
+	return num_max_victims;
+}
static unsigned long long totalram_bytes;
static unsigned long totalram_kb;
return should_be_freed_mb;
}
+/**
+ * @brief Enable or disable the pending OOM popup flag.
+ *
+ * Passing false re-arms lowmem_oom_popup_once() so the popup can be
+ * shown again on the next OOM episode.
+ *
+ * @param popup new value for the module-level oom_popup flag
+ */
+void lowmem_set_oom_popup(bool popup)
+{
+	oom_popup = popup;
+}
+
static void lowmem_oom_popup_once(void)
{
if (oom_popup_enable && !oom_popup) {
resourced_notify(RESOURCED_NOTIFIER_SWAP_START, path);
}
-void lowmem_memory_level_send_system_event(int lv)
-{
- bundle *b;
- const char *str;
-
- switch (lv) {
- case MEM_LEVEL_HIGH:
- case MEM_LEVEL_MEDIUM:
- case MEM_LEVEL_LOW:
- str = EVT_VAL_MEMORY_NORMAL;
- break;
- case MEM_LEVEL_CRITICAL:
- str = EVT_VAL_MEMORY_SOFT_WARNING;
- break;
- case MEM_LEVEL_OOM:
- str = EVT_VAL_MEMORY_HARD_WARNING;
- break;
- default:
- _E("Invalid state");
- return;
- }
-
- b = bundle_create();
- if (!b) {
- _E("Failed to create bundle");
- return;
- }
-
- bundle_add_str(b, EVT_KEY_LOW_MEMORY, str);
- eventsystem_send_system_event(SYS_EVENT_LOW_MEMORY, b);
- bundle_free(b);
-}
-
-static void medium_cb(struct lowmem_control *ctl)
-{
- if (ctl->status == LOWMEM_RECLAIM_DONE)
- oom_popup = false;
- lowmem_change_memory_state(MEM_LEVEL_HIGH, 0);
-}
-
-static void lmk_act(void)
-{
- unsigned int available_mb;
- int ret;
- int status = VCONFKEY_SYSMAN_LOW_MEMORY_NORMAL;
-
- /*
- * Don't trigger reclaim worker
- * if it is already running
- */
- if (LOWMEM_WORKER_IS_RUNNING(&lmw))
- return;
-
- ret = vconf_get_int(VCONFKEY_SYSMAN_LOW_MEMORY, &status);
- if (ret)
- _D("vconf_get_int fail %s", VCONFKEY_SYSMAN_LOW_MEMORY);
-
- lowmem_memory_level_send_system_event(MEM_LEVEL_OOM);
- if (status != VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING) {
- if (proc_get_freezer_status() == CGROUP_FREEZER_ENABLED)
- resourced_notify(RESOURCED_NOTIFIER_FREEZER_CGROUP_STATE,
- (void *)CGROUP_FREEZER_PAUSED);
- vconf_set_int(VCONFKEY_SYSMAN_LOW_MEMORY,
- VCONFKEY_SYSMAN_LOW_MEMORY_HARD_WARNING);
- }
- available_mb = proc_get_mem_available();
-
- lowmem_change_lowmem_state(MEM_LEVEL_OOM);
-
- if (available_mb < get_root_memcg_info()->threshold_leave_mb) {
- struct lowmem_control *ctl;
-
- ctl = LOWMEM_NEW_REQUEST();
- if (ctl) {
- LOWMEM_SET_REQUEST(ctl, OOM_IN_DEPTH,
- OOM_SCORE_LOW, get_root_memcg_info()->threshold_leave_mb,
- num_max_victims, medium_cb);
- lowmem_queue_request(&lmw, ctl);
- }
- }
-
- resourced_notify(RESOURCED_NOTIFIER_SWAP_COMPACT, (void *)SWAP_COMPACT_MEM_LEVEL_OOM);
-
- /*
- * Flush resourced memory such as other processes.
- * Resourced can use both many fast bins and sqlite3 cache memery.
- */
- malloc_trim(0);
-
- return;
-}
-
void lowmem_trigger_memory_state_action(int mem_state)
{
/*
case MEM_LEVEL_MEDIUM:
case MEM_LEVEL_LOW:
case MEM_LEVEL_CRITICAL:
+ case MEM_LEVEL_OOM:
assert(lowmem_actions[mem_state].governor != NULL);
assert(lowmem_actions[mem_state].action != NULL);
if (lowmem_actions[mem_state].governor(NULL) < 0)
break;
lowmem_actions[mem_state].action(NULL);
break;
- case MEM_LEVEL_OOM:
- lmk_act();
- break;
default:
assert(0);
}
return 0;
}
+/**
+ * @brief Allocate and enqueue a new reclaim request for the LMK worker.
+ *
+ * Ownership of the allocated request transfers to the worker queue; the
+ * optional @callback is invoked by LMK once the request is handled.
+ *
+ * @param flags    processing flags (e.g. OOM_IN_DEPTH)
+ * @param score    OOM score group of the targeted processes
+ * @param size_mb  desired amount of memory (MB) to restore
+ * @param count    maximum number of victim processes to consider
+ * @param callback optional completion callback, may be NULL
+ * @return RESOURCED_ERROR_NONE on success,
+ *         RESOURCED_ERROR_OUT_OF_MEMORY if allocation failed
+ */
+int lowmem_queue_new_request(unsigned int flags, enum oom_score score,
+		unsigned int size_mb, unsigned int count,
+		void (*callback)(struct lowmem_control *))
+{
+	struct lowmem_control *ctl = LOWMEM_NEW_REQUEST();
+
+	if (!ctl)
+		return RESOURCED_ERROR_OUT_OF_MEMORY;
+
+	ctl->flags = flags;
+	ctl->score = score;
+	ctl->size_mb = size_mb;
+	ctl->count = count;
+	ctl->callback = callback;
+
+	lowmem_queue_request(&lmw, ctl);
+
+	return RESOURCED_ERROR_NONE;
+}
+
void lowmem_trigger_swap_reclaim(enum oom_score score, unsigned long long swap_size_bytes)
{
int size_mb, victims;
void lowmem_initialize_controller_ops_action(int mem_state, int (*action)(void *data));
unsigned int lowmem_get_lowmem_state();
void lowmem_change_lowmem_state(unsigned int mem_state);
-void lowmem_memory_level_send_system_event(int lv);
bool lowmem_get_memcg_swap_status();
void lowmem_set_memcg_swap_status(bool status);
+/*
+ * Reclaim request handed to the LMK worker queue.
+ *
+ * For each queued request the following properties
+ * are required with two exceptions:
+ * - status is being set by LMK
+ * - callback is optional
+ */
+struct lowmem_control {
+	/* Processing flags */
+	unsigned int flags;
+	/* Indicator for OOM score of targeted processes */
+	enum oom_score score;
+
+	/* Desired size to be restored - level to be reached (MB) */
+	unsigned int size_mb;
+	/* Max number of processes to be considered */
+	unsigned int count;
+	/* Memory reclaim status (LOWMEM_RECLAIM_*), written by LMK */
+	int status;
+	/*
+	 * Optional - if set, will be triggered by LMK once the request
+	 * is handled.
+	 */
+	void (*callback) (struct lowmem_control *);
+};
+
+/* Returns non-zero while the LMK reclaim worker thread is processing. */
+int lowmem_worker_is_running(void);
+/* Allocates and enqueues a reclaim request; returns RESOURCED_ERROR_*. */
+int lowmem_queue_new_request(unsigned int flags, enum oom_score score,
+		unsigned int size_mb, unsigned int count,
+		void (*callback) (struct lowmem_control *));
+/* Sets or clears the pending OOM popup flag. */
+void lowmem_set_oom_popup(bool popup);
+/* Returns the configured maximum number of LMK victims. */
+int lowmem_get_num_max_victims(void);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */