AC_PREREQ([2.68])
-AC_INIT([nitra-corewatcher],[0.9.2],[timothy.c.pepper@linux.intel.com])
+AC_INIT([nitra-corewatcher],[0.9.3],[timothy.c.pepper@linux.intel.com])
AM_INIT_AUTOMAKE([foreign -Wall -Werror])
AC_CONFIG_FILES([Makefile src/Makefile])
AC_CONFIG_SRCDIR([src/corewatcher.c])
AM_PROG_CC_C_O
AC_PROG_INSTALL
-# Checks for libraries.
-AC_CHECK_LIB([pthread], [pthread_mutex_unlock], , AC_MSG_ERROR([libpthread is required but was not found]))
-
# PkgConfig tests
PKG_CHECK_MODULES([GLIB2], [glib-2.0])
PKG_CHECK_MODULES([LIBPROXY], [libproxy-1.0])
#include <string.h>
#include <assert.h>
#include <fcntl.h>
-#include <pthread.h>
#include <asm/unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
const char *core_folder = "/var/lib/corewatcher/";
const char *processed_folder = "/var/lib/corewatcher/processed/";
+/*
+ * the application must initialize the GMutex's
+ * core_status.processing_mtx, core_status.queued_mtx,
+ * processing_queue_mtx and gdb_mtx
+ * before calling into this file's scan_corefolders()
+ * (also since that calls submit_queue() there are dependencies
+ * there which need to be taken care of too)
+ */
/* Always pick up the processing_mtx and then the
processing_queue_mtx, reverse for setting down */
/* Always pick up the gdb_mtx and then the
   processing_queue_mtx, reverse for setting down */
/* so order for pick up should be:
processing_mtx -> gdb_mtx -> processing_queue_mtx
and the reverse for setting down */
-static pthread_mutex_t processing_queue_mtx = PTHREAD_MUTEX_INITIALIZER;
+GMutex processing_queue_mtx;
static char *processing_queue[MAX_PROCESSING_OOPS];
-static pthread_mutex_t gdb_mtx = PTHREAD_MUTEX_INITIALIZER;
-static int tail = 0;
-static int head = 0;
+static int pq_tail = 0;
+static int pq_head = 0;
+GMutex gdb_mtx;
static char *get_release(void)
{
/*
* Finds the full path for the application that crashed,
- * and depending on what opted_in was configured as will:
- * opted_in 2 (always submit) -> move file to processed_folder
- * to be processed further
- * opted_in 1 (ask user) -> ask user if we should submit
- * the crash and add to asked_oops hash so we don't get
- * called again for this corefile
- * opted_in 0 (don't submit) -> do nothing
- *
- * Picks up and sets down the asked_mtx.
+ * and moves file to processed_folder for processing
*/
static char *get_appfile(char *fullpath)
{
*/
static void remove_from_processing_queue(void)
{
- free(processing_queue[head]);
- processing_queue[head++] = NULL;
+ free(processing_queue[pq_head]);
+ processing_queue[pq_head++] = NULL;
- if (head == MAX_PROCESSING_OOPS)
- head = 0;
+ if (pq_head == MAX_PROCESSING_OOPS)
+ pq_head = 0;
}
/*
struct oops *oops = NULL;
char *procfn = NULL, *corefn = NULL, *fullpath = NULL;
- pthread_mutex_lock(&core_status.processing_mtx);
- pthread_mutex_lock(&gdb_mtx);
- pthread_mutex_lock(&processing_queue_mtx);
+ g_mutex_lock(&core_status.processing_mtx);
+ g_mutex_lock(&gdb_mtx);
+ g_mutex_lock(&processing_queue_mtx);
- if (!(fullpath = processing_queue[head])) {
+ if (!(fullpath = processing_queue[pq_head])) {
/* something went quite wrong */
- pthread_mutex_unlock(&processing_queue_mtx);
- pthread_mutex_unlock(&gdb_mtx);
- pthread_mutex_unlock(&core_status.processing_mtx);
+ g_mutex_unlock(&processing_queue_mtx);
+ g_mutex_unlock(&gdb_mtx);
+ g_mutex_unlock(&core_status.processing_mtx);
return NULL;
}
remove_from_processing_queue();
- pthread_mutex_unlock(&processing_queue_mtx);
- pthread_mutex_unlock(&gdb_mtx);
- pthread_mutex_unlock(&core_status.processing_mtx);
+ g_mutex_unlock(&processing_queue_mtx);
+ g_mutex_unlock(&gdb_mtx);
+ g_mutex_unlock(&core_status.processing_mtx);
write_core_detail_file(corefn, oops->text);
- pthread_mutex_lock(&core_status.queued_mtx);
+ g_mutex_lock(&core_status.queued_mtx);
queue_backtrace(oops);
- pthread_mutex_unlock(&core_status.queued_mtx);
+ g_mutex_unlock(&core_status.queued_mtx);
/* don't need to free procfn because was set to oops->filename and that gets free'd */
free(corefn);
procfn = NULL; /* don't know if oops->filename == procfn so be safe */
free(corefn);
FREE_OOPS(oops);
- pthread_mutex_unlock(&processing_queue_mtx);
- pthread_mutex_unlock(&gdb_mtx);
- pthread_mutex_unlock(&core_status.processing_mtx);
+ g_mutex_unlock(&processing_queue_mtx);
+ g_mutex_unlock(&gdb_mtx);
+ g_mutex_unlock(&core_status.processing_mtx);
return NULL;
}
struct oops *oops = NULL;
char *corefn = NULL, *fullpath = NULL;
- pthread_mutex_lock(&gdb_mtx);
- pthread_mutex_lock(&processing_queue_mtx);
+ g_mutex_lock(&gdb_mtx);
+ g_mutex_lock(&processing_queue_mtx);
- if (!(fullpath = processing_queue[head])) {
+ if (!(fullpath = processing_queue[pq_head])) {
/* something went quite wrong */
- pthread_mutex_unlock(&processing_queue_mtx);
- pthread_mutex_unlock(&gdb_mtx);
+ g_mutex_unlock(&processing_queue_mtx);
+ g_mutex_unlock(&gdb_mtx);
return NULL;
}
remove_from_processing_queue();
- pthread_mutex_unlock(&processing_queue_mtx);
- pthread_mutex_unlock(&gdb_mtx);
+ g_mutex_unlock(&processing_queue_mtx);
+ g_mutex_unlock(&gdb_mtx);
- pthread_mutex_lock(&core_status.queued_mtx);
+ g_mutex_lock(&core_status.queued_mtx);
queue_backtrace(oops);
- pthread_mutex_unlock(&core_status.queued_mtx);
+ g_mutex_unlock(&core_status.queued_mtx);
free(corefn);
FREE_OOPS(oops);
remove_from_processing_queue();
free(corefn);
FREE_OOPS(oops);
- pthread_mutex_unlock(&processing_queue_mtx);
- pthread_mutex_unlock(&gdb_mtx);
+ g_mutex_unlock(&processing_queue_mtx);
+ g_mutex_unlock(&gdb_mtx);
return NULL;
}
free(c1);
c1 = NULL;
- pthread_mutex_lock(&core_status.processing_mtx);
+ g_mutex_lock(&core_status.processing_mtx);
if (g_hash_table_lookup(core_status.processing_oops, c2)) {
- pthread_mutex_unlock(&core_status.processing_mtx);
+ g_mutex_unlock(&core_status.processing_mtx);
goto clean_add_to_processing;
}
- pthread_mutex_lock(&processing_queue_mtx);
- if (processing_queue[tail]) {
- pthread_mutex_unlock(&processing_queue_mtx);
- pthread_mutex_unlock(&core_status.processing_mtx);
+ g_mutex_lock(&processing_queue_mtx);
+ if (processing_queue[pq_tail]) {
+ g_mutex_unlock(&processing_queue_mtx);
+ g_mutex_unlock(&core_status.processing_mtx);
goto clean_add_to_processing;
}
g_hash_table_insert(core_status.processing_oops, c2, c2);
- processing_queue[tail++] = fp;
- if (tail == MAX_PROCESSING_OOPS)
- tail = 0;
+ processing_queue[pq_tail++] = fp;
+ if (pq_tail == MAX_PROCESSING_OOPS)
+ pq_tail = 0;
- pthread_mutex_unlock(&processing_queue_mtx);
- pthread_mutex_unlock(&core_status.processing_mtx);
+ g_mutex_unlock(&processing_queue_mtx);
+ g_mutex_unlock(&core_status.processing_mtx);
return 0;
clean_add_to_processing:
free(fp);
*/
static void process_corefile(char *fullpath)
{
- pthread_t thrd;
+ GThread *thrd = NULL;
int r = 1;
r = add_to_processing(fullpath);
if (r)
return;
- if (pthread_create(&thrd, NULL, process_new, NULL))
- fprintf(stderr, "Couldn't start up gdb extract core thread\n");
+ thrd = g_thread_new("process_new", process_new, NULL);
+ if (thrd == NULL)
+ fprintf(stderr, "Couldn't start thread for process_new()\n");
}
/*
*/
static void reprocess_corefile(char *fullpath)
{
- pthread_t thrd;
+ GThread *thrd = NULL;
int r = 0;
r = add_to_processing(fullpath);
if (r)
return;
- if (pthread_create(&thrd, NULL, process_old, NULL))
- fprintf(stderr, "Couldn't start up gdb extract core thread\n");
+ thrd = g_thread_new("process_old", process_old, NULL);
+ if (thrd == NULL)
+ fprintf(stderr, "Couldn't start thread for process_old()\n");
}
-int scan_corefolders(void __unused *unused)
+static void scan_core_folder(void __unused *unused)
{
+ /* scan for new crash data */
DIR *dir = NULL;
struct dirent *entry = NULL;
char *fullpath = NULL, *appfile = NULL;
int r = 0;
- pthread_mutex_init(&core_status.processing_mtx, NULL);
- pthread_mutex_init(&core_status.queued_mtx, NULL);
- pthread_mutex_init(&core_status.asked_mtx, NULL);
-
- /* scan for new crash data */
dir = opendir(core_folder);
if (!dir)
- return 1;
+ return;
fprintf(stderr, "+ scanning %s...\n", core_folder);
while(1) {
free(fullpath);
} else if (((unsigned int)r) != strlen(core_folder) + strlen(entry->d_name)) {
continue;
}
- /* already found, waiting for response from user */
- pthread_mutex_lock(&core_status.asked_mtx);
- if (g_hash_table_lookup(core_status.asked_oops, fullpath)) {
- pthread_mutex_unlock(&core_status.asked_mtx);
- continue;
- }
- pthread_mutex_unlock(&core_status.asked_mtx);
+
+ /* If one were to prompt the user before submitting, that
+ * might happen here. */
+
fprintf(stderr, "+ Looking at %s\n", fullpath);
appfile = get_appfile(fullpath);
}
}
closedir(dir);
+}
+static void scan_processed_folder(void __unused *unused)
+{
/* scan for partially processed crash data */
+ DIR *dir = NULL;
+ struct dirent *entry = NULL;
+ char *fullpath = NULL;
+ int r = 0;
+
dir = opendir(processed_folder);
if (!dir)
- return 1;
+ return;
fprintf(stderr, "+ scanning %s...\n", processed_folder);
while(1) {
free(fullpath);
reprocess_corefile(fullpath);
}
closedir(dir);
+}
+
+int scan_corefolders(void __unused *unused)
+{
+ scan_core_folder(NULL);
+ scan_processed_folder(NULL);
submit_queue();
#include <getopt.h>
#include <sys/prctl.h>
#include <asm/unistd.h>
-#include <pthread.h>
#include <curl/curl.h>
#include <dirent.h>
#include <sys/stat.h>
#include "corewatcher.h"
+/*
+ * rather than malloc() on each inotify event, preallocate a decent chunk
+ * of memory so multiple events can be read in one go, trading a little
+ * extra memory for less runtime overhead if/when multiple crashes happen
+ * in short order.
+ */
+#include <sys/inotify.h>
+#define BUF_LEN 2048
static struct option opts[] = {
{ "nodaemon", 0, NULL, 'n' },
fprintf(stderr, " -h, --help Display this help message\n");
}
+gboolean inotify_source_prepare(__unused GSource *source, gint *timeout_)
+{
+ *timeout_ = -1;
+ fprintf(stderr, "+ inotification prepare\n");
+ return FALSE;
+}
+
+gboolean inotify_source_check(__unused GSource *source)
+{
+	int fd, wd;
+	char buffer[BUF_LEN];
+	ssize_t len;
+
+	fprintf(stderr, "+ inotification check\n");
+	/* inotification of crashes */
+	fd = inotify_init();
+	if (fd < 0) {
+		fprintf(stderr, "corewatcher inotify init failed.. exiting\n");
+		return FALSE;
+	}
+	wd = inotify_add_watch(fd, core_folder, IN_CLOSE_WRITE);
+	if (wd < 0) {
+		fprintf(stderr, "corewatcher inotify add failed.. exiting\n");
+		close(fd);
+		return FALSE;
+	}
+	fprintf(stderr, "+ awaiting inotification...\n");
+	len = read(fd, buffer, BUF_LEN);
+	/* the watch (and fd) are recreated on each check; close now so we
+	 * don't leak one inotify instance per main loop iteration */
+	close(fd);
+	if (len <= 0) {
+		fprintf(stderr, "corewatcher inotify read failed.. exiting\n");
+		return FALSE;
+	}
+	fprintf(stderr, "+ inotification received!\n");
+	/* for now simply ignore the actual crash files we've been notified of
+	 * and let our callback be dispatched to go look for any/all crash
+	 * files */
+
+	/* slight delay to minimize storms of notifications (the inotify
+	 * read() can return a batch of notifications) */
+	sleep(5);
+	return TRUE;
+}
+
+gboolean inotify_source_dispatch(__unused GSource *source,
+ GSourceFunc callback, gpointer user_data)
+{
+ fprintf(stderr, "+ inotify dispatch\n");
+ if(callback(user_data)) {
+ fprintf(stderr, "+ inotify dispatch success\n");
+ return TRUE;
+ } else {
+ //should not happen as our callback always returns 1
+ fprintf(stderr, "+ inotify dispatch failed.\n");
+ return FALSE;
+ }
+}
+
+void *inotify_loop(void __unused *unused)
+{
+ /* inotification of crashes */
+ GMainLoop *loop;
+ GMainContext *context;
+ GSource *source;
+ GSourceFuncs InotifySourceFuncs = {
+ inotify_source_prepare,
+ inotify_source_check,
+ inotify_source_dispatch,
+ NULL,
+ NULL,
+ NULL,
+ };
+
+ context = g_main_context_new();
+ loop = g_main_loop_new(context, FALSE);
+ loop = g_main_loop_ref(loop);
+ source = g_source_new(&InotifySourceFuncs, sizeof(GSource));
+ g_source_attach(source, context);
+ g_source_set_callback(source, scan_corefolders, NULL, NULL);
+ g_main_loop_run(loop);
+ g_main_loop_unref(loop);
+
+ return NULL;
+}
+
int main(int argc, char**argv)
{
GMainLoop *loop;
int debug = 0;
int j = 0;
DIR *dir = NULL;
+ GThread *inotify_thread = NULL;
+
+ g_thread_init (NULL);
- core_status.asked_oops = g_hash_table_new_full(g_str_hash, g_str_equal, free, free);
core_status.processing_oops = g_hash_table_new_full(g_str_hash, g_str_equal, free, NULL);
core_status.queued_oops = g_hash_table_new(g_str_hash, g_str_equal);
- if (pthread_mutex_init(&core_status.asked_mtx, NULL))
- return EXIT_FAILURE;
- if (pthread_mutex_init(&core_status.processing_mtx, NULL))
- return EXIT_FAILURE;
- if (pthread_mutex_init(&core_status.queued_mtx, NULL))
- return EXIT_FAILURE;
/*
* Signal the kernel that we're not timing critical
sched_yield();
loop = g_main_loop_new(NULL, FALSE);
+ loop = g_main_loop_ref(loop);
if (!debug)
sleep(20);
- scan_corefolders(NULL);
-
if (testmode) {
- g_main_loop_unref(loop);
- for (j = 0; j < url_count; j++)
- free(submit_url[j]);
- g_hash_table_destroy(core_status.asked_oops);
- g_hash_table_destroy(core_status.processing_oops);
- g_hash_table_destroy(core_status.queued_oops);
- pthread_mutex_destroy(&core_status.asked_mtx);
- pthread_mutex_destroy(&core_status.processing_mtx);
- pthread_mutex_destroy(&core_status.queued_mtx);
+ scan_corefolders(NULL);
fprintf(stderr, "+ Exiting from testmode\n");
- return EXIT_SUCCESS;
+ goto out;
}
- /* now, start polling for oopses to occur */
+ inotify_thread = g_thread_new("corewatcher inotify", inotify_loop, NULL);
+ if (inotify_thread == NULL)
+ fprintf(stderr, "+ Unable to start inotify thread\n");
- g_timeout_add_seconds(10, scan_corefolders, NULL);
+ /*
+ * TODO: add a thread / event source tied to a connmand plugin
+ * o network up: trigger scan_corefolders(), enables event sources
+ * o network down: disable sources (or allow them to run and create
+ * a low quality crash reports?)
+ * o low bandwidth net up: allow transmitting of .txt crash
+ * summaries (ie: no running gdb)
+ * o high bandwidth net up: look at existing cores vs .txt's for
+ * quality and if debuginfo is retrievable, try to improve
+ * the report quality and submit again
+ */
- g_main_loop_run(loop);
+ /*
+ * long poll for crashes: at inotify time we might not have been
+ * able to fully process things, here we'd push those reports out
+ */
+ g_timeout_add_seconds(900, scan_corefolders, NULL);
+ g_main_loop_run(loop);
+out:
g_main_loop_unref(loop);
+
for (j = 0; j < url_count; j++)
free(submit_url[j]);
- g_hash_table_destroy(core_status.asked_oops);
g_hash_table_destroy(core_status.processing_oops);
g_hash_table_destroy(core_status.queued_oops);
- pthread_mutex_destroy(&core_status.asked_mtx);
- pthread_mutex_destroy(&core_status.processing_mtx);
- pthread_mutex_destroy(&core_status.queued_mtx);
return EXIT_SUCCESS;
}
processing_mtx, reverse for setting down */
/* Considering the static mutexes the total global order should be:
queued_mtx -> processing_mtx -> gdb_mtx ->processing_queue_mtx */
-/* The asked_mtx doesn't overlap with any of these */
struct core_status {
- GHashTable *asked_oops;
GHashTable *processing_oops;
GHashTable *queued_oops;
- pthread_mutex_t asked_mtx;
- pthread_mutex_t processing_mtx;
- pthread_mutex_t queued_mtx;
+ GMutex processing_mtx;
+ GMutex queued_mtx;
};
/* submit.c */
+extern GMutex queued_bt_mtx;
extern void queue_backtrace(struct oops *oops);
extern void submit_queue(void);
extern char *replace_name(char *filename, char *replace, char *new);
/* coredump.c */
+extern GMutex processing_queue_mtx;
+extern GMutex gdb_mtx;
extern int move_core(char *fullpath, char *ext);
extern int scan_corefolders(void * unused);
extern char *strip_directories(char *fullpath);
/* configfile.c */
extern void read_config_file(char *filename);
-extern int opted_in;
extern int allow_distro_to_pass_on;
extern char *submit_url[MAX_URLS];
extern int url_count;
#include <sys/stat.h>
#include <glib.h>
#include <asm/unistd.h>
-#include <pthread.h>
#include <proxy.h>
#include <curl/curl.h>
#include "corewatcher.h"
+/*
+ * the application must initialize the GMutex queued_bt_mtx
+ * before calling into this file's functions
+ */
/* Always pick up the queued_mtx and then the
queued_bt_mtx, reverse for setting down */
-static pthread_mutex_t queued_bt_mtx = PTHREAD_MUTEX_INITIALIZER;
+GMutex queued_bt_mtx;
static struct oops *queued_backtraces = NULL;
static char result_url[4096];
new = malloc(sizeof(struct oops));
if (!new)
return;
- pthread_mutex_lock(&queued_bt_mtx);
+ g_mutex_lock(&queued_bt_mtx);
new->next = queued_backtraces;
if (oops->application)
new->application = strdup(oops->application);
else
new->detail_filename = NULL;
queued_backtraces = new;
- pthread_mutex_unlock(&queued_bt_mtx);
+ g_mutex_unlock(&queued_bt_mtx);
g_hash_table_insert(core_status.queued_oops, new->filename, new->filename);
}
struct oops *oops = NULL, *next = NULL, *queue = NULL;
int count = 0;
- pthread_mutex_lock(&queued_bt_mtx);
+ g_mutex_lock(&queued_bt_mtx);
queue = queued_backtraces;
queued_backtraces = NULL;
barrier();
oops = next;
count++;
}
- pthread_mutex_unlock(&queued_bt_mtx);
- pthread_mutex_lock(&core_status.processing_mtx);
+ g_mutex_unlock(&queued_bt_mtx);
+ g_mutex_lock(&core_status.processing_mtx);
g_hash_table_remove_all(core_status.processing_oops);
- pthread_mutex_unlock(&core_status.processing_mtx);
+ g_mutex_unlock(&core_status.processing_mtx);
g_hash_table_remove_all(core_status.queued_oops);
}
char *nf = NULL;
nf = replace_name(oops->filename, ".processed", ".submitted");
rename(oops->filename, nf);
- pthread_mutex_lock(&core_status.processing_mtx);
+ g_mutex_lock(&core_status.processing_mtx);
remove_pid_from_hash(oops->filename, core_status.processing_oops);
- pthread_mutex_unlock(&core_status.processing_mtx);
+ g_mutex_unlock(&core_status.processing_mtx);
free(nf);
g_hash_table_remove(core_status.queued_oops, oops->filename);
char **proxies = NULL;
char *proxy = NULL;
- pthread_mutex_lock(&core_status.queued_mtx);
+ g_mutex_lock(&core_status.queued_mtx);
if (!g_hash_table_size(core_status.queued_oops)) {
- pthread_mutex_unlock(&core_status.queued_mtx);
+ g_mutex_unlock(&core_status.queued_mtx);
return;
}
if (testmode) {
print_queue();
- pthread_mutex_unlock(&core_status.queued_mtx);
+ g_mutex_unlock(&core_status.queued_mtx);
return;
}
- pthread_mutex_lock(&queued_bt_mtx);
+ g_mutex_lock(&queued_bt_mtx);
queue = queued_backtraces;
queued_backtraces = NULL;
barrier();
- pthread_mutex_unlock(&queued_bt_mtx);
+ g_mutex_unlock(&queued_bt_mtx);
pf = px_proxy_factory_new();
handle = curl_easy_init();
oops = next;
}
} else {
- pthread_mutex_lock(&queued_bt_mtx);
+ g_mutex_lock(&queued_bt_mtx);
queued_backtraces = queue;
- pthread_mutex_unlock(&queued_bt_mtx);
+ g_mutex_unlock(&queued_bt_mtx);
}
curl_easy_cleanup(handle);
curl_global_cleanup();
- pthread_mutex_unlock(&core_status.queued_mtx);
+ g_mutex_unlock(&core_status.queued_mtx);
}