Currently only fastlz is added; miniz will be added later.
Change-Id: I8703d45a2a5253a5823968fb81b5a9074a747a1c
Signed-off-by: Michal Bloch <m.bloch@samsung.com>
external/fastlz/fastlz.c \
src/logger/logger.c \
src/logger/logger_privileges.c \
+ src/logger/compression_fastlz.c \
src/logger/dlogutil_line.c \
src/logger/fd_entity.c \
src/logger/log_buffer.c \
src_tests_test_logger_log_storage_CFLAGS = $(check_CFLAGS)
src_tests_test_logger_log_storage_LDFLAGS = $(AM_LDFLAGS) -Wl,--wrap=malloc
-src_tests_test_logger_compressed_storage_SOURCES = src/tests/test_logger_compressed_storage.c src/logger/log_compressed_storage.c src/shared/ptrs_list.c src/shared/queued_entry_timestamp.c external/fastlz/fastlz.c
+src_tests_test_logger_compressed_storage_SOURCES = src/tests/test_logger_compressed_storage.c \
+ src/logger/compression_fastlz.c \
+ src/logger/log_compressed_storage.c \
+ src/shared/ptrs_list.c \
+ src/shared/queued_entry_timestamp.c \
+ external/fastlz/fastlz.c
src_tests_test_logger_compressed_storage_CFLAGS = $(check_CFLAGS)
src_tests_test_logger_compressed_storage_LDFLAGS = $(AM_LDFLAGS) -Wl,--wrap=malloc
PARSE_VERSION_REQUESTED, /**< Version requested */
PARSE_HELP_REQUESTED, /**< Help requested */
PARSE_NO_PARSE, /**< getopt failure */
+ PARSE_MISSING_COMPRESSED_BUFFER, /**< Missing destination name */
} status; /**< Parse status */
union {
struct {
unsigned dump_size; /**< If dump mode is enabled, number of logs to be dumped, or #DLOGUTIL_MAX_DUMP_SIZE if infinite */
dlogutil_sorting_order_e sort_by; /**< The timestamp type to be sorted on */
char *compression; /**< The compression storage name */
+ char *mem_algo; /**< Name of the compression algo to be used */
};
const char *which_option; /**< An statically allocated string meaning an option, including the initial dash(es) */
const char *bad_contents; /**< An string from input argument which has been not a valid argument */
* but too large defeats the point */
#define COMPRESSION_CHUNK_SIZE 131072
-/* The minimum required buffer size for the above, which is
- * what the result of the FASTLZ_NEEDED_OUTPUT_SIZE macro
- * would return if it didn't ruin constant contexts such as
- * struct member array sizes. */
-#define COMPRESSION_CHUNK_SIZE_OUT 137626
+#include <stdbool.h>
+#include <stddef.h>
+
+/* Interface for a pluggable compression algorithm, used by the compressed
+ * log storage (selected by name via get_algo_by_name()). Currently only
+ * fastlz implements it; miniz is planned. */
+struct compression_algo {
+	/* Compress size_in bytes from `in` into `compressed`; returns the
+	 * compressed size. A return of 0 is treated as failure by callers. */
+	int (*comp)(const char *in, size_t size_in, char *compressed, size_t compressed_capacity);
+	/* Decompress into `decompressed`; returns the number of bytes produced
+	 * (callers assert it equals the original uncompressed size). */
+	int (*decomp)(const char *compressed, size_t size_compressed, char *decompressed, size_t size_decompressed);
+	/* Whether `data`/`size` is acceptable input for comp
+	 * (e.g. fastlz requires at least 16 bytes of input). */
+	bool (*is_legal)(char *data, size_t size);
+	/* Worst-case output buffer size needed to compress input_size bytes;
+	 * used to size the per-storage compression workspace. */
+	size_t (*get_workspace_size)(size_t input_size);
+};
+
+/* The fastlz implementation of this interface (compression_fastlz.c). */
+extern struct compression_algo fastlz;
--- /dev/null
+#include "log_compressed_storage.h"
+#include "fastlz.h"
+
+/* compression_algo::comp adapter. fastlz_compress takes no capacity argument;
+ * the caller guarantees `compressed` holds at least
+ * FASTLZ_NEEDED_OUTPUT_SIZE(size_in) bytes (via get_workspace_size), so
+ * compressed_capacity is intentionally unused here. */
+static int fastlz_compression(const char *in, size_t size_in, char *compressed, size_t compressed_capacity)
+{
+	(void) compressed_capacity;
+	return fastlz_compress(in, size_in, compressed);
+}
+
+/* compression_algo::decomp adapter; a thin wrapper around fastlz_decompress.
+ * Returns the number of decompressed bytes written into `decompressed`
+ * (callers assert it equals the stored entry's original size). */
+static int fastlz_decompression(const char *compressed, size_t size_compressed, char *decompressed, size_t size_decompressed)
+{
+	return fastlz_decompress(compressed, size_compressed, decompressed, size_decompressed);
+}
+
+/* compression_algo::is_legal adapter. fastlz requires at least 16 bytes of
+ * input; the data contents themselves do not matter, so `data` is
+ * intentionally unused (cast to void to silence -Wunused-parameter,
+ * matching fastlz_compression above). */
+static bool fastlz_is_legal_to_compress(char *data, size_t size)
+{
+	(void) data;
+	return size >= 16;
+}
+
+/* compression_algo::get_workspace_size adapter: worst-case compressed size
+ * for input_size bytes, as given by the FASTLZ_NEEDED_OUTPUT_SIZE macro. */
+static size_t fastlz_get_workspace_size(size_t input_size)
+{
+	return FASTLZ_NEEDED_OUTPUT_SIZE(input_size);
+}
+
+/* The fastlz implementation of the compression_algo interface,
+ * exported for selection by name in the compressed log storage. */
+struct compression_algo fastlz = {
+	.comp = fastlz_compression,
+	.decomp = fastlz_decompression,
+	.is_legal = fastlz_is_legal_to_compress,
+	.get_workspace_size = fastlz_get_workspace_size,
+};
+
+
params->buf_id = LOG_ID_INVALID;
params->file_path = NULL;
params->compression = NULL;
+ params->mem_algo = NULL;
params->filter = log_filter_new();
if (!params->filter)
log_filter_free(params->filter);
free(params->compression);
free(params->file_path);
+ free(params->mem_algo);
}
static int make_argc_argv_from_dlogutil_line(const char *cmdl, size_t buf_size, char buffer[buf_size], int *argc, char **argv)
}
params->compression = pr.compression ? strdup(pr.compression) : NULL;
+ params->mem_algo = pr.mem_algo ? strdup(pr.mem_algo) : NULL;
params->file.rotate_size_kbytes = pr.rotate_size_kbytes;
params->file.max_rotated = pr.max_rotated;
params->file.format.format = pr.format;
log_id_t buf_id; /**< The buffer being dumped, or 0 if none selected */
char *file_path; /**< The file to be written to, or NULL if none selected */
struct log_filter *filter; /**< The filter to be applied to the logs */
+ char *mem_algo; /**< Name of the compression algo to be used */
};
bool initialize_dlogutil_line_params(struct dlogutil_line_params *params, struct buf_params buf);
*/
#include "log_compressed_storage.h"
#include "fastlz.h"
-#include "compression_common.h"
#include <ptrs_list.h>
#include <stdlib.h>
uint64_t counter_end; // "pointer" to the end of the stored entry stream
log_compressed_storage_entry *entries; // the entries are stored here
list_head waiting_readers; // these are the readers that did not read anything from the current entries
- char compression_workspace[COMPRESSION_CHUNK_SIZE_OUT];
+ char *compression_workspace;
+ size_t compression_workspace_size;
+ struct compression_algo *algo; // compression algorithm implementation
};
struct log_compressed_storage_reader {
reader->current = lse;
}
-log_compressed_storage *log_compressed_storage_create(unsigned capacity, const char *name)
+/* Exposes the storage's compression algorithm to other modules
+ * (e.g. the memory subreader's is_legal flush check). */
+struct compression_algo *get_algo_from_storage(const struct log_compressed_storage *storage)
{
+	return storage->algo;
+}
+
+/* Maps a user-supplied algorithm name to its implementation.
+ * A NULL name means "unspecified" and selects the default (fastlz);
+ * an unrecognized name returns NULL, which makes storage creation fail. */
+static struct compression_algo *get_algo_by_name(const char *algo_name)
+{
+	// default if unspecified
+	if (!algo_name)
+		return &fastlz;
+
+	if (!strcmp(algo_name, "fastlz"))
+		return &fastlz;
+	return NULL;
+}
+
+log_compressed_storage *log_compressed_storage_create(unsigned capacity, const char *name, const char *algo_name)
+{
+ struct compression_algo *algo = get_algo_by_name(algo_name);
+ if (!algo)
+ return NULL;
+
log_compressed_storage *storage = malloc(sizeof(*storage));
if (!storage)
return NULL;
return NULL;
}
+ storage->compression_workspace_size = algo->get_workspace_size(COMPRESSION_CHUNK_SIZE);
+ storage->compression_workspace = malloc(storage->compression_workspace_size);
+ if (!storage->compression_workspace) {
+ free(storage->name);
+ free(storage);
+ return NULL;
+ }
+
+ storage->algo = algo;
storage->capacity = capacity;
storage->counter_begin = 0;
storage->counter_end = 0;
log_compressed_storage_clear(storage);
// disconnect all the remaining readers
list_foreach(storage->waiting_readers, NULL, log_compressed_storage_reader_set_storage);
+ free(storage->compression_workspace);
free(storage->name);
free(storage);
}
assert(storage);
assert(buf_in);
- assert(size_in >= 16); // fastlz requirement
- assert(size_in <= COMPRESSION_CHUNK_SIZE); // compression workspace capacity
+ assert(storage->algo->is_legal(buf_in, size_in));
+ assert(size_in <= COMPRESSION_CHUNK_SIZE);
- size_t size_out = (size_t) fastlz_compress(buf_in, size_in, storage->compression_workspace);
+ size_t size_out = storage->algo->comp(buf_in, size_in, storage->compression_workspace, storage->compression_workspace_size);
if (size_out == 0)
return false;
(NULL == reader->current && NULL != reader->storage->entries);
}
-const struct compression_entry *log_compressed_storage_reader_get_new_entry(log_compressed_storage_reader *reader)
+size_t log_compressed_storage_reader_get_new_entry(log_compressed_storage_reader *reader, char *uncompressed)
{
assert(reader);
// storage is NULL if a dumping/one-shot reader has finished reading
if (NULL == reader->storage)
- return NULL;
+ return 0;
+ const struct compression_algo *const algo = reader->storage->algo; // save in case a dumping reader gets disconnected (nulling reader->storage)
if (NULL == reader->current) {
if (NULL != reader->storage->entries) {
// there are some logs in the buffer, let's get them
list_add(&reader->current->readers, reader);
}
- if (NULL != ce)
- reader->counter += ce->size_out ?: ce->size_in;
+ if (NULL == ce)
+ return 0;
+
+ reader->counter += ce->size_out ?: ce->size_in;
+
+ if (!ce->size_out){
+ // Data weren't compressed
+ memcpy(uncompressed, ce->compressed_data, ce->size_in);
+ } else {
+ size_t decompressed_size = (size_t) algo->decomp(ce->compressed_data, ce->size_out, uncompressed, ce->size_in);
+ assert(decompressed_size == ce->size_in);
+ }
- return ce;
+ return ce->size_in;
}
uint64_t log_compressed_storage_reader_get_ready_bytes(const log_compressed_storage_reader *reader)
* limitations under the License.
*/
+#include "compression_common.h"
+
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
* @brief Creates a storage instance.
* @param[in] capacity Maximum capacity of the created instance.
* @param[in] name Name of the instance for identification.
+ * @param[in] algo_name Name of the compression algo to be used.
* @return The created storage instance or NULL in case of lack of memory.
*/
-log_compressed_storage *log_compressed_storage_create(unsigned capacity, const char *name);
+log_compressed_storage *log_compressed_storage_create(unsigned capacity, const char *name, const char *algo_name);
/**
* @brief Frees the storage and all the resources associated with it.
* @brief This function is responsible for retrieving next unread log entry from the storage associated
* with a reader.
* @param[in] reader The reader which will be used for reading.
- * @return A new, freshly retrieved, shiny log entry.
- * @note Enjoy the entry, but do not free it. It is not yours. Do not store the pointer (for a long time),
- * because this log entry may be deleted by the storage without warning. Precisely, when you put
- * another log entry into the storage.
+ * @param[in] uncompressed The buffer which will hold the uncompressed entry.
+ * @return Size of the uncompressed entry.
*/
-const struct compression_entry *log_compressed_storage_reader_get_new_entry(log_compressed_storage_reader *reader);
+size_t log_compressed_storage_reader_get_new_entry(log_compressed_storage_reader *reader, char *uncompressed);
/**
* @brief Returns the storage that is the owner of a reader.
*/
const char *log_compressed_storage_get_name(const log_compressed_storage *storage);
+/**
+ * @brief Returns a struct, containing set of functions for a specific compression algorithm.
+ * @param[in] storage From which set of the functions will be taken.
+ * @return Struct containing set of functions for a specific compression algorithm.
+ */
+struct compression_algo *get_algo_from_storage(const struct log_compressed_storage *storage);
+
#ifdef __cplusplus
}
#endif
static int create_memory_subreader_for_common(struct dlogutil_line_params *params, struct reader_common *reader, struct logger *server)
{
- struct log_compressed_storage *storage = log_compressed_storage_create(params->file.rotate_size_kbytes * 1024, params->compression);
+ struct log_compressed_storage *storage = log_compressed_storage_create(params->file.rotate_size_kbytes * 1024, params->compression, params->mem_algo);
if (!storage)
return -ENOMEM;
return 0;
}
-void reader_memory_decompress_chunk(struct reader_memory *reader, const struct compression_entry *ce)
-{
- if (reader->uncompressed_chunk_index < reader->uncompressed_chunk_size)
- return;
-
- /* Could also allocate the chunk here instead of a "static" large one,
- * which saves some memory overhead in the optimistic case, but
- * adds a lot of juggling around instead. */
-
- reader->uncompressed_chunk_index = 0;
- reader->uncompressed_chunk_size = ce->size_in;
-
- if (!ce->size_out){
- // Data wasn't compressed
- memcpy(reader->uncompressed_chunk, ce->compressed_data, ce->size_in);
- } else {
- int decompressed_size = fastlz_decompress(ce->compressed_data, ce->size_out, reader->uncompressed_chunk, ce->size_in);
- assert((size_t) decompressed_size == ce->size_in);
- }
-}
-
/**
* @brief Print out logs
* @details Make sure the reader is up to date on printed logs
return r > 0;
while (log_compressed_storage_reader_is_new_entry_available(reader->log_compressed_storage_reader_ptr)) {
- const struct compression_entry *ce = log_compressed_storage_reader_get_new_entry(reader->log_compressed_storage_reader_ptr);
- assert(ce); // `is_new_entry_available` guarantees this
+ const size_t size_uncompressed = log_compressed_storage_reader_get_new_entry(reader->log_compressed_storage_reader_ptr, reader->uncompressed_chunk);
+ if (size_uncompressed == 0)
+ return 1;
- reader_memory_decompress_chunk(reader, ce);
+ reader->uncompressed_chunk_index = 0;
+ reader->uncompressed_chunk_size = size_uncompressed;
r = spend_queued_chunk(reader);
if (r != 0)
struct subreader_memory *const srm = (struct subreader_memory *) userdata;
assert(srm);
- /* Compression minimum buffer requirement.
- * Keep this even if excessive flushing is
- * fixed and the check below is removed. */
- if (srm->to_be_compressed_size < 16)
+ if (srm->to_be_compressed_size < NO_FLUSH_SIZE)
return 0;
- if (srm->to_be_compressed_size < NO_FLUSH_SIZE)
+ if (!get_algo_from_storage(srm->storage)->is_legal(srm->to_be_compressed, srm->to_be_compressed_size))
return 0;
/* Disregard failures (other than a mention), since
case PARSE_NO_PARSE:
show_help(argv[0], true);
return EXIT_FAILURE;
+ case PARSE_MISSING_COMPRESSED_BUFFER:
+ ERR("Error: %s requires the memory buffer name\n", pr.which_option);
+ return EXIT_FAILURE;
}
if (pr.compression != NULL && pr.enabled_buffers != 0) {
size_t max_rotated = DEFAULT_ROTATE_NUM_FILES;
const char *file_path = NULL;
char *compression = NULL;
+ char *mem_algo = NULL;
size_t write_buffer_size = 0;
int enabled_buffers = 0;
action_e action = ACTION_PRINT;
{"color" , required_argument, NULL, 3},
{"sort-by" , required_argument, NULL, 4},
{"monitor" , no_argument, NULL, 5},
+ {"mem_algo", required_argument, NULL, 6},
{"help" , no_argument, NULL, 'h'},
{0}
};
case 5: /* memory compression */
mode = DLOGUTIL_MODE_MONITOR;
break;
+ case 6:
+ mem_algo = optarg;
+ break;
case 'd':
mode = DLOGUTIL_MODE_DUMP;
break;
if (!buffer_made && mode == DLOGUTIL_MODE_DUMP)
write_buffer_size = DEFAULT_WRITE_BUFFER_SIZE;
+ if (mem_algo && mode != DLOGUTIL_MODE_COMPRESSED_MEMORY_DUMP)
+ return (struct parse_result) { .status = PARSE_MISSING_COMPRESSED_BUFFER, };
+
struct parse_result ret = (struct parse_result) {
.status = PARSE_OK,
.colors_auto = colors_auto,
.dump_size = dump_size,
.sort_by = sort_by,
.compression = compression,
+ .mem_algo = mem_algo,
};
filterspecs = NULL;
pid_filters = NULL;
*/
#include "../src/logger/log_compressed_storage.h"
#include "fastlz.h"
+#include "logcommon.h"
#include <stdio.h>
#include <stdlib.h>
const size_t out_weak_size = sizeof weak_compressible_input;
size_t out_good_size;
+char output_buffer[16384];
+const char *g_algo;
+
// we usually don't care about the name
-#define MAKE_STORAGE(x) log_compressed_storage_create(x, "foo")
+#define MAKE_STORAGE(x) log_compressed_storage_create(x, "foo", g_algo)
void test_tiny_storage()
{
assert(r1);
// check there is no log entry stored
assert(!log_compressed_storage_reader_is_new_entry_available(r1));
- assert(NULL == log_compressed_storage_reader_get_new_entry(r1));
+ assert(0 == log_compressed_storage_reader_get_new_entry(r1, NULL));
// remove the reader
log_compressed_storage_release_reader(r1);
assert(callback_count == 2*(i+1));
}
- assert(log_compressed_storage_reader_get_new_entry(r1) == NULL);
- assert(log_compressed_storage_reader_get_new_entry(r2) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r1, NULL) == 0);
+ assert(log_compressed_storage_reader_get_new_entry(r2, NULL) == 0);
log_compressed_storage_clear(s1);
- assert(log_compressed_storage_reader_get_new_entry(r1) == NULL);
- assert(log_compressed_storage_reader_get_new_entry(r2) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r1, NULL) == 0);
+ assert(log_compressed_storage_reader_get_new_entry(r2, NULL) == 0);
log_compressed_storage_release_reader(r1);
log_compressed_storage_release_reader(r2);
// read by both readers, alternately
for (int i = 0; i < CNT; i++) {
- const struct compression_entry *ce = log_compressed_storage_reader_get_new_entry(r1);
- assert(ce->size_in == i + 16);
- ce = log_compressed_storage_reader_get_new_entry(r2);
- assert(ce->size_in == i + 16);
+ size_t size_in = log_compressed_storage_reader_get_new_entry(r1, output_buffer);
+ assert(size_in == i + 16);
+ size_in = log_compressed_storage_reader_get_new_entry(r2, output_buffer);
+ assert(size_in == i + 16);
}
- assert(log_compressed_storage_reader_get_new_entry(r1) == NULL);
- assert(log_compressed_storage_reader_get_new_entry(r2) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r1, NULL) == 0);
+ assert(log_compressed_storage_reader_get_new_entry(r2, NULL) == 0);
log_compressed_storage_release_reader(r1);
log_compressed_storage_release_reader(r2);
// read logs by both readers, sequentially
for (int i = 0; i < CNT; i++) {
- const struct compression_entry *ce = log_compressed_storage_reader_get_new_entry(r1);
- assert(ce->size_in == i + 16);
+ size_t size_in = log_compressed_storage_reader_get_new_entry(r1, output_buffer);
+ assert(size_in == i + 16);
}
- assert(log_compressed_storage_reader_get_new_entry(r1) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r1, NULL) == 0);
for (int i = 0; i < CNT; i++) {
- const struct compression_entry *ce = log_compressed_storage_reader_get_new_entry(r2);
- assert(ce->size_in == i + 16);
+ size_t size_in = log_compressed_storage_reader_get_new_entry(r2, output_buffer);
+ assert(size_in == i + 16);
}
- assert(log_compressed_storage_reader_get_new_entry(r2) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r2, NULL) == 0);
assert(callback_count == 0);
log_compressed_storage_release_reader(r1);
// get reader r2 halfway through
for (int i = 0; i < CNT; i++) {
- const struct compression_entry *ce = log_compressed_storage_reader_get_new_entry(r2);
- assert(ce->size_in == i + 16);
+ size_t size_in = log_compressed_storage_reader_get_new_entry(r2, output_buffer);
+ assert(size_in == i + 16);
}
// get reader r3 to the end
for (int i = 0; i < 2*CNT; i++) {
- const struct compression_entry *ce = log_compressed_storage_reader_get_new_entry(r3);
- assert(ce->size_in == i + 16);
+ size_t size_in = log_compressed_storage_reader_get_new_entry(r3, output_buffer);
+ assert(size_in == i + 16);
}
// clear the storage
assert(callback_count == 3*CNT);
- assert(log_compressed_storage_reader_get_new_entry(r1) == NULL);
- assert(log_compressed_storage_reader_get_new_entry(r2) == NULL);
- assert(log_compressed_storage_reader_get_new_entry(r3) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r1, NULL) == 0);
+ assert(log_compressed_storage_reader_get_new_entry(r2, NULL) == 0);
+ assert(log_compressed_storage_reader_get_new_entry(r3, NULL) == 0);
// add logs again
for (int i = 0; i < CNT; i++)
// read by all the readers, alternately
for (int i = 0; i < CNT; i++) {
- const struct compression_entry *ce;
- ce = log_compressed_storage_reader_get_new_entry(r1);
- assert(ce->size_in == i + 16);
- ce = log_compressed_storage_reader_get_new_entry(r2);
- assert(ce->size_in == i + 16);
- ce = log_compressed_storage_reader_get_new_entry(r3);
- assert(ce->size_in == i + 16);
+ size_t size_in;
+ size_in = log_compressed_storage_reader_get_new_entry(r1, output_buffer);
+ assert(size_in == i + 16);
+ size_in = log_compressed_storage_reader_get_new_entry(r2, output_buffer);
+ assert(size_in == i + 16);
+ size_in = log_compressed_storage_reader_get_new_entry(r3, output_buffer);
+ assert(size_in == i + 16);
}
- assert(log_compressed_storage_reader_get_new_entry(r1) == NULL);
- assert(log_compressed_storage_reader_get_new_entry(r2) == NULL);
- assert(log_compressed_storage_reader_get_new_entry(r3) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r1, NULL) == 0);
+ assert(log_compressed_storage_reader_get_new_entry(r2, NULL) == 0);
+ assert(log_compressed_storage_reader_get_new_entry(r3, NULL) == 0);
log_compressed_storage_release_reader(r1);
log_compressed_storage_release_reader(r2);
assert(r1);
assert(!log_compressed_storage_reader_is_new_entry_available(r1));
- assert(log_compressed_storage_reader_get_new_entry(r1) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r1, NULL) == 0);
log_compressed_storage_release_reader(r1);
log_compressed_storage_add_new_entry(s1, good_compressible_input, 789);
for (int i = 0; i < CNT; i++) {
- const struct compression_entry *ce;
- ce = log_compressed_storage_reader_get_new_entry(r1);
- assert(ce->size_in == 123);
- ce = log_compressed_storage_reader_get_new_entry(r2);
- assert(ce->size_in == 123);
- ce = log_compressed_storage_reader_get_new_entry(r3);
- assert(ce->size_in == 123);
+ size_t size_in;
+ size_in = log_compressed_storage_reader_get_new_entry(r1, output_buffer);
+ assert(size_in == 123);
+ size_in = log_compressed_storage_reader_get_new_entry(r2, output_buffer);
+ assert(size_in == 123);
+ size_in = log_compressed_storage_reader_get_new_entry(r3, output_buffer);
+ assert(size_in == 123);
}
assert(!log_compressed_storage_reader_is_new_entry_available(r1));
assert(log_compressed_storage_reader_is_new_entry_available(r2));
assert(log_compressed_storage_reader_is_new_entry_available(r3));
- assert(log_compressed_storage_reader_get_new_entry(r1) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r1, NULL) == 0);
log_compressed_storage_release_reader(r1);
// r2 reads what he can
for (int i = 0; i < CNT; i++) {
- const struct compression_entry *ce = log_compressed_storage_reader_get_new_entry(r2);
- assert(ce->size_in == 456);
+ size_t size_in = log_compressed_storage_reader_get_new_entry(r2, output_buffer);
+ assert(size_in == 456);
}
assert(!log_compressed_storage_reader_is_new_entry_available(r2));
- assert(log_compressed_storage_reader_get_new_entry(r2) == NULL);
+ assert(log_compressed_storage_reader_get_new_entry(r2, NULL) == 0);
log_compressed_storage_release_reader(r2);
/* Add a bunch of entries to force-flush for r3. Two of those thiccbois should do the job.
// in the monitor mode, old logs are not taken into consideration
assert(!log_compressed_storage_reader_is_new_entry_available(r1));
- assert(NULL == log_compressed_storage_reader_get_new_entry(r1));
+ assert(0 == log_compressed_storage_reader_get_new_entry(r1, NULL));
// but new ones are
log_compressed_storage_add_new_entry(s1, good_compressible_input, sizeof good_compressible_input);
assert(log_compressed_storage_reader_is_new_entry_available(r1));
- assert(NULL != log_compressed_storage_reader_get_new_entry(r1));
+ assert(0 != log_compressed_storage_reader_get_new_entry(r1, output_buffer));
log_compressed_storage_release_reader(r1);
log_compressed_storage_free(s1);
void test_naming()
{
- log_compressed_storage *s1 = log_compressed_storage_create(123, "przemek");
+ log_compressed_storage *s1 = log_compressed_storage_create(123, "przemek", g_algo);
assert(s1);
assert(!strcmp(log_compressed_storage_get_name(s1), "przemek"));
log_compressed_storage_free(s1);
// Make sure it makes a copy and not just a shallow pointer
char emprah[7] = "sugmar";
- s1 = log_compressed_storage_create(123, emprah);
+ s1 = log_compressed_storage_create(123, emprah, g_algo);
emprah[1] = 'i';
assert(!strcmp(log_compressed_storage_get_name(s1), "sugmar"));
log_compressed_storage_free(s1);
}
+/* Computes the fastlz-compressed size of the "good" (well-compressible)
+ * reference input, used to seed out_good_size for each test pass. */
+static size_t get_good_size_fastlz() {
+	char buffer [FASTLZ_NEEDED_OUTPUT_SIZE(sizeof good_compressible_input)];
+	return fastlz_compress(good_compressible_input, sizeof good_compressible_input, buffer);
+}
+
+
int main(void)
{
srand(0);
for (size_t i = 0; i < sizeof weak_compressible_input; ++i)
weak_compressible_input[i] = rand() % 255;
-	char out_good[FASTLZ_NEEDED_OUTPUT_SIZE(sizeof good_compressible_input)];
-	out_good_size = (size_t) fastlz_compress(good_compressible_input, sizeof good_compressible_input, out_good);
-
-	test_tiny_storage();
-	test_basic_operation();
-	test_clear();
-	test_dumping_oneshot();
-	test_circularity();
-	test_size_bookkeeping();
-	test_syscall_failures();
-	test_get_usage();
-	test_monitor();
-	test_naming();
+	/* Table of algorithms under test: each entry gives the algorithm name
+	 * and the expected compressed size of the "good" reference input. */
+	const struct {
+		const char *algo;
+		size_t size;
+	} algos[] =
+	{ { .algo = "fastlz", .size = get_good_size_fastlz(), }
+	};
+
+	/* Run the whole suite once per algorithm; g_algo is picked up by
+	 * MAKE_STORAGE() and by the explicitly-named create calls. */
+	for (size_t i = 0; i < NELEMS(algos); ++i) {
+		g_algo = algos[i].algo;
+		out_good_size = algos[i].size;
+		test_tiny_storage();
+		test_basic_operation();
+		test_clear();
+		test_dumping_oneshot();
+		test_circularity();
+		test_size_bookkeeping();
+		test_syscall_failures();
+		test_get_usage();
+		test_monitor();
+		test_naming();
+	}
return 0;
}