tree-affine.o \
asan.o \
tsan.o \
+ esan.o \
ubsan.o \
sanopt.o \
sancov.o \
$(srcdir)/asan.c \
$(srcdir)/ubsan.c \
$(srcdir)/tsan.c \
+ $(srcdir)/esan.c \
$(srcdir)/sanopt.c \
$(srcdir)/sancov.c \
$(srcdir)/ipa-devirt.c \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, true, ATTRS, true, \
(flag_sanitize & (SANITIZE_ADDRESS | SANITIZE_THREAD \
- | SANITIZE_UNDEFINED | SANITIZE_NONDEFAULT) \
+ | SANITIZE_UNDEFINED | SANITIZE_NONDEFAULT \
+ | SANITIZE_EFFICIENCY_WORKING_SET \
+ | SANITIZE_EFFICIENCY_CACHE_FRAG) \
|| flag_sanitize_coverage))
#undef DEF_CILKPLUS_BUILTIN
--- /dev/null
+/* EfficiencySanitizer.
+ Copyright (C) 2011-2018 Free Software Foundation, Inc.
+ Contributed by Denis Khalikov.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "tree.h"
+#include "memmodel.h"
+#include "gimple.h"
+#include "tree-pass.h"
+#include "cgraph.h"
+#include "fold-const.h"
+#include "gimplify.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "tree-cfg.h"
+#include "tree-iterator.h"
+#include "esan.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "builtins.h"
+#include "asan.h"
+#include "output.h"
+#include "stor-layout.h"
+#include "ssa.h"
+#include "gimple-fold.h"
+
+/* The size taken from llvm part. */
+static const unsigned max_struct_field_counter_name_size = 512;
+/* Scratch buffer used to build the per-struct counter-array name.  */
+static char field_counter_name[max_struct_field_counter_name_size];
+/* Prefix the esan runtime expects on every counter-array name.  */
+static char counter_prefix[] = "struct.";
+
+/* Struct represents internal information about processed
+   record types.  */
+struct esan_type
+{
+  esan_type ()
+    : type (NULL_TREE), array_type (NULL_TREE), field_counter_name (NULL),
+      fields_count (0)
+  {
+  }
+
+  esan_type (tree t, tree a)
+    : type (t), array_type (a), field_counter_name (NULL), fields_count (0)
+  {
+  }
+
+  /* Kept for callers that construct from an int.  The previous version
+     only initialized TYPE and ARRAY_TYPE when VALUE was zero and never
+     initialized FIELD_COUNTER_NAME or FIELDS_COUNT, so copying or
+     reading such an instance was undefined behavior.  Null-initialize
+     every member unconditionally instead.  */
+  esan_type (const int value)
+    : type (NULL_TREE), array_type (NULL_TREE), field_counter_name (NULL),
+      fields_count (0)
+  {
+    (void) value;
+  }
+
+  /* The instrumented RECORD_TYPE.  */
+  tree type;
+  /* VAR_DECL of the fields-counter array created for TYPE.  */
+  tree array_type;
+  /* xmalloc'ed counter-array name; freed in esan_finish_file.  */
+  char *field_counter_name;
+  /* Number of non-record fields in TYPE.  */
+  size_t fields_count;
+};
+
+/* Represents the type, field and index related to them.
+   Why do we need this type at all ?
+
+   Assume we have following classes A, B, C as A <- B <- C,
+   and field access load %reg, &C->A_field, so we should
+   go recursively and find actual type A and field index
+   inside the A class.
+   This struct helps to solve that problem.
+*/
+struct esan_field_index
+{
+  esan_field_index () : type (NULL_TREE), field (NULL_TREE), index (0) {}
+
+  esan_field_index (tree base_type, tree base_field, size_t base_index)
+    : type (base_type), field (base_field), index (base_index)
+  {
+  }
+
+  esan_field_index (const esan_field_index &other)
+  {
+    type = other.type;
+    field = other.field;
+    index = other.index;
+  }
+
+  esan_field_index &
+  operator= (const esan_field_index &other)
+  {
+    if (this == &other)
+      return *this;
+
+    type = other.type;
+    field = other.field;
+    index = other.index;
+    return *this;
+  }
+
+  /* Record type that actually declares FIELD.  */
+  tree type;
+  /* The FIELD_DECL itself.  */
+  tree field;
+  /* Zero-based position of FIELD among TYPE's non-record fields.  */
+  size_t index;
+};
+
+/* Pool of record types already instrumented in this translation unit.
+   FIXME: use hash_set<K, K_Traits> instead of vec<T>
+   to improve search from O(n) to O(1). */
+static vec<esan_type> vec_esan_type;
+
+/* Return the esan builtin decl that reports a memory access of SIZE
+   bytes; IS_STORE selects the store flavor.
+   NOTE(review): the thresholds round down, e.g. a 3-byte access maps
+   to the 2-byte builtin and a 7-byte access to the 4-byte one; LLVM's
+   esan dispatches on exact power-of-two sizes with an unaligned
+   fallback -- confirm this rounding is intended.  */
+static tree
+get_memory_access_decl (bool is_store, unsigned size)
+{
+  /* Choose an appropriate builtin. */
+  enum built_in_function fcode;
+  if (size <= 1)
+    fcode
+      = is_store ? BUILT_IN_ESAN_ALIGNED_STORE1 : BUILT_IN_ESAN_ALIGNED_LOAD1;
+  else if (size <= 3)
+    fcode
+      = is_store ? BUILT_IN_ESAN_ALIGNED_STORE2 : BUILT_IN_ESAN_ALIGNED_LOAD2;
+  else if (size <= 7)
+    fcode
+      = is_store ? BUILT_IN_ESAN_ALIGNED_STORE4 : BUILT_IN_ESAN_ALIGNED_LOAD4;
+  else if (size <= 15)
+    fcode
+      = is_store ? BUILT_IN_ESAN_ALIGNED_STORE8 : BUILT_IN_ESAN_ALIGNED_LOAD8;
+  else
+    fcode
+      = is_store ? BUILT_IN_ESAN_ALIGNED_STORE16 : BUILT_IN_ESAN_ALIGNED_LOAD16;
+  return builtin_decl_implicit (fcode);
+}
+
+/* Instrument the memory access EXPR at the statement GSI points to by
+   inserting a call to the matching esan aligned load/store builtin.
+   IS_STORE says whether EXPR is written.  Accesses of non-positive
+   size or insufficient alignment are skipped.  */
+static void
+instrument_expr (gimple_stmt_iterator gsi, tree expr, bool is_store)
+{
+  tree base, expr_ptr;
+  basic_block bb;
+  HOST_WIDE_INT size;
+  gimple *stmt, *g;
+  gimple_seq seq;
+  location_t loc;
+  unsigned int align;
+
+  size = int_size_in_bytes (TREE_TYPE (expr));
+  /* Can't instrument memory accesses in case of size <= 0. */
+  if (size <= 0)
+    return;
+
+  HOST_WIDE_INT unused_bitsize, unused_bitpos;
+  tree offset;
+  machine_mode mode;
+  int unsignedp, reversep, volatilep = 0;
+
+  base = get_inner_reference (expr, &unused_bitsize, &unused_bitpos, &offset,
+                              &mode, &unsignedp, &reversep, &volatilep, false);
+
+  /* Read-only objects and hard registers cannot be usefully counted.  */
+  if (TREE_READONLY (base) || (VAR_P (base) && DECL_HARD_REGISTER (base)))
+    return;
+
+  stmt = gsi_stmt (gsi);
+  loc = gimple_location (stmt);
+  align = get_object_alignment (expr);
+  /* In this case we can't instrument memory access. */
+  if (align < BITS_PER_UNIT)
+    return;
+
+  /* In this case we need folded ptr to expression. */
+  expr_ptr = build_fold_addr_expr (unshare_expr (expr));
+
+  /* force_gimple_operand initializes SEQ with any statements needed to
+     gimplify the address.  */
+  expr_ptr = force_gimple_operand (expr_ptr, &seq, true, NULL_TREE);
+  /* Build the esan's builtin. */
+  g = gimple_build_call (get_memory_access_decl (is_store, size), 1, expr_ptr);
+  /* Set location. */
+  gimple_set_location (g, loc);
+  gimple_seq_add_stmt_without_update (&seq, g);
+
+  /* NOTE(review): the working-set walker returns early on calls, so
+     this branch seems reachable only if another caller is added.  */
+  if (is_gimple_call (stmt) && is_store)
+    {
+      /* Could be a fallthrough edge. */
+      if (is_ctrl_altering_stmt (stmt))
+        {
+          edge e;
+
+          bb = gsi_bb (gsi);
+          e = find_fallthru_edge (bb->succs);
+          if (e)
+            gsi_insert_seq_on_edge_immediate (e, seq);
+        }
+      else
+        /* Insert after the call on store. */
+        gsi_insert_seq_after (&gsi, seq, GSI_NEW_STMT);
+    }
+  else
+    /* Insert before. */
+    gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
+}
+
+/* Working-set tool: instrument every memory access performed by the
+   statement GSI points to.  */
+static void
+maybe_instrument_working_set_gimple (gimple_stmt_iterator *gsi)
+{
+  gimple *cur = gsi_stmt (*gsi);
+
+  /* Calls, non-assignments and clobbers carry no accesses of
+     interest here.  */
+  if (is_gimple_call (cur)
+      || !is_gimple_assign (cur)
+      || gimple_clobber_p (cur))
+    return;
+
+  /* An aggregate copy can be both a store and a load, so check the
+     two sides independently.  */
+  if (gimple_store_p (cur))
+    instrument_expr (*gsi, gimple_assign_lhs (cur), true);
+  if (gimple_assign_load_p (cur))
+    instrument_expr (*gsi, gimple_assign_rhs1 (cur), false);
+}
+
+/* Running counters used to generate unique ids for pointer-typed and
+   otherwise-unhandled fields, respectively.  */
+static size_t type_count;
+static size_t default_count;
+
+/* Function generates unique id.  Writes the decimal digits of *T_COUNT
+   into BUFF least-significant first (the whole name is reversed later
+   by the caller), increments *T_COUNT, and returns the number of
+   characters the id occupies including the type character the caller
+   already emitted.
+   NOTE(review): when *T_COUNT is 0 no digit is written at all, so the
+   very first id is just the bare type character -- confirm this
+   matches the llvm runtime's naming.  */
+static size_t
+gen_unique_id (char *buff, size_t *t_count)
+{
+  size_t size = 1;
+  size_t count = *t_count;
+  while (count)
+    {
+      *buff++ = '0' + count % 10;
+      count /= 10;
+      ++size;
+    }
+  *t_count += 1;
+  return size;
+}
+
+/* Write a one-character type id for FIELD_TYPE into BUFF and return
+   the number of characters written; pointers and unhandled types get
+   an additional unique id appended.  The encoding must stay in sync
+   with field_type_size and the llvm runtime.  */
+static int
+convert_to_type_id (char *buff, tree field_type)
+{
+  switch (TREE_CODE (field_type))
+    {
+    case INTEGER_TYPE:
+      *buff = '1';
+      return 1;
+    case REAL_TYPE:
+      *buff = '2';
+      return 1;
+    case COMPLEX_TYPE:
+      *buff = '3';
+      return 1;
+    case VECTOR_TYPE:
+      *buff = '4';
+      return 1;
+    case ENUMERAL_TYPE:
+      *buff = '5';
+      return 1;
+    case BOOLEAN_TYPE:
+      *buff = '6';
+      return 1;
+    case POINTER_TYPE:
+      {
+	*buff++ = '8';
+	return gen_unique_id (buff, &type_count);
+      }
+    default:
+      {
+	*buff++ = '9';
+	return gen_unique_id (buff, &default_count);
+      }
+    }
+}
+
+/* Reverse the first LEN characters of BUFF in place.  */
+static void
+reverse (char *buff, size_t len)
+{
+  size_t i, j;
+
+  if (len == 0)
+    return;
+
+  for (i = 0, j = len - 1; i < j; ++i, --j)
+    {
+      char c = buff[i];
+      buff[i] = buff[j];
+      buff[j] = c;
+    }
+}
+
+/* Return how many characters a unique id built from T_COUNT occupies:
+   one for the leading type character plus one per decimal digit of
+   T_COUNT (zero contributes no digit).  */
+static size_t
+unique_id_size (size_t t_count)
+{
+  size_t n;
+
+  for (n = 1; t_count != 0; t_count /= 10)
+    ++n;
+  return n;
+}
+
+/* Return the number of characters convert_to_type_id would emit for
+   FIELD with the current counters, or 0 when FIELD is not a
+   FIELD_DECL.  Must be kept in sync with convert_to_type_id.  */
+static size_t
+field_type_size (tree field)
+{
+  if (TREE_CODE (field) == FIELD_DECL)
+    {
+      switch (TREE_CODE (TREE_TYPE (field)))
+	{
+	case INTEGER_TYPE:
+	case REAL_TYPE:
+	case COMPLEX_TYPE:
+	case VECTOR_TYPE:
+	case ENUMERAL_TYPE:
+	case BOOLEAN_TYPE:
+	  return 1;
+	case POINTER_TYPE:
+	  {
+	    return unique_id_size (type_count);
+	  }
+	default:
+	  {
+	    return unique_id_size (default_count);
+	  }
+	}
+    }
+  /* Skip nested types. */
+  return 0;
+}
+
+/* Function creates the struct field counter name, consistent with the
+   llvm part, because the runtime relies on it.  The name is built in
+   the global FIELD_COUNTER_NAME buffer as
+   "struct.<type-name><reversed field type ids>"; *COUNT receives the
+   number of non-record fields.  Returns the name length, or -1 when
+   the type is anonymous or the name would not fit.  */
+static ssize_t
+create_struct_field_counter_name (tree type, size_t *count)
+{
+  tree field;
+  size_t offset, start;
+  const char *type_name;
+
+  /* Could be NULL type identifier. */
+  if (!TYPE_IDENTIFIER (type))
+    return -1;
+
+  type_name = IDENTIFIER_POINTER (TYPE_IDENTIFIER (type));
+  *count = offset = start = 0;
+
+  memset (field_counter_name, 0, max_struct_field_counter_name_size);
+
+  size_t counter_prefix_len = strlen (counter_prefix);
+  memcpy (field_counter_name, counter_prefix, counter_prefix_len);
+  offset += counter_prefix_len;
+
+  size_t type_name_len = strlen (type_name);
+
+  if (type_name_len + counter_prefix_len >= max_struct_field_counter_name_size)
+    return -1;
+
+  memcpy (field_counter_name + offset, type_name, type_name_len);
+  offset += type_name_len;
+
+  start = offset;
+
+  for (field = TYPE_FIELDS (type); field
+       /* offset + sizeof (field) + sizeof ($). */
+       && (offset + field_type_size (field) + 1)
+	    < max_struct_field_counter_name_size;
+       field = DECL_CHAIN (field))
+    {
+      if (TREE_CODE (field) == FIELD_DECL)
+	{
+	  offset += convert_to_type_id (field_counter_name + offset,
+					TREE_TYPE (field));
+	  field_counter_name[offset++] = '$';
+	  /* Don't count nested types as a field. */
+	  if (TREE_CODE (TREE_TYPE (field)) != RECORD_TYPE)
+	    ++*count;
+	}
+    }
+
+  /* Strangely, the llvm part writes field ids in reverse order.  */
+  reverse (field_counter_name + start, offset - start);
+  return offset;
+}
+
+/* This should be kept consistent with LLVM's EfficiencySanitizer StructInfo.
+ struct StructInfo {
+ const char *StructName;
+ u32 Size;
+ u32 NumFields;
+ u32 *FieldOffset; // auxiliary struct field info.
+ u32 *FieldSize; // auxiliary struct field info.
+ const char **FieldTypeName; // auxiliary struct field info.
+ u64 *FieldCounters;
+ u64 *ArrayCounter;
+ }; */
+
+/* Cached struct_info_type tree. */
+static tree esan_struct_info_type;
+
+/* Return (building and caching on first use) the RECORD_TYPE that
+   mirrors the runtime's StructInfo layout documented above.  */
+static tree
+esan_get_struct_info_type (void)
+{
+  /* Description taken from llvm.  Field names must mirror the
+     StructInfo layout; they are debug-only identifiers.  */
+  static const unsigned int count = 8;
+  static const char *field_names[count]
+    = {"StructName", "Size", "NumFields", "FieldOffset",
+       "FieldSize", "FieldTypeName", "FieldCounters", "ArrayCounter"};
+  tree fields[count], ret;
+
+  if (esan_struct_info_type)
+    return esan_struct_info_type;
+
+  tree range_type = build_range_type (sizetype, size_zero_node, NULL_TREE);
+  /* StructName is a ptr to char with flex size. */
+  tree flex_char_arr_type = build_array_type (char_type_node, range_type);
+  /* FieldOffset and FieldSize ptrs. */
+  tree flex_uint_arr_type = build_array_type (unsigned_type_node, range_type);
+  /* FieldCounter and ArrayCounter. */
+  tree flex_luint_arr_type
+    = build_array_type (long_long_unsigned_type_node, range_type);
+
+  ret = make_node (RECORD_TYPE);
+  for (unsigned int i = 0; i < count; ++i)
+    {
+      switch (i)
+	{
+	case 0:
+	case 5:
+	  fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+				  get_identifier (field_names[i]),
+				  build_pointer_type (flex_char_arr_type));
+	  break;
+	case 1:
+	case 2:
+	  fields[i]
+	    = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+			  get_identifier (field_names[i]), unsigned_type_node);
+	  break;
+	case 3:
+	case 4:
+	  fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+				  get_identifier (field_names[i]),
+				  build_pointer_type (flex_uint_arr_type));
+	  break;
+	case 6:
+	case 7:
+	  fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+				  get_identifier (field_names[i]),
+				  build_pointer_type (flex_luint_arr_type));
+	  break;
+	default:
+	  break;
+	}
+
+      DECL_CONTEXT (fields[i]) = ret;
+      if (i)
+	DECL_CHAIN (fields[i - 1]) = fields[i];
+    }
+
+  /* Fixed spelling of the internal type name (was "strcut"); the decl
+     is artificial and ignored, so the name is debug-only.  */
+  tree type_decl = build_decl (input_location, TYPE_DECL,
+			       get_identifier ("__esan_struct_type_info"), ret);
+
+  DECL_IGNORED_P (type_decl) = 1;
+  DECL_ARTIFICIAL (type_decl) = 1;
+  TYPE_FIELDS (ret) = fields[0];
+  TYPE_NAME (ret) = type_decl;
+  TYPE_STUB_DECL (ret) = type_decl;
+  layout_type (ret);
+  esan_struct_info_type = ret;
+  return ret;
+}
+
+/* This should be kept consistent with LLVM's EfficiencySanitizer CacheFragInfo.
+   The tool-specific information per compilation unit (module).
+   struct CacheFragInfo {
+     const char *UnitName;
+     u32 NumStructs;
+     StructInfo *Structs;
+   }; */
+
+/* Cached cache_frag_info_type tree.  */
+static tree esan_cache_frag_info_type;
+
+/* Return (building and caching on first use) the RECORD_TYPE that
+   mirrors the runtime's CacheFragInfo layout above.  */
+static tree
+esan_get_cache_frag_info_type (void)
+{
+  static const unsigned int count = 3;
+  static const char *field_names[count] = {"UnitName", "NumStructs", "Structs"};
+  tree fields[count], ret;
+
+  if (esan_cache_frag_info_type)
+    return esan_cache_frag_info_type;
+
+  tree esan_struct_type_info = esan_get_struct_info_type ();
+
+  tree range_type = build_range_type (sizetype, size_zero_node, NULL_TREE);
+  /* StructInfo Array Type. */
+  tree struct_info_arr_type
+    = build_array_type (esan_struct_type_info, range_type);
+
+  /* Unit Name. */
+  tree flex_char_arr_type = build_array_type (char_type_node, range_type);
+
+  ret = make_node (RECORD_TYPE);
+
+  for (unsigned int i = 0; i < count; ++i)
+    {
+      switch (i)
+	{
+	case 0:
+	  fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+				  get_identifier (field_names[i]),
+				  build_pointer_type (flex_char_arr_type));
+	  break;
+	case 1:
+	  fields[i]
+	    = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+			  get_identifier (field_names[i]), unsigned_type_node);
+	  break;
+	case 2:
+	  fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL,
+				  get_identifier (field_names[i]),
+				  build_pointer_type (struct_info_arr_type));
+	  break;
+	default:
+	  break;
+	}
+      DECL_CONTEXT (fields[i]) = ret;
+      if (i)
+	DECL_CHAIN (fields[i - 1]) = fields[i];
+    }
+
+  tree type_decl
+    = build_decl (input_location, TYPE_DECL,
+		  get_identifier ("__esan_cache_frag_info_type"), ret);
+
+  DECL_IGNORED_P (type_decl) = 1;
+  DECL_ARTIFICIAL (type_decl) = 1;
+  TYPE_FIELDS (ret) = fields[0];
+  TYPE_NAME (ret) = type_decl;
+  TYPE_STUB_DECL (ret) = type_decl;
+  layout_type (ret);
+  esan_cache_frag_info_type = ret;
+  return ret;
+}
+
+/* Counters used by ASM_GENERATE_INTERNAL_LABEL for internal decls.  */
+static unsigned int esan_ids[2];
+/* Cached flexible u64 array type used for counter arrays.  */
+static tree esan_array_counter_type;
+
+/* Return (building and caching on first use) the flexible array type
+   of unsigned long long used for per-struct field counters.  */
+static tree
+esan_get_array_counter_type (void)
+{
+  tree ret;
+
+  if (esan_array_counter_type)
+    return esan_array_counter_type;
+
+  tree range_type = build_range_type (sizetype, size_zero_node, NULL_TREE);
+  ret = build_array_type (long_long_unsigned_type_node, range_type);
+
+  esan_array_counter_type = ret;
+  return ret;
+}
+
+/* Build a StructInfo initializer describing one instrumented record
+   type and append it to V.  ARRAY_ADDR is the address of the type's
+   counter array, FIELDS_COUNT its field count,
+   FIELD_COUNTER_NAME_TYPE its runtime name and STRUCT_SIZE the record
+   size in bytes.  */
+static void
+esan_add_struct (tree array_addr, vec<constructor_elt, va_gc> *v,
+		 size_t fields_count, char *field_counter_name_type,
+		 size_t struct_size)
+{
+  /* Create StructName. */
+  const char *struct_name = field_counter_name_type;
+  size_t struct_name_len = strlen (struct_name) + 1;
+  tree struct_name_tree = build_string (struct_name_len, struct_name);
+  TREE_TYPE (struct_name_tree)
+    = build_array_type_nelts (char_type_node, struct_name_len);
+  TREE_READONLY (struct_name_tree) = 1;
+  TREE_STATIC (struct_name_tree) = 1;
+
+  /* Create StructInfo type. */
+  tree dtype = esan_get_struct_info_type ();
+
+  /* Create an instance of StructInfo. */
+  char tmp_name[32];
+  ASM_GENERATE_INTERNAL_LABEL (tmp_name, "Lesan_type", esan_ids[0]++);
+  tree decl
+    = build_decl (UNKNOWN_LOCATION, VAR_DECL, get_identifier (tmp_name), dtype);
+  TREE_STATIC (decl) = 1;
+  TREE_PUBLIC (decl) = 0;
+  TREE_READONLY (decl) = 1;
+  DECL_ARTIFICIAL (decl) = 1;
+  DECL_IGNORED_P (decl) = 1;
+  DECL_EXTERNAL (decl) = 0;
+
+  /* Initialize created struct.  The auxiliary pointer fields
+     (FieldOffset, FieldSize, FieldTypeName, ArrayCounter) are filled
+     with u64 zero constants.
+     NOTE(review): those zero csts are typed long long unsigned while
+     the fields are pointers -- confirm the mismatch is benign here.  */
+  tree ctor = build_constructor_va (
+    dtype, 8, NULL_TREE, build_fold_addr_expr (struct_name_tree), NULL_TREE,
+    build_int_cst (unsigned_type_node, struct_size), NULL_TREE,
+    build_int_cst (unsigned_type_node, fields_count), NULL_TREE,
+    build_zero_cst (long_long_unsigned_type_node), NULL_TREE,
+    build_zero_cst (long_long_unsigned_type_node), NULL_TREE,
+    build_zero_cst (long_long_unsigned_type_node), NULL_TREE, array_addr,
+    NULL_TREE, build_zero_cst (long_long_unsigned_type_node));
+
+  TREE_CONSTANT (ctor) = 1;
+  TREE_STATIC (ctor) = 1;
+  /* NOTE(review): DECL receives the initializer but is never passed to
+     varpool_node::finalize_decl; only the constructor value itself is
+     appended to V, so DECL looks unused -- confirm it is a leftover.  */
+  DECL_INITIAL (decl) = ctor;
+  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, ctor);
+}
+
+/* Go recursively and find the actual class type and field index inside it.
+   Walks BASE_TYPE's fields (descending into nested record types)
+   looking for FIELD; sets *FOUND on success and returns the declaring
+   type together with FIELD's position among non-record fields.  */
+static esan_field_index
+esan_get_field_index_internal (tree base_type, tree field, bool *found)
+{
+  tree field_it = TYPE_FIELDS (base_type);
+  esan_field_index field_index (base_type, field, 0);
+
+  for (; field_it && !*found; field_it = DECL_CHAIN (field_it))
+    {
+      if (TREE_CODE (field_it) == FIELD_DECL)
+	{
+	  if (TREE_CODE (TREE_TYPE (field_it)) == RECORD_TYPE)
+	    {
+	      esan_field_index nested_field_index
+		= esan_get_field_index_internal (TREE_TYPE (field_it), field,
+						 found);
+	      if (*found)
+		return nested_field_index;
+	    }
+	  else if (field_it == field)
+	    *found = true;
+	  else
+	    ++field_index.index;
+	}
+    }
+  return *found ? field_index : esan_field_index (NULL_TREE, NULL_TREE, 0);
+}
+
+/* Locate FIELD inside BASE_TYPE (recursing into nested records) and
+   store the declaring type and field position in *FIELD_INDEX.
+   Return true when FIELD was found.  */
+static bool
+esan_get_field_index (tree base_type, tree field, esan_field_index *field_index)
+{
+  bool found = false;
+  /* At this moment we don't really know the record type where field is
+     defined, so we can't cache the previous search. */
+  *field_index = esan_get_field_index_internal (base_type, field, &found);
+  return found;
+}
+
+/* Return the index of TYPE in the instrumented-type pool, matching by
+   TYPE_IDENTIFIER, or -1 when absent.
+   NOTE(review): anonymous types have a NULL TYPE_IDENTIFIER, so two
+   distinct anonymous records would compare equal here -- confirm
+   callers filter those out.  */
+static ssize_t
+esan_vec_contains_type (tree type)
+{
+  ssize_t index = 0;
+
+  if (type == NULL_TREE)
+    return -1;
+
+  esan_type esan_type_instance;
+  /* This is O(n) cost, but I think we can improve it to ~ O(1)
+     by using a hash_set. */
+  while (vec_esan_type.iterate (index, &esan_type_instance))
+    {
+      if ((TYPE_IDENTIFIER (type)) == TYPE_IDENTIFIER (esan_type_instance.type))
+	return index;
+      ++index;
+    }
+  return -1;
+}
+
+/* Expand an IFN_ESAN_RECORD_ACCESS internal call at *GSIP into a
+   counter increment: on first sight of the record type, create its
+   weak, zero-initialized counter array; then emit
+   __esan_increment (&array[field_index]) before the call and remove
+   the internal call itself.  Returns the result of gsi_remove.  */
+bool
+esan_expand_record_access_ifn (gimple_stmt_iterator *gsip)
+{
+  tree field;
+  size_t fields_count;
+  esan_field_index field_index_internal;
+
+  tree base, base_type;
+  gimple_stmt_iterator gsi, gsi_origin;
+  gimple *stmt;
+  location_t loc;
+
+  /* Expand internal ESAN_ACCESS. */
+  gsi_origin = gsi = *gsip;
+  stmt = gsi_stmt (gsi);
+  loc = gimple_location (stmt);
+  base = gimple_call_arg (stmt, 0);
+  field = gimple_call_arg (stmt, 1);
+  esan_type esan_type_instance;
+
+  tree array = NULL_TREE;
+  tree ptr = NULL_TREE;
+
+  /* In case we packed pointer type into integer to allow some level of
+     optimization work properly. */
+  base_type = TREE_CODE (base) == INTEGER_CST ? TREE_TYPE (TREE_TYPE (base))
+					      : TREE_TYPE (base);
+
+  /* Don't forget to verify field index, before creating the fields counter
+     array. */
+  if (!base_type
+      || !esan_get_field_index (base_type, field, &field_index_internal))
+    {
+      unlink_stmt_vdef (stmt);
+      return gsi_remove (&gsi_origin, true);
+    }
+
+  size_t field_index = field_index_internal.index;
+  base_type = TYPE_MAIN_VARIANT (field_index_internal.type);
+
+  ssize_t index = esan_vec_contains_type (base_type);
+
+  /* In case we have not found type in the pool of types
+     and field has valid index, we should
+     create fields counter array and add type to the pool of types. */
+  if (index == -1)
+    {
+      fields_count = 0;
+      ssize_t field_counter_name_len
+	= create_struct_field_counter_name (base_type, &fields_count);
+
+      if (field_counter_name_len == -1)
+	goto finish;
+
+      /* Private copy of the name; ownership moves into the type pool
+	 and is released in esan_finish_file.  */
+      char *field_counter_name_type
+	= (char *) xmalloc (field_counter_name_len + 1);
+      memset (field_counter_name_type, 0, field_counter_name_len + 1);
+      memcpy (field_counter_name_type, field_counter_name,
+	      field_counter_name_len + 1);
+
+      tree atype = esan_get_array_counter_type ();
+      /* Weak + public so identical arrays from different TUs merge,
+	 matching the llvm runtime's expectations.  */
+      array = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+			  get_identifier (field_counter_name), atype);
+
+      TREE_STATIC (array) = 1;
+      TREE_PUBLIC (array) = 1;
+      DECL_ARTIFICIAL (array) = 1;
+      DECL_IGNORED_P (array) = 1;
+      DECL_WEAK (array) = 1;
+      /* NOTE(review): DECL_SIZE is normally in bits with bitsizetype;
+	 integer_type_node is used here -- confirm this is accepted.  */
+      DECL_SIZE (array) = build_int_cst (
+	integer_type_node,
+	fields_count * tree_to_uhwi (TYPE_SIZE (long_long_unsigned_type_node)));
+
+      DECL_SIZE_UNIT (array)
+	= build_int_cst (integer_type_node,
+			 fields_count
+			   * tree_to_uhwi (
+			       TYPE_SIZE_UNIT (long_long_unsigned_type_node)));
+      DECL_EXTERNAL (array) = 0;
+
+      tree counter_ctor
+	= build_constructor_va (atype, 1, NULL_TREE, build_zero_cst (atype));
+      TREE_CONSTANT (counter_ctor) = 1;
+      TREE_STATIC (counter_ctor) = 1;
+      DECL_INITIAL (array) = counter_ctor;
+      varpool_node::finalize_decl (array);
+
+      ptr = build_fold_addr_expr (array);
+      esan_type_instance.type = base_type;
+      esan_type_instance.array_type = array;
+      esan_type_instance.field_counter_name = field_counter_name_type;
+      esan_type_instance.fields_count = fields_count;
+      vec_esan_type.safe_push (esan_type_instance);
+    }
+  /* We have already added the type into the pool, so fields counter
+     array was created. */
+  else if (index >= 0)
+    {
+      if (vec_esan_type.iterate (index, &esan_type_instance))
+	ptr = build_fold_addr_expr (esan_type_instance.array_type);
+      else
+	goto finish;
+    }
+  {
+    gimple *g;
+    /* Field number + sizeof (long int). */
+    tree offset = build_int_cst (unsigned_type_node,
+				 field_index
+				   * tree_to_uhwi (TYPE_SIZE_UNIT (
+				     long_long_unsigned_type_node)));
+    /* array + offset. */
+    g = gimple_build_assign (make_ssa_name (pointer_sized_int_node), PLUS_EXPR,
+			     ptr, offset);
+    gimple_set_location (g, loc);
+    gsi_insert_before (gsip, g, GSI_SAME_STMT);
+    ptr = gimple_assign_lhs (g);
+
+    tree call_decl = builtin_decl_implicit (BUILT_IN_ESAN_INCREMENT);
+
+    g = gimple_build_call (call_decl, 1, ptr);
+    gimple_set_location (g, loc);
+    gsi_insert_before (gsip, g, GSI_SAME_STMT);
+    /* FIXME Inline __esan_increment for the better performance. */
+  }
+finish:
+  unlink_stmt_vdef (stmt);
+  return gsi_remove (&gsi_origin, true);
+}
+
+/* Emit an IFN_ESAN_RECORD_ACCESS internal call before the statement at
+   *ITER describing the access MEM to FIELD of the record BASE.  CODE
+   is TREE_CODE (BASE).  Union accesses and accesses where MEM == BASE
+   are skipped.  */
+static void
+instrument_record_field_access (tree mem, tree base, gimple_stmt_iterator *iter,
+				tree field, const enum tree_code code)
+{
+  tree t;
+  gcall *g;
+
+  t = TREE_OPERAND (base, 0);
+
+  if ((code == MEM_REF && !POINTER_TYPE_P (TREE_TYPE (t))) || !field)
+    return;
+
+  /* Don't instrument union accesses. */
+  if ((TREE_CODE (TREE_TYPE (base)) != RECORD_TYPE) || mem == base)
+    return;
+
+  /* Pack the pointer to integer, so other pass will keep it the same. */
+  g = gimple_build_call_internal (
+    IFN_ESAN_RECORD_ACCESS, 2,
+    (code == MEM_REF && POINTER_TYPE_P (TREE_TYPE (t)))
+      ? build_int_cst (build_pointer_type (TREE_TYPE (base)), 0)
+      : base,
+    field);
+
+  gimple_set_location (g, gimple_location (gsi_stmt (*iter)));
+  gsi_insert_before (iter, g, GSI_SAME_STMT);
+}
+
+/* Cache-frag tool: if EXPR (the lhs when IS_STORE, otherwise the rhs
+   of the assignment at *GSI) is a COMPONENT_REF into a record, mark
+   the field access with an internal call for later expansion.  */
+static void
+instrument_record_access (gimple_stmt_iterator *gsi, tree expr, bool is_store)
+{
+  tree field, t, base;
+  HOST_WIDE_INT unused_bitsize, unused_bitpos;
+  tree offset;
+  machine_mode mode;
+  int unsignedp, reversep, volatilep = 0;
+
+  gimple *stmt = gsi_stmt (*gsi);
+
+  HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (expr));
+
+  if (size <= 0)
+    return;
+
+  tree base_tree
+    = get_inner_reference (expr, &unused_bitsize, &unused_bitpos, &offset,
+			   &mode, &unsignedp, &reversep, &volatilep, false);
+
+  if (TREE_READONLY (base_tree)
+      || (VAR_P (base_tree) && DECL_HARD_REGISTER (base_tree)))
+    return;
+
+  if (get_object_alignment (expr) < BITS_PER_UNIT)
+    return;
+
+  t = is_store ? gimple_get_lhs (stmt) : gimple_assign_rhs1 (stmt);
+  base = get_base_address (t);
+  field = NULL_TREE;
+
+  /* Get the field. */
+  if (TREE_CODE (t) == COMPONENT_REF)
+    field = TREE_OPERAND (t, 1);
+
+  const enum tree_code code = TREE_CODE (base);
+
+  /* Only pointer-based (MEM_REF off an SSA name) or direct variable
+     accesses are instrumented.  */
+  if ((code == MEM_REF && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
+      || code == VAR_DECL)
+    instrument_record_field_access (t, base, gsi, field, code);
+}
+
+/* Cache-frag tool: instrument the record field access (if any) made by
+   the statement GSI points to.  */
+static void
+maybe_instrument_cache_frag_gimple (gimple_stmt_iterator *gsi)
+{
+  gimple *cur = gsi_stmt (*gsi);
+
+  /* Only plain, non-clobber assignments are of interest.  */
+  if (is_gimple_call (cur)
+      || !is_gimple_assign (cur)
+      || gimple_clobber_p (cur))
+    return;
+
+  if (gimple_store_p (cur))
+    instrument_record_access (gsi, gimple_assign_lhs (cur), true);
+  else if (gimple_assign_load_p (cur))
+    instrument_record_access (gsi, gimple_assign_rhs1 (cur), false);
+}
+
+/* Walk every statement of the current function and dispatch it to the
+   active tool's instrumentation routine.  When both flags are set the
+   working-set tool takes precedence (else-if).  */
+static void
+instrument_memory_accesses (void)
+{
+  basic_block bb;
+  gimple_stmt_iterator gsi;
+
+  /* So, walk through all basic blocks
+     and try to instrument memory accesses. */
+  FOR_EACH_BB_FN (bb, cfun)
+    {
+      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+	{
+	  if (flag_sanitize & SANITIZE_EFFICIENCY_WORKING_SET)
+	    maybe_instrument_working_set_gimple (&gsi);
+	  else if (flag_sanitize & SANITIZE_EFFICIENCY_CACHE_FRAG)
+	    maybe_instrument_cache_frag_gimple (&gsi);
+	}
+    }
+}
+
+/* EfficiencySanitizer instrumentation pass.  Entry point executed per
+   function; always returns 0 (no extra TODO flags).  */
+
+static unsigned
+esan_pass (void)
+{
+  initialize_sanitizer_builtins ();
+  instrument_memory_accesses ();
+  return 0;
+}
+
+/* Creates an array of StructInfo structs and a CacheInfo struct.
+   Inserts __esan_init () into the list of CTORs and
+   __esan_exit () into the list of DTORs, and emits the weak
+   __esan_which_tool global the runtime dispatches on.  */
+
+void
+esan_finish_file (void)
+{
+  tree ctor_statements = NULL_TREE;
+  tree dctor_statements = NULL_TREE;
+  initialize_sanitizer_builtins ();
+  /* NOTE(review): TOOL_ID stays NULL_TREE when neither efficiency flag
+     is set, which would put a null element into the constructor built
+     at the end -- confirm this function is only reached with one of
+     the flags enabled.  */
+  tree tool_id = NULL_TREE;
+
+  tree init_decl = builtin_decl_implicit (BUILT_IN_ESAN_INIT);
+  tree exit_decl = builtin_decl_implicit (BUILT_IN_ESAN_EXIT);
+
+  if (flag_sanitize & SANITIZE_EFFICIENCY_CACHE_FRAG)
+    {
+      esan_type esan_type_instance;
+      size_t index = 0;
+      size_t num_types = vec_esan_type.length ();
+      vec<constructor_elt, va_gc> *v;
+      vec_alloc (v, num_types);
+
+      /* Create StructInfo struct for the every struct we have instrumented. */
+      while (vec_esan_type.iterate (index, &esan_type_instance))
+	{
+	  tree ptr = build_fold_addr_expr (esan_type_instance.array_type);
+	  esan_add_struct (ptr, v, esan_type_instance.fields_count,
+			   esan_type_instance.field_counter_name,
+			   int_size_in_bytes (esan_type_instance.type));
+
+	  /* Was allocated by xmalloc, so free the memory. */
+	  free (esan_type_instance.field_counter_name);
+	  ++index;
+	}
+
+      /* Create module name string. */
+      size_t module_name_len = strlen (main_input_filename) + 1;
+      tree module_name_str
+	= build_string (module_name_len, main_input_filename);
+      TREE_TYPE (module_name_str)
+	= build_array_type_nelts (char_type_node, module_name_len);
+      TREE_READONLY (module_name_str) = 1;
+      TREE_STATIC (module_name_str) = 1;
+
+      /* Tool id 1 == cache-frag tool.  */
+      tool_id = build_int_cst (unsigned_type_node, 1);
+      /* First, create the array of struct info type. */
+      tree stype = esan_get_struct_info_type ();
+
+      stype = build_array_type_nelts (stype, num_types);
+
+      char tmp_info_name[32];
+      ASM_GENERATE_INTERNAL_LABEL (tmp_info_name, "Lesan_info", 0);
+
+      tree var = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+			     get_identifier (tmp_info_name), stype);
+
+      TREE_STATIC (var) = 1;
+      TREE_PUBLIC (var) = 0;
+      TREE_READONLY (var) = 1;
+      DECL_ARTIFICIAL (var) = 1;
+      DECL_IGNORED_P (var) = 1;
+      DECL_EXTERNAL (var) = 0;
+
+      tree ctor = build_constructor (stype, v);
+
+      TREE_CONSTANT (ctor) = 1;
+      TREE_STATIC (ctor) = 1;
+      DECL_INITIAL (var) = ctor;
+
+      varpool_node::finalize_decl (var);
+
+      tree ctype = esan_get_cache_frag_info_type ();
+      char tmp_cache_name[32];
+      ASM_GENERATE_INTERNAL_LABEL (tmp_cache_name, "Lesan_cache", 0);
+
+      tree c_var = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+			       get_identifier (tmp_cache_name), ctype);
+      TREE_STATIC (c_var) = 1;
+      TREE_PUBLIC (c_var) = 0;
+      DECL_ARTIFICIAL (c_var) = 1;
+      DECL_IGNORED_P (c_var) = 1;
+      DECL_EXTERNAL (c_var) = 0;
+
+      /* CacheFragInfo = { UnitName, NumStructs, Structs }.  */
+      tree c_ctor
+	= build_constructor_va (ctype, 3, NULL_TREE,
+				build_fold_addr_expr (module_name_str),
+				NULL_TREE,
+				build_int_cst (unsigned_type_node, num_types),
+				NULL_TREE, build_fold_addr_expr (var));
+
+      TREE_CONSTANT (c_ctor) = 1;
+      TREE_STATIC (c_ctor) = 1;
+      DECL_INITIAL (c_var) = c_ctor;
+      varpool_node::finalize_decl (c_var);
+
+      /* __esan_init (1, &cache_frag_info) as a high-priority ctor.  */
+      append_to_statement_list (
+	build_call_expr (init_decl, 2, build_int_cst (unsigned_type_node, 1),
+			 build_fold_addr_expr (c_var)),
+	&ctor_statements);
+      cgraph_build_static_cdtor ('I', ctor_statements,
+				 MAX_RESERVED_INIT_PRIORITY - 1);
+
+      append_to_statement_list (build_call_expr (exit_decl, 1,
+						 build_fold_addr_expr (c_var)),
+				&dctor_statements);
+      cgraph_build_static_cdtor ('D', dctor_statements,
+				 MAX_RESERVED_INIT_PRIORITY - 1);
+    }
+  else if (flag_sanitize & SANITIZE_EFFICIENCY_WORKING_SET)
+    {
+      /* Tool id 2 == working-set tool; __esan_init gets no module
+	 info for this tool.  */
+      tool_id = build_int_cst (unsigned_type_node, 2);
+      append_to_statement_list (
+	build_call_expr (init_decl, 2, build_int_cst (unsigned_type_node, 2),
+			 build_zero_cst (long_unsigned_type_node)),
+	&ctor_statements);
+      cgraph_build_static_cdtor ('I', ctor_statements,
+				 MAX_RESERVED_INIT_PRIORITY - 1);
+      append_to_statement_list (build_call_expr (exit_decl, 0),
+				&dctor_statements);
+      cgraph_build_static_cdtor ('D', dctor_statements,
+				 MAX_RESERVED_INIT_PRIORITY - 1);
+    }
+
+  tree tool_id_type = build_array_type_nelts (unsigned_type_node, 1);
+
+  /* Runtime part relies on __esan_which_tool global, which
+     informs the runtime about what part of runtime library should run. */
+  tree tool_id_decl
+    = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+		  get_identifier ("__esan_which_tool"), tool_id_type);
+  TREE_STATIC (tool_id_decl) = 1;
+  TREE_PUBLIC (tool_id_decl) = 1;
+  DECL_WEAK (tool_id_decl) = 1;
+  DECL_ARTIFICIAL (tool_id_decl) = 1;
+  DECL_IGNORED_P (tool_id_decl) = 1;
+  DECL_EXTERNAL (tool_id_decl) = 0;
+
+  tree tool_id_ctor
+    = build_constructor_va (TREE_TYPE (tool_id_decl), 1, NULL_TREE, tool_id);
+  TREE_CONSTANT (tool_id_ctor) = 1;
+  TREE_STATIC (tool_id_ctor) = 1;
+  DECL_INITIAL (tool_id_decl) = tool_id_ctor;
+  varpool_node::finalize_decl (tool_id_decl);
+}
+
+/* The pass descriptor. */
+
+namespace {
+
+const pass_data pass_data_esan = {
+  GIMPLE_PASS,		 /* type */
+  "esan",		 /* name */
+  OPTGROUP_NONE,	 /* optinfo_flags */
+  TV_NONE,		 /* tv_id */
+  (PROP_ssa | PROP_cfg), /* properties_required */
+  0,			 /* properties_provided */
+  0,			 /* properties_destroyed */
+  0,			 /* todo_flags_start */
+  TODO_update_ssa,	 /* todo_flags_finish */
+};
+
+/* Optimized variant of the esan pass; gated on either efficiency
+   sanitizer flag.  */
+class pass_esan : public gimple_opt_pass
+{
+public:
+  pass_esan (gcc::context *ctxt)
+    : gimple_opt_pass (pass_data_esan, ctxt) {}
+
+  /* opt_pass methods: */
+  opt_pass *
+  clone () { return new pass_esan (m_ctxt); }
+
+  virtual bool
+  gate (function *)
+  {
+    return (
+      (flag_sanitize
+       & (SANITIZE_EFFICIENCY_WORKING_SET | SANITIZE_EFFICIENCY_CACHE_FRAG))
+      != 0);
+  }
+
+  virtual unsigned int
+  execute (function *) { return esan_pass (); }
+
+}; // class pass_esan
+
+} // namespace
+
+/* Factory used by the pass manager to create the esan pass.  */
+gimple_opt_pass *
+make_pass_esan (gcc::context *ctxt)
+{
+  return new pass_esan (ctxt);
+}
+
+namespace {
+
+const pass_data pass_data_esan_O0 = {
+  GIMPLE_PASS,		 /* type */
+  "esan0",		 /* name */
+  OPTGROUP_NONE,	 /* optinfo_flags */
+  TV_NONE,		 /* tv_id */
+  (PROP_ssa | PROP_cfg), /* properties_required */
+  0,			 /* properties_provided */
+  0,			 /* properties_destroyed */
+  0,			 /* todo_flags_start */
+  TODO_update_ssa,	 /* todo_flags_finish */
+};
+
+/* -O0 variant of the esan pass; additionally gated on !optimize.  */
+class pass_esan_O0 : public gimple_opt_pass
+{
+public:
+  pass_esan_O0 (gcc::context *ctxt)
+    : gimple_opt_pass (pass_data_esan_O0, ctxt) {}
+
+  /* opt_pass methods: */
+  virtual bool
+  gate (function *)
+  {
+    return (
+      (flag_sanitize
+       & (SANITIZE_EFFICIENCY_WORKING_SET | SANITIZE_EFFICIENCY_CACHE_FRAG))
+	!= 0
+      && !optimize);
+  }
+
+  virtual unsigned int
+  execute (function *) { return esan_pass (); }
+
+}; // class pass_esan_O0
+
+} // namespace
+
+/* Factory used by the pass manager to create the -O0 esan pass.  */
+gimple_opt_pass *
+make_pass_esan_O0 (gcc::context *ctxt)
+{
+  return new pass_esan_O0 (ctxt);
+}
--- /dev/null
+/* EfficiencySanitizer.
+ Copyright (C) 2011-2018 Free Software Foundation, Inc.
+ Contributed by Denis Khalikov.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef TREE_ESAN
+#define TREE_ESAN
+
+/* Emit the per-module esan metadata plus the __esan_init/__esan_exit
+   cdtors; called at end of compilation.  */
+extern void esan_finish_file (void);
+/* Expand (or drop) an IFN_ESAN_RECORD_ACCESS internal call at GSIP;
+   returns the result of removing the original statement.  */
+extern bool esan_expand_record_access_ifn (gimple_stmt_iterator *gsip);
+
+#endif /* TREE_ESAN */
SANITIZE_VPTR = 1UL << 21,
SANITIZE_BOUNDS_STRICT = 1UL << 22,
SANITIZE_UI_OVERFLOW = 1 << 23,
+ SANITIZE_EFFICIENCY_WORKING_SET = 1UL << 24,
+ SANITIZE_EFFICIENCY_CACHE_FRAG = 1UL << 25,
SANITIZE_UNDEFINED = SANITIZE_SHIFT | SANITIZE_DIVIDE | SANITIZE_UNREACHABLE
| SANITIZE_VLA | SANITIZE_NULL | SANITIZE_RETURN
| SANITIZE_SI_OVERFLOW | SANITIZE_BOOL | SANITIZE_ENUM
#endif
#endif
+#ifndef LIBESAN_SPEC
+#define STATIC_LIBESAN_LIBS \
+ " %{static-libesan:%:include(libsanitizer.spec)%(link_libesan)}"
+#ifdef HAVE_LD_STATIC_DYNAMIC
+#define LIBESAN_SPEC "%{static-libesan:" LD_STATIC_OPTION \
+ "} -lesan %{static-libesan:" LD_DYNAMIC_OPTION "}" \
+ STATIC_LIBESAN_LIBS
+#else
+#define LIBESAN_SPEC "-lesan" STATIC_LIBESAN_LIBS
+#endif
+#endif
+
+
/* Linker options for compressed debug sections. */
#if HAVE_LD_COMPRESS_DEBUG == 0
/* No linker support. */
%{%:sanitize(thread):" LIBTSAN_SPEC "\
%{static:%ecannot specify -static with -fsanitize=thread}}\
%{!%:sanitize(address):%{%:sanitize(undefined):" LIBUBSAN_SPEC "}}\
+ %{%:sanitize(efficiency-working-set):" LIBESAN_SPEC "}\
+ %{%:sanitize(efficiency-cache-frag):" LIBESAN_SPEC "}\
%{%:sanitize(leak):" LIBLSAN_SPEC "}}}"
#endif
return ((flag_sanitize
& (SANITIZE_ADDRESS | SANITIZE_LEAK | SANITIZE_THREAD))
== SANITIZE_LEAK) ? "" : NULL;
+ if (strcmp (argv[0], "efficiency-working-set") == 0)
+ return (flag_sanitize & SANITIZE_EFFICIENCY_WORKING_SET) ? "" : NULL;
+ if (strcmp (argv[0], "efficiency-cache-frag") == 0)
+ return (flag_sanitize & SANITIZE_EFFICIENCY_CACHE_FRAG) ? "" : NULL;
return NULL;
}
/* This should get expanded in the sanopt pass. */
static void
+expand_ESAN_RECORD_ACCESS (internal_fn, gcall *)
+{
+ gcc_unreachable ();
+}
+
+/* This should get expanded in the sanopt pass. */
+
+static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
gcc_unreachable ();
DEF_INTERNAL_FN (MUL_OVERFLOW, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
DEF_INTERNAL_FN (TSAN_FUNC_EXIT, ECF_NOVOPS | ECF_LEAF | ECF_NOTHROW, NULL)
DEF_INTERNAL_FN (VA_ARG, ECF_NOTHROW | ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ESAN_RECORD_ACCESS, ECF_LEAF | ECF_NOTHROW, NULL)
/* An unduplicable, uncombinable function. Generally used to preserve
a CFG property in the face of jump threading, tail merging or
SANITIZER_OPT (returns-nonnull-attribute, SANITIZE_RETURNS_NONNULL_ATTRIBUTE),
SANITIZER_OPT (object-size, SANITIZE_OBJECT_SIZE),
SANITIZER_OPT (vptr, SANITIZE_VPTR),
+ SANITIZER_OPT (efficiency-working-set, SANITIZE_EFFICIENCY_WORKING_SET),
+ SANITIZER_OPT (efficiency-cache-frag, SANITIZE_EFFICIENCY_CACHE_FRAG),
SANITIZER_OPT (all, ~0),
#undef SANITIZER_OPT
{ NULL, 0, 0 }
NEXT_PASS (pass_sancov);
NEXT_PASS (pass_asan);
NEXT_PASS (pass_tsan);
+ NEXT_PASS (pass_esan);
/* Pass group that runs when 1) enabled, 2) there are loops
in the function. Make sure to run pass_fix_loops before
to discover/remove loops before running the gate function
NEXT_PASS (pass_sancov);
NEXT_PASS (pass_asan);
NEXT_PASS (pass_tsan);
+ NEXT_PASS (pass_esan);
/* ??? We do want some kind of loop invariant motion, but we possibly
need to adjust LIM to be more friendly towards preserving accurate
debug information here. */
NEXT_PASS (pass_sancov_O0);
NEXT_PASS (pass_asan_O0);
NEXT_PASS (pass_tsan_O0);
+ NEXT_PASS (pass_esan_O0);
NEXT_PASS (pass_sanopt);
NEXT_PASS (pass_cleanup_eh);
NEXT_PASS (pass_lower_resx);
"__asan_after_dynamic_init",
BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+/* Efficiency Sanitizer */
+DEF_SANITIZER_BUILTIN(BUILT_IN_ESAN_INIT, "__esan_init",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ESAN_EXIT, "__esan_exit",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_STORE1, "__esan_aligned_store1",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_LOAD1, "__esan_aligned_load1",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_STORE2, "__esan_aligned_store2",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_LOAD2, "__esan_aligned_load2",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_STORE4, "__esan_aligned_store4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_LOAD4, "__esan_aligned_load4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_STORE8, "__esan_aligned_store8",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_LOAD8, "__esan_aligned_load8",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_STORE16, "__esan_aligned_store16",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_ALIGNED_LOAD16, "__esan_aligned_load16",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_STORE1, "__esan_unaligned_store1",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_LOAD1, "__esan_unaligned_load1",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_STORE2, "__esan_unaligned_store2",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_LOAD2, "__esan_unaligned_load2",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_STORE4, "__esan_unaligned_store4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_LOAD4, "__esan_unaligned_load4",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_STORE8, "__esan_unaligned_store8",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_LOAD8, "__esan_unaligned_load8",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_STORE16, "__esan_unaligned_store16",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_UNALIGNED_LOAD16, "__esan_unaligned_load16",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN (BUILT_IN_ESAN_INCREMENT, "__esan_increment",
+ BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
+
+
/* Thread Sanitizer */
DEF_SANITIZER_BUILTIN(BUILT_IN_TSAN_INIT, "__tsan_init",
BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
#include "gimple-iterator.h"
#include "asan.h"
#include "ubsan.h"
+#include "esan.h"
#include "params.h"
#include "tree-hash-traits.h"
case IFN_ASAN_CHECK:
no_next = asan_expand_check_ifn (&gsi, use_calls);
break;
+	case IFN_ESAN_RECORD_ACCESS:
+	  no_next = esan_expand_record_access_ifn (&gsi);
+	  /* Break explicitly, matching the IFN_ASAN_CHECK case above;
+	     previously this fell through into the default case, which is
+	     fragile if another case label is ever inserted between.  */
+	  break;
default:
break;
}
--- /dev/null
+// { dg-do run { target { x86_64-*-linux* } } }
+// { dg-options "-fsanitize=efficiency-cache-frag" }
+
+struct A
+{
+ int a;
+ char b;
+ float c;
+ double d;
+ unsigned e;
+ unsigned long f;
+ unsigned long long j;
+};
+
+struct B : A
+{
+ int f1;
+ int f2;
+ int f3;
+};
+
+struct C : B
+{
+ int f4;
+ int f5;
+ int f6;
+};
+
+struct E
+{
+ int f7;
+ int f8;
+};
+
+struct D : B, E
+{
+ int f9;
+};
+
+static int count = 10;
+
+static void
+foo ()
+{
+ A a;
+ for (int i = 0; i < count; ++i)
+ {
+ a.a = i;
+ a.b = a.a;
+ a.c = i;
+ a.d = i;
+ a.e = i;
+ a.f = i;
+ a.j = i;
+ }
+
+ A b;
+ A *ptr = &b;
+ for (int i = 0; i < count; ++i)
+ {
+ ptr->a = i;
+ ptr->b = ptr->a;
+ ptr->c = i;
+ ptr->d = i;
+ ptr->e = i;
+ ptr->f = i;
+ ptr->j = i;
+ }
+}
+
+void
+bar ()
+{
+ C c;
+ for (int i = 0; i < count; ++i)
+ {
+ c.a = i;
+ c.f1 = i;
+ c.f4 = i;
+ }
+
+ D d;
+ for (int i = 0; i < count; ++i)
+ {
+ d.f9 = i;
+ d.f7 = i;
+ }
+}
+
+int
+main (int argc, char **argv)
+{
+ foo ();
+ bar ();
+ return 0;
+}
--- /dev/null
+# Copyright (C) 2012-2016 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Load support procs.
+load_lib g++-dg.exp
+load_lib esan-dg.exp
+
+# Initialize `dg'.
+dg-init
+esan_init
+
+# Main loop.
+if [check_effective_target_fsanitize_esan] {
+    # These are C++ tests (*.C) driven through g++-dg.exp, so use
+    # g++-dg-runtest (as the g++.dg tsan/asan suites do), not gcc-dg-runtest.
+    g++-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.C]] "" ""
+}
+
+# All done.
+esan_finish
+dg-finish
--- /dev/null
+// { dg-do run { target { x86_64-*-linux* } } }
+// { dg-options "-fsanitize=efficiency-working-set" }
+
+class A {
+public:
+ bool : 1;
+ bool IsExpandedByDefault : 1;
+};
+
+void fn1() {
+ A a;
+ a.IsExpandedByDefault = 0;
+}
+
+int main (int argc, char **argv) {
+ fn1 ();
+ return 0;
+}
--- /dev/null
+// { dg-do run { target { x86_64-*-linux* } } }
+// { dg-options "-fsanitize=efficiency-working-set" }
+
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <assert.h>
+
+const int size = 0x1 << 25; // 32MB == 524288 (~512K) cache lines
+const int line_size = 64;
+
+int main(int argc, char **argv) {
+  char *bufA = (char *)malloc(sizeof(char) * line_size);
+  char bufB[64];
+  char *bufC = (char *)mmap(0, size, PROT_READ | PROT_WRITE,
+                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  bufA[0] = 0;
+  // This additional access to the same line should not increase the line
+  // count: but it's difficult to make a non-flaky test that measures the
+  // lines down to the ones digit so right now we're not really testing that.
+  // If we add a heap-only mode we may be able to be more precise.
+  bufA[1] = 0;
+  bufB[33] = 1;
+  // Touch one byte per cache line across the whole mapping so the
+  // working-set tool sees every line of bufC.
+  for (int i = 0; i < size; i += line_size)
+    bufC[i] = 0;
+  free(bufA);
+  // NOTE(review): only 0x4000 of the 32MB mapping is unmapped here —
+  // presumably deliberate (exercises partial-munmap interception, as in the
+  // original LLVM esan test), but confirm; the rest leaks until exit.
+  munmap(bufC, 0x4000);
+  return 0;
+}
--- /dev/null
+# Copyright (C) 2013-2016 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Return 1 if compilation with -fsanitize=efficiency-working-set is error-free for trivial
+# code, 0 otherwise. Also set what to do by default here, depending on the
+# result of a runtime test.
+
+proc check_effective_target_fsanitize_esan {} {
+ global individual_timeout
+ global dg-do-what-default
+
+ if ![check_no_compiler_messages fsanitize_esan executable {
+ int main (void) { return 0; }
+ }] {
+ return 0
+ }
+
+ # Lower timeout value in case test does not terminate properly.
+ set individual_timeout 20
+ if [check_runtime_nocache esan_works {
+ int main () { return 0; }
+ }] {
+ set dg-do-what-default run
+ } else {
+ set dg-do-what-default link
+ }
+ unset individual_timeout
+
+ return 1
+}
+
+#
+# esan_link_flags -- compute library path and flags to find libesan.
+# (originally from g++.exp)
+#
+
+proc esan_link_flags { paths } {
+ global srcdir
+ global ld_library_path
+ global shlib_ext
+ global esan_saved_library_path
+
+ set gccpath ${paths}
+ set flags ""
+
+ set shlib_ext [get_shlib_extension]
+ set esan_saved_library_path $ld_library_path
+
+ if { $gccpath != "" } {
+ if { [file exists "${gccpath}/libsanitizer/esan/.libs/libesan.a"]
+ || [file exists "${gccpath}/libsanitizer/esan/.libs/libesan.${shlib_ext}"] } {
+ append flags " -B${gccpath}/libsanitizer/esan/ "
+ append flags " -L${gccpath}/libsanitizer/esan/.libs "
+ append ld_library_path ":${gccpath}/libsanitizer/esan/.libs"
+ }
+ } else {
+ global tool_root_dir
+
+ set libesan [lookfor_file ${tool_root_dir} libesan]
+ if { $libesan != "" } {
+ append flags "-L${libesan} "
+ append ld_library_path ":${libesan}"
+ }
+ }
+
+ set_ld_library_path_env_vars
+
+ return "$flags"
+}
+
+#
+# esan_init -- called at the start of each subdir of tests
+#
+
+proc esan_init { args } {
+ global TEST_ALWAYS_FLAGS
+ global ALWAYS_CXXFLAGS
+ global TOOL_OPTIONS
+ global esan_saved_TEST_ALWAYS_FLAGS
+ global esan_saved_ALWAYS_CXXFLAGS
+ global dg-do-what-default
+ global esan_saved_dg-do-what-default
+
+ set link_flags ""
+ if ![is_remote host] {
+ if [info exists TOOL_OPTIONS] {
+ set link_flags "[esan_link_flags [get_multilibs ${TOOL_OPTIONS}]]"
+ } else {
+ set link_flags "[esan_link_flags [get_multilibs]]"
+ }
+ }
+
+ if [info exists dg-do-what-default] {
+ set esan_saved_dg-do-what-default ${dg-do-what-default}
+ }
+ if [info exists TEST_ALWAYS_FLAGS] {
+ set esan_saved_TEST_ALWAYS_FLAGS $TEST_ALWAYS_FLAGS
+ }
+ if [info exists ALWAYS_CXXFLAGS] {
+ set esan_saved_ALWAYS_CXXFLAGS $ALWAYS_CXXFLAGS
+ set ALWAYS_CXXFLAGS [concat "{ldflags=$link_flags}" $ALWAYS_CXXFLAGS]
+ set ALWAYS_CXXFLAGS [concat "{additional_flags=-fsanitize=efficiency-working-set -g}" $ALWAYS_CXXFLAGS]
+ } else {
+ if [info exists TEST_ALWAYS_FLAGS] {
+ set TEST_ALWAYS_FLAGS "$link_flags -fsanitize=efficiency-working-set -g $TEST_ALWAYS_FLAGS"
+ } else {
+ set TEST_ALWAYS_FLAGS "$link_flags -fsanitize=efficiency-working-set -g"
+ }
+ }
+}
+
+#
+# esan_finish -- called at the end of each subdir of tests
+#
+
+proc esan_finish { args } {
+ global TEST_ALWAYS_FLAGS
+ global esan_saved_TEST_ALWAYS_FLAGS
+ global esan_saved_ALWAYS_CXXFLAGS
+ global dg-do-what-default
+ global esan_saved_dg-do-what-default
+ global esan_saved_library_path
+ global ld_library_path
+
+ if [info exists esan_saved_ALWAYS_CXXFLAGS ] {
+ set ALWAYS_CXXFLAGS $esan_saved_ALWAYS_CXXFLAGS
+ } else {
+ if [info exists esan_saved_TEST_ALWAYS_FLAGS] {
+ set TEST_ALWAYS_FLAGS $esan_saved_TEST_ALWAYS_FLAGS
+ } else {
+ unset TEST_ALWAYS_FLAGS
+ }
+ }
+
+ if [info exists esan_saved_dg-do-what-default] {
+ set dg-do-what-default ${esan_saved_dg-do-what-default}
+ } else {
+ unset dg-do-what-default
+ }
+ set ld_library_path $esan_saved_library_path
+ set_ld_library_path_env_vars
+ clear_effective_target_cache
+}
#include "opts-diagnostic.h"
#include "asan.h"
#include "tsan.h"
+#include "esan.h"
#include "plugin.h"
#include "context.h"
#include "pass_manager.h"
if (flag_sanitize & SANITIZE_THREAD)
tsan_finish_file ();
+ if (flag_sanitize
+ & (SANITIZE_EFFICIENCY_WORKING_SET | SANITIZE_EFFICIENCY_CACHE_FRAG))
+ esan_finish_file ();
+
if (flag_check_pointer_bounds)
chkp_finish_file ();
extern gimple_opt_pass *make_pass_asan_O0 (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_tsan (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_tsan_O0 (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_esan (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_esan_O0 (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_sancov (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_sancov_O0 (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_lower_cf (gcc::context *ctxt);
case IFN_UBSAN_VPTR:
case IFN_UBSAN_OBJECT_SIZE:
case IFN_ASAN_CHECK:
+ case IFN_ESAN_RECORD_ACCESS:
return false;
default:
break;
nodist_saninclude_HEADERS += \
include/sanitizer/tsan_interface_atomic.h
endif
+if ESAN_SUPPORTED
+SUBDIRS += esan
+nodist_saninclude_HEADERS += \
+ include/sanitizer/esan_interface.h
+endif
endif
## May be used by toolexeclibdir.
@SANITIZER_SUPPORTED_TRUE@@TSAN_SUPPORTED_TRUE@am__append_5 = \
@SANITIZER_SUPPORTED_TRUE@@TSAN_SUPPORTED_TRUE@ include/sanitizer/tsan_interface_atomic.h
+@ESAN_SUPPORTED_TRUE@@SANITIZER_SUPPORTED_TRUE@am__append_6 = esan
+@ESAN_SUPPORTED_TRUE@@SANITIZER_SUPPORTED_TRUE@am__append_7 = \
+@ESAN_SUPPORTED_TRUE@@SANITIZER_SUPPORTED_TRUE@ include/sanitizer/esan_interface.h
+
subdir = .
DIST_COMMON = ChangeLog $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(top_srcdir)/configure $(am__configure_deps) \
ETAGS = etags
CTAGS = ctags
DIST_SUBDIRS = sanitizer_common interception libbacktrace lsan ubsan \
- asan tsan
+ asan tsan esan
ACLOCAL = @ACLOCAL@
ALLOC_FILE = @ALLOC_FILE@
AMTAR = @AMTAR@
libdir = @libdir@
libexecdir = @libexecdir@
link_libasan = @link_libasan@
+link_libesan = @link_libesan@
link_liblsan = @link_liblsan@
link_libtsan = @link_libtsan@
link_libubsan = @link_libubsan@
top_srcdir = @top_srcdir@
ACLOCAL_AMFLAGS = -I .. -I ../config
sanincludedir = $(libdir)/gcc/$(target_alias)/$(gcc_version)/include/sanitizer
-nodist_saninclude_HEADERS = $(am__append_1) $(am__append_5)
+nodist_saninclude_HEADERS = $(am__append_1) $(am__append_5) \
+ $(am__append_7)
@SANITIZER_SUPPORTED_TRUE@SUBDIRS = sanitizer_common $(am__append_2) \
@SANITIZER_SUPPORTED_TRUE@ $(am__append_3) lsan ubsan asan \
-@SANITIZER_SUPPORTED_TRUE@ $(am__append_4)
+@SANITIZER_SUPPORTED_TRUE@ $(am__append_4) $(am__append_6)
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
libdir = @libdir@
libexecdir = @libexecdir@
link_libasan = @link_libasan@
+link_libesan = @link_libesan@
link_liblsan = @link_liblsan@
link_libtsan = @link_libtsan@
link_libubsan = @link_libubsan@
SANITIZER_SUPPORTED_TRUE
USING_MAC_INTERPOSE_FALSE
USING_MAC_INTERPOSE_TRUE
+link_libesan
link_liblsan
link_libubsan
link_libtsan
link_libasan
+ESAN_SUPPORTED_FALSE
+ESAN_SUPPORTED_TRUE
LSAN_SUPPORTED_FALSE
LSAN_SUPPORTED_TRUE
TSAN_SUPPORTED_FALSE
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 12032 "configure"
+#line 12035 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 12138 "configure"
+#line 12141 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
# Get target configury.
unset TSAN_SUPPORTED
unset LSAN_SUPPORTED
+unset ESAN_SUPPORTED
. ${srcdir}/configure.tgt
if test "x$TSAN_SUPPORTED" = "xyes"; then
TSAN_SUPPORTED_TRUE=
LSAN_SUPPORTED_FALSE=
fi
+ if test "x$ESAN_SUPPORTED" = "xyes"; then
+ ESAN_SUPPORTED_TRUE=
+ ESAN_SUPPORTED_FALSE='#'
+else
+ ESAN_SUPPORTED_TRUE='#'
+ ESAN_SUPPORTED_FALSE=
+fi
+
# Check for functions needed.
for ac_func in clock_getres clock_gettime clock_settime
link_liblsan=$link_sanitizer_common
+# Set up the set of additional libraries that we need to link against for libesan.
+link_libesan=$link_sanitizer_common
+
# At least for glibc, clock_gettime is in librt. But don't pull that
# in if it still doesn't give us the function we want. This
fi
+if test "x$ESAN_SUPPORTED" = "xyes"; then
+ ac_config_files="$ac_config_files esan/Makefile"
+
+fi
+
as_fn_error "conditional \"LSAN_SUPPORTED\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${ESAN_SUPPORTED_TRUE}" && test -z "${ESAN_SUPPORTED_FALSE}"; then
+ as_fn_error "conditional \"ESAN_SUPPORTED\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${USING_MAC_INTERPOSE_TRUE}" && test -z "${USING_MAC_INTERPOSE_FALSE}"; then
as_fn_error "conditional \"USING_MAC_INTERPOSE\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
"ubsan/Makefile") CONFIG_FILES="$CONFIG_FILES ubsan/Makefile" ;;
"asan/Makefile") CONFIG_FILES="$CONFIG_FILES asan/Makefile" ;;
"tsan/Makefile") CONFIG_FILES="$CONFIG_FILES tsan/Makefile" ;;
+ "esan/Makefile") CONFIG_FILES="$CONFIG_FILES esan/Makefile" ;;
*) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
. ${multi_basedir}/config-ml.in
{ ml_norecursion=; unset ml_norecursion;}
;;
+ "esan/Makefile":F) cat > vpsed$$ << \_EOF
+s!`test -f '$<' || echo '$(srcdir)/'`!!
+_EOF
+ sed -f vpsed$$ $ac_file > tmp$$
+ mv tmp$$ $ac_file
+ rm vpsed$$
+ echo 'MULTISUBDIR =' >> $ac_file
+ ml_norecursion=yes
+ . ${multi_basedir}/config-ml.in
+ { ml_norecursion=; unset ml_norecursion;}
+ ;;
esac
done # for ac_tag
# Get target configury.
unset TSAN_SUPPORTED
unset LSAN_SUPPORTED
+unset ESAN_SUPPORTED
. ${srcdir}/configure.tgt
AM_CONDITIONAL(TSAN_SUPPORTED, [test "x$TSAN_SUPPORTED" = "xyes"])
AM_CONDITIONAL(LSAN_SUPPORTED, [test "x$LSAN_SUPPORTED" = "xyes"])
+AM_CONDITIONAL(ESAN_SUPPORTED, [test "x$ESAN_SUPPORTED" = "xyes"])
# Check for functions needed.
AC_CHECK_FUNCS(clock_getres clock_gettime clock_settime)
link_liblsan=$link_sanitizer_common
AC_SUBST(link_liblsan)
+# Set up the set of additional libraries that we need to link against for libesan.
+link_libesan=$link_sanitizer_common
+AC_SUBST(link_libesan)
# At least for glibc, clock_gettime is in librt. But don't pull that
# in if it still doesn't give us the function we want. This
])
fi
+if test "x$ESAN_SUPPORTED" = "xyes"; then
+ AC_CONFIG_FILES(AC_FOREACH([DIR], [esan], [DIR/Makefile ]),
+ [cat > vpsed$$ << \_EOF
+s!`test -f '$<' || echo '$(srcdir)/'`!!
+_EOF
+ sed -f vpsed$$ $ac_file > tmp$$
+ mv tmp$$ $ac_file
+ rm vpsed$$
+ echo 'MULTISUBDIR =' >> $ac_file
+ ml_norecursion=yes
+ . ${multi_basedir}/config-ml.in
+ AS_UNSET([ml_norecursion])
+])
+fi
+
AC_SUBST([TSAN_TARGET_DEPENDENT_OBJECTS])
AC_SUBST([SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS])
x86_64-*-linux* | i?86-*-linux*)
if test x$ac_cv_sizeof_void_p = x8; then
TSAN_SUPPORTED=yes
+ ESAN_SUPPORTED=yes
TSAN_TARGET_DEPENDENT_OBJECTS=tsan_rtl_amd64.lo
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=sanitizer_linux_x86_64.lo
fi
;;
arm*-*-linux*)
LSAN_SUPPORTED=yes
+ ESAN_SUPPORTED=yes
;;
aarch64*-*-linux*)
if test x$ac_cv_sizeof_void_p = x8; then
;;
x86_64-*-darwin[1]* | i?86-*-darwin[1]*)
TSAN_SUPPORTED=no
+ ESAN_SUPPORTED=no
;;
*)
UNSUPPORTED=1
--- /dev/null
+AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include
+
+# May be used by toolexeclibdir.
+gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
+
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC
+AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
+AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
+AM_CXXFLAGS += $(EXTRA_CXXFLAGS)
+ACLOCAL_AMFLAGS = -I m4
+
+toolexeclib_LTLIBRARIES = libesan.la
+
+esan_files = \
+ esan.cpp \
+ esan_flags.cpp \
+ esan_interface.cpp \
+ esan_interceptors.cpp \
+ esan_linux.cpp \
+ esan_sideline_linux.cpp \
+ cache_frag.cpp \
+ working_set.cpp \
+ working_set_posix.cpp
+
+libesan_la_SOURCES = $(esan_files)
+libesan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la
+if !USING_MAC_INTERPOSE
+libesan_la_LIBADD += $(top_builddir)/interception/libinterception.la
+endif
+if LIBBACKTRACE_SUPPORTED
+libesan_la_LIBADD += $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
+endif
+libesan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS)
+libesan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libesan)
+
+# Work around what appears to be a GNU make bug handling MAKEFLAGS
+# values defined in terms of make variables, as is the case for CC and
+# friends when we are called from the top level Makefile.
+AM_MAKEFLAGS = \
+ "AR_FLAGS=$(AR_FLAGS)" \
+ "CC_FOR_BUILD=$(CC_FOR_BUILD)" \
+ "CFLAGS=$(CFLAGS)" \
+ "CXXFLAGS=$(CXXFLAGS)" \
+ "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
+ "CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
+ "INSTALL=$(INSTALL)" \
+ "INSTALL_DATA=$(INSTALL_DATA)" \
+ "INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
+ "INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
+ "JC1FLAGS=$(JC1FLAGS)" \
+ "LDFLAGS=$(LDFLAGS)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
+ "MAKE=$(MAKE)" \
+ "MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
+ "PICFLAG=$(PICFLAG)" \
+ "PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
+ "SHELL=$(SHELL)" \
+ "RUNTESTFLAGS=$(RUNTESTFLAGS)" \
+ "exec_prefix=$(exec_prefix)" \
+ "infodir=$(infodir)" \
+ "libdir=$(libdir)" \
+ "prefix=$(prefix)" \
+ "includedir=$(includedir)" \
+ "AR=$(AR)" \
+ "AS=$(AS)" \
+ "LD=$(LD)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "NM=$(NM)" \
+ "PICFLAG=$(PICFLAG)" \
+ "RANLIB=$(RANLIB)" \
+ "DESTDIR=$(DESTDIR)"
+
+MAKEOVERRIDES=
+
+## ################################################################
+
--- /dev/null
+# Makefile.in generated by automake 1.11.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__make_dryrun = \
+ { \
+ am__dry=no; \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
+ | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+ *) \
+ for am__flg in $$MAKEFLAGS; do \
+ case $$am__flg in \
+ *=*|--*) ;; \
+ *n*) am__dry=yes; break;; \
+ esac; \
+ done;; \
+ esac; \
+ test $$am__dry = yes; \
+ }
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+@USING_MAC_INTERPOSE_FALSE@am__append_1 = $(top_builddir)/interception/libinterception.la
+@LIBBACKTRACE_SUPPORTED_TRUE@am__append_2 = $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
+subdir = esan
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
+ $(top_srcdir)/../config/depstand.m4 \
+ $(top_srcdir)/../config/lead-dot.m4 \
+ $(top_srcdir)/../config/libstdc++-raw-cxx.m4 \
+ $(top_srcdir)/../config/multi.m4 \
+ $(top_srcdir)/../config/override.m4 \
+ $(top_srcdir)/../config/stdint.m4 \
+ $(top_srcdir)/../ltoptions.m4 $(top_srcdir)/../ltsugar.m4 \
+ $(top_srcdir)/../ltversion.m4 $(top_srcdir)/../lt~obsolete.m4 \
+ $(top_srcdir)/acinclude.m4 $(top_srcdir)/../libtool.m4 \
+ $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs
+CONFIG_HEADER = $(top_builddir)/config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+ *) f=$$p;; \
+ esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+ for p in $$list; do echo "$$p $$p"; done | \
+ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+ if (++n[$$2] == $(am__install_max)) \
+ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+ END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(toolexeclibdir)"
+LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
+am__DEPENDENCIES_1 =
+libesan_la_DEPENDENCIES = \
+ $(top_builddir)/sanitizer_common/libsanitizer_common.la \
+ $(am__append_1) $(am__append_2) $(am__DEPENDENCIES_1)
+am__objects_1 = esan.lo esan_flags.lo esan_interface.lo \
+ esan_interceptors.lo esan_linux.lo esan_sideline_linux.lo \
+ cache_frag.lo working_set.lo working_set_posix.lo
+am_libesan_la_OBJECTS = $(am__objects_1)
+libesan_la_OBJECTS = $(am_libesan_la_OBJECTS)
+libesan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
+ $(CXXFLAGS) $(libesan_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/../depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(libesan_la_SOURCES)
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
+ETAGS = etags
+CTAGS = ctags
+ACLOCAL = @ACLOCAL@
+ALLOC_FILE = @ALLOC_FILE@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BACKTRACE_SUPPORTED = @BACKTRACE_SUPPORTED@
+BACKTRACE_SUPPORTS_THREADS = @BACKTRACE_SUPPORTS_THREADS@
+BACKTRACE_USES_MALLOC = @BACKTRACE_USES_MALLOC@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FORMAT_FILE = @FORMAT_FILE@
+GREP = @GREP@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSTDCXX_RAW_CXX_CXXFLAGS = @LIBSTDCXX_RAW_CXX_CXXFLAGS@
+LIBSTDCXX_RAW_CXX_LDFLAGS = @LIBSTDCXX_RAW_CXX_LDFLAGS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
+VERSION = @VERSION@
+VIEW_FILE = @VIEW_FILE@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_shared = @enable_shared@
+enable_static = @enable_static@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+link_libasan = @link_libasan@
+link_libesan = @link_libesan@
+link_liblsan = @link_liblsan@
+link_libtsan = @link_libtsan@
+link_libubsan = @link_libubsan@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+multi_basedir = @multi_basedir@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_noncanonical = @target_noncanonical@
+target_os = @target_os@
+target_vendor = @target_vendor@
+toolexecdir = @toolexecdir@
+toolexeclibdir = @toolexeclibdir@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include
+
+# May be used by toolexeclibdir.
+gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
+AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
+ -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
+ -fomit-frame-pointer -funwind-tables -fvisibility=hidden \
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11 $(EXTRA_CXXFLAGS)
+ACLOCAL_AMFLAGS = -I m4
+toolexeclib_LTLIBRARIES = libesan.la
+esan_files = \
+ esan.cpp \
+ esan_flags.cpp \
+ esan_interface.cpp \
+ esan_interceptors.cpp \
+ esan_linux.cpp \
+ esan_sideline_linux.cpp \
+ cache_frag.cpp \
+ working_set.cpp \
+ working_set_posix.cpp
+
+libesan_la_SOURCES = $(esan_files)
+libesan_la_LIBADD = \
+ $(top_builddir)/sanitizer_common/libsanitizer_common.la \
+ $(am__append_1) $(am__append_2) $(LIBSTDCXX_RAW_CXX_LDFLAGS)
+libesan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libesan)
+
+# Work around what appears to be a GNU make bug handling MAKEFLAGS
+# values defined in terms of make variables, as is the case for CC and
+# friends when we are called from the top level Makefile.
+AM_MAKEFLAGS = \
+ "AR_FLAGS=$(AR_FLAGS)" \
+ "CC_FOR_BUILD=$(CC_FOR_BUILD)" \
+ "CFLAGS=$(CFLAGS)" \
+ "CXXFLAGS=$(CXXFLAGS)" \
+ "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
+ "CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
+ "INSTALL=$(INSTALL)" \
+ "INSTALL_DATA=$(INSTALL_DATA)" \
+ "INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
+ "INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
+ "JC1FLAGS=$(JC1FLAGS)" \
+ "LDFLAGS=$(LDFLAGS)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
+ "MAKE=$(MAKE)" \
+ "MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
+ "PICFLAG=$(PICFLAG)" \
+ "PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
+ "SHELL=$(SHELL)" \
+ "RUNTESTFLAGS=$(RUNTESTFLAGS)" \
+ "exec_prefix=$(exec_prefix)" \
+ "infodir=$(infodir)" \
+ "libdir=$(libdir)" \
+ "prefix=$(prefix)" \
+ "includedir=$(includedir)" \
+ "AR=$(AR)" \
+ "AS=$(AS)" \
+ "LD=$(LD)" \
+ "LIBCFLAGS=$(LIBCFLAGS)" \
+ "NM=$(NM)" \
+ "PICFLAG=$(PICFLAG)" \
+ "RANLIB=$(RANLIB)" \
+ "DESTDIR=$(DESTDIR)"
+
+MAKEOVERRIDES =
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .cpp .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign esan/Makefile'; \
+ $(am__cd) $(top_srcdir) && \
+ $(AUTOMAKE) --foreign esan/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-toolexeclibLTLIBRARIES: $(toolexeclib_LTLIBRARIES)
+ @$(NORMAL_INSTALL)
+ @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \
+ list2=; for p in $$list; do \
+ if test -f $$p; then \
+ list2="$$list2 $$p"; \
+ else :; fi; \
+ done; \
+ test -z "$$list2" || { \
+ echo " $(MKDIR_P) '$(DESTDIR)$(toolexeclibdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(toolexeclibdir)" || exit 1; \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(toolexeclibdir)'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(toolexeclibdir)"; \
+ }
+
+uninstall-toolexeclibLTLIBRARIES:
+ @$(NORMAL_UNINSTALL)
+ @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \
+ for p in $$list; do \
+ $(am__strip_dir) \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(toolexeclibdir)/$$f'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(toolexeclibdir)/$$f"; \
+ done
+
+clean-toolexeclibLTLIBRARIES:
+ -test -z "$(toolexeclib_LTLIBRARIES)" || rm -f $(toolexeclib_LTLIBRARIES)
+ @list='$(toolexeclib_LTLIBRARIES)'; for p in $$list; do \
+ dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+ test "$$dir" != "$$p" || dir=.; \
+ echo "rm -f \"$${dir}/so_locations\""; \
+ rm -f "$${dir}/so_locations"; \
+ done
+libesan.la: $(libesan_la_OBJECTS) $(libesan_la_DEPENDENCIES) $(EXTRA_libesan_la_DEPENDENCIES)
+ $(libesan_la_LINK) -rpath $(toolexeclibdir) $(libesan_la_OBJECTS) $(libesan_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+ -rm -f *.$(OBJEXT)
+
+distclean-compile:
+ -rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cache_frag.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esan.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esan_flags.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esan_interceptors.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esan_interface.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esan_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/esan_sideline_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/working_set.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/working_set_posix.Plo@am__quote@
+
+.cpp.o:
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $<
+
+.cpp.obj:
+@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cpp.lo:
+@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ set x; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ shift; \
+ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ if test $$# -gt 0; then \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ "$$@" $$unique; \
+ else \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$unique; \
+ fi; \
+ fi
+ctags: CTAGS
+CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+ END { if (nonempty) { for (i in files) print i; }; }'`; \
+ test -z "$(CTAGS_ARGS)$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && $(am__cd) $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+ for dir in "$(DESTDIR)$(toolexeclibdir)"; do \
+ test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+ done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+ -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+ @echo "This command is intended for maintainers to use"
+ @echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-toolexeclibLTLIBRARIES \
+ mostlyclean-am
+
+distclean: distclean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+ distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-toolexeclibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+ -rm -rf ./$(DEPDIR)
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-toolexeclibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+ clean-libtool clean-toolexeclibLTLIBRARIES ctags distclean \
+ distclean-compile distclean-generic distclean-libtool \
+ distclean-tags dvi dvi-am html html-am info info-am install \
+ install-am install-data install-data-am install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-ps install-ps-am \
+ install-strip install-toolexeclibLTLIBRARIES installcheck \
+ installcheck-am installdirs maintainer-clean \
+ maintainer-clean-generic mostlyclean mostlyclean-compile \
+ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+ tags uninstall uninstall-am uninstall-toolexeclibLTLIBRARIES
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
--- /dev/null
+//===-- cache_frag.cpp ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// This file contains cache fragmentation-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "esan.h"
+#include "esan_flags.h"
+#include "sanitizer_common/sanitizer_addrhashmap.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include <string.h>
+
+namespace __esan {
+
+//===-- Struct field access counter runtime -------------------------------===//
+
+// This should be kept consistent with LLVM's EfficiencySanitizer StructInfo.
+// One record per instrumented struct type; the compiler emits these tables
+// per compilation unit and hands them to the runtime through
+// processCacheFragCompilationUnitInit().
+struct StructInfo {
+ const char *StructName; // Prefixed name ("struct."/"class."/"union.", see reportStructCounter).
+ u32 Size; // Struct size in bytes (printed in the report).
+ u32 NumFields; // Number of entries in each per-field array below.
+ u32 *FieldOffset; // auxiliary struct field info.
+ u32 *FieldSize; // auxiliary struct field info.
+ const char **FieldTypeName; // auxiliary struct field info.
+ u64 *FieldCounters; // Per-field access counters; also the hash-map key.
+ u64 *ArrayCounter; // NOTE(review): never read in this file -- confirm its
+ // intended use against the compiler-side instrumentation.
+ bool hasAuxFieldInfo() { return FieldOffset != nullptr; }
+};
+
+// This should be kept consistent with LLVM's EfficiencySanitizer CacheFragInfo.
+// The tool-specific information per compilation unit (module).
+struct CacheFragInfo {
+ const char *UnitName;
+ u32 NumStructs;
+ StructInfo *Structs;
+};
+
+// Per-struct aggregate produced at unregistration time.
+struct StructCounter {
+ StructInfo *Struct;
+ u64 Count; // The total access count of the struct.
+ u64 Ratio; // Difference ratio for the struct layout access.
+};
+
+// We use StructHashMap to keep track of an unique copy of StructCounter.
+// Keyed by the FieldCounters pointer (see registerStructInfo), which
+// uniquely identifies one counter table across compilation units.
+typedef AddrHashMap<StructCounter, 31051> StructHashMap;
+struct Context {
+ StructHashMap StructMap;
+ u32 NumStructs; // Currently registered structs (load/unload balanced).
+ u64 TotalCount; // The total access count of all structs.
+};
+static Context *Ctx; // Placement-new'd in initializeCacheFrag().
+
+// Emit the one-line end-of-run summary of all struct field accesses.
+static void reportStructSummary() {
+ // FIXME: provide a better struct field access summary report.
+ Report("%s: total struct field access count = %llu\n", SanitizerToolName,
+ Ctx->TotalCount);
+}
+
+// FIXME: we are still exploring proper ways to evaluate the difference
+// between struct field counts.  For now the ratio is simply max/min, with
+// the smaller operand clamped to 1 so a never-touched field cannot cause a
+// division by zero.
+static inline u64 computeDifferenceRatio(u64 Val1, u64 Val2) {
+  const u64 Larger = Val1 > Val2 ? Val1 : Val2;
+  u64 Smaller = Val1 > Val2 ? Val2 : Val1;
+  if (Smaller == 0)
+    Smaller = 1;
+  return Larger / Smaller;
+}
+
+// Print one struct's report: name (with the "class."/"struct." prefix and
+// the trailing '$'-suffix stripped), size, total access count, difference
+// ratio, and -- when the compiler emitted auxiliary field info -- the
+// per-field offsets, sizes, counts and type names.
+static void reportStructCounter(StructHashMap::Handle &Handle) {
+  // Upper bound on printed type-name length; must be int to be a valid
+  // '*' precision argument for %.*s.
+  const int TypePrintLimit = 512;
+  const char *type, *start, *end;
+  StructInfo *Struct = Handle->Struct;
+  // Union field address calculation is done via bitcast instead of GEP,
+  // so the count for union is always 0.
+  // We skip the union report to avoid confusion.
+  if (strncmp(Struct->StructName, "union.", 6) == 0)
+    return;
+  // Remove the '.' after class/struct during print.
+  if (strncmp(Struct->StructName, "class.", 6) == 0) {
+    type = "class";
+    start = &Struct->StructName[6];
+  } else {
+    // NOTE(review): this branch assumes every non-union, non-class name
+    // begins with "struct." (7 chars) -- confirm the instrumentation emits
+    // no other prefixes.
+    type = "struct";
+    start = &Struct->StructName[7];
+  }
+  // Remove the suffixes with '$' during print.
+  end = strchr(start, '$');
+  if (end == nullptr)
+    return;
+
+  // The '*' precision argument of %.*s must have type int; passing the raw
+  // ptrdiff_t (64-bit on LP64) desynchronizes the va_arg consumption in
+  // Report, so cast explicitly.
+  Report(" %s %.*s\n", type, (int)(end - start), start);
+  Report(" size = %u, count = %llu, ratio = %llu\n", Struct->Size,
+         Handle->Count, Handle->Ratio);
+  if (Struct->hasAuxFieldInfo()) {
+    for (u32 i = 0; i < Struct->NumFields; ++i) {
+      Report(" #%2u: offset = %u,\t size = %u,"
+             "\t count = %llu,\t type = %.*s\n",
+             i, Struct->FieldOffset[i], Struct->FieldSize[i],
+             Struct->FieldCounters[i], TypePrintLimit,
+             Struct->FieldTypeName[i]);
+    }
+  } else {
+    for (u32 i = 0; i < Struct->NumFields; ++i) {
+      Report(" #%2u: count = %llu\n", i, Struct->FieldCounters[i]);
+    }
+  }
+}
+
+// Fold one struct's counters into its StructCounter: the total access
+// count and the sum of adjacent-field difference ratios.  Adds the total
+// into Ctx->TotalCount and prints the per-struct report when the ratio
+// reaches the report_threshold flag (or at verbosity >= 1 with any hits).
+static void computeStructRatio(StructHashMap::Handle &Handle) {
+  Handle->Ratio = 0;
+  Handle->Count = 0;
+  // Guard a degenerate zero-field struct: the previous code read
+  // FieldCounters[0] unconditionally, which is out of bounds when
+  // NumFields == 0.
+  if (Handle->Struct->NumFields > 0) {
+    Handle->Count = Handle->Struct->FieldCounters[0];
+    for (u32 i = 1; i < Handle->Struct->NumFields; ++i) {
+      Handle->Count += Handle->Struct->FieldCounters[i];
+      Handle->Ratio += computeDifferenceRatio(
+          Handle->Struct->FieldCounters[i - 1],
+          Handle->Struct->FieldCounters[i]);
+    }
+  }
+  Ctx->TotalCount += Handle->Count;
+  if (Handle->Ratio >= (u64)getFlags()->report_threshold ||
+      (Verbosity() >= 1 && Handle->Count > 0))
+    reportStructCounter(Handle);
+}
+
+// Record each struct table from a newly loaded compilation unit in the
+// global map, keyed by that unit's FieldCounters pointer.  An existing key
+// means the exact same counter table was seen before, so nothing changes.
+static void registerStructInfo(CacheFragInfo *CacheFrag) {
+ for (u32 i = 0; i < CacheFrag->NumStructs; ++i) {
+ StructInfo *Struct = &CacheFrag->Structs[i];
+ StructHashMap::Handle H(&Ctx->StructMap, (uptr)Struct->FieldCounters);
+ if (H.created()) {
+ VPrintf(2, " Register %s: %u fields\n", Struct->StructName,
+ Struct->NumFields);
+ H->Struct = Struct;
+ ++Ctx->NumStructs;
+ } else {
+ VPrintf(2, " Duplicated %s: %u fields\n", Struct->StructName,
+ Struct->NumFields);
+ }
+ }
+}
+
+// Remove a unit's struct tables from the global map on module unload,
+// folding each table's counts into the report as it goes.  When the last
+// registered struct is gone, print the process-wide summary exactly once.
+static void unregisterStructInfo(CacheFragInfo *CacheFrag) {
+ // FIXME: if the library is unloaded before finalizeCacheFrag, we should
+ // collect the result for later report.
+ for (u32 i = 0; i < CacheFrag->NumStructs; ++i) {
+ StructInfo *Struct = &CacheFrag->Structs[i];
+ // The trailing 'true' is AddrHashMap's remove flag: the entry is dropped
+ // when the handle is released (see sanitizer_addrhashmap.h).
+ StructHashMap::Handle H(&Ctx->StructMap, (uptr)Struct->FieldCounters, true);
+ if (H.exists()) {
+ VPrintf(2, " Unregister %s: %u fields\n", Struct->StructName,
+ Struct->NumFields);
+ // FIXME: we should move this call to finalizeCacheFrag once we can
+ // iterate over the hash map there.
+ computeStructRatio(H);
+ --Ctx->NumStructs;
+ } else {
+ // NOTE(review): this branch means the table was never registered (or was
+ // already removed), yet the message says "Duplicated" -- looks copied
+ // from registerStructInfo; confirm and reword if so.
+ VPrintf(2, " Duplicated %s: %u fields\n", Struct->StructName,
+ Struct->NumFields);
+ }
+ }
+ // Static flag keeps the summary from being printed more than once even
+ // if several units unload after the count reaches zero.
+ static bool Reported = false;
+ if (Ctx->NumStructs == 0 && !Reported) {
+ Reported = true;
+ reportStructSummary();
+ }
+}
+
+//===-- Init/exit functions -----------------------------------------------===//
+
+// Module-load hook for the cache-fragmentation tool: registers the unit's
+// struct tables.  A no-op in build mode.
+void processCacheFragCompilationUnitInit(void *Ptr) {
+  if (getFlags()->build_mode)
+    return;
+  CacheFragInfo *CacheFrag = (CacheFragInfo *)Ptr;
+  VPrintf(2, "in esan::%s: %s with %u class(es)/struct(s)\n", __FUNCTION__,
+          CacheFrag->UnitName, CacheFrag->NumStructs);
+  registerStructInfo(CacheFrag);
+}
+
+// Module-unload hook for the cache-fragmentation tool: unregisters the
+// unit's struct tables.  A no-op in build mode.
+void processCacheFragCompilationUnitExit(void *Ptr) {
+  if (getFlags()->build_mode)
+    return;
+  CacheFragInfo *CacheFrag = (CacheFragInfo *)Ptr;
+  VPrintf(2, "in esan::%s: %s with %u class(es)/struct(s)\n", __FUNCTION__,
+          CacheFrag->UnitName, CacheFrag->NumStructs);
+  unregisterStructInfo(CacheFrag);
+}
+
+// Set up the cache-fragmentation tool's global Context.  A no-op in build
+// mode (where no per-struct bookkeeping is wanted).
+void initializeCacheFrag() {
+ if (!getFlags()->build_mode) {
+ VPrintf(2, "in esan::%s\n", __FUNCTION__);
+ // We use placement new to initialize Ctx before C++ static initialization.
+ // We make CtxMem 8-byte aligned for atomic operations in AddrHashMap.
+ static u64 CtxMem[sizeof(Context) / sizeof(u64) + 1];
+ Ctx = new (CtxMem) Context();
+ Ctx->NumStructs = 0;
+ }
+}
+
+// Tool finalizer; the real reporting happens in unregisterStructInfo, so
+// this only returns a success exit status (0).
+int finalizeCacheFrag() {
+ VPrintf(2, "in esan::%s\n", __FUNCTION__);
+ return 0;
+}
+
+// On-demand report entry point; currently a stub (see FIXME below).
+void reportCacheFrag() {
+ VPrintf(2, "in esan::%s\n", __FUNCTION__);
+ // FIXME: Not yet implemented. We need to iterate over all of the
+ // compilation unit data.
+}
+
+} // namespace __esan
--- /dev/null
+//===-- cache_frag.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Header for cache-fragmentation-specific code.
+//===----------------------------------------------------------------------===//
+
+#ifndef CACHE_FRAG_H
+#define CACHE_FRAG_H
+
+namespace __esan {
+
+// Called from the compiler-emitted per-module constructor/destructor with
+// a pointer to that module's CacheFragInfo table.
+void processCacheFragCompilationUnitInit(void *Ptr);
+void processCacheFragCompilationUnitExit(void *Ptr);
+
+// Tool lifecycle: initialize before any unit registration; finalize at
+// process exit (returns an exit status); report on demand.
+void initializeCacheFrag();
+int finalizeCacheFrag();
+void reportCacheFrag();
+
+} // namespace __esan
+
+#endif // CACHE_FRAG_H
--- /dev/null
+//===-- esan.cpp ----------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Main file (entry points) for the Esan run-time.
+//===----------------------------------------------------------------------===//
+
+#include "esan.h"
+#include "esan_flags.h"
+#include "esan_interface_internal.h"
+#include "esan_shadow.h"
+#include "cache_frag.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "working_set.h"
+
+// See comment below.
+// NOTE(review): the Itanium C++ ABI declares
+//   int __cxa_atexit(void (*f)(void *), void *arg, void *dso_handle);
+// declaring it with a single no-argument function pointer and calling it
+// that way relies on calling-convention luck (the handler argument and DSO
+// handle are left as garbage) -- confirm, or switch the call site to
+// sanitizer_common's Atexit().
+extern "C" {
+extern void __cxa_atexit(void (*function)(void));
+}
+
+// Delete this, after supporting Cache fragmentation tool.
+//ToolType __esan_which_tool = ESAN_WorkingSet;
+namespace __esan {
+
+bool EsanIsInitialized; // Set at the end of initializeLibrary().
+bool EsanDuringInit; // Guards against recursive init from early interceptors.
+ShadowMapping Mapping;
+
+// Different tools use different scales within the same shadow mapping scheme.
+// The scale used here must match that used by the compiler instrumentation.
+// This array is indexed by the ToolType enum.
+static const uptr ShadowScale[] = {
+ 0, // ESAN_None.
+ 2, // ESAN_CacheFrag: 4B:1B, so 4 to 1 == >>2.
+ 6, // ESAN_WorkingSet: 64B:1B, so 64 to 1 == >>6.
+};
+
+// We are combining multiple performance tuning tools under the umbrella of
+// one EfficiencySanitizer super-tool. Most of our tools have very similar
+// memory access instrumentation, shadow memory mapping, libc interception,
+// etc., and there is typically more shared code than distinct code.
+//
+// We are not willing to dispatch on tool dynamically in our fastpath
+// instrumentation: thus, which tool to use is a static option selected
+// at compile time and passed to __esan_init().
+//
+// We are willing to pay the overhead of tool dispatch in the slowpath to more
+// easily share code. We expect to only come here rarely.
+// If this becomes a performance hit, we can add separate interface
+// routines for each subtool (e.g., __esan_cache_frag_aligned_load_4).
+// But for libc interceptors, we'll have to do one of the following:
+// A) Add multiple-include support to sanitizer_common_interceptors.inc,
+// instantiate it separately for each tool, and call the selected
+// tool's intercept setup code.
+// B) Build separate static runtime libraries, one for each tool.
+// C) Completely split the tools into separate sanitizers.
+
+// Slowpath handler for an instrumented memory access of Size bytes at Addr
+// from PC; dispatches on the statically selected tool (see the discussion
+// above).  Gated behind the process_range_access flag.
+void processRangeAccess(uptr PC, uptr Addr, int Size, bool IsWrite) {
+ // Could be a shadow fault while building the package on the OBS worker.
+ // So, just disable this by default.
+ if (getFlags()->process_range_access) {
+ VPrintf(3, "in esan::%s %p: %c %p %d\n", __FUNCTION__, PC,
+ IsWrite ? 'w' : 'r', Addr, Size);
+ if (__esan_which_tool == ESAN_CacheFrag) {
+ // TODO(bruening): add shadow mapping and update shadow bits here.
+ // We'll move this to cache_frag.cpp once we have something.
+ } else if (__esan_which_tool == ESAN_WorkingSet) {
+ processRangeAccessWorkingSet(PC, Addr, Size, IsWrite);
+ }
+ }
+}
+
+// Bump the per-field access counter at Addr.  The compiler emits these
+// counters as 64-bit values (StructInfo::FieldCounters is u64 *), so the
+// increment must use u64 as well: 'unsigned long int' is only 32 bits on
+// ILP32 targets (the 32-bit ARM configuration this patch supports), which
+// would truncate the read-modify-write to half the counter.
+void incrementFieldCounter(void *Addr) {
+  if (!getFlags()->build_mode) {
+    ++(*(u64 *)Addr);
+  }
+}
+
+// Interceptor hook for signal(): returns true when the interceptor should
+// proceed with the real call; the working-set tool decides for itself.
+bool processSignal(int SigNum, void (*Handler)(int), void (**Result)(int)) {
+  if (__esan_which_tool != ESAN_WorkingSet)
+    return true;
+  return processWorkingSetSignal(SigNum, Handler, Result);
+}
+
+// Interceptor hook for sigaction(); same contract as processSignal().
+bool processSigaction(int SigNum, const void *Act, void *OldAct) {
+  if (__esan_which_tool != ESAN_WorkingSet)
+    return true;
+  return processWorkingSetSigaction(SigNum, Act, OldAct);
+}
+
+// Interceptor hook for sigprocmask(); same contract as processSignal().
+bool processSigprocmask(int How, void *Set, void *OldSet) {
+  if (__esan_which_tool != ESAN_WorkingSet)
+    return true;
+  return processWorkingSetSigprocmask(How, Set, OldSet);
+}
+
+#if SANITIZER_DEBUG
+// Debug-build sanity check of the app<->shadow mapping: exercises every
+// scale 0..7, asserting that app regions map into shadow, that shadow never
+// maps onto itself (double-shadow), and that shadow regions are pairwise
+// disjoint.  Always returns true so it can sit inside a DCHECK().
+static bool verifyShadowScheme() {
+ // Sanity checks for our shadow mapping scheme.
+ uptr AppStart, AppEnd;
+ if (Verbosity() >= 3) {
+ for (int i = 0; getAppRegion(i, &AppStart, &AppEnd); ++i) {
+ VPrintf(3, "App #%d: [%zx-%zx) (%zuGB)\n", i, AppStart, AppEnd,
+ (AppEnd - AppStart) >> 30);
+ }
+ }
+ // Note: this reconfigures the global Mapping for each scale; the caller
+ // (initializeShadow) re-initializes it to the real tool scale afterwards.
+ for (int Scale = 0; Scale < 8; ++Scale) {
+ Mapping.initialize(Scale);
+ if (Verbosity() >= 3) {
+ VPrintf(3, "\nChecking scale %d\n", Scale);
+ uptr ShadowStart, ShadowEnd;
+ for (int i = 0; getShadowRegion(i, &ShadowStart, &ShadowEnd); ++i) {
+ VPrintf(3, "Shadow #%d: [%zx-%zx) (%zuGB)\n", i, ShadowStart,
+ ShadowEnd, (ShadowEnd - ShadowStart) >> 30);
+ }
+ for (int i = 0; getShadowRegion(i, &ShadowStart, &ShadowEnd); ++i) {
+ VPrintf(3, "Shadow(Shadow) #%d: [%zx-%zx)\n", i,
+ appToShadow(ShadowStart), appToShadow(ShadowEnd - 1)+1);
+ }
+ }
+ // Boundary checks on every app region: membership is tight at both ends.
+ for (int i = 0; getAppRegion(i, &AppStart, &AppEnd); ++i) {
+ DCHECK(isAppMem(AppStart));
+ DCHECK(!isAppMem(AppStart - 1));
+ DCHECK(isAppMem(AppEnd - 1));
+ DCHECK(!isAppMem(AppEnd));
+ DCHECK(!isShadowMem(AppStart));
+ DCHECK(!isShadowMem(AppEnd - 1));
+ DCHECK(isShadowMem(appToShadow(AppStart)));
+ DCHECK(isShadowMem(appToShadow(AppEnd - 1)));
+ // Double-shadow checks.
+ DCHECK(!isShadowMem(appToShadow(appToShadow(AppStart))));
+ DCHECK(!isShadowMem(appToShadow(appToShadow(AppEnd - 1))));
+ }
+ // Ensure no shadow regions overlap each other.
+ uptr ShadowAStart, ShadowBStart, ShadowAEnd, ShadowBEnd;
+ for (int i = 0; getShadowRegion(i, &ShadowAStart, &ShadowAEnd); ++i) {
+ for (int j = 0; getShadowRegion(j, &ShadowBStart, &ShadowBEnd); ++j) {
+ DCHECK(i == j || ShadowAStart >= ShadowBEnd ||
+ ShadowAEnd <= ShadowBStart);
+ }
+ }
+ }
+ return true;
+}
+#endif
+
+uptr VmaSize; // Bit width of the user virtual address space; set below.
+
+// Map the shadow region(s) for the active tool.  Must run before any
+// instrumented access, since the compiler-inserted fastpath assumes the
+// shadow is already mapped.
+static void initializeShadow() {
+  verifyAddressSpace();
+
+  // This is based on the assumption that the initial stack is always allocated
+  // in the topmost segment of the user address space and the assumption
+  // holds true on all the platforms currently supported.
+  VmaSize = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+
+  DCHECK(verifyShadowScheme());
+
+  Mapping.initialize(ShadowScale[__esan_which_tool]);
+
+  VPrintf(1, "Shadow scale=%d offset=%p\n", Mapping.Scale, Mapping.Offset);
+
+  uptr ShadowStart, ShadowEnd;
+#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
+  for (int i = 0; getShadowRegion(i, &ShadowStart, &ShadowEnd); ++i) {
+    VPrintf(1, "Shadow #%d: [%zx-%zx) (%zuGB)\n", i, ShadowStart, ShadowEnd,
+            (ShadowEnd - ShadowStart) >> 30);
+
+    uptr Map;
+    if (__esan_which_tool == ESAN_WorkingSet) {
+      // We want to identify all shadow pages that are touched so we start
+      // out inaccessible.
+      Map = (uptr)MmapFixedNoAccess(ShadowStart, ShadowEnd - ShadowStart,
+                                    "shadow");
+    } else {
+      Map = (uptr)MmapFixedNoReserve(ShadowStart, ShadowEnd - ShadowStart,
+                                     "shadow");
+    }
+    if (Map != ShadowStart) {
+      Printf("FATAL: EfficiencySanitizer failed to map its shadow memory.\n");
+      Die();
+    }
+
+    if (common_flags()->no_huge_pages_for_shadow)
+      NoHugePagesInRegion(ShadowStart, ShadowEnd - ShadowStart);
+    if (common_flags()->use_madv_dontdump)
+      DontDumpShadowMemory(ShadowStart, ShadowEnd - ShadowStart);
+
+    // TODO: Call MmapNoAccess() on in-between regions.
+  }
+#else
+  // ARMVL: hard-coded ~512MB shadow range.
+  // NOTE(review): with ShadowEnd = 0x3fffffff the mapped length is
+  // 0x1fffffff, one byte short of 512MB; page rounding in mmap hides it,
+  // but 0x40000000 would be the self-consistent end -- confirm intent.
+  ShadowStart = 0x20000000;
+  ShadowEnd = 0x3fffffff;
+  uptr Map =
+      (uptr)MmapFixedNoReserve(ShadowStart, ShadowEnd - ShadowStart, "shadow");
+
+  if (Map != ShadowStart) {
+    Printf("FATAL : EfficiencySanitizer failed to map its shadow memory \n");
+    Die();
+  }
+  // The size is shifted by 20 bits, i.e. MB -- the previous label said GB,
+  // mislabeling the value by a factor of 1024.
+  VPrintf(1, "Shadow [%zx-%zx) (%zuMB)\n", ShadowStart, ShadowEnd,
+          (ShadowEnd - ShadowStart) >> 20);
+
+#endif
+}
+
+// Atexit() requires a void(void) handler; finalizeLibrary() returns an int
+// that an exit handler could not propagate anyway.
+static void finalizeLibraryAtExit() {
+  finalizeLibrary();
+}
+
+// One-time runtime initialization, called from each compilation unit's
+// constructor with the statically selected tool.
+void initializeLibrary(ToolType Tool) {
+  // We assume there is only one thread during init, but we need to
+  // guard against double-init when we're (re-)called from an
+  // early interceptor.
+  if (EsanIsInitialized || EsanDuringInit)
+    return;
+  EsanDuringInit = true;
+  CHECK(Tool == __esan_which_tool);
+  SanitizerToolName = "EfficiencySanitizer";
+  MaybeMountProcFS();
+  CacheBinaryName();
+  initializeFlags();
+
+  // Intercepting libc _exit or exit via COMMON_INTERCEPTOR_ON_EXIT only
+  // finalizes on an explicit exit call by the app. To handle a normal
+  // exit we register an atexit handler.
+
+  // In case we run sanitized DSO but did not sanitize binary,
+  // we will get an segfault. Also can't set libesan to preload because
+  // __esan_which_tool comes from static DSO constructor, so if we
+  // do not have it, we will get a segfault.
+  //
+  // Register via sanitizer_common's Atexit() rather than a hand-declared
+  // one-argument __cxa_atexit: the real __cxa_atexit takes
+  // (void (*)(void *), void *, void *), so calling it through the wrong
+  // prototype passed garbage for the handler argument and the DSO handle.
+  if (Tool == ESAN_WorkingSet)
+    Atexit(finalizeLibraryAtExit);
+
+  VPrintf(1, "in esan::%s\n", __FUNCTION__);
+  if (__esan_which_tool <= ESAN_None || __esan_which_tool >= ESAN_Max) {
+    Printf("ERROR: unknown tool %d requested\n", __esan_which_tool);
+    Die();
+  }
+
+  // Shadow must exist before interceptors run instrumented code.
+  if (__esan_which_tool == ESAN_WorkingSet) {
+    initializeShadow();
+    initializeShadowWorkingSet();
+  }
+
+  initializeInterceptors();
+
+  if (__esan_which_tool == ESAN_CacheFrag) {
+    initializeCacheFrag();
+  } else if (__esan_which_tool == ESAN_WorkingSet) {
+    initializeWorkingSet();
+  }
+
+  EsanIsInitialized = true;
+  EsanDuringInit = false;
+}
+
+// Run the active tool's finalizer and return its exit status (0 for tools
+// that have nothing to finalize).
+int finalizeLibrary() {
+  VPrintf(1, "in esan::%s\n", __FUNCTION__);
+  switch (__esan_which_tool) {
+  case ESAN_CacheFrag:
+    return finalizeCacheFrag();
+  case ESAN_WorkingSet:
+    return finalizeWorkingSet();
+  default:
+    return 0;
+  }
+}
+
+// Dispatch an on-demand report to the active tool; a no-op for ESAN_None.
+void reportResults() {
+  VPrintf(1, "in esan::%s\n", __FUNCTION__);
+  switch (__esan_which_tool) {
+  case ESAN_CacheFrag:
+    reportCacheFrag();
+    break;
+  case ESAN_WorkingSet:
+    reportWorkingSet();
+    break;
+  default:
+    break;
+  }
+}
+
+// Per-module constructor hook.  Only the cache-frag tool receives a
+// non-null per-unit table; all other tools expect a null pointer.
+void processCompilationUnitInit(void *Ptr) {
+  VPrintf(2, "in esan::%s\n", __FUNCTION__);
+  if (__esan_which_tool != ESAN_CacheFrag) {
+    DCHECK(Ptr == nullptr);
+    return;
+  }
+  DCHECK(Ptr != nullptr);
+  processCacheFragCompilationUnitInit(Ptr);
+}
+
+// Per-module destructor hook, called when the containing module unloads;
+// for the main executable this runs after finalizeLibrary.  Mirrors
+// processCompilationUnitInit's null-pointer contract.
+void processCompilationUnitExit(void *Ptr) {
+  VPrintf(2, "in esan::%s\n", __FUNCTION__);
+  if (__esan_which_tool != ESAN_CacheFrag) {
+    DCHECK(Ptr == nullptr);
+    return;
+  }
+  DCHECK(Ptr != nullptr);
+  processCacheFragCompilationUnitExit(Ptr);
+}
+
+// Return the number of samples taken so far; only the working-set tool
+// collects samples, so every other tool reports zero.
+unsigned int getSampleCount() {
+  VPrintf(1, "in esan::%s\n", __FUNCTION__);
+  return __esan_which_tool == ESAN_WorkingSet ? getSampleCountWorkingSet()
+                                              : 0;
+}
+
+} // namespace __esan
--- /dev/null
+//===-- esan.h --------------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Main internal esan header file.
+//
+// Ground rules:
+// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
+// function-scope locals)
+// - All functions/classes/etc reside in namespace __esan, except for those
+// declared in esan_interface_internal.h.
+// - Platform-specific files should be used instead of ifdefs (*).
+// - No system headers included in header files (*).
+// - Platform specific headers included only into platform-specific files (*).
+//
+// (*) Except when inlining is critical for performance.
+//===----------------------------------------------------------------------===//
+
+#ifndef ESAN_H
+#define ESAN_H
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "esan_interface_internal.h"
+
+namespace __esan {
+
+extern bool EsanIsInitialized;
+extern bool EsanDuringInit;
+extern uptr VmaSize;
+
+void initializeLibrary(ToolType Tool);
+int finalizeLibrary();
+void reportResults();
+unsigned int getSampleCount();
+// Esan creates the variable per tool per compilation unit at compile time
+// and passes its pointer Ptr to the runtime library.
+void processCompilationUnitInit(void *Ptr);
+void processCompilationUnitExit(void *Ptr);
+void processRangeAccess(uptr PC, uptr Addr, int Size, bool IsWrite);
+void initializeInterceptors();
+void incrementFieldCounter(void *Addr);
+
+// Platform-dependent routines.
+void verifyAddressSpace();
+bool fixMmapAddr(void **Addr, SIZE_T Size, int Flags);
+uptr checkMmapResult(uptr Addr, SIZE_T Size);
+// The return value indicates whether to call the real version or not.
+bool processSignal(int SigNum, void (*Handler)(int), void (**Result)(int));
+bool processSigaction(int SigNum, const void *Act, void *OldAct);
+bool processSigprocmask(int How, void *Set, void *OldSet);
+
+} // namespace __esan
+
+#endif // ESAN_H
--- /dev/null
+__esan_init
+__esan_exit
+__esan_aligned*
+__esan_unaligned*
--- /dev/null
+//===-- esan_circular_buffer.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Circular buffer data structure.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __esan {
+
+// A circular buffer for POD data whose memory is allocated using mmap.
+// There are two usage models: one is to use initialize/free (for global
+// instances) and the other is to use placement new with the
+// constructor and to call the destructor or free (they are equivalent).
+template<typename T>
+class CircularBuffer {
+ public:
+  // To support global instances we cannot initialize any field in the
+  // default constructor.
+  explicit CircularBuffer() {}
+  // Allocating constructor: initialize() plus arranging for the destructor
+  // to release the buffer.
+  CircularBuffer(uptr BufferCapacity) {
+    initialize(BufferCapacity);
+    WasConstructed = true;
+  }
+  ~CircularBuffer() {
+    if (WasConstructed) // Else caller will call free() explicitly.
+      free();
+  }
+  // Allocates storage for BufferCapacity elements. A default-constructed
+  // (e.g. global) instance must pair this with an explicit free().
+  void initialize(uptr BufferCapacity) {
+    Capacity = BufferCapacity;
+    // MmapOrDie rounds up to the page size for us.
+    Data = (T *)MmapOrDie(Capacity * sizeof(T), "CircularBuffer");
+    StartIdx = 0;
+    Count = 0;
+    WasConstructed = false;
+  }
+  // Unmaps the storage. Fields are not reset, so the buffer must not be
+  // used again without another initialize().
+  void free() {
+    UnmapOrDie(Data, Capacity * sizeof(T));
+  }
+  // Index 0 is the oldest element currently stored.
+  T &operator[](uptr Idx) {
+    CHECK_LT(Idx, Count);
+    uptr ArrayIdx = (StartIdx + Idx) % Capacity;
+    return Data[ArrayIdx];
+  }
+  const T &operator[](uptr Idx) const {
+    CHECK_LT(Idx, Count);
+    uptr ArrayIdx = (StartIdx + Idx) % Capacity;
+    return Data[ArrayIdx];
+  }
+  // Appends Item; once full, silently overwrites the oldest element.
+  void push_back(const T &Item) {
+    CHECK_GT(Capacity, 0);
+    uptr ArrayIdx = (StartIdx + Count) % Capacity;
+    Data[ArrayIdx] = Item;
+    if (Count < Capacity)
+      ++Count;
+    else
+      StartIdx = (StartIdx + 1) % Capacity;
+  }
+  T &back() {
+    CHECK_GT(Count, 0);
+    uptr ArrayIdx = (StartIdx + Count - 1) % Capacity;
+    return Data[ArrayIdx];
+  }
+  // Drops the newest element. The slot's contents are left in place,
+  // which is fine for the POD-only data this class supports.
+  void pop_back() {
+    CHECK_GT(Count, 0);
+    --Count;
+  }
+  uptr size() const {
+    return Count;
+  }
+  // Logically empties the buffer without releasing memory.
+  void clear() {
+    StartIdx = 0;
+    Count = 0;
+  }
+  bool empty() const { return size() == 0; }
+
+ private:
+  // Not copyable: the instance owns its mmap'ed region.
+  CircularBuffer(const CircularBuffer&);
+  void operator=(const CircularBuffer&);
+
+  bool WasConstructed;
+  T *Data;
+  uptr Capacity;
+  uptr StartIdx;
+  uptr Count;
+};
+
+} // namespace __esan
--- /dev/null
+//===-- esan_flags.cc -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Esan flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "esan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+using namespace __sanitizer;
+
+namespace __esan {
+
+static const char EsanOptsEnv[] = "ESAN_OPTIONS";
+
+Flags EsanFlagsDontUseDirectly;
+
+// Resets every flag declared in esan_flags.inc to its default value.
+void Flags::setDefaults() {
+#define ESAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "esan_flags.inc"
+#undef ESAN_FLAG
+}
+
+// Registers each esan flag with Parser so option parsing can fill F.
+static void registerEsanFlags(FlagParser *Parser, Flags *F) {
+#define ESAN_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(Parser, #Name, Description, &F->Name);
+#include "esan_flags.inc"
+#undef ESAN_FLAG
+}
+
+// Initializes common and esan-specific runtime flags: applies defaults,
+// parses the ESAN_OPTIONS string obtained via GetRuntimeOptions, then
+// applies common-flag side effects (report path, help output).
+void initializeFlags() {
+  SetCommonFlagsDefaults();
+  Flags *F = getFlags();
+  F->setDefaults();
+
+  FlagParser Parser;
+  registerEsanFlags(&Parser, F);
+  RegisterCommonFlags(&Parser);
+
+  // Mmaped is set when the returned options string was mmap'ed (e.g. read
+  // from a file) and must be unmapped after parsing.
+  // NOTE(review): the second argument appears to be a file-path fallback
+  // for the options -- confirm against GetRuntimeOptions.
+  int Mmaped = 0;
+  const char *Options =
+      GetRuntimeOptions(EsanOptsEnv, "/ESAN_OPTIONS", &Mmaped);
+
+  Parser.ParseString(Options);
+
+  if (Mmaped)
+    UnmapOrDie((void *)Options, GetPageSizeCached());
+
+  InitializeCommonFlags();
+  if (Verbosity())
+    ReportUnrecognizedFlags();
+  if (common_flags()->help)
+    Parser.PrintFlagDescriptions();
+
+  __sanitizer_set_report_path(common_flags()->log_path);
+}
+
+} // namespace __esan
--- /dev/null
+//===-- esan_flags.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Esan runtime flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef ESAN_FLAGS_H
+#define ESAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __esan {
+
+// Flag storage: one field per ESAN_FLAG entry in esan_flags.inc.
+class Flags {
+public:
+#define ESAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "esan_flags.inc"
+#undef ESAN_FLAG
+
+  // Resets every field to its esan_flags.inc default.
+  void setDefaults();
+};
+
+// Storage singleton; access it only through getFlags().
+extern Flags EsanFlagsDontUseDirectly;
+inline Flags *getFlags() {
+  return &EsanFlagsDontUseDirectly;
+}
+
+// Parses ESAN_OPTIONS and common flags into the singleton.
+void initializeFlags();
+
+} // namespace __esan
+
+#endif // ESAN_FLAGS_H
--- /dev/null
+//===-- esan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Esan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ESAN_FLAG
+# error "Define ESAN_FLAG prior to including this file!"
+#endif
+
+// ESAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+//===----------------------------------------------------------------------===//
+// Cross-tool options
+//===----------------------------------------------------------------------===//
+
+ESAN_FLAG(int, cache_line_size, 64,
+ "The number of bytes in a cache line. For the working-set tool, this "
+ "cannot be changed without also changing the compiler "
+ "instrumentation.")
+
+ESAN_FLAG(bool, process_range_access, false,
+ "Process range access.")
+
+//===----------------------------------------------------------------------===//
+// Working set tool options
+//===----------------------------------------------------------------------===//
+
+ESAN_FLAG(bool, record_snapshots, false,
+ "Working set tool: whether to sample snapshots during a run.")
+
+// Typical profiling uses a 10ms timer. Our snapshots take some work
+// to scan memory so we reduce to 20ms.
+// To disable samples, turn off record_snapshots.
+ESAN_FLAG(int, sample_freq, 20,
+ "Working set tool: sampling frequency in milliseconds.")
+
+// This controls the difference in frequency between each successive series
+// of snapshots. There are 8 in total, with number 0 using sample_freq.
+// Number N samples number N-1 every (1 << snapshot_step) instance of N-1.
+ESAN_FLAG(int, snapshot_step, 2, "Working set tool: the log of the sampling "
+ "performed for the next-higher-frequency snapshot series.")
+
+//===----------------------------------------------------------------------===//
+// Cache Fragmentation tool options
+//===----------------------------------------------------------------------===//
+// Build mode: suppress output, e.g. when the instrumented binary runs as
+// part of a build step whose log should stay clean.
+ESAN_FLAG(bool, build_mode, true,
+          "Cache-frag tool: don't output any info in build mode.")
+// The difference information of a struct is reported if the struct's difference
+// score is greater than the report_threshold.
+ESAN_FLAG(int, report_threshold, 1<<10, "Cache-frag tool: the struct difference"
+          " score threshold for reporting.")
--- /dev/null
+//===-- esan_hashtable.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Generic resizing hashtable.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include <stddef.h>
+
+namespace __esan {
+
+//===----------------------------------------------------------------------===//
+// Default hash and comparison functions
+//===----------------------------------------------------------------------===//
+
+template <typename T> struct DefaultHash {
+ size_t operator()(const T &Key) const {
+ return (size_t)Key;
+ }
+};
+
+template <typename T> struct DefaultEqual {
+ bool operator()(const T &Key1, const T &Key2) const {
+ return Key1 == Key2;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// HashTable declaration
+//===----------------------------------------------------------------------===//
+
+// A simple resizing and mutex-locked hashtable.
+//
+// If the default hash functor is used, KeyTy must have an operator size_t().
+// If the default comparison functor is used, KeyTy must have an operator ==.
+//
+// By default all operations are internally-synchronized with a mutex, with no
+// synchronization for payloads once hashtable functions return. If
+// ExternalLock is set to true, the caller should call the lock() and unlock()
+// routines around all hashtable operations and subsequent manipulation of
+// payloads.
+template <typename KeyTy, typename DataTy, bool ExternalLock = false,
+          typename HashFuncTy = DefaultHash<KeyTy>,
+          typename EqualFuncTy = DefaultEqual<KeyTy> >
+class HashTable {
+public:
+  // InitialCapacity must be a power of 2.
+  // ResizeFactor must be between 1 and 99 and indicates the
+  // maximum percentage full that the table should ever be.
+  HashTable(u32 InitialCapacity = 2048, u32 ResizeFactor = 70);
+  ~HashTable();
+  bool lookup(const KeyTy &Key, DataTy &Payload); // Const except for Mutex.
+  // Returns true if Key was inserted; false if it was already present.
+  bool add(const KeyTy &Key, const DataTy &Payload);
+  // Returns true if Key was present and has been removed.
+  bool remove(const KeyTy &Key);
+  u32 size(); // Const except for Mutex.
+  // If the table is internally-synchronized, this lock must not be held
+  // while a hashtable function is called as it will deadlock: the lock
+  // is not recursive. This is meant for use with externally-synchronized
+  // tables or with an iterator.
+  void lock();
+  void unlock();
+
+private:
+  // Singly-linked chain node for one bucket.
+  struct HashEntry {
+    KeyTy Key;
+    DataTy Payload;
+    HashEntry *Next;
+  };
+
+public:
+  // Key/payload copy returned by iterator dereference.
+  struct HashPair {
+    HashPair(KeyTy Key, DataTy Data) : Key(Key), Data(Data) {}
+    KeyTy Key;
+    DataTy Data;
+  };
+
+  // This iterator does not perform any synchronization.
+  // It expects the caller to lock the table across the whole iteration.
+  // Calling HashTable functions while using the iterator is not supported.
+  // The iterator returns copies of the keys and data.
+  class iterator {
+  public:
+    iterator(
+        HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table);
+    iterator(const iterator &Src) = default;
+    iterator &operator=(const iterator &Src) = default;
+    HashPair operator*();
+    iterator &operator++();
+    // NOTE(review): postfix ++ is declared to return a reference, but its
+    // definition returns a reference to a local temporary (dangling); it
+    // should return by value. Confirm no caller uses the returned value.
+    iterator &operator++(int);
+    bool operator==(const iterator &Cmp) const;
+    bool operator!=(const iterator &Cmp) const;
+
+  private:
+    iterator(
+        HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table,
+        int Idx);
+    friend HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>;
+    HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table;
+    int Idx;
+    HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::HashEntry
+        *Entry;
+  };
+
+  // No erase or insert iterator supported
+  iterator begin();
+  iterator end();
+
+private:
+  void resize();
+
+  HashEntry **Table; // Bucket array of chain heads; length == Capacity.
+  u32 Capacity;      // Always a power of two.
+  u32 Entries;
+  const u32 ResizeFactor;
+  BlockingMutex Mutex;
+  const HashFuncTy HashFunc;
+  const EqualFuncTy EqualFunc;
+};
+
+//===----------------------------------------------------------------------===//
+// Hashtable implementation
+//===----------------------------------------------------------------------===//
+
+// Constructs a table with InitialCapacity buckets (must be a power of two)
+// that grows once it is ResizeFactor percent full.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::HashTable(
+    u32 InitialCapacity, u32 ResizeFactor)
+    : Capacity(InitialCapacity), Entries(0), ResizeFactor(ResizeFactor),
+      HashFunc(HashFuncTy()), EqualFunc(EqualFuncTy()) {
+  CHECK(IsPowerOfTwo(Capacity));
+  CHECK(ResizeFactor >= 1 && ResizeFactor <= 99);
+  Table = (HashEntry **)InternalAlloc(Capacity * sizeof(HashEntry *));
+  internal_memset(Table, 0, Capacity * sizeof(HashEntry *));
+}
+
+// Frees every chained entry and then the bucket array itself.
+// NOTE(review): only Payload's destructor is run; Key is assumed to be
+// trivially destructible -- confirm for new instantiations.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::~HashTable() {
+  for (u32 i = 0; i < Capacity; ++i) {
+    HashEntry *Entry = Table[i];
+    while (Entry != nullptr) {
+      HashEntry *Next = Entry->Next;
+      Entry->Payload.~DataTy();
+      InternalFree(Entry);
+      Entry = Next;
+    }
+  }
+  InternalFree(Table);
+}
+
+// Returns the entry count, synchronizing unless externally locked.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+u32 HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::size() {
+  u32 Res;
+  if (!ExternalLock)
+    Mutex.Lock();
+  Res = Entries;
+  if (!ExternalLock)
+    Mutex.Unlock();
+  return Res;
+}
+
+// Copies Key's payload into Payload and returns true if Key is present.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::lookup(
+    const KeyTy &Key, DataTy &Payload) {
+  if (!ExternalLock)
+    Mutex.Lock();
+  bool Found = false;
+  size_t Hash = HashFunc(Key) % Capacity;
+  HashEntry *Entry = Table[Hash];
+  for (; Entry != nullptr; Entry = Entry->Next) {
+    if (EqualFunc(Entry->Key, Key)) {
+      Payload = Entry->Payload;
+      Found = true;
+      break;
+    }
+  }
+  if (!ExternalLock)
+    Mutex.Unlock();
+  return Found;
+}
+
+// Doubles the bucket count and re-buckets every entry by relinking it in
+// place (no per-entry allocation). Caller must hold the lock (internal or
+// external).
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::resize() {
+  if (!ExternalLock)
+    Mutex.CheckLocked();
+  size_t OldCapacity = Capacity;
+  HashEntry **OldTable = Table;
+  Capacity *= 2;
+  Table = (HashEntry **)InternalAlloc(Capacity * sizeof(HashEntry *));
+  internal_memset(Table, 0, Capacity * sizeof(HashEntry *));
+  // Re-hash
+  for (u32 i = 0; i < OldCapacity; ++i) {
+    HashEntry *OldEntry = OldTable[i];
+    while (OldEntry != nullptr) {
+      HashEntry *Next = OldEntry->Next;
+      size_t Hash = HashFunc(OldEntry->Key) % Capacity;
+      OldEntry->Next = Table[Hash];
+      Table[Hash] = OldEntry;
+      OldEntry = Next;
+    }
+  }
+  // Release the old bucket array: all entries it pointed at have been
+  // relinked into the new array. Without this, every resize leaked the
+  // previous array.
+  InternalFree(OldTable);
+}
+
+// Inserts Key->Payload if Key is absent; returns true on insert, false if
+// the key already existed (the existing payload is left untouched).
+// NOTE(review): entries come from raw InternalAlloc and are assigned, not
+// placement-new'd; this requires assignable-into-raw-memory (POD-like)
+// Key/Data types -- confirm for new instantiations.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::add(
+    const KeyTy &Key, const DataTy &Payload) {
+  if (!ExternalLock)
+    Mutex.Lock();
+  bool Exists = false;
+  size_t Hash = HashFunc(Key) % Capacity;
+  HashEntry *Entry = Table[Hash];
+  for (; Entry != nullptr; Entry = Entry->Next) {
+    if (EqualFunc(Entry->Key, Key)) {
+      Exists = true;
+      break;
+    }
+  }
+  if (!Exists) {
+    Entries++;
+    // Grow once the load reaches ResizeFactor percent, and recompute the
+    // bucket index since Capacity changed.
+    if (Entries * 100 >= Capacity * ResizeFactor) {
+      resize();
+      Hash = HashFunc(Key) % Capacity;
+    }
+    HashEntry *Add = (HashEntry *)InternalAlloc(sizeof(*Add));
+    Add->Key = Key;
+    Add->Payload = Payload;
+    Add->Next = Table[Hash];
+    Table[Hash] = Add;
+  }
+  if (!ExternalLock)
+    Mutex.Unlock();
+  return !Exists;
+}
+
+// Unlinks and frees the entry for Key; returns true if it was present.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::remove(
+    const KeyTy &Key) {
+  if (!ExternalLock)
+    Mutex.Lock();
+  bool Found = false;
+  size_t Hash = HashFunc(Key) % Capacity;
+  HashEntry *Entry = Table[Hash];
+  HashEntry *Prev = nullptr;
+  for (; Entry != nullptr; Prev = Entry, Entry = Entry->Next) {
+    if (EqualFunc(Entry->Key, Key)) {
+      Found = true;
+      Entries--;
+      if (Prev == nullptr)
+        Table[Hash] = Entry->Next;
+      else
+        Prev->Next = Entry->Next;
+      Entry->Payload.~DataTy();
+      InternalFree(Entry);
+      break;
+    }
+  }
+  if (!ExternalLock)
+    Mutex.Unlock();
+  return Found;
+}
+
+// Explicit locking for externally-synchronized tables and for iteration.
+// Per the class comment the mutex is not recursive: do not hold it while
+// calling an internally-synchronized hashtable function.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::lock() {
+  Mutex.Lock();
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::unlock() {
+  Mutex.Unlock();
+}
+
+//===----------------------------------------------------------------------===//
+// Iterator implementation
+//===----------------------------------------------------------------------===//
+
+// Begin-iterator: starts at bucket -1 and advances to the first entry.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+    iterator(
+        HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table)
+    : Table(Table), Idx(-1), Entry(nullptr) {
+  operator++();
+}
+
+// Private end-iterator constructor: Idx must be at or past Capacity.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+    iterator(
+        HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table,
+        int Idx)
+    : Table(Table), Idx(Idx), Entry(nullptr) {
+  CHECK(Idx >= (int)Table->Capacity); // Only used to create end().
+}
+
+// Dereference returns copies of the key and payload (see class comment).
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+                   EqualFuncTy>::HashPair
+    HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+    operator*() {
+  CHECK(Idx >= 0 && Idx < (int)Table->Capacity);
+  CHECK(Entry != nullptr);
+  return HashPair(Entry->Key, Entry->Payload);
+}
+
+// Prefix increment: follow the current chain, then scan forward for the
+// next occupied bucket; leaves Entry == nullptr at end().
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+                   EqualFuncTy>::iterator &
+    HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+    operator++() {
+  if (Entry != nullptr)
+    Entry = Entry->Next;
+  while (Entry == nullptr) {
+    ++Idx;
+    if (Idx >= (int)Table->Capacity)
+      break; // At end().
+    Entry = Table->Table[Idx];
+  }
+  return *this;
+}
+
+// Postfix increment.
+// NOTE(review): declared to return iterator& but returns a reference to
+// the local Temp, which dangles as soon as this function returns. It
+// should return by value (which also requires changing the in-class
+// declaration). Confirm no caller uses the returned value before fixing.
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+                   EqualFuncTy>::iterator &
+    HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+    operator++(int) {
+  iterator Temp(*this);
+  operator++();
+  return Temp;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+operator==(const iterator &Cmp) const {
+  return Cmp.Table == Table && Cmp.Idx == Idx && Cmp.Entry == Entry;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+operator!=(const iterator &Cmp) const {
+  return Cmp.Table != Table || Cmp.Idx != Idx || Cmp.Entry != Entry;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+                   EqualFuncTy>::iterator
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::begin() {
+  return iterator(this);
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+          typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+                   EqualFuncTy>::iterator
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::end() {
+  return iterator(this, Capacity);
+}
--- /dev/null
+//===-- esan_interceptors.cpp ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Interception routines for the esan run-time.
+//===----------------------------------------------------------------------===//
+
+#include "esan.h"
+#include "esan_shadow.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+using namespace __esan; // NOLINT
+
+#define CUR_PC() (StackTrace::GetCurrentPc())
+
+//===----------------------------------------------------------------------===//
+// Interception via sanitizer common interceptors
+//===----------------------------------------------------------------------===//
+
+// Get the per-platform defines for what is possible to intercept
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// TODO(bruening): tsan disables several interceptors (getpwent, etc.) claiming
+// that interception is a perf hit: should we do the same?
+
+// We don't need to intercept this.
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#undef SANITIZER_INTERCEPT_FORTIFY_SOURCE
+
+// TODO(bruening): the common realpath interceptor assumes malloc is
+// intercepted! We should try to parametrize that, though we'll
+// intercept malloc soon ourselves and can then remove this undef.
+#undef SANITIZER_INTERCEPT_REALPATH
+
+// We provide our own version:
+#undef SANITIZER_INTERCEPT_SIGPROCMASK
+
+// Could cause a segfault see: https://github.com/google/sanitizers/issues/321.
+#undef SANITIZER_INTERCEPT_CLOCK_GETTIME
+
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!EsanIsInitialized)
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ INTERCEPT_FUNCTION_VER(name, ver)
+
+// We must initialize during early interceptors, to support tcmalloc.
+// This means that for some apps we fully initialize prior to
+// __esan_init() being called.
+// We currently do not use ctx.
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ do { \
+ if (UNLIKELY(COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)) { \
+ if (!UNLIKELY(EsanDuringInit)) \
+ initializeLibrary(__esan_which_tool); \
+ return REAL(func)(__VA_ARGS__); \
+ } \
+ ctx = nullptr; \
+ (void)ctx; \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
+ COMMON_INTERCEPTOR_ENTER(ctx, func, __VA_ARGS__)
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ processRangeAccess(CUR_PC(), (uptr)ptr, size, true)
+
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ processRangeAccess(CUR_PC(), (uptr)ptr, size, false)
+
+// This is only called if the app explicitly calls exit(), not on
+// a normal exit.
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) finalizeLibrary()
+
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
+ do { \
+ (void)(ctx); \
+ (void)(file); \
+ (void)(path); \
+ } while (false)
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
+ do { \
+ (void)(ctx); \
+ (void)(file); \
+ } while (false)
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ do { \
+ (void)(filename); \
+ (void)(handle); \
+ } while (false)
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
+ do { \
+ (void)(ctx); \
+ (void)(u); \
+ } while (false)
+#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
+ do { \
+ (void)(ctx); \
+ (void)(u); \
+ } while (false)
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ (void)(ctx); \
+ (void)(path); \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ (void)(newfd); \
+ } while (false)
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ do { \
+ (void)(ctx); \
+ (void)(name); \
+ } while (false)
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ do { \
+ (void)(ctx); \
+ (void)(thread); \
+ (void)(name); \
+ } while (false)
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \
+ do { \
+ (void)(ctx); \
+ (void)(m); \
+ } while (false)
+#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
+ do { \
+ (void)(ctx); \
+ (void)(m); \
+ } while (false)
+#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
+ do { \
+ (void)(ctx); \
+ (void)(m); \
+ } while (false)
+#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
+ do { \
+ (void)(ctx); \
+ (void)(msg); \
+ } while (false)
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
+ do { \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+//===----------------------------------------------------------------------===//
+// Syscall interception
+//===----------------------------------------------------------------------===//
+
+// We want the caller's PC b/c unlike the other function interceptors these
+// are separate pre and post functions called around the app's syscall().
+
+#define COMMON_SYSCALL_PRE_READ_RANGE(ptr, size) \
+ processRangeAccess(GET_CALLER_PC(), (uptr)ptr, size, false)
+
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(ptr, size) \
+ do { \
+ (void)(ptr); \
+ (void)(size); \
+ } while (false)
+
+#define COMMON_SYSCALL_POST_READ_RANGE(ptr, size) \
+ do { \
+ (void)(ptr); \
+ (void)(size); \
+ } while (false)
+
+// The actual amount written is in post, not pre.
+#define COMMON_SYSCALL_POST_WRITE_RANGE(ptr, size) \
+ processRangeAccess(GET_CALLER_PC(), (uptr)ptr, size, true)
+
+#define COMMON_SYSCALL_ACQUIRE(addr) \
+ do { \
+ (void)(addr); \
+ } while (false)
+#define COMMON_SYSCALL_RELEASE(addr) \
+ do { \
+ (void)(addr); \
+ } while (false)
+#define COMMON_SYSCALL_FD_CLOSE(fd) \
+ do { \
+ (void)(fd); \
+ } while (false)
+#define COMMON_SYSCALL_FD_ACQUIRE(fd) \
+ do { \
+ (void)(fd); \
+ } while (false)
+#define COMMON_SYSCALL_FD_RELEASE(fd) \
+ do { \
+ (void)(fd); \
+ } while (false)
+#define COMMON_SYSCALL_PRE_FORK() \
+ do { \
+ } while (false)
+#define COMMON_SYSCALL_POST_FORK(res) \
+ do { \
+ (void)(res); \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+
+//===----------------------------------------------------------------------===//
+// Custom interceptors
+//===----------------------------------------------------------------------===//
+
+// TODO(bruening): move more of these to the common interception pool as they
+// are shared with tsan and asan.
+// While our other files match LLVM style, here we match sanitizer style as we
+// expect to move these to the common pool.
+
+// Reports the full copy, including the terminating NUL on both the source
+// and destination, then delegates to libc.
+INTERCEPTOR(char *, strcpy, char *dst, const char *src) { // NOLINT
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, strcpy, dst, src);
+  uptr srclen = internal_strlen(src);
+  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, srclen + 1);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, src, srclen + 1);
+  return REAL(strcpy)(dst, src); // NOLINT
+}
+
+// Reports the bytes strncpy copies: min(n, srclen + 1).
+// NOTE(review): strncpy also zero-pads dst out to n bytes when the source
+// is shorter; that padding write is not reported -- confirm this matches
+// the intended access model.
+INTERCEPTOR(char *, strncpy, char *dst, char *src, uptr n) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, strncpy, dst, src, n);
+  uptr srclen = internal_strnlen(src, n);
+  uptr copied_size = srclen + 1 > n ? n : srclen + 1;
+  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, copied_size);
+  COMMON_INTERCEPTOR_READ_RANGE(ctx, src, copied_size);
+  return REAL(strncpy)(dst, src, n);
+}
+
+// Path-taking libc wrappers: report the path string read, then delegate.
+INTERCEPTOR(int, open, const char *name, int flags, int mode) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, open, name, flags, mode);
+  COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
+  return REAL(open)(name, flags, mode);
+}
+
+// 64-bit-offset variants exist only on Linux; elsewhere the interception
+// macro expands to nothing.
+#if SANITIZER_LINUX
+INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, open64, name, flags, mode);
+  COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
+  return REAL(open64)(name, flags, mode);
+}
+#define ESAN_MAYBE_INTERCEPT_OPEN64 INTERCEPT_FUNCTION(open64)
+#else
+#define ESAN_MAYBE_INTERCEPT_OPEN64
+#endif
+
+INTERCEPTOR(int, creat, const char *name, int mode) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, creat, name, mode);
+  COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
+  return REAL(creat)(name, mode);
+}
+
+#if SANITIZER_LINUX
+INTERCEPTOR(int, creat64, const char *name, int mode) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, creat64, name, mode);
+  COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
+  return REAL(creat64)(name, mode);
+}
+#define ESAN_MAYBE_INTERCEPT_CREAT64 INTERCEPT_FUNCTION(creat64)
+#else
+#define ESAN_MAYBE_INTERCEPT_CREAT64
+#endif
+
+INTERCEPTOR(int, unlink, char *path) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, unlink, path);
+  COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+  return REAL(unlink)(path);
+}
+
+INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, f);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size * nmemb);
+ return REAL(fread)(ptr, size, nmemb, f);
+}
+
+INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, f);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, size * nmemb);
+ return REAL(fwrite)(p, size, nmemb, f);
+}
+
// puts() interceptor.  Note that the reported read covers strlen(s) bytes,
// deliberately excluding the terminating NUL.
INTERCEPTOR(int, puts, const char *s) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, puts, s);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s));
  return REAL(puts)(s);
}
+
+INTERCEPTOR(int, rmdir, char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, rmdir, path);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ return REAL(rmdir)(path);
+}
+
+//===----------------------------------------------------------------------===//
+// Shadow-related interceptors
+//===----------------------------------------------------------------------===//
+
+// These are candidates for sharing with all sanitizers if shadow memory
+// support is also standardized.
+
// mmap() interceptor: constrains mappings to the known application regions so
// that every mapped address has a valid shadow translation.
INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
            int fd, OFF_T off) {
  if (UNLIKELY(REAL(mmap) == nullptr)) {
    // With esan init during interceptor init and a static libc preventing
    // our early-calloc from triggering, we can end up here before our
    // REAL pointer is set up.
    return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
  }
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off);
  // Reject (MAP_FIXED) or clear a requested address outside the app regions;
  // fixMmapAddr sets errno to EINVAL on the rejection path.
  if (!fixMmapAddr(&addr, sz, flags))
    return (void *)-1;  // i.e., MAP_FAILED.
  void *result = REAL(mmap)(addr, sz, prot, flags, fd, off);
  // Die()s if the kernel placed the mapping outside our supported regions.
  return (void *)checkMmapResult((uptr)result, sz);
}
+
#if SANITIZER_LINUX
// 64-bit-offset variant of the mmap interceptor.
// NOTE(review): unlike mmap above, there is no REAL(mmap64) == nullptr early
// path here -- presumably mmap64 is never reached before interceptor init;
// confirm against the early-init scenario described in the mmap interceptor.
INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
            int fd, OFF64_T off) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, sz, prot, flags, fd, off);
  if (!fixMmapAddr(&addr, sz, flags))
    return (void *)-1;  // i.e., MAP_FAILED.
  void *result = REAL(mmap64)(addr, sz, prot, flags, fd, off);
  return (void *)checkMmapResult((uptr)result, sz);
}
#define ESAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64)
#else
#define ESAN_MAYBE_INTERCEPT_MMAP64
#endif
+
+//===----------------------------------------------------------------------===//
+// Signal-related interceptors
+//===----------------------------------------------------------------------===//
+
#if SANITIZER_LINUX
typedef void (*signal_handler_t)(int);
// signal() interceptor: gives the runtime a chance to claim the signal before
// libc sees it.  When processSignal returns false it appears to have handled
// the request itself, with 'result' holding the value to hand back to the
// caller -- confirm against processSignal's definition.
INTERCEPTOR(signal_handler_t, signal, int signum, signal_handler_t handler) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, signal, signum, handler);
  signal_handler_t result;
  if (!processSignal(signum, handler, &result))
    return result;
  else
    return REAL(signal)(signum, handler);
}
#define ESAN_MAYBE_INTERCEPT_SIGNAL INTERCEPT_FUNCTION(signal)
#else
// Deliberately a hard error: the runtime has no signal strategy elsewhere yet.
#error Platform not supported
#define ESAN_MAYBE_INTERCEPT_SIGNAL
#endif
+
#if SANITIZER_LINUX
DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
             struct sigaction *oldact)
// sigaction() interceptor: when processSigaction returns false the request is
// consumed by the runtime and the application sees success (0) without libc
// being called.
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
            struct sigaction *oldact) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigaction, signum, act, oldact);
  if (!processSigaction(signum, act, oldact))
    return 0;
  else
    return REAL(sigaction)(signum, act, oldact);
}

// This is required to properly use internal_sigaction.
namespace __sanitizer {
#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO
// Bridge for sanitizer_common: forwards to REAL(sigaction) once resolved, or
// to a raw syscall during early init.
int real_sigaction(int signum, const void *act, void *oldact) {
  if (REAL(sigaction) == nullptr) {
    // With an instrumented allocator, this is called during interceptor init
    // and we need a raw syscall solution.
    return internal_sigaction_syscall(signum, act, oldact);
  }
  return REAL(sigaction)(signum, (const struct sigaction *)act,
                         (struct sigaction *)oldact);
}
#endif
} // namespace __sanitizer

#define ESAN_MAYBE_INTERCEPT_SIGACTION INTERCEPT_FUNCTION(sigaction)
#else
#error Platform not supported
#define ESAN_MAYBE_INTERCEPT_SIGACTION
#endif
+
#if SANITIZER_LINUX
// sigprocmask() interceptor: processSigprocmask filters the requested mask
// change (returning true when libc should still be called), and a successful
// call with a non-null oldset is reported as a write of the old mask.
INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
            __sanitizer_sigset_t *oldset) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);
  int res = 0;
  if (processSigprocmask(how, set, oldset))
    res = REAL(sigprocmask)(how, set, oldset);
  if (!res && oldset)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
  return res;
}
#define ESAN_MAYBE_INTERCEPT_SIGPROCMASK INTERCEPT_FUNCTION(sigprocmask)
#else
#define ESAN_MAYBE_INTERCEPT_SIGPROCMASK
#endif
+
+//This interceptor causes to crash coreclr
+#if !SANITIZER_WINDOWS && !SANITIZER_LINUX
+INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_sigmask, how, set, oldset);
+ int res = 0;
+ if (processSigprocmask(how, set, oldset))
+ res = REAL(sigprocmask)(how, set, oldset);
+ if (!res && oldset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
+ return res;
+}
+#define ESAN_MAYBE_INTERCEPT_PTHREAD_SIGMASK INTERCEPT_FUNCTION(pthread_sigmask)
+#else
+#define ESAN_MAYBE_INTERCEPT_PTHREAD_SIGMASK
+#endif
+
+//===----------------------------------------------------------------------===//
+// Malloc interceptors
+//===----------------------------------------------------------------------===//
+
// Static arena serving malloc/calloc requests that arrive before dlsym has
// resolved the REAL allocator pointers (see handleEarlyAlloc below).
static const uptr early_alloc_buf_size = 4096;
static uptr allocated_bytes;  // Bump-pointer offset into early_alloc_buf.
static char early_alloc_buf[early_alloc_buf_size];

// Returns true if ptr was handed out by handleEarlyAlloc, i.e. it lies inside
// the static early-allocation buffer.  Used by free() to skip such chunks.
static bool isInEarlyAllocBuf(const void *ptr) {
  return ((uptr)ptr >= (uptr)early_alloc_buf &&
          ((uptr)ptr - (uptr)early_alloc_buf) < sizeof(early_alloc_buf));
}
+
+static void *handleEarlyAlloc(uptr size) {
+ // If esan is initialized during an interceptor (which happens with some
+ // tcmalloc implementations that call pthread_mutex_lock), the call from
+ // dlsym to calloc will deadlock.
+ // dlsym may also call malloc before REAL(malloc) is retrieved from dlsym.
+ // We work around it by using a static buffer for the early malloc/calloc
+ // requests.
+ // This solution will also allow us to deliberately intercept malloc & family
+ // in the future (to perform tool actions on each allocation, without
+ // replacing the allocator), as it also solves the problem of intercepting
+ // calloc when it will itself be called before its REAL pointer is
+ // initialized.
+ // We do not handle multiple threads here. This only happens at process init
+ // time, and while it's possible for a shared library to create early threads
+ // that race here, we consider that to be a corner case extreme enough that
+ // it's not worth the effort to handle.
+ void *mem = (void *)&early_alloc_buf[allocated_bytes];
+ allocated_bytes += size;
+ CHECK_LT(allocated_bytes, early_alloc_buf_size);
+ return mem;
+}
+
+INTERCEPTOR(void*, calloc, uptr size, uptr n) {
+ if (EsanDuringInit && REAL(calloc) == nullptr)
+ return handleEarlyAlloc(size * n);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, calloc, size, n);
+ void *res = REAL(calloc)(size, n);
+ // The memory is zeroed and thus is all written.
+ COMMON_INTERCEPTOR_WRITE_RANGE(nullptr, (uptr)res, size * n);
+ return res;
+}
+
+INTERCEPTOR(void*, malloc, uptr size) {
+ if (EsanDuringInit && REAL(malloc) == nullptr)
+ return handleEarlyAlloc(size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, malloc, size);
+ return REAL(malloc)(size);
+}
+
// free() interceptor.  Chunks handed out by the early-alloc arena cannot be
// given to REAL(free); they are simply leaked.
INTERCEPTOR(void, free, void *p) {
  void *ctx;
  // There are only a few early allocation requests, so we simply skip the free.
  if (isInEarlyAllocBuf(p))
    return;
  COMMON_INTERCEPTOR_ENTER(ctx, free, p);
  REAL(free)(p);
}
+
namespace __esan {

// Registers every esan interceptor.  Called once during runtime startup;
// from this point on the listed libc routines are mediated by the wrappers
// above.  The ESAN_MAYBE_* macros expand to nothing on platforms where the
// corresponding routine is absent or deliberately not intercepted.
void initializeInterceptors() {
  InitializeCommonInterceptors();

  INTERCEPT_FUNCTION(strcpy); // NOLINT
  INTERCEPT_FUNCTION(strncpy);

  INTERCEPT_FUNCTION(open);
  ESAN_MAYBE_INTERCEPT_OPEN64;
  INTERCEPT_FUNCTION(creat);
  ESAN_MAYBE_INTERCEPT_CREAT64;
  INTERCEPT_FUNCTION(unlink);
  INTERCEPT_FUNCTION(fread);
  INTERCEPT_FUNCTION(fwrite);
  INTERCEPT_FUNCTION(puts);
  INTERCEPT_FUNCTION(rmdir);

  INTERCEPT_FUNCTION(mmap);
  ESAN_MAYBE_INTERCEPT_MMAP64;

  ESAN_MAYBE_INTERCEPT_SIGNAL;
  ESAN_MAYBE_INTERCEPT_SIGACTION;
  ESAN_MAYBE_INTERCEPT_SIGPROCMASK;
  ESAN_MAYBE_INTERCEPT_PTHREAD_SIGMASK;

  INTERCEPT_FUNCTION(calloc);
  INTERCEPT_FUNCTION(malloc);
  INTERCEPT_FUNCTION(free);

  // TODO(bruening): intercept routines that other sanitizers intercept that
  // are not in the common pool or here yet, ideally by adding to the common
  // pool.  Examples include wcslen and bcopy.

  // TODO(bruening): there are many more libc routines that read or write data
  // structures that no sanitizer is intercepting: sigaction, strtol, etc.
}

} // namespace __esan
--- /dev/null
+//===-- esan_interface.cpp ------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+//===----------------------------------------------------------------------===//
+
+#include "esan_interface_internal.h"
+#include "esan.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+using namespace __esan; // NOLINT
+
// Entry point called from each instrumented module's constructor.  Verifies
// that the module was instrumented for the same tool this runtime serves,
// then initializes the library and registers the module's opaque data (Ptr).
void __esan_init(ToolType Tool, void *Ptr) {
  if (Tool != __esan_which_tool) {
    Printf("ERROR: tool mismatch: %d vs %d\n", Tool, __esan_which_tool);
    Die();
  }
  initializeLibrary(Tool);
  processCompilationUnitInit(Ptr);
}

// Counterpart called from the module destructor: hands the module's opaque
// data back to the runtime for teardown processing.
void __esan_exit(void *Ptr) {
  processCompilationUnitExit(Ptr);
}
+
// Slowpath access hooks inserted by the instrumentation (declared in
// esan_interface_internal.h).  Each forwards the caller's PC, the accessed
// address, the access size in bytes, and an is-write flag to the active
// tool's processRangeAccess.

void __esan_aligned_load1(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 1, false);
}

void __esan_aligned_load2(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 2, false);
}

void __esan_aligned_load4(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 4, false);
}

void __esan_aligned_load8(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 8, false);
}

void __esan_aligned_load16(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 16, false);
}

void __esan_aligned_store1(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 1, true);
}

void __esan_aligned_store2(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 2, true);
}

void __esan_aligned_store4(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 4, true);
}

void __esan_aligned_store8(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 8, true);
}

void __esan_aligned_store16(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 16, true);
}

// Unaligned variants: identical forwarding; the aligned/unaligned split only
// selects which hook the instrumentation emits.
void __esan_unaligned_load2(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 2, false);
}

void __esan_unaligned_load4(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 4, false);
}

void __esan_unaligned_load8(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 8, false);
}

void __esan_unaligned_load16(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 16, false);
}

void __esan_unaligned_store2(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 2, true);
}

void __esan_unaligned_store4(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 4, true);
}

void __esan_unaligned_store8(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 8, true);
}

void __esan_unaligned_store16(void *Addr) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, 16, true);
}

// Unusually-sized accesses pass an explicit byte count.
void __esan_unaligned_loadN(void *Addr, uptr Size) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, Size, false);
}

void __esan_unaligned_storeN(void *Addr, uptr Size) {
  processRangeAccess(GET_CALLER_PC(), (uptr)Addr, Size, true);
}

// Hook for the cache-fragmentation tool's per-field counters.
void __esan_increment(void *Addr) { incrementFieldCounter(Addr); }
+
// Public interface:
extern "C" {
// Triggers the active tool's results report on demand.
SANITIZER_INTERFACE_ATTRIBUTE void __esan_report() { reportResults(); }

// Returns the runtime's current sample count.
SANITIZER_INTERFACE_ATTRIBUTE unsigned int __esan_get_sample_count() {
  return getSampleCount();
}
} // extern "C"
--- /dev/null
+//===-- esan_interface_internal.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Calls to the functions declared in this header will be inserted by
+// the instrumentation module.
+//===----------------------------------------------------------------------===//
+
+#ifndef ESAN_INTERFACE_INTERNAL_H
+#define ESAN_INTERFACE_INTERNAL_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __esan_.
+
+using __sanitizer::uptr;
+using __sanitizer::u32;
+
+extern "C" {
+
+// This should be kept consistent with LLVM's EfficiencySanitizerOptions.
+// The value is passed as a 32-bit integer by the compiler.
+typedef enum Type : u32 {
+ ESAN_None = 0,
+ ESAN_CacheFrag,
+ ESAN_WorkingSet,
+ ESAN_Max,
+} ToolType;
+
+// To handle interceptors that invoke instrumented code prior to
+// __esan_init() being called, the instrumentation module creates this
+// global variable specifying the tool.
+extern ToolType __esan_which_tool __attribute__((weak));
+
+// This function should be called at the very beginning of the process,
+// before any instrumented code is executed and before any call to malloc.
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_init(ToolType Tool, void *Ptr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_exit(void *Ptr);
+
+// The instrumentation module will insert a call to one of these routines prior
+// to each load and store instruction for which we do not have "fastpath"
+// inlined instrumentation. These calls constitute the "slowpath" for our
+// tools. We have separate routines for each type of memory access to enable
+// targeted optimization.
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_load1(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_load2(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_load4(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_load8(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_load16(void *Addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_store1(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_store2(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_store4(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_store8(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_aligned_store16(void *Addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_unaligned_load2(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_unaligned_load4(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_unaligned_load8(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_unaligned_load16(void *Addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_unaligned_store2(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_unaligned_store4(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_unaligned_store8(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_unaligned_store16(void *Addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __esan_increment(void *Addr);
+
+// These cover unusually-sized accesses.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __esan_unaligned_loadN(void *Addr, uptr Size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __esan_unaligned_storeN(void *Addr, uptr Size);
+
+} // extern "C"
+
+#endif // ESAN_INTERFACE_INTERNAL_H
--- /dev/null
+//===-- esan.cpp ----------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Linux-specific code for the Esan run-time.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+
+#include "esan.h"
+#include "esan_shadow.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include <sys/mman.h>
+#include <errno.h>
+
+namespace __esan {
+
// Ensures the process address-space layout is compatible with the shadow
// mapping; re-execs with a clamped stack rlimit when it is not.
void verifyAddressSpace() {
#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
  // The kernel determines its mmap base from the stack size limit.
  // Our Linux 64-bit shadow mapping assumes the stack limit is less than a
  // terabyte, which keeps the mmap region above 0x7e00'00000000.
  uptr StackLimit = GetStackSizeLimitInBytes();
  if (StackSizeIsUnlimited() || StackLimit > MaxStackSize) {
    VReport(1, "The stack size limit is beyond the maximum supported.\n"
            "Re-execing with a stack size below 1TB.\n");
    SetStackSizeLimitInBytes(MaxStackSize);
    ReExec();
  }
#endif
}
+
+static bool liesWithinSingleAppRegion(uptr Start, SIZE_T Size) {
+ uptr AppStart, AppEnd;
+ for (int i = 0; getAppRegion(i, &AppStart, &AppEnd); ++i) {
+ if (Start >= AppStart && Start + Size - 1 <= AppEnd) {
+ return true;
+ }
+ }
+ return false;
+}
+
// Validates a requested mmap address against the known app regions.
// Returns false -- with errno set to EINVAL -- only for a MAP_FIXED request
// outside those regions; a non-fixed out-of-region hint is cleared so the
// kernel chooses a placement (which checkMmapResult then validates).
bool fixMmapAddr(void **Addr, SIZE_T Size, int Flags) {
  if (*Addr) {
    if (!liesWithinSingleAppRegion((uptr)*Addr, Size)) {
      VPrintf(1, "mmap conflict: [%p-%p) is not in an app region\n",
              *Addr, (uptr)*Addr + Size);
      if (Flags & MAP_FIXED) {
        errno = EINVAL;
        return false;
      } else {
        *Addr = 0;
      }
    }
  }
  return true;
}
+
// Verifies that a successful mmap landed inside a supported app region and
// aborts the process otherwise; MAP_FAILED results pass through unchanged.
uptr checkMmapResult(uptr Addr, SIZE_T Size) {
  if ((void *)Addr == MAP_FAILED)
    return Addr;
  if (!liesWithinSingleAppRegion(Addr, Size)) {
    // FIXME: attempt to dynamically add this as an app region if it
    // fits our shadow criteria.
    // We could also try to remap somewhere else.
    Printf("ERROR: unsupported mapping at [%p-%p)\n", Addr, Addr+Size);
    Die();
  }
  return Addr;
}
+
+} // namespace __esan
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
--- /dev/null
+//===-- esan_shadow.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Shadow memory mappings for the esan run-time.
+//===----------------------------------------------------------------------===//
+
+#ifndef ESAN_SHADOW_H
+#define ESAN_SHADOW_H
+
+#include "esan.h"
+#include <sanitizer_common/sanitizer_platform.h>
+
+namespace __esan {
+
// One application address range.  [Start, End) is half-open (code below uses
// appToShadow(End - 1) + 1 for the shadow end).  ShadowMergedWithPrev marks a
// region whose shadow is folded into the previous entry's shadow mapping
// (see getShadowRegion).
struct ApplicationRegion {
  uptr Start;
  uptr End;
  bool ShadowMergedWithPrev;
};
+
+#if SANITIZER_LINUX && defined(__x86_64__)
+// Linux x86_64
+//
+// Application memory falls into these 5 regions (ignoring the corner case
+// of PIE with a non-zero PT_LOAD base):
+//
+// [0x00000000'00000000, 0x00000100'00000000) non-PIE + heap
+// [0x00005500'00000000, 0x00005700'00000000) PIE
+// [0x00007e00'00000000, 0x00007fff'ff600000) libraries + stack, part 1
+// [0x00007fff'ff601000, 0x00008000'00000000) libraries + stack, part 2
+// [0xffffffff'ff600000, 0xffffffff'ff601000) vsyscall
+//
+// Although we can ignore the vsyscall for the most part as there are few data
+// references there (other sanitizers ignore it), we enforce a gap inside the
+// library region to distinguish the vsyscall's shadow, considering this gap to
+// be an invalid app region.
+// We disallow application memory outside of those 5 regions.
+// Our regions assume that the stack rlimit is less than a terabyte (otherwise
// the Linux kernel's default mmap region drops below 0x7e00'00000000), which we
+// at init time (we can support larger and unlimited sizes for shadow
+// scaledowns, but it is difficult for 1:1 mappings).
+//
+// Our shadow memory is scaled from a 1:1 mapping and supports a scale
+// specified at library initialization time that can be any power-of-2
+// scaledown (1x, 2x, 4x, 8x, 16x, etc.).
+//
+// We model our shadow memory after Umbra, a library used by the Dr. Memory
+// tool: https://github.com/DynamoRIO/drmemory/blob/master/umbra/umbra_x64.c.
+// We use Umbra's scheme as it was designed to support different
+// offsets, it supports two different shadow mappings (which we may want to
+// use for future tools), and it ensures that the shadow of a shadow will
+// not overlap either shadow memory or application memory.
+//
+// This formula translates from application memory to shadow memory:
+//
+// shadow(app) = ((app & 0x00000fff'ffffffff) + offset) >> scale
+//
+// Where the offset for 1:1 is 0x00001300'00000000. For other scales, the
+// offset is shifted left by the scale, except for scales of 1 and 2 where
+// it must be tweaked in order to pass the double-shadow test
+// (see the "shadow(shadow)" comments below):
// scale == 0: 0x00001300'00000000
// scale == 1: 0x00002200'00000000
// scale == 2: 0x00004400'00000000
// scale >= 3: (0x00001300'00000000 << scale)
+//
+// Do not pass in the open-ended end value to the formula as it will fail.
+//
+// The resulting shadow memory regions for a 0 scaling are:
+//
+// [0x00001300'00000000, 0x00001400'00000000)
+// [0x00001800'00000000, 0x00001a00'00000000)
+// [0x00002100'00000000, 0x000022ff'ff600000)
+// [0x000022ff'ff601000, 0x00002300'00000000)
+// [0x000022ff'ff600000, 0x000022ff'ff601000]
+//
+// We also want to ensure that a wild access by the application into the shadow
+// regions will not corrupt our own shadow memory. shadow(shadow) ends up
+// disjoint from shadow(app):
+//
+// [0x00001600'00000000, 0x00001700'00000000)
+// [0x00001b00'00000000, 0x00001d00'00000000)
+// [0x00001400'00000000, 0x000015ff'ff600000]
+// [0x000015ff'ff601000, 0x00001600'00000000]
+// [0x000015ff'ff600000, 0x000015ff'ff601000]
+
// The 5 supported Linux x86_64 app regions described in the comment above,
// in ascending address order.
static const struct ApplicationRegion AppRegions[] = {
  {0x0000000000000000ull, 0x0000010000000000u, false},
  {0x0000550000000000u, 0x0000570000000000u, false},
  // We make one shadow mapping to hold the shadow regions for all 3 of these
  // app regions, as the mappings interleave, and the gap between the 3rd and
  // 4th scales down below a page.
  {0x00007e0000000000u, 0x00007fffff600000u, false},
  {0x00007fffff601000u, 0x0000800000000000u, true},
  {0xffffffffff600000u, 0xffffffffff601000u, true},
};
+
+#elif SANITIZER_LINUX && SANITIZER_MIPS64
+
+// Application memory falls into these 3 regions
+//
+// [0x00000001'00000000, 0x00000002'00000000) non-PIE + heap
+// [0x000000aa'00000000, 0x000000ab'00000000) PIE
+// [0x000000ff'00000000, 0x000000ff'ffffffff) libraries + stack
+//
+// This formula translates from application memory to shadow memory:
+//
+// shadow(app) = ((app & 0x00000f'ffffffff) + offset) >> scale
+//
+// Where the offset for 1:1 is 0x000013'00000000. For other scales, the
+// offset is shifted left by the scale, except for scales of 1 and 2 where
+// it must be tweaked in order to pass the double-shadow test
+// (see the "shadow(shadow)" comments below):
+// scale == 0: 0x000013'00000000
+// scale == 1: 0x000022'00000000
+// scale == 2: 0x000044'00000000
+// scale >= 3: (0x000013'00000000 << scale)
+//
+// The resulting shadow memory regions for a 0 scaling are:
+//
+// [0x00000014'00000000, 0x00000015'00000000)
+// [0x0000001d'00000000, 0x0000001e'00000000)
+// [0x00000022'00000000, 0x00000022'ffffffff)
+//
+// We also want to ensure that a wild access by the application into the shadow
+// regions will not corrupt our own shadow memory. shadow(shadow) ends up
+// disjoint from shadow(app):
+//
+// [0x00000017'00000000, 0x00000018'00000000)
+// [0x00000020'00000000, 0x00000021'00000000)
+// [0x00000015'00000000, 0x00000015'ffffffff]
+
// The 3 supported Linux MIPS64 app regions described in the comment above.
static const struct ApplicationRegion AppRegions[] = {
  {0x0100000000u, 0x0200000000u, false},
  {0xaa00000000u, 0xab00000000u, false},
  {0xff00000000u, 0xffffffffffu, false},
};

#else
// Default 32-bit layout: two app regions straddling the low shadow window
// placed at 0x20000000 (see ShadowMapping::initialize).
static const struct ApplicationRegion AppRegions[] = {
  {0x00000000u, 0x1fffffffu, false},
  {0x40000000u, 0xffffffffu, false},
};
#endif
+
// Number of entries in the platform's AppRegions table.
static const u32 NumAppRegions = sizeof(AppRegions) / sizeof(AppRegions[0]);

// See the comment above: we do not currently support a stack size rlimit
// equal to or larger than 1TB.
#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
static const uptr MaxStackSize = (1ULL << 40) - 4096;  // 1TB minus one page.
#else
static const uptr MaxStackSize = (1ULL << 32) - 4096;  // 4GB minus one page.
#endif
+
// Holds the app-to-shadow translation parameters for the current tool; see
// the mapping formulas in the comments above.
class ShadowMapping {
public:
  // The scale and offset vary by tool.
  uptr Scale;
  uptr Offset;

  // Address masks selecting the significant app bits for each supported
  // virtual-address width (used by appToShadowImpl).
  // TODO(sagar.thakur): Try to hardcode the mask as done in the compiler
  // instrumentation to reduce the runtime cost of appToShadow.
#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
  struct ShadowMemoryMask40 {
    static const uptr Mask = 0x0000000fffffffffu;
  };
  struct ShadowMemoryMask47 {
    static const uptr Mask = 0x00000fffffffffffu;
  };
#else
  struct ShadowMemoryMask32 {
    static const uptr Mask = 0x000fffffu;
  };
#endif

  // Derives Offset from the scale and the detected VMA size.  For scales 0-2
  // the offset comes from a hand-tweaked table (required to pass the
  // double-shadow test described above); for larger scales it is the 1:1
  // offset shifted left by the scale.
  void initialize(uptr ShadowScale) {

#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
    const uptr OffsetArray40[3] = {
        0x0000001300000000u,
        0x0000002200000000u,
        0x0000004400000000u,
    };
    const uptr OffsetArray47[3] = {
        0x0000130000000000u,
        0x0000220000000000u,
        0x0000440000000000u,
    };
#endif

    Scale = ShadowScale;
    switch (VmaSize) {
#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
    case 40: {
      if (Scale <= 2)
        Offset = OffsetArray40[Scale];
      else
        Offset = OffsetArray40[0] << Scale;
    } break;
    case 47: {
      if (Scale <= 2)
        Offset = OffsetArray47[Scale];
      else
        Offset = OffsetArray47[0] << Scale;
    } break;
#else
    case 32: {
      Offset = 0x20000000; // Low Shadow Address.
    } break;
#endif
    default: {
      // Unsupported VMA size: fatal, since no mapping can be constructed.
      Printf("ERROR: %d-bit virtual memory address size not supported\n",
             VmaSize);
      Die();
    }
    }
  }
};
+
+extern ShadowMapping Mapping;
+
+static inline bool getAppRegion(u32 i, uptr *Start, uptr *End) {
+ if (i >= NumAppRegions)
+ return false;
+ *Start = AppRegions[i].Start;
+ *End = AppRegions[i].End;
+ return true;
+}
+
+ALWAYS_INLINE
+bool isAppMem(uptr Mem) {
+ for (u32 i = 0; i < NumAppRegions; ++i) {
+ if (Mem >= AppRegions[i].Start && Mem < AppRegions[i].End)
+ return true;
+ }
+ return false;
+}
+
// Core app-to-shadow translation; Params supplies the platform address mask.
// 64-bit Linux: shadow = ((app & Mask) + Offset) >> Scale.
// Otherwise:    shadow = (app >> Scale) + Offset.
template<typename Params>
uptr appToShadowImpl(uptr App) {
#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
  return (((App & Params::Mask) + Mapping.Offset) >> Mapping.Scale);
#else
  return ((App >> Mapping.Scale) + Mapping.Offset);
#endif
}
+
// Translates an application address to its shadow address, dispatching on
// the runtime-detected VMA size.  Die()s (does not return) for unsupported
// VMA sizes.  Callers must not pass a region's open-ended End value (the
// formula fails for it; see getShadowRegion).
ALWAYS_INLINE
uptr appToShadow(uptr App) {
  switch (VmaSize) {
#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
  case 40:
    return appToShadowImpl<ShadowMapping::ShadowMemoryMask40>(App);
  case 47:
    return appToShadowImpl<ShadowMapping::ShadowMemoryMask47>(App);
#else
  case 32:
    return appToShadowImpl<ShadowMapping::ShadowMemoryMask32>(App);
#endif
  default: {
    Printf("ERROR: %d-bit virtual memory address size not supported\n",
           VmaSize);
    Die();
  }
  }
}
+
// Retrieves the i-th *merged* shadow region: app regions whose shadows
// interleave (ShadowMergedWithPrev) are reported as a single span.  Returns
// false when i exceeds the number of unmerged shadow regions.
static inline bool getShadowRegion(u32 i, uptr *Start, uptr *End) {
  if (i >= NumAppRegions)
    return false;
  u32 UnmergedShadowCount = 0;
  u32 AppIdx;
  // Locate the i-th app region that starts a new (unmerged) shadow region.
  for (AppIdx = 0; AppIdx < NumAppRegions; ++AppIdx) {
    if (!AppRegions[AppIdx].ShadowMergedWithPrev) {
      if (UnmergedShadowCount == i)
        break;
      UnmergedShadowCount++;
    }
  }
  if (AppIdx >= NumAppRegions || UnmergedShadowCount != i)
    return false;
  *Start = appToShadow(AppRegions[AppIdx].Start);
  // The formula fails for the end itself.
  *End = appToShadow(AppRegions[AppIdx].End - 1) + 1;
  // Merge with adjacent shadow regions:
  for (++AppIdx; AppIdx < NumAppRegions; ++AppIdx) {
    if (!AppRegions[AppIdx].ShadowMergedWithPrev)
      break;
    *Start = Min(*Start, appToShadow(AppRegions[AppIdx].Start));
    *End = Max(*End, appToShadow(AppRegions[AppIdx].End - 1) + 1);
  }
  return true;
}
+
// Returns true when Mem falls inside the shadow of any application region.
ALWAYS_INLINE
bool isShadowMem(uptr Mem) {
  // We assume this is not used on any critical performance path and so there's
  // no need to hardcode the mapping results.
  for (uptr i = 0; i < NumAppRegions; ++i) {
    if (Mem >= appToShadow(AppRegions[i].Start) &&
        Mem < appToShadow(AppRegions[i].End - 1) + 1)
      return true;
  }
  return false;
}
+
+} // namespace __esan
+
+#endif /* ESAN_SHADOW_H */
--- /dev/null
+//===-- esan_sideline.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Esan sideline thread support.
+//===----------------------------------------------------------------------===//
+
+#ifndef ESAN_SIDELINE_H
+#define ESAN_SIDELINE_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __esan {
+
// Callback type invoked on each sideline sample.
typedef void (*SidelineFunc)(void *Arg);

// Currently only one sideline thread is supported.
// It calls the SidelineFunc passed to launchThread once on each sample at the
// given frequency in real time (i.e., wall clock time).
class SidelineThread {
public:
  // We cannot initialize any fields in the constructor as it will be called
  // *after* launchThread for a static instance, as esan.module_ctor is called
  // before static initializers.
  SidelineThread() {}
  ~SidelineThread() {}

  // To simplify declaration in sanitizer code where we want to avoid
  // heap allocations, the constructor and destructor do nothing and
  // launchThread and joinThread do the real work.
  // They should each be called just once.
  bool launchThread(SidelineFunc takeSample, void *Arg, u32 FreqMilliSec);
  bool joinThread();

  // Must be called from the sideline thread itself.
  bool adjustTimer(u32 FreqMilliSec);

private:
  static int runSideline(void *Arg);
  static void registerSignal(int SigNum);
  static void handleSidelineSignal(int SigNum, void *SigInfo, void *Ctx);

  // NOTE(review): field semantics below are inferred from the handler code
  // and names; confirm against launchThread's definition.
  char *Stack;                    // Presumably the sideline thread's stack.
  SidelineFunc sampleFunc;        // Invoked per sample by the signal handler.
  void *FuncArg;                  // Opaque argument passed to sampleFunc.
  u32 Freq;                       // Sampling frequency, in milliseconds.
  uptr SidelineId;                // Sideline thread identifier.
  atomic_uintptr_t SidelineExit;  // Nonzero suppresses further sampling.
};
+
+} // namespace __esan
+
+#endif // ESAN_SIDELINE_H
--- /dev/null
+//===-- esan_sideline_linux.cpp ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Support for a separate or "sideline" tool thread on Linux.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "esan_sideline.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include <errno.h>
+#include <sched.h>
+#include <sys/prctl.h>
+#include <sys/signal.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+namespace __esan {
+
+static const int SigAltStackSize = 4*1024;
+static const int SidelineStackSize = 4*1024;
+static const uptr SidelineIdUninitialized = 1;
+
+// FIXME: we'll need some kind of TLS (can we trust that a pthread key will
+// work in our non-POSIX thread?) to access our data in our signal handler
+// with multiple sideline threads. For now we assume there is only one
+// sideline thread and we use a dirty solution of a global var.
+static SidelineThread *TheThread;
+
+// We aren't passing SA_NODEFER so the same signal is blocked while here.
+// SIGALRM handler for the sideline thread: forwards each timer tick to the
+// registered sampling callback unless shutdown has been requested.
+void SidelineThread::handleSidelineSignal(int SigNum, void *SigInfo,
+                                          void *Ctx) {
+  VPrintf(3, "Sideline signal %d\n", SigNum);
+  CHECK_EQ(SigNum, SIGALRM);
+  // See above about needing TLS to avoid this global var.
+  SidelineThread *Thread = TheThread;
+  // Skip sampling once joinThread() has flagged the exit.
+  if (atomic_load(&Thread->SidelineExit, memory_order_relaxed) != 0)
+    return;
+  Thread->sampleFunc(Thread->FuncArg);
+}
+
+// Install handleSidelineSignal for SigNum, delivered on the alternate stack.
+void SidelineThread::registerSignal(int SigNum) {
+  __sanitizer_sigaction Act;
+  internal_memset(&Act, 0, sizeof(Act));
+  // Deliberately no SA_NODEFER: the signal stays blocked inside the handler.
+  Act.sa_flags = SA_ONSTACK | SA_SIGINFO;
+  Act.sigaction = handleSidelineSignal;
+  int Rc = internal_sigaction(SigNum, &Act, nullptr);
+  CHECK_EQ(Rc, 0);
+}
+
+// Entry point of the cloned sideline thread: sets up an alternate signal
+// stack and the sampling itimer, then spins until SidelineExit is set by
+// joinThread().  Returns 0 (the clone exit status).
+int SidelineThread::runSideline(void *Arg) {
+  VPrintf(1, "Sideline thread starting\n");
+  SidelineThread *Thread = static_cast<SidelineThread*>(Arg);
+
+  // If the parent dies, we want to exit also.
+  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
+
+  // Set up a signal handler on an alternate stack for safety.
+  InternalScopedBuffer<char> StackMap(SigAltStackSize);
+  struct sigaltstack SigAltStack;
+  SigAltStack.ss_sp = StackMap.data();
+  SigAltStack.ss_size = SigAltStackSize;
+  SigAltStack.ss_flags = 0;
+  internal_sigaltstack(&SigAltStack, nullptr);
+
+  // We inherit the signal mask from the app thread.  In case
+  // we weren't created at init time, we ensure the mask is empty.
+  __sanitizer_sigset_t SigSet;
+  internal_sigfillset(&SigSet);
+  int Res = internal_sigprocmask(SIG_UNBLOCK, &SigSet, nullptr);
+  CHECK_EQ(Res, 0);
+
+  registerSignal(SIGALRM);
+
+  bool TimerSuccess = Thread->adjustTimer(Thread->Freq);
+  CHECK(TimerSuccess);
+
+  // We loop, doing nothing but handling itimer signals.
+  // Use the local Thread pointer (identical to the global TheThread) for
+  // consistency with the rest of this routine.
+  while (atomic_load(&Thread->SidelineExit, memory_order_relaxed) == 0)
+    sched_yield();
+
+  if (!Thread->adjustTimer(0))
+    VPrintf(1, "Failed to disable timer\n");
+
+  VPrintf(1, "Sideline thread exiting\n");
+  return 0;
+}
+
+// Spawns the sideline thread, which invokes takeSample(Arg) on every timer
+// tick at FreqMilliSec milliseconds of wall-clock time.  Must be called at
+// most once.  Returns true on success; calls Die() if the clone fails.
+bool SidelineThread::launchThread(SidelineFunc takeSample, void *Arg,
+                                  u32 FreqMilliSec) {
+  // This can only be called once.  However, we can't clear a field in
+  // the constructor and check for that here as the constructor for
+  // a static instance is called *after* our module_ctor and thus after
+  // this routine!  Thus we rely on the TheThread check below.
+  CHECK(TheThread == nullptr); // Only one sideline thread is supported.
+  TheThread = this;
+  sampleFunc = takeSample;
+  FuncArg = Arg;
+  Freq = FreqMilliSec;
+  atomic_store(&SidelineExit, 0, memory_order_relaxed);
+
+  // We do without a guard page.
+  Stack = static_cast<char*>(MmapOrDie(SidelineStackSize, "SidelineStack"));
+  // We need to handle the return value from internal_clone() not having been
+  // assigned yet (for our CHECK in adjustTimer()) so we ensure this has a
+  // sentinel value.
+  SidelineId = SidelineIdUninitialized;
+  // By omitting CLONE_THREAD, the child is in its own thread group and will not
+  // receive any of the application's signals.
+  SidelineId = internal_clone(
+      runSideline, Stack + SidelineStackSize,
+      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
+      this, nullptr /* parent_tidptr */,
+      nullptr /* newtls */, nullptr /* child_tidptr */);
+  int ErrCode;
+  if (internal_iserror(SidelineId, &ErrCode)) {
+    Printf("FATAL: EfficiencySanitizer failed to spawn a thread (code %d).\n",
+           ErrCode);
+    Die();
+    return false; // Not reached.
+  }
+  return true;
+}
+
+// Signals the sideline thread to exit, waits for it, and releases its stack.
+// Returns false if the wait failed for a reason other than EINTR.
+bool SidelineThread::joinThread() {
+  VPrintf(1, "Joining sideline thread\n");
+  bool Res = true;
+  atomic_store(&SidelineExit, 1, memory_order_relaxed);
+  // The child is in its own thread group (no CLONE_THREAD in launchThread),
+  // so __WALL is needed to wait for it.
+  while (true) {
+    uptr Status = internal_waitpid(SidelineId, nullptr, __WALL);
+    int ErrCode;
+    if (!internal_iserror(Status, &ErrCode))
+      break;
+    if (ErrCode == EINTR)
+      continue; // Retry if interrupted by a signal.
+    VPrintf(1, "Failed to join sideline thread (errno %d)\n", ErrCode);
+    Res = false;
+    break;
+  }
+  UnmapOrDie(Stack, SidelineStackSize);
+  return Res;
+}
+
+// Must be called from the sideline thread itself.
+// Re-arms (or, when FreqMilliSec == 0, disables) the real-time interval
+// timer that drives the sampling signal.  Returns true on success.
+bool SidelineThread::adjustTimer(u32 FreqMilliSec) {
+  // The return value of internal_clone() may not have been assigned yet:
+  CHECK(internal_getpid() == SidelineId ||
+        SidelineId == SidelineIdUninitialized);
+  Freq = FreqMilliSec;
+  struct itimerval TimerVal;
+  // Split the millisecond period into whole seconds plus microseconds.
+  TimerVal.it_interval.tv_sec = (time_t) Freq / 1000;
+  TimerVal.it_interval.tv_usec = (time_t) (Freq % 1000) * 1000;
+  TimerVal.it_value.tv_sec = (time_t) Freq / 1000;
+  TimerVal.it_value.tv_usec = (time_t) (Freq % 1000) * 1000;
+  // As we're in a different thread group, we cannot use either
+  // ITIMER_PROF or ITIMER_VIRTUAL without taking up scheduled
+  // time ourselves: thus we must use real time.
+  int Res = setitimer(ITIMER_REAL, &TimerVal, nullptr);
+  return (Res == 0);
+}
+
+} // namespace __esan
+
+#endif // SANITIZER_LINUX
--- /dev/null
+# This file is used to maintain libtool version info for libesan. See
+# the libtool manual to understand the meaning of the fields. This is
+# a separate file so that version updates don't involve re-running
+# automake.
+# CURRENT:REVISION:AGE
+1:0:0
--- /dev/null
+//===-- working_set.cpp ---------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// This file contains working-set-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "working_set.h"
+#include "esan.h"
+#include "esan_circular_buffer.h"
+#include "esan_flags.h"
+#include "esan_shadow.h"
+#include "esan_sideline.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+// We shadow every cache line of app memory with one shadow byte.
+// - The highest bit of each shadow byte indicates whether the corresponding
+// cache line has ever been accessed.
+// - The lowest bit of each shadow byte indicates whether the corresponding
+// cache line was accessed since the last sample.
+// - The other bits are used for working set snapshots at successively
+// lower frequencies, each bit to the left from the lowest bit stepping
+// down the frequency by 2 to the power of getFlags()->snapshot_step.
+// Thus we have something like this:
+// Bit 0: Since last sample
+// Bit 1: Since last 2^2 samples
+// Bit 2: Since last 2^4 samples
+// Bit 3: ...
+// Bit 7: Ever accessed.
+// We live with races in accessing each shadow byte.
+typedef unsigned char byte;
+
+namespace __esan {
+
+// Our shadow memory assumes that the line size is 64.
+static const u32 CacheLineSize = 64;
+
+// See the shadow byte layout description above.
+static const u32 TotalWorkingSetBitIdx = 7;
+// We accumulate to the left until we hit this bit.
+// We don't need to accumulate to the final bit as it's set on each ref
+// by the compiler instrumentation.
+static const u32 MaxAccumBitIdx = 6;
+static const u32 CurWorkingSetBitIdx = 0;
+static const byte ShadowAccessedVal =
+ (1 << TotalWorkingSetBitIdx) | (1 << CurWorkingSetBitIdx);
+
+static SidelineThread Thread;
+// If we use real-time-based timer samples this won't overflow in any realistic
+// scenario, but if we switch to some other unit (such as memory accesses) we
+// may want to consider a 64-bit int.
+static u32 SnapshotNum;
+
+// We store the wset size for each of 8 different sampling frequencies.
+static const u32 NumFreq = 8; // One for each bit of our shadow bytes.
+// We cannot use static objects as the global destructor is called
+// prior to our finalize routine.
+// These are each circular buffers, sized up front.
+CircularBuffer<u32> SizePerFreq[NumFreq];
+// We cannot rely on static initializers (they may run too late) but
+// we record the size here for clarity:
+u32 CircularBufferSizes[NumFreq] = {
+ // These are each mmap-ed so our minimum is one page.
+ 32*1024,
+ 16*1024,
+ 8*1024,
+ 4*1024,
+ 4*1024,
+ 4*1024,
+ 4*1024,
+ 4*1024,
+};
+
+// Marks every cache line overlapping [Addr, Addr+Size) as accessed by setting
+// ShadowAccessedVal in each corresponding shadow byte.  PC and IsWrite are
+// accepted for interface uniformity but are not used by this routine.
+void processRangeAccessWorkingSet(uptr PC, uptr Addr, SIZE_T Size,
+                                  bool IsWrite) {
+  if (Size == 0)
+    return;
+  SIZE_T I = 0;
+  uptr LineSize = getFlags()->cache_line_size;
+  // As Addr+Size could overflow at the top of a 32-bit address space,
+  // we avoid the simpler formula that rounds the start and end.
+  SIZE_T NumLines = Size / LineSize +
+    // Add any extra at the start or end adding on an extra line:
+    (LineSize - 1 + Addr % LineSize + Size % LineSize) / LineSize;
+  byte *Shadow = (byte *)appToShadow(Addr);
+  // Write shadow bytes until we're word-aligned.
+  while (I < NumLines && (uptr)Shadow % 4 != 0) {
+    // Skip the store when the bits are already set.
+    if ((*Shadow & ShadowAccessedVal) != ShadowAccessedVal)
+      *Shadow |= ShadowAccessedVal;
+    ++Shadow;
+    ++I;
+  }
+  // Write whole shadow words at a time.
+  // Using a word-stride loop improves the runtime of a microbenchmark of
+  // memset calls by 10%.
+  u32 WordValue = ShadowAccessedVal | ShadowAccessedVal << 8 |
+      ShadowAccessedVal << 16 | ShadowAccessedVal << 24;
+  while (I + 4 <= NumLines) {
+    if ((*(u32*)Shadow & WordValue) != WordValue)
+      *(u32*)Shadow |= WordValue;
+    Shadow += 4;
+    I += 4;
+  }
+  // Write any trailing shadow bytes.
+  while (I < NumLines) {
+    if ((*Shadow & ShadowAccessedVal) != ShadowAccessedVal)
+      *Shadow |= ShadowAccessedVal;
+    ++Shadow;
+    ++I;
+  }
+}
+
+// This routine will word-align ShadowStart and ShadowEnd prior to scanning.
+// It does *not* clear for BitIdx==TotalWorkingSetBitIdx, as that top bit
+// measures the access during the entire execution and should never be cleared.
+// Returns the number of shadow bytes in the range with bit BitIdx set,
+// i.e., the number of touched cache lines at that sampling frequency.
+static u32 countAndClearShadowValues(u32 BitIdx, uptr ShadowStart,
+                                     uptr ShadowEnd) {
+  u32 WorkingSetSize = 0;
+  u32 ByteValue = 0x1 << BitIdx;
+  // Replicate the per-byte mask across all four bytes of a word.
+  u32 WordValue = ByteValue | ByteValue << 8 | ByteValue << 16 |
+      ByteValue << 24;
+  // Get word aligned start.
+  ShadowStart = RoundDownTo(ShadowStart, sizeof(u32));
+  bool Accum = getFlags()->record_snapshots && BitIdx < MaxAccumBitIdx;
+  // Do not clear the bit that measures access during the entire execution.
+  bool Clear = BitIdx < TotalWorkingSetBitIdx;
+  for (u32 *Ptr = (u32 *)ShadowStart; Ptr < (u32 *)ShadowEnd; ++Ptr) {
+    // Word-level test first so fully-clear words are skipped cheaply.
+    if ((*Ptr & WordValue) != 0) {
+      byte *BytePtr = (byte *)Ptr;
+      for (u32 j = 0; j < sizeof(u32); ++j) {
+        if (BytePtr[j] & ByteValue) {
+          ++WorkingSetSize;
+          if (Accum) {
+            // Accumulate to the lower-frequency bit to the left.
+            BytePtr[j] |= (ByteValue << 1);
+          }
+        }
+      }
+      if (Clear) {
+        // Clear this bit from every shadow byte.
+        *Ptr &= ~WordValue;
+      }
+    }
+  }
+  return WorkingSetSize;
+}
+
+// Scan shadow memory to calculate the number of cache lines being accessed,
+// i.e., the number of non-zero bits indexed by BitIdx in each shadow byte.
+// We also clear the lowest bits (most recent working set snapshot).
+// We do *not* clear for BitIdx==TotalWorkingSetBitIdx, as that top bit
+// measures the access during the entire execution and should never be cleared.
+static u32 computeWorkingSizeAndReset(u32 BitIdx) {
+  u32 WorkingSetSize = 0;
+  MemoryMappingLayout MemIter(true/*cache*/);
+  uptr Start, End, Prot;
+  while (MemIter.Next(&Start, &End, nullptr/*offs*/, nullptr/*file*/,
+                      0/*file size*/, &Prot)) {
+    // Keep the arguments in the order of the format string: the two %d
+    // conversions take the app/shadow booleans, and the final %u takes
+    // Prot (the previous ordering fed Prot into app=%d).
+    VPrintf(4, "%s: considering %p-%p app=%d shadow=%d prot=%u\n",
+            __FUNCTION__, Start, End, isAppMem(Start), isShadowMem(Start),
+            Prot);
+    if (isShadowMem(Start) && (Prot & MemoryMappingLayout::kProtectionWrite)) {
+      VPrintf(3, "%s: walking %p-%p\n", __FUNCTION__, Start, End);
+      WorkingSetSize += countAndClearShadowValues(BitIdx, Start, End);
+    }
+  }
+  return WorkingSetSize;
+}
+
+// This is invoked from a signal handler but in a sideline thread doing nothing
+// else so it is a little less fragile than a typical signal handler.
+// Records, for each frequency whose period divides the current snapshot
+// number, the working set size since that frequency's last sample.
+static void takeSample(void *Arg) {
+  u32 BitIdx = CurWorkingSetBitIdx;
+  u32 Freq = 1;
+  ++SnapshotNum; // Simpler to skip 0 whose mod matches everything.
+  while (BitIdx <= MaxAccumBitIdx && (SnapshotNum % Freq) == 0) {
+    u32 NumLines = computeWorkingSizeAndReset(BitIdx);
+    VReport(1, "%s: snapshot #%5d bit %d freq %4d: %8u\n", SanitizerToolName,
+            SnapshotNum, BitIdx, Freq, NumLines);
+    SizePerFreq[BitIdx].push_back(NumLines);
+    // Each successive bit is sampled 2^snapshot_step times less often.
+    Freq = Freq << getFlags()->snapshot_step;
+    BitIdx++;
+  }
+}
+
+// Returns the number of working-set snapshots taken so far (used by the
+// __esan_get_sample_count interface for testing).
+unsigned int getSampleCountWorkingSet() {
+  return SnapshotNum;
+}
+
+// Initialization that must be done before any instrumented code is executed.
+void initializeShadowWorkingSet() {
+  // The shadow encoding hard-codes 64-byte cache lines; refuse other values.
+  CHECK(getFlags()->cache_line_size == CacheLineSize);
+#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
+  // On these platforms shadow pages are made writable on demand from a
+  // SIGSEGV handler (see registerMemoryFaultHandler).
+  registerMemoryFaultHandler();
+#endif
+}
+
+// Starts snapshot collection when record_snapshots is enabled: sizes each
+// per-frequency circular buffer and launches the sideline sampling thread
+// at the configured sample frequency.
+void initializeWorkingSet() {
+  if (getFlags()->record_snapshots) {
+    for (u32 i = 0; i < NumFreq; ++i)
+      SizePerFreq[i].initialize(CircularBufferSizes[i]);
+    Thread.launchThread(takeSample, nullptr, getFlags()->sample_freq);
+  }
+}
+
+// Picks a human-readable time unit for MilliSec, stores its name in Unit,
+// and returns the period converted to that unit.
+static u32 getPeriodForPrinting(u32 MilliSec, const char *&Unit) {
+  if (MilliSec > 600000) {
+    Unit = "min";
+    return MilliSec / 60000;
+  }
+  if (MilliSec > 10000) {
+    Unit = "sec";
+    return MilliSec / 1000;
+  }
+  Unit = "ms";
+  return MilliSec;
+}
+
+// Converts a cache-line count into a value in a human-friendly size unit,
+// storing the unit name in Unit and returning the converted value.
+static u32 getSizeForPrinting(u32 NumOfCachelines, const char *&Unit) {
+  // We need a constant to avoid software divide support:
+  static const u32 KilobyteCachelines = (0x1 << 10) / CacheLineSize;
+  static const u32 MegabyteCachelines = KilobyteCachelines << 10;
+
+  if (NumOfCachelines > 10 * MegabyteCachelines) {
+    Unit = "MB";
+    return NumOfCachelines / MegabyteCachelines;
+  }
+  if (NumOfCachelines > 10 * KilobyteCachelines) {
+    Unit = "KB";
+    return NumOfCachelines / KilobyteCachelines;
+  }
+  Unit = "Bytes";
+  return NumOfCachelines * CacheLineSize;
+}
+
+// Emits the tool's report: the recorded per-frequency snapshot arrays (when
+// record_snapshots is enabled) followed by the total working set size for
+// the entire execution.
+void reportWorkingSet() {
+  const char *Unit;
+  if (getFlags()->record_snapshots) {
+    u32 Freq = 1;
+    Report(" Total number of samples: %u\n", SnapshotNum);
+    for (u32 i = 0; i < NumFreq; ++i) {
+      u32 Time = getPeriodForPrinting(getFlags()->sample_freq*Freq, Unit);
+      Report(" Samples array #%d at period %u %s\n", i, Time, Unit);
+      // FIXME: report whether we wrapped around and thus whether we
+      // have data on the whole run or just the last N samples.
+      for (u32 j = 0; j < SizePerFreq[i].size(); ++j) {
+        u32 Size = getSizeForPrinting(SizePerFreq[i][j], Unit);
+        Report("#%4d: %8u %s (%9u cache lines)\n", j, Size, Unit,
+               SizePerFreq[i][j]);
+      }
+      Freq = Freq << getFlags()->snapshot_step;
+    }
+  }
+
+  // Get the working set size for the entire execution.
+  u32 NumOfCachelines = computeWorkingSizeAndReset(TotalWorkingSetBitIdx);
+  u32 Size = getSizeForPrinting(NumOfCachelines, Unit);
+  Report(" %s: the total working set size: %u %s (%u cache lines)\n",
+         SanitizerToolName, Size, Unit, NumOfCachelines);
+}
+
+// Finalization: joins the sampling thread first (so no sample runs
+// concurrently with teardown), prints the report, then frees the snapshot
+// buffers.  Always returns 0.
+int finalizeWorkingSet() {
+  if (getFlags()->record_snapshots)
+    Thread.joinThread();
+  reportWorkingSet();
+  if (getFlags()->record_snapshots) {
+    for (u32 i = 0; i < NumFreq; ++i)
+      SizePerFreq[i].free();
+  }
+  return 0;
+}
+
+} // namespace __esan
--- /dev/null
+//===-- working_set.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Header for working-set-specific code.
+//===----------------------------------------------------------------------===//
+
+#ifndef WORKING_SET_H
+#define WORKING_SET_H
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __esan {
+
+void initializeWorkingSet();
+void initializeShadowWorkingSet();
+int finalizeWorkingSet();
+void reportWorkingSet();
+unsigned int getSampleCountWorkingSet();
+void processRangeAccessWorkingSet(uptr PC, uptr Addr, SIZE_T Size,
+ bool IsWrite);
+
+// Platform-dependent.
+void registerMemoryFaultHandler();
+bool processWorkingSetSignal(int SigNum, void (*Handler)(int),
+ void (**Result)(int));
+bool processWorkingSetSigaction(int SigNum, const void *Act, void *OldAct);
+bool processWorkingSetSigprocmask(int How, void *Set, void *OldSet);
+
+} // namespace __esan
+
+#endif // WORKING_SET_H
--- /dev/null
+//===-- working_set_posix.cpp -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// POSIX-specific working set tool code.
+//===----------------------------------------------------------------------===//
+
+#include "working_set.h"
+#include "esan_flags.h"
+#include "esan_shadow.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include <signal.h>
+#include <sys/mman.h>
+
+namespace __esan {
+
+// We only support regular POSIX threads with a single signal handler
+// for the whole process == thread group.
+// Thus we only need to store one app signal handler.
+// FIXME: Store and use any alternate stack and signal flags set by
+// the app. For now we just call the app handler from our handler.
+static __sanitizer_sigaction AppSigAct;
+
+// Intercepts the app's signal() calls.  For SIGSEGV we keep our handler
+// installed: we record the app's new handler and hand back the previously
+// recorded one through Result.  Returns true when the interceptor should
+// also perform the real signal() call.
+bool processWorkingSetSignal(int SigNum, void (*Handler)(int),
+                             void (**Result)(int)) {
+  VPrintf(2, "%s: %d\n", __FUNCTION__, SigNum);
+  if (SigNum == SIGSEGV) {
+    *Result = AppSigAct.handler;
+    // NOTE(review): a one-argument handler is stored into the three-argument
+    // sigaction slot via cast; handleMemoryFault later invokes it with three
+    // args, relying on the calling convention ignoring extras -- confirm.
+    AppSigAct.sigaction = (void (*)(int, void*, void*))Handler;
+    return false; // Skip real call.
+  }
+  return true;
+}
+
+// Intercepts the app's sigaction() calls so we retain control of the SIGSEGV
+// handler, recording the app's handler for delegation from handleMemoryFault.
+// Returns true when the interceptor should also perform the real sigaction().
+bool processWorkingSetSigaction(int SigNum, const void *ActVoid,
+                                void *OldActVoid) {
+  VPrintf(2, "%s: %d\n", __FUNCTION__, SigNum);
+  if (SigNum == SIGSEGV) {
+    const struct sigaction *Act = (const struct sigaction *) ActVoid;
+    struct sigaction *OldAct = (struct sigaction *) OldActVoid;
+    if (OldAct)
+      // Copy the whole struct: sizeof(OldAct) only copied a pointer's worth
+      // of bytes, leaving most of *OldAct uninitialized.
+      internal_memcpy(OldAct, &AppSigAct, sizeof(*OldAct));
+    if (Act)
+      internal_memcpy(&AppSigAct, Act, sizeof(AppSigAct));
+    return false; // Skip real call.
+  }
+  return true;
+}
+
+// Intercepts the app's sigprocmask() calls to keep SIGSEGV deliverable,
+// since our shadow-fault handling depends on receiving SIGSEGV.  Always
+// returns true so the real sigprocmask() still runs (with SIGSEGV removed
+// from any blocking set).
+bool processWorkingSetSigprocmask(int How, void *Set, void *OldSet) {
+  VPrintf(2, "%s\n", __FUNCTION__);
+  // All we need to do is ensure that SIGSEGV is not blocked.
+  // FIXME: we are not fully transparent as we do not pretend that
+  // SIGSEGV is still blocked on app queries: that would require
+  // per-thread mask tracking.
+  if (Set && (How == SIG_BLOCK || How == SIG_SETMASK)) {
+    if (internal_sigismember((__sanitizer_sigset_t *)Set, SIGSEGV)) {
+      VPrintf(1, "%s: removing SIGSEGV from the blocked set\n", __FUNCTION__);
+      internal_sigdelset((__sanitizer_sigset_t *)Set, SIGSEGV);
+    }
+  }
+  return true;
+}
+
+// Restore the default disposition for SigNum, letting the app crash rather
+// than spin on faults we cannot service.
+static void reinstateDefaultHandler(int SigNum) {
+  __sanitizer_sigaction DefaultAct;
+  internal_memset(&DefaultAct, 0, sizeof(DefaultAct));
+  DefaultAct.sigaction = (void (*)(int, void*, void*)) SIG_DFL;
+  int Rc = internal_sigaction(SigNum, &DefaultAct, nullptr);
+  CHECK(Rc == 0);
+  VPrintf(1, "Unregistered for %d handler\n", SigNum);
+}
+
+// If this is a shadow fault, we handle it here; otherwise, we pass it to the
+// app to handle it just as the app would do without our tool in place.
+static void handleMemoryFault(int SigNum, void *Info, void *Ctx) {
+  if (SigNum == SIGSEGV) {
+    // We rely on si_addr being filled in (thus we do not support old kernels).
+    siginfo_t *SigInfo = (siginfo_t *)Info;
+    uptr Addr = (uptr)SigInfo->si_addr;
+    if (isShadowMem(Addr)) {
+      VPrintf(3, "Shadow fault @%p\n", Addr);
+      // Make the faulting shadow page accessible so the interrupted access
+      // succeeds when it is retried after the handler returns.
+      uptr PageSize = GetPageSizeCached();
+      int Res = internal_mprotect((void *)RoundDownTo(Addr, PageSize),
+                                  PageSize, PROT_READ|PROT_WRITE);
+      CHECK(Res == 0);
+    } else if (AppSigAct.sigaction) {
+      // FIXME: For simplicity we ignore app options including its signal stack
+      // (we just use ours) and all the delivery flags.
+      AppSigAct.sigaction(SigNum, Info, Ctx);
+    } else {
+      // Crash instead of spinning with infinite faults.
+      reinstateDefaultHandler(SigNum);
+    }
+  } else
+    UNREACHABLE("signal not registered");
+}
+
+// Installs our SIGSEGV handler for on-demand shadow page handling, saving
+// any existing app handler so non-shadow faults can be delegated to it.
+void registerMemoryFaultHandler() {
+  // We do not use an alternate signal stack, as doing so would require
+  // setting it up for each app thread.
+  // FIXME: This could result in problems with emulating the app's signal
+  // handling if the app relies on an alternate stack for SIGSEGV.
+
+  // We require that SIGSEGV is not blocked.  We use a sigprocmask
+  // interceptor to ensure that in the future.  Here we ensure it for
+  // the current thread.  We assume there are no other threads at this
+  // point during initialization, or that at least they do not block
+  // SIGSEGV.
+  __sanitizer_sigset_t SigSet;
+  internal_sigemptyset(&SigSet);
+  // SIG_SETMASK with an empty set clears the mask; the previous SIG_BLOCK
+  // of an empty set was a no-op and left SIGSEGV potentially blocked.
+  internal_sigprocmask(SIG_SETMASK, &SigSet, nullptr);
+
+  __sanitizer_sigaction SigAct;
+  internal_memset(&SigAct, 0, sizeof(SigAct));
+  SigAct.sigaction = handleMemoryFault;
+  // We want to handle nested signals b/c we need to handle a
+  // shadow fault in an app signal handler.
+  SigAct.sa_flags = SA_SIGINFO | SA_NODEFER;
+  int Res = internal_sigaction(SIGSEGV, &SigAct, &AppSigAct);
+  CHECK(Res == 0);
+  VPrintf(1, "Registered for SIGSEGV handler\n");
+}
+
+} // namespace __esan
--- /dev/null
+//===-- sanitizer/esan_interface.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ESAN_INTERFACE_H
+#define SANITIZER_ESAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+// We declare our interface routines as weak to allow the user to avoid
+// ifdefs and instead use this pattern to allow building the same sources
+// with and without our runtime library:
+// if (__esan_report)
+// __esan_report();
+#ifdef _MSC_VER
+/* selectany is as close to weak as we'll get. */
+#define COMPILER_RT_WEAK __declspec(selectany)
+#elif __GNUC__
+#define COMPILER_RT_WEAK __attribute__((weak))
+#else
+#define COMPILER_RT_WEAK
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// This function can be called mid-run (or at the end of a run for
+// a server process that doesn't shut down normally) to request that
+// data for that point in the run be reported from the tool.
+void COMPILER_RT_WEAK __esan_report(void);
+
+// This function returns the number of samples that the esan tool has collected
+// to this point. This is useful for testing.
+unsigned int COMPILER_RT_WEAK __esan_get_sample_count(void);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_ESAN_INTERFACE_H
libdir = @libdir@
libexecdir = @libexecdir@
link_libasan = @link_libasan@
+link_libesan = @link_libesan@
link_liblsan = @link_liblsan@
link_libtsan = @link_libtsan@
link_libubsan = @link_libubsan@
libdir = @libdir@
libexecdir = @libexecdir@
link_libasan = @link_libasan@
+link_libesan = @link_libesan@
link_liblsan = @link_liblsan@
link_libtsan = @link_libtsan@
link_libubsan = @link_libubsan@
*link_liblsan: @link_liblsan@
+*link_libesan: @link_libesan@
libdir = @libdir@
libexecdir = @libexecdir@
link_libasan = @link_libasan@
+link_libesan = @link_libesan@
link_liblsan = @link_liblsan@
link_libtsan = @link_libtsan@
link_libubsan = @link_libubsan@
libdir = @libdir@
libexecdir = @libexecdir@
link_libasan = @link_libasan@
+link_libesan = @link_libesan@
link_liblsan = @link_liblsan@
link_libtsan = @link_libtsan@
link_libubsan = @link_libubsan@
libdir = @libdir@
libexecdir = @libexecdir@
link_libasan = @link_libasan@
+link_libesan = @link_libesan@
link_liblsan = @link_liblsan@
link_libtsan = @link_libtsan@
link_libubsan = @link_libubsan@
libdir = @libdir@
libexecdir = @libexecdir@
link_libasan = @link_libasan@
+link_libesan = @link_libesan@
link_liblsan = @link_liblsan@
link_libtsan = @link_libtsan@
link_libubsan = @link_libubsan@
%define quadmath_arch %ix86 x86_64 ia64
%define tsan_arch x86_64 aarch64
+%define esan_arch x86_64 armv7l
%define asan_arch x86_64 %ix86 ppc ppc64 %sparc %arm aarch64
%define itm_arch x86_64 %ix86 %arm ppc ppc64 ppc64le s390 s390x %sparc aarch64
%define atomic_arch x86_64 %ix86 %arm aarch64 ppc ppc64 ppc64le s390 s390x %sparc m68k
%post -n libtsan -p /sbin/ldconfig
%postun -n libtsan -p /sbin/ldconfig
+%package -n libesan
+Summary: The GNU Compiler Efficiency Sanitizer Runtime Library
+License: MIT
+Group: Development/Languages
+%description -n libesan
+The runtime library needed to run programs compiled with the
+-fsanitize=efficiency-working-set option of the GNU Compiler Collection (GCC).
+%post -n libesan -p /sbin/ldconfig
+%postun -n libesan -p /sbin/ldconfig
+
%package -n libatomic
Summary: The GNU Compiler Atomic Operations Runtime Library
License: GPL-3.0-with-GCC-exception
RPM_OPT_FLAGS="$RPM_OPT_FLAGS -marm -Wa,-mimplicit-it=arm -fno-omit-frame-pointer"
%endif
}
+%{?esan:%gcc_unforce_options}
rm -rf obj
mkdir obj
cd obj
%endif
# move libraries to libdir
-for lib in asan atomic cilkrts gfortran gomp cc1 itm lsan quadmath stdc++ supc++ tsan ubsan
+for lib in asan atomic cilkrts gfortran gomp cc1 itm lsan quadmath stdc++ supc++ tsan ubsan esan
do
[ -e %{buildroot}%{libsubdir}/lib$lib.a ] && mv %{buildroot}%{libsubdir}/lib$lib.a %{buildroot}%{libdir}/
[ -e %{buildroot}%{libsubdir}/lib$lib.so ] && mv %{buildroot}%{libsubdir}/lib$lib.so* %{buildroot}%{libdir}/
%ifarch %tsan_arch
%exclude %{libdir}/libtsan.so
%endif
+%ifarch %esan_arch
+%exclude %{libdir}/libesan.so
+%endif
%files c++
%defattr(-,root,root)
%{libdir}/libtsan.so*
%endif
+%ifarch %esan_arch
+%files -n libesan
+%manifest gcc.manifest
+%license libsanitizer/LICENSE.TXT
+%defattr(-,root,root)
+%{libdir}/libesan.so*
+%endif
+
%ifarch %atomic_arch
%files -n libatomic
%manifest gcc.manifest
%define quadmath_arch %ix86 x86_64 ia64
%define tsan_arch x86_64 aarch64
+%define esan_arch x86_64 armv7l
%define asan_arch x86_64 %ix86 ppc ppc64 %sparc %arm aarch64
%define itm_arch x86_64 %ix86 %arm ppc ppc64 ppc64le s390 s390x %sparc aarch64
%define atomic_arch x86_64 %ix86 %arm aarch64 ppc ppc64 ppc64le s390 s390x %sparc m68k
%post -n libtsan -p /sbin/ldconfig
%postun -n libtsan -p /sbin/ldconfig
+%package -n libesan
+Summary: The GNU Compiler Efficiency Sanitizer Runtime Library
+License: MIT
+Group: Development/Languages
+%description -n libesan
+The runtime library needed to run programs compiled with the
+-fsanitize=efficiency-working-set option of the GNU Compiler Collection (GCC).
+%post -n libesan -p /sbin/ldconfig
+%postun -n libesan -p /sbin/ldconfig
+
%package -n libatomic
Summary: The GNU Compiler Atomic Operations Runtime Library
License: GPL-3.0-with-GCC-exception
RPM_OPT_FLAGS="$RPM_OPT_FLAGS -marm -Wa,-mimplicit-it=arm -fno-omit-frame-pointer"
%endif
}
+%{?esan:%gcc_unforce_options}
rm -rf obj
mkdir obj
cd obj
%endif
# move libraries to libdir
-for lib in asan atomic cilkrts gfortran gomp cc1 itm lsan quadmath stdc++ supc++ tsan ubsan
+for lib in asan atomic cilkrts gfortran gomp cc1 itm lsan quadmath stdc++ supc++ tsan ubsan esan
do
[ -e %{buildroot}%{libsubdir}/lib$lib.a ] && mv %{buildroot}%{libsubdir}/lib$lib.a %{buildroot}%{libdir}/
[ -e %{buildroot}%{libsubdir}/lib$lib.so ] && mv %{buildroot}%{libsubdir}/lib$lib.so* %{buildroot}%{libdir}/
%ifarch %tsan_arch
%exclude %{libdir}/libtsan.so
%endif
+%ifarch %esan_arch
+%exclude %{libdir}/libesan.so
+%endif
%files c++
%defattr(-,root,root)
%{libdir}/libtsan.so*
%endif
+%ifarch %esan_arch
+%files -n libesan
+%manifest gcc.manifest
+%license libsanitizer/LICENSE.TXT
+%defattr(-,root,root)
+%{libdir}/libesan.so*
+%endif
+
%ifarch %atomic_arch
%files -n libatomic
%manifest gcc.manifest
%define quadmath_arch %ix86 x86_64 ia64
%define tsan_arch x86_64 aarch64
+%define esan_arch x86_64 armv7l
%define asan_arch x86_64 %ix86 ppc ppc64 %sparc %arm aarch64
%define itm_arch x86_64 %ix86 %arm ppc ppc64 ppc64le s390 s390x %sparc aarch64
%define atomic_arch x86_64 %ix86 %arm aarch64 ppc ppc64 ppc64le s390 s390x %sparc m68k
%post -n libtsan -p /sbin/ldconfig
%postun -n libtsan -p /sbin/ldconfig
+%package -n libesan
+Summary: The GNU Compiler Efficiency Sanitizer Runtime Library
+License: MIT
+Group: Development/Languages
+%description -n libesan
+The runtime library needed to run programs compiled with the
+-fsanitize=efficiency-working-set option of the GNU Compiler Collection (GCC).
+%post -n libesan -p /sbin/ldconfig
+%postun -n libesan -p /sbin/ldconfig
+
%package -n libatomic
Summary: The GNU Compiler Atomic Operations Runtime Library
License: GPL-3.0-with-GCC-exception
RPM_OPT_FLAGS="$RPM_OPT_FLAGS -marm -Wa,-mimplicit-it=arm -fno-omit-frame-pointer"
%endif
}
+%{?esan:%gcc_unforce_options}
rm -rf obj
mkdir obj
cd obj
%endif
# move libraries to libdir
-for lib in asan atomic cilkrts gfortran gomp cc1 itm lsan quadmath stdc++ supc++ tsan ubsan
+for lib in asan atomic cilkrts gfortran gomp cc1 itm lsan quadmath stdc++ supc++ tsan ubsan esan
do
[ -e %{buildroot}%{libsubdir}/lib$lib.a ] && mv %{buildroot}%{libsubdir}/lib$lib.a %{buildroot}%{libdir}/
[ -e %{buildroot}%{libsubdir}/lib$lib.so ] && mv %{buildroot}%{libsubdir}/lib$lib.so* %{buildroot}%{libdir}/
%ifarch %tsan_arch
%exclude %{libdir}/libtsan.so
%endif
+%ifarch %esan_arch
+%exclude %{libdir}/libesan.so
+%endif
%files c++
%defattr(-,root,root)
%{libdir}/libtsan.so*
%endif
+%ifarch %esan_arch
+%files -n libesan
+%manifest gcc.manifest
+%license libsanitizer/LICENSE.TXT
+%defattr(-,root,root)
+%{libdir}/libesan.so*
+%endif
+
%ifarch %atomic_arch
%files -n libatomic
%manifest gcc.manifest
%define quadmath_arch %ix86 x86_64 ia64
%define tsan_arch x86_64 aarch64
+%define esan_arch x86_64 armv7l
%define asan_arch x86_64 %ix86 ppc ppc64 %sparc %arm aarch64
%define itm_arch x86_64 %ix86 %arm ppc ppc64 ppc64le s390 s390x %sparc aarch64
%define atomic_arch x86_64 %ix86 %arm aarch64 ppc ppc64 ppc64le s390 s390x %sparc m68k
%post -n libtsan -p /sbin/ldconfig
%postun -n libtsan -p /sbin/ldconfig
+%package -n libesan
+Summary: The GNU Compiler Efficiency Sanitizer Runtime Library
+License: MIT
+Group: Development/Languages
+%description -n libesan
+The runtime library needed to run programs compiled with the
+-fsanitize=efficiency-working-set option of the GNU Compiler Collection (GCC).
+%post -n libesan -p /sbin/ldconfig
+%postun -n libesan -p /sbin/ldconfig
+
%package -n libatomic
Summary: The GNU Compiler Atomic Operations Runtime Library
License: GPL-3.0-with-GCC-exception
RPM_OPT_FLAGS="$RPM_OPT_FLAGS -marm -Wa,-mimplicit-it=arm -fno-omit-frame-pointer"
%endif
}
+%{?esan:%gcc_unforce_options}
rm -rf obj
mkdir obj
cd obj
%endif
# move libraries to libdir
-for lib in asan atomic cilkrts gfortran gomp cc1 itm lsan quadmath stdc++ supc++ tsan ubsan
+for lib in asan atomic cilkrts gfortran gomp cc1 itm lsan quadmath stdc++ supc++ tsan ubsan esan
do
[ -e %{buildroot}%{libsubdir}/lib$lib.a ] && mv %{buildroot}%{libsubdir}/lib$lib.a %{buildroot}%{libdir}/
[ -e %{buildroot}%{libsubdir}/lib$lib.so ] && mv %{buildroot}%{libsubdir}/lib$lib.so* %{buildroot}%{libdir}/
%ifarch %tsan_arch
%exclude %{libdir}/libtsan.so
%endif
+%ifarch %esan_arch
+%exclude %{libdir}/libesan.so
+%endif
%files c++
%defattr(-,root,root)
%{libdir}/libtsan.so*
%endif
+%ifarch %esan_arch
+%files -n libesan
+%manifest gcc.manifest
+%license libsanitizer/LICENSE.TXT
+%defattr(-,root,root)
+%{libdir}/libesan.so*
+%endif
+
%ifarch %atomic_arch
%files -n libatomic
%manifest gcc.manifest