From ec3fafa9ec7d16b9d89076efd3bac1d1af0502b8 Mon Sep 17 00:00:00 2001
From: David Malcolm <dmalcolm@redhat.com>
Date: Tue, 15 Jun 2021 17:53:34 -0400
Subject: [PATCH] analyzer: fix bitfield endianness issues [PR99212,PR101082]

Looks like my patch for PR analyzer/99212 implicitly assumed
little-endian, which the following patch fixes.

Fixes bitfields-1.c on:
- armeb-none-linux-gnueabihf
- cris-elf
- powerpc64-darwin
- s390-linux-gnu

gcc/analyzer/ChangeLog:
        PR analyzer/99212
        PR analyzer/101082
        * engine.cc: Include "target.h".
        (impl_run_checkers): Log BITS_BIG_ENDIAN, BYTES_BIG_ENDIAN,
        and WORDS_BIG_ENDIAN.
        * region-model-manager.cc
        (region_model_manager::maybe_fold_binop): Move support for
        masking via ARG0 & CST into...
        (region_model_manager::maybe_undo_optimize_bit_field_compare):
        ...this new function.  Flatten by converting from nested
        conditionals to a series of early return statements to reject
        failures.  Reject if type is not unsigned_char_type_node.
        Handle BYTES_BIG_ENDIAN when determining which bits are bound
        in the binding_map.
        * region-model.h
        (region_model_manager::maybe_undo_optimize_bit_field_compare):
        New decl.
        * store.cc (bit_range::dump): New function.
        * store.h (bit_range::dump): New decl.

Signed-off-by: David Malcolm <dmalcolm@redhat.com>
---
 gcc/analyzer/engine.cc               |  8 +++
 gcc/analyzer/region-model-manager.cc | 94 ++++++++++++++++++++++--------------
 gcc/analyzer/region-model.h          |  3 ++
 gcc/analyzer/store.cc                | 12 +++++
 gcc/analyzer/store.h                 |  1 +
 5 files changed, 83 insertions(+), 35 deletions(-)

diff --git a/gcc/analyzer/engine.cc b/gcc/analyzer/engine.cc
index df04b0b..529965a 100644
--- a/gcc/analyzer/engine.cc
+++ b/gcc/analyzer/engine.cc
@@ -64,6 +64,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "analyzer/bar-chart.h"
 #include <zlib.h>
 #include "plugin.h"
+#include "target.h"
 
 /* For an overview, see gcc/doc/analyzer.texi.  */
 
@@ -4845,6 +4846,13 @@ impl_run_checkers (logger *logger)
 {
   LOG_SCOPE (logger);
 
+  if (logger)
+    {
+      logger->log ("BITS_BIG_ENDIAN: %i", BITS_BIG_ENDIAN ? 1 : 0);
+      logger->log ("BYTES_BIG_ENDIAN: %i", BYTES_BIG_ENDIAN ? 1 : 0);
+      logger->log ("WORDS_BIG_ENDIAN: %i", WORDS_BIG_ENDIAN ? 1 : 0);
+    }
+
   /* If using LTO, ensure that the cgraph nodes have function bodies.  */
   cgraph_node *node;
   FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
diff --git a/gcc/analyzer/region-model-manager.cc b/gcc/analyzer/region-model-manager.cc
index 0ca0c8a..621eff0 100644
--- a/gcc/analyzer/region-model-manager.cc
+++ b/gcc/analyzer/region-model-manager.cc
@@ -431,6 +431,60 @@ region_model_manager::get_or_create_cast (tree type, const svalue *arg)
   return get_or_create_unaryop (type, op, arg);
 }
 
+/* Subroutine of region_model_manager::maybe_fold_binop for handling
+   (TYPE)(COMPOUND_SVAL BIT_AND_EXPR CST) that may have been generated by
+   optimize_bit_field_compare, where CST is from ARG1.
+
+   Support masking out bits from a compound_svalue for comparing a bitfield
+   against a value, as generated by optimize_bit_field_compare for
+   BITFIELD == VALUE.
+
+   If COMPOUND_SVAL has a value for the appropriate bits, return it,
+   shifted accordingly.
+   Otherwise return NULL.  */
+
+const svalue *
+region_model_manager::
+maybe_undo_optimize_bit_field_compare (tree type,
+                                       const compound_svalue *compound_sval,
+                                       tree cst,
+                                       const svalue *arg1)
+{
+  if (type != unsigned_char_type_node)
+    return NULL;
+
+  const binding_map &map = compound_sval->get_map ();
+  unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (cst);
+  /* If "mask" is a contiguous range of set bits, see if the
+     compound_sval has a value for those bits.  */
+  bit_range bits (0, 0);
+  if (!bit_range::from_mask (mask, &bits))
+    return NULL;
+
+  bit_range bound_bits (bits);
+  if (BYTES_BIG_ENDIAN)
+    bound_bits = bit_range (BITS_PER_UNIT - bits.get_next_bit_offset (),
+                            bits.m_size_in_bits);
+  const concrete_binding *conc
+    = get_store_manager ()->get_concrete_binding (bound_bits, BK_direct);
+  const svalue *sval = map.get (conc);
+  if (!sval)
+    return NULL;
+
+  /* We have a value;
+     shift it by the correct number of bits.  */
+  const svalue *lhs = get_or_create_cast (type, sval);
+  HOST_WIDE_INT bit_offset = bits.get_start_bit_offset ().to_shwi ();
+  tree shift_amt = build_int_cst (type, bit_offset);
+  const svalue *shift_sval = get_or_create_constant_svalue (shift_amt);
+  const svalue *shifted_sval = get_or_create_binop (type, LSHIFT_EXPR,
+                                                    lhs, shift_sval);
+  /* Reapply the mask (needed for negative
+     signed bitfields).  */
+  return get_or_create_binop (type, BIT_AND_EXPR,
+                              shifted_sval, arg1);
+}
+
 /* Subroutine of region_model_manager::get_or_create_binop.
    Attempt to fold the inputs and return a simpler svalue *.
    Otherwise, return NULL.  */
@@ -485,43 +539,13 @@ region_model_manager::maybe_fold_binop (tree type, enum tree_code op,
          /* "(ARG0 & 0)" -> "0".  */
          return get_or_create_constant_svalue (build_int_cst (type, 0));
 
-      /* Support masking out bits from a compound_svalue, as this
-         is generated when accessing bitfields.  */
       if (const compound_svalue *compound_sval
            = arg0->dyn_cast_compound_svalue ())
-        {
-          const binding_map &map = compound_sval->get_map ();
-          unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (cst1);
-          /* If "mask" is a contiguous range of set bits, see if the
-             compound_sval has a value for those bits.  */
-          bit_range bits (0, 0);
-          if (bit_range::from_mask (mask, &bits))
-            {
-              const concrete_binding *conc
-                = get_store_manager ()->get_concrete_binding (bits,
-                                                              BK_direct);
-              if (const svalue *sval = map.get (conc))
-                {
-                  /* We have a value;
-                     shift it by the correct number of bits.  */
-                  const svalue *lhs = get_or_create_cast (type, sval);
-                  HOST_WIDE_INT bit_offset
-                    = bits.get_start_bit_offset ().to_shwi ();
-                  tree shift_amt = build_int_cst (type, bit_offset);
-                  const svalue *shift_sval
-                    = get_or_create_constant_svalue (shift_amt);
-                  const svalue *shifted_sval
-                    = get_or_create_binop (type,
-                                           LSHIFT_EXPR,
-                                           lhs, shift_sval);
-                  /* Reapply the mask (needed for negative
-                     signed bitfields).  */
-                  return get_or_create_binop (type,
-                                              BIT_AND_EXPR,
-                                              shifted_sval, arg1);
-                }
-            }
-        }
+        if (const svalue *sval
+              = maybe_undo_optimize_bit_field_compare (type,
+                                                       compound_sval,
+                                                       cst1, arg1))
+          return sval;
        }
       break;
     case TRUTH_ANDIF_EXPR:
diff --git a/gcc/analyzer/region-model.h b/gcc/analyzer/region-model.h
index 8b669df..7b12d35 100644
--- a/gcc/analyzer/region-model.h
+++ b/gcc/analyzer/region-model.h
@@ -320,6 +320,9 @@ private:
   const svalue *maybe_fold_sub_svalue (tree type,
                                        const svalue *parent_svalue,
                                        const region *subregion);
+  const svalue *maybe_undo_optimize_bit_field_compare (tree type,
+                                                       const compound_svalue *compound_sval,
+                                                       tree cst, const svalue *arg1);
 
   unsigned m_next_region_id;
   root_region m_root_region;
diff --git a/gcc/analyzer/store.cc b/gcc/analyzer/store.cc
index 699de94..d75fb2c 100644
--- a/gcc/analyzer/store.cc
+++ b/gcc/analyzer/store.cc
@@ -249,6 +249,18 @@ bit_range::dump_to_pp (pretty_printer *pp) const
   pp_wide_int (pp, get_next_bit_offset (), SIGNED);
 }
 
+/* Dump this object to stderr.  */
+
+DEBUG_FUNCTION void
+bit_range::dump () const
+{
+  pretty_printer pp;
+  pp.buffer->stream = stderr;
+  dump_to_pp (&pp);
+  pp_newline (&pp);
+  pp_flush (&pp);
+}
+
 int
 bit_range::cmp (const bit_range &br1, const bit_range &br2)
 {
diff --git a/gcc/analyzer/store.h b/gcc/analyzer/store.h
index 7bd2824..ca9ff69 100644
--- a/gcc/analyzer/store.h
+++ b/gcc/analyzer/store.h
@@ -275,6 +275,7 @@ struct bit_range
   {}
 
   void dump_to_pp (pretty_printer *pp) const;
+  void dump () const;
 
   bit_offset_t get_start_bit_offset () const
   {
-- 
2.7.4
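
[Aside, not part of the patch itself.]  The heart of the fix is the bound_bits
computation in maybe_undo_optimize_bit_field_compare: the mask that
optimize_bit_field_compare applies to the containing byte describes the
bitfield's bits counted from the least-significant end, but on BYTES_BIG_ENDIAN
targets the binding_map records the bits counted from the other end of the
byte, so the range has to be mirrored via
BITS_PER_UNIT - bits.get_next_bit_offset ().  The standalone C++ sketch below
shows only that mirroring; bit_range_ish and from_mask are simplified
stand-ins for the analyzer's bit_range and bit_range::from_mask (not the real
API), and an 8-bit byte is assumed.

#include <cstdio>

struct bit_range_ish { unsigned start; unsigned size; };  /* start + width, in bits */

/* Extract the contiguous run of set bits from MASK; fail if the set bits
   are not contiguous (roughly what bit_range::from_mask checks).  */
static bool
from_mask (unsigned long mask, bit_range_ish *out)
{
  if (mask == 0)
    return false;
  unsigned start = 0;
  while (!(mask & 1))
    { mask >>= 1; start++; }
  unsigned size = 0;
  while (mask & 1)
    { mask >>= 1; size++; }
  if (mask != 0)
    return false;               /* set bits were not contiguous */
  out->start = start;
  out->size = size;
  return true;
}

int
main ()
{
  const unsigned bits_per_unit = 8;  /* assumption: 8-bit bytes */
  unsigned long mask = 0x60;         /* e.g. a 2-bit bitfield occupying bits 5-6 */
  bit_range_ish bits;
  if (!from_mask (mask, &bits))
    return 1;

  /* Little-endian byte layout: look up the mask's own range.  */
  std::printf ("LE bound range: start %u, size %u\n", bits.start, bits.size);

  /* Big-endian byte layout: mirror the range within the byte, as the patch's
     BITS_PER_UNIT - get_next_bit_offset () adjustment does.  */
  unsigned next = bits.start + bits.size;
  std::printf ("BE bound range: start %u, size %u\n",
               bits_per_unit - next, bits.size);
  return 0;
}

For mask 0x60 this prints "start 5, size 2" for the little-endian layout and
"start 1, size 2" for the big-endian one, which is the same flip the patch
applies before looking up the binding in the compound_svalue's binding_map.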