+2014-04-09  Adhemerval Zanella  <azanella@linux.vnet.ibm.com>
+
+ * bits/string.h (_STRING_ARCH_unaligned): Define it to 0.
+ * crypt/sha256.c (_STRING_ARCH_unaligned): Check its value instead of
+ whether it is defined.
+ * iconv/gconv_simple.c (_STRING_ARCH_unaligned): Likewise.
+ * iconv/loop.c (_STRING_ARCH_unaligned): Likewise.
+ * iconv/skeleton.c (_STRING_ARCH_unaligned): Likewise.
+ * nscd/nscd_gethst_r.c (_STRING_ARCH_unaligned): Likewise.
+ * nscd/nscd_getserv_r.c (_STRING_ARCH_unaligned): Likewise.
+ * nscd/nscd_helper.c (_STRING_ARCH_unaligned): Likewise.
+ * resolv/res_send.c (_STRING_ARCH_unaligned): Likewise.
+
2014-04-09  Peter Brett  <peter@peter-b.co.uk>

	[BZ #15514]
#ifndef _BITS_STRING_H
#define _BITS_STRING_H 1
+/* Define to 1 if the architecture can access unaligned multi-byte
+   variables.  */
+#define _STRING_ARCH_unaligned 0
#endif /* bits/string.h */
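
(A minimal sketch, not part of the patch: the generic bits/string.h now
always defines _STRING_ARCH_unaligned, to 0 by default, with architectures
that tolerate unaligned loads expected to override it to 1.  Call sites
therefore test the macro's value with `#if' instead of its existence with
`#ifdef', which also lets it appear inside arithmetic preprocessor
expressions such as POSSIBLY_UNALIGNED below, where a `defined' token
produced by macro expansion is not portable.  The helper read_u32 is purely
illustrative, not glibc code.)

    #include <stdint.h>
    #include <string.h>

    #ifndef _STRING_ARCH_unaligned
    # define _STRING_ARCH_unaligned 0   /* generic default, as above */
    #endif

    static inline uint32_t
    read_u32 (const unsigned char *p)
    {
    #if _STRING_ARCH_unaligned
      return *(const uint32_t *) p;     /* direct load is safe on this target */
    #else
      uint32_t v;
      memcpy (&v, p, sizeof v);         /* byte-wise copy avoids alignment traps */
      return v;
    #endif
    }
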
memcpy (&ctx->buffer[bytes], fillbuf, pad);
/* Put the 64-bit file length in *bits* at the end of the buffer. */
-#ifdef _STRING_ARCH_unaligned
+#if _STRING_ARCH_unaligned
ctx->buffer64[(bytes + pad) / 8] = SWAP64 (ctx->total64 << 3);
#else
ctx->buffer32[(bytes + pad + 4) / 4] = SWAP (ctx->total[TOTAL64_low] << 3);
return result;
}
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
static inline int
__attribute ((always_inline))
internal_ucs4_loop_unaligned (struct __gconv_step *step,
return result;
}
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
static inline int
__attribute ((always_inline))
ucs4_internal_loop_unaligned (struct __gconv_step *step,
return result;
}
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
static inline int
__attribute ((always_inline))
internal_ucs4le_loop_unaligned (struct __gconv_step *step,
return result;
}
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
static inline int
__attribute ((always_inline))
ucs4le_internal_loop_unaligned (struct __gconv_step *step,
representations with a fixed width of 2 or 4 bytes. But if we cannot
access unaligned memory we still have to read byte-wise. */
#undef FCTNAME2
-#if defined _STRING_ARCH_unaligned || !defined DEFINE_UNALIGNED
+#if _STRING_ARCH_unaligned || !defined DEFINE_UNALIGNED
/* We can handle unaligned memory access. */
# define get16(addr) *((const uint16_t *) (addr))
# define get32(addr) *((const uint32_t *) (addr))
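
(Sketch only: when _STRING_ARCH_unaligned is 0 and the file is re-included
with DEFINE_UNALIGNED set, the #else branch of the conditional above has to
supply byte-wise replacements for get16/get32 so that no unaligned load is
issued.  The name get32_bytewise and the least-significant-byte-first order
below are assumptions for illustration, not the file's actual definitions.)

    /* Hypothetical byte-wise 32-bit read, least significant byte first.  */
    # define get32_bytewise(addr) \
      ((uint32_t) ((const unsigned char *) (addr))[0]			      \
       | ((uint32_t) ((const unsigned char *) (addr))[1] << 8)		      \
       | ((uint32_t) ((const unsigned char *) (addr))[2] << 16)	      \
       | ((uint32_t) ((const unsigned char *) (addr))[3] << 24))
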
/* Include the file a second time to define the function to handle
unaligned access. */
-#if !defined DEFINE_UNALIGNED && !defined _STRING_ARCH_unaligned \
+#if !defined DEFINE_UNALIGNED && !_STRING_ARCH_unaligned \
&& MIN_NEEDED_INPUT != 1 && MAX_NEEDED_INPUT % MIN_NEEDED_INPUT == 0 \
&& MIN_NEEDED_OUTPUT != 1 && MAX_NEEDED_OUTPUT % MIN_NEEDED_OUTPUT == 0
# undef get16
/* Define macros which can access unaligned buffers. These macros are
supposed to be used only in code outside the inner loops. For the inner
loops we have other definitions which allow optimized access. */
-#ifdef _STRING_ARCH_unaligned
+#if _STRING_ARCH_unaligned
/* We can handle unaligned memory access. */
# define get16u(addr) *((const uint16_t *) (addr))
# define get32u(addr) *((const uint32_t *) (addr))
INTERNAL, for which the subexpression evaluates to 1, but INTERNAL
buffers are always aligned correctly. */
#define POSSIBLY_UNALIGNED \
- (!defined _STRING_ARCH_unaligned \
+ (!_STRING_ARCH_unaligned \
&& (((FROM_LOOP_MIN_NEEDED_FROM != 1 \
&& FROM_LOOP_MAX_NEEDED_FROM % FROM_LOOP_MIN_NEEDED_FROM == 0) \
&& (FROM_LOOP_MIN_NEEDED_TO != 1 \
goto out;
}
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
/* The aliases_len array in the mapped database might very
well be unaligned. We will access it word-wise so on
platforms which do not tolerate unaligned accesses we
> recend, 0))
goto out;
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
/* The aliases_len array in the mapped database might very
well be unaligned. We will access it word-wise so on
platforms which do not tolerate unaligned accesses we
struct hashentry *here = (struct hashentry *) (mapped->data + work);
ref_t here_key, here_packet;
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
/* Although during garbage collection when moving struct hashentry
records around we first copy from old to new location and then
adjust pointer from previous hashentry to it, there is no barrier
struct datahead *dh
= (struct datahead *) (mapped->data + here_packet);
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
return NULL;
#endif
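
(Sketch of the runtime-alignment pattern used in the nscd readers above,
under the assumption that _STRING_ARCH_unaligned is 0 on the target: a
pointer into the shared cache mapping is dereferenced only after its natural
alignment has been verified.  aligned_for is a hypothetical helper written
for this illustration, not an nscd function.)

    #include <stddef.h>
    #include <stdint.h>

    /* Return nonzero if P is aligned to ALIGNMENT, a power of two.  */
    static inline int
    aligned_for (const void *p, size_t alignment)
    {
      return ((uintptr_t) p & (alignment - 1)) == 0;
    }

    /* Usage mirroring the checks above (dh points into the mapped data):
         if (!aligned_for (dh, __alignof__ (*dh)))
           return NULL;
       A misaligned record is treated as a cache miss.  */
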
struct hashentry *trailelem;
trailelem = (struct hashentry *) (mapped->data + trail);
-#ifndef _STRING_ARCH_unaligned
+#if !_STRING_ARCH_unaligned
/* We have to redo the checks. Maybe the data changed. */
if ((uintptr_t) trailelem & (__alignof__ (*trailelem) - 1))
return NULL;
/* No buffer allocated for the first
reply. We can try to use the rest
of the user-provided buffer. */
-#ifdef _STRING_ARCH_unaligned
+#if _STRING_ARCH_unaligned
*anssizp2 = orig_anssizp - resplen;
*ansp2 = *ansp + resplen;
#else
/* No buffer allocated for the first
reply. We can try to use the rest
of the user-provided buffer. */
-#ifdef _STRING_ARCH_unaligned
+#if _STRING_ARCH_unaligned
*anssizp2 = orig_anssizp - resplen;
*ansp2 = *ansp + resplen;
#else