* src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Fix comments.
* src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h: Fix comments.
* src/atomic_ops/sysdeps/all_atomic_load_store.h: Fix comments.
* src/atomic_ops/sysdeps/atomic_load_store.h: Fix comments.
* src/atomic_ops/sysdeps/char_atomic_load_store.h: Fix comments.
* src/atomic_ops/sysdeps/gcc/arm.h: Fix comments.
* src/atomic_ops/sysdeps/gcc/x86.h: Fix comments.
* src/atomic_ops/sysdeps/gcc/x86_64.h: Fix comments.
* src/atomic_ops/sysdeps/hpc/hppa.h: Fix comments.
* src/atomic_ops/sysdeps/hpc/ia64.h: Fix comments.
* src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Fix comments.
* src/atomic_ops/sysdeps/int_atomic_load_store.h: Fix comments.
* src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h: Fix comments.
* src/atomic_ops/sysdeps/short_atomic_load_store.h: Fix comments.
* src/atomic_ops.c: Fix comments.
* src/atomic_ops.h: Fix comments.
* src/atomic_ops_stack.c: Fix comments.
* src/atomic_ops_stack.h: Fix comments.
+2008-10-21 Hans Boehm <Hans.Boehm@hp.com> (really Ivan Maidanski)
+ * src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Fix comments.
+ * src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h: Fix comments.
+ * src/atomic_ops/sysdeps/all_atomic_load_store.h: Fix comments.
+ * src/atomic_ops/sysdeps/atomic_load_store.h: Fix comments.
+ * src/atomic_ops/sysdeps/char_atomic_load_store.h: Fix comments.
+ * src/atomic_ops/sysdeps/gcc/arm.h: Fix comments.
+ * src/atomic_ops/sysdeps/gcc/x86.h: Fix comments.
+ * src/atomic_ops/sysdeps/gcc/x86_64.h: Fix comments.
+ * src/atomic_ops/sysdeps/hpc/hppa.h: Fix comments.
+ * src/atomic_ops/sysdeps/hpc/ia64.h: Fix comments.
+ * src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Fix comments.
+ * src/atomic_ops/sysdeps/int_atomic_load_store.h: Fix comments.
+ * src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h: Fix comments.
+ * src/atomic_ops/sysdeps/short_atomic_load_store.h: Fix comments.
+ * src/atomic_ops.c: Fix comments.
+ * src/atomic_ops.h: Fix comments.
+ * src/atomic_ops_stack.c: Fix comments.
+ * src/atomic_ops_stack.h: Fix comments.
+
2008-10-20 Hans Boehm <Hans.Boehm@hp.com> (really Andrew Agno)
* src/atomic_ops/sysdeps/gcc/x86_64.h (AO_int_fetch_and_add_full):
fix return type.
* Out of line compare-and-swap emulation based on test and set.
*
* We use a small table of locks for different compare_and_swap locations.
- * Before we update perform a compare-and-swap, we grap the corresponding
+ * Before we perform a compare-and-swap, we grab the corresponding
* lock. Different locations may hash to the same lock, but since we
* never acquire more than one lock at a time, this can't deadlock.
* We explicitly disable signals while we perform this operation.
*
- * FIXME: We should probably also suppport emulation based on Lamport
+ * FIXME: We should probably also support emulation based on Lamport
* locks, since we may not have test_and_set either.
*/
#define AO_HASH_SIZE 16
/* later writes. */
/* _full: Ordered with respect to both earlier and later memops.*/
/* _release_write: Ordered with respect to earlier writes. */
-/* _acquire_read: Ordered with repsect to later reads. */
+/* _acquire_read: Ordered with respect to later reads. */
/* */
/* Currently we try to define the following atomic memory */
/* operations, in combination with the above barriers: */
/* */
/* The architecture dependent section: */
/* This defines atomic operations that have direct hardware */
-/* support on a particular platform, mostly by uncluding the */
+/* support on a particular platform, mostly by including the */
/* appropriate compiler- and hardware-dependent file. */
/* */
/* The synthesis section: */
/* We make no attempt to synthesize operations in ways that */
/* effectively introduce locks, except for the debugging/demo */
/* pthread-based implementation at the beginning. A more */
-/* relistic implementation that falls back to locks could be */
+/* realistic implementation that falls back to locks could be */
/* added as a higher layer. But that would sacrifice */
/* usability from signal handlers. */
/* The synthesis section is implemented almost entirely in */
*/
/*
- * Definitions for architecturs on which loads and stores of AO_t are
+ * Definitions for architectures on which loads and stores of AO_t are
 * atomic for all legal alignments.
*/
/*
* Describes architectures on which AO_t, unsigned char, unsigned short,
- * and unsigned int loads and strores are atomic for all normally legal alignments.
+ * and unsigned int loads and stores are atomic for all normally legal
+ * alignments.
*/
#include "aligned_atomic_load_store.h"
#include "char_atomic_load_store.h"
/*
* Describes architectures on which AO_t, unsigned char, unsigned short,
- * and unsigned int loads and strores are atomic for all normally legal
+ * and unsigned int loads and stores are atomic for all normally legal
* alignments.
*/
#include "atomic_load_store.h"
*/
/*
- * Definitions for architecturs on which loads and stores of AO_t are
- * atomic fo all legal alignments.
+ * Definitions for architectures on which loads and stores of AO_t are
+ * atomic for all legal alignments.
*/
AO_INLINE AO_t
*/
/*
- * Definitions for architecturs on which loads and stores of unsigned char are
+ * Definitions for architectures on which loads and stores of unsigned char are
* atomic for all legal alignments.
*/
/* to be stored. Both registers must be different from addr. */
/* Make the address operand an early clobber output so it */
/* doesn't overlap with the other operands. The early clobber*/
- /* on oldval is neccessary to prevent the compiler allocating */
+ /* on oldval is necessary to prevent the compiler allocating */
/* them to the same register if they are both unused. */
__asm__ __volatile__("swp %0, %2, [%3]"
: "=&r"(oldval), "=&r"(addr)
char result;
#if __PIC__
/* If PIC is turned on, we can't use %ebx as it is reserved for the
- GOT poiner. We can save and restore %ebx because GCC won't be
+ GOT pointer. We can save and restore %ebx because GCC won't be
using it for anything else (such as any of the m operands) */
__asm__ __volatile__("pushl %%ebx;" /* save ebx used for PIC GOT ptr */
"movl %6,%%ebx;" /* move new_val2 to %ebx */
#else
/* this one provides spinlock based emulation of CAS implemented in */
/* atomic_ops.c. We probably do not want to do this here, since it is */
-/* not attomic with respect to other kinds of updates of *addr. On the */
+/* not atomic with respect to other kinds of updates of *addr. On the */
/* other hand, this may be a useful facility on occasion. */
#ifdef AO_WEAK_DOUBLE_CAS_EMULATION
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- * Derived from the corresponsing header file for gcc.
+ * Derived from the corresponding header file for gcc.
*
*/
/*
 * This file specifies Itanium primitives for use with the HP compiler
- * unde HP/UX. We use intrinsics instead of the inline assembly code in the
+ * under HP/UX. We use intrinsics instead of the inline assembly code in the
* gcc file.
*/
*/
/*
- * Definitions for architecturs on which loads and stores of unsigned int are
- * atomic fo all legal alignments.
+ * Definitions for architectures on which loads and stores of unsigned int are
+ * atomic for all legal alignments.
*/
AO_INLINE unsigned int
*/
/*
- * Definitions for architecturs on which loads and stores of unsigned int are
+ * Definitions for architectures on which loads and stores of unsigned int are
* atomic for all legal alignments.
*/
*/
/*
- * Definitions for architecturs on which loads and stores of unsigned short are
- * atomic fo all legal alignments.
+ * Definitions for architectures on which loads and stores of unsigned short
+ * are atomic for all legal alignments.
*/
AO_INLINE unsigned short
*/
/*
- * Definitions for architecturs on which loads and stores of unsigned short are
- * atomic for all legal alignments.
+ * Definitions for architectures on which loads and stores of unsigned short
+ * are atomic for all legal alignments.
*/
AO_INLINE unsigned short
#ifdef AO_USE_ALMOST_LOCK_FREE
/* LIFO linked lists based on compare-and-swap. We need to avoid */
-/* the case of a node deleton and reinsertion while I'm deleting */
+/* the case of a node deletion and reinsertion while I'm deleting */
/* it, since that may cause my CAS to succeed even though the next */
/* pointer is now wrong. Our solution is not fully lock-free, but it */
/* is good enough for signal handlers, provided we have a suitably low */
* I concluded experimentally that checking a value first before
* performing a compare-and-swap is usually beneficial on X86, but
* slows things down appreciably with contention on Itanium.
- * ince the Itanium behavior makes more sense to me (more cache line
+ * Since the Itanium behavior makes more sense to me (more cache line
* movement unless we're mostly reading, but back-off should guard
* against that), we take Itanium as the default. Measurements on
* other multiprocessor architectures would be useful. (On a uniprocessor,
( &(list -> ptr), next, (AO_t) element));
/* This uses a narrow CAS here, an old optimization suggested */
/* by Treiber. Pop is still safe, since we run into the ABA */
- /* problem only if there were both interveining "pop"s and "push"es.*/
- /* Inthat case we still see a change inthe version number. */
+ /* problem only if there were both intervening "pop"s and "push"es. */
+ /* In that case we still see a change in the version number. */
}
AO_t *AO_stack_pop_acquire(AO_stack_t *list)
volatile AO_t AO_stack_bl[AO_BL_SIZE];
} AO_stack_aux;
-/* The stack implementation knows only about the lecation of */
+/* The stack implementation knows only about the location of */
/* link fields in nodes, and nothing about the rest of the */
/* stack elements. Link fields hold an AO_t, which is not */
/* necessarily a real pointer. This converts the AO_t to a */