int
xfs_log_notify(xfs_mount_t *mp, /* mount of partition */
void *iclog_hndl, /* iclog to hang callback off */
xfs_log_callback_t *cb)
{
- xlog_t *log = mp->m_log;
xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl;
int abortflg;
- cb->cb_next = NULL;
- spin_lock(&log->l_icloglock);
+ spin_lock(&iclog->ic_callback_lock);
abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
if (!abortflg) {
ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
(iclog->ic_state == XLOG_STATE_WANT_SYNC));
+ cb->cb_next = NULL;
*(iclog->ic_callback_tail) = cb;
iclog->ic_callback_tail = &(cb->cb_next);
}
- spin_unlock(&log->l_icloglock);
+ spin_unlock(&iclog->ic_callback_lock);
return abortflg;
} /* xfs_log_notify */
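
As a reference for the locking change above, here is a minimal usage sketch (not part of this patch) showing how a caller hangs a completion callback off an iclog via xfs_log_notify(). The struct my_ctx container and the my_done()/my_register() names are hypothetical; only the cb_func/cb_arg fields and the non-zero return for an already-errored iclog come from the code above.

/* Hypothetical caller-side context; the embedded callback must stay
 * allocated until cb_func has been run by xlog_state_do_callback(). */
struct my_ctx {
	xfs_log_callback_t	mc_logcb;
	int			mc_aborted;
};

/* cb_func: invoked once the iclog completes (or the log aborts);
 * "aborted" is non-zero on a log I/O error. */
static void
my_done(void *arg, int aborted)
{
	struct my_ctx	*ctx = arg;

	ctx->mc_aborted = aborted;
}

/* Returns non-zero if the iclog is already in XLOG_STATE_IOERROR,
 * in which case the callback will never be run. */
static int
my_register(xfs_mount_t *mp, void *iclog_hndl, struct my_ctx *ctx)
{
	ctx->mc_logcb.cb_func = my_done;
	ctx->mc_logcb.cb_arg = ctx;
	return xfs_log_notify(mp, iclog_hndl, &ctx->mc_logcb);
}
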
iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
iclog->ic_state = XLOG_STATE_ACTIVE;
iclog->ic_log = log;
+ atomic_set(&iclog->ic_refcnt, 0);
+ spin_lock_init(&iclog->ic_callback_lock);
iclog->ic_callback_tail = &(iclog->ic_callback);
iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize;
if (iclog->ic_state == XLOG_STATE_DIRTY) {
iclog->ic_state = XLOG_STATE_ACTIVE;
iclog->ic_offset = 0;
- iclog->ic_callback = NULL; /* don't need to free */
+ ASSERT(iclog->ic_callback == NULL);
/*
* If the number of ops in this iclog indicates it just
* contains the dummy transaction, we can
* change state into IDLE (the second time around).
* Otherwise we should change the state into NEED a dummy.
*/
log->l_last_sync_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
spin_unlock(&log->l_grant_lock);
- /*
- * Keep processing entries in the callback list
- * until we come around and it is empty. We
- * need to atomically see that the list is
- * empty and change the state to DIRTY so that
- * we don't miss any more callbacks being added.
- */
- spin_lock(&log->l_icloglock);
} else {
+ spin_unlock(&log->l_icloglock);
ioerrors++;
}
- cb = iclog->ic_callback;
+ /*
+ * Keep processing entries in the callback list until
+ * we come around and it is empty. We need to
+ * atomically see that the list is empty and change the
+ * state to DIRTY so that we don't miss any more
+ * callbacks being added.
+ */
+ spin_lock(&iclog->ic_callback_lock);
+ cb = iclog->ic_callback;
while (cb) {
iclog->ic_callback_tail = &(iclog->ic_callback);
iclog->ic_callback = NULL;
- spin_unlock(&log->l_icloglock);
+ spin_unlock(&iclog->ic_callback_lock);
/* perform callbacks in the order given */
for (; cb; cb = cb_next) {
cb_next = cb->cb_next;
cb->cb_func(cb->cb_arg, aborted);
}
- spin_lock(&log->l_icloglock);
+ spin_lock(&iclog->ic_callback_lock);
cb = iclog->ic_callback;
}
loopdidcallbacks++;
funcdidcallbacks++;
+ spin_lock(&log->l_icloglock);
ASSERT(iclog->ic_callback == NULL);
+ spin_unlock(&iclog->ic_callback_lock);
if (!(iclog->ic_state & XLOG_STATE_IOERROR))
iclog->ic_state = XLOG_STATE_DIRTY;
* - ic_offset is the current number of bytes written to in this iclog.
* - ic_refcnt is bumped when someone is writing to the log.
* - ic_state is the state of the iclog.
+ *
+ * Because of cacheline contention on large machines, we need to separate
+ * various resources onto different cachelines. To start with, make the
+ * structure cacheline aligned. The following fields can be contended on
+ * by independent processes:
+ *
+ * - ic_callback_*
+ * - ic_refcnt
+ * - fields protected by the global l_icloglock
+ *
+ * so we need to ensure that these fields are located in separate cachelines.
+ * We'll put all the read-only and l_icloglock fields in the first cacheline,
+ * and move everything else out to subsequent cachelines.
*/
typedef struct xlog_iclog_fields {
sv_t ic_forcesema;
struct xlog_in_core *ic_prev;
struct xfs_buf *ic_bp;
struct log *ic_log;
- xfs_log_callback_t *ic_callback;
- xfs_log_callback_t **ic_callback_tail;
-#ifdef XFS_LOG_TRACE
- struct ktrace *ic_trace;
-#endif
int ic_size;
int ic_offset;
- atomic_t ic_refcnt;
int ic_bwritecnt;
ushort_t ic_state;
char *ic_datap; /* pointer to iclog data */
-} xlog_iclog_fields_t;
+#ifdef XFS_LOG_TRACE
+ struct ktrace *ic_trace;
+#endif
+
+ /* Callback structures need their own cacheline */
+ spinlock_t ic_callback_lock ____cacheline_aligned_in_smp;
+ xfs_log_callback_t *ic_callback;
+ xfs_log_callback_t **ic_callback_tail;
+
+ /* reference counts need their own cacheline */
+ atomic_t ic_refcnt ____cacheline_aligned_in_smp;
+} xlog_iclog_fields_t ____cacheline_aligned_in_smp;
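
A minimal sketch (not part of the patch) of how the intended cacheline separation could be verified at compile time, assuming the stock offsetof()/BUILD_BUG_ON()/L1_CACHE_BYTES helpers. The xlog_iclog_layout_check() name is made up, the checks only hold on SMP builds where ____cacheline_aligned_in_smp actually pads, and it would have to sit before the ic_* field #defines below so offsetof() sees the raw member names.

#include <linux/cache.h>	/* L1_CACHE_BYTES */
#include <linux/kernel.h>	/* BUILD_BUG_ON */
#include <linux/stddef.h>	/* offsetof */

static inline void
xlog_iclog_layout_check(void)
{
#ifdef CONFIG_SMP
	/* the callback chain and its lock must not share a line with ic_refcnt... */
	BUILD_BUG_ON(offsetof(xlog_iclog_fields_t, ic_callback_lock) / L1_CACHE_BYTES ==
		     offsetof(xlog_iclog_fields_t, ic_refcnt) / L1_CACHE_BYTES);
	/* ...nor with the l_icloglock-protected state in the first cacheline */
	BUILD_BUG_ON(offsetof(xlog_iclog_fields_t, ic_state) / L1_CACHE_BYTES ==
		     offsetof(xlog_iclog_fields_t, ic_callback_lock) / L1_CACHE_BYTES);
#endif
}
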
typedef union xlog_in_core2 {
xlog_rec_header_t hic_header;
#define ic_bp hic_fields.ic_bp
#define ic_log hic_fields.ic_log
#define ic_callback hic_fields.ic_callback
+#define ic_callback_lock hic_fields.ic_callback_lock
#define ic_callback_tail hic_fields.ic_callback_tail
#define ic_trace hic_fields.ic_trace
#define ic_size hic_fields.ic_size