/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"
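/*
 * Write an AFU control command and poll the AFU control register until the
 * masked status matches the expected result or CXL_TIMEOUT expires. 'clear'
 * names the bits to drop before or-ing in 'command'; 'enabled' is the state
 * recorded in afu->enabled once the command completes.
 */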
static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}
static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}
int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}
/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU reset request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);
}
static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
		if (dsisr & CXL_PSL_DSISR_TRANS) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}
static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}
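
/*
 * Illustrative check of the formula above: for a single 4K page,
 * spa_max_procs(4096) = ((4096 / 8) - 96) / 17 = (512 - 96) / 17 = 24,
 * and indeed sizeof(SPA) for n = 24 is 136*24 + 768 = 4032 <= 4096,
 * while n = 25 would need 4168 bytes.
 */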
int cxl_alloc_spa(struct cxl_afu *afu)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
				 afu->native->spa_max_procs, afu->native->spa_size);
			afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}
static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
						    ((afu->native->spa_max_procs + 3) * 128));
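
	/*
	 * Program the SPA pointer register: physical base address, encoded
	 * size (the size field is derived from the number of 4K pages the
	 * SPA occupies), and the valid bit:
	 */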
	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		 afu->native->spa, afu->native->spa_max_procs,
		 afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}
void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			   afu->native->spa_order);
		afu->native->spa = NULL;
	}
}
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}
static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}
/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
		     be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}
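
/*
 * Issue a process element "linked list" command (LLCMD) against the SPA and
 * busy-wait on the software command/status doubleword until the PSL echoes
 * the command back, indicating completion.
 */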
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}
static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}
static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}
static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}
void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}
static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif
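
/*
 * Build the PSL State Register image for a context: kernel contexts run
 * privileged with the host's MSR[SF], user contexts run problem state with
 * SF derived from the task's 32/64-bit personality.
 */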
static u64 calculate_sr(struct cxl_context *ctx)
{
	u64 sr = 0;

	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	if (ctx->kernel) {
		sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	return sr;
}
static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.aurp0 = 0; /* disable */
	ctx->elem->common.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}
static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * operations.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}
static int activate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}
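
/*
 * In dedicated mode the four IVTE offset/range pairs are packed directly
 * into a pair of 64-bit registers, 16 bits per entry with entry 0 in the
 * most significant halfword (e.g. offset[0] lands in bits 63:48).
 */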
static void update_ivtes_dedicated(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.range[3] & 0xffff));
}
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	update_ivtes_dedicated(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}
static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}
static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}
static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(afu);

	return -EINVAL;
}
static int native_attach_process(struct cxl_context *ctx, bool kernel,
				 u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return attach_dedicated(ctx, wed, amr);

	return -EINVAL;
}
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	/*
	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
	 * stop the AFU in dedicated mode (we therefore do not make that
	 * optional like we do in the afu directed path). It does not indicate
	 * that we need to do an explicit disable (which should occur
	 * implicitly as part of the reset) or purge, but we do these as well
	 * to be on the safe side.
	 *
	 * Notably we used to have some issues with the disable sequence
	 * (before the sequence was spelled out in the architecture) which is
	 * why we were so heavy weight in the first place, however a bug was
	 * discovered that had rendered the disable operation ineffective, so
	 * it is conceivable that was the sole explanation for those
	 * difficulties. Point is, we should be careful and do some regression
	 * testing if we ever attempt to remove any part of this procedure.
	 */
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);

	return 0;
}
static void native_update_ivtes(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return update_ivtes_directed(ctx);
	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return update_ivtes_dedicated(ctx);
	WARN(1, "native_update_ivtes: Bad mode\n");
}
static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}
static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}
static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	u64 pidtid;

	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
	info->pid = pidtid >> 32;
	info->tid = pidtid & 0xffffffff;
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}
void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}
static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						 u64 dsisr, u64 errstat)
{
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
	}

	return cxl_ops->ack_irq(ctx, 0, errstat);
}
static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}
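
/*
 * Demultiplex the AFU-wide PSL interrupt: read back which process element
 * faulted, look up the owning context in the IDR and hand the interrupt off
 * to it.
 */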
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
	int ret;

	if ((ret = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
		return fail_psl_irq(afu, &irq_info);
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	return fail_psl_irq(afu, &irq_info);
}
static void native_irq_wait(struct cxl_context *ctx)
{
	u64 dsisr;
	int timeout = 1000;
	int ph;

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	while (timeout--) {
		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
		if (ph != ctx->pe)
			return;
		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
		if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
			return;
		/*
		 * We are waiting for the workqueue to process our
		 * irq, so need to let that run here.
		 */
		msleep(1);
	}

	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
		 " DSISR %016llx!\n", ph, dsisr);
	return;
}
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;

	/*
	 * slice err interrupt is only used with full PSL (no XSL)
	 */
	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);
	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return IRQ_HANDLED;
}
void cxl_native_err_irq_dump_regs(struct cxl *adapter)
{
	u64 fir1, fir2;

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
}
static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	if (adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
		adapter->native->sl_ops->debugfs_stop_trace(adapter);
	}

	if (adapter->native->sl_ops->err_irq_dump_registers)
		adapter->native->sl_ops->err_irq_dump_registers(adapter);

	return IRQ_HANDLED;
}
int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}
void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
}
int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}
void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}
int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
				       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}
void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
		return;

	cxl_unmap_irq(afu->native->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
	kfree(afu->psl_irq_name);
}
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}
static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}
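
/*
 * An MMIO read returning all 1s is taken to mean the slice (or the whole
 * card) is no longer responding, e.g. because the link dropped or the
 * device was EEH frozen.
 */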
int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}
static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}
static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}
static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}
static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}
static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}
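
/*
 * Worked example (illustrative): a 16-bit config record read at off = 0x6
 * becomes a 32-bit read at aligned_off = 0x4; (off & 0x3) * 8 = 16, so the
 * result is extracted from bits 31:16 of the little-endian word:
 * *out = (val >> 16) & 0xffff.
 */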
static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		 (cr * afu->crs_len) + off, in);
	return 0;
}
static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
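	/* A 16-bit write at (off & 0x3) == 3 would straddle the aligned
	 * 32-bit word, which this read-modify-write cannot express: */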
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}
static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}
const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};