Merge tag 'driver-core-6.1-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git...
[platform/kernel/linux-starfive.git] / drivers / soundwire / intel.c
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3
4 /*
5  * Soundwire Intel Master Driver
6  */
7
8 #include <linux/acpi.h>
9 #include <linux/debugfs.h>
10 #include <linux/delay.h>
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/auxiliary_bus.h>
15 #include <sound/pcm_params.h>
16 #include <linux/pm_runtime.h>
17 #include <sound/soc.h>
18 #include <linux/soundwire/sdw_registers.h>
19 #include <linux/soundwire/sdw.h>
20 #include <linux/soundwire/sdw_intel.h>
21 #include "cadence_master.h"
22 #include "bus.h"
23 #include "intel.h"
24
25 /* IDA min selected to avoid conflicts with HDaudio/iDISP SDI values */
26 #define INTEL_DEV_NUM_IDA_MIN           4
27
28 #define INTEL_MASTER_SUSPEND_DELAY_MS   3000
29 #define INTEL_MASTER_RESET_ITERATIONS   10
30
31 /*
32  * debug/config flags for the Intel SoundWire Master.
33  *
34  * Since we may have multiple masters active, we can have up to 8
35  * flags reused in each byte, with master0 using the ls-byte, etc.
36  */
37
38 #define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME             BIT(0)
39 #define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP             BIT(1)
40 #define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE        BIT(2)
41 #define SDW_INTEL_MASTER_DISABLE_MULTI_LINK             BIT(3)
42
43 static int md_flags;
44 module_param_named(sdw_md_flags, md_flags, int, 0444);
45 MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
46
47 enum intel_pdi_type {
48         INTEL_PDI_IN = 0,
49         INTEL_PDI_OUT = 1,
50         INTEL_PDI_BD = 2,
51 };
52
53 #define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
54
55 /*
56  * Read, write helpers for HW registers
57  */
58 static inline int intel_readl(void __iomem *base, int offset)
59 {
60         return readl(base + offset);
61 }
62
63 static inline void intel_writel(void __iomem *base, int offset, int value)
64 {
65         writel(value, base + offset);
66 }
67
68 static inline u16 intel_readw(void __iomem *base, int offset)
69 {
70         return readw(base + offset);
71 }
72
73 static inline void intel_writew(void __iomem *base, int offset, u16 value)
74 {
75         writew(value, base + offset);
76 }
77
78 static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
79 {
80         int timeout = 10;
81         u32 reg_read;
82
83         do {
84                 reg_read = readl(base + offset);
85                 if ((reg_read & mask) == target)
86                         return 0;
87
88                 timeout--;
89                 usleep_range(50, 100);
90         } while (timeout != 0);
91
92         return -EAGAIN;
93 }
94
95 static int intel_clear_bit(void __iomem *base, int offset, u32 value, u32 mask)
96 {
97         writel(value, base + offset);
98         return intel_wait_bit(base, offset, mask, 0);
99 }
100
101 static int intel_set_bit(void __iomem *base, int offset, u32 value, u32 mask)
102 {
103         writel(value, base + offset);
104         return intel_wait_bit(base, offset, mask, mask);
105 }
106
107 /*
108  * debugfs
109  */
110 #ifdef CONFIG_DEBUG_FS
111
112 #define RD_BUF (2 * PAGE_SIZE)
113
114 static ssize_t intel_sprintf(void __iomem *mem, bool l,
115                              char *buf, size_t pos, unsigned int reg)
116 {
117         int value;
118
119         if (l)
120                 value = intel_readl(mem, reg);
121         else
122                 value = intel_readw(mem, reg);
123
124         return scnprintf(buf + pos, RD_BUF - pos, "%4x\t%4x\n", reg, value);
125 }
126
127 static int intel_reg_show(struct seq_file *s_file, void *data)
128 {
129         struct sdw_intel *sdw = s_file->private;
130         void __iomem *s = sdw->link_res->shim;
131         void __iomem *a = sdw->link_res->alh;
132         char *buf;
133         ssize_t ret;
134         int i, j;
135         unsigned int links, reg;
136
137         buf = kzalloc(RD_BUF, GFP_KERNEL);
138         if (!buf)
139                 return -ENOMEM;
140
141         links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
142
143         ret = scnprintf(buf, RD_BUF, "Register  Value\n");
144         ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
145
146         for (i = 0; i < links; i++) {
147                 reg = SDW_SHIM_LCAP + i * 4;
148                 ret += intel_sprintf(s, true, buf, ret, reg);
149         }
150
151         for (i = 0; i < links; i++) {
152                 ret += scnprintf(buf + ret, RD_BUF - ret, "\nLink%d\n", i);
153                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLSCAP(i));
154                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS0CM(i));
155                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS1CM(i));
156                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS2CM(i));
157                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTLS3CM(i));
158                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PCMSCAP(i));
159
160                 ret += scnprintf(buf + ret, RD_BUF - ret, "\n PCMSyCH registers\n");
161
162                 /*
163                  * the value 10 is the number of PDIs. We will need a
164                  * cleanup to remove hard-coded Intel configurations
165                  * from cadence_master.c
166                  */
167                 for (j = 0; j < 10; j++) {
168                         ret += intel_sprintf(s, false, buf, ret,
169                                         SDW_SHIM_PCMSYCHM(i, j));
170                         ret += intel_sprintf(s, false, buf, ret,
171                                         SDW_SHIM_PCMSYCHC(i, j));
172                 }
173                 ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
174
175                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
176                 ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
177         }
178
179         ret += scnprintf(buf + ret, RD_BUF - ret, "\nWake registers\n");
180         ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKEEN);
181         ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_WAKESTS);
182
183         ret += scnprintf(buf + ret, RD_BUF - ret, "\nALH STRMzCFG\n");
184         for (i = 0; i < SDW_ALH_NUM_STREAMS; i++)
185                 ret += intel_sprintf(a, true, buf, ret, SDW_ALH_STRMZCFG(i));
186
187         seq_printf(s_file, "%s", buf);
188         kfree(buf);
189
190         return 0;
191 }
192 DEFINE_SHOW_ATTRIBUTE(intel_reg);
193
194 static int intel_set_m_datamode(void *data, u64 value)
195 {
196         struct sdw_intel *sdw = data;
197         struct sdw_bus *bus = &sdw->cdns.bus;
198
199         if (value > SDW_PORT_DATA_MODE_STATIC_1)
200                 return -EINVAL;
201
202         /* Userspace changed the hardware state behind the kernel's back */
203         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
204
205         bus->params.m_data_mode = value;
206
207         return 0;
208 }
209 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
210                          intel_set_m_datamode, "%llu\n");
211
212 static int intel_set_s_datamode(void *data, u64 value)
213 {
214         struct sdw_intel *sdw = data;
215         struct sdw_bus *bus = &sdw->cdns.bus;
216
217         if (value > SDW_PORT_DATA_MODE_STATIC_1)
218                 return -EINVAL;
219
220         /* Userspace changed the hardware state behind the kernel's back */
221         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
222
223         bus->params.s_data_mode = value;
224
225         return 0;
226 }
227 DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
228                          intel_set_s_datamode, "%llu\n");
229
230 static void intel_debugfs_init(struct sdw_intel *sdw)
231 {
232         struct dentry *root = sdw->cdns.bus.debugfs;
233
234         if (!root)
235                 return;
236
237         sdw->debugfs = debugfs_create_dir("intel-sdw", root);
238
239         debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
240                             &intel_reg_fops);
241
242         debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
243                             &intel_set_m_datamode_fops);
244
245         debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
246                             &intel_set_s_datamode_fops);
247
248         sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
249 }
250
251 static void intel_debugfs_exit(struct sdw_intel *sdw)
252 {
253         debugfs_remove_recursive(sdw->debugfs);
254 }
255 #else
256 static void intel_debugfs_init(struct sdw_intel *sdw) {}
257 static void intel_debugfs_exit(struct sdw_intel *sdw) {}
258 #endif /* CONFIG_DEBUG_FS */
259
260 /*
261  * shim ops
262  */
263 /* this needs to be called with shim_lock */
264 static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
265 {
266         void __iomem *shim = sdw->link_res->shim;
267         unsigned int link_id = sdw->instance;
268         u16 ioctl;
269
270         /* Switch to MIP from Glue logic */
271         ioctl = intel_readw(shim,  SDW_SHIM_IOCTL(link_id));
272
273         ioctl &= ~(SDW_SHIM_IOCTL_DOE);
274         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
275         usleep_range(10, 15);
276
277         ioctl &= ~(SDW_SHIM_IOCTL_DO);
278         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
279         usleep_range(10, 15);
280
281         ioctl |= (SDW_SHIM_IOCTL_MIF);
282         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
283         usleep_range(10, 15);
284
285         ioctl &= ~(SDW_SHIM_IOCTL_BKE);
286         ioctl &= ~(SDW_SHIM_IOCTL_COE);
287         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
288         usleep_range(10, 15);
289
290         /* at this point Master IP has full control of the I/Os */
291 }
292
293 /* this needs to be called with shim_lock */
294 static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
295 {
296         unsigned int link_id = sdw->instance;
297         void __iomem *shim = sdw->link_res->shim;
298         u16 ioctl;
299
300         /* Glue logic */
301         ioctl = intel_readw(shim, SDW_SHIM_IOCTL(link_id));
302         ioctl |= SDW_SHIM_IOCTL_BKE;
303         ioctl |= SDW_SHIM_IOCTL_COE;
304         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
305         usleep_range(10, 15);
306
307         ioctl &= ~(SDW_SHIM_IOCTL_MIF);
308         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
309         usleep_range(10, 15);
310
311         /* at this point Integration Glue has full control of the I/Os */
312 }
313
314 /* this needs to be called with shim_lock */
315 static void intel_shim_init(struct sdw_intel *sdw)
316 {
317         void __iomem *shim = sdw->link_res->shim;
318         unsigned int link_id = sdw->instance;
319         u16 ioctl = 0, act = 0;
320
321         /* Initialize Shim */
322         ioctl |= SDW_SHIM_IOCTL_BKE;
323         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
324         usleep_range(10, 15);
325
326         ioctl |= SDW_SHIM_IOCTL_WPDD;
327         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
328         usleep_range(10, 15);
329
330         ioctl |= SDW_SHIM_IOCTL_DO;
331         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
332         usleep_range(10, 15);
333
334         ioctl |= SDW_SHIM_IOCTL_DOE;
335         intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
336         usleep_range(10, 15);
337
338         intel_shim_glue_to_master_ip(sdw);
339
340         u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
341         act |= SDW_SHIM_CTMCTL_DACTQE;
342         act |= SDW_SHIM_CTMCTL_DODS;
343         intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
344         usleep_range(10, 15);
345 }
346
347 static int intel_shim_check_wake(struct sdw_intel *sdw)
348 {
349         void __iomem *shim;
350         u16 wake_sts;
351
352         shim = sdw->link_res->shim;
353         wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
354
355         return wake_sts & BIT(sdw->instance);
356 }
357
358 static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
359 {
360         void __iomem *shim = sdw->link_res->shim;
361         unsigned int link_id = sdw->instance;
362         u16 wake_en, wake_sts;
363
364         mutex_lock(sdw->link_res->shim_lock);
365         wake_en = intel_readw(shim, SDW_SHIM_WAKEEN);
366
367         if (wake_enable) {
368                 /* Enable the wakeup */
369                 wake_en |= (SDW_SHIM_WAKEEN_ENABLE << link_id);
370                 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
371         } else {
372                 /* Disable the wake up interrupt */
373                 wake_en &= ~(SDW_SHIM_WAKEEN_ENABLE << link_id);
374                 intel_writew(shim, SDW_SHIM_WAKEEN, wake_en);
375
376                 /* Clear wake status */
377                 wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
378                 wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id);
379                 intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts);
380         }
381         mutex_unlock(sdw->link_res->shim_lock);
382 }
383
384 static int intel_link_power_up(struct sdw_intel *sdw)
385 {
386         unsigned int link_id = sdw->instance;
387         void __iomem *shim = sdw->link_res->shim;
388         u32 *shim_mask = sdw->link_res->shim_mask;
389         struct sdw_bus *bus = &sdw->cdns.bus;
390         struct sdw_master_prop *prop = &bus->prop;
391         u32 spa_mask, cpa_mask;
392         u32 link_control;
393         int ret = 0;
394         u32 syncprd;
395         u32 sync_reg;
396
397         mutex_lock(sdw->link_res->shim_lock);
398
399         /*
400          * The hardware relies on an internal counter, typically 4kHz,
401          * to generate the SoundWire SSP - which defines a 'safe'
402          * synchronization point between commands and audio transport
403          * and allows for multi link synchronization. The SYNCPRD value
404          * is only dependent on the oscillator clock provided to
405          * the IP, so adjust based on _DSD properties reported in DSDT
406          * tables. The values reported are based on either 24MHz
407          * (CNL/CML) or 38.4 MHz (ICL/TGL+).
408          */
409         if (prop->mclk_freq % 6000000)
410                 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
411         else
412                 syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
413
414         if (!*shim_mask) {
415                 dev_dbg(sdw->cdns.dev, "powering up all links\n");
416
417                 /* we first need to program the SyncPRD/CPU registers */
418                 dev_dbg(sdw->cdns.dev,
419                         "first link up, programming SYNCPRD\n");
420
421                 /* set SyncPRD period */
422                 sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
423                 u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
424
425                 /* Set SyncCPU bit */
426                 sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
427                 intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
428
429                 /* Link power up sequence */
430                 link_control = intel_readl(shim, SDW_SHIM_LCTL);
431
432                 /* only power-up enabled links */
433                 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
434                 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
435
436                 link_control |=  spa_mask;
437
438                 ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
439                 if (ret < 0) {
440                         dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
441                         goto out;
442                 }
443
444                 /* SyncCPU will change once link is active */
445                 ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
446                                      SDW_SHIM_SYNC_SYNCCPU, 0);
447                 if (ret < 0) {
448                         dev_err(sdw->cdns.dev,
449                                 "Failed to set SHIM_SYNC: %d\n", ret);
450                         goto out;
451                 }
452         }
453
454         *shim_mask |= BIT(link_id);
455
456         sdw->cdns.link_up = true;
457
458         intel_shim_init(sdw);
459
460 out:
461         mutex_unlock(sdw->link_res->shim_lock);
462
463         return ret;
464 }
465
466 static int intel_link_power_down(struct sdw_intel *sdw)
467 {
468         u32 link_control, spa_mask, cpa_mask;
469         unsigned int link_id = sdw->instance;
470         void __iomem *shim = sdw->link_res->shim;
471         u32 *shim_mask = sdw->link_res->shim_mask;
472         int ret = 0;
473
474         mutex_lock(sdw->link_res->shim_lock);
475
476         if (!(*shim_mask & BIT(link_id)))
477                 dev_err(sdw->cdns.dev,
478                         "%s: Unbalanced power-up/down calls\n", __func__);
479
480         sdw->cdns.link_up = false;
481
482         intel_shim_master_ip_to_glue(sdw);
483
484         *shim_mask &= ~BIT(link_id);
485
486         if (!*shim_mask) {
487
488                 dev_dbg(sdw->cdns.dev, "powering down all links\n");
489
490                 /* Link power down sequence */
491                 link_control = intel_readl(shim, SDW_SHIM_LCTL);
492
493                 /* only power-down enabled links */
494                 spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
495                 cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
496
497                 link_control &=  spa_mask;
498
499                 ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
500                 if (ret < 0) {
501                         dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
502
503                         /*
504                          * we leave the sdw->cdns.link_up flag as false since we've disabled
505                          * the link at this point and cannot handle interrupts any longer.
506                          */
507                 }
508         }
509
510         mutex_unlock(sdw->link_res->shim_lock);
511
512         return ret;
513 }
514
515 static void intel_shim_sync_arm(struct sdw_intel *sdw)
516 {
517         void __iomem *shim = sdw->link_res->shim;
518         u32 sync_reg;
519
520         mutex_lock(sdw->link_res->shim_lock);
521
522         /* update SYNC register */
523         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
524         sync_reg |= (SDW_SHIM_SYNC_CMDSYNC << sdw->instance);
525         intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
526
527         mutex_unlock(sdw->link_res->shim_lock);
528 }
529
530 static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
531 {
532         void __iomem *shim = sdw->link_res->shim;
533         u32 sync_reg;
534         int ret;
535
536         /* Read SYNC register */
537         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
538
539         /*
540          * Set SyncGO bit to synchronously trigger a bank switch for
541          * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
542          * the Masters.
543          */
544         sync_reg |= SDW_SHIM_SYNC_SYNCGO;
545
546         ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
547                               SDW_SHIM_SYNC_SYNCGO);
548
549         if (ret < 0)
550                 dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
551
552         return ret;
553 }
554
555 static int intel_shim_sync_go(struct sdw_intel *sdw)
556 {
557         int ret;
558
559         mutex_lock(sdw->link_res->shim_lock);
560
561         ret = intel_shim_sync_go_unlocked(sdw);
562
563         mutex_unlock(sdw->link_res->shim_lock);
564
565         return ret;
566 }
567
568 /*
569  * PDI routines
570  */
571 static void intel_pdi_init(struct sdw_intel *sdw,
572                            struct sdw_cdns_stream_config *config)
573 {
574         void __iomem *shim = sdw->link_res->shim;
575         unsigned int link_id = sdw->instance;
576         int pcm_cap;
577
578         /* PCM Stream Capability */
579         pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
580
581         config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
582         config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
583         config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
584
585         dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
586                 config->pcm_bd, config->pcm_in, config->pcm_out);
587 }
588
589 static int
590 intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
591 {
592         void __iomem *shim = sdw->link_res->shim;
593         unsigned int link_id = sdw->instance;
594         int count;
595
596         count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
597
598         /*
599          * WORKAROUND: on all existing Intel controllers, pdi
600          * number 2 reports channel count as 1 even though it
601          * supports 8 channels. Performing hardcoding for pdi
602          * number 2.
603          */
604         if (pdi_num == 2)
605                 count = 7;
606
607         /* zero based values for channel count in register */
608         count++;
609
610         return count;
611 }
612
613 static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
614                                    struct sdw_cdns_pdi *pdi,
615                                    unsigned int num_pdi,
616                                    unsigned int *num_ch)
617 {
618         int i, ch_count = 0;
619
620         for (i = 0; i < num_pdi; i++) {
621                 pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
622                 ch_count += pdi->ch_count;
623                 pdi++;
624         }
625
626         *num_ch = ch_count;
627         return 0;
628 }
629
630 static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
631                                       struct sdw_cdns_streams *stream)
632 {
633         intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
634                                 &stream->num_ch_bd);
635
636         intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
637                                 &stream->num_ch_in);
638
639         intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
640                                 &stream->num_ch_out);
641
642         return 0;
643 }
644
645 static int intel_pdi_ch_update(struct sdw_intel *sdw)
646 {
647         intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
648
649         return 0;
650 }
651
652 static void
653 intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
654 {
655         void __iomem *shim = sdw->link_res->shim;
656         unsigned int link_id = sdw->instance;
657         int pdi_conf = 0;
658
659         /* the Bulk and PCM streams are not contiguous */
660         pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
661         if (pdi->num >= 2)
662                 pdi->intel_alh_id += 2;
663
664         /*
665          * Program stream parameters to stream SHIM register
666          * This is applicable for PCM stream only.
667          */
668         if (pdi->type != SDW_STREAM_PCM)
669                 return;
670
671         if (pdi->dir == SDW_DATA_DIR_RX)
672                 pdi_conf |= SDW_SHIM_PCMSYCM_DIR;
673         else
674                 pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
675
676         u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
677         u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
678         u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
679
680         intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
681 }
682
683 static void
684 intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
685 {
686         void __iomem *alh = sdw->link_res->alh;
687         unsigned int link_id = sdw->instance;
688         unsigned int conf;
689
690         /* the Bulk and PCM streams are not contiguous */
691         pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
692         if (pdi->num >= 2)
693                 pdi->intel_alh_id += 2;
694
695         /* Program Stream config ALH register */
696         conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
697
698         u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
699         u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
700
701         intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
702 }
703
704 static int intel_params_stream(struct sdw_intel *sdw,
705                                int stream,
706                                struct snd_soc_dai *dai,
707                                struct snd_pcm_hw_params *hw_params,
708                                int link_id, int alh_stream_id)
709 {
710         struct sdw_intel_link_res *res = sdw->link_res;
711         struct sdw_intel_stream_params_data params_data;
712
713         params_data.stream = stream; /* direction */
714         params_data.dai = dai;
715         params_data.hw_params = hw_params;
716         params_data.link_id = link_id;
717         params_data.alh_stream_id = alh_stream_id;
718
719         if (res->ops && res->ops->params_stream && res->dev)
720                 return res->ops->params_stream(res->dev,
721                                                &params_data);
722         return -EIO;
723 }
724
725 static int intel_free_stream(struct sdw_intel *sdw,
726                              int stream,
727                              struct snd_soc_dai *dai,
728                              int link_id)
729 {
730         struct sdw_intel_link_res *res = sdw->link_res;
731         struct sdw_intel_stream_free_data free_data;
732
733         free_data.stream = stream; /* direction */
734         free_data.dai = dai;
735         free_data.link_id = link_id;
736
737         if (res->ops && res->ops->free_stream && res->dev)
738                 return res->ops->free_stream(res->dev,
739                                              &free_data);
740
741         return 0;
742 }
743
744 /*
745  * bank switch routines
746  */
747
748 static int intel_pre_bank_switch(struct sdw_bus *bus)
749 {
750         struct sdw_cdns *cdns = bus_to_cdns(bus);
751         struct sdw_intel *sdw = cdns_to_intel(cdns);
752
753         /* Write to register only for multi-link */
754         if (!bus->multi_link)
755                 return 0;
756
757         intel_shim_sync_arm(sdw);
758
759         return 0;
760 }
761
762 static int intel_post_bank_switch(struct sdw_bus *bus)
763 {
764         struct sdw_cdns *cdns = bus_to_cdns(bus);
765         struct sdw_intel *sdw = cdns_to_intel(cdns);
766         void __iomem *shim = sdw->link_res->shim;
767         int sync_reg, ret;
768
769         /* Write to register only for multi-link */
770         if (!bus->multi_link)
771                 return 0;
772
773         mutex_lock(sdw->link_res->shim_lock);
774
775         /* Read SYNC register */
776         sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
777
778         /*
779          * post_bank_switch() ops is called from the bus in loop for
780          * all the Masters in the steam with the expectation that
781          * we trigger the bankswitch for the only first Master in the list
782          * and do nothing for the other Masters
783          *
784          * So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
785          */
786         if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
787                 ret = 0;
788                 goto unlock;
789         }
790
791         ret = intel_shim_sync_go_unlocked(sdw);
792 unlock:
793         mutex_unlock(sdw->link_res->shim_lock);
794
795         if (ret < 0)
796                 dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
797
798         return ret;
799 }
800
801 /*
802  * DAI routines
803  */
804
805 static int intel_startup(struct snd_pcm_substream *substream,
806                          struct snd_soc_dai *dai)
807 {
808         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
809         int ret;
810
811         ret = pm_runtime_resume_and_get(cdns->dev);
812         if (ret < 0 && ret != -EACCES) {
813                 dev_err_ratelimited(cdns->dev,
814                                     "pm_runtime_resume_and_get failed in %s, ret %d\n",
815                                     __func__, ret);
816                 return ret;
817         }
818         return 0;
819 }
820
821 static int intel_hw_params(struct snd_pcm_substream *substream,
822                            struct snd_pcm_hw_params *params,
823                            struct snd_soc_dai *dai)
824 {
825         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
826         struct sdw_intel *sdw = cdns_to_intel(cdns);
827         struct sdw_cdns_dma_data *dma;
828         struct sdw_cdns_pdi *pdi;
829         struct sdw_stream_config sconfig;
830         struct sdw_port_config *pconfig;
831         int ch, dir;
832         int ret;
833
834         dma = snd_soc_dai_get_dma_data(dai, substream);
835         if (!dma)
836                 return -EIO;
837
838         ch = params_channels(params);
839         if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
840                 dir = SDW_DATA_DIR_RX;
841         else
842                 dir = SDW_DATA_DIR_TX;
843
844         pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
845
846         if (!pdi) {
847                 ret = -EINVAL;
848                 goto error;
849         }
850
851         /* do run-time configurations for SHIM, ALH and PDI/PORT */
852         intel_pdi_shim_configure(sdw, pdi);
853         intel_pdi_alh_configure(sdw, pdi);
854         sdw_cdns_config_stream(cdns, ch, dir, pdi);
855
856         /* store pdi and hw_params, may be needed in prepare step */
857         dma->paused = false;
858         dma->suspended = false;
859         dma->pdi = pdi;
860         dma->hw_params = params;
861
862         /* Inform DSP about PDI stream number */
863         ret = intel_params_stream(sdw, substream->stream, dai, params,
864                                   sdw->instance,
865                                   pdi->intel_alh_id);
866         if (ret)
867                 goto error;
868
869         sconfig.direction = dir;
870         sconfig.ch_count = ch;
871         sconfig.frame_rate = params_rate(params);
872         sconfig.type = dma->stream_type;
873
874         sconfig.bps = snd_pcm_format_width(params_format(params));
875
876         /* Port configuration */
877         pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
878         if (!pconfig) {
879                 ret =  -ENOMEM;
880                 goto error;
881         }
882
883         pconfig->num = pdi->num;
884         pconfig->ch_mask = (1 << ch) - 1;
885
886         ret = sdw_stream_add_master(&cdns->bus, &sconfig,
887                                     pconfig, 1, dma->stream);
888         if (ret)
889                 dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
890
891         kfree(pconfig);
892 error:
893         return ret;
894 }
895
896 static int intel_prepare(struct snd_pcm_substream *substream,
897                          struct snd_soc_dai *dai)
898 {
899         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
900         struct sdw_intel *sdw = cdns_to_intel(cdns);
901         struct sdw_cdns_dma_data *dma;
902         int ch, dir;
903         int ret = 0;
904
905         dma = snd_soc_dai_get_dma_data(dai, substream);
906         if (!dma) {
907                 dev_err(dai->dev, "failed to get dma data in %s\n",
908                         __func__);
909                 return -EIO;
910         }
911
912         if (dma->suspended) {
913                 dma->suspended = false;
914
915                 /*
916                  * .prepare() is called after system resume, where we
917                  * need to reinitialize the SHIM/ALH/Cadence IP.
918                  * .prepare() is also called to deal with underflows,
919                  * but in those cases we cannot touch ALH/SHIM
920                  * registers
921                  */
922
923                 /* configure stream */
924                 ch = params_channels(dma->hw_params);
925                 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
926                         dir = SDW_DATA_DIR_RX;
927                 else
928                         dir = SDW_DATA_DIR_TX;
929
930                 intel_pdi_shim_configure(sdw, dma->pdi);
931                 intel_pdi_alh_configure(sdw, dma->pdi);
932                 sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
933
934                 /* Inform DSP about PDI stream number */
935                 ret = intel_params_stream(sdw, substream->stream, dai,
936                                           dma->hw_params,
937                                           sdw->instance,
938                                           dma->pdi->intel_alh_id);
939         }
940
941         return ret;
942 }
943
944 static int
945 intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
946 {
947         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
948         struct sdw_intel *sdw = cdns_to_intel(cdns);
949         struct sdw_cdns_dma_data *dma;
950         int ret;
951
952         dma = snd_soc_dai_get_dma_data(dai, substream);
953         if (!dma)
954                 return -EIO;
955
956         /*
957          * The sdw stream state will transition to RELEASED when stream->
958          * master_list is empty. So the stream state will transition to
959          * DEPREPARED for the first cpu-dai and to RELEASED for the last
960          * cpu-dai.
961          */
962         ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
963         if (ret < 0) {
964                 dev_err(dai->dev, "remove master from stream %s failed: %d\n",
965                         dma->stream->name, ret);
966                 return ret;
967         }
968
969         ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
970         if (ret < 0) {
971                 dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
972                 return ret;
973         }
974
975         dma->hw_params = NULL;
976         dma->pdi = NULL;
977
978         return 0;
979 }
980
981 static void intel_shutdown(struct snd_pcm_substream *substream,
982                            struct snd_soc_dai *dai)
983 {
984         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
985
986         pm_runtime_mark_last_busy(cdns->dev);
987         pm_runtime_put_autosuspend(cdns->dev);
988 }
989
990 static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
991                                     void *stream, int direction)
992 {
993         return cdns_set_sdw_stream(dai, stream, direction);
994 }
995
996 static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
997                                   int direction)
998 {
999         struct sdw_cdns_dma_data *dma;
1000
1001         if (direction == SNDRV_PCM_STREAM_PLAYBACK)
1002                 dma = dai->playback_dma_data;
1003         else
1004                 dma = dai->capture_dma_data;
1005
1006         if (!dma)
1007                 return ERR_PTR(-EINVAL);
1008
1009         return dma->stream;
1010 }
1011
1012 static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
1013 {
1014         struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1015         struct sdw_intel *sdw = cdns_to_intel(cdns);
1016         struct sdw_intel_link_res *res = sdw->link_res;
1017         struct sdw_cdns_dma_data *dma;
1018         int ret = 0;
1019
1020         /*
1021          * The .trigger callback is used to send required IPC to audio
1022          * firmware. The .free_stream callback will still be called
1023          * by intel_free_stream() in the TRIGGER_SUSPEND case.
1024          */
1025         if (res->ops && res->ops->trigger)
1026                 res->ops->trigger(dai, cmd, substream->stream);
1027
1028         dma = snd_soc_dai_get_dma_data(dai, substream);
1029         if (!dma) {
1030                 dev_err(dai->dev, "failed to get dma data in %s\n",
1031                         __func__);
1032                 return -EIO;
1033         }
1034
1035         switch (cmd) {
1036         case SNDRV_PCM_TRIGGER_SUSPEND:
1037
1038                 /*
1039                  * The .prepare callback is used to deal with xruns and resume operations.
1040                  * In the case of xruns, the DMAs and SHIM registers cannot be touched,
1041                  * but for resume operations the DMAs and SHIM registers need to be initialized.
1042                  * the .trigger callback is used to track the suspend case only.
1043                  */
1044
1045                 dma->suspended = true;
1046
1047                 ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
1048                 break;
1049
1050         case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1051                 dma->paused = true;
1052                 break;
1053         case SNDRV_PCM_TRIGGER_STOP:
1054         case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1055                 dma->paused = false;
1056                 break;
1057         default:
1058                 break;
1059         }
1060
1061         return ret;
1062 }
1063
1064 static int intel_component_probe(struct snd_soc_component *component)
1065 {
1066         int ret;
1067
1068         /*
1069          * make sure the device is pm_runtime_active before initiating
1070          * bus transactions during the card registration.
1071          * We use pm_runtime_resume() here, without taking a reference
1072          * and releasing it immediately.
1073          */
1074         ret = pm_runtime_resume(component->dev);
1075         if (ret < 0 && ret != -EACCES)
1076                 return ret;
1077
1078         return 0;
1079 }
1080
1081 static int intel_component_dais_suspend(struct snd_soc_component *component)
1082 {
1083         struct snd_soc_dai *dai;
1084
1085         /*
1086          * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
1087          * does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
1088          * Since the component suspend is called last, we can trap this corner case
1089          * and force the DAIs to release their resources.
1090          */
1091         for_each_component_dais(component, dai) {
1092                 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
1093                 struct sdw_intel *sdw = cdns_to_intel(cdns);
1094                 struct sdw_cdns_dma_data *dma;
1095                 int stream;
1096                 int ret;
1097
1098                 dma = dai->playback_dma_data;
1099                 stream = SNDRV_PCM_STREAM_PLAYBACK;
1100                 if (!dma) {
1101                         dma = dai->capture_dma_data;
1102                         stream = SNDRV_PCM_STREAM_CAPTURE;
1103                 }
1104
1105                 if (!dma)
1106                         continue;
1107
1108                 if (dma->suspended)
1109                         continue;
1110
1111                 if (dma->paused) {
1112                         dma->suspended = true;
1113
1114                         ret = intel_free_stream(sdw, stream, dai, sdw->instance);
1115                         if (ret < 0)
1116                                 return ret;
1117                 }
1118         }
1119
1120         return 0;
1121 }
1122
1123 static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
1124         .startup = intel_startup,
1125         .hw_params = intel_hw_params,
1126         .prepare = intel_prepare,
1127         .hw_free = intel_hw_free,
1128         .trigger = intel_trigger,
1129         .shutdown = intel_shutdown,
1130         .set_stream = intel_pcm_set_sdw_stream,
1131         .get_stream = intel_get_sdw_stream,
1132 };
1133
1134 static const struct snd_soc_component_driver dai_component = {
1135         .name                   = "soundwire",
1136         .probe                  = intel_component_probe,
1137         .suspend                = intel_component_dais_suspend,
1138         .legacy_dai_naming      = 1,
1139 };
1140
1141 static int intel_create_dai(struct sdw_cdns *cdns,
1142                             struct snd_soc_dai_driver *dais,
1143                             enum intel_pdi_type type,
1144                             u32 num, u32 off, u32 max_ch)
1145 {
1146         int i;
1147
1148         if (num == 0)
1149                 return 0;
1150
1151          /* TODO: Read supported rates/formats from hardware */
1152         for (i = off; i < (off + num); i++) {
1153                 dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
1154                                               "SDW%d Pin%d",
1155                                               cdns->instance, i);
1156                 if (!dais[i].name)
1157                         return -ENOMEM;
1158
1159                 if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
1160                         dais[i].playback.channels_min = 1;
1161                         dais[i].playback.channels_max = max_ch;
1162                         dais[i].playback.rates = SNDRV_PCM_RATE_48000;
1163                         dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
1164                 }
1165
1166                 if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
1167                         dais[i].capture.channels_min = 1;
1168                         dais[i].capture.channels_max = max_ch;
1169                         dais[i].capture.rates = SNDRV_PCM_RATE_48000;
1170                         dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
1171                 }
1172
1173                 dais[i].ops = &intel_pcm_dai_ops;
1174         }
1175
1176         return 0;
1177 }
1178
1179 static int intel_register_dai(struct sdw_intel *sdw)
1180 {
1181         struct sdw_cdns_stream_config config;
1182         struct sdw_cdns *cdns = &sdw->cdns;
1183         struct sdw_cdns_streams *stream;
1184         struct snd_soc_dai_driver *dais;
1185         int num_dai, ret, off = 0;
1186
1187         /* Read the PDI config and initialize cadence PDI */
1188         intel_pdi_init(sdw, &config);
1189         ret = sdw_cdns_pdi_init(cdns, config);
1190         if (ret)
1191                 return ret;
1192
1193         intel_pdi_ch_update(sdw);
1194
1195         /* DAIs are created based on total number of PDIs supported */
1196         num_dai = cdns->pcm.num_pdi;
1197
1198         dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
1199         if (!dais)
1200                 return -ENOMEM;
1201
1202         /* Create PCM DAIs */
1203         stream = &cdns->pcm;
1204
1205         ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
1206                                off, stream->num_ch_in);
1207         if (ret)
1208                 return ret;
1209
1210         off += cdns->pcm.num_in;
1211         ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
1212                                off, stream->num_ch_out);
1213         if (ret)
1214                 return ret;
1215
1216         off += cdns->pcm.num_out;
1217         ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
1218                                off, stream->num_ch_bd);
1219         if (ret)
1220                 return ret;
1221
1222         return devm_snd_soc_register_component(cdns->dev, &dai_component,
1223                                                dais, num_dai);
1224 }
1225
1226 static int intel_start_bus(struct sdw_intel *sdw)
1227 {
1228         struct device *dev = sdw->cdns.dev;
1229         struct sdw_cdns *cdns = &sdw->cdns;
1230         struct sdw_bus *bus = &cdns->bus;
1231         int ret;
1232
1233         ret = sdw_cdns_enable_interrupt(cdns, true);
1234         if (ret < 0) {
1235                 dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1236                 return ret;
1237         }
1238
1239         /*
1240          * follow recommended programming flows to avoid timeouts when
1241          * gsync is enabled
1242          */
1243         if (bus->multi_link)
1244                 intel_shim_sync_arm(sdw);
1245
1246         ret = sdw_cdns_init(cdns);
1247         if (ret < 0) {
1248                 dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
1249                 goto err_interrupt;
1250         }
1251
1252         ret = sdw_cdns_exit_reset(cdns);
1253         if (ret < 0) {
1254                 dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
1255                 goto err_interrupt;
1256         }
1257
1258         if (bus->multi_link) {
1259                 ret = intel_shim_sync_go(sdw);
1260                 if (ret < 0) {
1261                         dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
1262                         goto err_interrupt;
1263                 }
1264         }
1265         sdw_cdns_check_self_clearing_bits(cdns, __func__,
1266                                           true, INTEL_MASTER_RESET_ITERATIONS);
1267
1268         return 0;
1269
1270 err_interrupt:
1271         sdw_cdns_enable_interrupt(cdns, false);
1272         return ret;
1273 }
1274
1275 static int intel_start_bus_after_reset(struct sdw_intel *sdw)
1276 {
1277         struct device *dev = sdw->cdns.dev;
1278         struct sdw_cdns *cdns = &sdw->cdns;
1279         struct sdw_bus *bus = &cdns->bus;
1280         bool clock_stop0;
1281         int status;
1282         int ret;
1283
1284         /*
1285          * An exception condition occurs for the CLK_STOP_BUS_RESET
1286          * case if one or more masters remain active. In this condition,
1287          * all the masters are powered on for they are in the same power
1288          * domain. Master can preserve its context for clock stop0, so
1289          * there is no need to clear slave status and reset bus.
1290          */
1291         clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1292
1293         if (!clock_stop0) {
1294
1295                 /*
1296                  * make sure all Slaves are tagged as UNATTACHED and
1297                  * provide reason for reinitialization
1298                  */
1299
1300                 status = SDW_UNATTACH_REQUEST_MASTER_RESET;
1301                 sdw_clear_slave_status(bus, status);
1302
1303                 ret = sdw_cdns_enable_interrupt(cdns, true);
1304                 if (ret < 0) {
1305                         dev_err(dev, "cannot enable interrupts during resume\n");
1306                         return ret;
1307                 }
1308
1309                 /*
1310                  * follow recommended programming flows to avoid
1311                  * timeouts when gsync is enabled
1312                  */
1313                 if (bus->multi_link)
1314                         intel_shim_sync_arm(sdw);
1315
1316                 /*
1317                  * Re-initialize the IP since it was powered-off
1318                  */
1319                 sdw_cdns_init(&sdw->cdns);
1320
1321         } else {
1322                 ret = sdw_cdns_enable_interrupt(cdns, true);
1323                 if (ret < 0) {
1324                         dev_err(dev, "cannot enable interrupts during resume\n");
1325                         return ret;
1326                 }
1327         }
1328
1329         ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
1330         if (ret < 0) {
1331                 dev_err(dev, "unable to restart clock during resume\n");
1332                 goto err_interrupt;
1333         }
1334
1335         if (!clock_stop0) {
1336                 ret = sdw_cdns_exit_reset(cdns);
1337                 if (ret < 0) {
1338                         dev_err(dev, "unable to exit bus reset sequence during resume\n");
1339                         goto err_interrupt;
1340                 }
1341
1342                 if (bus->multi_link) {
1343                         ret = intel_shim_sync_go(sdw);
1344                         if (ret < 0) {
1345                                 dev_err(sdw->cdns.dev, "sync go failed during resume\n");
1346                                 goto err_interrupt;
1347                         }
1348                 }
1349         }
1350         sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
1351
1352         return 0;
1353
1354 err_interrupt:
1355         sdw_cdns_enable_interrupt(cdns, false);
1356         return ret;
1357 }
1358
1359 static void intel_check_clock_stop(struct sdw_intel *sdw)
1360 {
1361         struct device *dev = sdw->cdns.dev;
1362         bool clock_stop0;
1363
1364         clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
1365         if (!clock_stop0)
1366                 dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
1367 }
1368
1369 static int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
1370 {
1371         struct device *dev = sdw->cdns.dev;
1372         struct sdw_cdns *cdns = &sdw->cdns;
1373         int ret;
1374
1375         ret = sdw_cdns_enable_interrupt(cdns, true);
1376         if (ret < 0) {
1377                 dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
1378                 return ret;
1379         }
1380
1381         ret = sdw_cdns_clock_restart(cdns, false);
1382         if (ret < 0) {
1383                 dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
1384                 sdw_cdns_enable_interrupt(cdns, false);
1385                 return ret;
1386         }
1387
1388         sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
1389                                           true, INTEL_MASTER_RESET_ITERATIONS);
1390
1391         return 0;
1392 }
1393
1394 static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
1395 {
1396         struct device *dev = sdw->cdns.dev;
1397         struct sdw_cdns *cdns = &sdw->cdns;
1398         bool wake_enable = false;
1399         int ret;
1400
1401         if (clock_stop) {
1402                 ret = sdw_cdns_clock_stop(cdns, true);
1403                 if (ret < 0)
1404                         dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
1405                 else
1406                         wake_enable = true;
1407         }
1408
1409         ret = sdw_cdns_enable_interrupt(cdns, false);
1410         if (ret < 0) {
1411                 dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
1412                 return ret;
1413         }
1414
1415         ret = intel_link_power_down(sdw);
1416         if (ret) {
1417                 dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
1418                 return ret;
1419         }
1420
1421         intel_shim_wake(sdw, wake_enable);
1422
1423         return 0;
1424 }
1425
1426 static int sdw_master_read_intel_prop(struct sdw_bus *bus)
1427 {
1428         struct sdw_master_prop *prop = &bus->prop;
1429         struct fwnode_handle *link;
1430         char name[32];
1431         u32 quirk_mask;
1432
1433         /* Find master handle */
1434         snprintf(name, sizeof(name),
1435                  "mipi-sdw-link-%d-subproperties", bus->link_id);
1436
1437         link = device_get_named_child_node(bus->dev, name);
1438         if (!link) {
1439                 dev_err(bus->dev, "Master node %s not found\n", name);
1440                 return -EIO;
1441         }
1442
1443         fwnode_property_read_u32(link,
1444                                  "intel-sdw-ip-clock",
1445                                  &prop->mclk_freq);
1446
1447         /* the values reported by BIOS are the 2x clock, not the bus clock */
1448         prop->mclk_freq /= 2;
1449
1450         fwnode_property_read_u32(link,
1451                                  "intel-quirk-mask",
1452                                  &quirk_mask);
1453
1454         if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
1455                 prop->hw_disabled = true;
1456
1457         prop->quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
1458                 SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;
1459
1460         return 0;
1461 }
1462
1463 static int intel_prop_read(struct sdw_bus *bus)
1464 {
1465         /* Initialize with default handler to read all DisCo properties */
1466         sdw_master_read_prop(bus);
1467
1468         /* read Intel-specific properties */
1469         sdw_master_read_intel_prop(bus);
1470
1471         return 0;
1472 }
1473
1474 static struct sdw_master_ops sdw_intel_ops = {
1475         .read_prop = intel_prop_read,
1476         .override_adr = sdw_dmi_override_adr,
1477         .xfer_msg = cdns_xfer_msg,
1478         .xfer_msg_defer = cdns_xfer_msg_defer,
1479         .reset_page_addr = cdns_reset_page_addr,
1480         .set_bus_conf = cdns_bus_conf,
1481         .pre_bank_switch = intel_pre_bank_switch,
1482         .post_bank_switch = intel_post_bank_switch,
1483         .read_ping_status = cdns_read_ping_status,
1484 };
1485
1486 /*
1487  * probe and init (aux_dev_id argument is required by function prototype but not used)
1488  */
1489 static int intel_link_probe(struct auxiliary_device *auxdev,
1490                             const struct auxiliary_device_id *aux_dev_id)
1491
1492 {
1493         struct device *dev = &auxdev->dev;
1494         struct sdw_intel_link_dev *ldev = auxiliary_dev_to_sdw_intel_link_dev(auxdev);
1495         struct sdw_intel *sdw;
1496         struct sdw_cdns *cdns;
1497         struct sdw_bus *bus;
1498         int ret;
1499
1500         sdw = devm_kzalloc(dev, sizeof(*sdw), GFP_KERNEL);
1501         if (!sdw)
1502                 return -ENOMEM;
1503
1504         cdns = &sdw->cdns;
1505         bus = &cdns->bus;
1506
1507         sdw->instance = auxdev->id;
1508         sdw->link_res = &ldev->link_res;
1509         cdns->dev = dev;
1510         cdns->registers = sdw->link_res->registers;
1511         cdns->instance = sdw->instance;
1512         cdns->msg_count = 0;
1513
1514         bus->link_id = auxdev->id;
1515         bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN;
1516         bus->clk_stop_timeout = 1;
1517
1518         sdw_cdns_probe(cdns);
1519
1520         /* Set ops */
1521         bus->ops = &sdw_intel_ops;
1522
1523         /* set driver data, accessed by snd_soc_dai_get_drvdata() */
1524         auxiliary_set_drvdata(auxdev, cdns);
1525
1526         /* use generic bandwidth allocation algorithm */
1527         sdw->cdns.bus.compute_params = sdw_compute_params;
1528
1529         /* avoid resuming from pm_runtime suspend if it's not required */
1530         dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
1531
1532         ret = sdw_bus_master_add(bus, dev, dev->fwnode);
1533         if (ret) {
1534                 dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
1535                 return ret;
1536         }
1537
1538         if (bus->prop.hw_disabled)
1539                 dev_info(dev,
1540                          "SoundWire master %d is disabled, will be ignored\n",
1541                          bus->link_id);
1542         /*
1543          * Ignore BIOS err_threshold, it's a really bad idea when dealing
1544          * with multiple hardware synchronized links
1545          */
1546         bus->prop.err_threshold = 0;
1547
1548         return 0;
1549 }
1550
1551 int intel_link_startup(struct auxiliary_device *auxdev)
1552 {
1553         struct device *dev = &auxdev->dev;
1554         struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
1555         struct sdw_intel *sdw = cdns_to_intel(cdns);
1556         struct sdw_bus *bus = &cdns->bus;
1557         int link_flags;
1558         bool multi_link;
1559         u32 clock_stop_quirks;
1560         int ret;
1561
1562         if (bus->prop.hw_disabled) {
1563                 dev_info(dev,
1564                          "SoundWire master %d is disabled, ignoring\n",
1565                          sdw->instance);
1566                 return 0;
1567         }
1568
1569         link_flags = md_flags >> (bus->link_id * 8);
1570         multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
1571         if (!multi_link) {
1572                 dev_dbg(dev, "Multi-link is disabled\n");
1573         } else {
1574                 /*
1575                  * hardware-based synchronization is required regardless
1576                  * of the number of segments used by a stream: SSP-based
1577                  * synchronization is gated by gsync when the multi-master
1578                  * mode is set.
1579                  */
1580                 bus->hw_sync_min_links = 1;
1581         }
1582         bus->multi_link = multi_link;
1583
1584         /* Initialize shim, controller */
1585         ret = intel_link_power_up(sdw);
1586         if (ret)
1587                 goto err_init;
1588
1589         /* Register DAIs */
1590         ret = intel_register_dai(sdw);
1591         if (ret) {
1592                 dev_err(dev, "DAI registration failed: %d\n", ret);
1593                 goto err_power_up;
1594         }
1595
1596         intel_debugfs_init(sdw);
1597
1598         /* start bus */
1599         ret = intel_start_bus(sdw);
1600         if (ret) {
1601                 dev_err(dev, "bus start failed: %d\n", ret);
1602                 goto err_power_up;
1603         }
1604
1605         /* Enable runtime PM */
1606         if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
1607                 pm_runtime_set_autosuspend_delay(dev,
1608                                                  INTEL_MASTER_SUSPEND_DELAY_MS);
1609                 pm_runtime_use_autosuspend(dev);
1610                 pm_runtime_mark_last_busy(dev);
1611
1612                 pm_runtime_set_active(dev);
1613                 pm_runtime_enable(dev);
1614         }
1615
1616         clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1617         if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) {
1618                 /*
1619                  * To keep the clock running we need to prevent
1620                  * pm_runtime suspend from happening by increasing the
1621                  * reference count.
1622                  * This quirk is specified by the parent PCI device in
1623                  * case of specific latency requirements. It will have
1624                  * no effect if pm_runtime is disabled by the user via
1625                  * a module parameter for testing purposes.
1626                  */
1627                 pm_runtime_get_noresume(dev);
1628         }
1629
1630         /*
1631          * The runtime PM status of Slave devices is "Unsupported"
1632          * until they report as ATTACHED. If they don't, e.g. because
1633          * there are no Slave devices populated or if the power-on is
1634          * delayed or dependent on a power switch, the Master will
1635          * remain active and prevent its parent from suspending.
1636          *
1637          * Conditionally force the pm_runtime core to re-evaluate the
1638          * Master status in the absence of any Slave activity. A quirk
1639          * is provided to e.g. deal with Slaves that may be powered on
1640          * with a delay. A more complete solution would require the
1641          * definition of Master properties.
1642          */
1643         if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1644                 pm_runtime_idle(dev);
1645
1646         sdw->startup_done = true;
1647         return 0;
1648
1649 err_power_up:
1650         intel_link_power_down(sdw);
1651 err_init:
1652         return ret;
1653 }
1654
1655 static void intel_link_remove(struct auxiliary_device *auxdev)
1656 {
1657         struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
1658         struct sdw_intel *sdw = cdns_to_intel(cdns);
1659         struct sdw_bus *bus = &cdns->bus;
1660
1661         /*
1662          * Since pm_runtime is already disabled, we don't decrease
1663          * the refcount when the clock_stop_quirk is
1664          * SDW_INTEL_CLK_STOP_NOT_ALLOWED
1665          */
1666         if (!bus->prop.hw_disabled) {
1667                 intel_debugfs_exit(sdw);
1668                 sdw_cdns_enable_interrupt(cdns, false);
1669         }
1670         sdw_bus_master_delete(bus);
1671 }
1672
1673 int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
1674 {
1675         struct device *dev = &auxdev->dev;
1676         struct sdw_intel *sdw;
1677         struct sdw_bus *bus;
1678
1679         sdw = auxiliary_get_drvdata(auxdev);
1680         bus = &sdw->cdns.bus;
1681
1682         if (bus->prop.hw_disabled || !sdw->startup_done) {
1683                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1684                         bus->link_id);
1685                 return 0;
1686         }
1687
1688         if (!intel_shim_check_wake(sdw))
1689                 return 0;
1690
1691         /* disable WAKEEN interrupt ASAP to prevent interrupt flood */
1692         intel_shim_wake(sdw, false);
1693
1694         /*
1695          * resume the Master, which will generate a bus reset and result in
1696          * Slaves re-attaching and be re-enumerated. The SoundWire physical
1697          * device which generated the wake will trigger an interrupt, which
1698          * will in turn cause the corresponding Linux Slave device to be
1699          * resumed and the Slave codec driver to check the status.
1700          */
1701         pm_request_resume(dev);
1702
1703         return 0;
1704 }
1705
1706 /*
1707  * PM calls
1708  */
1709
1710 static int intel_resume_child_device(struct device *dev, void *data)
1711 {
1712         int ret;
1713         struct sdw_slave *slave = dev_to_sdw_dev(dev);
1714
1715         if (!slave->probed) {
1716                 dev_dbg(dev, "skipping device, no probed driver\n");
1717                 return 0;
1718         }
1719         if (!slave->dev_num_sticky) {
1720                 dev_dbg(dev, "skipping device, never detected on bus\n");
1721                 return 0;
1722         }
1723
1724         ret = pm_request_resume(dev);
1725         if (ret < 0)
1726                 dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);
1727
1728         return ret;
1729 }
1730
1731 static int __maybe_unused intel_pm_prepare(struct device *dev)
1732 {
1733         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1734         struct sdw_intel *sdw = cdns_to_intel(cdns);
1735         struct sdw_bus *bus = &cdns->bus;
1736         u32 clock_stop_quirks;
1737         int ret;
1738
1739         if (bus->prop.hw_disabled || !sdw->startup_done) {
1740                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1741                         bus->link_id);
1742                 return 0;
1743         }
1744
1745         clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1746
1747         if (pm_runtime_suspended(dev) &&
1748             pm_runtime_suspended(dev->parent) &&
1749             ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) ||
1750              !clock_stop_quirks)) {
1751                 /*
1752                  * if we've enabled clock stop, and the parent is suspended, the SHIM registers
1753                  * are not accessible and the shim wake cannot be disabled.
1754                  * The only solution is to resume the entire bus to full power
1755                  */
1756
1757                 /*
1758                  * If any operation in this block fails, we keep going since we don't want
1759                  * to prevent system suspend from happening and errors should be recoverable
1760                  * on resume.
1761                  */
1762
1763                 /*
1764                  * first resume the device for this link. This will also by construction
1765                  * resume the PCI parent device.
1766                  */
1767                 ret = pm_request_resume(dev);
1768                 if (ret < 0) {
1769                         dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);
1770                         return 0;
1771                 }
1772
1773                 /*
1774                  * Continue resuming the entire bus (parent + child devices) to exit
1775                  * the clock stop mode. If there are no devices connected on this link
1776                  * this is a no-op.
1777                  * The resume to full power could have been implemented with a .prepare
1778                  * step in SoundWire codec drivers. This would however require a lot
1779                  * of code to handle an Intel-specific corner case. It is simpler in
1780                  * practice to add a loop at the link level.
1781                  */
1782                 ret = device_for_each_child(bus->dev, NULL, intel_resume_child_device);
1783
1784                 if (ret < 0)
1785                         dev_err(dev, "%s: intel_resume_child_device failed: %d\n", __func__, ret);
1786         }
1787
1788         return 0;
1789 }
1790
1791 static int __maybe_unused intel_suspend(struct device *dev)
1792 {
1793         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1794         struct sdw_intel *sdw = cdns_to_intel(cdns);
1795         struct sdw_bus *bus = &cdns->bus;
1796         u32 clock_stop_quirks;
1797         int ret;
1798
1799         if (bus->prop.hw_disabled || !sdw->startup_done) {
1800                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1801                         bus->link_id);
1802                 return 0;
1803         }
1804
1805         if (pm_runtime_suspended(dev)) {
1806                 dev_dbg(dev, "pm_runtime status: suspended\n");
1807
1808                 clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1809
1810                 if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) ||
1811                     !clock_stop_quirks) {
1812
1813                         if (pm_runtime_suspended(dev->parent)) {
1814                                 /*
1815                                  * paranoia check: this should not happen with the .prepare
1816                                  * resume to full power
1817                                  */
1818                                 dev_err(dev, "%s: invalid config: parent is suspended\n", __func__);
1819                         } else {
1820                                 intel_shim_wake(sdw, false);
1821                         }
1822                 }
1823
1824                 return 0;
1825         }
1826
1827         ret = intel_stop_bus(sdw, false);
1828         if (ret < 0) {
1829                 dev_err(dev, "%s: cannot stop bus: %d\n", __func__, ret);
1830                 return ret;
1831         }
1832
1833         return 0;
1834 }
1835
1836 static int __maybe_unused intel_suspend_runtime(struct device *dev)
1837 {
1838         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1839         struct sdw_intel *sdw = cdns_to_intel(cdns);
1840         struct sdw_bus *bus = &cdns->bus;
1841         u32 clock_stop_quirks;
1842         int ret;
1843
1844         if (bus->prop.hw_disabled || !sdw->startup_done) {
1845                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1846                         bus->link_id);
1847                 return 0;
1848         }
1849
1850         clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1851
1852         if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1853                 ret = intel_stop_bus(sdw, false);
1854                 if (ret < 0) {
1855                         dev_err(dev, "%s: cannot stop bus during teardown: %d\n",
1856                                 __func__, ret);
1857                         return ret;
1858                 }
1859         } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET || !clock_stop_quirks) {
1860                 ret = intel_stop_bus(sdw, true);
1861                 if (ret < 0) {
1862                         dev_err(dev, "%s: cannot stop bus during clock_stop: %d\n",
1863                                 __func__, ret);
1864                         return ret;
1865                 }
1866         } else {
1867                 dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
1868                         __func__, clock_stop_quirks);
1869                 ret = -EINVAL;
1870         }
1871
1872         return ret;
1873 }
1874
1875 static int __maybe_unused intel_resume(struct device *dev)
1876 {
1877         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1878         struct sdw_intel *sdw = cdns_to_intel(cdns);
1879         struct sdw_bus *bus = &cdns->bus;
1880         int link_flags;
1881         int ret;
1882
1883         if (bus->prop.hw_disabled || !sdw->startup_done) {
1884                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1885                         bus->link_id);
1886                 return 0;
1887         }
1888
1889         link_flags = md_flags >> (bus->link_id * 8);
1890
1891         if (pm_runtime_suspended(dev)) {
1892                 dev_dbg(dev, "pm_runtime status was suspended, forcing active\n");
1893
1894                 /* follow required sequence from runtime_pm.rst */
1895                 pm_runtime_disable(dev);
1896                 pm_runtime_set_active(dev);
1897                 pm_runtime_mark_last_busy(dev);
1898                 pm_runtime_enable(dev);
1899
1900                 link_flags = md_flags >> (bus->link_id * 8);
1901
1902                 if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
1903                         pm_runtime_idle(dev);
1904         }
1905
1906         ret = intel_link_power_up(sdw);
1907         if (ret) {
1908                 dev_err(dev, "%s failed: %d\n", __func__, ret);
1909                 return ret;
1910         }
1911
1912         /*
1913          * make sure all Slaves are tagged as UNATTACHED and provide
1914          * reason for reinitialization
1915          */
1916         sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1917
1918         ret = intel_start_bus(sdw);
1919         if (ret < 0) {
1920                 dev_err(dev, "cannot start bus during resume\n");
1921                 intel_link_power_down(sdw);
1922                 return ret;
1923         }
1924
1925         /*
1926          * after system resume, the pm_runtime suspend() may kick in
1927          * during the enumeration, before any children device force the
1928          * master device to remain active.  Using pm_runtime_get()
1929          * routines is not really possible, since it'd prevent the
1930          * master from suspending.
1931          * A reasonable compromise is to update the pm_runtime
1932          * counters and delay the pm_runtime suspend by several
1933          * seconds, by when all enumeration should be complete.
1934          */
1935         pm_runtime_mark_last_busy(dev);
1936
1937         return 0;
1938 }
1939
1940 static int __maybe_unused intel_resume_runtime(struct device *dev)
1941 {
1942         struct sdw_cdns *cdns = dev_get_drvdata(dev);
1943         struct sdw_intel *sdw = cdns_to_intel(cdns);
1944         struct sdw_bus *bus = &cdns->bus;
1945         u32 clock_stop_quirks;
1946         int ret;
1947
1948         if (bus->prop.hw_disabled || !sdw->startup_done) {
1949                 dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
1950                         bus->link_id);
1951                 return 0;
1952         }
1953
1954         /* unconditionally disable WAKEEN interrupt */
1955         intel_shim_wake(sdw, false);
1956
1957         clock_stop_quirks = sdw->link_res->clock_stop_quirks;
1958
1959         if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
1960                 ret = intel_link_power_up(sdw);
1961                 if (ret) {
1962                         dev_err(dev, "%s: power_up failed after teardown: %d\n", __func__, ret);
1963                         return ret;
1964                 }
1965
1966                 /*
1967                  * make sure all Slaves are tagged as UNATTACHED and provide
1968                  * reason for reinitialization
1969                  */
1970                 sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
1971
1972                 ret = intel_start_bus(sdw);
1973                 if (ret < 0) {
1974                         dev_err(dev, "%s: cannot start bus after teardown: %d\n", __func__, ret);
1975                         intel_link_power_down(sdw);
1976                         return ret;
1977                 }
1978
1979
1980         } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
1981                 ret = intel_link_power_up(sdw);
1982                 if (ret) {
1983                         dev_err(dev, "%s: power_up failed after bus reset: %d\n", __func__, ret);
1984                         return ret;
1985                 }
1986
1987                 ret = intel_start_bus_after_reset(sdw);
1988                 if (ret < 0) {
1989                         dev_err(dev, "%s: cannot start bus after reset: %d\n", __func__, ret);
1990                         intel_link_power_down(sdw);
1991                         return ret;
1992                 }
1993         } else if (!clock_stop_quirks) {
1994
1995                 intel_check_clock_stop(sdw);
1996
1997                 ret = intel_link_power_up(sdw);
1998                 if (ret) {
1999                         dev_err(dev, "%s: power_up failed: %d\n", __func__, ret);
2000                         return ret;
2001                 }
2002
2003                 ret = intel_start_bus_after_clock_stop(sdw);
2004                 if (ret < 0) {
2005                         dev_err(dev, "%s: cannot start bus after clock stop: %d\n", __func__, ret);
2006                         intel_link_power_down(sdw);
2007                         return ret;
2008                 }
2009         } else {
2010                 dev_err(dev, "%s: clock_stop_quirks %x unsupported\n",
2011                         __func__, clock_stop_quirks);
2012                 ret = -EINVAL;
2013         }
2014
2015         return ret;
2016 }
2017
2018 static const struct dev_pm_ops intel_pm = {
2019         .prepare = intel_pm_prepare,
2020         SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
2021         SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
2022 };
2023
2024 static const struct auxiliary_device_id intel_link_id_table[] = {
2025         { .name = "soundwire_intel.link" },
2026         {},
2027 };
2028 MODULE_DEVICE_TABLE(auxiliary, intel_link_id_table);
2029
2030 static struct auxiliary_driver sdw_intel_drv = {
2031         .probe = intel_link_probe,
2032         .remove = intel_link_remove,
2033         .driver = {
2034                 /* auxiliary_driver_register() sets .name to be the modname */
2035                 .pm = &intel_pm,
2036         },
2037         .id_table = intel_link_id_table
2038 };
2039 module_auxiliary_driver(sdw_intel_drv);
2040
2041 MODULE_LICENSE("Dual BSD/GPL");
2042 MODULE_DESCRIPTION("Intel Soundwire Link Driver");