1 // SPDX-License-Identifier: GPL-2.0-only
3 * skl-topology.c - Implements Platform component ALSA controls/widget
6 * Copyright (C) 2014-2015 Intel Corp
7 * Author: Jeeja KP <jeeja.kp@intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 #include <linux/slab.h>
12 #include <linux/types.h>
13 #include <linux/firmware.h>
14 #include <linux/uuid.h>
15 #include <sound/intel-nhlt.h>
16 #include <sound/soc.h>
17 #include <sound/soc-acpi.h>
18 #include <sound/soc-topology.h>
19 #include <uapi/sound/snd_sst_tokens.h>
20 #include <uapi/sound/skl-tplg-interface.h>
21 #include "skl-sst-dsp.h"
22 #include "skl-sst-ipc.h"
23 #include "skl-topology.h"
25 #include "../common/sst-dsp.h"
26 #include "../common/sst-dsp-priv.h"
/* Fixup masks: which pcm params (channels / rate / format) a module fixes up */
28 #define SKL_CH_FIXUP_MASK (1 << 0)
29 #define SKL_RATE_FIXUP_MASK (1 << 1)
30 #define SKL_FMT_FIXUP_MASK (1 << 2)
/* Bit layout of a module pin descriptor: bit 0 = input direction, bits 7:4 = pin count */
31 #define SKL_IN_DIR_BIT_MASK BIT(0)
32 #define SKL_PIN_COUNT_MASK GENMASK(7, 4)
/*
 * DMIC channel-selection tables: each row lists one allowed combination of
 * physical mic indices for the given channel count (mono/stereo/trio/quatro).
 */
34 static const int mic_mono_list[] = {
37 static const int mic_stereo_list[][SKL_CH_STEREO] = {
38 {0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
40 static const int mic_trio_list[][SKL_CH_TRIO] = {
41 {0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
43 static const int mic_quatro_list[][SKL_CH_QUATRO] = {
/*
 * CHECK_HW_PARAMS - compare requested hw params against a pipe format.
 * Evaluates to non-zero when channels, bits-per-sample and frequency all
 * match. Every argument (and the whole expansion) is parenthesized so that
 * expression arguments (e.g. masked or added values) evaluate correctly;
 * the unparenthesized form broke for operands of lower precedence than '=='.
 */
#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	(((ch) == (prm_ch)) && ((bps) == (prm_bps)) && ((freq) == (prm_freq)))
/*
 * Account an opened stream's D0i3 capability: bumps the per-capability
 * refcount in skl->d0i3 (non_streaming shown here; other cases not fully
 * visible in this listing). Balanced by skl_tplg_d0i3_put().
 */
50 void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
52 struct skl_d0i3_data *d0i3 = &skl->d0i3;
59 case SKL_D0I3_STREAMING:
63 case SKL_D0I3_NON_STREAMING:
64 d0i3->non_streaming++;
/*
 * Release a stream's D0i3 capability reference: decrements the counter
 * taken by skl_tplg_d0i3_get() for the matching capability.
 */
69 void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
71 struct skl_d0i3_data *d0i3 = &skl->d0i3;
78 case SKL_D0I3_STREAMING:
82 case SKL_D0I3_NON_STREAMING:
83 d0i3->non_streaming--;
89 * SKL DSP driver modelling uses only few DAPM widgets so for rest we will
90 * ignore. This helpers checks if the SKL driver handles this widget type
92 static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
/* widgets belonging to another device's DAPM context are not handled here */
95 if (w->dapm->dev != dev)
/* these DAPM widget types are NOT modelled by the SKL DSP driver */
99 case snd_soc_dapm_dai_link:
100 case snd_soc_dapm_dai_in:
101 case snd_soc_dapm_aif_in:
102 case snd_soc_dapm_aif_out:
103 case snd_soc_dapm_dai_out:
104 case snd_soc_dapm_switch:
105 case snd_soc_dapm_output:
106 case snd_soc_dapm_mux:
/*
 * Debug helper: dump pin-0 input and output formats (channels, sample
 * frequency, channel config, valid bit depth) of a module config via dev_dbg.
 */
114 static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
116 struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];
118 dev_dbg(skl->dev, "Dumping config\n");
119 dev_dbg(skl->dev, "Input Format:\n");
120 dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
121 dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
122 dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
123 dev_dbg(skl->dev, "valid bit depth = %d\n",
124 iface->inputs[0].fmt.valid_bit_depth);
125 dev_dbg(skl->dev, "Output Format:\n");
126 dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
127 dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
128 dev_dbg(skl->dev, "valid bit depth = %d\n",
129 iface->outputs[0].fmt.valid_bit_depth);
130 dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
/*
 * Rebuild fmt->ch_map for 'chs' channels: each channel occupies one nibble
 * of the 32-bit slot map, unused slots stay 0xF (invalid).
 */
133 static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
135 int slot_map = 0xFFFFFFFF;
139 for (i = 0; i < chs; i++) {
141 * For 2 channels with starting slot as 0, slot map will
142 * look like 0xFFFFFF10.
/* clear nibble i then OR in the slot number for channel i */
144 slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
147 fmt->ch_map = slot_map;
/*
 * Apply the fixup mask to a module format: overwrite rate, channel count
 * (with chmap rebuild) and/or bit depth from the pipe params, as selected
 * by the SKL_*_FIXUP_MASK bits.
 */
150 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
151 struct skl_pipe_params *params, int fixup)
153 if (fixup & SKL_RATE_FIXUP_MASK)
154 fmt->s_freq = params->s_freq;
155 if (fixup & SKL_CH_FIXUP_MASK) {
156 fmt->channels = params->ch;
157 skl_tplg_update_chmap(fmt, fmt->channels);
159 if (fixup & SKL_FMT_FIXUP_MASK) {
160 fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
163 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
164 * container so update bit depth accordingly
166 switch (fmt->valid_bit_depth) {
167 case SKL_DEPTH_16BIT:
168 fmt->bit_depth = fmt->valid_bit_depth;
/* anything other than 16-bit travels in a 32-bit container */
172 fmt->bit_depth = SKL_DEPTH_32BIT;
180 * A pipeline may have modules which impact the pcm parameters, like SRC,
181 * channel converter, format converter.
182 * We need to calculate the output params by applying the 'fixup'
183 * Topology will tell driver which type of fixup is to be applied by
184 * supplying the fixup mask, so based on that we calculate the output
186 * Now In FE the pcm hw_params is source/target format. Same is applicable
187 * for BE with its hw_params invoked.
188 * here based on FE, BE pipeline and direction we calculate the input and
189 * outfix and then apply that for a module
/*
 * Compute in/out fixup masks for pin 0 from the module's params_fixup and
 * converter masks, then apply them. Which side gets the converter-derived
 * mask depends on stream direction and whether the pipe is FE or BE.
 */
191 static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
192 struct skl_pipe_params *params, bool is_fe)
194 int in_fixup, out_fixup;
195 struct skl_module_fmt *in_fmt, *out_fmt;
197 /* Fixups will be applied to pin 0 only */
198 in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
199 out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;
201 if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* FE playback: input follows pcm params, output keeps converted bits */
203 in_fixup = m_cfg->params_fixup;
204 out_fixup = (~m_cfg->converter) &
207 out_fixup = m_cfg->params_fixup;
208 in_fixup = (~m_cfg->converter) &
/* capture path: roles of in/out fixups are mirrored */
213 out_fixup = m_cfg->params_fixup;
214 in_fixup = (~m_cfg->converter) &
217 in_fixup = m_cfg->params_fixup;
218 out_fixup = (~m_cfg->converter) &
223 skl_tplg_update_params(in_fmt, params, in_fixup);
224 skl_tplg_update_params(out_fmt, params, out_fixup);
228 * A module needs input and output buffers, which are dependent upon pcm
229 * params, so once we have calculate params, we need buffer calculation as
/*
 * Recalculate input/output buffer sizes (ibs/obs) for pin 0 from the
 * (possibly fixed-up) formats: bytes per 1ms frame = samples/ms * channels
 * * container bytes, times a multiplier not visible in this listing.
 */
232 static void skl_tplg_update_buffer_size(struct skl_dev *skl,
233 struct skl_module_cfg *mcfg)
236 struct skl_module_fmt *in_fmt, *out_fmt;
237 struct skl_module_res *res;
239 /* Since fixups is applied to pin 0 only, ibs, obs needs
240 * change for pin 0 only
242 res = &mcfg->module->resources[mcfg->res_idx];
243 in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
244 out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;
/* SRC modules get special-cased here (body not visible in this listing) */
246 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
249 res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
250 in_fmt->channels * (in_fmt->bit_depth >> 3) *
253 res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
254 out_fmt->channels * (out_fmt->bit_depth >> 3) *
/*
 * Map an SKL_DEVICE_* backend type to the corresponding NHLT_DEVICE_*
 * id used for NHLT endpoint lookup; unknown types yield NHLT_DEVICE_INVALID.
 */
258 static u8 skl_tplg_be_dev_type(int dev_type)
264 ret = NHLT_DEVICE_BT;
267 case SKL_DEVICE_DMIC:
268 ret = NHLT_DEVICE_DMIC;
272 ret = NHLT_DEVICE_I2S;
276 ret = NHLT_DEVICE_INVALID;
/*
 * If a BE copier has no capability blob from topology, look one up in the
 * NHLT table using the widget's default format (direction/freq/bits/channels
 * derived from dev_type and connection type) and install it as the
 * SKL_PARAM_INIT config.
 */
283 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
286 struct skl_module_cfg *m_cfg = w->priv;
288 u32 ch, s_freq, s_fmt, s_cont;
289 struct nhlt_specific_cfg *cfg;
290 u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
291 int fmt_idx = m_cfg->fmt_idx;
292 struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];
294 /* check if we already have blob */
295 if (m_cfg->formats_config[SKL_PARAM_INIT].caps_size > 0)
298 dev_dbg(skl->dev, "Applying default cfg blob\n");
299 switch (m_cfg->dev_type) {
300 case SKL_DEVICE_DMIC:
/* DMIC is capture-only: take the format from input pin 0 */
301 link_type = NHLT_LINK_DMIC;
302 dir = SNDRV_PCM_STREAM_CAPTURE;
303 s_freq = m_iface->inputs[0].fmt.s_freq;
304 s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
305 s_cont = m_iface->inputs[0].fmt.bit_depth;
306 ch = m_iface->inputs[0].fmt.channels;
/* SSP/I2S: direction follows the hw connection type */
310 link_type = NHLT_LINK_SSP;
311 if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
312 dir = SNDRV_PCM_STREAM_PLAYBACK;
313 s_freq = m_iface->outputs[0].fmt.s_freq;
314 s_fmt = m_iface->outputs[0].fmt.valid_bit_depth;
315 s_cont = m_iface->outputs[0].fmt.bit_depth;
316 ch = m_iface->outputs[0].fmt.channels;
318 dir = SNDRV_PCM_STREAM_CAPTURE;
319 s_freq = m_iface->inputs[0].fmt.s_freq;
320 s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
321 s_cont = m_iface->inputs[0].fmt.bit_depth;
322 ch = m_iface->inputs[0].fmt.channels;
330 /* update the blob based on virtual bus_id and default params */
331 cfg = intel_nhlt_get_endpoint_blob(skl->dev, skl->nhlt, m_cfg->vbus_id,
332 link_type, s_fmt, s_cont, ch,
333 s_freq, dir, dev_type);
/* on success install the blob; on NULL log the lookup key for debugging */
335 m_cfg->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
336 m_cfg->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
338 dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
339 m_cfg->vbus_id, link_type, dir);
340 dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d/%d\n",
341 ch, s_freq, s_fmt, s_cont);
/*
 * Apply params fixup to a widget's module config (no-op when params_fixup
 * mask is 0), then recompute buffer sizes; dumps the config before and
 * after for debugging.
 */
348 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
351 struct skl_module_cfg *m_cfg = w->priv;
352 struct skl_pipe_params *params = m_cfg->pipe->p_params;
353 int p_conn_type = m_cfg->pipe->conn_type;
356 if (!m_cfg->params_fixup)
359 dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE updation\n",
362 skl_dump_mconfig(skl, m_cfg);
/* FE pipes use the FE-variant of the fixup calculation */
364 if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
369 skl_tplg_update_params_fixup(m_cfg, params, is_fe);
370 skl_tplg_update_buffer_size(skl, m_cfg);
372 dev_dbg(skl->dev, "Mconfig for widget=%s AFTER updation\n",
375 skl_dump_mconfig(skl, m_cfg);
379 * some modules can have multiple params set from user control and
380 * need to be set after module is initialized. If set_param flag is
381 * set module params will be done after module is initialised.
/*
 * Send post-init parameters to the DSP: first the topology-supplied
 * SKL_PARAM_SET blob (if present), then any TLV kcontrol payloads whose
 * set_params flag is SKL_PARAM_SET.
 */
383 static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
387 struct skl_module_cfg *mconfig = w->priv;
388 const struct snd_kcontrol_new *k;
389 struct soc_bytes_ext *sb;
390 struct skl_algo_data *bc;
391 struct skl_specific_cfg *sp_cfg;
393 if (mconfig->formats_config[SKL_PARAM_SET].caps_size > 0 &&
394 mconfig->formats_config[SKL_PARAM_SET].set_params == SKL_PARAM_SET) {
395 sp_cfg = &mconfig->formats_config[SKL_PARAM_SET];
396 ret = skl_set_module_params(skl, sp_cfg->caps,
398 sp_cfg->param_id, mconfig);
/* scan the widget's kcontrols for TLV byte controls carrying params */
403 for (i = 0; i < w->num_kcontrols; i++) {
404 k = &w->kcontrol_news[i];
405 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
406 sb = (void *) k->private_value;
407 bc = (struct skl_algo_data *)sb->dobj.private;
409 if (bc->set_params == SKL_PARAM_SET) {
410 ret = skl_set_module_params(skl,
411 (u32 *)bc->params, bc->size,
412 bc->param_id, mconfig);
423 * some module param can set from user control and this is required as
424 * when module is initailzed. if module param is required in init it is
425 * identifed by set_param flag. if set_param flag is not set, then this
426 * parameter needs to set as part of module init.
/*
 * For TLV kcontrols flagged SKL_PARAM_INIT, point the module's INIT config
 * at the control's parameter payload so it is sent with module init rather
 * than as a later set_params IPC.
 */
428 static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
430 const struct snd_kcontrol_new *k;
431 struct soc_bytes_ext *sb;
432 struct skl_algo_data *bc;
433 struct skl_module_cfg *mconfig = w->priv;
436 for (i = 0; i < w->num_kcontrols; i++) {
437 k = &w->kcontrol_news[i];
438 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
439 sb = (struct soc_bytes_ext *)k->private_value;
440 bc = (struct skl_algo_data *)sb->dobj.private;
442 if (bc->set_params != SKL_PARAM_INIT)
445 mconfig->formats_config[SKL_PARAM_INIT].caps =
447 mconfig->formats_config[SKL_PARAM_INIT].caps_size =
/*
 * Prepare the DMA for gateway copier modules: host-DMA for HDA host
 * devices, link-DMA for HDA link devices; other device types need no prep.
 */
457 static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe,
458 struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
460 switch (mcfg->dev_type) {
461 case SKL_DEVICE_HDAHOST:
462 return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params);
464 case SKL_DEVICE_HDALINK:
465 return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params);
472 * Inside a pipe instance, we can have various modules. These modules need
473 * to instantiated in DSP by invoking INIT_MODULE IPC, which is achieved by
474 * skl_init_module() routine, so invoke that for all modules in a pipeline
/*
 * Instantiate every module of a pipe in the DSP, source to sink: validate
 * module ids, select fmt/res config, load loadable modules, prepare DMA,
 * apply BE blob defaults and param fixups, allot a private id, wake the
 * target core, send INIT_MODULE, then push post-init params.
 */
477 skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe)
479 struct skl_pipe_module *w_module;
480 struct snd_soc_dapm_widget *w;
481 struct skl_module_cfg *mconfig;
485 list_for_each_entry(w_module, &pipe->w_list, node) {
490 /* check if module ids are populated */
491 if (mconfig->id.module_id < 0) {
493 "module %pUL id not populated\n",
494 (guid_t *)mconfig->guid;
498 cfg_idx = mconfig->pipe->cur_config_idx;
499 mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
500 mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
502 if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) {
503 ret = skl->dsp->fw_ops.load_mod(skl->dsp,
504 mconfig->id.module_id, mconfig->guid);
509 /* prepare the DMA if the module is gateway cpr */
510 ret = skl_tplg_module_prepare(skl, pipe, w, mconfig);
514 /* update blob if blob is null for be with default value */
515 skl_tplg_update_be_blob(w, skl);
518 * apply fix/conversion to module params based on
521 skl_tplg_update_module_params(w, skl);
522 uuid_mod = (guid_t *)mconfig->guid;
523 mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod,
524 mconfig->id.instance_id);
525 if (mconfig->id.pvt_id < 0)
527 skl_tplg_set_module_init_data(w);
529 ret = skl_dsp_get_core(skl->dsp, mconfig->core_id);
531 dev_err(skl->dev, "Failed to wake up core %d ret=%d\n",
532 mconfig->core_id, ret);
536 ret = skl_init_module(skl, mconfig);
/* on init failure the private id must be released again */
538 skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
542 ret = skl_tplg_set_module_params(w, skl);
/* error path: put the core reference taken above */
549 skl_dsp_put_core(skl->dsp, mconfig->core_id);
/*
 * Tear-down counterpart of skl_tplg_init_pipe_modules(): for each module
 * in the pipe, unload it if loadable, release its private id and drop the
 * DSP core reference. Core-sleep failures are logged but not fatal.
 */
553 static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
554 struct skl_pipe *pipe)
557 struct skl_pipe_module *w_module;
558 struct skl_module_cfg *mconfig;
560 list_for_each_entry(w_module, &pipe->w_list, node) {
562 mconfig = w_module->w->priv;
563 uuid_mod = (guid_t *)mconfig->guid;
565 if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod) {
566 ret = skl->dsp->fw_ops.unload_mod(skl->dsp,
567 mconfig->id.module_id);
571 skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
573 ret = skl_dsp_put_core(skl->dsp, mconfig->core_id);
575 /* don't return; continue with other modules */
576 dev_err(skl->dev, "Failed to sleep core %d ret=%d\n",
577 mconfig->core_id, ret);
581 /* no modules to unload in this path, so return */
/*
 * Return whether an FE pipe carries more than one distinct format across
 * its configs: false for <=1 config or non-FE pipes; otherwise compare
 * adjacent configs (out_fmt for playback, in_fmt for capture).
 */
585 static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe)
587 struct skl_pipe_fmt *cur_fmt;
588 struct skl_pipe_fmt *next_fmt;
591 if (pipe->nr_cfgs <= 1)
594 if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
597 for (i = 0; i < pipe->nr_cfgs - 1; i++) {
598 if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) {
599 cur_fmt = &pipe->configs[i].out_fmt;
600 next_fmt = &pipe->configs[i + 1].out_fmt;
602 cur_fmt = &pipe->configs[i].in_fmt;
603 next_fmt = &pipe->configs[i + 1].in_fmt;
/* any pair differing in ch/freq (and bps, per the macro) => multi-fmt */
606 if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq,
618 * Here, we select pipe format based on the pipe type and pipe
619 * direction to determine the current config index for the pipeline.
620 * The config index is then used to select proper module resources.
621 * Intermediate pipes currently have a fixed format hence we select the
622 * 0th configuratation by default for such pipes.
/*
 * Select pipe->cur_config_idx (and memory_pages) for this pipe:
 *  - no configs: index 0 and done;
 *  - multi-format FE pipe: use the user-chosen pipe_config_idx;
 *  - no conn_type or single config: index 0;
 *  - otherwise scan configs for one whose fmt matches the pcm params,
 *    comparing in_fmt or out_fmt depending on FE/BE type and direction.
 * Errors out when no config matches.
 */
625 skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
627 struct skl_pipe *pipe = mconfig->pipe;
628 struct skl_pipe_params *params = pipe->p_params;
629 struct skl_path_config *pconfig = &pipe->configs[0];
630 struct skl_pipe_fmt *fmt = NULL;
634 if (pipe->nr_cfgs == 0) {
635 pipe->cur_config_idx = 0;
639 if (skl_tplg_is_multi_fmt(skl, pipe)) {
640 pipe->cur_config_idx = pipe->pipe_config_idx;
641 pipe->memory_pages = pconfig->mem_pages;
642 dev_dbg(skl->dev, "found pipe config idx:%d\n",
643 pipe->cur_config_idx);
647 if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE || pipe->nr_cfgs == 1) {
648 dev_dbg(skl->dev, "No conn_type or just 1 pathcfg, taking 0th for %d\n",
650 pipe->cur_config_idx = 0;
651 pipe->memory_pages = pconfig->mem_pages;
/* FE-playback and BE-capture pipes match against the input format side */
656 if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
657 pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
658 (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
659 pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
662 for (i = 0; i < pipe->nr_cfgs; i++) {
663 pconfig = &pipe->configs[i];
665 fmt = &pconfig->in_fmt;
667 fmt = &pconfig->out_fmt;
669 if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
670 fmt->channels, fmt->freq, fmt->bps)) {
671 pipe->cur_config_idx = i;
672 pipe->memory_pages = pconfig->mem_pages;
673 dev_dbg(skl->dev, "Using pipe config: %d\n", i);
679 dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
680 params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
685 * Mixer module represents a pipeline. So in the Pre-PMU event of mixer we
686 * need create the pipeline. So we do following:
687 * - Create the pipeline
688 * - Initialize the modules in pipeline
689 * - finally bind all modules together
/*
 * Mixer PRE_PMU: pick the pipe config, create the pipeline in the DSP,
 * init all its modules, chain-bind them source to sink, then bind any
 * modules waiting on this pipe in the deferred bind list.
 */
691 static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
695 struct skl_module_cfg *mconfig = w->priv;
696 struct skl_pipe_module *w_module;
697 struct skl_pipe *s_pipe = mconfig->pipe;
698 struct skl_module_cfg *src_module = NULL, *dst_module, *module;
699 struct skl_module_deferred_bind *modules;
701 ret = skl_tplg_get_pipe_config(skl, mconfig);
706 * Create a list of modules for pipe.
707 * This list contains modules from source to sink
709 ret = skl_create_pipeline(skl, mconfig->pipe);
713 /* Init all pipe modules from source to sink */
714 ret = skl_tplg_init_pipe_modules(skl, s_pipe);
718 /* Bind modules from source to sink */
719 list_for_each_entry(w_module, &s_pipe->w_list, node) {
720 dst_module = w_module->w->priv;
/* first widget has no predecessor to bind to */
722 if (src_module == NULL) {
723 src_module = dst_module;
727 ret = skl_bind_modules(skl, src_module, dst_module);
731 src_module = dst_module;
735 * When the destination module is initialized, check for these modules
736 * in deferred bind list. If found, bind them.
738 list_for_each_entry(w_module, &s_pipe->w_list, node) {
739 if (list_empty(&skl->bind_list))
742 list_for_each_entry(modules, &skl->bind_list, node) {
743 module = w_module->w->priv;
744 if (modules->dst == module)
745 skl_bind_modules(skl, modules->src,
/*
 * For KPB modules, rewrite each entry of the instance-id map in the param
 * payload with the DSP-private instance id resolved from (mod_id, inst_id).
 */
753 static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
754 int size, struct skl_module_cfg *mcfg)
758 if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
759 struct skl_kpb_params *kpb_params =
760 (struct skl_kpb_params *)params;
761 struct skl_mod_inst_map *inst = kpb_params->u.map;
763 for (i = 0; i < kpb_params->num_modules; i++) {
764 pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
769 inst->inst_id = pvt_id;
777 * Some modules require params to be set after the module is bound to
778 * all pins connected.
780 * The module provider initializes set_param flag for such modules and we
781 * send params after binding
/*
 * Send SKL_PARAM_BIND parameters once ALL of the module's in/out pins are
 * bound: first the topology BIND blob if present, then TLV kcontrol
 * payloads (copied so KPB instance ids can be patched in before sending).
 */
783 static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
784 struct skl_module_cfg *mcfg, struct skl_dev *skl)
787 struct skl_module_cfg *mconfig = w->priv;
788 const struct snd_kcontrol_new *k;
789 struct soc_bytes_ext *sb;
790 struct skl_algo_data *bc;
791 struct skl_specific_cfg *sp_cfg;
795 * check all out/in pins are in bind state.
796 * if so set the module param
798 for (i = 0; i < mcfg->module->max_output_pins; i++) {
799 if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
803 for (i = 0; i < mcfg->module->max_input_pins; i++) {
804 if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
808 if (mconfig->formats_config[SKL_PARAM_BIND].caps_size > 0 &&
809 mconfig->formats_config[SKL_PARAM_BIND].set_params ==
811 sp_cfg = &mconfig->formats_config[SKL_PARAM_BIND];
812 ret = skl_set_module_params(skl, sp_cfg->caps,
814 sp_cfg->param_id, mconfig);
819 for (i = 0; i < w->num_kcontrols; i++) {
820 k = &w->kcontrol_news[i];
821 if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
822 sb = (void *) k->private_value;
823 bc = (struct skl_algo_data *)sb->dobj.private;
825 if (bc->set_params == SKL_PARAM_BIND) {
/* work on a copy: skl_fill_sink_instance_id patches it in place */
826 params = kmemdup(bc->params, bc->max, GFP_KERNEL);
830 skl_fill_sink_instance_id(skl, params, bc->max,
833 ret = skl_set_module_params(skl, params,
834 bc->max, bc->param_id, mconfig);
/*
 * Look up a module id by UUID in skl->uuid_list (return value on miss not
 * visible in this listing).
 */
846 static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
848 struct uuid_module *module;
850 list_for_each_entry(module, &skl->uuid_list, list) {
851 if (guid_equal(uuid, &module->uuid))
/*
 * Convert a BIND-param payload keyed by module UUIDs into one keyed by
 * module ids: allocate a new skl_kpb_params, resolve each UUID via
 * skl_get_module_id(), then replace bc->params with the converted table.
 */
858 static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
859 const struct snd_kcontrol_new *k)
861 struct soc_bytes_ext *sb = (void *) k->private_value;
862 struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
863 struct skl_kpb_params *uuid_params, *params;
864 struct hdac_bus *bus = skl_to_bus(skl);
865 int i, size, module_id;
867 if (bc->set_params == SKL_PARAM_BIND && bc->max) {
868 uuid_params = (struct skl_kpb_params *)bc->params;
869 size = struct_size(params, u.map, uuid_params->num_modules);
871 params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
875 params->num_modules = uuid_params->num_modules;
877 for (i = 0; i < uuid_params->num_modules; i++) {
878 module_id = skl_get_module_id(skl,
879 &uuid_params->u.map_uuid[i].mod_uuid);
/* unresolved UUID: free the partial table and bail out */
881 devm_kfree(bus->dev, params);
885 params->u.map[i].mod_id = module_id;
886 params->u.map[i].inst_id =
887 uuid_params->u.map_uuid[i].inst_id;
/* swap the UUID-keyed payload for the id-keyed one */
890 devm_kfree(bus->dev, bc->params);
891 bc->params = (char *)params;
899 * Retrieve the module id from UUID mentioned in the
/*
 * For KPB widgets only: walk the TLV kcontrols and resolve UUID-based
 * post-bind params to module ids, logging when a payload is invalid.
 */
902 void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
903 struct snd_soc_dapm_widget *w)
905 struct skl_module_cfg *mconfig = w->priv;
909 * Post bind params are used for only for KPB
910 * to set copier instances to drain the data
913 if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
916 for (i = 0; i < w->num_kcontrols; i++)
917 if ((w->kcontrol_news[i].access &
918 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
919 (skl_tplg_find_moduleid_from_uuid(skl,
920 &w->kcontrol_news[i]) < 0))
922 "%s: invalid kpb post bind params\n",
/*
 * Queue a (src, dst) module pair on skl->bind_list for binding later, when
 * dst becomes initialized. Only applies when dst has a static input pin
 * wired to src; duplicates already on the list are skipped.
 */
926 static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
927 struct skl_module_cfg *src, struct skl_module_cfg *dst)
929 struct skl_module_deferred_bind *m_list, *modules;
932 /* only supported for module with static pin connection */
933 for (i = 0; i < dst->module->max_input_pins; i++) {
934 struct skl_module_pin *pin = &dst->m_in_pin[i];
939 if ((pin->id.module_id == src->id.module_id) &&
940 (pin->id.instance_id == src->id.instance_id)) {
/* don't add the same src/dst pair twice */
942 if (!list_empty(&skl->bind_list)) {
943 list_for_each_entry(modules, &skl->bind_list, node) {
944 if (modules->src == src && modules->dst == dst)
949 m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
956 list_add(&m_list->node, &skl->bind_list);
/*
 * Walk the DAPM graph downstream from 'w' and bind src_mconfig to each SKL
 * sink found (recursing through non-SKL widgets). Uninitialized sinks are
 * parked on the deferred bind list; after a successful bind the sink's
 * pipe is started if it is a BE pipe not yet running.
 */
963 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
965 struct snd_soc_dapm_widget *src_w,
966 struct skl_module_cfg *src_mconfig)
968 struct snd_soc_dapm_path *p;
969 struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
970 struct skl_module_cfg *sink_mconfig;
973 snd_soc_dapm_widget_for_each_sink_path(w, p) {
978 "%s: src widget=%s\n", __func__, w->name);
980 "%s: sink widget=%s\n", __func__, p->sink->name);
/* non-SKL sink: recurse one level further down the graph */
984 if (!is_skl_dsp_widget_type(p->sink, skl->dev))
985 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
988 * here we will check widgets in sink pipelines, so that
989 * can be any widgets type and we are only interested if
990 * they are ones used for SKL so check that first
992 if ((p->sink->priv != NULL) &&
993 is_skl_dsp_widget_type(p->sink, skl->dev)) {
996 sink_mconfig = sink->priv;
999 * Modules other than PGA leaf can be connected
1000 * directly or via switch to a module in another
1001 * pipeline. EX: reference path
1002 * when the path is enabled, the dst module that needs
1003 * to be bound may not be initialized. if the module is
1004 * not initialized, add these modules in the deferred
1005 * bind list and when the dst module is initialised,
1006 * bind this module to the dst_module in deferred list.
1008 if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
1009 && (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {
1011 ret = skl_tplg_module_add_deferred_bind(skl,
1012 src_mconfig, sink_mconfig);
/* can't bind yet if either end is still uninitialized */
1020 if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
1021 sink_mconfig->m_state == SKL_MODULE_UNINIT)
1024 /* Bind source to sink, mixin is always source */
1025 ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
1029 /* set module params after bind */
1030 skl_tplg_set_module_bind_params(src_w,
1032 skl_tplg_set_module_bind_params(sink,
1035 /* Start sinks pipe first */
1036 if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
1037 if (sink_mconfig->pipe->conn_type !=
1038 SKL_PIPE_CONN_TYPE_FE)
1039 ret = skl_run_pipe(skl,
1040 sink_mconfig->pipe);
/* no SKL sink on this level: continue down via the saved next_sink */
1047 if (!sink && next_sink)
1048 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
1054 * A PGA represents a module in a pipeline. So in the Pre-PMU event of PGA
1055 * we need to do following:
1056 * - Bind to sink pipeline
1057 * Since the sink pipes can be running and we don't get mixer event on
1058 * connect for already running mixer, we need to find the sink pipes
1059 * here and bind to them. This way dynamic connect works.
1060 * - Start sink pipeline, if not running
1061 * - Then run current pipe
/*
 * PGA PRE_PMU: bind this widget's module to its downstream sinks (starting
 * their pipes as needed), then run the source pipe last if it is a BE pipe.
 */
1063 static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
1064 struct skl_dev *skl)
1066 struct skl_module_cfg *src_mconfig;
1069 src_mconfig = w->priv;
1072 * find which sink it is connected to, bind with the sink,
1073 * if sink is not started, start sink pipe first, then start
1076 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
1080 /* Start source pipe last after starting all sinks */
1081 if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1082 return skl_run_pipe(skl, src_mconfig->pipe);
/*
 * Walk the DAPM graph upstream from 'w' and return the nearest SKL DSP
 * source widget, recursing through intermediate non-SKL widgets.
 */
1087 static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
1088 struct snd_soc_dapm_widget *w, struct skl_dev *skl)
1090 struct snd_soc_dapm_path *p;
1091 struct snd_soc_dapm_widget *src_w = NULL;
1093 snd_soc_dapm_widget_for_each_source_path(w, p) {
1098 dev_dbg(skl->dev, "sink widget=%s\n", w->name);
1099 dev_dbg(skl->dev, "src widget=%s\n", p->source->name);
1102 * here we will check widgets in sink pipelines, so that can
1103 * be any widgets type and we are only interested if they are
1104 * ones used for SKL so check that first
1106 if ((p->source->priv != NULL) &&
1107 is_skl_dsp_widget_type(p->source, skl->dev)) {
/* not an SKL widget at this level: recurse further upstream */
1113 return skl_get_src_dsp_widget(src_w, skl);
1119 * in the Post-PMU event of mixer we need to do following:
1120 * - Check if this pipe is running
1122 * - bind this pipeline to its source pipeline
1123 * if source pipe is already running, this means it is a dynamic
1124 * connection and we need to bind only to that pipe
1125 * - start this pipeline
/*
 * Mixer POST_PMU: if an upstream SKL source pipe is already running
 * (dynamic connection), bind source to this sink, push bind params and
 * run the sink pipe when it is a BE pipe.
 */
1127 static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
1128 struct skl_dev *skl)
1131 struct snd_soc_dapm_widget *source, *sink;
1132 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1133 int src_pipe_started = 0;
1136 sink_mconfig = sink->priv;
1139 * If source pipe is already started, that means source is driving
1140 * one more sink before this sink got connected, Since source is
1141 * started, bind this sink to source and start this pipe.
1143 source = skl_get_src_dsp_widget(w, skl);
1144 if (source != NULL) {
1145 src_mconfig = source->priv;
1146 sink_mconfig = sink->priv;
1147 src_pipe_started = 1;
1150 * check pipe state, then no need to bind or start the
1153 if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
1154 src_pipe_started = 0;
1157 if (src_pipe_started) {
1158 ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
1162 /* set module params after bind */
1163 skl_tplg_set_module_bind_params(source, src_mconfig, skl);
1164 skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);
1166 if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
1167 ret = skl_run_pipe(skl, sink_mconfig->pipe);
1174 * in the Pre-PMD event of mixer we need to do following:
1176 * - find the source connections and remove that from dapm_path_list
1177 * - unbind with source pipelines if still connected
/*
 * Mixer PRE_PMD: stop this widget's pipe, then unbind every upstream
 * module still connected on a bound input pin.
 */
1179 static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
1180 struct skl_dev *skl)
1182 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1185 sink_mconfig = w->priv;
1188 ret = skl_stop_pipe(skl, sink_mconfig->pipe);
1192 for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
1193 if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1194 src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
1198 ret = skl_unbind_modules(skl,
1199 src_mconfig, sink_mconfig);
1207 * in the Post-PMD event of mixer we need to do following:
1208 * - Unbind the modules within the pipeline
1209 * - Delete the pipeline (modules are not required to be explicitly
1210 * deleted, pipeline delete is enough here
/*
 * Mixer POST_PMD: tear the pipeline down. Clean deferred-bind entries
 * touching this pipe's modules (unbind when the pipe module is the dst,
 * delete the entry when it is the src), unbind the module chain, delete
 * the DSP pipeline, mark all modules UNINIT and unload them.
 */
1212 static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1213 struct skl_dev *skl)
1215 struct skl_module_cfg *mconfig = w->priv;
1216 struct skl_pipe_module *w_module;
1217 struct skl_module_cfg *src_module = NULL, *dst_module;
1218 struct skl_pipe *s_pipe = mconfig->pipe;
1219 struct skl_module_deferred_bind *modules, *tmp;
1221 if (s_pipe->state == SKL_PIPE_INVALID)
1224 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1225 if (list_empty(&skl->bind_list))
1228 src_module = w_module->w->priv;
1230 list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
1232 * When the destination module is deleted, Unbind the
1233 * modules from deferred bind list.
1235 if (modules->dst == src_module) {
1236 skl_unbind_modules(skl, modules->src,
1241 * When the source module is deleted, remove this entry
1242 * from the deferred bind list.
1244 if (modules->src == src_module) {
1245 list_del(&modules->node);
1246 modules->src = NULL;
1247 modules->dst = NULL;
/* unbind the source-to-sink chain built at pre-PMU time */
1253 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1254 dst_module = w_module->w->priv;
1256 if (src_module == NULL) {
1257 src_module = dst_module;
1261 skl_unbind_modules(skl, src_module, dst_module);
1262 src_module = dst_module;
1265 skl_delete_pipe(skl, mconfig->pipe);
1267 list_for_each_entry(w_module, &s_pipe->w_list, node) {
1268 src_module = w_module->w->priv;
1269 src_module->m_state = SKL_MODULE_UNINIT;
1272 return skl_tplg_unload_pipe_modules(skl, s_pipe);
1276 * in the Post-PMD event of PGA we need to do following:
1277 * - Stop the pipeline
1278 * - In source pipe is connected, unbind with source pipelines
/*
 * PGA POST_PMD: stop this (mixin) module's pipe, then unbind it from every
 * sink still connected on a bound output pin.
 */
1280 static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
1281 struct skl_dev *skl)
1283 struct skl_module_cfg *src_mconfig, *sink_mconfig;
1286 src_mconfig = w->priv;
1288 /* Stop the pipe since this is a mixin module */
1289 ret = skl_stop_pipe(skl, src_mconfig->pipe);
1293 for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
1294 if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
1295 sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
1299 * This is a connecter and if path is found that means
1300 * unbind between source and sink has not happened yet
1302 ret = skl_unbind_modules(skl, src_mconfig,
1311 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
1312 * second one is required that is created as another pipe entity.
1313 * The mixer is responsible for pipe management and represent a pipeline
/*
 * DAPM event dispatcher for mixer widgets: route each PMU/PMD phase to the
 * matching pre/post handler above.
 */
1316 static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
1317 struct snd_kcontrol *k, int event)
1319 struct snd_soc_dapm_context *dapm = w->dapm;
1320 struct skl_dev *skl = get_skl_ctx(dapm->dev);
1323 case SND_SOC_DAPM_PRE_PMU:
1324 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
1326 case SND_SOC_DAPM_POST_PMU:
1327 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
1329 case SND_SOC_DAPM_PRE_PMD:
1330 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
1332 case SND_SOC_DAPM_POST_PMD:
1333 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
1340 * In modelling, we assumed rest of the modules in pipeline are PGA. But we
1341 * are interested in last PGA (leaf PGA) in a pipeline to disconnect with
1342 * the sink when it is running (two FE to one BE or one FE to two BE)
/*
 * DAPM event dispatcher for PGA widgets: only PRE_PMU (bind/start) and
 * POST_PMD (stop/unbind) are handled.
 */
1345 static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
1346 struct snd_kcontrol *k, int event)
1349 struct snd_soc_dapm_context *dapm = w->dapm;
1350 struct skl_dev *skl = get_skl_ctx(dapm->dev);
1353 case SND_SOC_DAPM_PRE_PMU:
1354 return skl_tplg_pga_dapm_pre_pmu_event(w, skl);
1356 case SND_SOC_DAPM_POST_PMD:
1357 return skl_tplg_pga_dapm_post_pmd_event(w, skl);
/*
 * Common backend for the multi-config enum kcontrols: locate the pipe whose
 * id was stashed in the enum's dobj at topology load, then either store the
 * user's selection into pipe_config_idx (set path) or report the current
 * value (get path).
 */
static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
		struct snd_ctl_elem_value *ucontrol,
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe = NULL;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	/* reject out-of-range selections when setting */
	if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
	pipe_id = ec->dobj.private;
	/* find the pipe matching the id recorded for this control */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == *pipe_id) {
	pipe->pipe_config_idx = ucontrol->value.enumerated.item[0];
	ucontrol->value.enumerated.item[0] = pipe->pipe_config_idx;
/* ALSA get callback: report the selected multi-IO pipe configuration */
static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
/* ALSA put callback: store the selected multi-IO pipe configuration */
static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
/* ALSA get callback for the DMIC variant of the multi-IO config control */
static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
/* ALSA put callback for the DMIC variant of the multi-IO config control */
static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
/*
 * TLV bytes-ext get callback: refresh the cached algo params from the DSP
 * module and copy a {param_id, size, payload} TLV out to user space.
 */
static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
	/* pull current parameter values from the firmware module */
	skl_get_module_params(skl, (u32 *)bc->params,
				bc->size, bc->param_id, mconfig);
	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);
	/* check size as we don't want to send kernel data */
	/* TLV layout: word 0 = param id, word 1 = payload size, then data */
	if (copy_to_user(data, &bc->param_id, sizeof(u32)))
	if (copy_to_user(data + 1, &size, sizeof(u32)))
	if (copy_to_user(data + 2, bc->params, size))
1458 #define SKL_PARAM_VENDOR_ID 0xff
/*
 * TLV bytes-ext put callback: copy the user payload into the cached algo
 * data and push it down to the DSP module.
 */
static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
	/* stash the user-provided blob locally, then apply it to the DSP */
	if (copy_from_user(ac->params, data, size))
	return skl_set_module_params(skl,
				(u32 *)ac->params, ac->size,
				ac->param_id, mconfig);
/*
 * Mic-select enum get callback: report the stored channel-combo index when
 * the control's channel type matches the module's current type, else 0.
 */
static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	/* channel type (mono/stereo/...) was stashed in the enum's dobj */
	u32 ch_type = *((u32 *)ec->dobj.private);
	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
				mconfig->dmic_ch_combo_index;
		ucontrol->value.enumerated.item[0] = 0;
/*
 * Copy the constructed mic-select blob into the module's INIT-time
 * specific config, allocating the caps buffer on first use.
 */
static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
			struct skl_mic_sel_config *mic_cfg, struct device *dev)
	struct skl_specific_cfg *sp_cfg =
			&mconfig->formats_config[SKL_PARAM_INIT];
	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	/* devm allocation: freed automatically with the device */
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
/*
 * Mic-select enum put callback: translate the selected channel-combo index
 * into a per-channel gain blob (via the mic_*_list lookup tables) and hand
 * it to skl_fill_mic_sel_params().
 */
static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	u8 in_ch, out_ch, index;
	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;
	/* pick the combo table matching the channel type; bound-check first */
	if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
		list = &mic_mono_list[index];
	if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
		list = mic_stereo_list[index];
	if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
		list = mic_trio_list[index];
	if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
		list = mic_quatro_list[index];
	dev_err(w->dapm->dev,
		"Invalid channel %d for mic_select module\n",
	/* channel type enum maps to the number of channels for that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
 * Fill the DMA id for host and link. In the case of a passthrough
 * pipeline, both the host and the link are in the same
 * pipeline, so copy the link and host settings based on dev_type
/*
 * Propagate DAI hw_params into the pipe's parameter block. For passthrough
 * pipes only the fields relevant to the module's device type (link vs host)
 * are copied; otherwise the whole params struct is copied.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
	struct skl_pipe *pipe = mcfg->pipe;
	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			/* link-side DMA settings only */
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
		case SKL_DEVICE_HDAHOST:
			/* host-side DMA settings only */
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
		/* stream format fields are common to both device types */
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;
	/* non-passthrough: take everything */
	memcpy(pipe->p_params, params, sizeof(*params));
 * The FE params are passed by hw_params of the DAI.
 * On hw_params, the params are stored in the Gateway module of the FE and we
 * need to calculate the format for the DSP module configuration; that
 * conversion is done here
/*
 * Apply FE hw_params to the copier module's format and recompute its
 * input/output buffer sizes (ibs/obs) for the selected pipe configuration.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
	struct skl_module_res *res;
	struct skl_dev *skl = get_skl_ctx(dev);
	struct skl_module_fmt *format = NULL;
	u8 cfg_idx = mconfig->pipe->cur_config_idx;
	res = &mconfig->module->resources[mconfig->res_idx];
	skl_tplg_fill_dma_id(mconfig, params);
	/* switch fmt/res indices to the ones of the active pipe config */
	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
	/* manifest-provided modules carry their own formats */
	if (skl->nr_modules)
	/* playback updates the input format, capture the output format */
	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
		format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;
	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
	/*
	 * 16 bit is 16 bit container whereas 24 bit is in 32 bit
	 * container so update bit depth accordingly
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
	dev_err(dev, "Invalid bit depth %x for pipe\n",
			format->valid_bit_depth);
	/* ibs/obs = bytes per millisecond of audio at this format */
	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		res->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
		res->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
1697 * Query the module config for the FE DAI
1698 * This is used to find the hw_params set for that DAI and apply to FE
/*
 * Find the copier (gateway) module connected to an FE DAI by walking the
 * powered DAPM path from the DAI widget, and return its module config.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_path *p = NULL;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		/* playback: follow sink paths out of the DAI widget */
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
					!is_skl_dsp_widget_type(p->sink, dai->dev))
			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
				return p->sink->priv;
		w = dai->capture_widget;
		/* capture: follow source paths into the DAI widget */
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
					!is_skl_dsp_widget_type(p->source, dai->dev))
			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
				return p->source->priv;
/*
 * Recursively search the playback graph for the copier feeding an AIF-out
 * widget; recursion follows each source path until the endpoint is found.
 */
static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;
	snd_soc_dapm_widget_for_each_source_path(w, p) {
		/* only widgets that terminate at an output endpoint qualify */
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			(p->sink->id == snd_soc_dapm_aif_out) &&
			mconfig = p->source->priv;
			/* not found here: recurse one hop towards the source */
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
/*
 * Capture-side counterpart of skl_get_mconfig_pb_cpr(): search sink paths
 * for the copier fed by an AIF-in widget.
 */
static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;
	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		/* only widgets that terminate at an input endpoint qualify */
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			(p->source->id == snd_soc_dapm_aif_in) &&
			mconfig = p->sink->priv;
			/* not found here: recurse one hop towards the sink */
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
/*
 * Return the copier module config attached to a BE DAI, searching the
 * playback or capture graph depending on the stream direction.
 */
struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
		w = dai->capture_widget;
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
/*
 * Map an SKL device type to its NHLT link type, used to look up the BE
 * gateway blob in the ACPI NHLT table.
 */
static u8 skl_tplg_be_link_type(int dev_type)
		ret = NHLT_LINK_SSP;
	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
	/* anything else has no NHLT representation */
		ret = NHLT_LINK_INVALID;
1828 * Fill the BE gateway parameters
1829 * The BE gateway expects a blob of parameters which are kept in the ACPI
1830 * NHLT blob, so query the blob for interface type (i2s/pdm) and instance.
1831 * The port can have multiple settings so pick based on the pipeline
/*
 * Fill BE gateway parameters: query the ACPI NHLT table for the endpoint
 * blob matching this module's bus id, link type and the pipe-config format,
 * and attach it as the module's INIT-time caps. HDA links need no blob.
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
	struct nhlt_specific_cfg *cfg;
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_fmt *pipe_fmt;
	struct skl_dev *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
	skl_tplg_fill_dma_id(mconfig, params);
	/* HDA links are configured via the link DMA, not an NHLT blob */
	if (link_type == NHLT_LINK_HDA)
	/* pick the format matching the selected pipe configuration */
	if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK)
		pipe_fmt = &pipe->configs[pipe->pipe_config_idx].out_fmt;
		pipe_fmt = &pipe->configs[pipe->pipe_config_idx].in_fmt;
	/* update the blob based on virtual bus_id*/
	cfg = intel_nhlt_get_endpoint_blob(dai->dev, skl->nhlt,
					mconfig->vbus_id, link_type,
					pipe_fmt->bps, params->s_cont,
					pipe_fmt->channels, pipe_fmt->freq,
					pipe->direction, dev_type);
	mconfig->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
	mconfig->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
	dev_err(dai->dev, "Blob NULL for id:%d type:%d dirn:%d ch:%d, freq:%d, fmt:%d\n",
		mconfig->vbus_id, link_type, params->stream,
		params->ch, params->s_freq, params->s_fmt);
/*
 * Walk source paths from a BE capture widget, filling pipe params for every
 * connected SKL DSP widget; recurse through non-DSP widgets.
 */
static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
	struct snd_soc_dapm_path *p;
	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			/* not a DSP widget: keep walking towards the source */
			ret = skl_tplg_be_set_src_pipe_params(dai,
/*
 * Walk sink paths from a BE playback widget, filling pipe params for every
 * connected SKL DSP widget; recurse through non-DSP widgets.
 */
static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
		struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
	struct snd_soc_dapm_path *p;
	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			/* not a DSP widget: keep walking towards the sink */
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
1926 * BE hw_params can be a source parameters (capture) or sink parameters
1927 * (playback). Based on sink and source we need to either find the source
1928 * list or the sink list and set the pipeline parameters
/*
 * Entry point for BE hw_params: pick the DAI widget for the stream
 * direction and fan the params out over the connected pipeline(s).
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
	struct snd_soc_dapm_widget *w;
	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		w = dai->playback_widget;
		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
		w = dai->capture_widget;
		return skl_tplg_be_set_src_pipe_params(dai, w, params);
/* Map topology widget event types to their DAPM event handlers */
static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
/* bytes-ext (TLV) kcontrol callbacks registered with the topology core */
static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
	skl_tplg_tlv_control_set},
/* enum kcontrol callbacks (mic select, multi-IO config) for the topology core */
static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
		.id = SKL_CONTROL_TYPE_MIC_SELECT,
		.get = skl_tplg_mic_control_get,
		.put = skl_tplg_mic_control_set,
		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
		.get = skl_tplg_multi_config_get,
		.put = skl_tplg_multi_config_set,
		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
		.get = skl_tplg_multi_config_get_dmic,
		.put = skl_tplg_multi_config_set_dmic,
/*
 * Store one pipe-configuration token (format or memory pages) into the
 * pipe's config table at conf_idx, selecting in/out format by direction.
 */
static int skl_tplg_fill_pipe_cfg(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val, int conf_idx, int dir)
	struct skl_pipe_fmt *fmt;
	struct skl_path_config *config;
		fmt = &pipe->configs[conf_idx].in_fmt;
		fmt = &pipe->configs[conf_idx].out_fmt;
		dev_err(dev, "Invalid direction: %d\n", dir);
	config = &pipe->configs[conf_idx];
	case SKL_TKN_U32_CFG_FREQ:
		fmt->freq = tkn_val;
	case SKL_TKN_U8_CFG_CHAN:
		fmt->channels = tkn_val;
	case SKL_TKN_U8_CFG_BPS:
	case SKL_TKN_U32_PATH_MEM_PGS:
		config->mem_pages = tkn_val;
		dev_err(dev, "Invalid token config: %d\n", tkn);
/*
 * Store one pipe-level token (connection type, priority, memory pages,
 * low-power mode, direction, config count) into the pipe struct.
 */
static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
	case SKL_TKN_U32_PIPE_DIRECTION:
		pipe->direction = tkn_val;
	case SKL_TKN_U32_NUM_CONFIGS:
		pipe->nr_cfgs = tkn_val;
		dev_err(dev, "Token not handled %d\n", tkn);
2065 * Add pipeline by parsing the relevant tokens
2066 * Return an existing pipe if the pipe already exists.
/*
 * Create (or reuse) the pipe identified by the PIPE_ID token value and
 * attach it to the module config. Existing pipes are shared via ppl_list.
 */
static int skl_tplg_add_pipe(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl_dev *skl,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem)
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;
	/* reuse an already-registered pipe with the same id */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == tkn_elem->value) {
			mconfig->pipe = ppl->pipe;
	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	pipe->p_params = params;
	pipe->ppl_id = tkn_elem->value;
	INIT_LIST_HEAD(&pipe->w_list);
	list_add(&ppl->node, &skl->ppl_list);
	mconfig->pipe = pipe;
	mconfig->pipe->state = SKL_PIPE_INVALID;
/* Import a UUID token into a guid_t; error on any other token type. */
static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
			struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
	if (uuid_tkn->token == SKL_TKN_UUID) {
		import_guid(guid, uuid_tkn->uuid);
	dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);
/*
 * Fill one field of a module pin (target module id, instance id or UUID)
 * at pin_index from a pin token.
 */
static int skl_tplg_fill_pin(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_pin *m_pin,
	switch (tkn_elem->token) {
	case SKL_TKN_U32_PIN_MOD_ID:
		m_pin[pin_index].id.module_id = tkn_elem->value;
	case SKL_TKN_U32_PIN_INST_ID:
		m_pin[pin_index].id.instance_id = tkn_elem->value;
		ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid,
			(struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
		dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
2154 * Parse for pin config specific tokens to fill up the
2155 * module private data
/*
 * Select the in/out pin array by direction, fill the pin at pin_count via
 * skl_tplg_fill_pin() and reset its runtime state to unbound.
 */
static int skl_tplg_fill_pins_info(struct device *dev,
		struct skl_module_cfg *mconfig,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		int dir, int pin_count)
	struct skl_module_pin *m_pin;
		m_pin = mconfig->m_in_pin;
		m_pin = mconfig->m_out_pin;
		dev_err(dev, "Invalid direction value\n");
	ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
	/* freshly parsed pins start unused and unbound */
	m_pin[pin_count].in_use = false;
	m_pin[pin_count].pin_state = SKL_PIN_UNBIND;
2190 * Fill up input/output module config format based
/*
 * Store one audio-format token (channels, rate, bit depths, channel config,
 * interleaving, sample type, channel map) into a module format struct.
 */
static int skl_tplg_fill_fmt(struct device *dev,
		struct skl_module_fmt *dst_fmt,
	case SKL_TKN_U32_FMT_CH:
		dst_fmt->channels = value;
	case SKL_TKN_U32_FMT_FREQ:
		dst_fmt->s_freq = value;
	case SKL_TKN_U32_FMT_BIT_DEPTH:
		dst_fmt->bit_depth = value;
	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
		dst_fmt->valid_bit_depth = value;
	case SKL_TKN_U32_FMT_CH_CONFIG:
		dst_fmt->ch_cfg = value;
	case SKL_TKN_U32_FMT_INTERLEAVE:
		dst_fmt->interleaving_style = value;
	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
		dst_fmt->sample_type = value;
	case SKL_TKN_U32_FMT_CH_MAP:
		dst_fmt->ch_map = value;
		dev_err(dev, "Invalid token %d\n", tkn);
/*
 * Route a format token to the input or output format of a module
 * interface, selected by direction and pin index.
 */
static int skl_tplg_widget_fill_fmt(struct device *dev,
			struct skl_module_iface *fmt,
			u32 tkn, u32 val, u32 dir, int fmt_idx)
	struct skl_module_fmt *dst_fmt;
		dst_fmt = &fmt->inputs[fmt_idx].fmt;
		dst_fmt = &fmt->outputs[fmt_idx].fmt;
		dev_err(dev, "Invalid direction: %d\n", dir);
	return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
/* Apply the same is_dynamic flag to every pin in an array of pin_count pins */
static void skl_tplg_fill_pin_dynamic_val(
		struct skl_module_pin *mpin, u32 pin_count, u32 value)
	for (i = 0; i < pin_count; i++)
		mpin[i].is_dynamic = value;
2274 * Resource table in the manifest has pin specific resources
2275 * like pin and pin buffer size
/*
 * Fill per-pin resource fields (pin index, buffer size) from manifest
 * tokens, selecting the input or output resource array by direction.
 */
static int skl_tplg_manifest_pin_res_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_res *res, int pin_idx, int dir)
	struct skl_module_pin_resources *m_pin;
		m_pin = &res->input[pin_idx];
		m_pin = &res->output[pin_idx];
		dev_err(dev, "Invalid pin direction: %d\n", dir);
	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_RES_PIN_ID:
		m_pin->pin_index = tkn_elem->value;
	case SKL_TKN_MM_U32_PIN_BUF:
		m_pin->buf_size = tkn_elem->value;
		dev_err(dev, "Invalid token: %d\n", tkn_elem->token);
2315 * Fill module specific resources from the manifest's resource
2316 * table like CPS, DMA size, mem_pages.
/*
 * Fill module resource fields (DMA size, CPC, memory pages, OBS/IBS and
 * per-pin resources) from manifest tokens; deprecated CPS/MCPS tokens are
 * accepted but ignored.
 */
static int skl_tplg_fill_res_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module_res *res,
		int pin_idx, int dir)
	int ret, tkn_count = 0;
	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_DMA_SIZE:
		res->dma_buffer_size = tkn_elem->value;
	case SKL_TKN_MM_U32_CPC:
		res->cpc = tkn_elem->value;
	case SKL_TKN_U32_MEM_PAGES:
		res->is_pages = tkn_elem->value;
	case SKL_TKN_U32_OBS:
		res->obs = tkn_elem->value;
	case SKL_TKN_U32_IBS:
		res->ibs = tkn_elem->value;
	case SKL_TKN_MM_U32_RES_PIN_ID:
	case SKL_TKN_MM_U32_PIN_BUF:
		ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res,
	case SKL_TKN_MM_U32_CPS:
	case SKL_TKN_U32_MAX_MCPS:
		/* ignore unused tokens */
		dev_err(dev, "Not a res type token: %d", tkn_elem->token);
2373 * Parse tokens to fill up the module private data
/*
 * Interpret a single vendor token and store its value into the module
 * config (or the pipe / resource / format it references). Returns how many
 * token elements were consumed.
 *
 * NOTE: is_pipe_exists, pin_index, dir and conf_idx are static locals, so
 * parser state deliberately persists across calls while one widget's token
 * stream is processed.
 */
static int skl_tplg_get_token(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_dev *skl, struct skl_module_cfg *mconfig)
	static int is_pipe_exists;
	static int pin_index, dir, conf_idx;
	struct skl_module_iface *iface = NULL;
	struct skl_module_res *res = NULL;
	int res_idx = mconfig->res_idx;
	int fmt_idx = mconfig->fmt_idx;
	/*
	 * If the manifest structure contains no modules, fill all
	 * the module data to 0th index.
	 * res_idx and fmt_idx are default set to 0.
	 */
	if (skl->nr_modules == 0) {
		res = &mconfig->module->resources[res_idx];
		iface = &mconfig->module->formats[fmt_idx];
	if (tkn_elem->token > SKL_TKN_MAX)
	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mconfig->module->max_input_pins = tkn_elem->value;
	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mconfig->module->max_output_pins = tkn_elem->value;
	case SKL_TKN_U8_DYN_IN_PIN:
		/* lazily allocate the input pin array on first use */
		if (!mconfig->m_in_pin)
			devm_kcalloc(dev, MAX_IN_QUEUE,
				     sizeof(*mconfig->m_in_pin),
		if (!mconfig->m_in_pin)
		skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE,
	case SKL_TKN_U8_DYN_OUT_PIN:
		if (!mconfig->m_out_pin)
			mconfig->m_out_pin =
				/*
				 * NOTE(review): out-pin array is allocated with
				 * MAX_IN_QUEUE / sizeof(*m_in_pin); MAX_OUT_QUEUE /
				 * sizeof(*m_out_pin) looks intended — confirm the
				 * two constants/types are identical before relying
				 * on this.
				 */
				devm_kcalloc(dev, MAX_IN_QUEUE,
					     sizeof(*mconfig->m_in_pin),
		if (!mconfig->m_out_pin)
		skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE,
	case SKL_TKN_U8_TIME_SLOT:
		mconfig->time_slot = tkn_elem->value;
	case SKL_TKN_U8_CORE_ID:
		mconfig->core_id = tkn_elem->value;
	case SKL_TKN_U8_MOD_TYPE:
		mconfig->m_type = tkn_elem->value;
	case SKL_TKN_U8_DEV_TYPE:
		mconfig->dev_type = tkn_elem->value;
	case SKL_TKN_U8_HW_CONN_TYPE:
		mconfig->hw_conn_type = tkn_elem->value;
	case SKL_TKN_U16_MOD_INST_ID:
		mconfig->id.instance_id =
	/* resource tokens are delegated to the resource parser */
	case SKL_TKN_U32_MEM_PAGES:
	case SKL_TKN_U32_MAX_MCPS:
	case SKL_TKN_U32_OBS:
	case SKL_TKN_U32_IBS:
		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
	case SKL_TKN_U32_VBUS_ID:
		mconfig->vbus_id = tkn_elem->value;
	case SKL_TKN_U32_PARAMS_FIXUP:
		mconfig->params_fixup = tkn_elem->value;
	case SKL_TKN_U32_CONVERTER:
		mconfig->converter = tkn_elem->value;
	case SKL_TKN_U32_D0I3_CAPS:
		mconfig->d0i3_caps = tkn_elem->value;
	case SKL_TKN_U32_PIPE_ID:
		ret = skl_tplg_add_pipe(dev,
				mconfig, skl, tkn_elem);
		/* -EEXIST means the pipe is shared with an earlier widget */
		if (ret == -EEXIST) {
			return is_pipe_exists;
	case SKL_TKN_U32_PIPE_CONFIG_ID:
		conf_idx = tkn_elem->value;
	/* pipe-level tokens only apply once the pipe object exists */
	case SKL_TKN_U32_PIPE_CONN_TYPE:
	case SKL_TKN_U32_PIPE_PRIORITY:
	case SKL_TKN_U32_PIPE_MEM_PGS:
	case SKL_TKN_U32_PMODE:
	case SKL_TKN_U32_PIPE_DIRECTION:
	case SKL_TKN_U32_NUM_CONFIGS:
		if (is_pipe_exists) {
			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value);
	/* per-configuration tokens, valid only when configs were declared */
	case SKL_TKN_U32_PATH_MEM_PGS:
	case SKL_TKN_U32_CFG_FREQ:
	case SKL_TKN_U8_CFG_CHAN:
	case SKL_TKN_U8_CFG_BPS:
		if (mconfig->pipe->nr_cfgs) {
			ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe,
					tkn_elem->token, tkn_elem->value,
	case SKL_TKN_CFG_MOD_RES_ID:
		mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value;
	case SKL_TKN_CFG_MOD_FMT_ID:
		mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value;
	/*
	 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both
	 * direction and the pin count. The first four bits represent
	 * direction and next four the pin count.
	 */
	case SKL_TKN_U32_DIR_PIN_COUNT:
		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
		pin_index = (tkn_elem->value &
			SKL_PIN_COUNT_MASK) >> 4;
	/* format tokens are routed to the iface in/out format by dir/pin */
	case SKL_TKN_U32_FMT_CH:
	case SKL_TKN_U32_FMT_FREQ:
	case SKL_TKN_U32_FMT_BIT_DEPTH:
	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
	case SKL_TKN_U32_FMT_CH_CONFIG:
	case SKL_TKN_U32_FMT_INTERLEAVE:
	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
	case SKL_TKN_U32_FMT_CH_MAP:
		ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token,
				tkn_elem->value, dir, pin_index);
	case SKL_TKN_U32_PIN_MOD_ID:
	case SKL_TKN_U32_PIN_INST_ID:
		ret = skl_tplg_fill_pins_info(dev,
				mconfig, tkn_elem, dir,
	case SKL_TKN_U32_FMT_CFG_IDX:
		if (tkn_elem->value > SKL_MAX_PARAMS_TYPES)
		mconfig->fmt_cfg_idx = tkn_elem->value;
	case SKL_TKN_U32_CAPS_SIZE:
		mconfig->formats_config[mconfig->fmt_cfg_idx].caps_size =
	case SKL_TKN_U32_CAPS_SET_PARAMS:
		mconfig->formats_config[mconfig->fmt_cfg_idx].set_params =
	case SKL_TKN_U32_CAPS_PARAMS_ID:
		mconfig->formats_config[mconfig->fmt_cfg_idx].param_id =
	case SKL_TKN_U32_PROC_DOMAIN:
	case SKL_TKN_U32_DMA_BUF_SIZE:
		mconfig->dma_buffer_size = tkn_elem->value;
	/* tokens known but intentionally not acted upon */
	case SKL_TKN_U8_IN_PIN_TYPE:
	case SKL_TKN_U8_OUT_PIN_TYPE:
	case SKL_TKN_U8_CONN_TYPE:
		dev_err(dev, "Token %d not handled\n",
2630 * Parse the vendor array for specific tokens to construct
2631 * module private data
/*
 * Walk one private-data block, dispatching each vendor array element:
 * the first UUID tuple is the module GUID, everything else goes through
 * skl_tplg_get_token(). String tuples are not expected.
 */
static int skl_tplg_get_tokens(struct device *dev,
		char *pvt_data, struct skl_dev *skl,
		struct skl_module_cfg *mconfig, int block_size)
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	bool is_module_guid = true;
	if (block_size <= 0)
	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			dev_warn(dev, "no string tokens expected for skl tplg\n");
		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			/* first UUID tuple is the module GUID itself */
			if (is_module_guid) {
				ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid,
				is_module_guid = false;
				ret = skl_tplg_get_token(dev, array->value, skl,
			tuple_size += sizeof(*array->uuid);
		tkn_elem = array->value;
		/* value tuples: consume elements as each token dictates */
		while (tkn_count <= (array->num_elems - 1)) {
			ret = skl_tplg_get_token(dev, tkn_elem,
			tkn_count = tkn_count + ret;
		tuple_size += tkn_count * sizeof(*tkn_elem);
 * Every data block is preceded by a descriptor to read the number
 * of data blocks, the type of the block and its size
/*
 * Read one descriptor element (block count, block type or block size) and
 * return its value; any other token is an error.
 */
static int skl_tplg_get_desc_blocks(struct device *dev,
		struct snd_soc_tplg_vendor_array *array)
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;
	tkn_elem = array->value;
	switch (tkn_elem->token) {
	case SKL_TKN_U8_NUM_BLOCKS:
	case SKL_TKN_U8_BLOCK_TYPE:
	case SKL_TKN_U16_BLOCK_SIZE:
		return tkn_elem->value;
	dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token);
2721 /* Functions to parse private data from configuration file format v4 */
2724 * Add pipeline from topology binary into driver pipeline list
2726 * If already added we return that instance
2727 * Otherwise we create a new instance and add into driver list
/*
 * v4-format counterpart of skl_tplg_add_pipe(): create or reuse the pipe
 * described by a skl_dfw_v4_pipe and attach it to the module config.
 */
static int skl_tplg_add_pipe_v4(struct device *dev,
		struct skl_module_cfg *mconfig, struct skl_dev *skl,
		struct skl_dfw_v4_pipe *dfw_pipe)
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe;
	struct skl_pipe_params *params;
	/* reuse an already-registered pipe with the same id */
	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == dfw_pipe->pipe_id) {
			mconfig->pipe = ppl->pipe;
	ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL);
	pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL);
	params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL);
	pipe->ppl_id = dfw_pipe->pipe_id;
	pipe->memory_pages = dfw_pipe->memory_pages;
	pipe->pipe_priority = dfw_pipe->pipe_priority;
	pipe->conn_type = dfw_pipe->conn_type;
	pipe->state = SKL_PIPE_INVALID;
	pipe->p_params = params;
	INIT_LIST_HEAD(&pipe->w_list);
	list_add(&ppl->node, &skl->ppl_list);
	mconfig->pipe = pipe;
/*
 * Copy v4 firmware pin descriptors into the runtime pin array, marking each
 * pin unused/unbound and applying the caller's is_dynamic flag.
 */
static void skl_fill_module_pin_info_v4(struct skl_dfw_v4_module_pin *dfw_pin,
					struct skl_module_pin *m_pin,
					bool is_dynamic, int max_pin)
	for (i = 0; i < max_pin; i++) {
		m_pin[i].id.module_id = dfw_pin[i].module_id;
		m_pin[i].id.instance_id = dfw_pin[i].instance_id;
		m_pin[i].in_use = false;
		m_pin[i].is_dynamic = is_dynamic;
		m_pin[i].pin_state = SKL_PIN_UNBIND;
/* Convert v4 firmware pin formats into the runtime skl_module_fmt layout */
static void skl_tplg_fill_fmt_v4(struct skl_module_pin_fmt *dst_fmt,
				struct skl_dfw_v4_module_fmt *src_fmt,
	for (i = 0; i < pins; i++) {
		dst_fmt[i].fmt.channels = src_fmt[i].channels;
		dst_fmt[i].fmt.s_freq = src_fmt[i].freq;
		dst_fmt[i].fmt.bit_depth = src_fmt[i].bit_depth;
		dst_fmt[i].fmt.valid_bit_depth = src_fmt[i].valid_bit_depth;
		dst_fmt[i].fmt.ch_cfg = src_fmt[i].ch_cfg;
		dst_fmt[i].fmt.ch_map = src_fmt[i].ch_map;
		dst_fmt[i].fmt.interleaving_style =
				src_fmt[i].interleaving_style;
		dst_fmt[i].fmt.sample_type = src_fmt[i].sample_type;
/*
 * Parse a widget's private data in the legacy v4 layout: a single
 * skl_dfw_v4_module struct rather than tokenized tuples. Copies every
 * field into the module config, creates/reuses the pipe and duplicates the
 * caps blob when present.
 */
static int skl_tplg_get_pvt_data_v4(struct snd_soc_tplg_dapm_widget *tplg_w,
		struct skl_dev *skl, struct device *dev,
		struct skl_module_cfg *mconfig)
	struct skl_dfw_v4_module *dfw =
			(struct skl_dfw_v4_module *)tplg_w->priv.data;
	int idx = mconfig->fmt_cfg_idx;
	dev_dbg(dev, "Parsing Skylake v4 widget topology data\n");
	ret = guid_parse(dfw->uuid, (guid_t *)mconfig->guid);
	/* module id is resolved later against the loaded firmware */
	mconfig->id.module_id = -1;
	mconfig->id.instance_id = dfw->instance_id;
	/* v4 stores MCPS; runtime uses cycles-per-chunk (CPC) */
	mconfig->module->resources[0].cpc = dfw->max_mcps / 1000;
	mconfig->module->resources[0].ibs = dfw->ibs;
	mconfig->module->resources[0].obs = dfw->obs;
	mconfig->core_id = dfw->core_id;
	mconfig->module->max_input_pins = dfw->max_in_queue;
	mconfig->module->max_output_pins = dfw->max_out_queue;
	mconfig->module->loadable = dfw->is_loadable;
	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].inputs, dfw->in_fmt,
	skl_tplg_fill_fmt_v4(mconfig->module->formats[0].outputs, dfw->out_fmt,
	mconfig->params_fixup = dfw->params_fixup;
	mconfig->converter = dfw->converter;
	mconfig->m_type = dfw->module_type;
	mconfig->vbus_id = dfw->vbus_id;
	mconfig->module->resources[0].is_pages = dfw->mem_pages;
	ret = skl_tplg_add_pipe_v4(dev, mconfig, skl, &dfw->pipe);
	mconfig->dev_type = dfw->dev_type;
	mconfig->hw_conn_type = dfw->hw_conn_type;
	mconfig->time_slot = dfw->time_slot;
	mconfig->formats_config[idx].caps_size = dfw->caps.caps_size;
	mconfig->m_in_pin = devm_kcalloc(dev,
			MAX_IN_QUEUE, sizeof(*mconfig->m_in_pin),
	if (!mconfig->m_in_pin)
	mconfig->m_out_pin = devm_kcalloc(dev,
			MAX_OUT_QUEUE, sizeof(*mconfig->m_out_pin),
	if (!mconfig->m_out_pin)
	skl_fill_module_pin_info_v4(dfw->in_pin, mconfig->m_in_pin,
				    dfw->is_dynamic_in_pin,
				    mconfig->module->max_input_pins);
	skl_fill_module_pin_info_v4(dfw->out_pin, mconfig->m_out_pin,
				    dfw->is_dynamic_out_pin,
				    mconfig->module->max_output_pins);
	/* copy the optional caps blob into driver-owned memory */
	if (mconfig->formats_config[idx].caps_size) {
		mconfig->formats_config[idx].set_params = dfw->caps.set_params;
		mconfig->formats_config[idx].param_id = dfw->caps.param_id;
		mconfig->formats_config[idx].caps =
			devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
		if (!mconfig->formats_config[idx].caps)
		memcpy(mconfig->formats_config[idx].caps, dfw->caps.caps,
			dfw->caps.caps_size);
2883 static int skl_tplg_get_caps_data(struct device *dev, char *data,
2884 struct skl_module_cfg *mconfig)
2886 int idx = mconfig->fmt_cfg_idx;
2888 if (mconfig->formats_config[idx].caps_size > 0) {
2889 mconfig->formats_config[idx].caps =
2890 devm_kzalloc(dev, mconfig->formats_config[idx].caps_size,
2892 if (!mconfig->formats_config[idx].caps)
2894 memcpy(mconfig->formats_config[idx].caps, data,
2895 mconfig->formats_config[idx].caps_size);
2898 return mconfig->formats_config[idx].caps_size;
2902 * Parse the private data for the token and corresponding value.
2903 * The private data can have multiple data blocks. So, a data block
2904 * is preceded by a descriptor for number of blocks and a descriptor
 * for the type and size of the succeeding data block.
/*
 * Top-level parser for a widget's private data. Dispatches to the v4
 * parser when the data starts with a textual UUID, otherwise walks the
 * descriptor-prefixed sequence of tuple/caps data blocks.
 *
 * Returns 0 on success or a negative errno from a sub-parser.
 */
static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
				 struct skl_dev *skl, struct device *dev,
				 struct skl_module_cfg *mconfig)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size, block_type, off = 0;
	char *data;
	int ret;

	/*
	 * v4 configuration files have a valid UUID at the start of
	 * the widget's private data.
	 */
	if (uuid_is_valid((char *)tplg_w->priv.data))
		return skl_tplg_get_pvt_data_v4(tplg_w, skl, dev, mconfig);

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);
		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(tplg_w->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);
		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		/* data now points at the payload of this block */
		data = (tplg_w->priv.data + off);

		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_tokens(dev, data,
					skl, mconfig, block_size);
		} else {
			ret = skl_tplg_get_caps_data(dev, data, mconfig);
		}
		if (ret < 0)
			return ret;

		/* advance past the consumed payload to the next descriptor */
		--num_blocks;
		off += ret;
	}

	return 0;
}
/*
 * Reset the pin bookkeeping of one widget's module config: mark all
 * input/output pins unused and unbound, invalidate the owning pipe and
 * mark the module uninitialized. Only acts on widgets belonging to
 * @component (name-prefix match against the widget's DAPM component).
 */
static void skl_clear_pin_config(struct snd_soc_component *component,
				struct snd_soc_dapm_widget *w)
{
	int i;
	struct skl_module_cfg *mconfig;
	struct skl_pipe *pipe;

	if (!strncmp(w->dapm->component->name, component->name,
					strlen(component->name))) {
		mconfig = w->priv;
		pipe = mconfig->pipe;
		for (i = 0; i < mconfig->module->max_input_pins; i++) {
			mconfig->m_in_pin[i].in_use = false;
			mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		for (i = 0; i < mconfig->module->max_output_pins; i++) {
			mconfig->m_out_pin[i].in_use = false;
			mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
		}
		pipe->state = SKL_PIPE_INVALID;
		mconfig->m_state = SKL_MODULE_UNINIT;
	}
}
/*
 * Drop all DSP pipeline/module state tracked by the driver: clear pin
 * configuration on every SKL DSP widget of the card and reset the DSP's
 * module use counters. Safe to call before the component/card exist.
 */
void skl_cleanup_resources(struct skl_dev *skl)
{
	struct snd_soc_component *soc_component = skl->component;
	struct snd_soc_dapm_widget *w;
	struct snd_soc_card *card;

	if (soc_component == NULL)
		return;

	card = soc_component->card;
	if (!card || !card->instantiated)
		return;

	list_for_each_entry(w, &card->widgets, list) {
		/* only SKL-managed widgets carry a module config in w->priv */
		if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL)
			skl_clear_pin_config(soc_component, w);
	}

	skl_clear_module_cnt(skl->dsp);
}
/*
 * Topology core widget load callback
 *
 * This is used to save the private data for each widget which gives
 * information to the driver about module and pipeline parameters which DSP
 * FW expects like ids, resource values, formats etc
 */
static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index,
				struct snd_soc_dapm_widget *w,
				struct snd_soc_tplg_dapm_widget *tplg_w)
{
	int ret;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_module_cfg *mconfig;

	/* widgets without private data still need event binding below */
	if (!tplg_w->priv.size)
		goto bind_event;

	mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL);
	if (!mconfig)
		return -ENOMEM;

	/* manifest may pre-allocate modules; only allocate when it didn't */
	if (skl->nr_modules == 0) {
		mconfig->module = devm_kzalloc(bus->dev,
				sizeof(*mconfig->module), GFP_KERNEL);
		if (!mconfig->module)
			return -ENOMEM;
	}

	w->priv = mconfig;

	/*
	 * module binary can be loaded later, so set it to query when
	 * module is load for a use case
	 */
	mconfig->id.module_id = -1;

	/* To provide backward compatibility, set default as SKL_PARAM_INIT */
	mconfig->fmt_cfg_idx = SKL_PARAM_INIT;

	/* Parse private data for tuples */
	ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
	if (ret < 0)
		return ret;

	skl_debug_init_module(skl->debugfs, w, mconfig);

bind_event:
	if (tplg_w->event_type == 0) {
		dev_dbg(bus->dev, "ASoC: No event handler required\n");
		return 0;
	}

	ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops,
					ARRAY_SIZE(skl_tplg_widget_ops),
					tplg_w->event_type);
	if (ret) {
		dev_err(bus->dev, "%s: No matching event handlers found for %d\n",
					__func__, tplg_w->event_type);
		return -EINVAL;
	}

	return 0;
}
/*
 * Build the driver-private state for a bytes-ext (algo) control from its
 * topology counterpart and attach it to the control's dobj.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
				struct snd_soc_tplg_bytes_control *bc)
{
	struct skl_algo_data *ac;
	struct skl_dfw_algo_data *dfw_ac =
				(struct skl_dfw_algo_data *)bc->priv.data;

	ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	/* Fill private data */
	ac->max = dfw_ac->max;
	ac->param_id = dfw_ac->param_id;
	ac->set_params = dfw_ac->set_params;
	ac->size = dfw_ac->max;

	/* copy the initial parameter blob only when one is present */
	if (ac->max) {
		ac->params = devm_kzalloc(dev, ac->max, GFP_KERNEL);
		if (!ac->params)
			return -ENOMEM;

		memcpy(ac->params, dfw_ac->params, ac->max);
	}

	be->dobj.private = ac;
	return 0;
}
3114 static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
3115 struct snd_soc_tplg_enum_control *ec)
3120 if (ec->priv.size) {
3121 data = devm_kzalloc(dev, sizeof(ec->priv.size), GFP_KERNEL);
3124 memcpy(data, ec->priv.data, ec->priv.size);
3125 se->dobj.private = data;
/*
 * Topology core control load callback: initialize driver-private data for
 * bytes-ext and enum controls, and lock down the DMIC multi-config enums
 * to read-only. Unsupported control types are logged and accepted.
 */
static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
				int index,
				struct snd_kcontrol_new *kctl,
				struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct soc_bytes_ext *sb;
	struct snd_soc_tplg_bytes_control *tplg_bc;
	struct snd_soc_tplg_enum_control *tplg_ec;
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct soc_enum *se;

	switch (hdr->ops.info) {
	case SND_SOC_TPLG_CTL_BYTES:
		tplg_bc = container_of(hdr,
				struct snd_soc_tplg_bytes_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)kctl->private_value;
			if (tplg_bc->priv.size)
				return skl_init_algo_data(
						bus->dev, sb, tplg_bc);
		}
		break;

	case SND_SOC_TPLG_CTL_ENUM:
		tplg_ec = container_of(hdr,
				struct snd_soc_tplg_enum_control, hdr);
		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) {
			se = (struct soc_enum *)kctl->private_value;
			if (tplg_ec->priv.size)
				skl_init_enum_data(bus->dev, se, tplg_ec);
		}

		/*
		 * now that the control initializations are done, remove
		 * write permission for the DMIC configuration enums to
		 * avoid conflicts between NHLT settings and user interaction
		 */
		if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC)
			kctl->access = SNDRV_CTL_ELEM_ACCESS_READ;

		break;

	default:
		dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
			hdr->ops.get, hdr->ops.put, hdr->ops.info);
		break;
	}

	return 0;
}
3184 static int skl_tplg_fill_str_mfest_tkn(struct device *dev,
3185 struct snd_soc_tplg_vendor_string_elem *str_elem,
3186 struct skl_dev *skl)
3189 static int ref_count;
3191 switch (str_elem->token) {
3192 case SKL_TKN_STR_LIB_NAME:
3193 if (ref_count > skl->lib_count - 1) {
3198 strncpy(skl->lib_info[ref_count].name,
3200 ARRAY_SIZE(skl->lib_info[ref_count].name));
3205 dev_err(dev, "Not a string token %d\n", str_elem->token);
/*
 * Walk all string elements of one vendor array, feeding each to the
 * string-token handler. Returns the number of tokens consumed or the
 * first negative errno from the handler.
 */
static int skl_tplg_get_str_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_array *array,
		struct skl_dev *skl)
{
	int tkn_count = 0, ret;
	struct snd_soc_tplg_vendor_string_elem *str_elem;

	str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value;
	while (tkn_count < array->num_elems) {
		ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl);
		str_elem++;

		if (ret < 0)
			return ret;

		tkn_count = tkn_count + ret;
	}

	return tkn_count;
}
/*
 * Apply one manifest format token to the pin format selected by direction
 * (@dir: in/out) and index (@fmt_idx) within the module interface @fmt.
 * The pin-id token is handled here; all other format tokens are delegated
 * to skl_tplg_fill_fmt().
 *
 * Returns 0 on success or a negative errno.
 */
static int skl_tplg_manifest_fill_fmt(struct device *dev,
		struct skl_module_iface *fmt,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		u32 dir, int fmt_idx)
{
	struct skl_module_pin_fmt *dst_fmt;
	struct skl_module_fmt *mod_fmt;
	int ret;

	if (!fmt)
		return -EINVAL;

	switch (dir) {
	case SKL_DIR_IN:
		dst_fmt = &fmt->inputs[fmt_idx];
		break;

	case SKL_DIR_OUT:
		dst_fmt = &fmt->outputs[fmt_idx];
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	mod_fmt = &dst_fmt->fmt;

	switch (tkn_elem->token) {
	case SKL_TKN_MM_U32_INTF_PIN_ID:
		dst_fmt->id = tkn_elem->value;
		break;

	default:
		/* everything else is a plain format field */
		ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token,
					tkn_elem->value);
		if (ret < 0)
			return ret;
		break;
	}

	return 0;
}
/*
 * Apply one module-info manifest token (pin types/counts, number of
 * resources/interfaces) to @mod. Returns 0 on success or -EINVAL for a
 * missing module or an unrecognized token.
 */
static int skl_tplg_fill_mod_info(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_module *mod)
{
	if (!mod)
		return -EINVAL;

	switch (tkn_elem->token) {
	case SKL_TKN_U8_IN_PIN_TYPE:
		mod->input_pin_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_PIN_TYPE:
		mod->output_pin_type = tkn_elem->value;
		break;

	case SKL_TKN_U8_IN_QUEUE_COUNT:
		mod->max_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_U8_OUT_QUEUE_COUNT:
		mod->max_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_MM_U8_NUM_RES:
		mod->nr_resources = tkn_elem->value;
		break;

	case SKL_TKN_MM_U8_NUM_INTF:
		mod->nr_interfaces = tkn_elem->value;
		break;

	default:
		dev_err(dev, "Invalid mod info token %d", tkn_elem->token);
		return -EINVAL;
	}

	return 0;
}
/*
 * Apply one integer manifest token to the driver state.
 *
 * NOTE(review): several locals are static so that index tokens
 * (module/resource/interface/A-state indices, pin direction) persist
 * across successive calls while a manifest is parsed — this makes the
 * parser single-instance and not reentrant.
 *
 * Returns the number of tokens consumed (1) or a negative errno.
 */
static int skl_tplg_get_int_tkn(struct device *dev,
		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
		struct skl_dev *skl)
{
	int tkn_count = 0, ret;
	static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
	struct skl_module_res *res = NULL;
	struct skl_module_iface *fmt = NULL;
	struct skl_module *mod = NULL;
	static struct skl_astate_param *astate_table;
	static int astate_cfg_idx, count;
	int i;
	size_t size;

	/* resolve the currently selected module/resource/interface, if any */
	if (skl->modules) {
		mod = skl->modules[mod_idx];
		res = &mod->resources[res_val_idx];
		fmt = &mod->formats[intf_val_idx];
	}

	switch (tkn_elem->token) {
	case SKL_TKN_U32_LIB_COUNT:
		skl->lib_count = tkn_elem->value;
		break;

	case SKL_TKN_U8_NUM_MOD:
		/* allocate the module table plus one skl_module per entry */
		skl->nr_modules = tkn_elem->value;
		skl->modules = devm_kcalloc(dev, skl->nr_modules,
				sizeof(*skl->modules), GFP_KERNEL);
		if (!skl->modules)
			return -ENOMEM;

		for (i = 0; i < skl->nr_modules; i++) {
			skl->modules[i] = devm_kzalloc(dev,
					sizeof(struct skl_module), GFP_KERNEL);
			if (!skl->modules[i])
				return -ENOMEM;
		}
		break;

	case SKL_TKN_MM_U8_MOD_IDX:
		mod_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_ASTATE_COUNT:
		if (astate_table != NULL) {
			dev_err(dev, "More than one entry for A-State count");
			return -EINVAL;
		}

		if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
			dev_err(dev, "Invalid A-State count %d\n",
				tkn_elem->value);
			return -EINVAL;
		}

		size = struct_size(skl->cfg.astate_cfg, astate_table,
				   tkn_elem->value);
		skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
		if (!skl->cfg.astate_cfg)
			return -ENOMEM;

		astate_table = skl->cfg.astate_cfg->astate_table;
		count = skl->cfg.astate_cfg->count = tkn_elem->value;
		break;

	case SKL_TKN_U32_ASTATE_IDX:
		if (tkn_elem->value >= count) {
			dev_err(dev, "Invalid A-State index %d\n",
				tkn_elem->value);
			return -EINVAL;
		}

		astate_cfg_idx = tkn_elem->value;
		break;

	case SKL_TKN_U32_ASTATE_KCPS:
		astate_table[astate_cfg_idx].kcps = tkn_elem->value;
		break;

	case SKL_TKN_U32_ASTATE_CLK_SRC:
		astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
		break;

	case SKL_TKN_U8_IN_PIN_TYPE:
	case SKL_TKN_U8_OUT_PIN_TYPE:
	case SKL_TKN_U8_IN_QUEUE_COUNT:
	case SKL_TKN_U8_OUT_QUEUE_COUNT:
	case SKL_TKN_MM_U8_NUM_RES:
	case SKL_TKN_MM_U8_NUM_INTF:
		ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod);
		if (ret < 0)
			return ret;
		break;

	case SKL_TKN_U32_DIR_PIN_COUNT:
		/* packed field: bit 0 = direction, bits 7:4 = pin index */
		dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK;
		pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4;
		break;

	case SKL_TKN_MM_U32_RES_ID:
		if (!res)
			return -EINVAL;

		res->id = tkn_elem->value;
		res_val_idx = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_FMT_ID:
		if (!fmt)
			return -EINVAL;

		fmt->fmt_idx = tkn_elem->value;
		intf_val_idx = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_CPS:
	case SKL_TKN_MM_U32_DMA_SIZE:
	case SKL_TKN_MM_U32_CPC:
	case SKL_TKN_U32_MEM_PAGES:
	case SKL_TKN_U32_OBS:
	case SKL_TKN_U32_IBS:
	case SKL_TKN_MM_U32_RES_PIN_ID:
	case SKL_TKN_MM_U32_PIN_BUF:
		ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir);
		if (ret < 0)
			return ret;
		break;

	case SKL_TKN_MM_U32_NUM_IN_FMT:
		if (!fmt)
			return -EINVAL;

		res->nr_input_pins = tkn_elem->value;
		break;

	case SKL_TKN_MM_U32_NUM_OUT_FMT:
		if (!fmt)
			return -EINVAL;

		res->nr_output_pins = tkn_elem->value;
		break;

	case SKL_TKN_U32_FMT_CH:
	case SKL_TKN_U32_FMT_FREQ:
	case SKL_TKN_U32_FMT_BIT_DEPTH:
	case SKL_TKN_U32_FMT_SAMPLE_SIZE:
	case SKL_TKN_U32_FMT_CH_CONFIG:
	case SKL_TKN_U32_FMT_INTERLEAVE:
	case SKL_TKN_U32_FMT_SAMPLE_TYPE:
	case SKL_TKN_U32_FMT_CH_MAP:
	case SKL_TKN_MM_U32_INTF_PIN_ID:
		ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem,
						 dir, pin_idx);
		if (ret < 0)
			return ret;
		break;

	default:
		dev_err(dev, "Not a manifest token %d\n", tkn_elem->token);
		return -EINVAL;
	}
	tkn_count++;

	return tkn_count;
}
/*
 * Fill the manifest structure by parsing the tokens based on the
 * type: string tuples, UUID tuples, and integer token tuples.
 *
 * Returns the offset consumed within @pvt_data, or a negative errno.
 */
static int skl_tplg_get_manifest_tkn(struct device *dev,
		char *pvt_data, struct skl_dev *skl,
		int block_size)
{
	int tkn_count = 0, ret;
	int off = 0, tuple_size = 0;
	u8 uuid_index = 0;
	struct snd_soc_tplg_vendor_array *array;
	struct snd_soc_tplg_vendor_value_elem *tkn_elem;

	if (block_size <= 0)
		return -EINVAL;

	while (tuple_size < block_size) {
		array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off);
		off += array->size;
		switch (array->type) {
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = skl_tplg_get_str_tkn(dev, array, skl);
			if (ret < 0)
				return ret;
			tkn_count = ret;

			tuple_size += tkn_count *
				sizeof(struct snd_soc_tplg_vendor_string_elem);
			continue;

		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			if (array->uuid->token != SKL_TKN_UUID) {
				dev_err(dev, "Not an UUID token: %d\n",
					array->uuid->token);
				return -EINVAL;
			}
			if (uuid_index >= skl->nr_modules) {
				dev_err(dev, "Too many UUID tokens\n");
				return -EINVAL;
			}
			/* one UUID per module, assigned in order */
			import_guid(&skl->modules[uuid_index++]->uuid,
				    array->uuid->uuid);

			tuple_size += sizeof(*array->uuid);
			continue;

		default:
			tkn_elem = array->value;
			tkn_count = 0;
			break;
		}

		/* integer tokens: consume every element of the array */
		while (tkn_count <= array->num_elems - 1) {
			ret = skl_tplg_get_int_tkn(dev,
					tkn_elem, skl);
			if (ret < 0)
				return ret;

			tkn_count = tkn_count + ret;
			tkn_elem++;
		}
		tuple_size += (tkn_count * sizeof(*tkn_elem));
		tkn_count = 0;
	}

	return off;
}
/*
 * Parse manifest private data for tokens. The private data block is
 * preceded by descriptors for type and size of data block.
 *
 * Returns 0 on success or a negative errno.
 */
static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest,
			struct device *dev, struct skl_dev *skl)
{
	struct snd_soc_tplg_vendor_array *array;
	int num_blocks, block_size = 0, block_type, off = 0;
	char *data;
	int ret;

	/* Read the NUM_DATA_BLOCKS descriptor */
	array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data;
	ret = skl_tplg_get_desc_blocks(dev, array);
	if (ret < 0)
		return ret;
	num_blocks = ret;

	off += array->size;
	/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
	while (num_blocks > 0) {
		array = (struct snd_soc_tplg_vendor_array *)
				(manifest->priv.data + off);
		ret = skl_tplg_get_desc_blocks(dev, array);
		if (ret < 0)
			return ret;
		block_type = ret;
		off += array->size;

		array = (struct snd_soc_tplg_vendor_array *)
			(manifest->priv.data + off);

		ret = skl_tplg_get_desc_blocks(dev, array);
		if (ret < 0)
			return ret;
		block_size = ret;
		off += array->size;

		data = (manifest->priv.data + off);

		/* the manifest only carries tuple blocks */
		if (block_type == SKL_TYPE_TUPLE) {
			ret = skl_tplg_get_manifest_tkn(dev, data, skl,
					block_size);
			if (ret < 0)
				return ret;

			--num_blocks;
		} else {
			return -EINVAL;
		}
		off += ret;
	}

	return 0;
}
/*
 * Topology core manifest load callback: parse the manifest private data
 * and sanity-check the resulting library count.
 *
 * NOTE(review): the return value of skl_tplg_get_manifest_data() is
 * ignored here, so a malformed manifest is silently tolerated — confirm
 * whether that is intentional before changing it.
 */
static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
				struct snd_soc_tplg_manifest *manifest)
{
	struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt);
	struct skl_dev *skl = bus_to_skl(bus);

	/* proceed only if we have private data defined */
	if (manifest->priv.size == 0)
		return 0;

	skl_tplg_get_manifest_data(manifest, bus->dev, skl);

	if (skl->lib_count > SKL_MAX_LIB) {
		dev_err(bus->dev, "Exceeding max Library count. Got:%d\n",
					skl->lib_count);
		return -EINVAL;
	}

	return 0;
}
/*
 * Topology core completion callback: pre-select the DMIC multi-config
 * enum entry matching the platform's DMIC channel count ("c<N>" in the
 * enum text) by invoking the control's own put handler.
 */
static int skl_tplg_complete(struct snd_soc_component *component)
{
	struct snd_soc_dobj *dobj;
	struct snd_soc_acpi_mach *mach;
	struct snd_ctl_elem_value *val;
	int i;

	val = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	mach = dev_get_platdata(component->card->dev);
	list_for_each_entry(dobj, &component->dobj_list, list) {
		struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
		struct soc_enum *se;
		char **texts;
		char chan_text[4];

		/* only the DMIC multi-config enums are of interest */
		if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
		    kcontrol->put != skl_tplg_multi_config_set_dmic)
			continue;

		se = (struct soc_enum *)kcontrol->private_value;
		texts = dobj->control.dtexts;
		sprintf(chan_text, "c%d", mach->mach_params.dmic_num);

		for (i = 0; i < se->items; i++) {
			if (strstr(texts[i], chan_text)) {
				memset(val, 0, sizeof(*val));
				val->value.enumerated.item[0] = i;
				kcontrol->put(kcontrol, val);
			}
		}
	}

	kfree(val);
	return 0;
}
/* Callback table registered with the ASoC topology core on firmware load */
static struct snd_soc_tplg_ops skl_tplg_ops = {
	.widget_load = skl_tplg_widget_load,
	.control_load = skl_tplg_control_load,
	.bytes_ext_ops = skl_tlv_ops,
	.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
	.io_ops = skl_tplg_kcontrol_ops,
	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
	.manifest = skl_manifest_load,
	.dai_load = skl_dai_load,
	.complete = skl_tplg_complete,
};
/*
 * A pipe can have multiple modules, each of them will be a DAPM widget as
 * well. While managing a pipeline we need to get the list of all the
 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list()
 * helps to get the SKL type widgets in that pipeline
 */
static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component)
{
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mcfg = NULL;
	struct skl_pipe_module *p_module = NULL;
	struct skl_pipe *pipe;

	list_for_each_entry(w, &component->card->widgets, list) {
		if (is_skl_dsp_widget_type(w, component->dev) && w->priv) {
			mcfg = w->priv;
			pipe = mcfg->pipe;

			p_module = devm_kzalloc(component->dev,
						sizeof(*p_module), GFP_KERNEL);
			if (!p_module)
				return -ENOMEM;

			/* link this widget onto its pipe's module list */
			p_module->w = w;
			list_add_tail(&p_module->node, &pipe->w_list);
		}
	}

	return 0;
}
3721 static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe)
3723 struct skl_pipe_module *w_module;
3724 struct snd_soc_dapm_widget *w;
3725 struct skl_module_cfg *mconfig;
3726 bool host_found = false, link_found = false;
3728 list_for_each_entry(w_module, &pipe->w_list, node) {
3732 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
3734 else if (mconfig->dev_type != SKL_DEVICE_NONE)
3738 if (host_found && link_found)
3739 pipe->passthru = true;
3741 pipe->passthru = false;
/*
 * SKL topology init routine: request the topology firmware (with two
 * fallback names), hand it to the topology core, then build the per-pipe
 * widget lists and classify each pipe.
 *
 * Returns 0 on success or a negative errno.
 */
int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
{
	int ret;
	const struct firmware *fw;
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl;

	ret = request_firmware(&fw, skl->tplg_name, bus->dev);
	if (ret < 0) {
		char alt_tplg_name[64];

		/* first fallback: "<machine-driver-name>-tplg.bin" */
		snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin",
			 skl->mach->drv_name);
		dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s",
			 skl->tplg_name, ret, alt_tplg_name);

		ret = request_firmware(&fw, alt_tplg_name, bus->dev);
		if (!ret)
			goto component_load;

		dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin",
			 alt_tplg_name, ret);

		/* last resort: the generic topology binary */
		ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
		if (ret < 0) {
			dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
					"dfw_sst.bin", ret);
			return ret;
		}
	}

component_load:
	ret = snd_soc_tplg_component_load(component, &skl_tplg_ops, fw);
	if (ret < 0) {
		dev_err(bus->dev, "tplg component load failed%d\n", ret);
		goto err;
	}

	ret = skl_tplg_create_pipe_widget_list(component);
	if (ret < 0) {
		dev_err(bus->dev, "tplg create pipe widget list failed%d\n",
			ret);
		goto err;
	}

	list_for_each_entry(ppl, &skl->ppl_list, node)
		skl_tplg_set_pipe_type(skl, ppl->pipe);

err:
	/* firmware image is parsed into driver state; release it either way */
	release_firmware(fw);
	return ret;
}
/*
 * Tear down topology state: unlink all pipelines from the driver list and
 * let the topology core remove the dynamic objects it created.
 */
void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus)
{
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl, *tmp;

	list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node)
		list_del(&ppl->node);

	/* clean up topology */
	snd_soc_tplg_component_remove(component);
}