87b4e5fc5605e9ba9a4dadc7381b67228f1375bb
[platform/kernel/u-boot.git] / drivers / ufs / ufs.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /**
3  * ufs.c - Universal Flash Subsystem (UFS) driver
4  *
5  * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
6  * to u-boot.
7  *
8  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
9  */
10
11 #include <charset.h>
12 #include <common.h>
13 #include <dm.h>
14 #include <log.h>
15 #include <dm/device_compat.h>
16 #include <dm/devres.h>
17 #include <dm/lists.h>
18 #include <dm/device-internal.h>
19 #include <malloc.h>
20 #include <hexdump.h>
21 #include <scsi.h>
22 #include <linux/bitops.h>
23 #include <linux/delay.h>
24
25 #include <linux/dma-mapping.h>
26
27 #include "ufs.h"
28
29 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
30                                  UTP_TASK_REQ_COMPL |\
31                                  UFSHCD_ERROR_MASK)
32 /* maximum number of link-startup retries */
33 #define DME_LINKSTARTUP_RETRIES 3
34
35 /* maximum number of retries for a general UIC command  */
36 #define UFS_UIC_COMMAND_RETRIES 3
37
38 /* Query request retries */
39 #define QUERY_REQ_RETRIES 3
40 /* Query request timeout */
41 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
42
43 /* maximum timeout in ms for a general UIC command */
44 #define UFS_UIC_CMD_TIMEOUT     1000
45 /* NOP OUT retries waiting for NOP IN response */
46 #define NOP_OUT_RETRIES    10
47 /* Timeout after 30 msecs if NOP OUT hangs without response */
48 #define NOP_OUT_TIMEOUT    30 /* msecs */
49
50 /* Only use one Task Tag for all requests */
51 #define TASK_TAG        0
52
53 /* Expose the flag value from utp_upiu_query.value */
54 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
55
56 #define MAX_PRDT_ENTRY  262144
57
58 /* maximum bytes per request */
59 #define UFS_MAX_BYTES   (128 * 256 * 1024)
60
61 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
62 static inline void ufshcd_hba_stop(struct ufs_hba *hba);
63 static int ufshcd_hba_enable(struct ufs_hba *hba);
64
65 /*
66  * ufshcd_wait_for_register - wait for register value to change
67  */
68 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
69                                     u32 val, unsigned long timeout_ms)
70 {
71         int err = 0;
72         unsigned long start = get_timer(0);
73
74         /* ignore bits that we don't intend to wait on */
75         val = val & mask;
76
77         while ((ufshcd_readl(hba, reg) & mask) != val) {
78                 if (get_timer(start) > timeout_ms) {
79                         if ((ufshcd_readl(hba, reg) & mask) != val)
80                                 err = -ETIMEDOUT;
81                         break;
82                 }
83         }
84
85         return err;
86 }
87
88 /**
89  * ufshcd_init_pwr_info - setting the POR (power on reset)
90  * values in hba power info
91  */
92 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
93 {
94         hba->pwr_info.gear_rx = UFS_PWM_G1;
95         hba->pwr_info.gear_tx = UFS_PWM_G1;
96         hba->pwr_info.lane_rx = 1;
97         hba->pwr_info.lane_tx = 1;
98         hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
99         hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
100         hba->pwr_info.hs_rate = 0;
101 }
102
103 /**
104  * ufshcd_print_pwr_info - print power params as saved in hba
105  * power info
106  */
107 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
108 {
109         static const char * const names[] = {
110                 "INVALID MODE",
111                 "FAST MODE",
112                 "SLOW_MODE",
113                 "INVALID MODE",
114                 "FASTAUTO_MODE",
115                 "SLOWAUTO_MODE",
116                 "INVALID MODE",
117         };
118
119         dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
120                 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
121                 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
122                 names[hba->pwr_info.pwr_rx],
123                 names[hba->pwr_info.pwr_tx],
124                 hba->pwr_info.hs_rate);
125 }
126
127 /**
128  * ufshcd_ready_for_uic_cmd - Check if controller is ready
129  *                            to accept UIC commands
130  */
131 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
132 {
133         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
134                 return true;
135         else
136                 return false;
137 }
138
139 /**
140  * ufshcd_get_uic_cmd_result - Get the UIC command result
141  */
142 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
143 {
144         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
145                MASK_UIC_COMMAND_RESULT;
146 }
147
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: host controller instance
 *
 * Return: raw contents of the UIC command argument-3 register, which holds
 * the MIB attribute value after a DME_GET/DME_PEER_GET completes.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
155
156 /**
157  * ufshcd_is_device_present - Check if any device connected to
158  *                            the host controller
159  */
160 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
161 {
162         return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
163                                                 DEVICE_PRESENT) ? true : false;
164 }
165
/**
 * ufshcd_send_uic_cmd - UFS Interconnect layer command API
 * @hba: host controller instance
 * @uic_cmd: command to issue; on success, argument2/argument3 are
 *           overwritten with the result code and returned attribute value
 *
 * Writes the three argument registers and the opcode, then busy-polls the
 * interrupt status register (acknowledging bits as they are seen) until a
 * UIC completion, an error interrupt, or a timeout.
 *
 * Return: 0 on success, -EIO if the controller is not ready to accept UIC
 * commands, -ETIMEDOUT if no response arrives within UFS_UIC_CMD_TIMEOUT,
 * -1 on an error interrupt.
 */
static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	unsigned long start = 0;
	u32 intr_status;
	u32 enabled_intr_status;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	debug("sending uic command:%d\n", uic_cmd->command);

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd: arguments must be in place before the opcode */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);

	start = get_timer(0);
	do {
		/* read status, mask to enabled sources, then ack what we saw */
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"Timedout waiting for UIC response\n");

			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);

			return -1;
		}
	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));

	/* command completed: collect result code and attribute value */
	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);

	debug("Sent successfully\n");

	return 0;
}
221
222 /**
223  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
224  *
225  */
226 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
227                         u32 mib_val, u8 peer)
228 {
229         struct uic_command uic_cmd = {0};
230         static const char *const action[] = {
231                 "dme-set",
232                 "dme-peer-set"
233         };
234         const char *set = action[!!peer];
235         int ret;
236         int retries = UFS_UIC_COMMAND_RETRIES;
237
238         uic_cmd.command = peer ?
239                 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
240         uic_cmd.argument1 = attr_sel;
241         uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
242         uic_cmd.argument3 = mib_val;
243
244         do {
245                 /* for peer attributes we retry upon failure */
246                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
247                 if (ret)
248                         dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
249                                 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
250         } while (ret && peer && --retries);
251
252         if (ret)
253                 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
254                         set, UIC_GET_ATTR_ID(attr_sel), mib_val,
255                         UFS_UIC_COMMAND_RETRIES - retries);
256
257         return ret;
258 }
259
260 /**
261  * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
262  *
263  */
264 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
265                         u32 *mib_val, u8 peer)
266 {
267         struct uic_command uic_cmd = {0};
268         static const char *const action[] = {
269                 "dme-get",
270                 "dme-peer-get"
271         };
272         const char *get = action[!!peer];
273         int ret;
274         int retries = UFS_UIC_COMMAND_RETRIES;
275
276         uic_cmd.command = peer ?
277                 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
278         uic_cmd.argument1 = attr_sel;
279
280         do {
281                 /* for peer attributes we retry upon failure */
282                 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
283                 if (ret)
284                         dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
285                                 get, UIC_GET_ATTR_ID(attr_sel), ret);
286         } while (ret && peer && --retries);
287
288         if (ret)
289                 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
290                         get, UIC_GET_ATTR_ID(attr_sel),
291                         UFS_UIC_COMMAND_RETRIES - retries);
292
293         if (mib_val && !ret)
294                 *mib_val = uic_cmd.argument3;
295
296         return ret;
297 }
298
299 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
300 {
301         u32 tx_lanes, i, err = 0;
302
303         if (!peer)
304                 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
305                                &tx_lanes);
306         else
307                 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
308                                     &tx_lanes);
309         for (i = 0; i < tx_lanes; i++) {
310                 if (!peer)
311                         err = ufshcd_dme_set(hba,
312                                              UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
313                                              UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
314                                              0);
315                 else
316                         err = ufshcd_dme_peer_set(hba,
317                                         UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
318                                         UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
319                                         0);
320                 if (err) {
321                         dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
322                                 __func__, peer, i, err);
323                         break;
324                 }
325         }
326
327         return err;
328 }
329
/**
 * ufshcd_disable_device_tx_lcc - disable TX LCC on the device (peer) side
 * @hba: host controller instance
 *
 * Return: 0 on success, DME access error code otherwise.
 */
static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
334
335 /**
336  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
337  *
338  */
339 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
340 {
341         struct uic_command uic_cmd = {0};
342         int ret;
343
344         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
345
346         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
347         if (ret)
348                 dev_dbg(hba->dev,
349                         "dme-link-startup: error code %d\n", ret);
350         return ret;
351 }
352
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: host controller instance
 *
 * Clears the UTRL interrupt aggregation control register so completions
 * are signalled individually rather than aggregated.
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
361
362 /**
363  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
364  */
365 static inline int ufshcd_get_lists_status(u32 reg)
366 {
367         return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
368 }
369
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *                      When run-stop registers are set to 1, it indicates the
 *                      host controller that it can process the requests
 * @hba: host controller instance
 *
 * Sets the run-stop bit for both the task management request list (UTMRL)
 * and the transfer request list (UTRL).
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
382
383 /**
384  * ufshcd_enable_intr - enable interrupts
385  */
386 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
387 {
388         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
389         u32 rw;
390
391         if (hba->version == UFSHCI_VERSION_10) {
392                 rw = set & INTERRUPT_MASK_RW_VER_10;
393                 set = rw | ((set ^ intrs) & intrs);
394         } else {
395                 set |= intrs;
396         }
397
398         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
399
400         hba->intr_mask = set;
401 }
402
403 /**
404  * ufshcd_make_hba_operational - Make UFS controller operational
405  *
406  * To bring UFS host controller to operational state,
407  * 1. Enable required interrupts
408  * 2. Configure interrupt aggregation
409  * 3. Program UTRL and UTMRL base address
410  * 4. Configure run-stop-registers
411  *
412  */
413 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
414 {
415         int err = 0;
416         u32 reg;
417
418         /* Enable required interrupts */
419         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
420
421         /* Disable interrupt aggregation */
422         ufshcd_disable_intr_aggr(hba);
423
424         /* Configure UTRL and UTMRL base address registers */
425         ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
426                       REG_UTP_TRANSFER_REQ_LIST_BASE_L);
427         ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
428                       REG_UTP_TRANSFER_REQ_LIST_BASE_H);
429         ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
430                       REG_UTP_TASK_REQ_LIST_BASE_L);
431         ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
432                       REG_UTP_TASK_REQ_LIST_BASE_H);
433
434         /*
435          * UCRDY, UTMRLDY and UTRLRDY bits must be 1
436          */
437         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
438         if (!(ufshcd_get_lists_status(reg))) {
439                 ufshcd_enable_run_stop_reg(hba);
440         } else {
441                 dev_err(hba->dev,
442                         "Host controller not ready to process requests");
443                 err = -EIO;
444                 goto out;
445         }
446
447 out:
448         return err;
449 }
450
/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: host controller instance
 *
 * Issues DME_LINK_STARTUP (with vendor pre/post notifications) until it
 * succeeds or the retry budget is exhausted, then performs one deliberate
 * extra startup round (link_startup_again), optionally disables device TX
 * LCC for quirky controllers, and finally makes the HBA operational.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	/* force a second full startup round after the first one succeeds */
	bool link_startup_again = true;

link_startup:
	do {
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);

	return ret;
}
513
514 /**
515  * ufshcd_hba_stop - Send controller to reset state
516  */
517 static inline void ufshcd_hba_stop(struct ufs_hba *hba)
518 {
519         int err;
520
521         ufshcd_writel(hba, CONTROLLER_DISABLE,  REG_CONTROLLER_ENABLE);
522         err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
523                                        CONTROLLER_ENABLE, CONTROLLER_DISABLE,
524                                        10);
525         if (err)
526                 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
527 }
528
529 /**
530  * ufshcd_is_hba_active - Get controller state
531  */
532 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
533 {
534         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
535                 ? false : true;
536 }
537
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: host controller instance
 *
 * Sets the HCE bit; the controller clears and re-sets it as its
 * initialization sequence progresses (see ufshcd_hba_enable()).
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
545
/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: host controller instance
 *
 * Resets the controller if needed, starts the HCE initialization sequence
 * (with vendor pre/post notifications), then polls HCE — with a mandatory
 * initial delay, see below — until the controller reports ready.
 *
 * Return: 0 on success, -EIO if initialization never completes.
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		/* 10 retries x 5 ms => up to ~50 ms total wait */
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
593
594 /**
595  * ufshcd_host_memory_configure - configure local reference block with
596  *                              memory offsets
597  */
598 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
599 {
600         struct utp_transfer_req_desc *utrdlp;
601         dma_addr_t cmd_desc_dma_addr;
602         u16 response_offset;
603         u16 prdt_offset;
604
605         utrdlp = hba->utrdl;
606         cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;
607
608         utrdlp->command_desc_base_addr_lo =
609                                 cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
610         utrdlp->command_desc_base_addr_hi =
611                                 cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));
612
613         response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
614         prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
615
616         utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
617         utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
618         utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
619
620         hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
621         hba->ucd_rsp_ptr =
622                 (struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
623         hba->ucd_prdt_ptr =
624                 (struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
625 }
626
627 /**
628  * ufshcd_memory_alloc - allocate memory for host memory space data structures
629  */
630 static int ufshcd_memory_alloc(struct ufs_hba *hba)
631 {
632         /* Allocate one Transfer Request Descriptor
633          * Should be aligned to 1k boundary.
634          */
635         hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
636         if (!hba->utrdl) {
637                 dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
638                 return -ENOMEM;
639         }
640
641         /* Allocate one Command Descriptor
642          * Should be aligned to 1k boundary.
643          */
644         hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
645         if (!hba->ucdl) {
646                 dev_err(hba->dev, "Command descriptor memory allocation failed\n");
647                 return -ENOMEM;
648         }
649
650         return 0;
651 }
652
653 /**
654  * ufshcd_get_intr_mask - Get the interrupt bit mask
655  */
656 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
657 {
658         u32 intr_mask = 0;
659
660         switch (hba->version) {
661         case UFSHCI_VERSION_10:
662                 intr_mask = INTERRUPT_MASK_ALL_VER_10;
663                 break;
664         case UFSHCI_VERSION_11:
665         case UFSHCI_VERSION_20:
666                 intr_mask = INTERRUPT_MASK_ALL_VER_11;
667                 break;
668         case UFSHCI_VERSION_21:
669         default:
670                 intr_mask = INTERRUPT_MASK_ALL_VER_21;
671                 break;
672         }
673
674         return intr_mask;
675 }
676
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: host controller instance
 *
 * Return: raw contents of the UFS version register.
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}
684
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: host controller instance
 *
 * Return: the 3-bit UPMCRS field (bits 10:8 of the controller status
 * register).
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
692
693 /**
694  * ufshcd_prepare_req_desc_hdr() - Fills the requests header
695  * descriptor according to request
696  */
697 static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
698                                         u32 *upiu_flags,
699                                         enum dma_data_direction cmd_dir)
700 {
701         u32 data_direction;
702         u32 dword_0;
703
704         if (cmd_dir == DMA_FROM_DEVICE) {
705                 data_direction = UTP_DEVICE_TO_HOST;
706                 *upiu_flags = UPIU_CMD_FLAGS_READ;
707         } else if (cmd_dir == DMA_TO_DEVICE) {
708                 data_direction = UTP_HOST_TO_DEVICE;
709                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
710         } else {
711                 data_direction = UTP_NO_DATA_TRANSFER;
712                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
713         }
714
715         dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);
716
717         /* Enable Interrupt for command */
718         dword_0 |= UTP_REQ_DESC_INT_CMD;
719
720         /* Transfer request descriptor header fields */
721         req_desc->header.dword_0 = cpu_to_le32(dword_0);
722         /* dword_1 is reserved, hence it is set to 0 */
723         req_desc->header.dword_1 = 0;
724         /*
725          * assigning invalid value for command status. Controller
726          * updates OCS on command completion, with the command
727          * status
728          */
729         req_desc->header.dword_2 =
730                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
731         /* dword_3 is reserved, hence it is set to 0 */
732         req_desc->header.dword_3 = 0;
733
734         req_desc->prd_table_length = 0;
735 }
736
/**
 * ufshcd_prepare_utp_query_req_upiu - build a QUERY REQUEST UPIU
 * @hba: host controller instance; reads hba->dev_cmd.query and writes the
 *       request UPIU at hba->ucd_req_ptr
 * @upiu_flags: UPIU command flags from ufshcd_prepare_req_desc_hdr()
 *
 * Also clears the response UPIU area so stale data cannot be mistaken
 * for a response.
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	/* length field of the query request is big-endian */
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
				UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
						  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
				UPIU_HEADER_DWORD(0, query->request.query_func,
						  0, 0);

	/* Data segment length only need for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
				UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/* Copy the Descriptor: payload follows the request UPIU in memory */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
768
769 static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
770 {
771         struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
772
773         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
774
775         /* command descriptor fields */
776         ucd_req_ptr->header.dword_0 =
777                         UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0x1f);
778         /* clear rest of the fields of basic header */
779         ucd_req_ptr->header.dword_1 = 0;
780         ucd_req_ptr->header.dword_2 = 0;
781
782         memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
783 }
784
785 /**
786  * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
787  *                           for Device Management Purposes
788  */
789 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
790                                    enum dev_cmd_type cmd_type)
791 {
792         u32 upiu_flags;
793         int ret = 0;
794         struct utp_transfer_req_desc *req_desc = hba->utrdl;
795
796         hba->dev_cmd.type = cmd_type;
797
798         ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
799         switch (cmd_type) {
800         case DEV_CMD_TYPE_QUERY:
801                 ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
802                 break;
803         case DEV_CMD_TYPE_NOP:
804                 ufshcd_prepare_utp_nop_upiu(hba);
805                 break;
806         default:
807                 ret = -EINVAL;
808         }
809
810         return ret;
811 }
812
/**
 * ufshcd_send_command - ring the doorbell and wait for completion
 * @hba: host controller instance
 * @task_tag: tag of the transfer request to issue (doorbell bit index)
 *
 * Busy-polls the interrupt status register, acknowledging bits as seen,
 * until transfer-request completion, an error interrupt, or a timeout.
 *
 * Return: 0 on success, -ETIMEDOUT if no completion arrives within
 * QUERY_REQ_TIMEOUT, -1 on an error interrupt.
 */
static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	unsigned long start;
	u32 intr_status;
	u32 enabled_intr_status;

	/* writing the tag's bit starts processing of that request */
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);

	start = get_timer(0);
	do {
		/* read status, mask to enabled sources, then ack what we saw */
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
			dev_err(hba->dev,
				"Timedout waiting for UTP response\n");

			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);

			return -1;
		}
	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));

	return 0;
}
844
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: response UPIU
 *
 * Return: the transaction type, i.e. the most-significant byte of the
 * (big-endian) first header dword.
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
852
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @hba: host controller instance
 *
 * Return: the OCS field from dword_2 of the (little-endian) transfer
 * request descriptor header; 0 indicates success.
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
}
861
/**
 * ufshcd_get_rsp_upiu_result - extract the result field of a response UPIU
 * @ucd_rsp_ptr: response UPIU
 *
 * Return: the masked result field from the (big-endian) second header dword.
 */
static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
866
867 static int ufshcd_check_query_response(struct ufs_hba *hba)
868 {
869         struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
870
871         /* Get the UPIU response */
872         query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
873                                 UPIU_RSP_CODE_OFFSET;
874         return query_res->response;
875 }
876
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: host controller instance; reads the response UPIU and writes into
 *       hba->dev_cmd.query (response fields and, for READ_DESC, the
 *       caller-supplied descriptor buffer)
 *
 * Return: 0 on success, -EINVAL if the response is larger than the
 * caller's descriptor buffer.
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		/* descriptor payload follows the general response UPIU */
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}
912
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: per-adapter instance
 * @cmd_type: device management command type (NOP, query, ...)
 * @timeout: timeout in ms.  NOTE(review): currently unused — completion
 *	     appears to be awaited inside ufshcd_send_command(); confirm
 *	     this is intended.
 *
 * Composes the request UPIU, issues it on the shared TASK_TAG slot and
 * decodes the response transaction type.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
			       int timeout)
{
	int err;
	int resp;

	/* build the request UPIU in the command descriptor */
	err = ufshcd_comp_devman_upiu(hba, cmd_type);
	if (err)
		return err;

	err = ufshcd_send_command(hba, TASK_TAG);
	if (err)
		return err;

	/* Overall Command Status must be success before decoding further */
	err = ufshcd_get_tr_ocs(hba);
	if (err) {
		dev_err(hba->dev, "Error in OCS:%d\n", err);
		return -EINVAL;
	}

	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba);
		if (!err)
			err = ufshcd_copy_query_response(hba);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
			__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
			__func__, resp);
	}

	return err;
}
959
960 /**
961  * ufshcd_init_query() - init the query response and request parameters
962  */
963 static inline void ufshcd_init_query(struct ufs_hba *hba,
964                                      struct ufs_query_req **request,
965                                      struct ufs_query_res **response,
966                                      enum query_opcode opcode,
967                                      u8 idn, u8 index, u8 selector)
968 {
969         *request = &hba->dev_cmd.query.request;
970         *response = &hba->dev_cmd.query.response;
971         memset(*request, 0, sizeof(struct ufs_query_req));
972         memset(*response, 0, sizeof(struct ufs_query_res));
973         (*request)->upiu_req.opcode = opcode;
974         (*request)->upiu_req.idn = idn;
975         (*request)->upiu_req.index = index;
976         (*request)->upiu_req.selector = selector;
977 }
978
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query opcode (SET/CLEAR/TOGGLE/READ)
 * @idn: flag IDN to access
 * @flag_res: where the flag value is stored on a read; must be non-NULL
 *	      for READ_FLAG, ignored otherwise
 *
 * Return: 0 on success, negative error code on failure.
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
				__func__);
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out;
	}

	/* the flag is the least significant bit of the value field */
	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out:
	return err;
}
1033
1034 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
1035                                    enum query_opcode opcode,
1036                                    enum flag_idn idn, bool *flag_res)
1037 {
1038         int ret;
1039         int retries;
1040
1041         for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
1042                 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
1043                 if (ret)
1044                         dev_dbg(hba->dev,
1045                                 "%s: failed with error %d, retries %d\n",
1046                                 __func__, ret, retries);
1047                 else
1048                         break;
1049         }
1050
1051         if (ret)
1052                 dev_err(hba->dev,
1053                         "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
1054                         __func__, opcode, idn, ret, retries);
1055         return ret;
1056 }
1057
1058 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
1059                                      enum query_opcode opcode,
1060                                      enum desc_idn idn, u8 index, u8 selector,
1061                                      u8 *desc_buf, int *buf_len)
1062 {
1063         struct ufs_query_req *request = NULL;
1064         struct ufs_query_res *response = NULL;
1065         int err;
1066
1067         if (!desc_buf) {
1068                 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
1069                         __func__, opcode);
1070                 err = -EINVAL;
1071                 goto out;
1072         }
1073
1074         if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
1075                 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
1076                         __func__, *buf_len);
1077                 err = -EINVAL;
1078                 goto out;
1079         }
1080
1081         ufshcd_init_query(hba, &request, &response, opcode, idn, index,
1082                           selector);
1083         hba->dev_cmd.query.descriptor = desc_buf;
1084         request->upiu_req.length = cpu_to_be16(*buf_len);
1085
1086         switch (opcode) {
1087         case UPIU_QUERY_OPCODE_WRITE_DESC:
1088                 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
1089                 break;
1090         case UPIU_QUERY_OPCODE_READ_DESC:
1091                 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
1092                 break;
1093         default:
1094                 dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
1095                         __func__, opcode);
1096                 err = -EINVAL;
1097                 goto out;
1098         }
1099
1100         err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
1101
1102         if (err) {
1103                 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
1104                         __func__, opcode, idn, index, err);
1105                 goto out;
1106         }
1107
1108         hba->dev_cmd.query.descriptor = NULL;
1109         *buf_len = be16_to_cpu(response->upiu_res.length);
1110
1111 out:
1112         return err;
1113 }
1114
1115 /**
1116  * ufshcd_query_descriptor_retry - API function for sending descriptor requests
1117  */
1118 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
1119                                   enum desc_idn idn, u8 index, u8 selector,
1120                                   u8 *desc_buf, int *buf_len)
1121 {
1122         int err;
1123         int retries;
1124
1125         for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
1126                 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
1127                                                 selector, desc_buf, buf_len);
1128                 if (!err || err == -EINVAL)
1129                         break;
1130         }
1131
1132         return err;
1133 }
1134
/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 * @hba: per-adapter instance
 * @desc_id: descriptor IDN to query
 * @desc_index: descriptor index
 * @desc_length: out: length field taken from the descriptor header
 *
 * Reads only the QUERY_DESC_HDR_SIZE-byte header and extracts its length
 * field.
 *
 * Return: 0 on success; the query error code if the read failed; -EINVAL
 * if @desc_id is out of range or the header reports a different descriptor
 * type.  Note that in the type-mismatch case *desc_length is still written
 * from the header before returning.
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}
1167
1168 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
1169 {
1170         int err;
1171
1172         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
1173                                       &hba->desc_size.dev_desc);
1174         if (err)
1175                 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
1176
1177         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
1178                                       &hba->desc_size.pwr_desc);
1179         if (err)
1180                 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
1181
1182         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
1183                                       &hba->desc_size.interc_desc);
1184         if (err)
1185                 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
1186
1187         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
1188                                       &hba->desc_size.conf_desc);
1189         if (err)
1190                 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
1191
1192         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
1193                                       &hba->desc_size.unit_desc);
1194         if (err)
1195                 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
1196
1197         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
1198                                       &hba->desc_size.geom_desc);
1199         if (err)
1200                 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
1201
1202         err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
1203                                       &hba->desc_size.hlth_desc);
1204         if (err)
1205                 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
1206 }
1207
1208 /**
1209  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
1210  *
1211  */
1212 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
1213                                  int *desc_len)
1214 {
1215         switch (desc_id) {
1216         case QUERY_DESC_IDN_DEVICE:
1217                 *desc_len = hba->desc_size.dev_desc;
1218                 break;
1219         case QUERY_DESC_IDN_POWER:
1220                 *desc_len = hba->desc_size.pwr_desc;
1221                 break;
1222         case QUERY_DESC_IDN_GEOMETRY:
1223                 *desc_len = hba->desc_size.geom_desc;
1224                 break;
1225         case QUERY_DESC_IDN_CONFIGURATION:
1226                 *desc_len = hba->desc_size.conf_desc;
1227                 break;
1228         case QUERY_DESC_IDN_UNIT:
1229                 *desc_len = hba->desc_size.unit_desc;
1230                 break;
1231         case QUERY_DESC_IDN_INTERCONNECT:
1232                 *desc_len = hba->desc_size.interc_desc;
1233                 break;
1234         case QUERY_DESC_IDN_STRING:
1235                 *desc_len = QUERY_DESC_MAX_SIZE;
1236                 break;
1237         case QUERY_DESC_IDN_HEALTH:
1238                 *desc_len = hba->desc_size.hlth_desc;
1239                 break;
1240         case QUERY_DESC_IDN_RFU_0:
1241         case QUERY_DESC_IDN_RFU_1:
1242                 *desc_len = 0;
1243                 break;
1244         default:
1245                 *desc_len = 0;
1246                 return -EINVAL;
1247         }
1248         return 0;
1249 }
1250 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
1251
1252 /**
1253  * ufshcd_read_desc_param - read the specified descriptor parameter
1254  *
1255  */
1256 int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
1257                            int desc_index, u8 param_offset, u8 *param_read_buf,
1258                            u8 param_size)
1259 {
1260         int ret;
1261         u8 *desc_buf;
1262         int buff_len;
1263         bool is_kmalloc = true;
1264
1265         /* Safety check */
1266         if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
1267                 return -EINVAL;
1268
1269         /* Get the max length of descriptor from structure filled up at probe
1270          * time.
1271          */
1272         ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
1273
1274         /* Sanity checks */
1275         if (ret || !buff_len) {
1276                 dev_err(hba->dev, "%s: Failed to get full descriptor length",
1277                         __func__);
1278                 return ret;
1279         }
1280
1281         /* Check whether we need temp memory */
1282         if (param_offset != 0 || param_size < buff_len) {
1283                 desc_buf = kmalloc(buff_len, GFP_KERNEL);
1284                 if (!desc_buf)
1285                         return -ENOMEM;
1286         } else {
1287                 desc_buf = param_read_buf;
1288                 is_kmalloc = false;
1289         }
1290
1291         /* Request for full descriptor */
1292         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
1293                                             desc_id, desc_index, 0, desc_buf,
1294                                             &buff_len);
1295
1296         if (ret) {
1297                 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
1298                         __func__, desc_id, desc_index, param_offset, ret);
1299                 goto out;
1300         }
1301
1302         /* Sanity check */
1303         if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
1304                 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
1305                         __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
1306                 ret = -EINVAL;
1307                 goto out;
1308         }
1309
1310         /* Check wherher we will not copy more data, than available */
1311         if (is_kmalloc && param_size > buff_len)
1312                 param_size = buff_len;
1313
1314         if (is_kmalloc)
1315                 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1316 out:
1317         if (is_kmalloc)
1318                 kfree(desc_buf);
1319         return ret;
1320 }
1321
/*
 * ufshcd_remove_non_printable - sanitize one byte of a display string
 * @val: byte to sanitize; NULL is tolerated and ignored
 *
 * Replaces any byte outside the printable ASCII range [0x20, 0x7e]
 * with a space.
 */
static inline void ufshcd_remove_non_printable(uint8_t *val)
{
	if (val && (*val < 0x20 || *val > 0x7e))
		*val = ' ';
}
1331
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 * @hba: per-adapter instance
 * @cmd: UIC command to issue (e.g. DME_SET of PA_PWRMODE)
 *
 * Sends @cmd and then polls the controller's UPMCRS field until it reads
 * PWR_LOCAL, giving up after UFS_UIC_CMD_TIMEOUT ms.
 *
 * Return: 0 on success.  If sending fails, the UIC error code.  On
 * timeout, the last UPMCRS value when it is not PWR_OK, otherwise -1.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	unsigned long start = 0;
	u8 status;
	int ret;

	ret = ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);

		return ret;
	}

	/* poll until the power mode change completes or times out */
	start = get_timer(0);
	do {
		status = ufshcd_get_upmcrs(hba);
		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
				cmd->command, status);
			/* report UPMCRS if informative, else a generic -1 */
			ret = (status != PWR_OK) ? status : -1;
			break;
		}
	} while (status != PWR_LOCAL);

	return ret;
}
1366
1367 /**
1368  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
1369  *                              using DME_SET primitives.
1370  */
1371 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
1372 {
1373         struct uic_command uic_cmd = {0};
1374         int ret;
1375
1376         uic_cmd.command = UIC_CMD_DME_SET;
1377         uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
1378         uic_cmd.argument3 = mode;
1379         ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
1380
1381         return ret;
1382 }
1383
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu - fill the command UPIU for a SCSI request
 * @hba: per-adapter instance
 * @pccb: SCSI command block being issued
 * @upiu_flags: read/write flags for the UPIU header
 *
 * Builds the SCSI command UPIU in hba->ucd_req_ptr (header, expected data
 * transfer length and CDB) and clears the response UPIU buffer so stale
 * data cannot be decoded after completion.
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
				      struct scsi_cmd *pccb, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
			UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
					  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
			UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	/* truncate the CDB to what the UPIU can carry, zero-pad the rest */
	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
1409
/*
 * prepare_prdt_desc - fill one PRDT scatter/gather entry
 * @entry: PRDT entry to fill
 * @buf: start of the data chunk (U-Boot: physical == virtual address)
 * @len: chunk length minus one — both callers pass (size - 1)
 *
 * NOTE(review): ORing GENMASK(1, 0) forces the two low bits of the size
 * field set, presumably because the hardware byte count is dword-granular
 * and stored as (count - 1) — confirm against the UFSHCI spec.
 */
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}
1417
/**
 * prepare_prdt_table - build the PRDT for a SCSI command's data buffer
 * @hba: per-adapter instance
 * @pccb: SCSI command block describing the transfer
 *
 * Splits pccb->pdata into MAX_PRDT_ENTRY-byte chunks.  The countdown loop
 * runs (table_length - 1) times, filling entries 0..table_length-2 with
 * full-size chunks; the call after the loop (with i == 0) fills the last
 * entry with whatever remains.  Lengths are passed minus one — see
 * prepare_prdt_desc().
 */
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	/* zero-length transfer: no PRDT entries at all */
	if (!datalen) {
		req_desc->prd_table_length = 0;
		return;
	}

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	/* i == 0 here, so this fills entry table_length - 1 */
	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
}
1446
1447 static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
1448 {
1449         struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
1450         struct utp_transfer_req_desc *req_desc = hba->utrdl;
1451         u32 upiu_flags;
1452         int ocs, result = 0;
1453         u8 scsi_status;
1454
1455         ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
1456         ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
1457         prepare_prdt_table(hba, pccb);
1458
1459         ufshcd_send_command(hba, TASK_TAG);
1460
1461         ocs = ufshcd_get_tr_ocs(hba);
1462         switch (ocs) {
1463         case OCS_SUCCESS:
1464                 result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
1465                 switch (result) {
1466                 case UPIU_TRANSACTION_RESPONSE:
1467                         result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);
1468
1469                         scsi_status = result & MASK_SCSI_STATUS;
1470                         if (scsi_status)
1471                                 return -EINVAL;
1472
1473                         break;
1474                 case UPIU_TRANSACTION_REJECT_UPIU:
1475                         /* TODO: handle Reject UPIU Response */
1476                         dev_err(hba->dev,
1477                                 "Reject UPIU not fully implemented\n");
1478                         return -EINVAL;
1479                 default:
1480                         dev_err(hba->dev,
1481                                 "Unexpected request response code = %x\n",
1482                                 result);
1483                         return -EINVAL;
1484                 }
1485                 break;
1486         default:
1487                 dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
1488                 return -EINVAL;
1489         }
1490
1491         return 0;
1492 }
1493
1494 static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
1495                                    int desc_index, u8 *buf, u32 size)
1496 {
1497         return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1498 }
1499
1500 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
1501 {
1502         return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
1503 }
1504
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: per-adapter instance
 * @desc_index: index of the string descriptor (e.g. the product name index
 *		from the device descriptor)
 * @buf: buffer receiving the descriptor
 * @size: size of @buf in bytes
 * @ascii: if true, convert the UTF-16 payload to ASCII in place, replacing
 *	   non-printable characters with spaces
 *
 * Return: 0 on success, negative error code on failure.
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
			       size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		u8 *buff_ascii;

		/* first byte of a descriptor is its total length */
		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 *
		 * NOTE(review): the cast assumes &buf[QUERY_DESC_HDR_SIZE]
		 * is suitably aligned for 16-bit access — confirm for all
		 * callers.
		 */
		utf16_to_utf8(buff_ascii,
			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		/* rewrite the payload in place as NUL-padded ASCII */
		memset(buf + QUERY_DESC_HDR_SIZE, 0,
		       size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}
1565
/**
 * ufs_get_device_desc - read the device descriptor and product name
 * @hba: per-adapter instance
 * @dev_desc: out: manufacturer id and model string
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	/*
	 * One buffer serves two reads: the device descriptor, then the
	 * product-name string descriptor plus a NUL terminator written at
	 * index QUERY_DESC_MAX_SIZE — hence the "+ 1".
	 */
	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	/* Zero-pad entire buffer for string termination. */
	memset(desc_buf, 0, buff_len);

	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/*
	 * Null terminate the model string.
	 * NOTE(review): assumes dev_desc->model holds MAX_MODEL_LEN + 1
	 * bytes — confirm against the struct ufs_dev_desc definition.
	 */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	kfree(desc_buf);
	return err;
}
1621
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 *
 * Fills hba->max_pwr_info.info with the highest gears, lane counts and
 * power modes available on the link; the result is cached via
 * max_pwr_info.is_valid so subsequent calls return immediately.
 *
 * Return: 0 on success, -EINVAL if a lane count or gear reads as zero.
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	/* already probed once — reuse the cached values */
	if (hba->max_pwr_info.is_valid)
		return 0;

	/* start optimistic: HS fast mode, rate series B */
	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	/*
	 * NOTE(review): the peer's RX attributes are read for our TX
	 * capability (peer RX corresponds to the local TX direction) —
	 * confirm this is intended.
	 */
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}
1681
/**
 * ufshcd_change_power_mode - program PA-layer attributes and switch the
 * link to the requested power mode
 * @hba: per-adapter instance
 * @pwr_mode: desired gears, lane counts, power modes and HS rate
 *
 * Return: 0 on success (including the already-configured case), error
 * code from ufshcd_uic_change_pwr_mode() on failure.
 */
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	/* RX termination is enabled only for the fast (HS) modes */
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	/* the HS rate series only matters if either direction is HS */
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	/* trigger the switch: rx mode in bits 7:4, tx mode in bits 3:0 */
	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);

		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}
1743
1744 /**
1745  * ufshcd_verify_dev_init() - Verify device initialization
1746  *
1747  */
1748 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1749 {
1750         int retries;
1751         int err;
1752
1753         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1754                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1755                                           NOP_OUT_TIMEOUT);
1756                 if (!err || err == -ETIMEDOUT)
1757                         break;
1758
1759                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1760         }
1761
1762         if (err)
1763                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1764
1765         return err;
1766 }
1767
1768 /**
1769  * ufshcd_complete_dev_init() - checks device readiness
1770  */
1771 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
1772 {
1773         int i;
1774         int err;
1775         bool flag_res = 1;
1776
1777         err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
1778                                       QUERY_FLAG_IDN_FDEVICEINIT, NULL);
1779         if (err) {
1780                 dev_err(hba->dev,
1781                         "%s setting fDeviceInit flag failed with error %d\n",
1782                         __func__, err);
1783                 goto out;
1784         }
1785
1786         /* poll for max. 1000 iterations for fDeviceInit flag to clear */
1787         for (i = 0; i < 1000 && !err && flag_res; i++)
1788                 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
1789                                               QUERY_FLAG_IDN_FDEVICEINIT,
1790                                               &flag_res);
1791
1792         if (err)
1793                 dev_err(hba->dev,
1794                         "%s reading fDeviceInit flag failed with error %d\n",
1795                         __func__, err);
1796         else if (flag_res)
1797                 dev_err(hba->dev,
1798                         "%s fDeviceInit was not cleared by the device\n",
1799                         __func__);
1800
1801 out:
1802         return err;
1803 }
1804
/*
 * ufshcd_def_desc_sizes() - seed descriptor lengths with spec defaults
 *
 * Fills hba->desc_size with the QUERY_DESC_*_DEF_SIZE defaults so that
 * descriptor reads issued before the device reports its actual lengths
 * have sane bounds to work with.
 */
static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
1815
1816 int ufs_start(struct ufs_hba *hba)
1817 {
1818         struct ufs_dev_desc card = {0};
1819         int ret;
1820
1821         ret = ufshcd_link_startup(hba);
1822         if (ret)
1823                 return ret;
1824
1825         ret = ufshcd_verify_dev_init(hba);
1826         if (ret)
1827                 return ret;
1828
1829         ret = ufshcd_complete_dev_init(hba);
1830         if (ret)
1831                 return ret;
1832
1833         /* Init check for device descriptor sizes */
1834         ufshcd_init_desc_sizes(hba);
1835
1836         ret = ufs_get_device_desc(hba, &card);
1837         if (ret) {
1838                 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
1839                         __func__, ret);
1840
1841                 return ret;
1842         }
1843
1844         if (ufshcd_get_max_pwr_mode(hba)) {
1845                 dev_err(hba->dev,
1846                         "%s: Failed getting max supported power mode\n",
1847                         __func__);
1848         } else {
1849                 ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
1850                 if (ret) {
1851                         dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
1852                                 __func__, ret);
1853
1854                         return ret;
1855                 }
1856
1857                 printf("Device at %s up at:", hba->dev->name);
1858                 ufshcd_print_pwr_info(hba);
1859         }
1860
1861         return 0;
1862 }
1863
1864 int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
1865 {
1866         struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
1867         struct scsi_platdata *scsi_plat;
1868         struct udevice *scsi_dev;
1869         int err;
1870
1871         device_find_first_child(ufs_dev, &scsi_dev);
1872         if (!scsi_dev)
1873                 return -ENODEV;
1874
1875         scsi_plat = dev_get_uclass_platdata(scsi_dev);
1876         scsi_plat->max_id = UFSHCD_MAX_ID;
1877         scsi_plat->max_lun = UFS_MAX_LUNS;
1878         scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;
1879
1880         hba->dev = ufs_dev;
1881         hba->ops = hba_ops;
1882         hba->mmio_base = (void *)dev_read_addr(ufs_dev);
1883
1884         /* Set descriptor lengths to specification defaults */
1885         ufshcd_def_desc_sizes(hba);
1886
1887         ufshcd_ops_init(hba);
1888
1889         /* Read capabilties registers */
1890         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1891
1892         /* Get UFS version supported by the controller */
1893         hba->version = ufshcd_get_ufs_version(hba);
1894         if (hba->version != UFSHCI_VERSION_10 &&
1895             hba->version != UFSHCI_VERSION_11 &&
1896             hba->version != UFSHCI_VERSION_20 &&
1897             hba->version != UFSHCI_VERSION_21)
1898                 dev_err(hba->dev, "invalid UFS version 0x%x\n",
1899                         hba->version);
1900
1901         /* Get Interrupt bit mask per version */
1902         hba->intr_mask = ufshcd_get_intr_mask(hba);
1903
1904         /* Allocate memory for host memory space */
1905         err = ufshcd_memory_alloc(hba);
1906         if (err) {
1907                 dev_err(hba->dev, "Memory allocation failed\n");
1908                 return err;
1909         }
1910
1911         /* Configure Local data structures */
1912         ufshcd_host_memory_configure(hba);
1913
1914         /*
1915          * In order to avoid any spurious interrupt immediately after
1916          * registering UFS controller interrupt handler, clear any pending UFS
1917          * interrupt status and disable all the UFS interrupts.
1918          */
1919         ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
1920                       REG_INTERRUPT_STATUS);
1921         ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
1922
1923         err = ufshcd_hba_enable(hba);
1924         if (err) {
1925                 dev_err(hba->dev, "Host controller enable failed\n");
1926                 return err;
1927         }
1928
1929         err = ufs_start(hba);
1930         if (err)
1931                 return err;
1932
1933         return 0;
1934 }
1935
/*
 * ufs_scsi_bind() - bind the ufs_scsi child under a UFS controller
 * @ufs_dev: parent UFS controller udevice
 * @scsi_devp: returns the newly bound SCSI udevice
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	return device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				  scsi_devp);
}
1943
/* SCSI uclass operations: UFS only implements command execution */
static struct scsi_ops ufs_ops = {
	.exec		= ufs_scsi_exec,
};
1947
1948 int ufs_probe_dev(int index)
1949 {
1950         struct udevice *dev;
1951
1952         return uclass_get_device(UCLASS_UFS, index, &dev);
1953 }
1954
1955 int ufs_probe(void)
1956 {
1957         struct udevice *dev;
1958         int ret, i;
1959
1960         for (i = 0;; i++) {
1961                 ret = uclass_get_device(UCLASS_UFS, i, &dev);
1962                 if (ret == -ENODEV)
1963                         break;
1964         }
1965
1966         return 0;
1967 }
1968
/* Driver bound as the SCSI-uclass child of each UFS controller */
U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};