platform/kernel/linux-starfive.git: drivers/scsi/ufs/ufshpb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *      Yongmyung Lee <ymhungry.lee@samsung.com>
9  *      Jinyoung Choi <j-young.choi@samsung.com>
10  */
11
12 #include <asm/unaligned.h>
13 #include <linux/async.h>
14
15 #include "ufshcd.h"
16 #include "ufshpb.h"
17 #include "../sd.h"
18
19 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
20 #define READ_TO_MS 1000
21 #define READ_TO_EXPIRIES 100
22 #define POLLING_INTERVAL_MS 200
23 #define THROTTLE_MAP_REQ_DEFAULT 1
24
25 /* memory management */
26 static struct kmem_cache *ufshpb_mctx_cache;
27 static mempool_t *ufshpb_mctx_pool;
28 static mempool_t *ufshpb_page_pool;
29 /* A cache size of 2MB can cache ppn in the 1GB range. */
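/*
 * Illustrative arithmetic for the default below, assuming 8-byte L2P entries
 * (HPB_ENTRY_SIZE) that each map a 4KB logical block: 2MB / 8B = 256K
 * entries, and 256K entries * 4KB = 1GB of addressable range.
 */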
30 static unsigned int ufshpb_host_map_kbytes = 2048;
31 static int tot_active_srgn_pages;
32
33 static struct workqueue_struct *ufshpb_wq;
34
35 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
36                                       int srgn_idx);
37
38 bool ufshpb_is_allowed(struct ufs_hba *hba)
39 {
40         return !(hba->ufshpb_dev.hpb_disabled);
41 }
42
43 /* HPB version 1.0 is called the legacy version. */
44 bool ufshpb_is_legacy(struct ufs_hba *hba)
45 {
46         return hba->ufshpb_dev.is_legacy;
47 }
48
49 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
50 {
51         return sdev->hostdata;
52 }
53
54 static int ufshpb_get_state(struct ufshpb_lu *hpb)
55 {
56         return atomic_read(&hpb->hpb_state);
57 }
58
59 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
60 {
61         atomic_set(&hpb->hpb_state, state);
62 }
63
64 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
65                                 struct ufshpb_subregion *srgn)
66 {
67         return rgn->rgn_state != HPB_RGN_INACTIVE &&
68                 srgn->srgn_state == HPB_SRGN_VALID;
69 }
70
71 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
72 {
73         return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
74 }
75
76 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
77 {
78         return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
79                op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
80 }
81
82 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
83 {
84         return transfer_len <= hpb->pre_req_max_tr_len;
85 }
86
87 /*
88  * In this driver, the WRITE_BUFFER command supports 36KB (len=9) ~ 1MB
89  * (len=256) by default. The range of transfer_len can be changed through sysfs.
90  */
91 static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
92 {
93         return len > hpb->pre_req_min_tr_len &&
94                len <= hpb->pre_req_max_tr_len;
95 }
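/*
 * A sketch assuming the defaults implied above (pre_req_min_tr_len = 9,
 * pre_req_max_tr_len = 256, 4KB logical blocks): a 40KB read (len = 10)
 * takes the WRITE_BUFFER pre-request path, while a 36KB read (len = 9) or
 * smaller is sent as a regular HPB READ without a pre-request.
 */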
96
97 static bool ufshpb_is_general_lun(int lun)
98 {
99         return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
100 }
101
102 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
103 {
104         if (hpb->lu_pinned_end != PINNED_NOT_SET &&
105             rgn_idx >= hpb->lu_pinned_start &&
106             rgn_idx <= hpb->lu_pinned_end)
107                 return true;
108
109         return false;
110 }
111
112 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
113 {
114         bool ret = false;
115         unsigned long flags;
116
117         if (ufshpb_get_state(hpb) != HPB_PRESENT)
118                 return;
119
120         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
121         if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
122                 ret = true;
123         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
124
125         if (ret)
126                 queue_work(ufshpb_wq, &hpb->map_work);
127 }
128
129 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
130                                     struct ufshcd_lrb *lrbp,
131                                     struct utp_hpb_rsp *rsp_field)
132 {
133         /* Check HPB_UPDATE_ALERT */
134         if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
135               UPIU_HEADER_DWORD(0, 2, 0, 0)))
136                 return false;
137
138         if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
139             rsp_field->desc_type != DEV_DES_TYPE ||
140             rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
141             rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
142             rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
143             rsp_field->hpb_op == HPB_RSP_NONE ||
144             (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
145              !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
146                 return false;
147
148         if (!ufshpb_is_general_lun(rsp_field->lun)) {
149                 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
150                          lrbp->lun);
151                 return false;
152         }
153
154         return true;
155 }
156
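/*
 * Walk every subregion covered by (rgn_idx, srgn_idx, srgn_offset) .. +cnt.
 * For writes/discards (set_dirty) the covered ppn_dirty bits are set and, in
 * host control mode, the region read counters are reset; for reads in host
 * control mode the counters are incremented, the region read timeout is
 * rewound, and the subregion is queued for activation once activation_thld
 * reads are seen (or RGN_FLAG_UPDATE was pending).
 */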
157 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
158                                int srgn_offset, int cnt, bool set_dirty)
159 {
160         struct ufshpb_region *rgn;
161         struct ufshpb_subregion *srgn, *prev_srgn = NULL;
162         int set_bit_len;
163         int bitmap_len;
164         unsigned long flags;
165
166 next_srgn:
167         rgn = hpb->rgn_tbl + rgn_idx;
168         srgn = rgn->srgn_tbl + srgn_idx;
169
170         if (likely(!srgn->is_last))
171                 bitmap_len = hpb->entries_per_srgn;
172         else
173                 bitmap_len = hpb->last_srgn_entries;
174
175         if ((srgn_offset + cnt) > bitmap_len)
176                 set_bit_len = bitmap_len - srgn_offset;
177         else
178                 set_bit_len = cnt;
179
180         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
181         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
182                 if (set_dirty) {
183                         if (srgn->srgn_state == HPB_SRGN_VALID)
184                                 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
185                                            set_bit_len);
186                 } else if (hpb->is_hcm) {
187                          /* rewind the read timer for lru regions */
188                         rgn->read_timeout = ktime_add_ms(ktime_get(),
189                                         rgn->hpb->params.read_timeout_ms);
190                         rgn->read_timeout_expiries =
191                                 rgn->hpb->params.read_timeout_expiries;
192                 }
193         }
194         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
195
196         if (hpb->is_hcm && prev_srgn != srgn) {
197                 bool activate = false;
198
199                 spin_lock(&rgn->rgn_lock);
200                 if (set_dirty) {
201                         rgn->reads -= srgn->reads;
202                         srgn->reads = 0;
203                         set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
204                 } else {
205                         srgn->reads++;
206                         rgn->reads++;
207                         if (srgn->reads == hpb->params.activation_thld)
208                                 activate = true;
209                 }
210                 spin_unlock(&rgn->rgn_lock);
211
212                 if (activate ||
213                     test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
214                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
215                         ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
216                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
217                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
218                                 "activate region %d-%d\n", rgn_idx, srgn_idx);
219                 }
220
221                 prev_srgn = srgn;
222         }
223
224         srgn_offset = 0;
225         if (++srgn_idx == hpb->srgns_per_rgn) {
226                 srgn_idx = 0;
227                 rgn_idx++;
228         }
229
230         cnt -= set_bit_len;
231         if (cnt > 0)
232                 goto next_srgn;
233 }
234
235 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
236                                   int srgn_idx, int srgn_offset, int cnt)
237 {
238         struct ufshpb_region *rgn;
239         struct ufshpb_subregion *srgn;
240         int bitmap_len;
241         int bit_len;
242
243 next_srgn:
244         rgn = hpb->rgn_tbl + rgn_idx;
245         srgn = rgn->srgn_tbl + srgn_idx;
246
247         if (likely(!srgn->is_last))
248                 bitmap_len = hpb->entries_per_srgn;
249         else
250                 bitmap_len = hpb->last_srgn_entries;
251
252         if (!ufshpb_is_valid_srgn(rgn, srgn))
253                 return true;
254
255         /*
256          * If the region state is active, mctx must be allocated.
257          * In this case, check whether the region was evicted or
258          * the mctx allocation failed.
259          */
260         if (unlikely(!srgn->mctx)) {
261                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
262                         "no mctx in region %d subregion %d.\n",
263                         srgn->rgn_idx, srgn->srgn_idx);
264                 return true;
265         }
266
267         if ((srgn_offset + cnt) > bitmap_len)
268                 bit_len = bitmap_len - srgn_offset;
269         else
270                 bit_len = cnt;
271
272         if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
273                           srgn_offset) < bit_len + srgn_offset)
274                 return true;
275
276         srgn_offset = 0;
277         if (++srgn_idx == hpb->srgns_per_rgn) {
278                 srgn_idx = 0;
279                 rgn_idx++;
280         }
281
282         cnt -= bit_len;
283         if (cnt > 0)
284                 goto next_srgn;
285
286         return false;
287 }
288
289 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
290 {
291         return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
292 }
293
294 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
295                                      struct ufshpb_map_ctx *mctx, int pos,
296                                      int len, __be64 *ppn_buf)
297 {
298         struct page *page;
299         int index, offset;
300         int copied;
301
302         index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
303         offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
304
305         if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
306                 copied = len;
307         else
308                 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
309
310         page = mctx->m_page[index];
311         if (unlikely(!page)) {
312                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
313                         "error. cannot find page in mctx\n");
314                 return -ENOMEM;
315         }
316
317         memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
318                copied * HPB_ENTRY_SIZE);
319
320         return copied;
321 }
322
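/*
 * Decompose an LPN into (region, subregion, offset). Worked example,
 * assuming entries_per_rgn_shift = 12 and entries_per_srgn_shift = 9
 * (i.e. 16MB regions and 2MB subregions with 4KB entries): lpn 0x12345
 * gives rgn_idx 0x12, srgn_idx (0x345 >> 9) = 1 and offset 0x145.
 */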
323 static void
324 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
325                         int *srgn_idx, int *offset)
326 {
327         int rgn_offset;
328
329         *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
330         rgn_offset = lpn & hpb->entries_per_rgn_mask;
331         *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
332         *offset = rgn_offset & hpb->entries_per_srgn_mask;
333 }
334
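/*
 * Resulting UFSHPB_READ CDB layout, as built below: byte 0 opcode,
 * bytes 6..13 the 8-byte PPN copied big-endian from the host map cache,
 * byte 14 the transfer length in logical blocks, byte 15 the read id that
 * pairs this command with a preceding WRITE_BUFFER pre-request.
 */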
335 static void
336 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
337                             struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
338                             u8 transfer_len, int read_id)
339 {
340         unsigned char *cdb = lrbp->cmd->cmnd;
341         __be64 ppn_tmp = ppn;
342         cdb[0] = UFSHPB_READ;
343
344         if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
345                 ppn_tmp = swab64(ppn);
346
347         /* ppn value is stored as big-endian in the host memory */
348         memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
349         cdb[14] = transfer_len;
350         cdb[15] = read_id;
351
352         lrbp->cmd->cmd_len = UFS_CDB_SIZE;
353 }
354
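/*
 * Pre-request CDB layout built below (HPB WRITE BUFFER, prefetch buffer id):
 * byte 0 opcode, byte 1 buffer id, bytes 2..5 starting LPN (BE32), byte 6
 * read id, bytes 7..8 payload length in bytes (len * HPB_ENTRY_SIZE, BE16),
 * byte 9 control.
 */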
355 static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
356                                             unsigned long lpn, unsigned int len,
357                                             int read_id)
358 {
359         cdb[0] = UFSHPB_WRITE_BUFFER;
360         cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
361
362         put_unaligned_be32(lpn, &cdb[2]);
363         cdb[6] = read_id;
364         put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
365
366         cdb[9] = 0x00;  /* Control = 0x00 */
367 }
368
369 static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
370 {
371         struct ufshpb_req *pre_req;
372
373         if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
374                 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
375                          "pre_req throttle. inflight %d throttle %d",
376                          hpb->num_inflight_pre_req, hpb->throttle_pre_req);
377                 return NULL;
378         }
379
380         pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
381                                            struct ufshpb_req, list_req);
382         if (!pre_req) {
383                 dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
384                 return NULL;
385         }
386
387         list_del_init(&pre_req->list_req);
388         hpb->num_inflight_pre_req++;
389
390         return pre_req;
391 }
392
393 static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
394                                       struct ufshpb_req *pre_req)
395 {
396         pre_req->req = NULL;
397         bio_reset(pre_req->bio);
398         list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
399         hpb->num_inflight_pre_req--;
400 }
401
402 static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
403 {
404         struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
405         struct ufshpb_lu *hpb = pre_req->hpb;
406         unsigned long flags;
407
408         if (error) {
409                 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
410                 struct scsi_sense_hdr sshdr;
411
412                 dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
413                 scsi_command_normalize_sense(cmd, &sshdr);
414                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
415                         "code %x sense_key %x asc %x ascq %x",
416                         sshdr.response_code,
417                         sshdr.sense_key, sshdr.asc, sshdr.ascq);
418                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
419                         "byte4 %x byte5 %x byte6 %x additional_len %x",
420                         sshdr.byte4, sshdr.byte5,
421                         sshdr.byte6, sshdr.additional_length);
422         }
423
424         blk_mq_free_request(req);
425         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
426         ufshpb_put_pre_req(pre_req->hpb, pre_req);
427         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
428 }
429
430 static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
431 {
432         struct ufshpb_lu *hpb = pre_req->hpb;
433         struct ufshpb_region *rgn;
434         struct ufshpb_subregion *srgn;
435         __be64 *addr;
436         int offset = 0;
437         int copied;
438         unsigned long lpn = pre_req->wb.lpn;
439         int rgn_idx, srgn_idx, srgn_offset;
440         unsigned long flags;
441
442         addr = page_address(page);
443         ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
444
445         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
446
447 next_offset:
448         rgn = hpb->rgn_tbl + rgn_idx;
449         srgn = rgn->srgn_tbl + srgn_idx;
450
451         if (!ufshpb_is_valid_srgn(rgn, srgn))
452                 goto mctx_error;
453
454         if (!srgn->mctx)
455                 goto mctx_error;
456
457         copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
458                                            pre_req->wb.len - offset,
459                                            &addr[offset]);
460
461         if (copied < 0)
462                 goto mctx_error;
463
464         offset += copied;
465         srgn_offset += copied;
466
467         if (srgn_offset == hpb->entries_per_srgn) {
468                 srgn_offset = 0;
469
470                 if (++srgn_idx == hpb->srgns_per_rgn) {
471                         srgn_idx = 0;
472                         rgn_idx++;
473                 }
474         }
475
476         if (offset < pre_req->wb.len)
477                 goto next_offset;
478
479         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
480         return 0;
481 mctx_error:
482         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
483         return -ENOMEM;
484 }
485
486 static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
487                                        struct request_queue *q,
488                                        struct ufshpb_req *pre_req)
489 {
490         struct page *page = pre_req->wb.m_page;
491         struct bio *bio = pre_req->bio;
492         int entries_bytes, ret;
493
494         if (!page)
495                 return -ENOMEM;
496
497         if (ufshpb_prep_entry(pre_req, page))
498                 return -ENOMEM;
499
500         entries_bytes = pre_req->wb.len * sizeof(__be64);
501
502         ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
503         if (ret != entries_bytes) {
504                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
505                         "bio_add_pc_page fail: %d", ret);
506                 return -ENOMEM;
507         }
508         return 0;
509 }
510
511 static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
512 {
513         if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
514                 hpb->cur_read_id = 1;
515         return hpb->cur_read_id;
516 }
517
518 static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
519                                   struct ufshpb_req *pre_req, int read_id)
520 {
521         struct scsi_device *sdev = cmd->device;
522         struct request_queue *q = sdev->request_queue;
523         struct request *req;
524         struct scsi_request *rq;
525         struct bio *bio = pre_req->bio;
526
527         pre_req->hpb = hpb;
528         pre_req->wb.lpn = sectors_to_logical(cmd->device,
529                                              blk_rq_pos(scsi_cmd_to_rq(cmd)));
530         pre_req->wb.len = sectors_to_logical(cmd->device,
531                                              blk_rq_sectors(scsi_cmd_to_rq(cmd)));
532         if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
533                 return -ENOMEM;
534
535         req = pre_req->req;
536
537         /* 1. request setup */
538         blk_rq_append_bio(req, bio);
539         req->rq_disk = NULL;
540         req->end_io_data = (void *)pre_req;
541         req->end_io = ufshpb_pre_req_compl_fn;
542
543         /* 2. scsi_request setup */
544         rq = scsi_req(req);
545         rq->retries = 1;
546
547         ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
548                                  read_id);
549         rq->cmd_len = scsi_command_size(rq->cmd);
550
551         if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
552                 return -EAGAIN;
553
554         hpb->stats.pre_req_cnt++;
555
556         return 0;
557 }
558
559 static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
560                                 int *read_id)
561 {
562         struct ufshpb_req *pre_req;
563         struct request *req = NULL;
564         unsigned long flags;
565         int _read_id;
566         int ret = 0;
567
568         req = blk_get_request(cmd->device->request_queue,
569                               REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
570         if (IS_ERR(req))
571                 return -EAGAIN;
572
573         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
574         pre_req = ufshpb_get_pre_req(hpb);
575         if (!pre_req) {
576                 ret = -EAGAIN;
577                 goto unlock_out;
578         }
579         _read_id = ufshpb_get_read_id(hpb);
580         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
581
582         pre_req->req = req;
583
584         ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
585         if (ret)
586                 goto free_pre_req;
587
588         *read_id = _read_id;
589
590         return ret;
591 free_pre_req:
592         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
593         ufshpb_put_pre_req(hpb, pre_req);
594 unlock_out:
595         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
596         blk_put_request(req);
597         return ret;
598 }
599
600 /*
601  * This function sets up an HPB READ command using host-side L2P map data.
602  */
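/*
 * Outline of the flow below: writes/discards only dirty the covered map
 * entries; reads are skipped when the chunk is unsupported or any covered
 * entry is dirty or invalid (counted as a miss). Otherwise the cached PPN is
 * fetched and, for large reads on non-legacy (HPB 2.0) devices, a
 * WRITE_BUFFER pre-request is issued before the command is rewritten into
 * UFSHPB_READ.
 */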
603 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
604 {
605         struct ufshpb_lu *hpb;
606         struct ufshpb_region *rgn;
607         struct ufshpb_subregion *srgn;
608         struct scsi_cmnd *cmd = lrbp->cmd;
609         u32 lpn;
610         __be64 ppn;
611         unsigned long flags;
612         int transfer_len, rgn_idx, srgn_idx, srgn_offset;
613         int read_id = 0;
614         int err = 0;
615
616         hpb = ufshpb_get_hpb_data(cmd->device);
617         if (!hpb)
618                 return -ENODEV;
619
620         if (ufshpb_get_state(hpb) == HPB_INIT)
621                 return -ENODEV;
622
623         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
624                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
625                            "%s: ufshpb state is not PRESENT", __func__);
626                 return -ENODEV;
627         }
628
629         if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
630             (!ufshpb_is_write_or_discard(cmd) &&
631              !ufshpb_is_read_cmd(cmd)))
632                 return 0;
633
634         transfer_len = sectors_to_logical(cmd->device,
635                                           blk_rq_sectors(scsi_cmd_to_rq(cmd)));
636         if (unlikely(!transfer_len))
637                 return 0;
638
639         lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
640         ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
641         rgn = hpb->rgn_tbl + rgn_idx;
642         srgn = rgn->srgn_tbl + srgn_idx;
643
644         /* If the command type is WRITE or DISCARD, set the bitmap as dirty */
645         if (ufshpb_is_write_or_discard(cmd)) {
646                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
647                                    transfer_len, true);
648                 return 0;
649         }
650
651         if (!ufshpb_is_supported_chunk(hpb, transfer_len))
652                 return 0;
653
654         WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
655
656         if (hpb->is_hcm) {
657                 /*
658                  * in host control mode, reads are the main source for
659                  * activation trials.
660                  */
661                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
662                                    transfer_len, false);
663
664                 /* keep those counters normalized */
665                 if (rgn->reads > hpb->entries_per_srgn)
666                         schedule_work(&hpb->ufshpb_normalization_work);
667         }
668
669         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
670         if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
671                                    transfer_len)) {
672                 hpb->stats.miss_cnt++;
673                 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
674                 return 0;
675         }
676
677         err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
678         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
679         if (unlikely(err < 0)) {
680                 /*
681                  * In this case, the region state is active,
682                  * but the ppn table is not allocated.
683                  * The ppn table must always be allocated while the
684                  * region is in the active state.
685                  */
686                 dev_err(hba->dev, "get ppn failed. err %d\n", err);
687                 return err;
688         }
689         if (!ufshpb_is_legacy(hba) &&
690             ufshpb_is_required_wb(hpb, transfer_len)) {
691                 err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
692                 if (err) {
693                         unsigned long timeout;
694
695                         timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
696                                   hpb->params.requeue_timeout_ms);
697
698                         if (time_before(jiffies, timeout))
699                                 return -EAGAIN;
700
701                         hpb->stats.miss_cnt++;
702                         return 0;
703                 }
704         }
705
706         ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
707                                     read_id);
708
709         hpb->stats.hit_cnt++;
710         return 0;
711 }
712
713 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
714                                          int rgn_idx, enum req_opf dir,
715                                          bool atomic)
716 {
717         struct ufshpb_req *rq;
718         struct request *req;
719         int retries = HPB_MAP_REQ_RETRIES;
720
721         rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
722         if (!rq)
723                 return NULL;
724
725 retry:
726         req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
727                               BLK_MQ_REQ_NOWAIT);
728
729         if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
730                 usleep_range(3000, 3100);
731                 goto retry;
732         }
733
734         if (IS_ERR(req))
735                 goto free_rq;
736
737         rq->hpb = hpb;
738         rq->req = req;
739         rq->rb.rgn_idx = rgn_idx;
740
741         return rq;
742
743 free_rq:
744         kmem_cache_free(hpb->map_req_cache, rq);
745         return NULL;
746 }
747
748 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
749 {
750         blk_put_request(rq->req);
751         kmem_cache_free(hpb->map_req_cache, rq);
752 }
753
754 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
755                                              struct ufshpb_subregion *srgn)
756 {
757         struct ufshpb_req *map_req;
758         struct bio *bio;
759         unsigned long flags;
760
761         if (hpb->is_hcm &&
762             hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
763                 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
764                          "map_req throttle. inflight %d throttle %d",
765                          hpb->num_inflight_map_req,
766                          hpb->params.inflight_map_req);
767                 return NULL;
768         }
769
770         map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
771         if (!map_req)
772                 return NULL;
773
774         bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
775         if (!bio) {
776                 ufshpb_put_req(hpb, map_req);
777                 return NULL;
778         }
779
780         map_req->bio = bio;
781
782         map_req->rb.srgn_idx = srgn->srgn_idx;
783         map_req->rb.mctx = srgn->mctx;
784
785         spin_lock_irqsave(&hpb->param_lock, flags);
786         hpb->num_inflight_map_req++;
787         spin_unlock_irqrestore(&hpb->param_lock, flags);
788
789         return map_req;
790 }
791
792 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
793                                struct ufshpb_req *map_req)
794 {
795         unsigned long flags;
796
797         bio_put(map_req->bio);
798         ufshpb_put_req(hpb, map_req);
799
800         spin_lock_irqsave(&hpb->param_lock, flags);
801         hpb->num_inflight_map_req--;
802         spin_unlock_irqrestore(&hpb->param_lock, flags);
803 }
804
805 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
806                                      struct ufshpb_subregion *srgn)
807 {
808         struct ufshpb_region *rgn;
809         u32 num_entries = hpb->entries_per_srgn;
810
811         if (!srgn->mctx) {
812                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
813                         "no mctx in region %d subregion %d.\n",
814                         srgn->rgn_idx, srgn->srgn_idx);
815                 return -1;
816         }
817
818         if (unlikely(srgn->is_last))
819                 num_entries = hpb->last_srgn_entries;
820
821         bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
822
823         rgn = hpb->rgn_tbl + srgn->rgn_idx;
824         clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
825
826         return 0;
827 }
828
829 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
830                                       int srgn_idx)
831 {
832         struct ufshpb_region *rgn;
833         struct ufshpb_subregion *srgn;
834
835         rgn = hpb->rgn_tbl + rgn_idx;
836         srgn = rgn->srgn_tbl + srgn_idx;
837
838         list_del_init(&rgn->list_inact_rgn);
839
840         if (list_empty(&srgn->list_act_srgn))
841                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
842
843         hpb->stats.rb_active_cnt++;
844 }
845
846 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
847 {
848         struct ufshpb_region *rgn;
849         struct ufshpb_subregion *srgn;
850         int srgn_idx;
851
852         rgn = hpb->rgn_tbl + rgn_idx;
853
854         for_each_sub_region(rgn, srgn_idx, srgn)
855                 list_del_init(&srgn->list_act_srgn);
856
857         if (list_empty(&rgn->list_inact_rgn))
858                 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
859
860         hpb->stats.rb_inactive_cnt++;
861 }
862
863 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
864                                       struct ufshpb_subregion *srgn)
865 {
866         struct ufshpb_region *rgn;
867
868         /*
869          * If there is no mctx in the subregion
870          * after the HPB_READ_BUFFER I/O has completed, the region to which
871          * the subregion belongs was evicted.
872          * Make sure the region is not evicted while its I/O is in progress.
873          */
874         if (!srgn->mctx) {
875                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
876                         "no mctx in region %d subregion %d.\n",
877                         srgn->rgn_idx, srgn->srgn_idx);
878                 srgn->srgn_state = HPB_SRGN_INVALID;
879                 return;
880         }
881
882         rgn = hpb->rgn_tbl + srgn->rgn_idx;
883
884         if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
885                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
886                         "region %d subregion %d evicted\n",
887                         srgn->rgn_idx, srgn->srgn_idx);
888                 srgn->srgn_state = HPB_SRGN_INVALID;
889                 return;
890         }
891         srgn->srgn_state = HPB_SRGN_VALID;
892 }
893
894 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
895 {
896         struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
897
898         ufshpb_put_req(umap_req->hpb, umap_req);
899 }
900
901 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
902 {
903         struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
904         struct ufshpb_lu *hpb = map_req->hpb;
905         struct ufshpb_subregion *srgn;
906         unsigned long flags;
907
908         srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
909                 map_req->rb.srgn_idx;
910
911         ufshpb_clear_dirty_bitmap(hpb, srgn);
912         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
913         ufshpb_activate_subregion(hpb, srgn);
914         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
915
916         ufshpb_put_map_req(map_req->hpb, map_req);
917 }
918
919 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
920 {
921         cdb[0] = UFSHPB_WRITE_BUFFER;
922         cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
923                           UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
924         if (rgn)
925                 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
926         cdb[9] = 0x00;
927 }
928
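/*
 * HPB READ BUFFER CDB built below: byte 0 opcode, byte 1 buffer id,
 * bytes 2..3 region index (BE16), bytes 4..5 subregion index (BE16),
 * bytes 6..8 allocation length in bytes (BE24), byte 9 control.
 */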
929 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
930                                     int srgn_idx, int srgn_mem_size)
931 {
932         cdb[0] = UFSHPB_READ_BUFFER;
933         cdb[1] = UFSHPB_READ_BUFFER_ID;
934
935         put_unaligned_be16(rgn_idx, &cdb[2]);
936         put_unaligned_be16(srgn_idx, &cdb[4]);
937         put_unaligned_be24(srgn_mem_size, &cdb[6]);
938
939         cdb[9] = 0x00;
940 }
941
942 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
943                                    struct ufshpb_req *umap_req,
944                                    struct ufshpb_region *rgn)
945 {
946         struct request *req;
947         struct scsi_request *rq;
948
949         req = umap_req->req;
950         req->timeout = 0;
951         req->end_io_data = (void *)umap_req;
952         rq = scsi_req(req);
953         ufshpb_set_unmap_cmd(rq->cmd, rgn);
954         rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
955
956         blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
957
958         hpb->stats.umap_req_cnt++;
959 }
960
961 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
962                                   struct ufshpb_req *map_req, bool last)
963 {
964         struct request_queue *q;
965         struct request *req;
966         struct scsi_request *rq;
967         int mem_size = hpb->srgn_mem_size;
968         int ret = 0;
969         int i;
970
971         q = hpb->sdev_ufs_lu->request_queue;
972         for (i = 0; i < hpb->pages_per_srgn; i++) {
973                 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
974                                       PAGE_SIZE, 0);
975                 if (ret != PAGE_SIZE) {
976                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
977                                    "bio_add_pc_page fail %d - %d\n",
978                                    map_req->rb.rgn_idx, map_req->rb.srgn_idx);
979                         return ret;
980                 }
981         }
982
983         req = map_req->req;
984
985         blk_rq_append_bio(req, map_req->bio);
986
987         req->end_io_data = map_req;
988
989         rq = scsi_req(req);
990
991         if (unlikely(last))
992                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
993
994         ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
995                                 map_req->rb.srgn_idx, mem_size);
996         rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
997
998         blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
999
1000         hpb->stats.map_req_cnt++;
1001         return 0;
1002 }
1003
1004 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
1005                                                  bool last)
1006 {
1007         struct ufshpb_map_ctx *mctx;
1008         u32 num_entries = hpb->entries_per_srgn;
1009         int i, j;
1010
1011         mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
1012         if (!mctx)
1013                 return NULL;
1014
1015         mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
1016         if (!mctx->m_page)
1017                 goto release_mctx;
1018
1019         if (unlikely(last))
1020                 num_entries = hpb->last_srgn_entries;
1021
1022         mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
1023         if (!mctx->ppn_dirty)
1024                 goto release_m_page;
1025
1026         for (i = 0; i < hpb->pages_per_srgn; i++) {
1027                 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
1028                 if (!mctx->m_page[i]) {
1029                         for (j = 0; j < i; j++)
1030                                 mempool_free(mctx->m_page[j], ufshpb_page_pool);
1031                         goto release_ppn_dirty;
1032                 }
1033                 clear_page(page_address(mctx->m_page[i]));
1034         }
1035
1036         return mctx;
1037
1038 release_ppn_dirty:
1039         bitmap_free(mctx->ppn_dirty);
1040 release_m_page:
1041         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
1042 release_mctx:
1043         mempool_free(mctx, ufshpb_mctx_pool);
1044         return NULL;
1045 }
1046
1047 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
1048                                struct ufshpb_map_ctx *mctx)
1049 {
1050         int i;
1051
1052         for (i = 0; i < hpb->pages_per_srgn; i++)
1053                 mempool_free(mctx->m_page[i], ufshpb_page_pool);
1054
1055         bitmap_free(mctx->ppn_dirty);
1056         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
1057         mempool_free(mctx, ufshpb_mctx_pool);
1058 }
1059
1060 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
1061                                           struct ufshpb_region *rgn)
1062 {
1063         struct ufshpb_subregion *srgn;
1064         int srgn_idx;
1065
1066         for_each_sub_region(rgn, srgn_idx, srgn)
1067                 if (srgn->srgn_state == HPB_SRGN_ISSUED)
1068                         return -EPERM;
1069
1070         return 0;
1071 }
1072
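/*
 * Periodic read-timeout scan (host control mode): walk the active LRU and,
 * for regions whose read_timeout has elapsed, decrement the expiry budget.
 * Dirty or fully expired regions are queued for inactivation and map_work is
 * kicked; otherwise the timeout is simply rewound.
 */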
1073 static void ufshpb_read_to_handler(struct work_struct *work)
1074 {
1075         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1076                                              ufshpb_read_to_work.work);
1077         struct victim_select_info *lru_info = &hpb->lru_info;
1078         struct ufshpb_region *rgn, *next_rgn;
1079         unsigned long flags;
1080         unsigned int poll;
1081         LIST_HEAD(expired_list);
1082
1083         if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
1084                 return;
1085
1086         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1087
1088         list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
1089                                  list_lru_rgn) {
1090                 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
1091
1092                 if (timedout) {
1093                         rgn->read_timeout_expiries--;
1094                         if (is_rgn_dirty(rgn) ||
1095                             rgn->read_timeout_expiries == 0)
1096                                 list_add(&rgn->list_expired_rgn, &expired_list);
1097                         else
1098                                 rgn->read_timeout = ktime_add_ms(ktime_get(),
1099                                                 hpb->params.read_timeout_ms);
1100                 }
1101         }
1102
1103         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1104
1105         list_for_each_entry_safe(rgn, next_rgn, &expired_list,
1106                                  list_expired_rgn) {
1107                 list_del_init(&rgn->list_expired_rgn);
1108                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1109                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1110                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1111         }
1112
1113         ufshpb_kick_map_work(hpb);
1114
1115         clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
1116
1117         poll = hpb->params.timeout_polling_interval_ms;
1118         schedule_delayed_work(&hpb->ufshpb_read_to_work,
1119                               msecs_to_jiffies(poll));
1120 }
1121
1122 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
1123                                 struct ufshpb_region *rgn)
1124 {
1125         rgn->rgn_state = HPB_RGN_ACTIVE;
1126         list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
1127         atomic_inc(&lru_info->active_cnt);
1128         if (rgn->hpb->is_hcm) {
1129                 rgn->read_timeout =
1130                         ktime_add_ms(ktime_get(),
1131                                      rgn->hpb->params.read_timeout_ms);
1132                 rgn->read_timeout_expiries =
1133                         rgn->hpb->params.read_timeout_expiries;
1134         }
1135 }
1136
1137 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
1138                                 struct ufshpb_region *rgn)
1139 {
1140         list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
1141 }
1142
1143 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
1144 {
1145         struct victim_select_info *lru_info = &hpb->lru_info;
1146         struct ufshpb_region *rgn, *victim_rgn = NULL;
1147
1148         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
1149                 if (!rgn) {
1150                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1151                                 "%s: no region allocated\n",
1152                                 __func__);
1153                         return NULL;
1154                 }
1155                 if (ufshpb_check_srgns_issue_state(hpb, rgn))
1156                         continue;
1157
1158                 /*
1159                  * in host control mode, verify that the exiting region
1160                  * has fewer reads
1161                  */
1162                 if (hpb->is_hcm &&
1163                     rgn->reads > hpb->params.eviction_thld_exit)
1164                         continue;
1165
1166                 victim_rgn = rgn;
1167                 break;
1168         }
1169
1170         return victim_rgn;
1171 }
1172
1173 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
1174                                     struct ufshpb_region *rgn)
1175 {
1176         list_del_init(&rgn->list_lru_rgn);
1177         rgn->rgn_state = HPB_RGN_INACTIVE;
1178         atomic_dec(&lru_info->active_cnt);
1179 }
1180
1181 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
1182                                           struct ufshpb_subregion *srgn)
1183 {
1184         if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1185                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1186                 srgn->srgn_state = HPB_SRGN_UNUSED;
1187                 srgn->mctx = NULL;
1188         }
1189 }
1190
1191 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
1192                                  struct ufshpb_region *rgn,
1193                                  bool atomic)
1194 {
1195         struct ufshpb_req *umap_req;
1196         int rgn_idx = rgn ? rgn->rgn_idx : 0;
1197
1198         umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
1199         if (!umap_req)
1200                 return -ENOMEM;
1201
1202         ufshpb_execute_umap_req(hpb, umap_req, rgn);
1203
1204         return 0;
1205 }
1206
1207 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
1208                                         struct ufshpb_region *rgn)
1209 {
1210         return ufshpb_issue_umap_req(hpb, rgn, true);
1211 }
1212
1213 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
1214 {
1215         return ufshpb_issue_umap_req(hpb, NULL, false);
1216 }
1217
1218 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
1219                                  struct ufshpb_region *rgn)
1220 {
1221         struct victim_select_info *lru_info;
1222         struct ufshpb_subregion *srgn;
1223         int srgn_idx;
1224
1225         lru_info = &hpb->lru_info;
1226
1227         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
1228
1229         ufshpb_cleanup_lru_info(lru_info, rgn);
1230
1231         for_each_sub_region(rgn, srgn_idx, srgn)
1232                 ufshpb_purge_active_subregion(hpb, srgn);
1233 }
1234
1235 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1236 {
1237         unsigned long flags;
1238         int ret = 0;
1239
1240         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1241         if (rgn->rgn_state == HPB_RGN_PINNED) {
1242                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1243                          "pinned region cannot drop-out. region %d\n",
1244                          rgn->rgn_idx);
1245                 goto out;
1246         }
1247
1248         if (!list_empty(&rgn->list_lru_rgn)) {
1249                 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
1250                         ret = -EBUSY;
1251                         goto out;
1252                 }
1253
1254                 if (hpb->is_hcm) {
1255                         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1256                         ret = ufshpb_issue_umap_single_req(hpb, rgn);
1257                         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1258                         if (ret)
1259                                 goto out;
1260                 }
1261
1262                 __ufshpb_evict_region(hpb, rgn);
1263         }
1264 out:
1265         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1266         return ret;
1267 }
1268
1269 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
1270                                 struct ufshpb_region *rgn,
1271                                 struct ufshpb_subregion *srgn)
1272 {
1273         struct ufshpb_req *map_req;
1274         unsigned long flags;
1275         int ret;
1276         int err = -EAGAIN;
1277         bool alloc_required = false;
1278         enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1279
1280         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1281
1282         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1283                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1284                            "%s: ufshpb state is not PRESENT\n", __func__);
1285                 goto unlock_out;
1286         }
1287
1288         if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1289             (srgn->srgn_state == HPB_SRGN_INVALID)) {
1290                 err = 0;
1291                 goto unlock_out;
1292         }
1293
1294         if (srgn->srgn_state == HPB_SRGN_UNUSED)
1295                 alloc_required = true;
1296
1297         /*
1298          * If the subregion is already in the ISSUED state,
1299          * a specific event (e.g., GC or wear-leveling) occurred in
1300          * the device and an HPB response for map loading was received.
1301          * In this case, after finishing the current HPB_READ_BUFFER,
1302          * the next HPB_READ_BUFFER is performed again to obtain the latest
1303          * map data.
1304          */
1305         if (srgn->srgn_state == HPB_SRGN_ISSUED)
1306                 goto unlock_out;
1307
1308         srgn->srgn_state = HPB_SRGN_ISSUED;
1309         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1310
1311         if (alloc_required) {
1312                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1313                 if (!srgn->mctx) {
1314                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1315                             "get map_ctx failed. region %d - %d\n",
1316                             rgn->rgn_idx, srgn->srgn_idx);
1317                         state = HPB_SRGN_UNUSED;
1318                         goto change_srgn_state;
1319                 }
1320         }
1321
1322         map_req = ufshpb_get_map_req(hpb, srgn);
1323         if (!map_req)
1324                 goto change_srgn_state;
1325
1326
1327         ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1328         if (ret) {
1329                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1330                            "%s: issue map_req failed: %d, region %d - %d\n",
1331                            __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1332                 goto free_map_req;
1333         }
1334         return 0;
1335
1336 free_map_req:
1337         ufshpb_put_map_req(hpb, map_req);
1338 change_srgn_state:
1339         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1340         srgn->srgn_state = state;
1341 unlock_out:
1342         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1343         return err;
1344 }
1345
1346 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1347 {
1348         struct ufshpb_region *victim_rgn = NULL;
1349         struct victim_select_info *lru_info = &hpb->lru_info;
1350         unsigned long flags;
1351         int ret = 0;
1352
1353         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1354         /*
1355          * If the region already belongs to the lru_list, just move it
1356          * to the MRU end of the lru list because the region is
1357          * already in the active state.
1358          */
1359         if (!list_empty(&rgn->list_lru_rgn)) {
1360                 ufshpb_hit_lru_info(lru_info, rgn);
1361                 goto out;
1362         }
1363
1364         if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1365                 if (atomic_read(&lru_info->active_cnt) ==
1366                     lru_info->max_lru_active_cnt) {
1367                         /*
1368                          * If the maximum number of active regions
1369                          * is exceeded, evict the least recently used region.
1370                          * This case may occur when the device responds
1371                          * to the eviction information late.
1372                          * It is okay to evict the least recently used region,
1373                          * because the device can detect that this region is
1374                          * no longer used, as the host stops issuing HPB_READ for it.
1375                          *
1376                          * in host control mode, verify that the entering
1377                          * region has enough reads
1378                          */
1379                         if (hpb->is_hcm &&
1380                             rgn->reads < hpb->params.eviction_thld_enter) {
1381                                 ret = -EACCES;
1382                                 goto out;
1383                         }
1384
1385                         victim_rgn = ufshpb_victim_lru_info(hpb);
1386                         if (!victim_rgn) {
1387                                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1388                                     "cannot get victim region %s\n",
1389                                     hpb->is_hcm ? "" : "error");
1390                                 ret = -ENOMEM;
1391                                 goto out;
1392                         }
1393
1394                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1395                                 "LRU full (%d), choose victim %d\n",
1396                                 atomic_read(&lru_info->active_cnt),
1397                                 victim_rgn->rgn_idx);
1398
1399                         if (hpb->is_hcm) {
1400                                 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1401                                                        flags);
1402                                 ret = ufshpb_issue_umap_single_req(hpb,
1403                                                                 victim_rgn);
1404                                 spin_lock_irqsave(&hpb->rgn_state_lock,
1405                                                   flags);
1406                                 if (ret)
1407                                         goto out;
1408                         }
1409
1410                         __ufshpb_evict_region(hpb, victim_rgn);
1411                 }
1412
1413                 /*
1414                  * When a region is added to the lru_info list_head,
1415                  * it is guaranteed that all of its subregions have been
1416                  * assigned an mctx. If that failed, the mctx is requested
1417                  * again without the region being added to the lru_info list_head.
1418                  */
1419                 ufshpb_add_lru_info(lru_info, rgn);
1420         }
1421 out:
1422         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1423         return ret;
1424 }
1425
1426 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1427                                          struct utp_hpb_rsp *rsp_field)
1428 {
1429         struct ufshpb_region *rgn;
1430         struct ufshpb_subregion *srgn;
1431         int i, rgn_i, srgn_i;
1432
1433         BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1434         /*
1435          * If the active region and the inactive region are the same,
1436          * we will inactivate this region.
1437          * The device can check this (region inactivated) and
1438          * will respond with the proper active region information.
1439          */
1440         for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1441                 rgn_i =
1442                         be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1443                 srgn_i =
1444                         be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1445
1446                 rgn = hpb->rgn_tbl + rgn_i;
1447                 if (hpb->is_hcm &&
1448                     (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1449                         /*
1450                          * in host control mode, subregion activation
1451                          * recommendations are only allowed for active regions.
1452                          * Also, ignore recommendations for dirty regions - the
1453                          * host will make decisions concerning those by itself.
1454                          */
1455                         continue;
1456                 }
1457
1458                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1459                         "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1460
1461                 spin_lock(&hpb->rsp_list_lock);
1462                 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1463                 spin_unlock(&hpb->rsp_list_lock);
1464
1465                 srgn = rgn->srgn_tbl + srgn_i;
1466
1467                 /* blocking HPB_READ */
1468                 spin_lock(&hpb->rgn_state_lock);
1469                 if (srgn->srgn_state == HPB_SRGN_VALID)
1470                         srgn->srgn_state = HPB_SRGN_INVALID;
1471                 spin_unlock(&hpb->rgn_state_lock);
1472         }
1473
1474         if (hpb->is_hcm) {
1475                 /*
1476                  * in host control mode the device is not allowed to inactivate
1477                  * regions
1478                  */
1479                 goto out;
1480         }
1481
1482         for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1483                 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1484                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1485                         "inactivate(%d) region %d\n", i, rgn_i);
1486
1487                 spin_lock(&hpb->rsp_list_lock);
1488                 ufshpb_update_inactive_info(hpb, rgn_i);
1489                 spin_unlock(&hpb->rsp_list_lock);
1490
1491                 rgn = hpb->rgn_tbl + rgn_i;
1492
1493                 spin_lock(&hpb->rgn_state_lock);
1494                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1495                         for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1496                                 srgn = rgn->srgn_tbl + srgn_i;
1497                                 if (srgn->srgn_state == HPB_SRGN_VALID)
1498                                         srgn->srgn_state = HPB_SRGN_INVALID;
1499                         }
1500                 }
1501                 spin_unlock(&hpb->rgn_state_lock);
1502
1503         }
1504
1505 out:
1506         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1507                 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1508
1509         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1510                 queue_work(ufshpb_wq, &hpb->map_work);
1511 }
1512
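/*
 * On an HPB_RSP_DEV_RESET notification in host control mode, mark every
 * region currently on the LRU with RGN_FLAG_UPDATE so that subsequent reads
 * re-activate them and their L2P map data is fetched again.
 */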
1513 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1514 {
1515         struct victim_select_info *lru_info = &hpb->lru_info;
1516         struct ufshpb_region *rgn;
1517         unsigned long flags;
1518
1519         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1520
1521         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1522                 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1523
1524         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1525 }
1526
1527 /*
1528  * This function parses the recommended active subregion information in the
1529  * sense data field of the response UPIU when the status is SAM_STAT_GOOD.
1530  */
1531 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1532 {
1533         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1534         struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1535         int data_seg_len;
1536
1537         if (unlikely(lrbp->lun != rsp_field->lun)) {
1538                 struct scsi_device *sdev;
1539                 bool found = false;
1540
1541                 __shost_for_each_device(sdev, hba->host) {
1542                         hpb = ufshpb_get_hpb_data(sdev);
1543
1544                         if (!hpb)
1545                                 continue;
1546
1547                         if (rsp_field->lun == hpb->lun) {
1548                                 found = true;
1549                                 break;
1550                         }
1551                 }
1552
1553                 if (!found)
1554                         return;
1555         }
1556
1557         if (!hpb)
1558                 return;
1559
1560         if (ufshpb_get_state(hpb) == HPB_INIT)
1561                 return;
1562
1563         if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1564             (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1565                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1566                            "%s: ufshpb state is not PRESENT/SUSPEND\n",
1567                            __func__);
1568                 return;
1569         }
1570
1571         data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1572                 & MASK_RSP_UPIU_DATA_SEG_LEN;
1573
1574         /* To flush the remaining rsp_list entries, queue the map_work task */
1575         if (!data_seg_len) {
1576                 if (!ufshpb_is_general_lun(hpb->lun))
1577                         return;
1578
1579                 ufshpb_kick_map_work(hpb);
1580                 return;
1581         }
1582
1583         BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1584
1585         if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1586                 return;
1587
1588         hpb->stats.rb_noti_cnt++;
1589
1590         switch (rsp_field->hpb_op) {
1591         case HPB_RSP_REQ_REGION_UPDATE:
1592                 if (data_seg_len != DEV_DATA_SEG_LEN)
1593                         dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1594                                  "%s: data segment length mismatch\n",
1595                                  __func__);
1596                 ufshpb_rsp_req_region_update(hpb, rsp_field);
1597                 break;
1598         case HPB_RSP_DEV_RESET:
1599                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1600                          "UFS device lost HPB information during PM.\n");
1601
1602                 if (hpb->is_hcm) {
1603                         struct scsi_device *sdev;
1604
1605                         __shost_for_each_device(sdev, hba->host) {
1606                                 struct ufshpb_lu *h = sdev->hostdata;
1607
1608                                 if (h)
1609                                         ufshpb_dev_reset_handler(h);
1610                         }
1611                 }
1612
1613                 break;
1614         default:
1615                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1616                            "hpb_op is not available: %d\n",
1617                            rsp_field->hpb_op);
1618                 break;
1619         }
1620 }
1621
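/*
 * Put a subregion whose map request failed back on the activation list,
 * unless its region has meanwhile been queued for inactivation. If the
 * subregion is already listed, move it to the head so it is retried first.
 */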
1622 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1623                                    struct ufshpb_region *rgn,
1624                                    struct ufshpb_subregion *srgn)
1625 {
1626         if (!list_empty(&rgn->list_inact_rgn))
1627                 return;
1628
1629         if (!list_empty(&srgn->list_act_srgn)) {
1630                 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1631                 return;
1632         }
1633
1634         list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1635 }
1636
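/*
 * Defer eviction of a region that could not be evicted right now, unless
 * it was meanwhile re-queued for inactivation or one of its subregions is
 * waiting for activation.
 */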
1637 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1638                                           struct ufshpb_region *rgn,
1639                                           struct list_head *pending_list)
1640 {
1641         struct ufshpb_subregion *srgn;
1642         int srgn_idx;
1643
1644         if (!list_empty(&rgn->list_inact_rgn))
1645                 return;
1646
1647         for_each_sub_region(rgn, srgn_idx, srgn)
1648                 if (!list_empty(&srgn->list_act_srgn))
1649                         return;
1650
1651         list_add_tail(&rgn->list_inact_rgn, pending_list);
1652 }
1653
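/*
 * Drain the list of subregions recommended for activation: activate the
 * owning region and issue a map request for each subregion. On failure the
 * subregion is re-queued so a later map_work run can retry it.
 */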
1654 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1655 {
1656         struct ufshpb_region *rgn;
1657         struct ufshpb_subregion *srgn;
1658         unsigned long flags;
1659         int ret = 0;
1660
1661         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1662         while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1663                                                 struct ufshpb_subregion,
1664                                                 list_act_srgn))) {
1665                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1666                         break;
1667
1668                 list_del_init(&srgn->list_act_srgn);
1669                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1670
1671                 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1672                 ret = ufshpb_add_region(hpb, rgn);
1673                 if (ret)
1674                         goto active_failed;
1675
1676                 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1677                 if (ret) {
1678                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1679                             "issue map_req failed. ret %d, region %d - %d\n",
1680                             ret, rgn->rgn_idx, srgn->srgn_idx);
1681                         goto active_failed;
1682                 }
1683                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1684         }
1685         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1686         return;
1687
1688 active_failed:
1689         dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1690                    rgn->rgn_idx, srgn->srgn_idx);
1691         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1692         ufshpb_add_active_list(hpb, rgn, srgn);
1693         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1694 }
1695
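/*
 * Drain the list of regions recommended for inactivation and evict them.
 * Regions that cannot be evicted yet are kept on a local pending list and
 * spliced back for a later retry.
 */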
1696 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1697 {
1698         struct ufshpb_region *rgn;
1699         unsigned long flags;
1700         int ret;
1701         LIST_HEAD(pending_list);
1702
1703         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1704         while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1705                                                struct ufshpb_region,
1706                                                list_inact_rgn))) {
1707                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1708                         break;
1709
1710                 list_del_init(&rgn->list_inact_rgn);
1711                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1712
1713                 ret = ufshpb_evict_region(hpb, rgn);
1714                 if (ret) {
1715                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1716                         ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1717                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1718                 }
1719
1720                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1721         }
1722
1723         list_splice(&pending_list, &hpb->lh_inact_rgn);
1724         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1725 }
1726
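/*
 * Host control mode read-counter normalization: scale down the
 * per-subregion read counters by the normalization factor and queue any
 * active region that no longer sees reads for inactivation.
 */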
1727 static void ufshpb_normalization_work_handler(struct work_struct *work)
1728 {
1729         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1730                                              ufshpb_normalization_work);
1731         int rgn_idx;
1732         u8 factor = hpb->params.normalization_factor;
1733
1734         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1735                 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1736                 int srgn_idx;
1737
1738                 spin_lock(&rgn->rgn_lock);
1739                 rgn->reads = 0;
1740                 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1741                         struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1742
1743                         srgn->reads >>= factor;
1744                         rgn->reads += srgn->reads;
1745                 }
1746                 spin_unlock(&rgn->rgn_lock);
1747
1748                 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1749                         continue;
1750
1751                 /* if region is active but has no reads - inactivate it */
1752                 spin_lock(&hpb->rsp_list_lock);
1753                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1754                 spin_unlock(&hpb->rsp_list_lock);
1755         }
1756 }
1757
1758 static void ufshpb_map_work_handler(struct work_struct *work)
1759 {
1760         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1761
1762         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1763                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1764                            "%s: ufshpb state is not PRESENT\n", __func__);
1765                 return;
1766         }
1767
1768         ufshpb_run_inactive_region_list(hpb);
1769         ufshpb_run_active_subregion_list(hpb);
1770 }
1771
1772 /*
1773  * This function does not need to hold any locks (rgn_state_lock,
1774  * rsp_list_lock, etc.) because it is only called during initialization.
1775  */
1776 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1777                                             struct ufshpb_lu *hpb,
1778                                             struct ufshpb_region *rgn)
1779 {
1780         struct ufshpb_subregion *srgn;
1781         int srgn_idx, i;
1782         int err = 0;
1783
1784         for_each_sub_region(rgn, srgn_idx, srgn) {
1785                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1786                 srgn->srgn_state = HPB_SRGN_INVALID;
1787                 if (!srgn->mctx) {
1788                         err = -ENOMEM;
1789                         dev_err(hba->dev,
1790                                 "alloc mctx for pinned region failed\n");
1791                         goto release;
1792                 }
1793
1794                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1795         }
1796
1797         rgn->rgn_state = HPB_RGN_PINNED;
1798         return 0;
1799
1800 release:
1801         for (i = 0; i < srgn_idx; i++) {
1802                 srgn = rgn->srgn_tbl + i;
1803                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1804         }
1805         return err;
1806 }
1807
1808 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1809                                       struct ufshpb_region *rgn, bool last)
1810 {
1811         int srgn_idx;
1812         struct ufshpb_subregion *srgn;
1813
1814         for_each_sub_region(rgn, srgn_idx, srgn) {
1815                 INIT_LIST_HEAD(&srgn->list_act_srgn);
1816
1817                 srgn->rgn_idx = rgn->rgn_idx;
1818                 srgn->srgn_idx = srgn_idx;
1819                 srgn->srgn_state = HPB_SRGN_UNUSED;
1820         }
1821
1822         if (unlikely(last && hpb->last_srgn_entries))
1823                 srgn->is_last = true;
1824 }
1825
1826 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1827                                       struct ufshpb_region *rgn, int srgn_cnt)
1828 {
1829         rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1830                                  GFP_KERNEL);
1831         if (!rgn->srgn_tbl)
1832                 return -ENOMEM;
1833
1834         rgn->srgn_cnt = srgn_cnt;
1835         return 0;
1836 }
1837
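/*
 * Derive the per-LU HPB geometry (entries per region/subregion, regions and
 * subregions per LU, pages per subregion) from the device-wide and unit
 * descriptor information collected earlier.
 */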
1838 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1839                                      struct ufshpb_lu *hpb,
1840                                      struct ufshpb_dev_info *hpb_dev_info,
1841                                      struct ufshpb_lu_info *hpb_lu_info)
1842 {
1843         u32 entries_per_rgn;
1844         u64 rgn_mem_size, tmp;
1845
1846         /* for pre_req */
1847         hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
1848
1849         if (ufshpb_is_legacy(hba))
1850                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1851         else
1852                 hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
1853
1854         hpb->cur_read_id = 0;
1855
1856         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1857         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1858                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1859                 : PINNED_NOT_SET;
1860         hpb->lru_info.max_lru_active_cnt =
1861                 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1862
1863         rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1864                         * HPB_ENTRY_SIZE;
1865         do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1866         hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1867                 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1868
1869         tmp = rgn_mem_size;
1870         do_div(tmp, HPB_ENTRY_SIZE);
1871         entries_per_rgn = (u32)tmp;
1872         hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1873         hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1874
1875         hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1876         hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1877         hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1878
1879         tmp = rgn_mem_size;
1880         do_div(tmp, hpb->srgn_mem_size);
1881         hpb->srgns_per_rgn = (int)tmp;
1882
1883         hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1884                                 entries_per_rgn);
1885         hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1886                                 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1887         hpb->last_srgn_entries = hpb_lu_info->num_blocks
1888                                  % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1889
1890         hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1891
1892         if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1893                 hpb->is_hcm = true;
1894 }
1895
1896 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1897 {
1898         struct ufshpb_region *rgn_table, *rgn;
1899         int rgn_idx, i;
1900         int ret = 0;
1901
1902         rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1903                             GFP_KERNEL);
1904         if (!rgn_table)
1905                 return -ENOMEM;
1906
1907         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1908                 int srgn_cnt = hpb->srgns_per_rgn;
1909                 bool last_srgn = false;
1910
1911                 rgn = rgn_table + rgn_idx;
1912                 rgn->rgn_idx = rgn_idx;
1913
1914                 spin_lock_init(&rgn->rgn_lock);
1915
1916                 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1917                 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1918                 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1919
1920                 if (rgn_idx == hpb->rgns_per_lu - 1) {
1921                         srgn_cnt = ((hpb->srgns_per_lu - 1) %
1922                                     hpb->srgns_per_rgn) + 1;
1923                         last_srgn = true;
1924                 }
1925
1926                 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1927                 if (ret)
1928                         goto release_srgn_table;
1929                 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1930
1931                 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1932                         ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1933                         if (ret)
1934                                 goto release_srgn_table;
1935                 } else {
1936                         rgn->rgn_state = HPB_RGN_INACTIVE;
1937                 }
1938
1939                 rgn->rgn_flags = 0;
1940                 rgn->hpb = hpb;
1941         }
1942
1943         hpb->rgn_tbl = rgn_table;
1944
1945         return 0;
1946
1947 release_srgn_table:
1948         for (i = 0; i <= rgn_idx; i++)
1949                 kvfree(rgn_table[i].srgn_tbl);
1950
1951         kvfree(rgn_table);
1952         return ret;
1953 }
1954
1955 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1956                                          struct ufshpb_region *rgn)
1957 {
1958         int srgn_idx;
1959         struct ufshpb_subregion *srgn;
1960
1961         for_each_sub_region(rgn, srgn_idx, srgn)
1962                 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1963                         srgn->srgn_state = HPB_SRGN_UNUSED;
1964                         ufshpb_put_map_ctx(hpb, srgn->mctx);
1965                 }
1966 }
1967
1968 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1969 {
1970         int rgn_idx;
1971
1972         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1973                 struct ufshpb_region *rgn;
1974
1975                 rgn = hpb->rgn_tbl + rgn_idx;
1976                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1977                         rgn->rgn_state = HPB_RGN_INACTIVE;
1978
1979                         ufshpb_destroy_subregion_tbl(hpb, rgn);
1980                 }
1981
1982                 kvfree(rgn->srgn_tbl);
1983         }
1984
1985         kvfree(hpb->rgn_tbl);
1986 }
1987
1988 /* SYSFS functions */
1989 #define ufshpb_sysfs_attr_show_func(__name)                             \
1990 static ssize_t __name##_show(struct device *dev,                        \
1991         struct device_attribute *attr, char *buf)                       \
1992 {                                                                       \
1993         struct scsi_device *sdev = to_scsi_device(dev);                 \
1994         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1995                                                                         \
1996         if (!hpb)                                                       \
1997                 return -ENODEV;                                         \
1998                                                                         \
1999         return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
2000 }                                                                       \
2001 \
2002 static DEVICE_ATTR_RO(__name)
2003
2004 ufshpb_sysfs_attr_show_func(hit_cnt);
2005 ufshpb_sysfs_attr_show_func(miss_cnt);
2006 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
2007 ufshpb_sysfs_attr_show_func(rb_active_cnt);
2008 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
2009 ufshpb_sysfs_attr_show_func(map_req_cnt);
2010 ufshpb_sysfs_attr_show_func(umap_req_cnt);
2011
2012 static struct attribute *hpb_dev_stat_attrs[] = {
2013         &dev_attr_hit_cnt.attr,
2014         &dev_attr_miss_cnt.attr,
2015         &dev_attr_rb_noti_cnt.attr,
2016         &dev_attr_rb_active_cnt.attr,
2017         &dev_attr_rb_inactive_cnt.attr,
2018         &dev_attr_map_req_cnt.attr,
2019         &dev_attr_umap_req_cnt.attr,
2020         NULL,
2021 };
2022
2023 struct attribute_group ufs_sysfs_hpb_stat_group = {
2024         .name = "hpb_stats",
2025         .attrs = hpb_dev_stat_attrs,
2026 };
2027
2028 /* SYSFS functions */
2029 #define ufshpb_sysfs_param_show_func(__name)                            \
2030 static ssize_t __name##_show(struct device *dev,                        \
2031         struct device_attribute *attr, char *buf)                       \
2032 {                                                                       \
2033         struct scsi_device *sdev = to_scsi_device(dev);                 \
2034         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
2035                                                                         \
2036         if (!hpb)                                                       \
2037                 return -ENODEV;                                         \
2038                                                                         \
2039         return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
2040 }
2041
2042 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
2043 static ssize_t
2044 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
2045                          const char *buf, size_t count)
2046 {
2047         struct scsi_device *sdev = to_scsi_device(dev);
2048         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2049         int val;
2050
2051         if (!hpb)
2052                 return -ENODEV;
2053
2054         if (kstrtouint(buf, 0, &val))
2055                 return -EINVAL;
2056
2057         if (val < 0)
2058                 return -EINVAL;
2059
2060         hpb->params.requeue_timeout_ms = val;
2061
2062         return count;
2063 }
2064 static DEVICE_ATTR_RW(requeue_timeout_ms);
2065
2066 ufshpb_sysfs_param_show_func(activation_thld);
2067 static ssize_t
2068 activation_thld_store(struct device *dev, struct device_attribute *attr,
2069                       const char *buf, size_t count)
2070 {
2071         struct scsi_device *sdev = to_scsi_device(dev);
2072         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2073         int val;
2074
2075         if (!hpb)
2076                 return -ENODEV;
2077
2078         if (!hpb->is_hcm)
2079                 return -EOPNOTSUPP;
2080
2081         if (kstrtouint(buf, 0, &val))
2082                 return -EINVAL;
2083
2084         if (val <= 0)
2085                 return -EINVAL;
2086
2087         hpb->params.activation_thld = val;
2088
2089         return count;
2090 }
2091 static DEVICE_ATTR_RW(activation_thld);
2092
2093 ufshpb_sysfs_param_show_func(normalization_factor);
2094 static ssize_t
2095 normalization_factor_store(struct device *dev, struct device_attribute *attr,
2096                            const char *buf, size_t count)
2097 {
2098         struct scsi_device *sdev = to_scsi_device(dev);
2099         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2100         int val;
2101
2102         if (!hpb)
2103                 return -ENODEV;
2104
2105         if (!hpb->is_hcm)
2106                 return -EOPNOTSUPP;
2107
2108         if (kstrtouint(buf, 0, &val))
2109                 return -EINVAL;
2110
2111         if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
2112                 return -EINVAL;
2113
2114         hpb->params.normalization_factor = val;
2115
2116         return count;
2117 }
2118 static DEVICE_ATTR_RW(normalization_factor);
2119
2120 ufshpb_sysfs_param_show_func(eviction_thld_enter);
2121 static ssize_t
2122 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
2123                           const char *buf, size_t count)
2124 {
2125         struct scsi_device *sdev = to_scsi_device(dev);
2126         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2127         int val;
2128
2129         if (!hpb)
2130                 return -ENODEV;
2131
2132         if (!hpb->is_hcm)
2133                 return -EOPNOTSUPP;
2134
2135         if (kstrtouint(buf, 0, &val))
2136                 return -EINVAL;
2137
2138         if (val <= hpb->params.eviction_thld_exit)
2139                 return -EINVAL;
2140
2141         hpb->params.eviction_thld_enter = val;
2142
2143         return count;
2144 }
2145 static DEVICE_ATTR_RW(eviction_thld_enter);
2146
2147 ufshpb_sysfs_param_show_func(eviction_thld_exit);
2148 static ssize_t
2149 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
2150                          const char *buf, size_t count)
2151 {
2152         struct scsi_device *sdev = to_scsi_device(dev);
2153         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2154         int val;
2155
2156         if (!hpb)
2157                 return -ENODEV;
2158
2159         if (!hpb->is_hcm)
2160                 return -EOPNOTSUPP;
2161
2162         if (kstrtouint(buf, 0, &val))
2163                 return -EINVAL;
2164
2165         if (val <= hpb->params.activation_thld)
2166                 return -EINVAL;
2167
2168         hpb->params.eviction_thld_exit = val;
2169
2170         return count;
2171 }
2172 static DEVICE_ATTR_RW(eviction_thld_exit);
2173
2174 ufshpb_sysfs_param_show_func(read_timeout_ms);
2175 static ssize_t
2176 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
2177                       const char *buf, size_t count)
2178 {
2179         struct scsi_device *sdev = to_scsi_device(dev);
2180         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2181         int val;
2182
2183         if (!hpb)
2184                 return -ENODEV;
2185
2186         if (!hpb->is_hcm)
2187                 return -EOPNOTSUPP;
2188
2189         if (kstrtouint(buf, 0, &val))
2190                 return -EINVAL;
2191
2192         /* read_timeout >> timeout_polling_interval */
2193         if (val < hpb->params.timeout_polling_interval_ms * 2)
2194                 return -EINVAL;
2195
2196         hpb->params.read_timeout_ms = val;
2197
2198         return count;
2199 }
2200 static DEVICE_ATTR_RW(read_timeout_ms);
2201
2202 ufshpb_sysfs_param_show_func(read_timeout_expiries);
2203 static ssize_t
2204 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
2205                             const char *buf, size_t count)
2206 {
2207         struct scsi_device *sdev = to_scsi_device(dev);
2208         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2209         int val;
2210
2211         if (!hpb)
2212                 return -ENODEV;
2213
2214         if (!hpb->is_hcm)
2215                 return -EOPNOTSUPP;
2216
2217         if (kstrtouint(buf, 0, &val))
2218                 return -EINVAL;
2219
2220         if (val <= 0)
2221                 return -EINVAL;
2222
2223         hpb->params.read_timeout_expiries = val;
2224
2225         return count;
2226 }
2227 static DEVICE_ATTR_RW(read_timeout_expiries);
2228
2229 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
2230 static ssize_t
2231 timeout_polling_interval_ms_store(struct device *dev,
2232                                   struct device_attribute *attr,
2233                                   const char *buf, size_t count)
2234 {
2235         struct scsi_device *sdev = to_scsi_device(dev);
2236         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2237         int val;
2238
2239         if (!hpb)
2240                 return -ENODEV;
2241
2242         if (!hpb->is_hcm)
2243                 return -EOPNOTSUPP;
2244
2245         if (kstrtouint(buf, 0, &val))
2246                 return -EINVAL;
2247
2248         /* timeout_polling_interval << read_timeout */
2249         if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
2250                 return -EINVAL;
2251
2252         hpb->params.timeout_polling_interval_ms = val;
2253
2254         return count;
2255 }
2256 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
2257
2258 ufshpb_sysfs_param_show_func(inflight_map_req);
2259 static ssize_t inflight_map_req_store(struct device *dev,
2260                                       struct device_attribute *attr,
2261                                       const char *buf, size_t count)
2262 {
2263         struct scsi_device *sdev = to_scsi_device(dev);
2264         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2265         int val;
2266
2267         if (!hpb)
2268                 return -ENODEV;
2269
2270         if (!hpb->is_hcm)
2271                 return -EOPNOTSUPP;
2272
2273         if (kstrtouint(buf, 0, &val))
2274                 return -EINVAL;
2275
2276         if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
2277                 return -EINVAL;
2278
2279         hpb->params.inflight_map_req = val;
2280
2281         return count;
2282 }
2283 static DEVICE_ATTR_RW(inflight_map_req);
2284
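/*
 * Defaults for the host control mode tunables exposed in the hpb_params
 * sysfs group.
 */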
2285 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2286 {
2287         hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2288         hpb->params.normalization_factor = 1;
2289         hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2290         hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2291         hpb->params.read_timeout_ms = READ_TO_MS;
2292         hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2293         hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2294         hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2295 }
2296
2297 static struct attribute *hpb_dev_param_attrs[] = {
2298         &dev_attr_requeue_timeout_ms.attr,
2299         &dev_attr_activation_thld.attr,
2300         &dev_attr_normalization_factor.attr,
2301         &dev_attr_eviction_thld_enter.attr,
2302         &dev_attr_eviction_thld_exit.attr,
2303         &dev_attr_read_timeout_ms.attr,
2304         &dev_attr_read_timeout_expiries.attr,
2305         &dev_attr_timeout_polling_interval_ms.attr,
2306         &dev_attr_inflight_map_req.attr,
2307         NULL,
2308 };
2309
2310 struct attribute_group ufs_sysfs_hpb_param_group = {
2311         .name = "hpb_params",
2312         .attrs = hpb_dev_param_attrs,
2313 };
2314
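/*
 * Pre-allocate pre_req entries (half the LU queue depth), each with a bio
 * and a zeroed page, for building HPB WRITE BUFFER pre-requests.
 */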
2315 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2316 {
2317         struct ufshpb_req *pre_req = NULL, *t;
2318         int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2319         int i;
2320
2321         INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2322
2323         hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2324         hpb->throttle_pre_req = qd;
2325         hpb->num_inflight_pre_req = 0;
2326
2327         if (!hpb->pre_req)
2328                 goto release_mem;
2329
2330         for (i = 0; i < qd; i++) {
2331                 pre_req = hpb->pre_req + i;
2332                 INIT_LIST_HEAD(&pre_req->list_req);
2333                 pre_req->req = NULL;
2334
2335                 pre_req->bio = bio_alloc(GFP_KERNEL, 1);
2336                 if (!pre_req->bio)
2337                         goto release_mem;
2338
2339                 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2340                 if (!pre_req->wb.m_page) {
2341                         bio_put(pre_req->bio);
2342                         goto release_mem;
2343                 }
2344
2345                 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2346         }
2347
2348         return 0;
2349 release_mem:
2350         list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2351                 list_del_init(&pre_req->list_req);
2352                 bio_put(pre_req->bio);
2353                 __free_page(pre_req->wb.m_page);
2354         }
2355
2356         kfree(hpb->pre_req);
2357         return -ENOMEM;
2358 }
2359
2360 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2361 {
2362         struct ufshpb_req *pre_req = NULL;
2363         int i;
2364
2365         for (i = 0; i < hpb->throttle_pre_req; i++) {
2366                 pre_req = hpb->pre_req + i;
2367                 bio_put(hpb->pre_req[i].bio);
2368                 if (pre_req->wb.m_page)
2369                         __free_page(hpb->pre_req[i].wb.m_page);
2370                 list_del_init(&pre_req->list_req);
2371         }
2372
2373         kfree(hpb->pre_req);
2374 }
2375
2376 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2377 {
2378         hpb->stats.hit_cnt = 0;
2379         hpb->stats.miss_cnt = 0;
2380         hpb->stats.rb_noti_cnt = 0;
2381         hpb->stats.rb_active_cnt = 0;
2382         hpb->stats.rb_inactive_cnt = 0;
2383         hpb->stats.map_req_cnt = 0;
2384         hpb->stats.umap_req_cnt = 0;
2385 }
2386
2387 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2388 {
2389         hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2390         if (hpb->is_hcm)
2391                 ufshpb_hcm_param_init(hpb);
2392 }
2393
2394 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2395 {
2396         int ret;
2397
2398         spin_lock_init(&hpb->rgn_state_lock);
2399         spin_lock_init(&hpb->rsp_list_lock);
2400         spin_lock_init(&hpb->param_lock);
2401
2402         INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2403         INIT_LIST_HEAD(&hpb->lh_act_srgn);
2404         INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2405         INIT_LIST_HEAD(&hpb->list_hpb_lu);
2406
2407         INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2408         if (hpb->is_hcm) {
2409                 INIT_WORK(&hpb->ufshpb_normalization_work,
2410                           ufshpb_normalization_work_handler);
2411                 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2412                                   ufshpb_read_to_handler);
2413         }
2414
2415         hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2416                           sizeof(struct ufshpb_req), 0, 0, NULL);
2417         if (!hpb->map_req_cache) {
2418                 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2419                         hpb->lun);
2420                 return -ENOMEM;
2421         }
2422
2423         hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2424                           sizeof(struct page *) * hpb->pages_per_srgn,
2425                           0, 0, NULL);
2426         if (!hpb->m_page_cache) {
2427                 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2428                         hpb->lun);
2429                 ret = -ENOMEM;
2430                 goto release_req_cache;
2431         }
2432
2433         ret = ufshpb_pre_req_mempool_init(hpb);
2434         if (ret) {
2435                 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2436                         hpb->lun);
2437                 goto release_m_page_cache;
2438         }
2439
2440         ret = ufshpb_alloc_region_tbl(hba, hpb);
2441         if (ret)
2442                 goto release_pre_req_mempool;
2443
2444         ufshpb_stat_init(hpb);
2445         ufshpb_param_init(hpb);
2446
2447         if (hpb->is_hcm) {
2448                 unsigned int poll;
2449
2450                 poll = hpb->params.timeout_polling_interval_ms;
2451                 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2452                                       msecs_to_jiffies(poll));
2453         }
2454
2455         return 0;
2456
2457 release_pre_req_mempool:
2458         ufshpb_pre_req_mempool_destroy(hpb);
2459 release_m_page_cache:
2460         kmem_cache_destroy(hpb->m_page_cache);
2461 release_req_cache:
2462         kmem_cache_destroy(hpb->map_req_cache);
2463         return ret;
2464 }
2465
2466 static struct ufshpb_lu *
2467 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2468                     struct ufshpb_dev_info *hpb_dev_info,
2469                     struct ufshpb_lu_info *hpb_lu_info)
2470 {
2471         struct ufshpb_lu *hpb;
2472         int ret;
2473
2474         hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2475         if (!hpb)
2476                 return NULL;
2477
2478         hpb->lun = sdev->lun;
2479         hpb->sdev_ufs_lu = sdev;
2480
2481         ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2482
2483         ret = ufshpb_lu_hpb_init(hba, hpb);
2484         if (ret) {
2485                 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2486                 goto release_hpb;
2487         }
2488
2489         sdev->hostdata = hpb;
2490         return hpb;
2491
2492 release_hpb:
2493         kfree(hpb);
2494         return NULL;
2495 }
2496
2497 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2498 {
2499         struct ufshpb_region *rgn, *next_rgn;
2500         struct ufshpb_subregion *srgn, *next_srgn;
2501         unsigned long flags;
2502
2503         /*
2504          * If a device reset occurred, the remaining HPB region information
2505          * may be stale. Discarding the HPB response lists left over from
2506          * before the reset prevents unnecessary work.
2507          */
2508         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2509         list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2510                                  list_inact_rgn)
2511                 list_del_init(&rgn->list_inact_rgn);
2512
2513         list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2514                                  list_act_srgn)
2515                 list_del_init(&srgn->list_act_srgn);
2516         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2517 }
2518
2519 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2520 {
2521         if (hpb->is_hcm) {
2522                 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2523                 cancel_work_sync(&hpb->ufshpb_normalization_work);
2524         }
2525         cancel_work_sync(&hpb->map_work);
2526 }
2527
2528 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2529 {
2530         int err = 0;
2531         bool flag_res = true;
2532         int try;
2533
2534         /* wait for the device to complete HPB reset query */
2535         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2536                 dev_dbg(hba->dev,
2537                         "%s start flag reset polling %d times\n",
2538                         __func__, try);
2539
2540                 /* Poll fHpbReset flag to be cleared */
2541                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2542                                 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2543
2544                 if (err) {
2545                         dev_err(hba->dev,
2546                                 "%s reading fHpbReset flag failed with error %d\n",
2547                                 __func__, err);
2548                         return flag_res;
2549                 }
2550
2551                 if (!flag_res)
2552                         goto out;
2553
2554                 usleep_range(1000, 1100);
2555         }
2556         if (flag_res) {
2557                 dev_err(hba->dev,
2558                         "%s fHpbReset was not cleared by the device\n",
2559                         __func__);
2560         }
2561 out:
2562         return flag_res;
2563 }
2564
2565 void ufshpb_reset(struct ufs_hba *hba)
2566 {
2567         struct ufshpb_lu *hpb;
2568         struct scsi_device *sdev;
2569
2570         shost_for_each_device(sdev, hba->host) {
2571                 hpb = ufshpb_get_hpb_data(sdev);
2572                 if (!hpb)
2573                         continue;
2574
2575                 if (ufshpb_get_state(hpb) != HPB_RESET)
2576                         continue;
2577
2578                 ufshpb_set_state(hpb, HPB_PRESENT);
2579         }
2580 }
2581
2582 void ufshpb_reset_host(struct ufs_hba *hba)
2583 {
2584         struct ufshpb_lu *hpb;
2585         struct scsi_device *sdev;
2586
2587         shost_for_each_device(sdev, hba->host) {
2588                 hpb = ufshpb_get_hpb_data(sdev);
2589                 if (!hpb)
2590                         continue;
2591
2592                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2593                         continue;
2594                 ufshpb_set_state(hpb, HPB_RESET);
2595                 ufshpb_cancel_jobs(hpb);
2596                 ufshpb_discard_rsp_lists(hpb);
2597         }
2598 }
2599
2600 void ufshpb_suspend(struct ufs_hba *hba)
2601 {
2602         struct ufshpb_lu *hpb;
2603         struct scsi_device *sdev;
2604
2605         shost_for_each_device(sdev, hba->host) {
2606                 hpb = ufshpb_get_hpb_data(sdev);
2607                 if (!hpb)
2608                         continue;
2609
2610                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2611                         continue;
2612                 ufshpb_set_state(hpb, HPB_SUSPEND);
2613                 ufshpb_cancel_jobs(hpb);
2614         }
2615 }
2616
2617 void ufshpb_resume(struct ufs_hba *hba)
2618 {
2619         struct ufshpb_lu *hpb;
2620         struct scsi_device *sdev;
2621
2622         shost_for_each_device(sdev, hba->host) {
2623                 hpb = ufshpb_get_hpb_data(sdev);
2624                 if (!hpb)
2625                         continue;
2626
2627                 if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2628                     (ufshpb_get_state(hpb) != HPB_SUSPEND))
2629                         continue;
2630                 ufshpb_set_state(hpb, HPB_PRESENT);
2631                 ufshpb_kick_map_work(hpb);
2632                 if (hpb->is_hcm) {
2633                         unsigned int poll =
2634                                 hpb->params.timeout_polling_interval_ms;
2635
2636                         schedule_delayed_work(&hpb->ufshpb_read_to_work,
2637                                 msecs_to_jiffies(poll));
2638                 }
2639         }
2640 }
2641
2642 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2643                               struct ufshpb_lu_info *hpb_lu_info)
2644 {
2645         u16 max_active_rgns;
2646         u8 lu_enable;
2647         int size;
2648         int ret;
2649         char desc_buf[QUERY_DESC_MAX_SIZE];
2650
2651         ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2652
2653         pm_runtime_get_sync(hba->dev);
2654         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2655                                             QUERY_DESC_IDN_UNIT, lun, 0,
2656                                             desc_buf, &size);
2657         pm_runtime_put_sync(hba->dev);
2658
2659         if (ret) {
2660                 dev_err(hba->dev,
2661                         "%s: idn: %d lun: %d  query request failed",
2662                         __func__, QUERY_DESC_IDN_UNIT, lun);
2663                 return ret;
2664         }
2665
2666         lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2667         if (lu_enable != LU_ENABLED_HPB_FUNC)
2668                 return -ENODEV;
2669
2670         max_active_rgns = get_unaligned_be16(
2671                         desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2672         if (!max_active_rgns) {
2673                 dev_err(hba->dev,
2674                         "lun %d wrong number of max active regions\n", lun);
2675                 return -ENODEV;
2676         }
2677
2678         hpb_lu_info->num_blocks = get_unaligned_be64(
2679                         desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2680         hpb_lu_info->pinned_start = get_unaligned_be16(
2681                         desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2682         hpb_lu_info->num_pinned = get_unaligned_be16(
2683                         desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2684         hpb_lu_info->max_active_rgns = max_active_rgns;
2685
2686         return 0;
2687 }
2688
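/*
 * Tear down a single LU's HPB state; also used by ufshpb_hpb_lu_prepared()
 * when device-wide HPB initialization fails.
 */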
2689 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2690 {
2691         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2692
2693         if (!hpb)
2694                 return;
2695
2696         ufshpb_set_state(hpb, HPB_FAILED);
2697
2698         sdev = hpb->sdev_ufs_lu;
2699         sdev->hostdata = NULL;
2700
2701         ufshpb_cancel_jobs(hpb);
2702
2703         ufshpb_pre_req_mempool_destroy(hpb);
2704         ufshpb_destroy_region_tbl(hpb);
2705
2706         kmem_cache_destroy(hpb->map_req_cache);
2707         kmem_cache_destroy(hpb->m_page_cache);
2708
2709         list_del_init(&hpb->list_hpb_lu);
2710
2711         kfree(hpb);
2712 }
2713
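/*
 * Runs once the last LU has gone through ufshpb_init_hpb_lu(): shrink the
 * global mempools to what the LUs actually need, then either bring every
 * HPB LU to HPB_PRESENT or tear everything down if the device did not
 * clear fHpbReset.
 */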
2714 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2715 {
2716         int pool_size;
2717         struct ufshpb_lu *hpb;
2718         struct scsi_device *sdev;
2719         bool init_success;
2720
2721         if (tot_active_srgn_pages == 0) {
2722                 ufshpb_remove(hba);
2723                 return;
2724         }
2725
2726         init_success = !ufshpb_check_hpb_reset_query(hba);
2727
2728         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2729         if (pool_size > tot_active_srgn_pages) {
2730                 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2731                 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2732         }
2733
2734         shost_for_each_device(sdev, hba->host) {
2735                 hpb = ufshpb_get_hpb_data(sdev);
2736                 if (!hpb)
2737                         continue;
2738
2739                 if (init_success) {
2740                         ufshpb_set_state(hpb, HPB_PRESENT);
2741                         if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2742                                 queue_work(ufshpb_wq, &hpb->map_work);
2743                         if (!hpb->is_hcm)
2744                                 ufshpb_issue_umap_all_req(hpb);
2745                 } else {
2746                         dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2747                         ufshpb_destroy_lu(hba, sdev);
2748                 }
2749         }
2750
2751         if (!init_success)
2752                 ufshpb_remove(hba);
2753 }
2754
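/*
 * Per-LU HPB initialization: read the unit descriptor, allocate the LU's
 * HPB state, and run ufshpb_hpb_lu_prepared() once the last LU has been
 * processed.
 */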
2755 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2756 {
2757         struct ufshpb_lu *hpb;
2758         int ret;
2759         struct ufshpb_lu_info hpb_lu_info = { 0 };
2760         int lun = sdev->lun;
2761
2762         if (lun >= hba->dev_info.max_lu_supported)
2763                 goto out;
2764
2765         ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2766         if (ret)
2767                 goto out;
2768
2769         hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2770                                   &hpb_lu_info);
2771         if (!hpb)
2772                 goto out;
2773
2774         tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2775                         hpb->srgns_per_rgn * hpb->pages_per_srgn;
2776
2777 out:
2778         /* All LUs are initialized */
2779         if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2780                 ufshpb_hpb_lu_prepared(hba);
2781 }
2782
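/*
 * Allocate the global map-context slab cache, the mctx and page mempools
 * sized from ufshpb_host_map_kbytes, and the workqueue shared by all HPB
 * LUs.
 */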
2783 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2784 {
2785         int ret;
2786         unsigned int pool_size;
2787
2788         ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2789                                         sizeof(struct ufshpb_map_ctx),
2790                                         0, 0, NULL);
2791         if (!ufshpb_mctx_cache) {
2792                 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2793                 return -ENOMEM;
2794         }
2795
2796         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2797         dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2798                __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2799
2800         ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2801                                                     ufshpb_mctx_cache);
2802         if (!ufshpb_mctx_pool) {
2803                 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2804                 ret = -ENOMEM;
2805                 goto release_mctx_cache;
2806         }
2807
2808         ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2809         if (!ufshpb_page_pool) {
2810                 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2811                 ret = -ENOMEM;
2812                 goto release_mctx_pool;
2813         }
2814
2815         ufshpb_wq = alloc_workqueue("ufshpb-wq",
2816                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2817         if (!ufshpb_wq) {
2818                 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2819                 ret = -ENOMEM;
2820                 goto release_page_pool;
2821         }
2822
2823         return 0;
2824
2825 release_page_pool:
2826         mempool_destroy(ufshpb_page_pool);
2827 release_mctx_pool:
2828         mempool_destroy(ufshpb_mctx_pool);
2829 release_mctx_cache:
2830         kmem_cache_destroy(ufshpb_mctx_cache);
2831         return ret;
2832 }
2833
2834 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2835 {
2836         struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2837         int max_active_rgns = 0;
2838         int hpb_num_lu;
2839
2840         hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2841         if (hpb_num_lu == 0) {
2842                 dev_err(hba->dev, "No HPB LU supported\n");
2843                 hpb_info->hpb_disabled = true;
2844                 return;
2845         }
2846
2847         hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2848         hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2849         max_active_rgns = get_unaligned_be16(geo_buf +
2850                           GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2851
2852         if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2853             max_active_rgns == 0) {
2854                 dev_err(hba->dev, "No HPB supported device\n");
2855                 hpb_info->hpb_disabled = true;
2856                 return;
2857         }
2858 }
2859
2860 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2861 {
2862         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2863         int version, ret;
2864         u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
2865
2866         hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2867
2868         version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2869         if ((version != HPB_SUPPORT_VERSION) &&
2870             (version != HPB_SUPPORT_LEGACY_VERSION)) {
2871                 dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2872                         __func__, version);
2873                 hpb_dev_info->hpb_disabled = true;
2874                 return;
2875         }
2876
2877         if (version == HPB_SUPPORT_LEGACY_VERSION)
2878                 hpb_dev_info->is_legacy = true;
2879
2880         pm_runtime_get_sync(hba->dev);
2881         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2882                 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
2883         pm_runtime_put_sync(hba->dev);
2884
2885         if (ret)
2886                 dev_err(hba->dev, "%s: reading max size of single HPB command failed",
2887                         __func__);
2888         hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
2889
2890         /*
2891          * Get the number of user logical units to check whether all
2892          * scsi_devices have finished initialization.
2893          */
2894         hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2895 }
2896
2897 void ufshpb_init(struct ufs_hba *hba)
2898 {
2899         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2900         int try;
2901         int ret;
2902
2903         if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2904                 return;
2905
2906         if (ufshpb_init_mem_wq(hba)) {
2907                 hpb_dev_info->hpb_disabled = true;
2908                 return;
2909         }
2910
2911         atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2912         tot_active_srgn_pages = 0;
2913         /* issue HPB reset query */
2914         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2915                 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2916                                         QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2917                 if (!ret)
2918                         break;
2919         }
2920 }
2921
2922 void ufshpb_remove(struct ufs_hba *hba)
2923 {
2924         mempool_destroy(ufshpb_page_pool);
2925         mempool_destroy(ufshpb_mctx_pool);
2926         kmem_cache_destroy(ufshpb_mctx_cache);
2927
2928         destroy_workqueue(ufshpb_wq);
2929 }
2930
2931 module_param(ufshpb_host_map_kbytes, uint, 0644);
2932 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2933         "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");