drivers/scsi/ufs/ufshpb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *      Yongmyung Lee <ymhungry.lee@samsung.com>
9  *      Jinyoung Choi <j-young.choi@samsung.com>
10  */
11
12 #include <asm/unaligned.h>
13
14 #include "ufshcd.h"
15 #include "ufshpb.h"
16 #include "../sd.h"
17
18 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
19 #define READ_TO_MS 1000
20 #define READ_TO_EXPIRIES 100
21 #define POLLING_INTERVAL_MS 200
22 #define THROTTLE_MAP_REQ_DEFAULT 1
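/*
 * A rough reading of the defaults above, as they are consumed further down
 * in this file (the hpb->params fields they seed are initialized elsewhere
 * in this driver): in host control mode a subregion becomes an activation
 * candidate after 8 reads, the read-timeout worker polls every 200 ms,
 * rearms a region's 1000 ms timer on each read, inactivates a region after
 * 100 expired timeouts (or on the first one if the region is dirty), and at
 * most one HPB_READ_BUFFER map request is kept in flight per LU.
 */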
23
24 /* memory management */
25 static struct kmem_cache *ufshpb_mctx_cache;
26 static mempool_t *ufshpb_mctx_pool;
27 static mempool_t *ufshpb_page_pool;
28 /* A cache size of 2MB can cache ppn in the 1GB range. */
29 static unsigned int ufshpb_host_map_kbytes = 2048;
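/*
 * Worked example for the sizing comment above, assuming the 8-byte HPB
 * entries (HPB_ENTRY_SIZE) and 4 KiB entry block size (HPB_ENTRY_BLOCK_SIZE)
 * defined in ufshpb.h: 2048 KiB of map cache holds 2 MiB / 8 B = 256Ki PPN
 * entries, and each entry maps one 4 KiB logical block, so the cache covers
 * 256Ki * 4 KiB = 1 GiB of LBA space.
 */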
30 static int tot_active_srgn_pages;
31
32 static struct workqueue_struct *ufshpb_wq;
33
34 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
35                                       int srgn_idx);
36
37 bool ufshpb_is_allowed(struct ufs_hba *hba)
38 {
39         return !(hba->ufshpb_dev.hpb_disabled);
40 }
41
42 /* HPB version 1.0 is called the legacy version. */
43 bool ufshpb_is_legacy(struct ufs_hba *hba)
44 {
45         return hba->ufshpb_dev.is_legacy;
46 }
47
48 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
49 {
50         return sdev->hostdata;
51 }
52
53 static int ufshpb_get_state(struct ufshpb_lu *hpb)
54 {
55         return atomic_read(&hpb->hpb_state);
56 }
57
58 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
59 {
60         atomic_set(&hpb->hpb_state, state);
61 }
62
63 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
64                                 struct ufshpb_subregion *srgn)
65 {
66         return rgn->rgn_state != HPB_RGN_INACTIVE &&
67                 srgn->srgn_state == HPB_SRGN_VALID;
68 }
69
70 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
71 {
72         return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
73 }
74
75 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
76 {
77         return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
78                op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
79 }
80
81 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
82 {
83         return transfer_len <= hpb->pre_req_max_tr_len;
84 }
85
86 static bool ufshpb_is_general_lun(int lun)
87 {
88         return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
89 }
90
91 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
92 {
93         if (hpb->lu_pinned_end != PINNED_NOT_SET &&
94             rgn_idx >= hpb->lu_pinned_start &&
95             rgn_idx <= hpb->lu_pinned_end)
96                 return true;
97
98         return false;
99 }
100
101 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
102 {
103         bool ret = false;
104         unsigned long flags;
105
106         if (ufshpb_get_state(hpb) != HPB_PRESENT)
107                 return;
108
109         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
110         if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
111                 ret = true;
112         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
113
114         if (ret)
115                 queue_work(ufshpb_wq, &hpb->map_work);
116 }
117
118 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
119                                     struct ufshcd_lrb *lrbp,
120                                     struct utp_hpb_rsp *rsp_field)
121 {
122         /* Check HPB_UPDATE_ALERT */
123         if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
124               UPIU_HEADER_DWORD(0, 2, 0, 0)))
125                 return false;
126
127         if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
128             rsp_field->desc_type != DEV_DES_TYPE ||
129             rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
130             rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
131             rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
132             rsp_field->hpb_op == HPB_RSP_NONE ||
133             (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
134              !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
135                 return false;
136
137         if (!ufshpb_is_general_lun(rsp_field->lun)) {
138                 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
139                          lrbp->lun);
140                 return false;
141         }
142
143         return true;
144 }
145
146 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
147                                int srgn_offset, int cnt, bool set_dirty)
148 {
149         struct ufshpb_region *rgn;
150         struct ufshpb_subregion *srgn, *prev_srgn = NULL;
151         int set_bit_len;
152         int bitmap_len;
153         unsigned long flags;
154
155 next_srgn:
156         rgn = hpb->rgn_tbl + rgn_idx;
157         srgn = rgn->srgn_tbl + srgn_idx;
158
159         if (likely(!srgn->is_last))
160                 bitmap_len = hpb->entries_per_srgn;
161         else
162                 bitmap_len = hpb->last_srgn_entries;
163
164         if ((srgn_offset + cnt) > bitmap_len)
165                 set_bit_len = bitmap_len - srgn_offset;
166         else
167                 set_bit_len = cnt;
168
169         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
170         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
171                 if (set_dirty) {
172                         if (srgn->srgn_state == HPB_SRGN_VALID)
173                                 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
174                                            set_bit_len);
175                 } else if (hpb->is_hcm) {
176                          /* rewind the read timer for lru regions */
177                         rgn->read_timeout = ktime_add_ms(ktime_get(),
178                                         rgn->hpb->params.read_timeout_ms);
179                         rgn->read_timeout_expiries =
180                                 rgn->hpb->params.read_timeout_expiries;
181                 }
182         }
183         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
184
185         if (hpb->is_hcm && prev_srgn != srgn) {
186                 bool activate = false;
187
188                 spin_lock(&rgn->rgn_lock);
189                 if (set_dirty) {
190                         rgn->reads -= srgn->reads;
191                         srgn->reads = 0;
192                         set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
193                 } else {
194                         srgn->reads++;
195                         rgn->reads++;
196                         if (srgn->reads == hpb->params.activation_thld)
197                                 activate = true;
198                 }
199                 spin_unlock(&rgn->rgn_lock);
200
201                 if (activate ||
202                     test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
203                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
204                         ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
205                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
206                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
207                                 "activate region %d-%d\n", rgn_idx, srgn_idx);
208                 }
209
210                 prev_srgn = srgn;
211         }
212
213         srgn_offset = 0;
214         if (++srgn_idx == hpb->srgns_per_rgn) {
215                 srgn_idx = 0;
216                 rgn_idx++;
217         }
218
219         cnt -= set_bit_len;
220         if (cnt > 0)
221                 goto next_srgn;
222 }
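/*
 * Example walk through ufshpb_iterate_rgn() with a hypothetical geometry of
 * 4096 entries per subregion: a write of cnt = 100 starting at
 * srgn_offset = 4050 dirties the last 46 entries of the current subregion
 * (set_bit_len = 4096 - 4050), then loops with the remaining 54 entries at
 * offset 0 of the next subregion, moving on to the next region when the
 * last subregion of the current one has been handled.
 */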
223
224 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
225                                   int srgn_idx, int srgn_offset, int cnt)
226 {
227         struct ufshpb_region *rgn;
228         struct ufshpb_subregion *srgn;
229         int bitmap_len;
230         int bit_len;
231
232 next_srgn:
233         rgn = hpb->rgn_tbl + rgn_idx;
234         srgn = rgn->srgn_tbl + srgn_idx;
235
236         if (likely(!srgn->is_last))
237                 bitmap_len = hpb->entries_per_srgn;
238         else
239                 bitmap_len = hpb->last_srgn_entries;
240
241         if (!ufshpb_is_valid_srgn(rgn, srgn))
242                 return true;
243
244         /*
245          * If the region state is active, mctx must be allocated.
246          * In this case, check whether the region has been evicted or
247          * the mctx allocation failed.
248          */
249         if (unlikely(!srgn->mctx)) {
250                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
251                         "no mctx in region %d subregion %d.\n",
252                         srgn->rgn_idx, srgn->srgn_idx);
253                 return true;
254         }
255
256         if ((srgn_offset + cnt) > bitmap_len)
257                 bit_len = bitmap_len - srgn_offset;
258         else
259                 bit_len = cnt;
260
261         if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
262                           srgn_offset) < bit_len + srgn_offset)
263                 return true;
264
265         srgn_offset = 0;
266         if (++srgn_idx == hpb->srgns_per_rgn) {
267                 srgn_idx = 0;
268                 rgn_idx++;
269         }
270
271         cnt -= bit_len;
272         if (cnt > 0)
273                 goto next_srgn;
274
275         return false;
276 }
277
278 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
279 {
280         return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
281 }
282
283 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
284                                      struct ufshpb_map_ctx *mctx, int pos,
285                                      int len, __be64 *ppn_buf)
286 {
287         struct page *page;
288         int index, offset;
289         int copied;
290
291         index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
292         offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
293
294         if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
295                 copied = len;
296         else
297                 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
298
299         page = mctx->m_page[index];
300         if (unlikely(!page)) {
301                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
302                         "error. cannot find page in mctx\n");
303                 return -ENOMEM;
304         }
305
306         memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
307                copied * HPB_ENTRY_SIZE);
308
309         return copied;
310 }
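/*
 * Example, assuming 4 KiB pages: each m_page holds PAGE_SIZE / HPB_ENTRY_SIZE
 * = 512 entries, so pos = 1000 lands in m_page[1] at entry offset 488. A
 * request for len = 30 entries from there returns copied = 24 (512 - 488);
 * the caller is expected to fetch the remainder from the next page. The read
 * path below only ever asks for a single entry at a time.
 */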
311
312 static void
313 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
314                         int *srgn_idx, int *offset)
315 {
316         int rgn_offset;
317
318         *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
319         rgn_offset = lpn & hpb->entries_per_rgn_mask;
320         *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
321         *offset = rgn_offset & hpb->entries_per_srgn_mask;
322 }
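/*
 * Illustrative decomposition with hypothetical shift/mask values (the real
 * ones are derived from the region/subregion geometry at init time): with
 * entries_per_rgn_shift = 16 and entries_per_srgn_shift = 12, an lpn of
 * 0x12345 gives rgn_idx = 0x1, srgn_idx = 0x2345 >> 12 = 2 and
 * offset = 0x345.
 */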
323
324 static void
325 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
326                             __be64 ppn, u8 transfer_len)
327 {
328         unsigned char *cdb = lrbp->cmd->cmnd;
329         __be64 ppn_tmp = ppn;
330         cdb[0] = UFSHPB_READ;
331
332         if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
333                 ppn_tmp = (__force __be64)swab64((__force u64)ppn);
334
335         /* ppn value is stored as big-endian in the host memory */
336         memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
337         cdb[14] = transfer_len;
338         cdb[15] = 0;
339
340         lrbp->cmd->cmd_len = UFS_CDB_SIZE;
341 }
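/*
 * Resulting CDB, as filled in above (any bytes not written here are left as
 * prepared by the SCSI layer for the original READ): byte 0 = UFSHPB_READ
 * opcode, bytes 6..13 = the 8-byte PPN (stored big-endian in host memory,
 * byte-swapped first when UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ is
 * set), byte 14 = TRANSFER LENGTH and byte 15 = 0.
 */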
342
343 /*
344  * This function sets up an HPB READ command using host-side L2P map data.
345  */
346 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
347 {
348         struct ufshpb_lu *hpb;
349         struct ufshpb_region *rgn;
350         struct ufshpb_subregion *srgn;
351         struct scsi_cmnd *cmd = lrbp->cmd;
352         u32 lpn;
353         __be64 ppn;
354         unsigned long flags;
355         int transfer_len, rgn_idx, srgn_idx, srgn_offset;
356         int err = 0;
357
358         hpb = ufshpb_get_hpb_data(cmd->device);
359         if (!hpb)
360                 return -ENODEV;
361
362         if (ufshpb_get_state(hpb) == HPB_INIT)
363                 return -ENODEV;
364
365         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
366                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
367                            "%s: ufshpb state is not PRESENT\n", __func__);
368                 return -ENODEV;
369         }
370
371         if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
372             (!ufshpb_is_write_or_discard(cmd) &&
373              !ufshpb_is_read_cmd(cmd)))
374                 return 0;
375
376         transfer_len = sectors_to_logical(cmd->device,
377                                           blk_rq_sectors(scsi_cmd_to_rq(cmd)));
378         if (unlikely(!transfer_len))
379                 return 0;
380
381         lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
382         ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
383         rgn = hpb->rgn_tbl + rgn_idx;
384         srgn = rgn->srgn_tbl + srgn_idx;
385
386         /* If the command is WRITE or DISCARD, mark the bitmap as dirty */
387         if (ufshpb_is_write_or_discard(cmd)) {
388                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
389                                    transfer_len, true);
390                 return 0;
391         }
392
393         if (!ufshpb_is_supported_chunk(hpb, transfer_len))
394                 return 0;
395
396         if (hpb->is_hcm) {
397                 /*
398                  * in host control mode, reads are the main source for
399                  * activation trials.
400                  */
401                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
402                                    transfer_len, false);
403
404                 /* keep those counters normalized */
405                 if (rgn->reads > hpb->entries_per_srgn)
406                         schedule_work(&hpb->ufshpb_normalization_work);
407         }
408
409         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
410         if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
411                                    transfer_len)) {
412                 hpb->stats.miss_cnt++;
413                 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
414                 return 0;
415         }
416
417         err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
418         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
419         if (unlikely(err < 0)) {
420                 /*
421                  * In this case, the region state is active,
422                  * but the ppn table is not allocated.
423                  * Make sure that ppn table must be allocated on
424                  * active state.
425                  */
426                 dev_err(hba->dev, "get ppn failed. err %d\n", err);
427                 return err;
428         }
429
430         ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
431
432         hpb->stats.hit_cnt++;
433         return 0;
434 }
435
436 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
437                                          int rgn_idx, enum req_opf dir,
438                                          bool atomic)
439 {
440         struct ufshpb_req *rq;
441         struct request *req;
442         int retries = HPB_MAP_REQ_RETRIES;
443
444         rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
445         if (!rq)
446                 return NULL;
447
448 retry:
449         req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
450                               BLK_MQ_REQ_NOWAIT);
451
452         if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
453                 usleep_range(3000, 3100);
454                 goto retry;
455         }
456
457         if (IS_ERR(req))
458                 goto free_rq;
459
460         rq->hpb = hpb;
461         rq->req = req;
462         rq->rb.rgn_idx = rgn_idx;
463
464         return rq;
465
466 free_rq:
467         kmem_cache_free(hpb->map_req_cache, rq);
468         return NULL;
469 }
470
471 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
472 {
473         blk_mq_free_request(rq->req);
474         kmem_cache_free(hpb->map_req_cache, rq);
475 }
476
477 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
478                                              struct ufshpb_subregion *srgn)
479 {
480         struct ufshpb_req *map_req;
481         struct bio *bio;
482         unsigned long flags;
483
484         if (hpb->is_hcm &&
485             hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
486                 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
487                          "map_req throttle. inflight %d throttle %d",
488                          hpb->num_inflight_map_req,
489                          hpb->params.inflight_map_req);
490                 return NULL;
491         }
492
493         map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
494         if (!map_req)
495                 return NULL;
496
497         bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
498         if (!bio) {
499                 ufshpb_put_req(hpb, map_req);
500                 return NULL;
501         }
502
503         map_req->bio = bio;
504
505         map_req->rb.srgn_idx = srgn->srgn_idx;
506         map_req->rb.mctx = srgn->mctx;
507
508         spin_lock_irqsave(&hpb->param_lock, flags);
509         hpb->num_inflight_map_req++;
510         spin_unlock_irqrestore(&hpb->param_lock, flags);
511
512         return map_req;
513 }
514
515 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
516                                struct ufshpb_req *map_req)
517 {
518         unsigned long flags;
519
520         bio_put(map_req->bio);
521         ufshpb_put_req(hpb, map_req);
522
523         spin_lock_irqsave(&hpb->param_lock, flags);
524         hpb->num_inflight_map_req--;
525         spin_unlock_irqrestore(&hpb->param_lock, flags);
526 }
527
528 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
529                                      struct ufshpb_subregion *srgn)
530 {
531         struct ufshpb_region *rgn;
532         u32 num_entries = hpb->entries_per_srgn;
533
534         if (!srgn->mctx) {
535                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
536                         "no mctx in region %d subregion %d.\n",
537                         srgn->rgn_idx, srgn->srgn_idx);
538                 return -1;
539         }
540
541         if (unlikely(srgn->is_last))
542                 num_entries = hpb->last_srgn_entries;
543
544         bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
545
546         rgn = hpb->rgn_tbl + srgn->rgn_idx;
547         clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
548
549         return 0;
550 }
551
552 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
553                                       int srgn_idx)
554 {
555         struct ufshpb_region *rgn;
556         struct ufshpb_subregion *srgn;
557
558         rgn = hpb->rgn_tbl + rgn_idx;
559         srgn = rgn->srgn_tbl + srgn_idx;
560
561         list_del_init(&rgn->list_inact_rgn);
562
563         if (list_empty(&srgn->list_act_srgn))
564                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
565
566         hpb->stats.rb_active_cnt++;
567 }
568
569 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
570 {
571         struct ufshpb_region *rgn;
572         struct ufshpb_subregion *srgn;
573         int srgn_idx;
574
575         rgn = hpb->rgn_tbl + rgn_idx;
576
577         for_each_sub_region(rgn, srgn_idx, srgn)
578                 list_del_init(&srgn->list_act_srgn);
579
580         if (list_empty(&rgn->list_inact_rgn))
581                 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
582
583         hpb->stats.rb_inactive_cnt++;
584 }
585
586 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
587                                       struct ufshpb_subregion *srgn)
588 {
589         struct ufshpb_region *rgn;
590
591         /*
592          * If the subregion has no mctx after the I/O for HPB_READ_BUFFER
593          * has completed, the region to which the subregion belongs was
594          * evicted.
595          * Make sure the region is not evicted while I/O is in progress.
596          */
597         if (!srgn->mctx) {
598                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
599                         "no mctx in region %d subregion %d.\n",
600                         srgn->rgn_idx, srgn->srgn_idx);
601                 srgn->srgn_state = HPB_SRGN_INVALID;
602                 return;
603         }
604
605         rgn = hpb->rgn_tbl + srgn->rgn_idx;
606
607         if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
608                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
609                         "region %d subregion %d evicted\n",
610                         srgn->rgn_idx, srgn->srgn_idx);
611                 srgn->srgn_state = HPB_SRGN_INVALID;
612                 return;
613         }
614         srgn->srgn_state = HPB_SRGN_VALID;
615 }
616
617 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
618 {
619         struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
620
621         ufshpb_put_req(umap_req->hpb, umap_req);
622 }
623
624 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
625 {
626         struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
627         struct ufshpb_lu *hpb = map_req->hpb;
628         struct ufshpb_subregion *srgn;
629         unsigned long flags;
630
631         srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
632                 map_req->rb.srgn_idx;
633
634         ufshpb_clear_dirty_bitmap(hpb, srgn);
635         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
636         ufshpb_activate_subregion(hpb, srgn);
637         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
638
639         ufshpb_put_map_req(map_req->hpb, map_req);
640 }
641
642 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
643 {
644         cdb[0] = UFSHPB_WRITE_BUFFER;
645         cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
646                           UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
647         if (rgn)
648                 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
649         cdb[9] = 0x00;
650 }
651
652 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
653                                     int srgn_idx, int srgn_mem_size)
654 {
655         cdb[0] = UFSHPB_READ_BUFFER;
656         cdb[1] = UFSHPB_READ_BUFFER_ID;
657
658         put_unaligned_be16(rgn_idx, &cdb[2]);
659         put_unaligned_be16(srgn_idx, &cdb[4]);
660         put_unaligned_be24(srgn_mem_size, &cdb[6]);
661
662         cdb[9] = 0x00;
663 }
664
665 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
666                                    struct ufshpb_req *umap_req,
667                                    struct ufshpb_region *rgn)
668 {
669         struct request *req = umap_req->req;
670         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
671
672         req->timeout = 0;
673         req->end_io_data = umap_req;
674
675         ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
676         scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
677
678         blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
679
680         hpb->stats.umap_req_cnt++;
681 }
682
683 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
684                                   struct ufshpb_req *map_req, bool last)
685 {
686         struct request_queue *q;
687         struct request *req;
688         struct scsi_cmnd *scmd;
689         int mem_size = hpb->srgn_mem_size;
690         int ret = 0;
691         int i;
692
693         q = hpb->sdev_ufs_lu->request_queue;
694         for (i = 0; i < hpb->pages_per_srgn; i++) {
695                 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
696                                       PAGE_SIZE, 0);
697                 if (ret != PAGE_SIZE) {
698                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
699                                    "bio_add_pc_page fail %d - %d\n",
700                                    map_req->rb.rgn_idx, map_req->rb.srgn_idx);
701                         return ret;
702                 }
703         }
704
705         req = map_req->req;
706
707         blk_rq_append_bio(req, map_req->bio);
708
709         req->end_io_data = map_req;
710
711         if (unlikely(last))
712                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
713
714         scmd = blk_mq_rq_to_pdu(req);
715         ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
716                                 map_req->rb.srgn_idx, mem_size);
717         scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
718
719         blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
720
721         hpb->stats.map_req_cnt++;
722         return 0;
723 }
724
725 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
726                                                  bool last)
727 {
728         struct ufshpb_map_ctx *mctx;
729         u32 num_entries = hpb->entries_per_srgn;
730         int i, j;
731
732         mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
733         if (!mctx)
734                 return NULL;
735
736         mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
737         if (!mctx->m_page)
738                 goto release_mctx;
739
740         if (unlikely(last))
741                 num_entries = hpb->last_srgn_entries;
742
743         mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
744         if (!mctx->ppn_dirty)
745                 goto release_m_page;
746
747         for (i = 0; i < hpb->pages_per_srgn; i++) {
748                 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
749                 if (!mctx->m_page[i]) {
750                         for (j = 0; j < i; j++)
751                                 mempool_free(mctx->m_page[j], ufshpb_page_pool);
752                         goto release_ppn_dirty;
753                 }
754                 clear_page(page_address(mctx->m_page[i]));
755         }
756
757         return mctx;
758
759 release_ppn_dirty:
760         bitmap_free(mctx->ppn_dirty);
761 release_m_page:
762         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
763 release_mctx:
764         mempool_free(mctx, ufshpb_mctx_pool);
765         return NULL;
766 }
767
768 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
769                                struct ufshpb_map_ctx *mctx)
770 {
771         int i;
772
773         for (i = 0; i < hpb->pages_per_srgn; i++)
774                 mempool_free(mctx->m_page[i], ufshpb_page_pool);
775
776         bitmap_free(mctx->ppn_dirty);
777         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
778         mempool_free(mctx, ufshpb_mctx_pool);
779 }
780
781 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
782                                           struct ufshpb_region *rgn)
783 {
784         struct ufshpb_subregion *srgn;
785         int srgn_idx;
786
787         for_each_sub_region(rgn, srgn_idx, srgn)
788                 if (srgn->srgn_state == HPB_SRGN_ISSUED)
789                         return -EPERM;
790
791         return 0;
792 }
793
794 static void ufshpb_read_to_handler(struct work_struct *work)
795 {
796         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
797                                              ufshpb_read_to_work.work);
798         struct victim_select_info *lru_info = &hpb->lru_info;
799         struct ufshpb_region *rgn, *next_rgn;
800         unsigned long flags;
801         unsigned int poll;
802         LIST_HEAD(expired_list);
803
804         if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
805                 return;
806
807         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
808
809         list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
810                                  list_lru_rgn) {
811                 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
812
813                 if (timedout) {
814                         rgn->read_timeout_expiries--;
815                         if (is_rgn_dirty(rgn) ||
816                             rgn->read_timeout_expiries == 0)
817                                 list_add(&rgn->list_expired_rgn, &expired_list);
818                         else
819                                 rgn->read_timeout = ktime_add_ms(ktime_get(),
820                                                 hpb->params.read_timeout_ms);
821                 }
822         }
823
824         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
825
826         list_for_each_entry_safe(rgn, next_rgn, &expired_list,
827                                  list_expired_rgn) {
828                 list_del_init(&rgn->list_expired_rgn);
829                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
830                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
831                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
832         }
833
834         ufshpb_kick_map_work(hpb);
835
836         clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
837
838         poll = hpb->params.timeout_polling_interval_ms;
839         schedule_delayed_work(&hpb->ufshpb_read_to_work,
840                               msecs_to_jiffies(poll));
841 }
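/*
 * With the defaults defined at the top of this file (READ_TO_MS = 1000,
 * READ_TO_EXPIRIES = 100, POLLING_INTERVAL_MS = 200), a clean active region
 * that is no longer being read gets its timer rearmed on each expiry and is
 * only moved to the inactive list after about 100 one-second timeouts, i.e.
 * on the order of 100 seconds, while a dirty region is inactivated on its
 * first expired timeout.
 */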
842
843 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
844                                 struct ufshpb_region *rgn)
845 {
846         rgn->rgn_state = HPB_RGN_ACTIVE;
847         list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
848         atomic_inc(&lru_info->active_cnt);
849         if (rgn->hpb->is_hcm) {
850                 rgn->read_timeout =
851                         ktime_add_ms(ktime_get(),
852                                      rgn->hpb->params.read_timeout_ms);
853                 rgn->read_timeout_expiries =
854                         rgn->hpb->params.read_timeout_expiries;
855         }
856 }
857
858 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
859                                 struct ufshpb_region *rgn)
860 {
861         list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
862 }
863
864 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
865 {
866         struct victim_select_info *lru_info = &hpb->lru_info;
867         struct ufshpb_region *rgn, *victim_rgn = NULL;
868
869         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
870                 if (!rgn) {
871                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
872                                 "%s: no region allocated\n",
873                                 __func__);
874                         return NULL;
875                 }
876                 if (ufshpb_check_srgns_issue_state(hpb, rgn))
877                         continue;
878
879                 /*
880                  * in host control mode, verify that the region being evicted
881                  * has a low enough read count (at most eviction_thld_exit)
882                  */
883                 if (hpb->is_hcm &&
884                     rgn->reads > hpb->params.eviction_thld_exit)
885                         continue;
886
887                 victim_rgn = rgn;
888                 break;
889         }
890
891         return victim_rgn;
892 }
893
894 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
895                                     struct ufshpb_region *rgn)
896 {
897         list_del_init(&rgn->list_lru_rgn);
898         rgn->rgn_state = HPB_RGN_INACTIVE;
899         atomic_dec(&lru_info->active_cnt);
900 }
901
902 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
903                                           struct ufshpb_subregion *srgn)
904 {
905         if (srgn->srgn_state != HPB_SRGN_UNUSED) {
906                 ufshpb_put_map_ctx(hpb, srgn->mctx);
907                 srgn->srgn_state = HPB_SRGN_UNUSED;
908                 srgn->mctx = NULL;
909         }
910 }
911
912 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
913                                  struct ufshpb_region *rgn,
914                                  bool atomic)
915 {
916         struct ufshpb_req *umap_req;
917         int rgn_idx = rgn ? rgn->rgn_idx : 0;
918
919         umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
920         if (!umap_req)
921                 return -ENOMEM;
922
923         ufshpb_execute_umap_req(hpb, umap_req, rgn);
924
925         return 0;
926 }
927
928 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
929                                         struct ufshpb_region *rgn)
930 {
931         return ufshpb_issue_umap_req(hpb, rgn, true);
932 }
933
934 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
935 {
936         return ufshpb_issue_umap_req(hpb, NULL, false);
937 }
938
939 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
940                                  struct ufshpb_region *rgn)
941 {
942         struct victim_select_info *lru_info;
943         struct ufshpb_subregion *srgn;
944         int srgn_idx;
945
946         lru_info = &hpb->lru_info;
947
948         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
949
950         ufshpb_cleanup_lru_info(lru_info, rgn);
951
952         for_each_sub_region(rgn, srgn_idx, srgn)
953                 ufshpb_purge_active_subregion(hpb, srgn);
954 }
955
956 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
957 {
958         unsigned long flags;
959         int ret = 0;
960
961         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
962         if (rgn->rgn_state == HPB_RGN_PINNED) {
963                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
964                          "pinned region cannot drop-out. region %d\n",
965                          rgn->rgn_idx);
966                 goto out;
967         }
968
969         if (!list_empty(&rgn->list_lru_rgn)) {
970                 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
971                         ret = -EBUSY;
972                         goto out;
973                 }
974
975                 if (hpb->is_hcm) {
976                         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
977                         ret = ufshpb_issue_umap_single_req(hpb, rgn);
978                         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
979                         if (ret)
980                                 goto out;
981                 }
982
983                 __ufshpb_evict_region(hpb, rgn);
984         }
985 out:
986         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
987         return ret;
988 }
989
990 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
991                                 struct ufshpb_region *rgn,
992                                 struct ufshpb_subregion *srgn)
993 {
994         struct ufshpb_req *map_req;
995         unsigned long flags;
996         int ret;
997         int err = -EAGAIN;
998         bool alloc_required = false;
999         enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1000
1001         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1002
1003         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1004                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1005                            "%s: ufshpb state is not PRESENT\n", __func__);
1006                 goto unlock_out;
1007         }
1008
1009         if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1010             (srgn->srgn_state == HPB_SRGN_INVALID)) {
1011                 err = 0;
1012                 goto unlock_out;
1013         }
1014
1015         if (srgn->srgn_state == HPB_SRGN_UNUSED)
1016                 alloc_required = true;
1017
1018         /*
1019          * If the subregion is already in the ISSUED state, a device-side
1020          * event (e.g. GC or wear-leveling) has occurred and an HPB response
1021          * requesting a map load was received while one is still in flight.
1022          * In this case, after the in-flight HPB_READ_BUFFER finishes,
1023          * the next HPB_READ_BUFFER is performed again to obtain the latest
1024          * map data.
1025          */
1026         if (srgn->srgn_state == HPB_SRGN_ISSUED)
1027                 goto unlock_out;
1028
1029         srgn->srgn_state = HPB_SRGN_ISSUED;
1030         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1031
1032         if (alloc_required) {
1033                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1034                 if (!srgn->mctx) {
1035                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1036                             "get map_ctx failed. region %d - %d\n",
1037                             rgn->rgn_idx, srgn->srgn_idx);
1038                         state = HPB_SRGN_UNUSED;
1039                         goto change_srgn_state;
1040                 }
1041         }
1042
1043         map_req = ufshpb_get_map_req(hpb, srgn);
1044         if (!map_req)
1045                 goto change_srgn_state;
1046
1047
1048         ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1049         if (ret) {
1050                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1051                            "%s: issue map_req failed: %d, region %d - %d\n",
1052                            __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1053                 goto free_map_req;
1054         }
1055         return 0;
1056
1057 free_map_req:
1058         ufshpb_put_map_req(hpb, map_req);
1059 change_srgn_state:
1060         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1061         srgn->srgn_state = state;
1062 unlock_out:
1063         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1064         return err;
1065 }
1066
1067 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1068 {
1069         struct ufshpb_region *victim_rgn = NULL;
1070         struct victim_select_info *lru_info = &hpb->lru_info;
1071         unsigned long flags;
1072         int ret = 0;
1073
1074         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1075         /*
1076          * If the region already belongs to the lru_list, just move it to
1077          * the MRU end of the lru list because the region is already in
1078          * the active state.
1079          */
1080         if (!list_empty(&rgn->list_lru_rgn)) {
1081                 ufshpb_hit_lru_info(lru_info, rgn);
1082                 goto out;
1083         }
1084
1085         if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1086                 if (atomic_read(&lru_info->active_cnt) ==
1087                     lru_info->max_lru_active_cnt) {
1088                         /*
1089                          * If the maximum number of active regions
1090                          * is exceeded, evict the least recently used region.
1091                          * This case may occur when the device responds
1092                          * to the eviction information late.
1093                          * It is okay to evict the least recently used region,
1094                          * because the device can detect that it was evicted
1095                          * once the host stops issuing HPB_READ for it.
1096                          *
1097                          * in host control mode, verify that the entering
1098                          * region has enough reads
1099                          */
1100                         if (hpb->is_hcm &&
1101                             rgn->reads < hpb->params.eviction_thld_enter) {
1102                                 ret = -EACCES;
1103                                 goto out;
1104                         }
1105
1106                         victim_rgn = ufshpb_victim_lru_info(hpb);
1107                         if (!victim_rgn) {
1108                                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1109                                     "cannot get victim region %s\n",
1110                                     hpb->is_hcm ? "" : "error");
1111                                 ret = -ENOMEM;
1112                                 goto out;
1113                         }
1114
1115                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1116                                 "LRU full (%d), choose victim %d\n",
1117                                 atomic_read(&lru_info->active_cnt),
1118                                 victim_rgn->rgn_idx);
1119
1120                         if (hpb->is_hcm) {
1121                                 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1122                                                        flags);
1123                                 ret = ufshpb_issue_umap_single_req(hpb,
1124                                                                 victim_rgn);
1125                                 spin_lock_irqsave(&hpb->rgn_state_lock,
1126                                                   flags);
1127                                 if (ret)
1128                                         goto out;
1129                         }
1130
1131                         __ufshpb_evict_region(hpb, victim_rgn);
1132                 }
1133
1134                 /*
1135                  * When a region is added to the lru_info list_head, it is
1136                  * guaranteed that all of its subregions have been assigned an
1137                  * mctx. If that failed, request the mctx again without
1138                  * adding the region to the lru_info list_head.
1139                  */
1140                 ufshpb_add_lru_info(lru_info, rgn);
1141         }
1142 out:
1143         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1144         return ret;
1145 }
1146
1147 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1148                                          struct utp_hpb_rsp *rsp_field)
1149 {
1150         struct ufshpb_region *rgn;
1151         struct ufshpb_subregion *srgn;
1152         int i, rgn_i, srgn_i;
1153
1154         BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1155         /*
1156          * If the active region and the inactive region are the same,
1157          * we will inactivate this region.
1158          * The device can detect this (region inactivated) and
1159          * will respond with the proper active region information.
1160          */
1161         for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1162                 rgn_i =
1163                         be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1164                 srgn_i =
1165                         be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1166
1167                 rgn = hpb->rgn_tbl + rgn_i;
1168                 if (hpb->is_hcm &&
1169                     (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1170                         /*
1171                          * in host control mode, subregion activation
1172                          * recommendations are only allowed for active regions.
1173                          * Also, ignore recommendations for dirty regions - the
1174                          * host will make decisions concerning those by itself.
1175                          */
1176                         continue;
1177                 }
1178
1179                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1180                         "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1181
1182                 spin_lock(&hpb->rsp_list_lock);
1183                 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1184                 spin_unlock(&hpb->rsp_list_lock);
1185
1186                 srgn = rgn->srgn_tbl + srgn_i;
1187
1188                 /* block HPB_READ for this subregion until its map is reloaded */
1189                 spin_lock(&hpb->rgn_state_lock);
1190                 if (srgn->srgn_state == HPB_SRGN_VALID)
1191                         srgn->srgn_state = HPB_SRGN_INVALID;
1192                 spin_unlock(&hpb->rgn_state_lock);
1193         }
1194
1195         if (hpb->is_hcm) {
1196                 /*
1197                  * in host control mode the device is not allowed to inactivate
1198                  * regions
1199                  */
1200                 goto out;
1201         }
1202
1203         for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1204                 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1205                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1206                         "inactivate(%d) region %d\n", i, rgn_i);
1207
1208                 spin_lock(&hpb->rsp_list_lock);
1209                 ufshpb_update_inactive_info(hpb, rgn_i);
1210                 spin_unlock(&hpb->rsp_list_lock);
1211
1212                 rgn = hpb->rgn_tbl + rgn_i;
1213
1214                 spin_lock(&hpb->rgn_state_lock);
1215                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1216                         for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1217                                 srgn = rgn->srgn_tbl + srgn_i;
1218                                 if (srgn->srgn_state == HPB_SRGN_VALID)
1219                                         srgn->srgn_state = HPB_SRGN_INVALID;
1220                         }
1221                 }
1222                 spin_unlock(&hpb->rgn_state_lock);
1223
1224         }
1225
1226 out:
1227         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1228                 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1229
1230         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1231                 queue_work(ufshpb_wq, &hpb->map_work);
1232 }
1233
1234 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1235 {
1236         struct victim_select_info *lru_info = &hpb->lru_info;
1237         struct ufshpb_region *rgn;
1238         unsigned long flags;
1239
1240         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1241
1242         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1243                 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1244
1245         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1246 }
1247
1248 /*
1249  * This function parses the recommended active subregion information in the
1250  * sense data field of a response UPIU with SAM_STAT_GOOD status.
1251  */
1252 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1253 {
1254         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1255         struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1256         int data_seg_len;
1257
1258         if (unlikely(lrbp->lun != rsp_field->lun)) {
1259                 struct scsi_device *sdev;
1260                 bool found = false;
1261
1262                 __shost_for_each_device(sdev, hba->host) {
1263                         hpb = ufshpb_get_hpb_data(sdev);
1264
1265                         if (!hpb)
1266                                 continue;
1267
1268                         if (rsp_field->lun == hpb->lun) {
1269                                 found = true;
1270                                 break;
1271                         }
1272                 }
1273
1274                 if (!found)
1275                         return;
1276         }
1277
1278         if (!hpb)
1279                 return;
1280
1281         if (ufshpb_get_state(hpb) == HPB_INIT)
1282                 return;
1283
1284         if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1285             (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1286                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1287                            "%s: ufshpb state is not PRESENT/SUSPEND\n",
1288                            __func__);
1289                 return;
1290         }
1291
1292         data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1293                 & MASK_RSP_UPIU_DATA_SEG_LEN;
1294
1295         /* To flush the remaining rsp_list, we queue the map_work task */
1296         if (!data_seg_len) {
1297                 if (!ufshpb_is_general_lun(hpb->lun))
1298                         return;
1299
1300                 ufshpb_kick_map_work(hpb);
1301                 return;
1302         }
1303
1304         BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1305
1306         if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1307                 return;
1308
1309         hpb->stats.rb_noti_cnt++;
1310
1311         switch (rsp_field->hpb_op) {
1312         case HPB_RSP_REQ_REGION_UPDATE:
1313                 if (data_seg_len != DEV_DATA_SEG_LEN)
1314                         dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1315                                  "%s: data seg length is not same.\n",
1316                                  __func__);
1317                 ufshpb_rsp_req_region_update(hpb, rsp_field);
1318                 break;
1319         case HPB_RSP_DEV_RESET:
1320                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1321                          "UFS device lost HPB information during PM.\n");
1322
1323                 if (hpb->is_hcm) {
1324                         struct scsi_device *sdev;
1325
1326                         __shost_for_each_device(sdev, hba->host) {
1327                                 struct ufshpb_lu *h = sdev->hostdata;
1328
1329                                 if (h)
1330                                         ufshpb_dev_reset_handler(h);
1331                         }
1332                 }
1333
1334                 break;
1335         default:
1336                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1337                            "hpb_op is not available: %d\n",
1338                            rsp_field->hpb_op);
1339                 break;
1340         }
1341 }
1342
1343 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1344                                    struct ufshpb_region *rgn,
1345                                    struct ufshpb_subregion *srgn)
1346 {
1347         if (!list_empty(&rgn->list_inact_rgn))
1348                 return;
1349
1350         if (!list_empty(&srgn->list_act_srgn)) {
1351                 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1352                 return;
1353         }
1354
1355         list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1356 }
1357
1358 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1359                                           struct ufshpb_region *rgn,
1360                                           struct list_head *pending_list)
1361 {
1362         struct ufshpb_subregion *srgn;
1363         int srgn_idx;
1364
1365         if (!list_empty(&rgn->list_inact_rgn))
1366                 return;
1367
1368         for_each_sub_region(rgn, srgn_idx, srgn)
1369                 if (!list_empty(&srgn->list_act_srgn))
1370                         return;
1371
1372         list_add_tail(&rgn->list_inact_rgn, pending_list);
1373 }
1374
1375 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1376 {
1377         struct ufshpb_region *rgn;
1378         struct ufshpb_subregion *srgn;
1379         unsigned long flags;
1380         int ret = 0;
1381
1382         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1383         while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1384                                                 struct ufshpb_subregion,
1385                                                 list_act_srgn))) {
1386                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1387                         break;
1388
1389                 list_del_init(&srgn->list_act_srgn);
1390                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1391
1392                 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1393                 ret = ufshpb_add_region(hpb, rgn);
1394                 if (ret)
1395                         goto active_failed;
1396
1397                 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1398                 if (ret) {
1399                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1400                             "issue map_req failed. ret %d, region %d - %d\n",
1401                             ret, rgn->rgn_idx, srgn->srgn_idx);
1402                         goto active_failed;
1403                 }
1404                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1405         }
1406         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1407         return;
1408
1409 active_failed:
1410         dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1411                    rgn->rgn_idx, srgn->srgn_idx);
1412         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1413         ufshpb_add_active_list(hpb, rgn, srgn);
1414         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1415 }
1416
1417 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1418 {
1419         struct ufshpb_region *rgn;
1420         unsigned long flags;
1421         int ret;
1422         LIST_HEAD(pending_list);
1423
1424         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1425         while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1426                                                struct ufshpb_region,
1427                                                list_inact_rgn))) {
1428                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1429                         break;
1430
1431                 list_del_init(&rgn->list_inact_rgn);
1432                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1433
1434                 ret = ufshpb_evict_region(hpb, rgn);
1435                 if (ret) {
1436                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1437                         ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1438                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1439                 }
1440
1441                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1442         }
1443
1444         list_splice(&pending_list, &hpb->lh_inact_rgn);
1445         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1446 }
1447
1448 static void ufshpb_normalization_work_handler(struct work_struct *work)
1449 {
1450         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1451                                              ufshpb_normalization_work);
1452         int rgn_idx;
1453         u8 factor = hpb->params.normalization_factor;
1454
1455         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1456                 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1457                 int srgn_idx;
1458
1459                 spin_lock(&rgn->rgn_lock);
1460                 rgn->reads = 0;
1461                 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1462                         struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1463
1464                         srgn->reads >>= factor;
1465                         rgn->reads += srgn->reads;
1466                 }
1467                 spin_unlock(&rgn->rgn_lock);
1468
1469                 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1470                         continue;
1471
1472                 /* if region is active but has no reads - inactivate it */
1473                 spin_lock(&hpb->rsp_list_lock);
1474                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1475                 spin_unlock(&hpb->rsp_list_lock);
1476         }
1477 }
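/*
 * For example, a normalization factor of 2 divides every subregion's read
 * counter by four (reads >>= 2) on each pass; any region that is still
 * active but whose total read count has dropped to zero is then queued for
 * inactivation.
 */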
1478
1479 static void ufshpb_map_work_handler(struct work_struct *work)
1480 {
1481         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1482
1483         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1484                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1485                            "%s: ufshpb state is not PRESENT\n", __func__);
1486                 return;
1487         }
1488
1489         ufshpb_run_inactive_region_list(hpb);
1490         ufshpb_run_active_subregion_list(hpb);
1491 }
1492
1493 /*
1494  * This function doesn't need to hold any lock (rgn_state_lock,
1495  * rsp_list_lock, etc.) because it is only called during initialization.
1496  */
1497 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1498                                             struct ufshpb_lu *hpb,
1499                                             struct ufshpb_region *rgn)
1500 {
1501         struct ufshpb_subregion *srgn;
1502         int srgn_idx, i;
1503         int err = 0;
1504
1505         for_each_sub_region(rgn, srgn_idx, srgn) {
1506                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1507                 srgn->srgn_state = HPB_SRGN_INVALID;
1508                 if (!srgn->mctx) {
1509                         err = -ENOMEM;
1510                         dev_err(hba->dev,
1511                                 "alloc mctx for pinned region failed\n");
1512                         goto release;
1513                 }
1514
1515                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1516         }
1517
1518         rgn->rgn_state = HPB_RGN_PINNED;
1519         return 0;
1520
1521 release:
1522         for (i = 0; i < srgn_idx; i++) {
1523                 srgn = rgn->srgn_tbl + i;
1524                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1525         }
1526         return err;
1527 }
1528
1529 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1530                                       struct ufshpb_region *rgn, bool last)
1531 {
1532         int srgn_idx;
1533         struct ufshpb_subregion *srgn;
1534
1535         for_each_sub_region(rgn, srgn_idx, srgn) {
1536                 INIT_LIST_HEAD(&srgn->list_act_srgn);
1537
1538                 srgn->rgn_idx = rgn->rgn_idx;
1539                 srgn->srgn_idx = srgn_idx;
1540                 srgn->srgn_state = HPB_SRGN_UNUSED;
1541         }
1542
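              /* srgn still points at the last subregion visited by the loop above */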
1543         if (unlikely(last && hpb->last_srgn_entries))
1544                 srgn->is_last = true;
1545 }
1546
1547 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1548                                       struct ufshpb_region *rgn, int srgn_cnt)
1549 {
1550         rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1551                                  GFP_KERNEL);
1552         if (!rgn->srgn_tbl)
1553                 return -ENOMEM;
1554
1555         rgn->srgn_cnt = srgn_cnt;
1556         return 0;
1557 }
1558
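     /*
      * Derive the per-LU HPB geometry from the descriptor values: region and
      * subregion sizes are converted into L2P map memory sizes (HPB_ENTRY_SIZE
      * bytes per HPB_ENTRY_BLOCK_SIZE of logical block space), from which the
      * entry counts, shift/mask helpers and the number of regions and
      * subregions covering the LU are computed.
      */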
1559 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1560                                      struct ufshpb_lu *hpb,
1561                                      struct ufshpb_dev_info *hpb_dev_info,
1562                                      struct ufshpb_lu_info *hpb_lu_info)
1563 {
1564         u32 entries_per_rgn;
1565         u64 rgn_mem_size, tmp;
1566
1567         if (ufshpb_is_legacy(hba))
1568                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1569         else
1570                 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1571
1572         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1573         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1574                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1575                 : PINNED_NOT_SET;
1576         hpb->lru_info.max_lru_active_cnt =
1577                 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1578
1579         rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1580                         * HPB_ENTRY_SIZE;
1581         do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1582         hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1583                 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1584
1585         tmp = rgn_mem_size;
1586         do_div(tmp, HPB_ENTRY_SIZE);
1587         entries_per_rgn = (u32)tmp;
1588         hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1589         hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1590
1591         hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1592         hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1593         hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1594
1595         tmp = rgn_mem_size;
1596         do_div(tmp, hpb->srgn_mem_size);
1597         hpb->srgns_per_rgn = (int)tmp;
1598
1599         hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1600                                 entries_per_rgn);
1601         hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1602                                 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1603         hpb->last_srgn_entries = hpb_lu_info->num_blocks
1604                                  % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1605
1606         hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1607
1608         if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1609                 hpb->is_hcm = true;
1610 }
1611
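     /*
      * Allocate and initialise the region table for the LU. Pinned regions get
      * their map contexts allocated up front and their subregions queued for
      * map requests; all other regions start out inactive.
      */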
1612 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1613 {
1614         struct ufshpb_region *rgn_table, *rgn;
1615         int rgn_idx, i;
1616         int ret = 0;
1617
1618         rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1619                             GFP_KERNEL);
1620         if (!rgn_table)
1621                 return -ENOMEM;
1622
1623         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1624                 int srgn_cnt = hpb->srgns_per_rgn;
1625                 bool last_srgn = false;
1626
1627                 rgn = rgn_table + rgn_idx;
1628                 rgn->rgn_idx = rgn_idx;
1629
1630                 spin_lock_init(&rgn->rgn_lock);
1631
1632                 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1633                 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1634                 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1635
1636                 if (rgn_idx == hpb->rgns_per_lu - 1) {
1637                         srgn_cnt = ((hpb->srgns_per_lu - 1) %
1638                                     hpb->srgns_per_rgn) + 1;
1639                         last_srgn = true;
1640                 }
1641
1642                 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1643                 if (ret)
1644                         goto release_srgn_table;
1645                 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1646
1647                 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1648                         ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1649                         if (ret)
1650                                 goto release_srgn_table;
1651                 } else {
1652                         rgn->rgn_state = HPB_RGN_INACTIVE;
1653                 }
1654
1655                 rgn->rgn_flags = 0;
1656                 rgn->hpb = hpb;
1657         }
1658
1659         hpb->rgn_tbl = rgn_table;
1660
1661         return 0;
1662
1663 release_srgn_table:
1664         for (i = 0; i <= rgn_idx; i++)
1665                 kvfree(rgn_table[i].srgn_tbl);
1666
1667         kvfree(rgn_table);
1668         return ret;
1669 }
1670
1671 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1672                                          struct ufshpb_region *rgn)
1673 {
1674         int srgn_idx;
1675         struct ufshpb_subregion *srgn;
1676
1677         for_each_sub_region(rgn, srgn_idx, srgn)
1678                 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1679                         srgn->srgn_state = HPB_SRGN_UNUSED;
1680                         ufshpb_put_map_ctx(hpb, srgn->mctx);
1681                 }
1682 }
1683
1684 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1685 {
1686         int rgn_idx;
1687
1688         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1689                 struct ufshpb_region *rgn;
1690
1691                 rgn = hpb->rgn_tbl + rgn_idx;
1692                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1693                         rgn->rgn_state = HPB_RGN_INACTIVE;
1694
1695                         ufshpb_destroy_subregion_tbl(hpb, rgn);
1696                 }
1697
1698                 kvfree(rgn->srgn_tbl);
1699         }
1700
1701         kvfree(hpb->rgn_tbl);
1702 }
1703
1704 /* SYSFS functions: HPB statistics */
1705 #define ufshpb_sysfs_attr_show_func(__name)                             \
1706 static ssize_t __name##_show(struct device *dev,                        \
1707         struct device_attribute *attr, char *buf)                       \
1708 {                                                                       \
1709         struct scsi_device *sdev = to_scsi_device(dev);                 \
1710         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1711                                                                         \
1712         if (!hpb)                                                       \
1713                 return -ENODEV;                                         \
1714                                                                         \
1715         return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
1716 }                                                                       \
1717 \
1718 static DEVICE_ATTR_RO(__name)
1719
1720 ufshpb_sysfs_attr_show_func(hit_cnt);
1721 ufshpb_sysfs_attr_show_func(miss_cnt);
1722 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
1723 ufshpb_sysfs_attr_show_func(rb_active_cnt);
1724 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
1725 ufshpb_sysfs_attr_show_func(map_req_cnt);
1726 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1727
1728 static struct attribute *hpb_dev_stat_attrs[] = {
1729         &dev_attr_hit_cnt.attr,
1730         &dev_attr_miss_cnt.attr,
1731         &dev_attr_rb_noti_cnt.attr,
1732         &dev_attr_rb_active_cnt.attr,
1733         &dev_attr_rb_inactive_cnt.attr,
1734         &dev_attr_map_req_cnt.attr,
1735         &dev_attr_umap_req_cnt.attr,
1736         NULL,
1737 };
1738
1739 struct attribute_group ufs_sysfs_hpb_stat_group = {
1740         .name = "hpb_stats",
1741         .attrs = hpb_dev_stat_attrs,
1742 };
1743
1744 /* SYSFS functions: HPB tunable parameters */
1745 #define ufshpb_sysfs_param_show_func(__name)                            \
1746 static ssize_t __name##_show(struct device *dev,                        \
1747         struct device_attribute *attr, char *buf)                       \
1748 {                                                                       \
1749         struct scsi_device *sdev = to_scsi_device(dev);                 \
1750         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1751                                                                         \
1752         if (!hpb)                                                       \
1753                 return -ENODEV;                                         \
1754                                                                         \
1755         return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
1756 }
1757
1758 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1759 static ssize_t
1760 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1761                          const char *buf, size_t count)
1762 {
1763         struct scsi_device *sdev = to_scsi_device(dev);
1764         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1765         int val;
1766
1767         if (!hpb)
1768                 return -ENODEV;
1769
1770         if (kstrtouint(buf, 0, &val))
1771                 return -EINVAL;
1772
1773         if (val < 0)
1774                 return -EINVAL;
1775
1776         hpb->params.requeue_timeout_ms = val;
1777
1778         return count;
1779 }
1780 static DEVICE_ATTR_RW(requeue_timeout_ms);
1781
1782 ufshpb_sysfs_param_show_func(activation_thld);
1783 static ssize_t
1784 activation_thld_store(struct device *dev, struct device_attribute *attr,
1785                       const char *buf, size_t count)
1786 {
1787         struct scsi_device *sdev = to_scsi_device(dev);
1788         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1789         int val;
1790
1791         if (!hpb)
1792                 return -ENODEV;
1793
1794         if (!hpb->is_hcm)
1795                 return -EOPNOTSUPP;
1796
1797         if (kstrtouint(buf, 0, &val))
1798                 return -EINVAL;
1799
1800         if (val <= 0)
1801                 return -EINVAL;
1802
1803         hpb->params.activation_thld = val;
1804
1805         return count;
1806 }
1807 static DEVICE_ATTR_RW(activation_thld);
1808
1809 ufshpb_sysfs_param_show_func(normalization_factor);
1810 static ssize_t
1811 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1812                            const char *buf, size_t count)
1813 {
1814         struct scsi_device *sdev = to_scsi_device(dev);
1815         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1816         int val;
1817
1818         if (!hpb)
1819                 return -ENODEV;
1820
1821         if (!hpb->is_hcm)
1822                 return -EOPNOTSUPP;
1823
1824         if (kstrtouint(buf, 0, &val))
1825                 return -EINVAL;
1826
1827         if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1828                 return -EINVAL;
1829
1830         hpb->params.normalization_factor = val;
1831
1832         return count;
1833 }
1834 static DEVICE_ATTR_RW(normalization_factor);
1835
1836 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1837 static ssize_t
1838 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1839                           const char *buf, size_t count)
1840 {
1841         struct scsi_device *sdev = to_scsi_device(dev);
1842         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1843         int val;
1844
1845         if (!hpb)
1846                 return -ENODEV;
1847
1848         if (!hpb->is_hcm)
1849                 return -EOPNOTSUPP;
1850
1851         if (kstrtouint(buf, 0, &val))
1852                 return -EINVAL;
1853
1854         if (val <= hpb->params.eviction_thld_exit)
1855                 return -EINVAL;
1856
1857         hpb->params.eviction_thld_enter = val;
1858
1859         return count;
1860 }
1861 static DEVICE_ATTR_RW(eviction_thld_enter);
1862
1863 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1864 static ssize_t
1865 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1866                          const char *buf, size_t count)
1867 {
1868         struct scsi_device *sdev = to_scsi_device(dev);
1869         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1870         int val;
1871
1872         if (!hpb)
1873                 return -ENODEV;
1874
1875         if (!hpb->is_hcm)
1876                 return -EOPNOTSUPP;
1877
1878         if (kstrtouint(buf, 0, &val))
1879                 return -EINVAL;
1880
1881         if (val <= hpb->params.activation_thld)
1882                 return -EINVAL;
1883
1884         hpb->params.eviction_thld_exit = val;
1885
1886         return count;
1887 }
1888 static DEVICE_ATTR_RW(eviction_thld_exit);
1889
1890 ufshpb_sysfs_param_show_func(read_timeout_ms);
1891 static ssize_t
1892 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1893                       const char *buf, size_t count)
1894 {
1895         struct scsi_device *sdev = to_scsi_device(dev);
1896         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1897         int val;
1898
1899         if (!hpb)
1900                 return -ENODEV;
1901
1902         if (!hpb->is_hcm)
1903                 return -EOPNOTSUPP;
1904
1905         if (kstrtouint(buf, 0, &val))
1906                 return -EINVAL;
1907
1908         /* read_timeout must be at least twice timeout_polling_interval */
1909         if (val < hpb->params.timeout_polling_interval_ms * 2)
1910                 return -EINVAL;
1911
1912         hpb->params.read_timeout_ms = val;
1913
1914         return count;
1915 }
1916 static DEVICE_ATTR_RW(read_timeout_ms);
1917
1918 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1919 static ssize_t
1920 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1921                             const char *buf, size_t count)
1922 {
1923         struct scsi_device *sdev = to_scsi_device(dev);
1924         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1925         int val;
1926
1927         if (!hpb)
1928                 return -ENODEV;
1929
1930         if (!hpb->is_hcm)
1931                 return -EOPNOTSUPP;
1932
1933         if (kstrtouint(buf, 0, &val))
1934                 return -EINVAL;
1935
1936         if (val <= 0)
1937                 return -EINVAL;
1938
1939         hpb->params.read_timeout_expiries = val;
1940
1941         return count;
1942 }
1943 static DEVICE_ATTR_RW(read_timeout_expiries);
1944
1945 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1946 static ssize_t
1947 timeout_polling_interval_ms_store(struct device *dev,
1948                                   struct device_attribute *attr,
1949                                   const char *buf, size_t count)
1950 {
1951         struct scsi_device *sdev = to_scsi_device(dev);
1952         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1953         int val;
1954
1955         if (!hpb)
1956                 return -ENODEV;
1957
1958         if (!hpb->is_hcm)
1959                 return -EOPNOTSUPP;
1960
1961         if (kstrtouint(buf, 0, &val))
1962                 return -EINVAL;
1963
1964         /* timeout_polling_interval must be at most half of read_timeout */
1965         if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
1966                 return -EINVAL;
1967
1968         hpb->params.timeout_polling_interval_ms = val;
1969
1970         return count;
1971 }
1972 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
1973
1974 ufshpb_sysfs_param_show_func(inflight_map_req);
1975 static ssize_t inflight_map_req_store(struct device *dev,
1976                                       struct device_attribute *attr,
1977                                       const char *buf, size_t count)
1978 {
1979         struct scsi_device *sdev = to_scsi_device(dev);
1980         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1981         int val;
1982
1983         if (!hpb)
1984                 return -ENODEV;
1985
1986         if (!hpb->is_hcm)
1987                 return -EOPNOTSUPP;
1988
1989         if (kstrtouint(buf, 0, &val))
1990                 return -EINVAL;
1991
1992         if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
1993                 return -EINVAL;
1994
1995         hpb->params.inflight_map_req = val;
1996
1997         return count;
1998 }
1999 static DEVICE_ATTR_RW(inflight_map_req);
2000
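     /* Default tuning parameters for host control mode (HCM) */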
2001 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2002 {
2003         hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2004         hpb->params.normalization_factor = 1;
2005         hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2006         hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2007         hpb->params.read_timeout_ms = READ_TO_MS;
2008         hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2009         hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2010         hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2011 }
2012
2013 static struct attribute *hpb_dev_param_attrs[] = {
2014         &dev_attr_requeue_timeout_ms.attr,
2015         &dev_attr_activation_thld.attr,
2016         &dev_attr_normalization_factor.attr,
2017         &dev_attr_eviction_thld_enter.attr,
2018         &dev_attr_eviction_thld_exit.attr,
2019         &dev_attr_read_timeout_ms.attr,
2020         &dev_attr_read_timeout_expiries.attr,
2021         &dev_attr_timeout_polling_interval_ms.attr,
2022         &dev_attr_inflight_map_req.attr,
2023         NULL,
2024 };
2025
2026 struct attribute_group ufs_sysfs_hpb_param_group = {
2027         .name = "hpb_params",
2028         .attrs = hpb_dev_param_attrs,
2029 };
2030
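     /*
      * Pre-allocate a pool of pre-request structures, sized to half the LU
      * queue depth; each entry holds a bio and a zeroed page used to carry
      * the pre-request payload.
      */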
2031 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2032 {
2033         struct ufshpb_req *pre_req = NULL, *t;
2034         int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2035         int i;
2036
2037         INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2038
2039         hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2040         hpb->throttle_pre_req = qd;
2041         hpb->num_inflight_pre_req = 0;
2042
2043         if (!hpb->pre_req)
2044                 goto release_mem;
2045
2046         for (i = 0; i < qd; i++) {
2047                 pre_req = hpb->pre_req + i;
2048                 INIT_LIST_HEAD(&pre_req->list_req);
2049                 pre_req->req = NULL;
2050
2051                 pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
2052                 if (!pre_req->bio)
2053                         goto release_mem;
2054
2055                 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2056                 if (!pre_req->wb.m_page) {
2057                         bio_put(pre_req->bio);
2058                         goto release_mem;
2059                 }
2060
2061                 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2062         }
2063
2064         return 0;
2065 release_mem:
2066         list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2067                 list_del_init(&pre_req->list_req);
2068                 bio_put(pre_req->bio);
2069                 __free_page(pre_req->wb.m_page);
2070         }
2071
2072         kfree(hpb->pre_req);
2073         return -ENOMEM;
2074 }
2075
2076 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2077 {
2078         struct ufshpb_req *pre_req = NULL;
2079         int i;
2080
2081         for (i = 0; i < hpb->throttle_pre_req; i++) {
2082                 pre_req = hpb->pre_req + i;
2083                 bio_put(hpb->pre_req[i].bio);
2084                 if (pre_req->wb.m_page)
2085                         __free_page(hpb->pre_req[i].wb.m_page);
2086                 list_del_init(&pre_req->list_req);
2087         }
2088
2089         kfree(hpb->pre_req);
2090 }
2091
2092 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2093 {
2094         hpb->stats.hit_cnt = 0;
2095         hpb->stats.miss_cnt = 0;
2096         hpb->stats.rb_noti_cnt = 0;
2097         hpb->stats.rb_active_cnt = 0;
2098         hpb->stats.rb_inactive_cnt = 0;
2099         hpb->stats.map_req_cnt = 0;
2100         hpb->stats.umap_req_cnt = 0;
2101 }
2102
2103 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2104 {
2105         hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2106         if (hpb->is_hcm)
2107                 ufshpb_hcm_param_init(hpb);
2108 }
2109
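     /*
      * Per-LU initialisation: locks, lists and work items, the map-request and
      * map-page slab caches, the pre-request pool and the region table. For
      * host control mode the read-timeout poller is started as well.
      */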
2110 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2111 {
2112         int ret;
2113
2114         spin_lock_init(&hpb->rgn_state_lock);
2115         spin_lock_init(&hpb->rsp_list_lock);
2116         spin_lock_init(&hpb->param_lock);
2117
2118         INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2119         INIT_LIST_HEAD(&hpb->lh_act_srgn);
2120         INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2121         INIT_LIST_HEAD(&hpb->list_hpb_lu);
2122
2123         INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2124         if (hpb->is_hcm) {
2125                 INIT_WORK(&hpb->ufshpb_normalization_work,
2126                           ufshpb_normalization_work_handler);
2127                 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2128                                   ufshpb_read_to_handler);
2129         }
2130
2131         hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2132                           sizeof(struct ufshpb_req), 0, 0, NULL);
2133         if (!hpb->map_req_cache) {
2134                 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2135                         hpb->lun);
2136                 return -ENOMEM;
2137         }
2138
2139         hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2140                           sizeof(struct page *) * hpb->pages_per_srgn,
2141                           0, 0, NULL);
2142         if (!hpb->m_page_cache) {
2143                 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2144                         hpb->lun);
2145                 ret = -ENOMEM;
2146                 goto release_req_cache;
2147         }
2148
2149         ret = ufshpb_pre_req_mempool_init(hpb);
2150         if (ret) {
2151                 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2152                         hpb->lun);
2153                 goto release_m_page_cache;
2154         }
2155
2156         ret = ufshpb_alloc_region_tbl(hba, hpb);
2157         if (ret)
2158                 goto release_pre_req_mempool;
2159
2160         ufshpb_stat_init(hpb);
2161         ufshpb_param_init(hpb);
2162
2163         if (hpb->is_hcm) {
2164                 unsigned int poll;
2165
2166                 poll = hpb->params.timeout_polling_interval_ms;
2167                 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2168                                       msecs_to_jiffies(poll));
2169         }
2170
2171         return 0;
2172
2173 release_pre_req_mempool:
2174         ufshpb_pre_req_mempool_destroy(hpb);
2175 release_m_page_cache:
2176         kmem_cache_destroy(hpb->m_page_cache);
2177 release_req_cache:
2178         kmem_cache_destroy(hpb->map_req_cache);
2179         return ret;
2180 }
2181
2182 static struct ufshpb_lu *
2183 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2184                     struct ufshpb_dev_info *hpb_dev_info,
2185                     struct ufshpb_lu_info *hpb_lu_info)
2186 {
2187         struct ufshpb_lu *hpb;
2188         int ret;
2189
2190         hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2191         if (!hpb)
2192                 return NULL;
2193
2194         hpb->lun = sdev->lun;
2195         hpb->sdev_ufs_lu = sdev;
2196
2197         ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2198
2199         ret = ufshpb_lu_hpb_init(hba, hpb);
2200         if (ret) {
2201                 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2202                 goto release_hpb;
2203         }
2204
2205         sdev->hostdata = hpb;
2206         return hpb;
2207
2208 release_hpb:
2209         kfree(hpb);
2210         return NULL;
2211 }
2212
2213 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2214 {
2215         struct ufshpb_region *rgn, *next_rgn;
2216         struct ufshpb_subregion *srgn, *next_srgn;
2217         unsigned long flags;
2218
2219         /*
2220          * If a device reset occurred, any HPB region information still
2221          * queued on the response lists may be stale. Discard those lists
2222          * so that no unnecessary work is done after the reset.
2223          */
2224         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2225         list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2226                                  list_inact_rgn)
2227                 list_del_init(&rgn->list_inact_rgn);
2228
2229         list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2230                                  list_act_srgn)
2231                 list_del_init(&srgn->list_act_srgn);
2232         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2233 }
2234
2235 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2236 {
2237         if (hpb->is_hcm) {
2238                 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2239                 cancel_work_sync(&hpb->ufshpb_normalization_work);
2240         }
2241         cancel_work_sync(&hpb->map_work);
2242 }
2243
2244 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2245 {
2246         int err = 0;
2247         bool flag_res = true;
2248         int try;
2249
2250         /* wait for the device to complete HPB reset query */
2251         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2252                 dev_dbg(hba->dev,
2253                         "%s start flag reset polling %d times\n",
2254                         __func__, try);
2255
2256                 /* Poll the fHpbReset flag until the device clears it */
2257                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2258                                 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2259
2260                 if (err) {
2261                         dev_err(hba->dev,
2262                                 "%s reading fHpbReset flag failed with error %d\n",
2263                                 __func__, err);
2264                         return flag_res;
2265                 }
2266
2267                 if (!flag_res)
2268                         goto out;
2269
2270                 usleep_range(1000, 1100);
2271         }
2272         if (flag_res) {
2273                 dev_err(hba->dev,
2274                         "%s fHpbReset was not cleared by the device\n",
2275                         __func__);
2276         }
2277 out:
2278         return flag_res;
2279 }
2280
2281 void ufshpb_reset(struct ufs_hba *hba)
2282 {
2283         struct ufshpb_lu *hpb;
2284         struct scsi_device *sdev;
2285
2286         shost_for_each_device(sdev, hba->host) {
2287                 hpb = ufshpb_get_hpb_data(sdev);
2288                 if (!hpb)
2289                         continue;
2290
2291                 if (ufshpb_get_state(hpb) != HPB_RESET)
2292                         continue;
2293
2294                 ufshpb_set_state(hpb, HPB_PRESENT);
2295         }
2296 }
2297
2298 void ufshpb_reset_host(struct ufs_hba *hba)
2299 {
2300         struct ufshpb_lu *hpb;
2301         struct scsi_device *sdev;
2302
2303         shost_for_each_device(sdev, hba->host) {
2304                 hpb = ufshpb_get_hpb_data(sdev);
2305                 if (!hpb)
2306                         continue;
2307
2308                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2309                         continue;
2310                 ufshpb_set_state(hpb, HPB_RESET);
2311                 ufshpb_cancel_jobs(hpb);
2312                 ufshpb_discard_rsp_lists(hpb);
2313         }
2314 }
2315
2316 void ufshpb_suspend(struct ufs_hba *hba)
2317 {
2318         struct ufshpb_lu *hpb;
2319         struct scsi_device *sdev;
2320
2321         shost_for_each_device(sdev, hba->host) {
2322                 hpb = ufshpb_get_hpb_data(sdev);
2323                 if (!hpb)
2324                         continue;
2325
2326                 if (ufshpb_get_state(hpb) != HPB_PRESENT)
2327                         continue;
2328                 ufshpb_set_state(hpb, HPB_SUSPEND);
2329                 ufshpb_cancel_jobs(hpb);
2330         }
2331 }
2332
2333 void ufshpb_resume(struct ufs_hba *hba)
2334 {
2335         struct ufshpb_lu *hpb;
2336         struct scsi_device *sdev;
2337
2338         shost_for_each_device(sdev, hba->host) {
2339                 hpb = ufshpb_get_hpb_data(sdev);
2340                 if (!hpb)
2341                         continue;
2342
2343                 if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2344                     (ufshpb_get_state(hpb) != HPB_SUSPEND))
2345                         continue;
2346                 ufshpb_set_state(hpb, HPB_PRESENT);
2347                 ufshpb_kick_map_work(hpb);
2348                 if (hpb->is_hcm) {
2349                         unsigned int poll =
2350                                 hpb->params.timeout_polling_interval_ms;
2351
2352                         schedule_delayed_work(&hpb->ufshpb_read_to_work,
2353                                 msecs_to_jiffies(poll));
2354                 }
2355         }
2356 }
2357
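     /*
      * Read the unit descriptor for @lun and extract its HPB parameters
      * (logical block count, pinned region range, maximum active regions).
      * Returns -ENODEV if HPB is not enabled for this LU.
      */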
2358 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2359                               struct ufshpb_lu_info *hpb_lu_info)
2360 {
2361         u16 max_active_rgns;
2362         u8 lu_enable;
2363         int size;
2364         int ret;
2365         char desc_buf[QUERY_DESC_MAX_SIZE];
2366
2367         ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2368
2369         ufshcd_rpm_get_sync(hba);
2370         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2371                                             QUERY_DESC_IDN_UNIT, lun, 0,
2372                                             desc_buf, &size);
2373         ufshcd_rpm_put_sync(hba);
2374
2375         if (ret) {
2376                 dev_err(hba->dev,
2377                         "%s: idn: %d lun: %d  query request failed",
2378                         __func__, QUERY_DESC_IDN_UNIT, lun);
2379                 return ret;
2380         }
2381
2382         lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2383         if (lu_enable != LU_ENABLED_HPB_FUNC)
2384                 return -ENODEV;
2385
2386         max_active_rgns = get_unaligned_be16(
2387                         desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2388         if (!max_active_rgns) {
2389                 dev_err(hba->dev,
2390                         "lun %d wrong number of max active regions\n", lun);
2391                 return -ENODEV;
2392         }
2393
2394         hpb_lu_info->num_blocks = get_unaligned_be64(
2395                         desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2396         hpb_lu_info->pinned_start = get_unaligned_be16(
2397                         desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2398         hpb_lu_info->num_pinned = get_unaligned_be16(
2399                         desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2400         hpb_lu_info->max_active_rgns = max_active_rgns;
2401
2402         return 0;
2403 }
2404
2405 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2406 {
2407         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2408
2409         if (!hpb)
2410                 return;
2411
2412         ufshpb_set_state(hpb, HPB_FAILED);
2413
2414         sdev = hpb->sdev_ufs_lu;
2415         sdev->hostdata = NULL;
2416
2417         ufshpb_cancel_jobs(hpb);
2418
2419         ufshpb_pre_req_mempool_destroy(hpb);
2420         ufshpb_destroy_region_tbl(hpb);
2421
2422         kmem_cache_destroy(hpb->map_req_cache);
2423         kmem_cache_destroy(hpb->m_page_cache);
2424
2425         list_del_init(&hpb->list_hpb_lu);
2426
2427         kfree(hpb);
2428 }
2429
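     /*
      * Called once every LU has been through ufshpb_init_hpb_lu(): shrink the
      * global mempools to what the active subregions actually need, then
      * either move each HPB LU to the PRESENT state (kicking map work for
      * pinned regions) or tear everything down if the HPB reset query failed.
      */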
2430 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2431 {
2432         int pool_size;
2433         struct ufshpb_lu *hpb;
2434         struct scsi_device *sdev;
2435         bool init_success;
2436
2437         if (tot_active_srgn_pages == 0) {
2438                 ufshpb_remove(hba);
2439                 return;
2440         }
2441
2442         init_success = !ufshpb_check_hpb_reset_query(hba);
2443
2444         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2445         if (pool_size > tot_active_srgn_pages) {
2446                 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2447                 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2448         }
2449
2450         shost_for_each_device(sdev, hba->host) {
2451                 hpb = ufshpb_get_hpb_data(sdev);
2452                 if (!hpb)
2453                         continue;
2454
2455                 if (init_success) {
2456                         ufshpb_set_state(hpb, HPB_PRESENT);
2457                         if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2458                                 queue_work(ufshpb_wq, &hpb->map_work);
2459                         if (!hpb->is_hcm)
2460                                 ufshpb_issue_umap_all_req(hpb);
2461                 } else {
2462                         dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2463                         ufshpb_destroy_lu(hba, sdev);
2464                 }
2465         }
2466
2467         if (!init_success)
2468                 ufshpb_remove(hba);
2469 }
2470
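     /*
      * Per-LU entry point: read the LU's HPB unit descriptor parameters,
      * allocate the ufshpb_lu instance, and once the last LU has been handled
      * (slave_conf_cnt reaches zero) finish device-wide initialisation.
      */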
2471 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2472 {
2473         struct ufshpb_lu *hpb;
2474         int ret;
2475         struct ufshpb_lu_info hpb_lu_info = { 0 };
2476         int lun = sdev->lun;
2477
2478         if (lun >= hba->dev_info.max_lu_supported)
2479                 goto out;
2480
2481         ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2482         if (ret)
2483                 goto out;
2484
2485         hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2486                                   &hpb_lu_info);
2487         if (!hpb)
2488                 goto out;
2489
2490         tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2491                         hpb->srgns_per_rgn * hpb->pages_per_srgn;
2492
2493 out:
2494         /* Once all LUs have been initialized, finish device-wide setup */
2495         if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2496                 ufshpb_hpb_lu_prepared(hba);
2497 }
2498
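     /*
      * Set up the driver-global resources: the map-context slab cache, the
      * mempools backing map contexts and map pages, and the workqueue used
      * for map work.
      */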
2499 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2500 {
2501         int ret;
2502         unsigned int pool_size;
2503
2504         ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2505                                         sizeof(struct ufshpb_map_ctx),
2506                                         0, 0, NULL);
2507         if (!ufshpb_mctx_cache) {
2508                 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2509                 return -ENOMEM;
2510         }
2511
2512         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2513         dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2514                __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2515
2516         ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2517                                                     ufshpb_mctx_cache);
2518         if (!ufshpb_mctx_pool) {
2519                 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2520                 ret = -ENOMEM;
2521                 goto release_mctx_cache;
2522         }
2523
2524         ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2525         if (!ufshpb_page_pool) {
2526                 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2527                 ret = -ENOMEM;
2528                 goto release_mctx_pool;
2529         }
2530
2531         ufshpb_wq = alloc_workqueue("ufshpb-wq",
2532                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2533         if (!ufshpb_wq) {
2534                 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2535                 ret = -ENOMEM;
2536                 goto release_page_pool;
2537         }
2538
2539         return 0;
2540
2541 release_page_pool:
2542         mempool_destroy(ufshpb_page_pool);
2543 release_mctx_pool:
2544         mempool_destroy(ufshpb_mctx_pool);
2545 release_mctx_cache:
2546         kmem_cache_destroy(ufshpb_mctx_cache);
2547         return ret;
2548 }
2549
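     /*
      * Parse the HPB fields of the geometry descriptor; disable HPB if the
      * device reports no HPB-capable LUs or an invalid region geometry.
      */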
2550 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2551 {
2552         struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2553         int max_active_rgns = 0;
2554         int hpb_num_lu;
2555
2556         hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2557         if (hpb_num_lu == 0) {
2558                 dev_err(hba->dev, "No HPB LU supported\n");
2559                 hpb_info->hpb_disabled = true;
2560                 return;
2561         }
2562
2563         hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2564         hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2565         max_active_rgns = get_unaligned_be16(geo_buf +
2566                           GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2567
2568         if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2569             max_active_rgns == 0) {
2570                 dev_err(hba->dev, "No HPB supported device\n");
2571                 hpb_info->hpb_disabled = true;
2572                 return;
2573         }
2574 }
2575
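     /*
      * Parse the HPB fields of the device descriptor: control mode, HPB
      * version (1.0 devices are flagged as legacy), the number of user LUs,
      * and, for non-legacy devices, the maximum single-command transfer
      * length read from the MAX_HPB_SINGLE_CMD attribute.
      */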
2576 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2577 {
2578         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2579         int version, ret;
2580         int max_single_cmd;
2581
2582         hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2583
2584         version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2585         if ((version != HPB_SUPPORT_VERSION) &&
2586             (version != HPB_SUPPORT_LEGACY_VERSION)) {
2587                 dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2588                         __func__, version);
2589                 hpb_dev_info->hpb_disabled = true;
2590                 return;
2591         }
2592
2593         if (version == HPB_SUPPORT_LEGACY_VERSION)
2594                 hpb_dev_info->is_legacy = true;
2595
2596         /*
2597          * Get the number of user logical units so we can check whether all
2598          * scsi_devices have finished initialization.
2599          */
2600         hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2601
2602         if (hpb_dev_info->is_legacy)
2603                 return;
2604
2605         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2606                 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2607
2608         if (ret)
2609                 hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2610         else
2611                 hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2612 }
2613
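     /*
      * Device-level HPB initialisation: allocate the global caches, pools and
      * workqueue, then issue the fHpbReset flag so the device rebuilds its HPB
      * state. Per-LU setup continues in ufshpb_init_hpb_lu().
      */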
2614 void ufshpb_init(struct ufs_hba *hba)
2615 {
2616         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2617         int try;
2618         int ret;
2619
2620         if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2621                 return;
2622
2623         if (ufshpb_init_mem_wq(hba)) {
2624                 hpb_dev_info->hpb_disabled = true;
2625                 return;
2626         }
2627
2628         atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2629         tot_active_srgn_pages = 0;
2630         /* issue HPB reset query */
2631         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2632                 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2633                                         QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2634                 if (!ret)
2635                         break;
2636         }
2637 }
2638
2639 void ufshpb_remove(struct ufs_hba *hba)
2640 {
2641         mempool_destroy(ufshpb_page_pool);
2642         mempool_destroy(ufshpb_mctx_pool);
2643         kmem_cache_destroy(ufshpb_mctx_cache);
2644
2645         destroy_workqueue(ufshpb_wq);
2646 }
2647
2648 module_param(ufshpb_host_map_kbytes, uint, 0644);
2649 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2650         "size in kilobytes of the host-side memory pool used to cache HPB map data");