[platform/kernel/linux-starfive.git] drivers/ufs/core/ufshpb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *      Yongmyung Lee <ymhungry.lee@samsung.com>
9  *      Jinyoung Choi <j-young.choi@samsung.com>
10  */
11
12 #include <asm/unaligned.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/module.h>
16 #include <scsi/scsi_cmnd.h>
17
18 #include "ufshcd-priv.h"
19 #include "ufshpb.h"
20 #include "../../scsi/sd.h"
21
22 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
23 #define READ_TO_MS 1000
24 #define READ_TO_EXPIRIES 100
25 #define POLLING_INTERVAL_MS 200
26 #define THROTTLE_MAP_REQ_DEFAULT 1
27
28 /* memory management */
29 static struct kmem_cache *ufshpb_mctx_cache;
30 static mempool_t *ufshpb_mctx_pool;
31 static mempool_t *ufshpb_page_pool;
32 /* A 2MB cache can hold the ppn entries covering a 1GB address range. */
33 static unsigned int ufshpb_host_map_kbytes = 2048;
34 static int tot_active_srgn_pages;
35
36 static struct workqueue_struct *ufshpb_wq;
37
38 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
39                                       int srgn_idx);
40
41 bool ufshpb_is_allowed(struct ufs_hba *hba)
42 {
43         return !(hba->ufshpb_dev.hpb_disabled);
44 }
45
46 /* HPB version 1.0 is called the legacy version. */
47 bool ufshpb_is_legacy(struct ufs_hba *hba)
48 {
49         return hba->ufshpb_dev.is_legacy;
50 }
51
52 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
53 {
54         return sdev->hostdata;
55 }
56
57 static int ufshpb_get_state(struct ufshpb_lu *hpb)
58 {
59         return atomic_read(&hpb->hpb_state);
60 }
61
62 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
63 {
64         atomic_set(&hpb->hpb_state, state);
65 }
66
67 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
68                                 struct ufshpb_subregion *srgn)
69 {
70         return rgn->rgn_state != HPB_RGN_INACTIVE &&
71                 srgn->srgn_state == HPB_SRGN_VALID;
72 }
73
74 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
75 {
76         return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
77 }
78
79 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
80 {
81         return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
82                op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
83 }
84
85 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
86 {
87         return transfer_len <= hpb->pre_req_max_tr_len;
88 }
89
90 static bool ufshpb_is_general_lun(int lun)
91 {
92         return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
93 }
94
95 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
96 {
97         return hpb->lu_pinned_end != PINNED_NOT_SET &&
98                rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end;
99 }
100
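/*
 * Queue map_work on ufshpb_wq if there are pending active-subregion or
 * inactive-region updates. Does nothing unless the HPB state is PRESENT.
 */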
101 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
102 {
103         bool ret = false;
104         unsigned long flags;
105
106         if (ufshpb_get_state(hpb) != HPB_PRESENT)
107                 return;
108
109         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
110         if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
111                 ret = true;
112         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
113
114         if (ret)
115                 queue_work(ufshpb_wq, &hpb->map_work);
116 }
117
118 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
119                                     struct ufshcd_lrb *lrbp,
120                                     struct utp_hpb_rsp *rsp_field)
121 {
122         /* Check HPB_UPDATE_ALERT */
123         if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
124               UPIU_HEADER_DWORD(0, 2, 0, 0)))
125                 return false;
126
127         if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
128             rsp_field->desc_type != DEV_DES_TYPE ||
129             rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
130             rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
131             rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
132             rsp_field->hpb_op == HPB_RSP_NONE ||
133             (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
134              !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
135                 return false;
136
137         if (!ufshpb_is_general_lun(rsp_field->lun)) {
138                 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
139                          lrbp->lun);
140                 return false;
141         }
142
143         return true;
144 }
145
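/*
 * Walk the subregions covering @cnt L2P entries starting at @srgn_offset in
 * (@rgn_idx, @srgn_idx). For writes/discards (@set_dirty) the covered entries
 * are marked dirty; otherwise (host control mode reads) the read counters are
 * bumped, the region's read timeout is rewound, and the region is queued for
 * activation once the activation threshold is reached.
 */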
146 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
147                                int srgn_offset, int cnt, bool set_dirty)
148 {
149         struct ufshpb_region *rgn;
150         struct ufshpb_subregion *srgn, *prev_srgn = NULL;
151         int set_bit_len;
152         int bitmap_len;
153         unsigned long flags;
154
155 next_srgn:
156         rgn = hpb->rgn_tbl + rgn_idx;
157         srgn = rgn->srgn_tbl + srgn_idx;
158
159         if (likely(!srgn->is_last))
160                 bitmap_len = hpb->entries_per_srgn;
161         else
162                 bitmap_len = hpb->last_srgn_entries;
163
164         if ((srgn_offset + cnt) > bitmap_len)
165                 set_bit_len = bitmap_len - srgn_offset;
166         else
167                 set_bit_len = cnt;
168
169         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
170         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
171                 if (set_dirty) {
172                         if (srgn->srgn_state == HPB_SRGN_VALID)
173                                 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
174                                            set_bit_len);
175                 } else if (hpb->is_hcm) {
176                         /* rewind the read timer for LRU regions */
177                         rgn->read_timeout = ktime_add_ms(ktime_get(),
178                                         rgn->hpb->params.read_timeout_ms);
179                         rgn->read_timeout_expiries =
180                                 rgn->hpb->params.read_timeout_expiries;
181                 }
182         }
183         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
184
185         if (hpb->is_hcm && prev_srgn != srgn) {
186                 bool activate = false;
187
188                 spin_lock(&rgn->rgn_lock);
189                 if (set_dirty) {
190                         rgn->reads -= srgn->reads;
191                         srgn->reads = 0;
192                         set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
193                 } else {
194                         srgn->reads++;
195                         rgn->reads++;
196                         if (srgn->reads == hpb->params.activation_thld)
197                                 activate = true;
198                 }
199                 spin_unlock(&rgn->rgn_lock);
200
201                 if (activate ||
202                     test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
203                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
204                         ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
205                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
206                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
207                                 "activate region %d-%d\n", rgn_idx, srgn_idx);
208                 }
209
210                 prev_srgn = srgn;
211         }
212
213         srgn_offset = 0;
214         if (++srgn_idx == hpb->srgns_per_rgn) {
215                 srgn_idx = 0;
216                 rgn_idx++;
217         }
218
219         cnt -= set_bit_len;
220         if (cnt > 0)
221                 goto next_srgn;
222 }
223
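/*
 * Return true if any of the @cnt L2P entries starting at (@rgn_idx,
 * @srgn_idx, @srgn_offset) is dirty, invalid, or has no map context, in
 * which case the command must fall back to a normal READ.
 */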
224 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
225                                   int srgn_idx, int srgn_offset, int cnt)
226 {
227         struct ufshpb_region *rgn;
228         struct ufshpb_subregion *srgn;
229         int bitmap_len;
230         int bit_len;
231
232 next_srgn:
233         rgn = hpb->rgn_tbl + rgn_idx;
234         srgn = rgn->srgn_tbl + srgn_idx;
235
236         if (likely(!srgn->is_last))
237                 bitmap_len = hpb->entries_per_srgn;
238         else
239                 bitmap_len = hpb->last_srgn_entries;
240
241         if (!ufshpb_is_valid_srgn(rgn, srgn))
242                 return true;
243
244         /*
245          * If the region state is active, mctx must be allocated.
246          * In this case, check whether the region has been evicted
247          * or the mctx allocation has failed.
248          */
249         if (unlikely(!srgn->mctx)) {
250                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
251                         "no mctx in region %d subregion %d.\n",
252                         srgn->rgn_idx, srgn->srgn_idx);
253                 return true;
254         }
255
256         if ((srgn_offset + cnt) > bitmap_len)
257                 bit_len = bitmap_len - srgn_offset;
258         else
259                 bit_len = cnt;
260
261         if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
262                           srgn_offset) < bit_len + srgn_offset)
263                 return true;
264
265         srgn_offset = 0;
266         if (++srgn_idx == hpb->srgns_per_rgn) {
267                 srgn_idx = 0;
268                 rgn_idx++;
269         }
270
271         cnt -= bit_len;
272         if (cnt > 0)
273                 goto next_srgn;
274
275         return false;
276 }
277
278 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
279 {
280         return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
281 }
282
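/*
 * Copy up to @len ppn entries starting at entry @pos from the map context
 * pages into @ppn_buf. Returns the number of entries copied (bounded by the
 * page the entries start in), or -ENOMEM if the backing page is missing.
 */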
283 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
284                                      struct ufshpb_map_ctx *mctx, int pos,
285                                      int len, __be64 *ppn_buf)
286 {
287         struct page *page;
288         int index, offset;
289         int copied;
290
291         index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
292         offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
293
294         if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
295                 copied = len;
296         else
297                 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
298
299         page = mctx->m_page[index];
300         if (unlikely(!page)) {
301                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
302                         "error. cannot find page in mctx\n");
303                 return -ENOMEM;
304         }
305
306         memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
307                copied * HPB_ENTRY_SIZE);
308
309         return copied;
310 }
311
312 static void
313 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
314                         int *srgn_idx, int *offset)
315 {
316         int rgn_offset;
317
318         *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
319         rgn_offset = lpn & hpb->entries_per_rgn_mask;
320         *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
321         *offset = rgn_offset & hpb->entries_per_srgn_mask;
322 }
323
324 static void
325 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
326                             __be64 ppn, u8 transfer_len)
327 {
328         unsigned char *cdb = lrbp->cmd->cmnd;
329         __be64 ppn_tmp = ppn;
330         cdb[0] = UFSHPB_READ;
331
332         if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
333                 ppn_tmp = (__force __be64)swab64((__force u64)ppn);
334
335         /* ppn value is stored as big-endian in the host memory */
336         memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
337         cdb[14] = transfer_len;
338         cdb[15] = 0;
339
340         lrbp->cmd->cmd_len = UFS_CDB_SIZE;
341 }
342
343 /*
344  * This function sets up an HPB READ command using host-side L2P map data.
345  */
346 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
347 {
348         struct ufshpb_lu *hpb;
349         struct ufshpb_region *rgn;
350         struct ufshpb_subregion *srgn;
351         struct scsi_cmnd *cmd = lrbp->cmd;
352         u32 lpn;
353         __be64 ppn;
354         unsigned long flags;
355         int transfer_len, rgn_idx, srgn_idx, srgn_offset;
356         int err = 0;
357
358         hpb = ufshpb_get_hpb_data(cmd->device);
359         if (!hpb)
360                 return -ENODEV;
361
362         if (ufshpb_get_state(hpb) == HPB_INIT)
363                 return -ENODEV;
364
365         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
366                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
367                            "%s: ufshpb state is not PRESENT", __func__);
368                 return -ENODEV;
369         }
370
371         if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
372             (!ufshpb_is_write_or_discard(cmd) &&
373              !ufshpb_is_read_cmd(cmd)))
374                 return 0;
375
376         transfer_len = sectors_to_logical(cmd->device,
377                                           blk_rq_sectors(scsi_cmd_to_rq(cmd)));
378         if (unlikely(!transfer_len))
379                 return 0;
380
381         lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
382         ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
383         rgn = hpb->rgn_tbl + rgn_idx;
384         srgn = rgn->srgn_tbl + srgn_idx;
385
386         /* If the command is a WRITE or DISCARD, mark the bitmap as dirty */
387         if (ufshpb_is_write_or_discard(cmd)) {
388                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
389                                    transfer_len, true);
390                 return 0;
391         }
392
393         if (!ufshpb_is_supported_chunk(hpb, transfer_len))
394                 return 0;
395
396         if (hpb->is_hcm) {
397                 /*
398                  * in host control mode, reads are the main source for
399                  * activation trials.
400                  */
401                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
402                                    transfer_len, false);
403
404                 /* keep those counters normalized */
405                 if (rgn->reads > hpb->entries_per_srgn)
406                         schedule_work(&hpb->ufshpb_normalization_work);
407         }
408
409         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
410         if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
411                                    transfer_len)) {
412                 hpb->stats.miss_cnt++;
413                 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
414                 return 0;
415         }
416
417         err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
418         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
419         if (unlikely(err < 0)) {
420                 /*
421                  * In this case, the region state is active,
422                  * but the ppn table is not allocated.
423                  * The ppn table must always be allocated while the region
424                  * is in the active state.
425                  */
426                 dev_err(hba->dev, "get ppn failed. err %d\n", err);
427                 return err;
428         }
429
430         ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
431
432         hpb->stats.hit_cnt++;
433         return 0;
434 }
435
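/*
 * Allocate an ufshpb_req and its backing block-layer request. Non-atomic
 * callers (map requests) retry a busy queue a few times before giving up.
 */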
436 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx,
437                                          enum req_op op, bool atomic)
438 {
439         struct ufshpb_req *rq;
440         struct request *req;
441         int retries = HPB_MAP_REQ_RETRIES;
442
443         rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
444         if (!rq)
445                 return NULL;
446
447 retry:
448         req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op,
449                               BLK_MQ_REQ_NOWAIT);
450
451         if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
452                 usleep_range(3000, 3100);
453                 goto retry;
454         }
455
456         if (IS_ERR(req))
457                 goto free_rq;
458
459         rq->hpb = hpb;
460         rq->req = req;
461         rq->rb.rgn_idx = rgn_idx;
462
463         return rq;
464
465 free_rq:
466         kmem_cache_free(hpb->map_req_cache, rq);
467         return NULL;
468 }
469
470 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
471 {
472         blk_mq_free_request(rq->req);
473         kmem_cache_free(hpb->map_req_cache, rq);
474 }
475
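/*
 * Allocate a map request (HPB_READ_BUFFER) and its bio for @srgn. In host
 * control mode the number of inflight map requests is throttled by
 * params.inflight_map_req.
 */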
476 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
477                                              struct ufshpb_subregion *srgn)
478 {
479         struct ufshpb_req *map_req;
480         struct bio *bio;
481         unsigned long flags;
482
483         if (hpb->is_hcm &&
484             hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
485                 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
486                          "map_req throttle. inflight %d throttle %d",
487                          hpb->num_inflight_map_req,
488                          hpb->params.inflight_map_req);
489                 return NULL;
490         }
491
492         map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
493         if (!map_req)
494                 return NULL;
495
496         bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
497         if (!bio) {
498                 ufshpb_put_req(hpb, map_req);
499                 return NULL;
500         }
501
502         map_req->bio = bio;
503
504         map_req->rb.srgn_idx = srgn->srgn_idx;
505         map_req->rb.mctx = srgn->mctx;
506
507         spin_lock_irqsave(&hpb->param_lock, flags);
508         hpb->num_inflight_map_req++;
509         spin_unlock_irqrestore(&hpb->param_lock, flags);
510
511         return map_req;
512 }
513
514 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
515                                struct ufshpb_req *map_req)
516 {
517         unsigned long flags;
518
519         bio_put(map_req->bio);
520         ufshpb_put_req(hpb, map_req);
521
522         spin_lock_irqsave(&hpb->param_lock, flags);
523         hpb->num_inflight_map_req--;
524         spin_unlock_irqrestore(&hpb->param_lock, flags);
525 }
526
527 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
528                                      struct ufshpb_subregion *srgn)
529 {
530         struct ufshpb_region *rgn;
531         u32 num_entries = hpb->entries_per_srgn;
532
533         if (!srgn->mctx) {
534                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
535                         "no mctx in region %d subregion %d.\n",
536                         srgn->rgn_idx, srgn->srgn_idx);
537                 return -1;
538         }
539
540         if (unlikely(srgn->is_last))
541                 num_entries = hpb->last_srgn_entries;
542
543         bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
544
545         rgn = hpb->rgn_tbl + srgn->rgn_idx;
546         clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
547
548         return 0;
549 }
550
551 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
552                                       int srgn_idx)
553 {
554         struct ufshpb_region *rgn;
555         struct ufshpb_subregion *srgn;
556
557         rgn = hpb->rgn_tbl + rgn_idx;
558         srgn = rgn->srgn_tbl + srgn_idx;
559
560         list_del_init(&rgn->list_inact_rgn);
561
562         if (list_empty(&srgn->list_act_srgn))
563                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
564
565         hpb->stats.rcmd_active_cnt++;
566 }
567
568 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
569 {
570         struct ufshpb_region *rgn;
571         struct ufshpb_subregion *srgn;
572         int srgn_idx;
573
574         rgn = hpb->rgn_tbl + rgn_idx;
575
576         for_each_sub_region(rgn, srgn_idx, srgn)
577                 list_del_init(&srgn->list_act_srgn);
578
579         if (list_empty(&rgn->list_inact_rgn))
580                 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
581
582         hpb->stats.rcmd_inactive_cnt++;
583 }
584
585 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
586                                       struct ufshpb_subregion *srgn)
587 {
588         struct ufshpb_region *rgn;
589
590         /*
591          * If the subregion has no mctx after the HPB_READ_BUFFER I/O
592          * has completed, the region to which the subregion belongs
593          * was evicted.
594          * A region must not be evicted while its I/O is in progress.
595          */
596         if (!srgn->mctx) {
597                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
598                         "no mctx in region %d subregion %d.\n",
599                         srgn->rgn_idx, srgn->srgn_idx);
600                 srgn->srgn_state = HPB_SRGN_INVALID;
601                 return;
602         }
603
604         rgn = hpb->rgn_tbl + srgn->rgn_idx;
605
606         if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
607                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
608                         "region %d subregion %d evicted\n",
609                         srgn->rgn_idx, srgn->srgn_idx);
610                 srgn->srgn_state = HPB_SRGN_INVALID;
611                 return;
612         }
613         srgn->srgn_state = HPB_SRGN_VALID;
614 }
615
616 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
617 {
618         struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
619
620         ufshpb_put_req(umap_req->hpb, umap_req);
621 }
622
623 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
624 {
625         struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
626         struct ufshpb_lu *hpb = map_req->hpb;
627         struct ufshpb_subregion *srgn;
628         unsigned long flags;
629
630         srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
631                 map_req->rb.srgn_idx;
632
633         ufshpb_clear_dirty_bitmap(hpb, srgn);
634         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
635         ufshpb_activate_subregion(hpb, srgn);
636         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
637
638         ufshpb_put_map_req(map_req->hpb, map_req);
639 }
640
641 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
642 {
643         cdb[0] = UFSHPB_WRITE_BUFFER;
644         cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
645                           UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
646         if (rgn)
647                 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
648         cdb[9] = 0x00;
649 }
650
651 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
652                                     int srgn_idx, int srgn_mem_size)
653 {
654         cdb[0] = UFSHPB_READ_BUFFER;
655         cdb[1] = UFSHPB_READ_BUFFER_ID;
656
657         put_unaligned_be16(rgn_idx, &cdb[2]);
658         put_unaligned_be16(srgn_idx, &cdb[4]);
659         put_unaligned_be24(srgn_mem_size, &cdb[6]);
660
661         cdb[9] = 0x00;
662 }
663
664 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
665                                    struct ufshpb_req *umap_req,
666                                    struct ufshpb_region *rgn)
667 {
668         struct request *req = umap_req->req;
669         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
670
671         req->timeout = 0;
672         req->end_io_data = umap_req;
673         req->end_io = ufshpb_umap_req_compl_fn;
674
675         ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
676         scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
677
678         blk_execute_rq_nowait(req, true);
679
680         hpb->stats.umap_req_cnt++;
681 }
682
683 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
684                                   struct ufshpb_req *map_req, bool last)
685 {
686         struct request_queue *q;
687         struct request *req;
688         struct scsi_cmnd *scmd;
689         int mem_size = hpb->srgn_mem_size;
690         int ret = 0;
691         int i;
692
693         q = hpb->sdev_ufs_lu->request_queue;
694         for (i = 0; i < hpb->pages_per_srgn; i++) {
695                 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
696                                       PAGE_SIZE, 0);
697                 if (ret != PAGE_SIZE) {
698                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
699                                    "bio_add_pc_page fail %d - %d\n",
700                                    map_req->rb.rgn_idx, map_req->rb.srgn_idx);
701                         return ret;
702                 }
703         }
704
705         req = map_req->req;
706
707         blk_rq_append_bio(req, map_req->bio);
708
709         req->end_io_data = map_req;
710         req->end_io = ufshpb_map_req_compl_fn;
711
712         if (unlikely(last))
713                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
714
715         scmd = blk_mq_rq_to_pdu(req);
716         ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
717                                 map_req->rb.srgn_idx, mem_size);
718         scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
719
720         blk_execute_rq_nowait(req, true);
721
722         hpb->stats.map_req_cnt++;
723         return 0;
724 }
725
726 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
727                                                  bool last)
728 {
729         struct ufshpb_map_ctx *mctx;
730         u32 num_entries = hpb->entries_per_srgn;
731         int i, j;
732
733         mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
734         if (!mctx)
735                 return NULL;
736
737         mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
738         if (!mctx->m_page)
739                 goto release_mctx;
740
741         if (unlikely(last))
742                 num_entries = hpb->last_srgn_entries;
743
744         mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
745         if (!mctx->ppn_dirty)
746                 goto release_m_page;
747
748         for (i = 0; i < hpb->pages_per_srgn; i++) {
749                 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
750                 if (!mctx->m_page[i]) {
751                         for (j = 0; j < i; j++)
752                                 mempool_free(mctx->m_page[j], ufshpb_page_pool);
753                         goto release_ppn_dirty;
754                 }
755                 clear_page(page_address(mctx->m_page[i]));
756         }
757
758         return mctx;
759
760 release_ppn_dirty:
761         bitmap_free(mctx->ppn_dirty);
762 release_m_page:
763         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
764 release_mctx:
765         mempool_free(mctx, ufshpb_mctx_pool);
766         return NULL;
767 }
768
769 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
770                                struct ufshpb_map_ctx *mctx)
771 {
772         int i;
773
774         for (i = 0; i < hpb->pages_per_srgn; i++)
775                 mempool_free(mctx->m_page[i], ufshpb_page_pool);
776
777         bitmap_free(mctx->ppn_dirty);
778         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
779         mempool_free(mctx, ufshpb_mctx_pool);
780 }
781
782 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
783                                           struct ufshpb_region *rgn)
784 {
785         struct ufshpb_subregion *srgn;
786         int srgn_idx;
787
788         for_each_sub_region(rgn, srgn_idx, srgn)
789                 if (srgn->srgn_state == HPB_SRGN_ISSUED)
790                         return -EPERM;
791
792         return 0;
793 }
794
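/*
 * Periodic worker for host control mode: for each LRU region whose read
 * timeout has expired, either queue it for inactivation (if it is dirty or
 * out of timeout expiries) or rewind its read timeout, then re-arm this work.
 */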
795 static void ufshpb_read_to_handler(struct work_struct *work)
796 {
797         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
798                                              ufshpb_read_to_work.work);
799         struct victim_select_info *lru_info = &hpb->lru_info;
800         struct ufshpb_region *rgn, *next_rgn;
801         unsigned long flags;
802         unsigned int poll;
803         LIST_HEAD(expired_list);
804
805         if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
806                 return;
807
808         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
809
810         list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
811                                  list_lru_rgn) {
812                 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
813
814                 if (timedout) {
815                         rgn->read_timeout_expiries--;
816                         if (is_rgn_dirty(rgn) ||
817                             rgn->read_timeout_expiries == 0)
818                                 list_add(&rgn->list_expired_rgn, &expired_list);
819                         else
820                                 rgn->read_timeout = ktime_add_ms(ktime_get(),
821                                                 hpb->params.read_timeout_ms);
822                 }
823         }
824
825         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
826
827         list_for_each_entry_safe(rgn, next_rgn, &expired_list,
828                                  list_expired_rgn) {
829                 list_del_init(&rgn->list_expired_rgn);
830                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
831                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
832                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
833         }
834
835         ufshpb_kick_map_work(hpb);
836
837         clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
838
839         poll = hpb->params.timeout_polling_interval_ms;
840         schedule_delayed_work(&hpb->ufshpb_read_to_work,
841                               msecs_to_jiffies(poll));
842 }
843
844 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
845                                 struct ufshpb_region *rgn)
846 {
847         rgn->rgn_state = HPB_RGN_ACTIVE;
848         list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
849         atomic_inc(&lru_info->active_cnt);
850         if (rgn->hpb->is_hcm) {
851                 rgn->read_timeout =
852                         ktime_add_ms(ktime_get(),
853                                      rgn->hpb->params.read_timeout_ms);
854                 rgn->read_timeout_expiries =
855                         rgn->hpb->params.read_timeout_expiries;
856         }
857 }
858
859 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
860                                 struct ufshpb_region *rgn)
861 {
862         list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
863 }
864
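/*
 * Pick an eviction victim: the least recently used region that has no
 * subregion in the ISSUED state and, in host control mode, whose read count
 * is below the eviction exit threshold.
 */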
865 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
866 {
867         struct victim_select_info *lru_info = &hpb->lru_info;
868         struct ufshpb_region *rgn, *victim_rgn = NULL;
869
870         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
871                 if (ufshpb_check_srgns_issue_state(hpb, rgn))
872                         continue;
873
874                 /*
875                  * in host control mode, only pick a victim whose read
876                  * count is below the eviction exit threshold
877                  */
878                 if (hpb->is_hcm &&
879                     rgn->reads > hpb->params.eviction_thld_exit)
880                         continue;
881
882                 victim_rgn = rgn;
883                 break;
884         }
885
886         if (!victim_rgn)
887                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
888                         "%s: no region allocated\n",
889                         __func__);
890
891         return victim_rgn;
892 }
893
894 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
895                                     struct ufshpb_region *rgn)
896 {
897         list_del_init(&rgn->list_lru_rgn);
898         rgn->rgn_state = HPB_RGN_INACTIVE;
899         atomic_dec(&lru_info->active_cnt);
900 }
901
902 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
903                                           struct ufshpb_subregion *srgn)
904 {
905         if (srgn->srgn_state != HPB_SRGN_UNUSED) {
906                 ufshpb_put_map_ctx(hpb, srgn->mctx);
907                 srgn->srgn_state = HPB_SRGN_UNUSED;
908                 srgn->mctx = NULL;
909         }
910 }
911
912 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
913                                  struct ufshpb_region *rgn,
914                                  bool atomic)
915 {
916         struct ufshpb_req *umap_req;
917         int rgn_idx = rgn ? rgn->rgn_idx : 0;
918
919         umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
920         if (!umap_req)
921                 return -ENOMEM;
922
923         ufshpb_execute_umap_req(hpb, umap_req, rgn);
924
925         return 0;
926 }
927
928 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
929                                         struct ufshpb_region *rgn)
930 {
931         return ufshpb_issue_umap_req(hpb, rgn, true);
932 }
933
934 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
935                                  struct ufshpb_region *rgn)
936 {
937         struct victim_select_info *lru_info;
938         struct ufshpb_subregion *srgn;
939         int srgn_idx;
940
941         lru_info = &hpb->lru_info;
942
943         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
944
945         ufshpb_cleanup_lru_info(lru_info, rgn);
946
947         for_each_sub_region(rgn, srgn_idx, srgn)
948                 ufshpb_purge_active_subregion(hpb, srgn);
949 }
950
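/*
 * Evict @rgn from the LRU unless it is pinned or has a subregion map request
 * in flight. In host control mode an unmap (HPB WRITE_BUFFER) request is
 * sent to the device before the eviction.
 */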
951 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
952 {
953         unsigned long flags;
954         int ret = 0;
955
956         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
957         if (rgn->rgn_state == HPB_RGN_PINNED) {
958                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
959                          "pinned region cannot drop-out. region %d\n",
960                          rgn->rgn_idx);
961                 goto out;
962         }
963
964         if (!list_empty(&rgn->list_lru_rgn)) {
965                 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
966                         ret = -EBUSY;
967                         goto out;
968                 }
969
970                 if (hpb->is_hcm) {
971                         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
972                         ret = ufshpb_issue_umap_single_req(hpb, rgn);
973                         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
974                         if (ret)
975                                 goto out;
976                 }
977
978                 __ufshpb_evict_region(hpb, rgn);
979         }
980 out:
981         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
982         return ret;
983 }
984
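/*
 * Issue an HPB_READ_BUFFER (map request) for @srgn: allocate its map context
 * if needed, mark the subregion ISSUED and send the request. On failure the
 * subregion state is rolled back and -EAGAIN is returned so the caller can
 * retry.
 */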
985 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
986                                 struct ufshpb_region *rgn,
987                                 struct ufshpb_subregion *srgn)
988 {
989         struct ufshpb_req *map_req;
990         unsigned long flags;
991         int ret;
992         int err = -EAGAIN;
993         bool alloc_required = false;
994         enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
995
996         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
997
998         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
999                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1000                            "%s: ufshpb state is not PRESENT\n", __func__);
1001                 goto unlock_out;
1002         }
1003
1004         if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1005             (srgn->srgn_state == HPB_SRGN_INVALID)) {
1006                 err = 0;
1007                 goto unlock_out;
1008         }
1009
1010         if (srgn->srgn_state == HPB_SRGN_UNUSED)
1011                 alloc_required = true;
1012
1013         /*
1014          * If the subregion is already in the ISSUED state, a device-side
1015          * event (e.g. GC or wear-leveling) occurred and another HPB
1016          * response requesting a map load was received for it.
1017          * In that case, once the current HPB_READ_BUFFER finishes, the
1018          * next HPB_READ_BUFFER is issued again to obtain the latest
1019          * map data.
1020          */
1021         if (srgn->srgn_state == HPB_SRGN_ISSUED)
1022                 goto unlock_out;
1023
1024         srgn->srgn_state = HPB_SRGN_ISSUED;
1025         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1026
1027         if (alloc_required) {
1028                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1029                 if (!srgn->mctx) {
1030                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1031                             "get map_ctx failed. region %d - %d\n",
1032                             rgn->rgn_idx, srgn->srgn_idx);
1033                         state = HPB_SRGN_UNUSED;
1034                         goto change_srgn_state;
1035                 }
1036         }
1037
1038         map_req = ufshpb_get_map_req(hpb, srgn);
1039         if (!map_req)
1040                 goto change_srgn_state;
1041
1042
1043         ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1044         if (ret) {
1045                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1046                            "%s: issue map_req failed: %d, region %d - %d\n",
1047                            __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1048                 goto free_map_req;
1049         }
1050         return 0;
1051
1052 free_map_req:
1053         ufshpb_put_map_req(hpb, map_req);
1054 change_srgn_state:
1055         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1056         srgn->srgn_state = state;
1057 unlock_out:
1058         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1059         return err;
1060 }
1061
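/*
 * Ensure @rgn is on the active LRU list: hit it if it is already there,
 * otherwise activate it, evicting the LRU victim first when the list is
 * full (in host control mode only if @rgn has enough reads to enter).
 */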
1062 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1063 {
1064         struct ufshpb_region *victim_rgn = NULL;
1065         struct victim_select_info *lru_info = &hpb->lru_info;
1066         unsigned long flags;
1067         int ret = 0;
1068
1069         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1070         /*
1071          * If the region is already on the lru_list, just move it to
1072          * the MRU end of the list, since the region is already in
1073          * the active state.
1074          */
1075         if (!list_empty(&rgn->list_lru_rgn)) {
1076                 ufshpb_hit_lru_info(lru_info, rgn);
1077                 goto out;
1078         }
1079
1080         if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1081                 if (atomic_read(&lru_info->active_cnt) ==
1082                     lru_info->max_lru_active_cnt) {
1083                         /*
1084                          * If the maximum number of active regions is
1085                          * reached, evict the least recently used region.
1086                          * This can happen when the device responds to
1087                          * the eviction information late. It is okay to
1088                          * evict the least recently used region, because
1089                          * the device can detect that it is no longer in
1090                          * use once the host stops issuing HPB_READ for it.
1091                          *
1092                          * In host control mode, also verify that the
1093                          * entering region has enough reads.
1094                          */
1095                         if (hpb->is_hcm &&
1096                             rgn->reads < hpb->params.eviction_thld_enter) {
1097                                 ret = -EACCES;
1098                                 goto out;
1099                         }
1100
1101                         victim_rgn = ufshpb_victim_lru_info(hpb);
1102                         if (!victim_rgn) {
1103                                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1104                                     "cannot get victim region %s\n",
1105                                     hpb->is_hcm ? "" : "error");
1106                                 ret = -ENOMEM;
1107                                 goto out;
1108                         }
1109
1110                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1111                                 "LRU full (%d), choose victim %d\n",
1112                                 atomic_read(&lru_info->active_cnt),
1113                                 victim_rgn->rgn_idx);
1114
1115                         if (hpb->is_hcm) {
1116                                 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1117                                                        flags);
1118                                 ret = ufshpb_issue_umap_single_req(hpb,
1119                                                                 victim_rgn);
1120                                 spin_lock_irqsave(&hpb->rgn_state_lock,
1121                                                   flags);
1122                                 if (ret)
1123                                         goto out;
1124                         }
1125
1126                         __ufshpb_evict_region(hpb, victim_rgn);
1127                 }
1128
1129                 /*
1130                  * When a region is added to the lru_info list, it is
1131                  * guaranteed that all of its subregions have been assigned
1132                  * an mctx. If that failed, the mctx is requested again
1133                  * without the region being added to the lru_info list.
1134                  */
1135                 ufshpb_add_lru_info(lru_info, rgn);
1136         }
1137 out:
1138         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1139         return ret;
1140 }
1141 /**
1142  * ufshpb_submit_region_inactive() - submit a region to be inactivated later
1143  * @hpb: per-LU HPB instance
1144  * @region_index: the index associated with the region that will be inactivated later
1145  */
1146 static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index)
1147 {
1148         int subregion_index;
1149         struct ufshpb_region *rgn;
1150         struct ufshpb_subregion *srgn;
1151
1152         /*
1153          * Remove this region from active region list and add it to inactive list
1154          */
1155         spin_lock(&hpb->rsp_list_lock);
1156         ufshpb_update_inactive_info(hpb, region_index);
1157         spin_unlock(&hpb->rsp_list_lock);
1158
1159         rgn = hpb->rgn_tbl + region_index;
1160
1161         /*
1162          * Set the subregion state to HPB_SRGN_INVALID so that no HPB read is issued for this subregion
1163          */
1164         spin_lock(&hpb->rgn_state_lock);
1165         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1166                 for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) {
1167                         srgn = rgn->srgn_tbl + subregion_index;
1168                         if (srgn->srgn_state == HPB_SRGN_VALID)
1169                                 srgn->srgn_state = HPB_SRGN_INVALID;
1170                 }
1171         }
1172         spin_unlock(&hpb->rgn_state_lock);
1173 }
1174
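/*
 * Apply the device's recommendations carried in the HPB response: queue the
 * listed subregions for activation and, in device control mode, the listed
 * regions for inactivation, then kick map_work.
 */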
1175 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1176                                          struct utp_hpb_rsp *rsp_field)
1177 {
1178         struct ufshpb_region *rgn;
1179         struct ufshpb_subregion *srgn;
1180         int i, rgn_i, srgn_i;
1181
1182         BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1183         /*
1184          * If the active region and the inactive region are the same,
1185          * we will inactivate this region.
1186          * The device can check this (region inactivated) and
1187          * will respond with the proper active region information.
1188          */
1189         for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1190                 rgn_i =
1191                         be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1192                 srgn_i =
1193                         be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1194
1195                 rgn = hpb->rgn_tbl + rgn_i;
1196                 if (hpb->is_hcm &&
1197                     (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1198                         /*
1199                          * in host control mode, subregion activation
1200                          * recommendations are only honored for active regions.
1201                          * Also, ignore recommendations for dirty regions - the
1202                          * host makes its own decisions concerning those.
1203                          */
1204                         continue;
1205                 }
1206
1207                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1208                         "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1209
1210                 spin_lock(&hpb->rsp_list_lock);
1211                 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1212                 spin_unlock(&hpb->rsp_list_lock);
1213
1214                 srgn = rgn->srgn_tbl + srgn_i;
1215
1216                 /* blocking HPB_READ */
1217                 spin_lock(&hpb->rgn_state_lock);
1218                 if (srgn->srgn_state == HPB_SRGN_VALID)
1219                         srgn->srgn_state = HPB_SRGN_INVALID;
1220                 spin_unlock(&hpb->rgn_state_lock);
1221         }
1222
1223         if (hpb->is_hcm) {
1224                 /*
1225                  * in host control mode the device is not allowed to inactivate
1226                  * regions
1227                  */
1228                 goto out;
1229         }
1230
1231         for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1232                 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1233                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i);
1234                 ufshpb_submit_region_inactive(hpb, rgn_i);
1235         }
1236
1237 out:
1238         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1239                 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1240
1241         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1242                 queue_work(ufshpb_wq, &hpb->map_work);
1243 }
1244
1245 /*
1246  * Set RGN_FLAG_UPDATE on all active regions so that the host side reloads their L2P entries later.
1247  */
1248 static void ufshpb_set_regions_update(struct ufshpb_lu *hpb)
1249 {
1250         struct victim_select_info *lru_info = &hpb->lru_info;
1251         struct ufshpb_region *rgn;
1252         unsigned long flags;
1253
1254         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1255
1256         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1257                 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1258
1259         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1260 }
1261
1262 static void ufshpb_dev_reset_handler(struct ufs_hba *hba)
1263 {
1264         struct scsi_device *sdev;
1265         struct ufshpb_lu *hpb;
1266
1267         __shost_for_each_device(sdev, hba->host) {
1268                 hpb = ufshpb_get_hpb_data(sdev);
1269                 if (!hpb)
1270                         continue;
1271
1272                 if (hpb->is_hcm) {
1273                         /*
1274                          * In HPB host control mode, if the device powered up and lost its HPB
1275                          * information, set the region flag to RGN_FLAG_UPDATE so that the host
1276                          * reloads its L2P entries (reactivates the region in the UFS device).
1277                          */
1278                         ufshpb_set_regions_update(hpb);
1279                 } else {
1280                         /*
1281                          * In HPB device control mode, receiving 02h:HPB Operation in the UPIU
1282                          * response means the device recommends that the host inactivate all
1283                          * active regions. Add all active regions to the inactive list here;
1284                          * they will be inactivated later in ufshpb_map_work_handler().
1285                          */
1286                         struct victim_select_info *lru_info = &hpb->lru_info;
1287                         struct ufshpb_region *rgn;
1288
1289                         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1290                                 ufshpb_submit_region_inactive(hpb, rgn->rgn_idx);
1291
1292                         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1293                                 queue_work(ufshpb_wq, &hpb->map_work);
1294                 }
1295         }
1296 }
1297
1298 /*
1299  * This function parses the recommended active subregion information in the
1300  * sense data field of a response UPIU with SAM_STAT_GOOD status.
1301  */
1302 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1303 {
1304         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1305         struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1306         int data_seg_len;
1307
1308         data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1309                 & MASK_RSP_UPIU_DATA_SEG_LEN;
1310
1311         /* If data segment length is zero, rsp_field is not valid */
1312         if (!data_seg_len)
1313                 return;
1314
1315         if (unlikely(lrbp->lun != rsp_field->lun)) {
1316                 struct scsi_device *sdev;
1317                 bool found = false;
1318
1319                 __shost_for_each_device(sdev, hba->host) {
1320                         hpb = ufshpb_get_hpb_data(sdev);
1321
1322                         if (!hpb)
1323                                 continue;
1324
1325                         if (rsp_field->lun == hpb->lun) {
1326                                 found = true;
1327                                 break;
1328                         }
1329                 }
1330
1331                 if (!found)
1332                         return;
1333         }
1334
1335         if (!hpb)
1336                 return;
1337
1338         if (ufshpb_get_state(hpb) == HPB_INIT)
1339                 return;
1340
1341         if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1342             (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1343                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1344                            "%s: ufshpb state is not PRESENT/SUSPEND\n",
1345                            __func__);
1346                 return;
1347         }
1348
1349         BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1350
1351         if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1352                 return;
1353
1354         hpb->stats.rcmd_noti_cnt++;
1355
1356         switch (rsp_field->hpb_op) {
1357         case HPB_RSP_REQ_REGION_UPDATE:
1358                 if (data_seg_len != DEV_DATA_SEG_LEN)
1359                         dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1360                                  "%s: data seg length is not same.\n",
1361                                  __func__);
1362                 ufshpb_rsp_req_region_update(hpb, rsp_field);
1363                 break;
1364         case HPB_RSP_DEV_RESET:
1365                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1366                          "UFS device lost HPB information during PM.\n");
1367                 ufshpb_dev_reset_handler(hba);
1368
1369                 break;
1370         default:
1371                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1372                            "hpb_op is not available: %d\n",
1373                            rsp_field->hpb_op);
1374                 break;
1375         }
1376 }
1377
1378 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1379                                    struct ufshpb_region *rgn,
1380                                    struct ufshpb_subregion *srgn)
1381 {
1382         if (!list_empty(&rgn->list_inact_rgn))
1383                 return;
1384
1385         if (!list_empty(&srgn->list_act_srgn)) {
1386                 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1387                 return;
1388         }
1389
1390         list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1391 }
1392
1393 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1394                                           struct ufshpb_region *rgn,
1395                                           struct list_head *pending_list)
1396 {
1397         struct ufshpb_subregion *srgn;
1398         int srgn_idx;
1399
1400         if (!list_empty(&rgn->list_inact_rgn))
1401                 return;
1402
1403         for_each_sub_region(rgn, srgn_idx, srgn)
1404                 if (!list_empty(&srgn->list_act_srgn))
1405                         return;
1406
1407         list_add_tail(&rgn->list_inact_rgn, pending_list);
1408 }
1409
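/*
 * Drain the pending active-subregion list: for each entry, add its region to
 * the LRU and issue the map request; on failure put the subregion back on
 * the list so it is retried later.
 */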
1410 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1411 {
1412         struct ufshpb_region *rgn;
1413         struct ufshpb_subregion *srgn;
1414         unsigned long flags;
1415         int ret = 0;
1416
1417         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1418         while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1419                                                 struct ufshpb_subregion,
1420                                                 list_act_srgn))) {
1421                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1422                         break;
1423
1424                 list_del_init(&srgn->list_act_srgn);
1425                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1426
1427                 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1428                 ret = ufshpb_add_region(hpb, rgn);
1429                 if (ret)
1430                         goto active_failed;
1431
1432                 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1433                 if (ret) {
1434                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1435                             "issue map_req failed. ret %d, region %d - %d\n",
1436                             ret, rgn->rgn_idx, srgn->srgn_idx);
1437                         goto active_failed;
1438                 }
1439                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1440         }
1441         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1442         return;
1443
1444 active_failed:
1445         dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1446                    rgn->rgn_idx, srgn->srgn_idx);
1447         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1448         ufshpb_add_active_list(hpb, rgn, srgn);
1449         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1450 }
1451
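/*
 * Drain the pending inactive-region list: evict each region, deferring any
 * region that is still busy back onto the inactive list for a later pass.
 */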
1452 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1453 {
1454         struct ufshpb_region *rgn;
1455         unsigned long flags;
1456         int ret;
1457         LIST_HEAD(pending_list);
1458
1459         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1460         while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1461                                                struct ufshpb_region,
1462                                                list_inact_rgn))) {
1463                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1464                         break;
1465
1466                 list_del_init(&rgn->list_inact_rgn);
1467                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1468
1469                 ret = ufshpb_evict_region(hpb, rgn);
1470                 if (ret) {
1471                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1472                         ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1473                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1474                 }
1475
1476                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1477         }
1478
1479         list_splice(&pending_list, &hpb->lh_inact_rgn);
1480         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1481 }
1482
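/*
 * Read-counter normalization for host control mode: each subregion read
 * counter is divided by 2^normalization_factor and summed into its region
 * counter. Active regions whose counter has decayed to zero are queued for
 * inactivation.
 */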
1483 static void ufshpb_normalization_work_handler(struct work_struct *work)
1484 {
1485         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1486                                              ufshpb_normalization_work);
1487         int rgn_idx;
1488         u8 factor = hpb->params.normalization_factor;
1489
1490         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1491                 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1492                 int srgn_idx;
1493
1494                 spin_lock(&rgn->rgn_lock);
1495                 rgn->reads = 0;
1496                 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1497                         struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1498
1499                         srgn->reads >>= factor;
1500                         rgn->reads += srgn->reads;
1501                 }
1502                 spin_unlock(&rgn->rgn_lock);
1503
1504                 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1505                         continue;
1506
1507                 /* if region is active but has no reads - inactivate it */
1508                 spin_lock(&hpb->rsp_list_lock);
1509                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1510                 spin_unlock(&hpb->rsp_list_lock);
1511         }
1512 }
1513
1514 static void ufshpb_map_work_handler(struct work_struct *work)
1515 {
1516         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1517
1518         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1519                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1520                            "%s: ufshpb state is not PRESENT\n", __func__);
1521                 return;
1522         }
1523
1524         ufshpb_run_inactive_region_list(hpb);
1525         ufshpb_run_active_subregion_list(hpb);
1526 }
1527
1528 /*
1529  * This function does not need to hold any locks (rgn_state_lock,
1530  * rsp_list_lock, etc.) because it is only called during initialization.
1531  */
1532 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1533                                             struct ufshpb_lu *hpb,
1534                                             struct ufshpb_region *rgn)
1535 {
1536         struct ufshpb_subregion *srgn;
1537         int srgn_idx, i;
1538         int err = 0;
1539
1540         for_each_sub_region(rgn, srgn_idx, srgn) {
1541                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1542                 srgn->srgn_state = HPB_SRGN_INVALID;
1543                 if (!srgn->mctx) {
1544                         err = -ENOMEM;
1545                         dev_err(hba->dev,
1546                                 "alloc mctx for pinned region failed\n");
1547                         goto release;
1548                 }
1549
1550                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1551         }
1552
1553         rgn->rgn_state = HPB_RGN_PINNED;
1554         return 0;
1555
1556 release:
1557         for (i = 0; i < srgn_idx; i++) {
1558                 srgn = rgn->srgn_tbl + i;
1559                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1560         }
1561         return err;
1562 }
1563
1564 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1565                                       struct ufshpb_region *rgn, bool last)
1566 {
1567         int srgn_idx;
1568         struct ufshpb_subregion *srgn;
1569
1570         for_each_sub_region(rgn, srgn_idx, srgn) {
1571                 INIT_LIST_HEAD(&srgn->list_act_srgn);
1572
1573                 srgn->rgn_idx = rgn->rgn_idx;
1574                 srgn->srgn_idx = srgn_idx;
1575                 srgn->srgn_state = HPB_SRGN_UNUSED;
1576         }
1577
1578         if (unlikely(last && hpb->last_srgn_entries))
1579                 srgn->is_last = true;
1580 }
1581
1582 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1583                                       struct ufshpb_region *rgn, int srgn_cnt)
1584 {
1585         rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1586                                  GFP_KERNEL);
1587         if (!rgn->srgn_tbl)
1588                 return -ENOMEM;
1589
1590         rgn->srgn_cnt = srgn_cnt;
1591         return 0;
1592 }
1593
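/*
 * Compute the per-LU HPB geometry. The region and subregion size exponents
 * from the geometry descriptor are expressed in units of HPB_RGN_SIZE_UNIT
 * bytes of LBA range; each HPB_ENTRY_BLOCK_SIZE of that range needs one
 * HPB_ENTRY_SIZE L2P entry. From this the map memory needed per
 * (sub)region and the entries-per-(sub)region shift/mask values used in
 * the I/O path are derived.
 */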
1594 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1595                                      struct ufshpb_lu *hpb,
1596                                      struct ufshpb_dev_info *hpb_dev_info,
1597                                      struct ufshpb_lu_info *hpb_lu_info)
1598 {
1599         u32 entries_per_rgn;
1600         u64 rgn_mem_size, tmp;
1601
1602         if (ufshpb_is_legacy(hba))
1603                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1604         else
1605                 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1606
1607         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1608         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1609                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1610                 : PINNED_NOT_SET;
1611         hpb->lru_info.max_lru_active_cnt =
1612                 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1613
1614         rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1615                         * HPB_ENTRY_SIZE;
1616         do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1617         hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1618                 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1619
1620         tmp = rgn_mem_size;
1621         do_div(tmp, HPB_ENTRY_SIZE);
1622         entries_per_rgn = (u32)tmp;
1623         hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1624         hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1625
1626         hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1627         hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1628         hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1629
1630         tmp = rgn_mem_size;
1631         do_div(tmp, hpb->srgn_mem_size);
1632         hpb->srgns_per_rgn = (int)tmp;
1633
1634         hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1635                                 entries_per_rgn);
1636         hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1637                                 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1638         hpb->last_srgn_entries = hpb_lu_info->num_blocks
1639                                  % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1640
1641         hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1642
1643         if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1644                 hpb->is_hcm = true;
1645 }
1646
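/*
 * Allocate and initialize the region table for this LU. Every region gets
 * its own subregion table (the last region may have fewer subregions).
 * Pinned regions are activated immediately by pre-allocating map contexts
 * and queueing their subregions for map requests; all other regions start
 * out HPB_RGN_INACTIVE. On failure, everything allocated so far is freed.
 */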
1647 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1648 {
1649         struct ufshpb_region *rgn_table, *rgn;
1650         int rgn_idx, i;
1651         int ret = 0;
1652
1653         rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1654                             GFP_KERNEL);
1655         if (!rgn_table)
1656                 return -ENOMEM;
1657
1658         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1659                 int srgn_cnt = hpb->srgns_per_rgn;
1660                 bool last_srgn = false;
1661
1662                 rgn = rgn_table + rgn_idx;
1663                 rgn->rgn_idx = rgn_idx;
1664
1665                 spin_lock_init(&rgn->rgn_lock);
1666
1667                 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1668                 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1669                 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1670
1671                 if (rgn_idx == hpb->rgns_per_lu - 1) {
1672                         srgn_cnt = ((hpb->srgns_per_lu - 1) %
1673                                     hpb->srgns_per_rgn) + 1;
1674                         last_srgn = true;
1675                 }
1676
1677                 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1678                 if (ret)
1679                         goto release_srgn_table;
1680                 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1681
1682                 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1683                         ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1684                         if (ret)
1685                                 goto release_srgn_table;
1686                 } else {
1687                         rgn->rgn_state = HPB_RGN_INACTIVE;
1688                 }
1689
1690                 rgn->rgn_flags = 0;
1691                 rgn->hpb = hpb;
1692         }
1693
1694         hpb->rgn_tbl = rgn_table;
1695
1696         return 0;
1697
1698 release_srgn_table:
1699         for (i = 0; i <= rgn_idx; i++)
1700                 kvfree(rgn_table[i].srgn_tbl);
1701
1702         kvfree(rgn_table);
1703         return ret;
1704 }
1705
1706 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1707                                          struct ufshpb_region *rgn)
1708 {
1709         int srgn_idx;
1710         struct ufshpb_subregion *srgn;
1711
1712         for_each_sub_region(rgn, srgn_idx, srgn)
1713                 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1714                         srgn->srgn_state = HPB_SRGN_UNUSED;
1715                         ufshpb_put_map_ctx(hpb, srgn->mctx);
1716                 }
1717 }
1718
1719 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1720 {
1721         int rgn_idx;
1722
1723         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1724                 struct ufshpb_region *rgn;
1725
1726                 rgn = hpb->rgn_tbl + rgn_idx;
1727                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1728                         rgn->rgn_state = HPB_RGN_INACTIVE;
1729
1730                         ufshpb_destroy_subregion_tbl(hpb, rgn);
1731                 }
1732
1733                 kvfree(rgn->srgn_tbl);
1734         }
1735
1736         kvfree(hpb->rgn_tbl);
1737 }
1738
1739 /* SYSFS functions: HPB statistics */
1740 #define ufshpb_sysfs_attr_show_func(__name)                             \
1741 static ssize_t __name##_show(struct device *dev,                        \
1742         struct device_attribute *attr, char *buf)                       \
1743 {                                                                       \
1744         struct scsi_device *sdev = to_scsi_device(dev);                 \
1745         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1746                                                                         \
1747         if (!hpb)                                                       \
1748                 return -ENODEV;                                         \
1749                                                                         \
1750         return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
1751 }                                                                       \
1752 \
1753 static DEVICE_ATTR_RO(__name)
1754
1755 ufshpb_sysfs_attr_show_func(hit_cnt);
1756 ufshpb_sysfs_attr_show_func(miss_cnt);
1757 ufshpb_sysfs_attr_show_func(rcmd_noti_cnt);
1758 ufshpb_sysfs_attr_show_func(rcmd_active_cnt);
1759 ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt);
1760 ufshpb_sysfs_attr_show_func(map_req_cnt);
1761 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1762
1763 static struct attribute *hpb_dev_stat_attrs[] = {
1764         &dev_attr_hit_cnt.attr,
1765         &dev_attr_miss_cnt.attr,
1766         &dev_attr_rcmd_noti_cnt.attr,
1767         &dev_attr_rcmd_active_cnt.attr,
1768         &dev_attr_rcmd_inactive_cnt.attr,
1769         &dev_attr_map_req_cnt.attr,
1770         &dev_attr_umap_req_cnt.attr,
1771         NULL,
1772 };
1773
1774 struct attribute_group ufs_sysfs_hpb_stat_group = {
1775         .name = "hpb_stats",
1776         .attrs = hpb_dev_stat_attrs,
1777 };
1778
1779 /* SYSFS functions: HPB parameters */
1780 #define ufshpb_sysfs_param_show_func(__name)                            \
1781 static ssize_t __name##_show(struct device *dev,                        \
1782         struct device_attribute *attr, char *buf)                       \
1783 {                                                                       \
1784         struct scsi_device *sdev = to_scsi_device(dev);                 \
1785         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1786                                                                         \
1787         if (!hpb)                                                       \
1788                 return -ENODEV;                                         \
1789                                                                         \
1790         return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
1791 }
1792
1793 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1794 static ssize_t
1795 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1796                          const char *buf, size_t count)
1797 {
1798         struct scsi_device *sdev = to_scsi_device(dev);
1799         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1800         int val;
1801
1802         if (!hpb)
1803                 return -ENODEV;
1804
1805         if (kstrtouint(buf, 0, &val))
1806                 return -EINVAL;
1807
1808         if (val < 0)
1809                 return -EINVAL;
1810
1811         hpb->params.requeue_timeout_ms = val;
1812
1813         return count;
1814 }
1815 static DEVICE_ATTR_RW(requeue_timeout_ms);
1816
1817 ufshpb_sysfs_param_show_func(activation_thld);
1818 static ssize_t
1819 activation_thld_store(struct device *dev, struct device_attribute *attr,
1820                       const char *buf, size_t count)
1821 {
1822         struct scsi_device *sdev = to_scsi_device(dev);
1823         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1824         int val;
1825
1826         if (!hpb)
1827                 return -ENODEV;
1828
1829         if (!hpb->is_hcm)
1830                 return -EOPNOTSUPP;
1831
1832         if (kstrtouint(buf, 0, &val))
1833                 return -EINVAL;
1834
1835         if (val <= 0)
1836                 return -EINVAL;
1837
1838         hpb->params.activation_thld = val;
1839
1840         return count;
1841 }
1842 static DEVICE_ATTR_RW(activation_thld);
1843
1844 ufshpb_sysfs_param_show_func(normalization_factor);
1845 static ssize_t
1846 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1847                            const char *buf, size_t count)
1848 {
1849         struct scsi_device *sdev = to_scsi_device(dev);
1850         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1851         int val;
1852
1853         if (!hpb)
1854                 return -ENODEV;
1855
1856         if (!hpb->is_hcm)
1857                 return -EOPNOTSUPP;
1858
1859         if (kstrtouint(buf, 0, &val))
1860                 return -EINVAL;
1861
1862         if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1863                 return -EINVAL;
1864
1865         hpb->params.normalization_factor = val;
1866
1867         return count;
1868 }
1869 static DEVICE_ATTR_RW(normalization_factor);
1870
1871 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1872 static ssize_t
1873 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1874                           const char *buf, size_t count)
1875 {
1876         struct scsi_device *sdev = to_scsi_device(dev);
1877         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1878         int val;
1879
1880         if (!hpb)
1881                 return -ENODEV;
1882
1883         if (!hpb->is_hcm)
1884                 return -EOPNOTSUPP;
1885
1886         if (kstrtouint(buf, 0, &val))
1887                 return -EINVAL;
1888
1889         if (val <= hpb->params.eviction_thld_exit)
1890                 return -EINVAL;
1891
1892         hpb->params.eviction_thld_enter = val;
1893
1894         return count;
1895 }
1896 static DEVICE_ATTR_RW(eviction_thld_enter);
1897
1898 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1899 static ssize_t
1900 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1901                          const char *buf, size_t count)
1902 {
1903         struct scsi_device *sdev = to_scsi_device(dev);
1904         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1905         int val;
1906
1907         if (!hpb)
1908                 return -ENODEV;
1909
1910         if (!hpb->is_hcm)
1911                 return -EOPNOTSUPP;
1912
1913         if (kstrtouint(buf, 0, &val))
1914                 return -EINVAL;
1915
1916         if (val <= hpb->params.activation_thld)
1917                 return -EINVAL;
1918
1919         hpb->params.eviction_thld_exit = val;
1920
1921         return count;
1922 }
1923 static DEVICE_ATTR_RW(eviction_thld_exit);
1924
1925 ufshpb_sysfs_param_show_func(read_timeout_ms);
1926 static ssize_t
1927 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1928                       const char *buf, size_t count)
1929 {
1930         struct scsi_device *sdev = to_scsi_device(dev);
1931         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1932         int val;
1933
1934         if (!hpb)
1935                 return -ENODEV;
1936
1937         if (!hpb->is_hcm)
1938                 return -EOPNOTSUPP;
1939
1940         if (kstrtouint(buf, 0, &val))
1941                 return -EINVAL;
1942
1943         /* read_timeout must be much greater than timeout_polling_interval */
1944         if (val < hpb->params.timeout_polling_interval_ms * 2)
1945                 return -EINVAL;
1946
1947         hpb->params.read_timeout_ms = val;
1948
1949         return count;
1950 }
1951 static DEVICE_ATTR_RW(read_timeout_ms);
1952
1953 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1954 static ssize_t
1955 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1956                             const char *buf, size_t count)
1957 {
1958         struct scsi_device *sdev = to_scsi_device(dev);
1959         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1960         int val;
1961
1962         if (!hpb)
1963                 return -ENODEV;
1964
1965         if (!hpb->is_hcm)
1966                 return -EOPNOTSUPP;
1967
1968         if (kstrtouint(buf, 0, &val))
1969                 return -EINVAL;
1970
1971         if (val <= 0)
1972                 return -EINVAL;
1973
1974         hpb->params.read_timeout_expiries = val;
1975
1976         return count;
1977 }
1978 static DEVICE_ATTR_RW(read_timeout_expiries);
1979
1980 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1981 static ssize_t
1982 timeout_polling_interval_ms_store(struct device *dev,
1983                                   struct device_attribute *attr,
1984                                   const char *buf, size_t count)
1985 {
1986         struct scsi_device *sdev = to_scsi_device(dev);
1987         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1988         int val;
1989
1990         if (!hpb)
1991                 return -ENODEV;
1992
1993         if (!hpb->is_hcm)
1994                 return -EOPNOTSUPP;
1995
1996         if (kstrtouint(buf, 0, &val))
1997                 return -EINVAL;
1998
1999         /* timeout_polling_interval must be much smaller than read_timeout */
2000         if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
2001                 return -EINVAL;
2002
2003         hpb->params.timeout_polling_interval_ms = val;
2004
2005         return count;
2006 }
2007 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
2008
2009 ufshpb_sysfs_param_show_func(inflight_map_req);
2010 static ssize_t inflight_map_req_store(struct device *dev,
2011                                       struct device_attribute *attr,
2012                                       const char *buf, size_t count)
2013 {
2014         struct scsi_device *sdev = to_scsi_device(dev);
2015         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2016         int val;
2017
2018         if (!hpb)
2019                 return -ENODEV;
2020
2021         if (!hpb->is_hcm)
2022                 return -EOPNOTSUPP;
2023
2024         if (kstrtouint(buf, 0, &val))
2025                 return -EINVAL;
2026
2027         if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
2028                 return -EINVAL;
2029
2030         hpb->params.inflight_map_req = val;
2031
2032         return count;
2033 }
2034 static DEVICE_ATTR_RW(inflight_map_req);
2035
2036 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2037 {
2038         hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2039         hpb->params.normalization_factor = 1;
2040         hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2041         hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2042         hpb->params.read_timeout_ms = READ_TO_MS;
2043         hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2044         hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2045         hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2046 }
2047
2048 static struct attribute *hpb_dev_param_attrs[] = {
2049         &dev_attr_requeue_timeout_ms.attr,
2050         &dev_attr_activation_thld.attr,
2051         &dev_attr_normalization_factor.attr,
2052         &dev_attr_eviction_thld_enter.attr,
2053         &dev_attr_eviction_thld_exit.attr,
2054         &dev_attr_read_timeout_ms.attr,
2055         &dev_attr_read_timeout_expiries.attr,
2056         &dev_attr_timeout_polling_interval_ms.attr,
2057         &dev_attr_inflight_map_req.attr,
2058         NULL,
2059 };
2060
2061 struct attribute_group ufs_sysfs_hpb_param_group = {
2062         .name = "hpb_params",
2063         .attrs = hpb_dev_param_attrs,
2064 };
2065
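/*
 * Pre-allocate a pool of pre-request descriptors (half the LU queue depth),
 * each with a bio and one zeroed page used as the write-buffer payload.
 * The descriptors are kept on lh_pre_req_free and handed out from the I/O
 * path; on allocation failure everything set up so far is released again.
 */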
2066 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2067 {
2068         struct ufshpb_req *pre_req = NULL, *t;
2069         int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2070         int i;
2071
2072         INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2073
2074         hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2075         hpb->throttle_pre_req = qd;
2076         hpb->num_inflight_pre_req = 0;
2077
2078         if (!hpb->pre_req)
2079                 goto release_mem;
2080
2081         for (i = 0; i < qd; i++) {
2082                 pre_req = hpb->pre_req + i;
2083                 INIT_LIST_HEAD(&pre_req->list_req);
2084                 pre_req->req = NULL;
2085
2086                 pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
2087                 if (!pre_req->bio)
2088                         goto release_mem;
2089
2090                 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2091                 if (!pre_req->wb.m_page) {
2092                         bio_put(pre_req->bio);
2093                         goto release_mem;
2094                 }
2095
2096                 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2097         }
2098
2099         return 0;
2100 release_mem:
2101         list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2102                 list_del_init(&pre_req->list_req);
2103                 bio_put(pre_req->bio);
2104                 __free_page(pre_req->wb.m_page);
2105         }
2106
2107         kfree(hpb->pre_req);
2108         return -ENOMEM;
2109 }
2110
2111 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2112 {
2113         struct ufshpb_req *pre_req = NULL;
2114         int i;
2115
2116         for (i = 0; i < hpb->throttle_pre_req; i++) {
2117                 pre_req = hpb->pre_req + i;
2118                 bio_put(hpb->pre_req[i].bio);
2119                 if (pre_req->wb.m_page)
2120                         __free_page(hpb->pre_req[i].wb.m_page);
2121                 list_del_init(&pre_req->list_req);
2122         }
2123
2124         kfree(hpb->pre_req);
2125 }
2126
2127 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2128 {
2129         hpb->stats.hit_cnt = 0;
2130         hpb->stats.miss_cnt = 0;
2131         hpb->stats.rcmd_noti_cnt = 0;
2132         hpb->stats.rcmd_active_cnt = 0;
2133         hpb->stats.rcmd_inactive_cnt = 0;
2134         hpb->stats.map_req_cnt = 0;
2135         hpb->stats.umap_req_cnt = 0;
2136 }
2137
2138 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2139 {
2140         hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2141         if (hpb->is_hcm)
2142                 ufshpb_hcm_param_init(hpb);
2143 }
2144
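/*
 * Per-LU HPB initialization: set up locks, lists and work items, create the
 * slab caches for map requests and map-page arrays, build the pre-request
 * pool and the region table, reset statistics and parameters, and, in host
 * control mode, start the read-timeout polling work.
 */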
2145 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2146 {
2147         int ret;
2148
2149         spin_lock_init(&hpb->rgn_state_lock);
2150         spin_lock_init(&hpb->rsp_list_lock);
2151         spin_lock_init(&hpb->param_lock);
2152
2153         INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2154         INIT_LIST_HEAD(&hpb->lh_act_srgn);
2155         INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2156         INIT_LIST_HEAD(&hpb->list_hpb_lu);
2157
2158         INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2159         if (hpb->is_hcm) {
2160                 INIT_WORK(&hpb->ufshpb_normalization_work,
2161                           ufshpb_normalization_work_handler);
2162                 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2163                                   ufshpb_read_to_handler);
2164         }
2165
2166         hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2167                           sizeof(struct ufshpb_req), 0, 0, NULL);
2168         if (!hpb->map_req_cache) {
2169                 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2170                         hpb->lun);
2171                 return -ENOMEM;
2172         }
2173
2174         hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2175                           sizeof(struct page *) * hpb->pages_per_srgn,
2176                           0, 0, NULL);
2177         if (!hpb->m_page_cache) {
2178                 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2179                         hpb->lun);
2180                 ret = -ENOMEM;
2181                 goto release_req_cache;
2182         }
2183
2184         ret = ufshpb_pre_req_mempool_init(hpb);
2185         if (ret) {
2186                 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2187                         hpb->lun);
2188                 goto release_m_page_cache;
2189         }
2190
2191         ret = ufshpb_alloc_region_tbl(hba, hpb);
2192         if (ret)
2193                 goto release_pre_req_mempool;
2194
2195         ufshpb_stat_init(hpb);
2196         ufshpb_param_init(hpb);
2197
2198         if (hpb->is_hcm) {
2199                 unsigned int poll;
2200
2201                 poll = hpb->params.timeout_polling_interval_ms;
2202                 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2203                                       msecs_to_jiffies(poll));
2204         }
2205
2206         return 0;
2207
2208 release_pre_req_mempool:
2209         ufshpb_pre_req_mempool_destroy(hpb);
2210 release_m_page_cache:
2211         kmem_cache_destroy(hpb->m_page_cache);
2212 release_req_cache:
2213         kmem_cache_destroy(hpb->map_req_cache);
2214         return ret;
2215 }
2216
2217 static struct ufshpb_lu *
2218 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2219                     struct ufshpb_dev_info *hpb_dev_info,
2220                     struct ufshpb_lu_info *hpb_lu_info)
2221 {
2222         struct ufshpb_lu *hpb;
2223         int ret;
2224
2225         hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2226         if (!hpb)
2227                 return NULL;
2228
2229         hpb->lun = sdev->lun;
2230         hpb->sdev_ufs_lu = sdev;
2231
2232         ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2233
2234         ret = ufshpb_lu_hpb_init(hba, hpb);
2235         if (ret) {
2236                 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2237                 goto release_hpb;
2238         }
2239
2240         sdev->hostdata = hpb;
2241         return hpb;
2242
2243 release_hpb:
2244         kfree(hpb);
2245         return NULL;
2246 }
2247
2248 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2249 {
2250         struct ufshpb_region *rgn, *next_rgn;
2251         struct ufshpb_subregion *srgn, *next_srgn;
2252         unsigned long flags;
2253
2254         /*
2255          * If a device reset occurred, the remaining HPB region information
2256          * may be stale. Discard the HPB response lists left over from before
2257          * the reset so that no unnecessary work is done on them.
2258          */
2259         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2260         list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2261                                  list_inact_rgn)
2262                 list_del_init(&rgn->list_inact_rgn);
2263
2264         list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2265                                  list_act_srgn)
2266                 list_del_init(&srgn->list_act_srgn);
2267         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2268 }
2269
2270 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2271 {
2272         if (hpb->is_hcm) {
2273                 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2274                 cancel_work_sync(&hpb->ufshpb_normalization_work);
2275         }
2276         cancel_work_sync(&hpb->map_work);
2277 }
2278
2279 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2280 {
2281         int err = 0;
2282         bool flag_res = true;
2283         int try;
2284
2285         /* wait for the device to complete HPB reset query */
2286         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2287                 dev_dbg(hba->dev,
2288                         "%s start flag reset polling %d times\n",
2289                         __func__, try);
2290
2291                 /* Poll the fHpbReset flag until the device clears it */
2292                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2293                                 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2294
2295                 if (err) {
2296                         dev_err(hba->dev,
2297                                 "%s reading fHpbReset flag failed with error %d\n",
2298                                 __func__, err);
2299                         return flag_res;
2300                 }
2301
2302                 if (!flag_res)
2303                         goto out;
2304
2305                 usleep_range(1000, 1100);
2306         }
2307         if (flag_res) {
2308                 dev_err(hba->dev,
2309                         "%s fHpbReset was not cleared by the device\n",
2310                         __func__);
2311         }
2312 out:
2313         return flag_res;
2314 }
2315
2316 /**
2317  * ufshpb_toggle_state - switch HPB state of all LUs
2318  * @hba: per-adapter instance
2319  * @src: expected current HPB state
2320  * @dest: target HPB state to switch to
2321  */
2322 void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest)
2323 {
2324         struct ufshpb_lu *hpb;
2325         struct scsi_device *sdev;
2326
2327         shost_for_each_device(sdev, hba->host) {
2328                 hpb = ufshpb_get_hpb_data(sdev);
2329
2330                 if (!hpb || ufshpb_get_state(hpb) != src)
2331                         continue;
2332                 ufshpb_set_state(hpb, dest);
2333
2334                 if (dest == HPB_RESET) {
2335                         ufshpb_cancel_jobs(hpb);
2336                         ufshpb_discard_rsp_lists(hpb);
2337                 }
2338         }
2339 }
2340
2341 void ufshpb_suspend(struct ufs_hba *hba)
2342 {
2343         struct ufshpb_lu *hpb;
2344         struct scsi_device *sdev;
2345
2346         shost_for_each_device(sdev, hba->host) {
2347                 hpb = ufshpb_get_hpb_data(sdev);
2348                 if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT)
2349                         continue;
2350
2351                 ufshpb_set_state(hpb, HPB_SUSPEND);
2352                 ufshpb_cancel_jobs(hpb);
2353         }
2354 }
2355
2356 void ufshpb_resume(struct ufs_hba *hba)
2357 {
2358         struct ufshpb_lu *hpb;
2359         struct scsi_device *sdev;
2360
2361         shost_for_each_device(sdev, hba->host) {
2362                 hpb = ufshpb_get_hpb_data(sdev);
2363                 if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND)
2364                         continue;
2365
2366                 ufshpb_set_state(hpb, HPB_PRESENT);
2367                 ufshpb_kick_map_work(hpb);
2368                 if (hpb->is_hcm) {
2369                         unsigned int poll = hpb->params.timeout_polling_interval_ms;
2370
2371                         schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll));
2372                 }
2373         }
2374 }
2375
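/*
 * Read the unit descriptor for @lun and extract the HPB related fields:
 * whether HPB is enabled on the LU, the logical block count, the pinned
 * region range and the maximum number of active regions.
 */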
2376 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2377                               struct ufshpb_lu_info *hpb_lu_info)
2378 {
2379         u16 max_active_rgns;
2380         u8 lu_enable;
2381         int size;
2382         int ret;
2383         char desc_buf[QUERY_DESC_MAX_SIZE];
2384
2385         ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2386
2387         ufshcd_rpm_get_sync(hba);
2388         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2389                                             QUERY_DESC_IDN_UNIT, lun, 0,
2390                                             desc_buf, &size);
2391         ufshcd_rpm_put_sync(hba);
2392
2393         if (ret) {
2394                 dev_err(hba->dev,
2395                         "%s: idn: %d lun: %d  query request failed",
2396                         __func__, QUERY_DESC_IDN_UNIT, lun);
2397                 return ret;
2398         }
2399
2400         lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2401         if (lu_enable != LU_ENABLED_HPB_FUNC)
2402                 return -ENODEV;
2403
2404         max_active_rgns = get_unaligned_be16(
2405                         desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2406         if (!max_active_rgns) {
2407                 dev_err(hba->dev,
2408                         "lun %d wrong number of max active regions\n", lun);
2409                 return -ENODEV;
2410         }
2411
2412         hpb_lu_info->num_blocks = get_unaligned_be64(
2413                         desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2414         hpb_lu_info->pinned_start = get_unaligned_be16(
2415                         desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2416         hpb_lu_info->num_pinned = get_unaligned_be16(
2417                         desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2418         hpb_lu_info->max_active_rgns = max_active_rgns;
2419
2420         return 0;
2421 }
2422
2423 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2424 {
2425         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2426
2427         if (!hpb)
2428                 return;
2429
2430         ufshpb_set_state(hpb, HPB_FAILED);
2431
2432         sdev = hpb->sdev_ufs_lu;
2433         sdev->hostdata = NULL;
2434
2435         ufshpb_cancel_jobs(hpb);
2436
2437         ufshpb_pre_req_mempool_destroy(hpb);
2438         ufshpb_destroy_region_tbl(hpb);
2439
2440         kmem_cache_destroy(hpb->map_req_cache);
2441         kmem_cache_destroy(hpb->m_page_cache);
2442
2443         list_del_init(&hpb->list_hpb_lu);
2444
2445         kfree(hpb);
2446 }
2447
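/*
 * Called once every LU has gone through ufshpb_init_hpb_lu(). If no LU ended
 * up using HPB, tear everything down. Otherwise wait for the device to finish
 * the fHpbReset query, shrink the global mempools to the number of pages
 * actually needed for active subregions, and either mark each LU HPB_PRESENT
 * (queueing map work for LUs with a pinned region range) or destroy it when
 * the reset did not complete.
 */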
2448 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2449 {
2450         int pool_size;
2451         struct ufshpb_lu *hpb;
2452         struct scsi_device *sdev;
2453         bool init_success;
2454
2455         if (tot_active_srgn_pages == 0) {
2456                 ufshpb_remove(hba);
2457                 return;
2458         }
2459
2460         init_success = !ufshpb_check_hpb_reset_query(hba);
2461
2462         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2463         if (pool_size > tot_active_srgn_pages) {
2464                 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2465                 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2466         }
2467
2468         shost_for_each_device(sdev, hba->host) {
2469                 hpb = ufshpb_get_hpb_data(sdev);
2470                 if (!hpb)
2471                         continue;
2472
2473                 if (init_success) {
2474                         ufshpb_set_state(hpb, HPB_PRESENT);
2475                         if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2476                                 queue_work(ufshpb_wq, &hpb->map_work);
2477                 } else {
2478                         dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2479                         ufshpb_destroy_lu(hba, sdev);
2480                 }
2481         }
2482
2483         if (!init_success)
2484                 ufshpb_remove(hba);
2485 }
2486
2487 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2488 {
2489         struct ufshpb_lu *hpb;
2490         int ret;
2491         struct ufshpb_lu_info hpb_lu_info = { 0 };
2492         int lun = sdev->lun;
2493
2494         if (lun >= hba->dev_info.max_lu_supported)
2495                 goto out;
2496
2497         ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2498         if (ret)
2499                 goto out;
2500
2501         hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2502                                   &hpb_lu_info);
2503         if (!hpb)
2504                 goto out;
2505
2506         tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2507                         hpb->srgns_per_rgn * hpb->pages_per_srgn;
2508
2509 out:
2510         /* Once all LUs have been initialized, complete the HPB setup */
2511         if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2512                 ufshpb_hpb_lu_prepared(hba);
2513 }
2514
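/*
 * Set up the module-wide resources shared by all HPB LUs: the map-context
 * slab cache, the map-context and map-page mempools (sized from the
 * ufshpb_host_map_kbytes module parameter) and the unbound workqueue that
 * runs the map work.
 */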
2515 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2516 {
2517         int ret;
2518         unsigned int pool_size;
2519
2520         ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2521                                         sizeof(struct ufshpb_map_ctx),
2522                                         0, 0, NULL);
2523         if (!ufshpb_mctx_cache) {
2524                 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2525                 return -ENOMEM;
2526         }
2527
2528         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2529         dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2530                __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2531
2532         ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2533                                                     ufshpb_mctx_cache);
2534         if (!ufshpb_mctx_pool) {
2535                 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2536                 ret = -ENOMEM;
2537                 goto release_mctx_cache;
2538         }
2539
2540         ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2541         if (!ufshpb_page_pool) {
2542                 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2543                 ret = -ENOMEM;
2544                 goto release_mctx_pool;
2545         }
2546
2547         ufshpb_wq = alloc_workqueue("ufshpb-wq",
2548                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2549         if (!ufshpb_wq) {
2550                 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2551                 ret = -ENOMEM;
2552                 goto release_page_pool;
2553         }
2554
2555         return 0;
2556
2557 release_page_pool:
2558         mempool_destroy(ufshpb_page_pool);
2559 release_mctx_pool:
2560         mempool_destroy(ufshpb_mctx_pool);
2561 release_mctx_cache:
2562         kmem_cache_destroy(ufshpb_mctx_cache);
2563         return ret;
2564 }
2565
2566 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2567 {
2568         struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2569         int max_active_rgns = 0;
2570         int hpb_num_lu;
2571
2572         hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2573         if (hpb_num_lu == 0) {
2574                 dev_err(hba->dev, "No HPB LU supported\n");
2575                 hpb_info->hpb_disabled = true;
2576                 return;
2577         }
2578
2579         hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2580         hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2581         max_active_rgns = get_unaligned_be16(geo_buf +
2582                           GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2583
2584         if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2585             max_active_rgns == 0) {
2586                 dev_err(hba->dev, "No HPB supported device\n");
2587                 hpb_info->hpb_disabled = true;
2588                 return;
2589         }
2590 }
2591
2592 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2593 {
2594         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2595         int version, ret;
2596         int max_single_cmd;
2597
2598         hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2599
2600         version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2601         if ((version != HPB_SUPPORT_VERSION) &&
2602             (version != HPB_SUPPORT_LEGACY_VERSION)) {
2603                 dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2604                         __func__, version);
2605                 hpb_dev_info->hpb_disabled = true;
2606                 return;
2607         }
2608
2609         if (version == HPB_SUPPORT_LEGACY_VERSION)
2610                 hpb_dev_info->is_legacy = true;
2611
2612         /*
2613          * Get the number of user logical units so that we can check whether
2614          * all scsi_device instances have finished initialization.
2615          */
2616         hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2617
2618         if (hpb_dev_info->is_legacy)
2619                 return;
2620
2621         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2622                 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2623
2624         if (ret)
2625                 hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2626         else
2627                 hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2628 }
2629
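/*
 * Called during host probing after the device descriptors have been read:
 * if HPB is supported and enabled, allocate the module-wide memory resources
 * and workqueue, record how many LUs still have to be configured, and ask
 * the device to rebuild its HPB state by setting the fHpbReset flag.
 */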
2630 void ufshpb_init(struct ufs_hba *hba)
2631 {
2632         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2633         int try;
2634         int ret;
2635
2636         if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2637                 return;
2638
2639         if (ufshpb_init_mem_wq(hba)) {
2640                 hpb_dev_info->hpb_disabled = true;
2641                 return;
2642         }
2643
2644         atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2645         tot_active_srgn_pages = 0;
2646         /* issue HPB reset query */
2647         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2648                 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2649                                         QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2650                 if (!ret)
2651                         break;
2652         }
2653 }
2654
2655 void ufshpb_remove(struct ufs_hba *hba)
2656 {
2657         mempool_destroy(ufshpb_page_pool);
2658         mempool_destroy(ufshpb_mctx_pool);
2659         kmem_cache_destroy(ufshpb_mctx_cache);
2660
2661         destroy_workqueue(ufshpb_wq);
2662 }
2663
2664 module_param(ufshpb_host_map_kbytes, uint, 0644);
2665 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2666         "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");