drivers/ufs/core/ufshpb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Universal Flash Storage Host Performance Booster
 *
 * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *      Yongmyung Lee <ymhungry.lee@samsung.com>
 *      Jinyoung Choi <j-young.choi@samsung.com>
 */

#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <scsi/scsi_cmnd.h>

#include "ufshcd-priv.h"
#include "ufshpb.h"
#include "../../scsi/sd.h"

#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
#define READ_TO_MS 1000
#define READ_TO_EXPIRIES 100
#define POLLING_INTERVAL_MS 200
#define THROTTLE_MAP_REQ_DEFAULT 1
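
/*
 * Illustrative note, derived from how ufshpb_read_to_handler() below uses
 * these defaults (not from the HPB spec): in host control mode a region
 * that stops being read survives at most about READ_TO_MS *
 * READ_TO_EXPIRIES = 1000 ms * 100 = 100 seconds, since each timer expiry
 * decrements read_timeout_expiries and the region is queued for
 * inactivation once it is dirty or the count reaches zero.
 */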

/* memory management */
static struct kmem_cache *ufshpb_mctx_cache;
static mempool_t *ufshpb_mctx_pool;
static mempool_t *ufshpb_page_pool;
/* A 2MB cache can hold PPN entries covering a 1GB range. */
static unsigned int ufshpb_host_map_kbytes = 2048;
static int tot_active_srgn_pages;
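
/*
 * Worked example for the 2MB figure above (a sketch assuming 8-byte HPB
 * entries and 4KB logical blocks): 2048 KB / 8 bytes per PPN entry =
 * 262144 cached entries, and 262144 entries * 4 KB per logical block =
 * 1 GB of covered LBA range.
 */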

static struct workqueue_struct *ufshpb_wq;

static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
                                      int srgn_idx);

bool ufshpb_is_allowed(struct ufs_hba *hba)
{
        return !(hba->ufshpb_dev.hpb_disabled);
}

/* HPB version 1.0 is called the legacy version. */
bool ufshpb_is_legacy(struct ufs_hba *hba)
{
        return hba->ufshpb_dev.is_legacy;
}

static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
{
        return sdev->hostdata;
}

static int ufshpb_get_state(struct ufshpb_lu *hpb)
{
        return atomic_read(&hpb->hpb_state);
}

static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
{
        atomic_set(&hpb->hpb_state, state);
}

static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
                                struct ufshpb_subregion *srgn)
{
        return rgn->rgn_state != HPB_RGN_INACTIVE &&
                srgn->srgn_state == HPB_SRGN_VALID;
}

static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
{
        return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
}

static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
{
        return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
               op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
}

static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
{
        return transfer_len <= hpb->pre_req_max_tr_len;
}

static bool ufshpb_is_general_lun(int lun)
{
        return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
}

static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
{
        return hpb->lu_pinned_end != PINNED_NOT_SET &&
               rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end;
}

static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
{
        bool ret = false;
        unsigned long flags;

        if (ufshpb_get_state(hpb) != HPB_PRESENT)
                return;

        spin_lock_irqsave(&hpb->rsp_list_lock, flags);
        if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
                ret = true;
        spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);

        if (ret)
                queue_work(ufshpb_wq, &hpb->map_work);
}

static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
                                    struct ufshcd_lrb *lrbp,
                                    struct utp_hpb_rsp *rsp_field)
{
        /* Check HPB_UPDATE_ALERT */
        if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
              UPIU_HEADER_DWORD(0, 2, 0, 0)))
                return false;

        if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
            rsp_field->desc_type != DEV_DES_TYPE ||
            rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
            rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
            rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
            rsp_field->hpb_op == HPB_RSP_NONE ||
            (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
             !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
                return false;

        if (!ufshpb_is_general_lun(rsp_field->lun)) {
                dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
                         lrbp->lun);
                return false;
        }

        return true;
}

static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
                               int srgn_offset, int cnt, bool set_dirty)
{
        struct ufshpb_region *rgn;
        struct ufshpb_subregion *srgn, *prev_srgn = NULL;
        int set_bit_len;
        int bitmap_len;
        unsigned long flags;

next_srgn:
        rgn = hpb->rgn_tbl + rgn_idx;
        srgn = rgn->srgn_tbl + srgn_idx;

        if (likely(!srgn->is_last))
                bitmap_len = hpb->entries_per_srgn;
        else
                bitmap_len = hpb->last_srgn_entries;

        if ((srgn_offset + cnt) > bitmap_len)
                set_bit_len = bitmap_len - srgn_offset;
        else
                set_bit_len = cnt;

        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
        if (rgn->rgn_state != HPB_RGN_INACTIVE) {
                if (set_dirty) {
                        if (srgn->srgn_state == HPB_SRGN_VALID)
                                bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
                                           set_bit_len);
                } else if (hpb->is_hcm) {
                        /* rewind the read timer for LRU regions */
                        rgn->read_timeout = ktime_add_ms(ktime_get(),
                                        rgn->hpb->params.read_timeout_ms);
                        rgn->read_timeout_expiries =
                                rgn->hpb->params.read_timeout_expiries;
                }
        }
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

        if (hpb->is_hcm && prev_srgn != srgn) {
                bool activate = false;

                spin_lock(&rgn->rgn_lock);
                if (set_dirty) {
                        rgn->reads -= srgn->reads;
                        srgn->reads = 0;
                        set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
                } else {
                        srgn->reads++;
                        rgn->reads++;
                        if (srgn->reads == hpb->params.activation_thld)
                                activate = true;
                }
                spin_unlock(&rgn->rgn_lock);

                if (activate ||
                    test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
                        spin_lock_irqsave(&hpb->rsp_list_lock, flags);
                        ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
                        spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
                        dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
                                "activate region %d-%d\n", rgn_idx, srgn_idx);
                }

                prev_srgn = srgn;
        }

        srgn_offset = 0;
        if (++srgn_idx == hpb->srgns_per_rgn) {
                srgn_idx = 0;
                rgn_idx++;
        }

        cnt -= set_bit_len;
        if (cnt > 0)
                goto next_srgn;
}
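
/*
 * Illustrative walk-through of ufshpb_iterate_rgn() under a hypothetical
 * geometry (entries_per_srgn = 4096, srgns_per_rgn = 4): a dirtying write
 * with srgn_offset = 1000 and cnt = 6000 sets bits [1000, 4095] in the
 * first subregion (set_bit_len = 3096), then loops with srgn_offset = 0
 * and cnt = 2904 to set bits [0, 2903] in the next subregion, moving on
 * to the next region whenever srgn_idx wraps around srgns_per_rgn.
 */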

static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
                                  int srgn_idx, int srgn_offset, int cnt)
{
        struct ufshpb_region *rgn;
        struct ufshpb_subregion *srgn;
        int bitmap_len;
        int bit_len;

next_srgn:
        rgn = hpb->rgn_tbl + rgn_idx;
        srgn = rgn->srgn_tbl + srgn_idx;

        if (likely(!srgn->is_last))
                bitmap_len = hpb->entries_per_srgn;
        else
                bitmap_len = hpb->last_srgn_entries;

        if (!ufshpb_is_valid_srgn(rgn, srgn))
                return true;

        /*
         * If the region state is active, mctx must be allocated.
         * In this case, check whether the region was evicted or
         * the mctx allocation failed.
         */
        if (unlikely(!srgn->mctx)) {
                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                        "no mctx in region %d subregion %d.\n",
                        srgn->rgn_idx, srgn->srgn_idx);
                return true;
        }

        if ((srgn_offset + cnt) > bitmap_len)
                bit_len = bitmap_len - srgn_offset;
        else
                bit_len = cnt;

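        /*
         * Scan [srgn_offset, srgn_offset + bit_len) for any dirty PPN
         * entry; find_next_bit() returns its size argument when no bit
         * is set in that window.
         */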
        if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
                          srgn_offset) < bit_len + srgn_offset)
                return true;

        srgn_offset = 0;
        if (++srgn_idx == hpb->srgns_per_rgn) {
                srgn_idx = 0;
                rgn_idx++;
        }

        cnt -= bit_len;
        if (cnt > 0)
                goto next_srgn;

        return false;
}

static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
{
        return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
}

static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
                                     struct ufshpb_map_ctx *mctx, int pos,
                                     int len, __be64 *ppn_buf)
{
        struct page *page;
        int index, offset;
        int copied;

        index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
        offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);

        if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
                copied = len;
        else
                copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;

        page = mctx->m_page[index];
        if (unlikely(!page)) {
                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                        "error. cannot find page in mctx\n");
                return -ENOMEM;
        }

        memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
               copied * HPB_ENTRY_SIZE);

        return copied;
}
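
/*
 * Worked example for ufshpb_fill_ppn_from_page(), assuming 4 KB pages and
 * 8-byte entries (512 entries per page): pos = 1030 selects m_page[2]
 * (1030 / 512) at entry offset 6 (1030 % 512); a request for len = 600
 * entries is clamped to copied = 512 - 6 = 506 entries from that page.
 */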

static void
ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
                        int *srgn_idx, int *offset)
{
        int rgn_offset;

        *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
        rgn_offset = lpn & hpb->entries_per_rgn_mask;
        *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
        *offset = rgn_offset & hpb->entries_per_srgn_mask;
}
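
/*
 * Example decomposition under a hypothetical geometry (entries_per_rgn =
 * 16384, entries_per_srgn = 4096, i.e. shifts of 14 and 12): lpn = 50000
 * yields rgn_idx = 50000 >> 14 = 3, rgn_offset = 50000 & 16383 = 848,
 * srgn_idx = 848 >> 12 = 0 and offset = 848 & 4095 = 848.
 */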

static void
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
                            __be64 ppn, u8 transfer_len)
{
        unsigned char *cdb = lrbp->cmd->cmnd;
        __be64 ppn_tmp = ppn;

        cdb[0] = UFSHPB_READ;

        if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
                ppn_tmp = (__force __be64)swab64((__force u64)ppn);

        /* ppn value is stored as big-endian in the host memory */
        memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
        cdb[14] = transfer_len;
        cdb[15] = 0;

        lrbp->cmd->cmd_len = UFS_CDB_SIZE;
}
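
/*
 * Sketch of the resulting 16-byte CDB (bytes not written here, such as
 * the LBA field the SCSI disk driver already filled in for the original
 * READ, are left untouched):
 *   cdb[0]      UFSHPB_READ opcode
 *   cdb[6..13]  PPN entry, big-endian (byte-swapped on quirky devices)
 *   cdb[14]     TRANSFER LENGTH, cdb[15] = 0 (control byte)
 */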

/*
 * This function sets up an HPB READ command using host-side L2P map data.
 */
int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
        struct ufshpb_lu *hpb;
        struct ufshpb_region *rgn;
        struct ufshpb_subregion *srgn;
        struct scsi_cmnd *cmd = lrbp->cmd;
        u32 lpn;
        __be64 ppn;
        unsigned long flags;
        int transfer_len, rgn_idx, srgn_idx, srgn_offset;
        int err = 0;

        hpb = ufshpb_get_hpb_data(cmd->device);
        if (!hpb)
                return -ENODEV;

        if (ufshpb_get_state(hpb) == HPB_INIT)
                return -ENODEV;

        if (ufshpb_get_state(hpb) != HPB_PRESENT) {
                dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
                           "%s: ufshpb state is not PRESENT", __func__);
                return -ENODEV;
        }

        if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
            (!ufshpb_is_write_or_discard(cmd) &&
             !ufshpb_is_read_cmd(cmd)))
                return 0;

        transfer_len = sectors_to_logical(cmd->device,
                                          blk_rq_sectors(scsi_cmd_to_rq(cmd)));
        if (unlikely(!transfer_len))
                return 0;

        lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
        ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
        rgn = hpb->rgn_tbl + rgn_idx;
        srgn = rgn->srgn_tbl + srgn_idx;

        /* If command type is WRITE or DISCARD, set bitmap as dirty */
        if (ufshpb_is_write_or_discard(cmd)) {
                ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
                                   transfer_len, true);
                return 0;
        }

        if (!ufshpb_is_supported_chunk(hpb, transfer_len))
                return 0;

        if (hpb->is_hcm) {
                /*
                 * in host control mode, reads are the main source for
                 * activation trials.
                 */
                ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
                                   transfer_len, false);

                /* keep those counters normalized */
                if (rgn->reads > hpb->entries_per_srgn)
                        schedule_work(&hpb->ufshpb_normalization_work);
        }

        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
        if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
                                   transfer_len)) {
                hpb->stats.miss_cnt++;
                spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
                return 0;
        }

        err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
        if (unlikely(err < 0)) {
                /*
                 * In this case, the region state is active,
                 * but the ppn table was not allocated.
                 * The ppn table must be allocated while the region
                 * is in the active state.
                 */
                dev_err(hba->dev, "get ppn failed. err %d\n", err);
                return err;
        }

        ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);

        hpb->stats.hit_cnt++;
        return 0;
}

static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
                                         int rgn_idx, enum req_opf dir,
                                         bool atomic)
{
        struct ufshpb_req *rq;
        struct request *req;
        int retries = HPB_MAP_REQ_RETRIES;

        rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
        if (!rq)
                return NULL;

retry:
        req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
                              BLK_MQ_REQ_NOWAIT);

        if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
                usleep_range(3000, 3100);
                goto retry;
        }

        if (IS_ERR(req))
                goto free_rq;

        rq->hpb = hpb;
        rq->req = req;
        rq->rb.rgn_idx = rgn_idx;

        return rq;

free_rq:
        kmem_cache_free(hpb->map_req_cache, rq);
        return NULL;
}

static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
{
        blk_mq_free_request(rq->req);
        kmem_cache_free(hpb->map_req_cache, rq);
}

static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
                                             struct ufshpb_subregion *srgn)
{
        struct ufshpb_req *map_req;
        struct bio *bio;
        unsigned long flags;

        if (hpb->is_hcm &&
            hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
                dev_info(&hpb->sdev_ufs_lu->sdev_dev,
                         "map_req throttle. inflight %d throttle %d",
                         hpb->num_inflight_map_req,
                         hpb->params.inflight_map_req);
                return NULL;
        }

        map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
        if (!map_req)
                return NULL;

        bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
        if (!bio) {
                ufshpb_put_req(hpb, map_req);
                return NULL;
        }

        map_req->bio = bio;

        map_req->rb.srgn_idx = srgn->srgn_idx;
        map_req->rb.mctx = srgn->mctx;

        spin_lock_irqsave(&hpb->param_lock, flags);
        hpb->num_inflight_map_req++;
        spin_unlock_irqrestore(&hpb->param_lock, flags);

        return map_req;
}

static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
                               struct ufshpb_req *map_req)
{
        unsigned long flags;

        bio_put(map_req->bio);
        ufshpb_put_req(hpb, map_req);

        spin_lock_irqsave(&hpb->param_lock, flags);
        hpb->num_inflight_map_req--;
        spin_unlock_irqrestore(&hpb->param_lock, flags);
}

static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
                                     struct ufshpb_subregion *srgn)
{
        struct ufshpb_region *rgn;
        u32 num_entries = hpb->entries_per_srgn;

        if (!srgn->mctx) {
                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                        "no mctx in region %d subregion %d.\n",
                        srgn->rgn_idx, srgn->srgn_idx);
                return -1;
        }

        if (unlikely(srgn->is_last))
                num_entries = hpb->last_srgn_entries;

        bitmap_zero(srgn->mctx->ppn_dirty, num_entries);

        rgn = hpb->rgn_tbl + srgn->rgn_idx;
        clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);

        return 0;
}

static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
                                      int srgn_idx)
{
        struct ufshpb_region *rgn;
        struct ufshpb_subregion *srgn;

        rgn = hpb->rgn_tbl + rgn_idx;
        srgn = rgn->srgn_tbl + srgn_idx;

        list_del_init(&rgn->list_inact_rgn);

        if (list_empty(&srgn->list_act_srgn))
                list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);

        hpb->stats.rcmd_active_cnt++;
}

static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
{
        struct ufshpb_region *rgn;
        struct ufshpb_subregion *srgn;
        int srgn_idx;

        rgn = hpb->rgn_tbl + rgn_idx;

        for_each_sub_region(rgn, srgn_idx, srgn)
                list_del_init(&srgn->list_act_srgn);

        if (list_empty(&rgn->list_inact_rgn))
                list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);

        hpb->stats.rcmd_inactive_cnt++;
}

static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
                                      struct ufshpb_subregion *srgn)
{
        struct ufshpb_region *rgn;

        /*
         * If the subregion has no mctx after the I/O for HPB_READ_BUFFER
         * has completed, the region to which the subregion belongs was
         * evicted. A region must not be evicted while its I/O is in
         * progress.
         */
        if (!srgn->mctx) {
                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                        "no mctx in region %d subregion %d.\n",
                        srgn->rgn_idx, srgn->srgn_idx);
                srgn->srgn_state = HPB_SRGN_INVALID;
                return;
        }

        rgn = hpb->rgn_tbl + srgn->rgn_idx;

        if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                        "region %d subregion %d evicted\n",
                        srgn->rgn_idx, srgn->srgn_idx);
                srgn->srgn_state = HPB_SRGN_INVALID;
                return;
        }
        srgn->srgn_state = HPB_SRGN_VALID;
}

static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
{
        struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;

        ufshpb_put_req(umap_req->hpb, umap_req);
}

static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
{
        struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
        struct ufshpb_lu *hpb = map_req->hpb;
        struct ufshpb_subregion *srgn;
        unsigned long flags;

        srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
                map_req->rb.srgn_idx;

        ufshpb_clear_dirty_bitmap(hpb, srgn);
        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
        ufshpb_activate_subregion(hpb, srgn);
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

        ufshpb_put_map_req(map_req->hpb, map_req);
}

static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
{
        cdb[0] = UFSHPB_WRITE_BUFFER;
        cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
                          UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
        if (rgn)
                put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
        cdb[9] = 0x00;
}

static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
                                    int srgn_idx, int srgn_mem_size)
{
        cdb[0] = UFSHPB_READ_BUFFER;
        cdb[1] = UFSHPB_READ_BUFFER_ID;

        put_unaligned_be16(rgn_idx, &cdb[2]);
        put_unaligned_be16(srgn_idx, &cdb[4]);
        put_unaligned_be24(srgn_mem_size, &cdb[6]);

        cdb[9] = 0x00;
}

static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
                                    struct ufshpb_req *umap_req,
                                    struct ufshpb_region *rgn)
{
        struct request *req = umap_req->req;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);

        req->timeout = 0;
        req->end_io_data = umap_req;
        req->end_io = ufshpb_umap_req_compl_fn;

        ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
        scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;

        blk_execute_rq_nowait(req, true);

        hpb->stats.umap_req_cnt++;
}

static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
                                  struct ufshpb_req *map_req, bool last)
{
        struct request_queue *q;
        struct request *req;
        struct scsi_cmnd *scmd;
        int mem_size = hpb->srgn_mem_size;
        int ret = 0;
        int i;

        q = hpb->sdev_ufs_lu->request_queue;
        for (i = 0; i < hpb->pages_per_srgn; i++) {
                ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
                                      PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                                   "bio_add_pc_page fail %d - %d\n",
                                   map_req->rb.rgn_idx, map_req->rb.srgn_idx);
                        return ret;
                }
        }

        req = map_req->req;

        blk_rq_append_bio(req, map_req->bio);

        req->end_io_data = map_req;
        req->end_io = ufshpb_map_req_compl_fn;

        if (unlikely(last))
                mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;

        scmd = blk_mq_rq_to_pdu(req);
        ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
                                map_req->rb.srgn_idx, mem_size);
        scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;

        blk_execute_rq_nowait(req, true);

        hpb->stats.map_req_cnt++;
        return 0;
}

static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
                                                 bool last)
{
        struct ufshpb_map_ctx *mctx;
        u32 num_entries = hpb->entries_per_srgn;
        int i, j;

        mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
        if (!mctx)
                return NULL;

        mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
        if (!mctx->m_page)
                goto release_mctx;

        if (unlikely(last))
                num_entries = hpb->last_srgn_entries;

        mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
        if (!mctx->ppn_dirty)
                goto release_m_page;

        for (i = 0; i < hpb->pages_per_srgn; i++) {
                mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
                if (!mctx->m_page[i]) {
                        for (j = 0; j < i; j++)
                                mempool_free(mctx->m_page[j], ufshpb_page_pool);
                        goto release_ppn_dirty;
                }
                clear_page(page_address(mctx->m_page[i]));
        }

        return mctx;

release_ppn_dirty:
        bitmap_free(mctx->ppn_dirty);
release_m_page:
        kmem_cache_free(hpb->m_page_cache, mctx->m_page);
release_mctx:
        mempool_free(mctx, ufshpb_mctx_pool);
        return NULL;
}
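
/*
 * Footprint sketch for one map context under hypothetical parameters
 * (4 KB pages, entries_per_srgn = 4096, 8-byte entries): the PPN table
 * needs 4096 * 8 / 4096 = 8 pooled pages, plus a 4096-bit (512-byte)
 * ppn_dirty bitmap and the m_page pointer array from m_page_cache.
 */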

static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
                               struct ufshpb_map_ctx *mctx)
{
        int i;

        for (i = 0; i < hpb->pages_per_srgn; i++)
                mempool_free(mctx->m_page[i], ufshpb_page_pool);

        bitmap_free(mctx->ppn_dirty);
        kmem_cache_free(hpb->m_page_cache, mctx->m_page);
        mempool_free(mctx, ufshpb_mctx_pool);
}

static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
                                          struct ufshpb_region *rgn)
{
        struct ufshpb_subregion *srgn;
        int srgn_idx;

        for_each_sub_region(rgn, srgn_idx, srgn)
                if (srgn->srgn_state == HPB_SRGN_ISSUED)
                        return -EPERM;

        return 0;
}

static void ufshpb_read_to_handler(struct work_struct *work)
{
        struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
                                             ufshpb_read_to_work.work);
        struct victim_select_info *lru_info = &hpb->lru_info;
        struct ufshpb_region *rgn, *next_rgn;
        unsigned long flags;
        unsigned int poll;
        LIST_HEAD(expired_list);

        if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
                return;

        spin_lock_irqsave(&hpb->rgn_state_lock, flags);

        list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
                                 list_lru_rgn) {
                bool timedout = ktime_after(ktime_get(), rgn->read_timeout);

                if (timedout) {
                        rgn->read_timeout_expiries--;
                        if (is_rgn_dirty(rgn) ||
                            rgn->read_timeout_expiries == 0)
                                list_add(&rgn->list_expired_rgn, &expired_list);
                        else
                                rgn->read_timeout = ktime_add_ms(ktime_get(),
                                                hpb->params.read_timeout_ms);
                }
        }

        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

        list_for_each_entry_safe(rgn, next_rgn, &expired_list,
                                 list_expired_rgn) {
                list_del_init(&rgn->list_expired_rgn);
                spin_lock_irqsave(&hpb->rsp_list_lock, flags);
                ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
                spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
        }

        ufshpb_kick_map_work(hpb);

        clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);

        poll = hpb->params.timeout_polling_interval_ms;
        schedule_delayed_work(&hpb->ufshpb_read_to_work,
                              msecs_to_jiffies(poll));
}

static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
                                struct ufshpb_region *rgn)
{
        rgn->rgn_state = HPB_RGN_ACTIVE;
        list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
        atomic_inc(&lru_info->active_cnt);
        if (rgn->hpb->is_hcm) {
                rgn->read_timeout =
                        ktime_add_ms(ktime_get(),
                                     rgn->hpb->params.read_timeout_ms);
                rgn->read_timeout_expiries =
                        rgn->hpb->params.read_timeout_expiries;
        }
}

static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
                                struct ufshpb_region *rgn)
{
        list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
}

static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
{
        struct victim_select_info *lru_info = &hpb->lru_info;
        struct ufshpb_region *rgn, *victim_rgn = NULL;

        list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
                if (ufshpb_check_srgns_issue_state(hpb, rgn))
                        continue;

                /*
                 * in host control mode, verify that the exiting region
                 * has fewer reads than the eviction exit threshold
                 */
                if (hpb->is_hcm &&
                    rgn->reads > hpb->params.eviction_thld_exit)
                        continue;

                victim_rgn = rgn;
                break;
        }

        if (!victim_rgn)
                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                        "%s: no region allocated\n",
                        __func__);

        return victim_rgn;
}

static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
                                    struct ufshpb_region *rgn)
{
        list_del_init(&rgn->list_lru_rgn);
        rgn->rgn_state = HPB_RGN_INACTIVE;
        atomic_dec(&lru_info->active_cnt);
}

static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
                                          struct ufshpb_subregion *srgn)
{
        if (srgn->srgn_state != HPB_SRGN_UNUSED) {
                ufshpb_put_map_ctx(hpb, srgn->mctx);
                srgn->srgn_state = HPB_SRGN_UNUSED;
                srgn->mctx = NULL;
        }
}

static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
                                 struct ufshpb_region *rgn,
                                 bool atomic)
{
        struct ufshpb_req *umap_req;
        int rgn_idx = rgn ? rgn->rgn_idx : 0;

        umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
        if (!umap_req)
                return -ENOMEM;

        ufshpb_execute_umap_req(hpb, umap_req, rgn);

        return 0;
}

static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
                                        struct ufshpb_region *rgn)
{
        return ufshpb_issue_umap_req(hpb, rgn, true);
}

static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
                                  struct ufshpb_region *rgn)
{
        struct victim_select_info *lru_info;
        struct ufshpb_subregion *srgn;
        int srgn_idx;

        lru_info = &hpb->lru_info;

        dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);

        ufshpb_cleanup_lru_info(lru_info, rgn);

        for_each_sub_region(rgn, srgn_idx, srgn)
                ufshpb_purge_active_subregion(hpb, srgn);
}

static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
        if (rgn->rgn_state == HPB_RGN_PINNED) {
                dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
                         "pinned region cannot drop-out. region %d\n",
                         rgn->rgn_idx);
                goto out;
        }

        if (!list_empty(&rgn->list_lru_rgn)) {
                if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
                        ret = -EBUSY;
                        goto out;
                }

                if (hpb->is_hcm) {
                        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
                        ret = ufshpb_issue_umap_single_req(hpb, rgn);
                        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
                        if (ret)
                                goto out;
                }

                __ufshpb_evict_region(hpb, rgn);
        }
out:
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
        return ret;
}

static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
                                struct ufshpb_region *rgn,
                                struct ufshpb_subregion *srgn)
{
        struct ufshpb_req *map_req;
        unsigned long flags;
        int ret;
        int err = -EAGAIN;
        bool alloc_required = false;
        enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;

        spin_lock_irqsave(&hpb->rgn_state_lock, flags);

        if (ufshpb_get_state(hpb) != HPB_PRESENT) {
                dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
                           "%s: ufshpb state is not PRESENT\n", __func__);
                goto unlock_out;
        }

        if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
            (srgn->srgn_state == HPB_SRGN_INVALID)) {
                err = 0;
                goto unlock_out;
        }

        if (srgn->srgn_state == HPB_SRGN_UNUSED)
                alloc_required = true;

        /*
         * If the subregion is already in the ISSUED state, a specific
         * event (e.g., GC or wear-leveling) occurred in the device and
         * an HPB response recommending a map load was received.
         * In that case, once the in-flight HPB_READ_BUFFER finishes,
         * the next HPB_READ_BUFFER is issued again to obtain the latest
         * map data.
         */
        if (srgn->srgn_state == HPB_SRGN_ISSUED)
                goto unlock_out;

        srgn->srgn_state = HPB_SRGN_ISSUED;
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

        if (alloc_required) {
                srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
                if (!srgn->mctx) {
                        dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                            "get map_ctx failed. region %d - %d\n",
                            rgn->rgn_idx, srgn->srgn_idx);
                        state = HPB_SRGN_UNUSED;
                        goto change_srgn_state;
                }
        }

        map_req = ufshpb_get_map_req(hpb, srgn);
        if (!map_req)
                goto change_srgn_state;

        ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
        if (ret) {
                dev_err(&hpb->sdev_ufs_lu->sdev_dev,
                           "%s: issue map_req failed: %d, region %d - %d\n",
                           __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
                goto free_map_req;
        }
        return 0;

free_map_req:
        ufshpb_put_map_req(hpb, map_req);
change_srgn_state:
        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
        srgn->srgn_state = state;
unlock_out:
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
        return err;
}

static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
{
        struct ufshpb_region *victim_rgn = NULL;
        struct victim_select_info *lru_info = &hpb->lru_info;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&hpb->rgn_state_lock, flags);
        /*
         * If the region is already on the LRU list, just move it to the
         * most-recently-used end of the list, because it is already in
         * the active state.
         */
        if (!list_empty(&rgn->list_lru_rgn)) {
                ufshpb_hit_lru_info(lru_info, rgn);
                goto out;
        }

        if (rgn->rgn_state == HPB_RGN_INACTIVE) {
                if (atomic_read(&lru_info->active_cnt) ==
                    lru_info->max_lru_active_cnt) {
                        /*
                         * If the maximum number of active regions
                         * is exceeded, evict the least recently used region.
                         * This case may occur when the device responds
                         * to the eviction information late.
                         * It is okay to evict the least recently used region,
                         * because the device can detect that the region is
                         * inactive when no HPB_READ is issued for it.
                         *
                         * in host control mode, also verify that the
                         * entering region has enough reads
                         */
                        if (hpb->is_hcm &&
                            rgn->reads < hpb->params.eviction_thld_enter) {
                                ret = -EACCES;
                                goto out;
                        }

                        victim_rgn = ufshpb_victim_lru_info(hpb);
                        if (!victim_rgn) {
                                dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
                                    "cannot get victim region %s\n",
                                    hpb->is_hcm ? "" : "error");
                                ret = -ENOMEM;
                                goto out;
                        }

                        dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
                                "LRU full (%d), choose victim %d\n",
                                atomic_read(&lru_info->active_cnt),
                                victim_rgn->rgn_idx);

                        if (hpb->is_hcm) {
                                spin_unlock_irqrestore(&hpb->rgn_state_lock,
                                                       flags);
                                ret = ufshpb_issue_umap_single_req(hpb,
                                                                victim_rgn);
                                spin_lock_irqsave(&hpb->rgn_state_lock,
                                                  flags);
                                if (ret)
                                        goto out;
                        }

                        __ufshpb_evict_region(hpb, victim_rgn);
                }

                /*
                 * When a region is added to the lru_info list_head,
                 * it is guaranteed that all of its subregions have been
                 * assigned mctx. If that failed, try to acquire mctx again
                 * without adding the region to the lru_info list_head.
                 */
                ufshpb_add_lru_info(lru_info, rgn);
        }
out:
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
        return ret;
}
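
/*
 * HCM eviction sketch, derived from the checks above with hypothetical
 * thresholds eviction_thld_enter = 30 and eviction_thld_exit = 10: a
 * region with reads = 25 is refused entry to a full LRU (-EACCES), and
 * only LRU members with reads <= 10 are considered as victims.
 */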

/**
 * ufshpb_submit_region_inactive() - submit a region to be inactivated later
 * @hpb: per-LU HPB instance
 * @region_index: the index associated with the region that will be inactivated later
 */
static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index)
{
        int subregion_index;
        struct ufshpb_region *rgn;
        struct ufshpb_subregion *srgn;

        /*
         * Remove this region from active region list and add it to inactive list
         */
        spin_lock(&hpb->rsp_list_lock);
        ufshpb_update_inactive_info(hpb, region_index);
        spin_unlock(&hpb->rsp_list_lock);

        rgn = hpb->rgn_tbl + region_index;

        /*
         * Set the subregion state to HPB_SRGN_INVALID; there will be no
         * HPB read on this subregion.
         */
        spin_lock(&hpb->rgn_state_lock);
        if (rgn->rgn_state != HPB_RGN_INACTIVE) {
                for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) {
                        srgn = rgn->srgn_tbl + subregion_index;
                        if (srgn->srgn_state == HPB_SRGN_VALID)
                                srgn->srgn_state = HPB_SRGN_INVALID;
                }
        }
        spin_unlock(&hpb->rgn_state_lock);
}

static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
                                         struct utp_hpb_rsp *rsp_field)
{
        struct ufshpb_region *rgn;
        struct ufshpb_subregion *srgn;
        int i, rgn_i, srgn_i;

        BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
        /*
         * If the active region and the inactive region are the same,
         * we will inactivate this region.
         * The device can detect this (the region being inactivated) and
         * will respond with the proper active region information.
         */
        for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
                rgn_i =
                        be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
                srgn_i =
                        be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);

                rgn = hpb->rgn_tbl + rgn_i;
                if (hpb->is_hcm &&
                    (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
                        /*
                         * in host control mode, subregion activation
                         * recommendations are only honored for active
                         * regions. Also, ignore recommendations for dirty
                         * regions - the host will make decisions concerning
                         * those by itself.
                         */
                        continue;
                }

                dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
                        "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);

                spin_lock(&hpb->rsp_list_lock);
                ufshpb_update_active_info(hpb, rgn_i, srgn_i);
                spin_unlock(&hpb->rsp_list_lock);

                srgn = rgn->srgn_tbl + srgn_i;

                /* blocking HPB_READ */
                spin_lock(&hpb->rgn_state_lock);
                if (srgn->srgn_state == HPB_SRGN_VALID)
                        srgn->srgn_state = HPB_SRGN_INVALID;
                spin_unlock(&hpb->rgn_state_lock);
        }

        if (hpb->is_hcm) {
                /*
                 * in host control mode the device is not allowed to inactivate
                 * regions
                 */
                goto out;
        }

        for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
                rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
                dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i);
                ufshpb_submit_region_inactive(hpb, rgn_i);
        }

out:
        dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
                rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);

        if (ufshpb_get_state(hpb) == HPB_PRESENT)
                queue_work(ufshpb_wq, &hpb->map_work);
}

/*
 * Set the flags of all active regions to RGN_FLAG_UPDATE so the host side
 * reloads their L2P entries later.
 */
static void ufshpb_set_regions_update(struct ufshpb_lu *hpb)
{
        struct victim_select_info *lru_info = &hpb->lru_info;
        struct ufshpb_region *rgn;
        unsigned long flags;

        spin_lock_irqsave(&hpb->rgn_state_lock, flags);

        list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
                set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);

        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
}

static void ufshpb_dev_reset_handler(struct ufs_hba *hba)
{
        struct scsi_device *sdev;
        struct ufshpb_lu *hpb;

        __shost_for_each_device(sdev, hba->host) {
                hpb = ufshpb_get_hpb_data(sdev);
                if (!hpb)
                        continue;

                if (hpb->is_hcm) {
                        /*
                         * In HPB host control mode, if the device powered up
                         * and lost its HPB information, set the region flag to
                         * RGN_FLAG_UPDATE; this lets the host reload its L2P
                         * entries (reactivating the regions in the UFS device).
                         */
                        ufshpb_set_regions_update(hpb);
                } else {
                        /*
                         * In HPB device control mode, receiving 02h:HPB
                         * Operation in the UPIU response means the device
                         * recommends that the host inactivate all active
                         * regions. Here we add all active regions to the
                         * inactive list; they will be inactivated later in
                         * ufshpb_map_work_handler().
                         */
                        struct victim_select_info *lru_info = &hpb->lru_info;
                        struct ufshpb_region *rgn;

                        list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
                                ufshpb_submit_region_inactive(hpb, rgn->rgn_idx);

                        if (ufshpb_get_state(hpb) == HPB_PRESENT)
                                queue_work(ufshpb_wq, &hpb->map_work);
                }
        }
}

/*
 * This function parses the recommended active subregion information in the
 * sense data field of a response UPIU with SAM_STAT_GOOD status.
 */
1303 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1304 {
1305         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1306         struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1307         int data_seg_len;
1308
1309         data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1310                 & MASK_RSP_UPIU_DATA_SEG_LEN;
1311
1312         /* If data segment length is zero, rsp_field is not valid */
1313         if (!data_seg_len)
1314                 return;
1315
1316         if (unlikely(lrbp->lun != rsp_field->lun)) {
1317                 struct scsi_device *sdev;
1318                 bool found = false;
1319
1320                 __shost_for_each_device(sdev, hba->host) {
1321                         hpb = ufshpb_get_hpb_data(sdev);
1322
1323                         if (!hpb)
1324                                 continue;
1325
1326                         if (rsp_field->lun == hpb->lun) {
1327                                 found = true;
1328                                 break;
1329                         }
1330                 }
1331
1332                 if (!found)
1333                         return;
1334         }
1335
1336         if (!hpb)
1337                 return;
1338
1339         if (ufshpb_get_state(hpb) == HPB_INIT)
1340                 return;
1341
1342         if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1343             (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1344                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1345                            "%s: ufshpb state is not PRESENT/SUSPEND\n",
1346                            __func__);
1347                 return;
1348         }
1349
1350         BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1351
1352         if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1353                 return;
1354
1355         hpb->stats.rcmd_noti_cnt++;
1356
1357         switch (rsp_field->hpb_op) {
1358         case HPB_RSP_REQ_REGION_UPDATE:
1359                 if (data_seg_len != DEV_DATA_SEG_LEN)
1360                         dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1361                                  "%s: data seg length is not same.\n",
1362                                  __func__);
1363                 ufshpb_rsp_req_region_update(hpb, rsp_field);
1364                 break;
1365         case HPB_RSP_DEV_RESET:
1366                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1367                          "UFS device lost HPB information during PM.\n");
1368                 ufshpb_dev_reset_handler(hba);
1369
1370                 break;
1371         default:
1372                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1373                            "hpb_op is not available: %d\n",
1374                            rsp_field->hpb_op);
1375                 break;
1376         }
1377 }
1378
1379 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1380                                    struct ufshpb_region *rgn,
1381                                    struct ufshpb_subregion *srgn)
1382 {
1383         if (!list_empty(&rgn->list_inact_rgn))
1384                 return;
1385
1386         if (!list_empty(&srgn->list_act_srgn)) {
1387                 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1388                 return;
1389         }
1390
1391         list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1392 }
1393
1394 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1395                                           struct ufshpb_region *rgn,
1396                                           struct list_head *pending_list)
1397 {
1398         struct ufshpb_subregion *srgn;
1399         int srgn_idx;
1400
1401         if (!list_empty(&rgn->list_inact_rgn))
1402                 return;
1403
1404         for_each_sub_region(rgn, srgn_idx, srgn)
1405                 if (!list_empty(&srgn->list_act_srgn))
1406                         return;
1407
1408         list_add_tail(&rgn->list_inact_rgn, pending_list);
1409 }
1410
1411 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1412 {
1413         struct ufshpb_region *rgn;
1414         struct ufshpb_subregion *srgn;
1415         unsigned long flags;
1416         int ret = 0;
1417
1418         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1419         while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1420                                                 struct ufshpb_subregion,
1421                                                 list_act_srgn))) {
1422                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1423                         break;
1424
1425                 list_del_init(&srgn->list_act_srgn);
1426                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1427
1428                 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1429                 ret = ufshpb_add_region(hpb, rgn);
1430                 if (ret)
1431                         goto active_failed;
1432
1433                 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1434                 if (ret) {
1435                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1436                             "issue map_req failed. ret %d, region %d - %d\n",
1437                             ret, rgn->rgn_idx, srgn->srgn_idx);
1438                         goto active_failed;
1439                 }
1440                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1441         }
1442         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1443         return;
1444
1445 active_failed:
1446         dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1447                    rgn->rgn_idx, srgn->srgn_idx);
1448         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1449         ufshpb_add_active_list(hpb, rgn, srgn);
1450         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1451 }
1452
1453 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1454 {
1455         struct ufshpb_region *rgn;
1456         unsigned long flags;
1457         int ret;
1458         LIST_HEAD(pending_list);
1459
1460         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1461         while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1462                                                struct ufshpb_region,
1463                                                list_inact_rgn))) {
1464                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1465                         break;
1466
1467                 list_del_init(&rgn->list_inact_rgn);
1468                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1469
1470                 ret = ufshpb_evict_region(hpb, rgn);
1471                 if (ret) {
1472                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1473                         ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1474                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1475                 }
1476
1477                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1478         }
1479
1480         list_splice(&pending_list, &hpb->lh_inact_rgn);
1481         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1482 }
1483
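/*
 * Decay the read counters (host control mode): right-shift every
 * subregion's read count by the normalization factor so that activation
 * decisions favor recent traffic, and queue inactivation for any active
 * region whose decayed count has dropped to zero.
 */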
1484 static void ufshpb_normalization_work_handler(struct work_struct *work)
1485 {
1486         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1487                                              ufshpb_normalization_work);
1488         int rgn_idx;
1489         u8 factor = hpb->params.normalization_factor;
1490
1491         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1492                 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1493                 int srgn_idx;
1494
1495                 spin_lock(&rgn->rgn_lock);
1496                 rgn->reads = 0;
1497                 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1498                         struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1499
1500                         srgn->reads >>= factor;
1501                         rgn->reads += srgn->reads;
1502                 }
1503                 spin_unlock(&rgn->rgn_lock);
1504
1505                 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1506                         continue;
1507
1508                 /* if region is active but has no reads - inactivate it */
1509                 spin_lock(&hpb->rsp_list_lock);
1510                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1511                 spin_unlock(&hpb->rsp_list_lock);
1512         }
1513 }
1514
1515 static void ufshpb_map_work_handler(struct work_struct *work)
1516 {
1517         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1518
1519         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1520                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1521                            "%s: ufshpb state is not PRESENT\n", __func__);
1522                 return;
1523         }
1524
1525         ufshpb_run_inactive_region_list(hpb);
1526         ufshpb_run_active_subregion_list(hpb);
1527 }
1528
1529 /*
1530  * This function does not need to take any lock (rgn_state_lock,
1531  * rsp_list_lock, etc.) because it is only called during initialization.
1532  */
1533 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1534                                             struct ufshpb_lu *hpb,
1535                                             struct ufshpb_region *rgn)
1536 {
1537         struct ufshpb_subregion *srgn;
1538         int srgn_idx, i;
1539         int err = 0;
1540
1541         for_each_sub_region(rgn, srgn_idx, srgn) {
1542                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1543                 srgn->srgn_state = HPB_SRGN_INVALID;
1544                 if (!srgn->mctx) {
1545                         err = -ENOMEM;
1546                         dev_err(hba->dev,
1547                                 "alloc mctx for pinned region failed\n");
1548                         goto release;
1549                 }
1550
1551                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1552         }
1553
1554         rgn->rgn_state = HPB_RGN_PINNED;
1555         return 0;
1556
1557 release:
1558         for (i = 0; i < srgn_idx; i++) {
1559                 srgn = rgn->srgn_tbl + i;
1560                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1561         }
1562         return err;
1563 }
1564
1565 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1566                                       struct ufshpb_region *rgn, bool last)
1567 {
1568         int srgn_idx;
1569         struct ufshpb_subregion *srgn;
1570
1571         for_each_sub_region(rgn, srgn_idx, srgn) {
1572                 INIT_LIST_HEAD(&srgn->list_act_srgn);
1573
1574                 srgn->rgn_idx = rgn->rgn_idx;
1575                 srgn->srgn_idx = srgn_idx;
1576                 srgn->srgn_state = HPB_SRGN_UNUSED;
1577         }
1578
1579         if (unlikely(last && hpb->last_srgn_entries))
1580                 srgn->is_last = true;
1581 }
1582
1583 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1584                                       struct ufshpb_region *rgn, int srgn_cnt)
1585 {
1586         rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1587                                  GFP_KERNEL);
1588         if (!rgn->srgn_tbl)
1589                 return -ENOMEM;
1590
1591         rgn->srgn_cnt = srgn_cnt;
1592         return 0;
1593 }
1594
1595 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1596                                      struct ufshpb_lu *hpb,
1597                                      struct ufshpb_dev_info *hpb_dev_info,
1598                                      struct ufshpb_lu_info *hpb_lu_info)
1599 {
1600         u32 entries_per_rgn;
1601         u64 rgn_mem_size, tmp;
1602
1603         if (ufshpb_is_legacy(hba))
1604                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1605         else
1606                 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1607
1608         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1609         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1610                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1611                 : PINNED_NOT_SET;
1612         hpb->lru_info.max_lru_active_cnt =
1613                 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1614
1615         rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1616                         * HPB_ENTRY_SIZE;
1617         do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1618         hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1619                 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
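        /*
         * Worked example (illustrative, assuming the 512-byte region size
         * unit and one 8-byte HPB entry per 4 KiB block): rgn_size = 15
         * describes a 2^15 * 512 B = 16 MiB region, i.e. 4096 blocks, so
         * rgn_mem_size = 4096 * 8 B = 32 KiB of L2P map memory per region.
         */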
1620
1621         tmp = rgn_mem_size;
1622         do_div(tmp, HPB_ENTRY_SIZE);
1623         entries_per_rgn = (u32)tmp;
1624         hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1625         hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1626
1627         hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1628         hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1629         hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1630
1631         tmp = rgn_mem_size;
1632         do_div(tmp, hpb->srgn_mem_size);
1633         hpb->srgns_per_rgn = (int)tmp;
1634
1635         hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1636                                 entries_per_rgn);
1637         hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1638                                 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1639         hpb->last_srgn_entries = hpb_lu_info->num_blocks
1640                                  % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1641
1642         hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1643
1644         if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1645                 hpb->is_hcm = true;
1646 }
1647
1648 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1649 {
1650         struct ufshpb_region *rgn_table, *rgn;
1651         int rgn_idx, i;
1652         int ret = 0;
1653
1654         rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1655                             GFP_KERNEL);
1656         if (!rgn_table)
1657                 return -ENOMEM;
1658
1659         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1660                 int srgn_cnt = hpb->srgns_per_rgn;
1661                 bool last_srgn = false;
1662
1663                 rgn = rgn_table + rgn_idx;
1664                 rgn->rgn_idx = rgn_idx;
1665
1666                 spin_lock_init(&rgn->rgn_lock);
1667
1668                 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1669                 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1670                 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1671
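                /*
                 * The last region may be only partially covered by the LU:
                 * give it just the subregions left over after dividing the
                 * LU's total subregion count by srgns_per_rgn.
                 */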
1672                 if (rgn_idx == hpb->rgns_per_lu - 1) {
1673                         srgn_cnt = ((hpb->srgns_per_lu - 1) %
1674                                     hpb->srgns_per_rgn) + 1;
1675                         last_srgn = true;
1676                 }
1677
1678                 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1679                 if (ret)
1680                         goto release_srgn_table;
1681                 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1682
1683                 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1684                         ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1685                         if (ret)
1686                                 goto release_srgn_table;
1687                 } else {
1688                         rgn->rgn_state = HPB_RGN_INACTIVE;
1689                 }
1690
1691                 rgn->rgn_flags = 0;
1692                 rgn->hpb = hpb;
1693         }
1694
1695         hpb->rgn_tbl = rgn_table;
1696
1697         return 0;
1698
1699 release_srgn_table:
1700         for (i = 0; i <= rgn_idx; i++)
1701                 kvfree(rgn_table[i].srgn_tbl);
1702
1703         kvfree(rgn_table);
1704         return ret;
1705 }
1706
1707 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1708                                          struct ufshpb_region *rgn)
1709 {
1710         int srgn_idx;
1711         struct ufshpb_subregion *srgn;
1712
1713         for_each_sub_region(rgn, srgn_idx, srgn)
1714                 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1715                         srgn->srgn_state = HPB_SRGN_UNUSED;
1716                         ufshpb_put_map_ctx(hpb, srgn->mctx);
1717                 }
1718 }
1719
1720 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1721 {
1722         int rgn_idx;
1723
1724         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1725                 struct ufshpb_region *rgn;
1726
1727                 rgn = hpb->rgn_tbl + rgn_idx;
1728                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1729                         rgn->rgn_state = HPB_RGN_INACTIVE;
1730
1731                         ufshpb_destroy_subregion_tbl(hpb, rgn);
1732                 }
1733
1734                 kvfree(rgn->srgn_tbl);
1735         }
1736
1737         kvfree(hpb->rgn_tbl);
1738 }
1739
1740 /* SYSFS functions */
1741 #define ufshpb_sysfs_attr_show_func(__name)                             \
1742 static ssize_t __name##_show(struct device *dev,                        \
1743         struct device_attribute *attr, char *buf)                       \
1744 {                                                                       \
1745         struct scsi_device *sdev = to_scsi_device(dev);                 \
1746         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1747                                                                         \
1748         if (!hpb)                                                       \
1749                 return -ENODEV;                                         \
1750                                                                         \
1751         return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
1752 }                                                                       \
1753 \
1754 static DEVICE_ATTR_RO(__name)
1755
1756 ufshpb_sysfs_attr_show_func(hit_cnt);
1757 ufshpb_sysfs_attr_show_func(miss_cnt);
1758 ufshpb_sysfs_attr_show_func(rcmd_noti_cnt);
1759 ufshpb_sysfs_attr_show_func(rcmd_active_cnt);
1760 ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt);
1761 ufshpb_sysfs_attr_show_func(map_req_cnt);
1762 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1763
1764 static struct attribute *hpb_dev_stat_attrs[] = {
1765         &dev_attr_hit_cnt.attr,
1766         &dev_attr_miss_cnt.attr,
1767         &dev_attr_rcmd_noti_cnt.attr,
1768         &dev_attr_rcmd_active_cnt.attr,
1769         &dev_attr_rcmd_inactive_cnt.attr,
1770         &dev_attr_map_req_cnt.attr,
1771         &dev_attr_umap_req_cnt.attr,
1772         NULL,
1773 };
1774
1775 struct attribute_group ufs_sysfs_hpb_stat_group = {
1776         .name = "hpb_stats",
1777         .attrs = hpb_dev_stat_attrs,
1778 };
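/*
 * These counters show up under the LU's SCSI device directory in sysfs.
 * A hypothetical read from user space (the 0:0:0:0 H:C:T:L address is
 * device-specific):
 *
 *   cat /sys/class/scsi_device/0:0:0:0/device/hpb_stats/hit_cnt
 */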
1779
1780 /* SYSFS functions: HPB parameters */
1781 #define ufshpb_sysfs_param_show_func(__name)                            \
1782 static ssize_t __name##_show(struct device *dev,                        \
1783         struct device_attribute *attr, char *buf)                       \
1784 {                                                                       \
1785         struct scsi_device *sdev = to_scsi_device(dev);                 \
1786         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1787                                                                         \
1788         if (!hpb)                                                       \
1789                 return -ENODEV;                                         \
1790                                                                         \
1791         return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
1792 }
1793
1794 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1795 static ssize_t
1796 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1797                          const char *buf, size_t count)
1798 {
1799         struct scsi_device *sdev = to_scsi_device(dev);
1800         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1801         int val;
1802
1803         if (!hpb)
1804                 return -ENODEV;
1805
1806         if (kstrtouint(buf, 0, &val))
1807                 return -EINVAL;
1808
1809         if (val < 0)
1810                 return -EINVAL;
1811
1812         hpb->params.requeue_timeout_ms = val;
1813
1814         return count;
1815 }
1816 static DEVICE_ATTR_RW(requeue_timeout_ms);
1817
1818 ufshpb_sysfs_param_show_func(activation_thld);
1819 static ssize_t
1820 activation_thld_store(struct device *dev, struct device_attribute *attr,
1821                       const char *buf, size_t count)
1822 {
1823         struct scsi_device *sdev = to_scsi_device(dev);
1824         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1825         int val;
1826
1827         if (!hpb)
1828                 return -ENODEV;
1829
1830         if (!hpb->is_hcm)
1831                 return -EOPNOTSUPP;
1832
1833         if (kstrtouint(buf, 0, &val))
1834                 return -EINVAL;
1835
1836         if (val <= 0)
1837                 return -EINVAL;
1838
1839         hpb->params.activation_thld = val;
1840
1841         return count;
1842 }
1843 static DEVICE_ATTR_RW(activation_thld);
1844
1845 ufshpb_sysfs_param_show_func(normalization_factor);
1846 static ssize_t
1847 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1848                            const char *buf, size_t count)
1849 {
1850         struct scsi_device *sdev = to_scsi_device(dev);
1851         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1852         int val;
1853
1854         if (!hpb)
1855                 return -ENODEV;
1856
1857         if (!hpb->is_hcm)
1858                 return -EOPNOTSUPP;
1859
1860         if (kstrtouint(buf, 0, &val))
1861                 return -EINVAL;
1862
1863         if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1864                 return -EINVAL;
1865
1866         hpb->params.normalization_factor = val;
1867
1868         return count;
1869 }
1870 static DEVICE_ATTR_RW(normalization_factor);
1871
1872 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1873 static ssize_t
1874 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1875                           const char *buf, size_t count)
1876 {
1877         struct scsi_device *sdev = to_scsi_device(dev);
1878         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1879         int val;
1880
1881         if (!hpb)
1882                 return -ENODEV;
1883
1884         if (!hpb->is_hcm)
1885                 return -EOPNOTSUPP;
1886
1887         if (kstrtouint(buf, 0, &val))
1888                 return -EINVAL;
1889
1890         if (val <= hpb->params.eviction_thld_exit)
1891                 return -EINVAL;
1892
1893         hpb->params.eviction_thld_enter = val;
1894
1895         return count;
1896 }
1897 static DEVICE_ATTR_RW(eviction_thld_enter);
1898
1899 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1900 static ssize_t
1901 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1902                          const char *buf, size_t count)
1903 {
1904         struct scsi_device *sdev = to_scsi_device(dev);
1905         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1906         int val;
1907
1908         if (!hpb)
1909                 return -ENODEV;
1910
1911         if (!hpb->is_hcm)
1912                 return -EOPNOTSUPP;
1913
1914         if (kstrtouint(buf, 0, &val))
1915                 return -EINVAL;
1916
1917         if (val <= hpb->params.activation_thld)
1918                 return -EINVAL;
1919
1920         hpb->params.eviction_thld_exit = val;
1921
1922         return count;
1923 }
1924 static DEVICE_ATTR_RW(eviction_thld_exit);
1925
1926 ufshpb_sysfs_param_show_func(read_timeout_ms);
1927 static ssize_t
1928 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1929                       const char *buf, size_t count)
1930 {
1931         struct scsi_device *sdev = to_scsi_device(dev);
1932         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1933         int val;
1934
1935         if (!hpb)
1936                 return -ENODEV;
1937
1938         if (!hpb->is_hcm)
1939                 return -EOPNOTSUPP;
1940
1941         if (kstrtouint(buf, 0, &val))
1942                 return -EINVAL;
1943
1944         /* read_timeout must be at least twice the polling interval */
1945         if (val < hpb->params.timeout_polling_interval_ms * 2)
1946                 return -EINVAL;
1947
1948         hpb->params.read_timeout_ms = val;
1949
1950         return count;
1951 }
1952 static DEVICE_ATTR_RW(read_timeout_ms);
1953
1954 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1955 static ssize_t
1956 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1957                             const char *buf, size_t count)
1958 {
1959         struct scsi_device *sdev = to_scsi_device(dev);
1960         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1961         int val;
1962
1963         if (!hpb)
1964                 return -ENODEV;
1965
1966         if (!hpb->is_hcm)
1967                 return -EOPNOTSUPP;
1968
1969         if (kstrtouint(buf, 0, &val))
1970                 return -EINVAL;
1971
1972         if (val <= 0)
1973                 return -EINVAL;
1974
1975         hpb->params.read_timeout_expiries = val;
1976
1977         return count;
1978 }
1979 static DEVICE_ATTR_RW(read_timeout_expiries);
1980
1981 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1982 static ssize_t
1983 timeout_polling_interval_ms_store(struct device *dev,
1984                                   struct device_attribute *attr,
1985                                   const char *buf, size_t count)
1986 {
1987         struct scsi_device *sdev = to_scsi_device(dev);
1988         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1989         int val;
1990
1991         if (!hpb)
1992                 return -ENODEV;
1993
1994         if (!hpb->is_hcm)
1995                 return -EOPNOTSUPP;
1996
1997         if (kstrtouint(buf, 0, &val))
1998                 return -EINVAL;
1999
2000         /* the polling interval must be at most half of read_timeout */
2001         if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
2002                 return -EINVAL;
2003
2004         hpb->params.timeout_polling_interval_ms = val;
2005
2006         return count;
2007 }
2008 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
2009
2010 ufshpb_sysfs_param_show_func(inflight_map_req);
2011 static ssize_t inflight_map_req_store(struct device *dev,
2012                                       struct device_attribute *attr,
2013                                       const char *buf, size_t count)
2014 {
2015         struct scsi_device *sdev = to_scsi_device(dev);
2016         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2017         int val;
2018
2019         if (!hpb)
2020                 return -ENODEV;
2021
2022         if (!hpb->is_hcm)
2023                 return -EOPNOTSUPP;
2024
2025         if (kstrtouint(buf, 0, &val))
2026                 return -EINVAL;
2027
2028         if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
2029                 return -EINVAL;
2030
2031         hpb->params.inflight_map_req = val;
2032
2033         return count;
2034 }
2035 static DEVICE_ATTR_RW(inflight_map_req);
2036
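/*
 * Default HCM tuning: with ACTIVATION_THRESHOLD = 8 the eviction
 * thresholds come out to 8 << 5 = 256 (enter) and 8 << 4 = 128 (exit),
 * which satisfies the enter > exit constraint enforced by the sysfs
 * store functions above.
 */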
2037 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2038 {
2039         hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2040         hpb->params.normalization_factor = 1;
2041         hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2042         hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2043         hpb->params.read_timeout_ms = READ_TO_MS;
2044         hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2045         hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2046         hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2047 }
2048
2049 static struct attribute *hpb_dev_param_attrs[] = {
2050         &dev_attr_requeue_timeout_ms.attr,
2051         &dev_attr_activation_thld.attr,
2052         &dev_attr_normalization_factor.attr,
2053         &dev_attr_eviction_thld_enter.attr,
2054         &dev_attr_eviction_thld_exit.attr,
2055         &dev_attr_read_timeout_ms.attr,
2056         &dev_attr_read_timeout_expiries.attr,
2057         &dev_attr_timeout_polling_interval_ms.attr,
2058         &dev_attr_inflight_map_req.attr,
2059         NULL,
2060 };
2061
2062 struct attribute_group ufs_sysfs_hpb_param_group = {
2063         .name = "hpb_params",
2064         .attrs = hpb_dev_param_attrs,
2065 };
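/*
 * The parameters sit next to the statistics in sysfs; the writable ones
 * can be tuned at runtime, e.g. (hypothetical path):
 *
 *   echo 16 > /sys/class/scsi_device/0:0:0:0/device/hpb_params/activation_thld
 */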
2066
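/*
 * Pre-allocate half the LU's queue depth worth of pre-request contexts,
 * each carrying a bio and a zeroed payload page, and park them on the
 * free list for later use.
 */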
2067 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2068 {
2069         struct ufshpb_req *pre_req = NULL, *t;
2070         int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2071         int i;
2072
2073         INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2074
2075         hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2076         hpb->throttle_pre_req = qd;
2077         hpb->num_inflight_pre_req = 0;
2078
2079         if (!hpb->pre_req)
2080                 goto release_mem;
2081
2082         for (i = 0; i < qd; i++) {
2083                 pre_req = hpb->pre_req + i;
2084                 INIT_LIST_HEAD(&pre_req->list_req);
2085                 pre_req->req = NULL;
2086
2087                 pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
2088                 if (!pre_req->bio)
2089                         goto release_mem;
2090
2091                 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2092                 if (!pre_req->wb.m_page) {
2093                         bio_put(pre_req->bio);
2094                         goto release_mem;
2095                 }
2096
2097                 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2098         }
2099
2100         return 0;
2101 release_mem:
2102         list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2103                 list_del_init(&pre_req->list_req);
2104                 bio_put(pre_req->bio);
2105                 __free_page(pre_req->wb.m_page);
2106         }
2107
2108         kfree(hpb->pre_req);
2109         return -ENOMEM;
2110 }
2111
2112 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2113 {
2114         struct ufshpb_req *pre_req = NULL;
2115         int i;
2116
2117         for (i = 0; i < hpb->throttle_pre_req; i++) {
2118                 pre_req = hpb->pre_req + i;
2119                 bio_put(hpb->pre_req[i].bio);
2120                 if (pre_req->wb.m_page)
2121                         __free_page(hpb->pre_req[i].wb.m_page);
2122                 list_del_init(&pre_req->list_req);
2123         }
2124
2125         kfree(hpb->pre_req);
2126 }
2127
2128 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2129 {
2130         hpb->stats.hit_cnt = 0;
2131         hpb->stats.miss_cnt = 0;
2132         hpb->stats.rcmd_noti_cnt = 0;
2133         hpb->stats.rcmd_active_cnt = 0;
2134         hpb->stats.rcmd_inactive_cnt = 0;
2135         hpb->stats.map_req_cnt = 0;
2136         hpb->stats.umap_req_cnt = 0;
2137 }
2138
2139 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2140 {
2141         hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2142         if (hpb->is_hcm)
2143                 ufshpb_hcm_param_init(hpb);
2144 }
2145
2146 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2147 {
2148         int ret;
2149
2150         spin_lock_init(&hpb->rgn_state_lock);
2151         spin_lock_init(&hpb->rsp_list_lock);
2152         spin_lock_init(&hpb->param_lock);
2153
2154         INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2155         INIT_LIST_HEAD(&hpb->lh_act_srgn);
2156         INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2157         INIT_LIST_HEAD(&hpb->list_hpb_lu);
2158
2159         INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2160         if (hpb->is_hcm) {
2161                 INIT_WORK(&hpb->ufshpb_normalization_work,
2162                           ufshpb_normalization_work_handler);
2163                 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2164                                   ufshpb_read_to_handler);
2165         }
2166
2167         hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2168                           sizeof(struct ufshpb_req), 0, 0, NULL);
2169         if (!hpb->map_req_cache) {
2170                 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2171                         hpb->lun);
2172                 return -ENOMEM;
2173         }
2174
2175         hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2176                           sizeof(struct page *) * hpb->pages_per_srgn,
2177                           0, 0, NULL);
2178         if (!hpb->m_page_cache) {
2179                 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2180                         hpb->lun);
2181                 ret = -ENOMEM;
2182                 goto release_req_cache;
2183         }
2184
2185         ret = ufshpb_pre_req_mempool_init(hpb);
2186         if (ret) {
2187                 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2188                         hpb->lun);
2189                 goto release_m_page_cache;
2190         }
2191
2192         ret = ufshpb_alloc_region_tbl(hba, hpb);
2193         if (ret)
2194                 goto release_pre_req_mempool;
2195
2196         ufshpb_stat_init(hpb);
2197         ufshpb_param_init(hpb);
2198
2199         if (hpb->is_hcm) {
2200                 unsigned int poll;
2201
2202                 poll = hpb->params.timeout_polling_interval_ms;
2203                 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2204                                       msecs_to_jiffies(poll));
2205         }
2206
2207         return 0;
2208
2209 release_pre_req_mempool:
2210         ufshpb_pre_req_mempool_destroy(hpb);
2211 release_m_page_cache:
2212         kmem_cache_destroy(hpb->m_page_cache);
2213 release_req_cache:
2214         kmem_cache_destroy(hpb->map_req_cache);
2215         return ret;
2216 }
2217
2218 static struct ufshpb_lu *
2219 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2220                     struct ufshpb_dev_info *hpb_dev_info,
2221                     struct ufshpb_lu_info *hpb_lu_info)
2222 {
2223         struct ufshpb_lu *hpb;
2224         int ret;
2225
2226         hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2227         if (!hpb)
2228                 return NULL;
2229
2230         hpb->lun = sdev->lun;
2231         hpb->sdev_ufs_lu = sdev;
2232
2233         ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2234
2235         ret = ufshpb_lu_hpb_init(hba, hpb);
2236         if (ret) {
2237                 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2238                 goto release_hpb;
2239         }
2240
2241         sdev->hostdata = hpb;
2242         return hpb;
2243
2244 release_hpb:
2245         kfree(hpb);
2246         return NULL;
2247 }
2248
2249 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2250 {
2251         struct ufshpb_region *rgn, *next_rgn;
2252         struct ufshpb_subregion *srgn, *next_srgn;
2253         unsigned long flags;
2254
2255         /*
2256          * If a device reset occurred, the remaining HPB region information
2257          * may be stale. Discard the HPB response lists left over from
2258          * before the reset to avoid unnecessary work.
2259          */
2260         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2261         list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2262                                  list_inact_rgn)
2263                 list_del_init(&rgn->list_inact_rgn);
2264
2265         list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2266                                  list_act_srgn)
2267                 list_del_init(&srgn->list_act_srgn);
2268         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2269 }
2270
2271 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2272 {
2273         if (hpb->is_hcm) {
2274                 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2275                 cancel_work_sync(&hpb->ufshpb_normalization_work);
2276         }
2277         cancel_work_sync(&hpb->map_work);
2278 }
2279
2280 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2281 {
2282         int err = 0;
2283         bool flag_res = true;
2284         int try;
2285
2286         /* wait for the device to complete HPB reset query */
2287         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2288                 dev_dbg(hba->dev,
2289                         "%s: polling fHpbReset flag, attempt %d\n",
2290                         __func__, try);
2291
2292                 /* Poll fHpbReset flag to be cleared */
2293                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2294                                 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2295
2296                 if (err) {
2297                         dev_err(hba->dev,
2298                                 "%s reading fHpbReset flag failed with error %d\n",
2299                                 __func__, err);
2300                         return flag_res;
2301                 }
2302
2303                 if (!flag_res)
2304                         goto out;
2305
2306                 usleep_range(1000, 1100);
2307         }
2308         if (flag_res) {
2309                 dev_err(hba->dev,
2310                         "%s fHpbReset was not cleared by the device\n",
2311                         __func__);
2312         }
2313 out:
2314         return flag_res;
2315 }
2316
2317 /**
2318  * ufshpb_toggle_state - switch HPB state of all LUs
2319  * @hba: per-adapter instance
2320  * @src: expected current HPB state
2321  * @dest: target HPB state to switch to
2322  */
2323 void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest)
2324 {
2325         struct ufshpb_lu *hpb;
2326         struct scsi_device *sdev;
2327
2328         shost_for_each_device(sdev, hba->host) {
2329                 hpb = ufshpb_get_hpb_data(sdev);
2330
2331                 if (!hpb || ufshpb_get_state(hpb) != src)
2332                         continue;
2333                 ufshpb_set_state(hpb, dest);
2334
2335                 if (dest == HPB_RESET) {
2336                         ufshpb_cancel_jobs(hpb);
2337                         ufshpb_discard_rsp_lists(hpb);
2338                 }
2339         }
2340 }
2341
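/**
 * ufshpb_suspend - move every HPB LU in HPB_PRESENT state to HPB_SUSPEND
 * @hba: per-adapter instance
 *
 * Cancels the map, normalization and read-timeout work so nothing runs
 * while the host is suspended.
 */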
2342 void ufshpb_suspend(struct ufs_hba *hba)
2343 {
2344         struct ufshpb_lu *hpb;
2345         struct scsi_device *sdev;
2346
2347         shost_for_each_device(sdev, hba->host) {
2348                 hpb = ufshpb_get_hpb_data(sdev);
2349                 if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT)
2350                         continue;
2351
2352                 ufshpb_set_state(hpb, HPB_SUSPEND);
2353                 ufshpb_cancel_jobs(hpb);
2354         }
2355 }
2356
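/**
 * ufshpb_resume - move every suspended HPB LU back to HPB_PRESENT
 * @hba: per-adapter instance
 *
 * Kicks the map work again and, in host control mode, reschedules the
 * read-timeout poller.
 */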
2357 void ufshpb_resume(struct ufs_hba *hba)
2358 {
2359         struct ufshpb_lu *hpb;
2360         struct scsi_device *sdev;
2361
2362         shost_for_each_device(sdev, hba->host) {
2363                 hpb = ufshpb_get_hpb_data(sdev);
2364                 if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND)
2365                         continue;
2366
2367                 ufshpb_set_state(hpb, HPB_PRESENT);
2368                 ufshpb_kick_map_work(hpb);
2369                 if (hpb->is_hcm) {
2370                         unsigned int poll = hpb->params.timeout_polling_interval_ms;
2371
2372                         schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll));
2373                 }
2374         }
2375 }
2376
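/*
 * Read the LU's unit descriptor and extract its HPB parameters: whether
 * HPB is enabled for this LU, the logical block count, and the
 * pinned/active region limits.
 */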
2377 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2378                               struct ufshpb_lu_info *hpb_lu_info)
2379 {
2380         u16 max_active_rgns;
2381         u8 lu_enable;
2382         int size;
2383         int ret;
2384         char desc_buf[QUERY_DESC_MAX_SIZE];
2385
2386         ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2387
2388         ufshcd_rpm_get_sync(hba);
2389         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2390                                             QUERY_DESC_IDN_UNIT, lun, 0,
2391                                             desc_buf, &size);
2392         ufshcd_rpm_put_sync(hba);
2393
2394         if (ret) {
2395                 dev_err(hba->dev,
2396                         "%s: idn: %d lun: %d query request failed\n",
2397                         __func__, QUERY_DESC_IDN_UNIT, lun);
2398                 return ret;
2399         }
2400
2401         lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2402         if (lu_enable != LU_ENABLED_HPB_FUNC)
2403                 return -ENODEV;
2404
2405         max_active_rgns = get_unaligned_be16(
2406                         desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2407         if (!max_active_rgns) {
2408                 dev_err(hba->dev,
2409                         "lun %d has an invalid max active region count\n", lun);
2410                 return -ENODEV;
2411         }
2412
2413         hpb_lu_info->num_blocks = get_unaligned_be64(
2414                         desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2415         hpb_lu_info->pinned_start = get_unaligned_be16(
2416                         desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2417         hpb_lu_info->num_pinned = get_unaligned_be16(
2418                         desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2419         hpb_lu_info->max_active_rgns = max_active_rgns;
2420
2421         return 0;
2422 }
2423
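/**
 * ufshpb_destroy_lu - tear down the HPB context of a single LU
 * @hba: per-adapter instance
 * @sdev: scsi device whose hostdata holds the ufshpb_lu
 */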
2424 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2425 {
2426         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2427
2428         if (!hpb)
2429                 return;
2430
2431         ufshpb_set_state(hpb, HPB_FAILED);
2432
2433         sdev = hpb->sdev_ufs_lu;
2434         sdev->hostdata = NULL;
2435
2436         ufshpb_cancel_jobs(hpb);
2437
2438         ufshpb_pre_req_mempool_destroy(hpb);
2439         ufshpb_destroy_region_tbl(hpb);
2440
2441         kmem_cache_destroy(hpb->map_req_cache);
2442         kmem_cache_destroy(hpb->m_page_cache);
2443
2444         list_del_init(&hpb->list_hpb_lu);
2445
2446         kfree(hpb);
2447 }
2448
2449 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2450 {
2451         int pool_size;
2452         struct ufshpb_lu *hpb;
2453         struct scsi_device *sdev;
2454         bool init_success;
2455
2456         if (tot_active_srgn_pages == 0) {
2457                 ufshpb_remove(hba);
2458                 return;
2459         }
2460
2461         init_success = !ufshpb_check_hpb_reset_query(hba);
2462
2463         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2464         if (pool_size > tot_active_srgn_pages) {
2465                 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2466                 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2467         }
2468
2469         shost_for_each_device(sdev, hba->host) {
2470                 hpb = ufshpb_get_hpb_data(sdev);
2471                 if (!hpb)
2472                         continue;
2473
2474                 if (init_success) {
2475                         ufshpb_set_state(hpb, HPB_PRESENT);
2476                         if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2477                                 queue_work(ufshpb_wq, &hpb->map_work);
2478                 } else {
2479                         dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2480                         ufshpb_destroy_lu(hba, sdev);
2481                 }
2482         }
2483
2484         if (!init_success)
2485                 ufshpb_remove(hba);
2486 }
2487
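/**
 * ufshpb_init_hpb_lu - per-LU HPB initialization
 * @hba: per-adapter instance
 * @sdev: scsi device of the LU being configured
 *
 * Called once per LU; when the last LU has been handled, device-level
 * bring-up is completed in ufshpb_hpb_lu_prepared().
 */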
2488 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2489 {
2490         struct ufshpb_lu *hpb;
2491         int ret;
2492         struct ufshpb_lu_info hpb_lu_info = { 0 };
2493         int lun = sdev->lun;
2494
2495         if (lun >= hba->dev_info.max_lu_supported)
2496                 goto out;
2497
2498         ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2499         if (ret)
2500                 goto out;
2501
2502         hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2503                                   &hpb_lu_info);
2504         if (!hpb)
2505                 goto out;
2506
2507         tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2508                         hpb->srgns_per_rgn * hpb->pages_per_srgn;
2509
2510 out:
2511         /* Finish device-level bring-up once all LUs have been initialized */
2512         if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2513                 ufshpb_hpb_lu_prepared(hba);
2514 }
2515
2516 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2517 {
2518         int ret;
2519         unsigned int pool_size;
2520
2521         ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2522                                         sizeof(struct ufshpb_map_ctx),
2523                                         0, 0, NULL);
2524         if (!ufshpb_mctx_cache) {
2525                 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2526                 return -ENOMEM;
2527         }
2528
2529         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2530         dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2531                __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2532
2533         ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2534                                                     ufshpb_mctx_cache);
2535         if (!ufshpb_mctx_pool) {
2536                 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2537                 ret = -ENOMEM;
2538                 goto release_mctx_cache;
2539         }
2540
2541         ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2542         if (!ufshpb_page_pool) {
2543                 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2544                 ret = -ENOMEM;
2545                 goto release_mctx_pool;
2546         }
2547
2548         ufshpb_wq = alloc_workqueue("ufshpb-wq",
2549                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2550         if (!ufshpb_wq) {
2551                 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2552                 ret = -ENOMEM;
2553                 goto release_page_pool;
2554         }
2555
2556         return 0;
2557
2558 release_page_pool:
2559         mempool_destroy(ufshpb_page_pool);
2560 release_mctx_pool:
2561         mempool_destroy(ufshpb_mctx_pool);
2562 release_mctx_cache:
2563         kmem_cache_destroy(ufshpb_mctx_cache);
2564         return ret;
2565 }
2566
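/**
 * ufshpb_get_geo_info - parse the HPB fields of the geometry descriptor
 * @hba: per-adapter instance
 * @geo_buf: raw geometry descriptor
 *
 * Disables HPB when the device reports no HPB LUs or a zero
 * region/subregion size.
 */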
2567 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2568 {
2569         struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2570         int max_active_rgns = 0;
2571         int hpb_num_lu;
2572
2573         hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2574         if (hpb_num_lu == 0) {
2575                 dev_err(hba->dev, "No HPB LU supported\n");
2576                 hpb_info->hpb_disabled = true;
2577                 return;
2578         }
2579
2580         hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2581         hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2582         max_active_rgns = get_unaligned_be16(geo_buf +
2583                           GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2584
2585         if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2586             max_active_rgns == 0) {
2587                 dev_err(hba->dev, "device does not support HPB\n");
2588                 hpb_info->hpb_disabled = true;
2589                 return;
2590         }
2591 }
2592
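/**
 * ufshpb_get_dev_info - parse the HPB fields of the device descriptor
 * @hba: per-adapter instance
 * @desc_buf: raw device descriptor
 *
 * Records the control mode and HPB version (flagging 1.0 devices as
 * legacy) and queries the maximum single-command chunk size.
 */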
2593 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2594 {
2595         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2596         int version, ret;
2597         int max_single_cmd;
2598
2599         hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2600
2601         version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2602         if ((version != HPB_SUPPORT_VERSION) &&
2603             (version != HPB_SUPPORT_LEGACY_VERSION)) {
2604                 dev_err(hba->dev, "%s: HPB version %x is not supported.\n",
2605                         __func__, version);
2606                 hpb_dev_info->hpb_disabled = true;
2607                 return;
2608         }
2609
2610         if (version == HPB_SUPPORT_LEGACY_VERSION)
2611                 hpb_dev_info->is_legacy = true;
2612
2613         /*
2614          * Get the number of user logical units so we can check whether
2615          * every scsi_device has finished initialization.
2616          */
2617         hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2618
2619         if (hpb_dev_info->is_legacy)
2620                 return;
2621
2622         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2623                 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2624
2625         if (ret)
2626                 hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2627         else
2628                 hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2629 }
2630
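/**
 * ufshpb_init - device-level HPB initialization
 * @hba: per-adapter instance
 *
 * Sets up the shared caches, mempools and workqueue, then sets the
 * fHpbReset flag so the device rebuilds its HPB state.
 */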
2631 void ufshpb_init(struct ufs_hba *hba)
2632 {
2633         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2634         int try;
2635         int ret;
2636
2637         if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2638                 return;
2639
2640         if (ufshpb_init_mem_wq(hba)) {
2641                 hpb_dev_info->hpb_disabled = true;
2642                 return;
2643         }
2644
2645         atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2646         tot_active_srgn_pages = 0;
2647         /* issue HPB reset query */
2648         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2649                 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2650                                         QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2651                 if (!ret)
2652                         break;
2653         }
2654 }
2655
2656 void ufshpb_remove(struct ufs_hba *hba)
2657 {
2658         mempool_destroy(ufshpb_page_pool);
2659         mempool_destroy(ufshpb_mctx_pool);
2660         kmem_cache_destroy(ufshpb_mctx_cache);
2661
2662         destroy_workqueue(ufshpb_wq);
2663 }
2664
2665 module_param(ufshpb_host_map_kbytes, uint, 0644);
2666 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2667         "size of the ufshpb host mapping memory pool in kilobytes");
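/*
 * Example (hypothetical): the mapping pool size could be raised to 4 MiB
 * by passing ufshpb_host_map_kbytes=4096 as a module parameter (prefixed
 * with the owning module's name on the kernel command line).
 */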