drivers/scsi/ufs/ufshpb.c (platform/kernel/linux-starfive.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *      Yongmyung Lee <ymhungry.lee@samsung.com>
9  *      Jinyoung Choi <j-young.choi@samsung.com>
10  */
11
12 #include <asm/unaligned.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/module.h>
16 #include <scsi/scsi_cmnd.h>
17
18 #include "ufshcd-priv.h"
19 #include "ufshpb.h"
20 #include "../sd.h"
21
22 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
23 #define READ_TO_MS 1000
24 #define READ_TO_EXPIRIES 100
25 #define POLLING_INTERVAL_MS 200
26 #define THROTTLE_MAP_REQ_DEFAULT 1
27
28 /* memory management */
29 static struct kmem_cache *ufshpb_mctx_cache;
30 static mempool_t *ufshpb_mctx_pool;
31 static mempool_t *ufshpb_page_pool;
32 /* A cache size of 2MB can cache ppn entries covering a 1GB LBA range. */
33 static unsigned int ufshpb_host_map_kbytes = 2048;
34 static int tot_active_srgn_pages;
35
36 static struct workqueue_struct *ufshpb_wq;
37
38 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
39                                       int srgn_idx);
40
41 bool ufshpb_is_allowed(struct ufs_hba *hba)
42 {
43         return !(hba->ufshpb_dev.hpb_disabled);
44 }
45
46 /* HPB version 1.0 is referred to as the legacy version. */
47 bool ufshpb_is_legacy(struct ufs_hba *hba)
48 {
49         return hba->ufshpb_dev.is_legacy;
50 }
51
52 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
53 {
54         return sdev->hostdata;
55 }
56
57 static int ufshpb_get_state(struct ufshpb_lu *hpb)
58 {
59         return atomic_read(&hpb->hpb_state);
60 }
61
62 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
63 {
64         atomic_set(&hpb->hpb_state, state);
65 }
66
67 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
68                                 struct ufshpb_subregion *srgn)
69 {
70         return rgn->rgn_state != HPB_RGN_INACTIVE &&
71                 srgn->srgn_state == HPB_SRGN_VALID;
72 }
73
74 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
75 {
76         return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
77 }
78
79 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
80 {
81         return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
82                op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
83 }
84
85 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
86 {
87         return transfer_len <= hpb->pre_req_max_tr_len;
88 }
89
90 static bool ufshpb_is_general_lun(int lun)
91 {
92         return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
93 }
94
95 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
96 {
97         return hpb->lu_pinned_end != PINNED_NOT_SET &&
98                rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end;
99 }
100
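/*
 * Kick map_work if any activation or inactivation requests are pending on
 * the response lists.
 */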
101 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
102 {
103         bool ret = false;
104         unsigned long flags;
105
106         if (ufshpb_get_state(hpb) != HPB_PRESENT)
107                 return;
108
109         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
110         if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
111                 ret = true;
112         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
113
114         if (ret)
115                 queue_work(ufshpb_wq, &hpb->map_work);
116 }
117
118 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
119                                     struct ufshcd_lrb *lrbp,
120                                     struct utp_hpb_rsp *rsp_field)
121 {
122         /* Check HPB_UPDATE_ALERT */
123         if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
124               UPIU_HEADER_DWORD(0, 2, 0, 0)))
125                 return false;
126
127         if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
128             rsp_field->desc_type != DEV_DES_TYPE ||
129             rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
130             rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
131             rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
132             rsp_field->hpb_op == HPB_RSP_NONE ||
133             (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
134              !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
135                 return false;
136
137         if (!ufshpb_is_general_lun(rsp_field->lun)) {
138                 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
139                          lrbp->lun);
140                 return false;
141         }
142
143         return true;
144 }
145
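/*
 * Walk the subregions spanned by an I/O. For writes and discards, mark the
 * covered ppn_dirty bits; for reads in host control mode, bump the read
 * counters and queue the subregion for activation once the threshold is hit.
 */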
146 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
147                                int srgn_offset, int cnt, bool set_dirty)
148 {
149         struct ufshpb_region *rgn;
150         struct ufshpb_subregion *srgn, *prev_srgn = NULL;
151         int set_bit_len;
152         int bitmap_len;
153         unsigned long flags;
154
155 next_srgn:
156         rgn = hpb->rgn_tbl + rgn_idx;
157         srgn = rgn->srgn_tbl + srgn_idx;
158
159         if (likely(!srgn->is_last))
160                 bitmap_len = hpb->entries_per_srgn;
161         else
162                 bitmap_len = hpb->last_srgn_entries;
163
164         if ((srgn_offset + cnt) > bitmap_len)
165                 set_bit_len = bitmap_len - srgn_offset;
166         else
167                 set_bit_len = cnt;
168
169         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
170         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
171                 if (set_dirty) {
172                         if (srgn->srgn_state == HPB_SRGN_VALID)
173                                 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
174                                            set_bit_len);
175                 } else if (hpb->is_hcm) {
176                          /* rewind the read timer for lru regions */
177                         rgn->read_timeout = ktime_add_ms(ktime_get(),
178                                         rgn->hpb->params.read_timeout_ms);
179                         rgn->read_timeout_expiries =
180                                 rgn->hpb->params.read_timeout_expiries;
181                 }
182         }
183         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
184
185         if (hpb->is_hcm && prev_srgn != srgn) {
186                 bool activate = false;
187
188                 spin_lock(&rgn->rgn_lock);
189                 if (set_dirty) {
190                         rgn->reads -= srgn->reads;
191                         srgn->reads = 0;
192                         set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
193                 } else {
194                         srgn->reads++;
195                         rgn->reads++;
196                         if (srgn->reads == hpb->params.activation_thld)
197                                 activate = true;
198                 }
199                 spin_unlock(&rgn->rgn_lock);
200
201                 if (activate ||
202                     test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
203                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
204                         ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
205                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
206                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
207                                 "activate region %d-%d\n", rgn_idx, srgn_idx);
208                 }
209
210                 prev_srgn = srgn;
211         }
212
213         srgn_offset = 0;
214         if (++srgn_idx == hpb->srgns_per_rgn) {
215                 srgn_idx = 0;
216                 rgn_idx++;
217         }
218
219         cnt -= set_bit_len;
220         if (cnt > 0)
221                 goto next_srgn;
222 }
223
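/*
 * Return true if any entry in the given LPN range is dirty or cannot be
 * served from the host-side map (invalid subregion or missing mctx).
 */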
224 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
225                                   int srgn_idx, int srgn_offset, int cnt)
226 {
227         struct ufshpb_region *rgn;
228         struct ufshpb_subregion *srgn;
229         int bitmap_len;
230         int bit_len;
231
232 next_srgn:
233         rgn = hpb->rgn_tbl + rgn_idx;
234         srgn = rgn->srgn_tbl + srgn_idx;
235
236         if (likely(!srgn->is_last))
237                 bitmap_len = hpb->entries_per_srgn;
238         else
239                 bitmap_len = hpb->last_srgn_entries;
240
241         if (!ufshpb_is_valid_srgn(rgn, srgn))
242                 return true;
243
244         /*
245          * If the region state is active, mctx must be allocated.
246          * In this case, check whether the region was evicted or
247          * the mctx allocation failed.
248          */
249         if (unlikely(!srgn->mctx)) {
250                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
251                         "no mctx in region %d subregion %d.\n",
252                         srgn->rgn_idx, srgn->srgn_idx);
253                 return true;
254         }
255
256         if ((srgn_offset + cnt) > bitmap_len)
257                 bit_len = bitmap_len - srgn_offset;
258         else
259                 bit_len = cnt;
260
261         if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
262                           srgn_offset) < bit_len + srgn_offset)
263                 return true;
264
265         srgn_offset = 0;
266         if (++srgn_idx == hpb->srgns_per_rgn) {
267                 srgn_idx = 0;
268                 rgn_idx++;
269         }
270
271         cnt -= bit_len;
272         if (cnt > 0)
273                 goto next_srgn;
274
275         return false;
276 }
277
278 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
279 {
280         return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
281 }
282
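/*
 * Copy up to @len ppn entries starting at @pos from the mctx pages into
 * @ppn_buf. Returns the number of entries copied, which may be fewer than
 * @len when the range crosses a page boundary.
 */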
283 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
284                                      struct ufshpb_map_ctx *mctx, int pos,
285                                      int len, __be64 *ppn_buf)
286 {
287         struct page *page;
288         int index, offset;
289         int copied;
290
291         index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
292         offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
293
294         if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
295                 copied = len;
296         else
297                 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
298
299         page = mctx->m_page[index];
300         if (unlikely(!page)) {
301                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
302                         "error. cannot find page in mctx\n");
303                 return -ENOMEM;
304         }
305
306         memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
307                copied * HPB_ENTRY_SIZE);
308
309         return copied;
310 }
311
312 static void
313 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
314                         int *srgn_idx, int *offset)
315 {
316         int rgn_offset;
317
318         *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
319         rgn_offset = lpn & hpb->entries_per_rgn_mask;
320         *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
321         *offset = rgn_offset & hpb->entries_per_srgn_mask;
322 }
323
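/*
 * Rewrite the CDB as an HPB READ (UFSHPB_READ) carrying the cached ppn and
 * the transfer length.
 */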
324 static void
325 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
326                             __be64 ppn, u8 transfer_len)
327 {
328         unsigned char *cdb = lrbp->cmd->cmnd;
329         __be64 ppn_tmp = ppn;
330         cdb[0] = UFSHPB_READ;
331
332         if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
333                 ppn_tmp = (__force __be64)swab64((__force u64)ppn);
334
335         /* ppn value is stored as big-endian in the host memory */
336         memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
337         cdb[14] = transfer_len;
338         cdb[15] = 0;
339
340         lrbp->cmd->cmd_len = UFS_CDB_SIZE;
341 }
342
343 /*
344  * This function sets up an HPB READ command using host-side L2P map data.
345  */
346 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
347 {
348         struct ufshpb_lu *hpb;
349         struct ufshpb_region *rgn;
350         struct ufshpb_subregion *srgn;
351         struct scsi_cmnd *cmd = lrbp->cmd;
352         u32 lpn;
353         __be64 ppn;
354         unsigned long flags;
355         int transfer_len, rgn_idx, srgn_idx, srgn_offset;
356         int err = 0;
357
358         hpb = ufshpb_get_hpb_data(cmd->device);
359         if (!hpb)
360                 return -ENODEV;
361
362         if (ufshpb_get_state(hpb) == HPB_INIT)
363                 return -ENODEV;
364
365         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
366                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
367                            "%s: ufshpb state is not PRESENT", __func__);
368                 return -ENODEV;
369         }
370
371         if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
372             (!ufshpb_is_write_or_discard(cmd) &&
373              !ufshpb_is_read_cmd(cmd)))
374                 return 0;
375
376         transfer_len = sectors_to_logical(cmd->device,
377                                           blk_rq_sectors(scsi_cmd_to_rq(cmd)));
378         if (unlikely(!transfer_len))
379                 return 0;
380
381         lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
382         ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
383         rgn = hpb->rgn_tbl + rgn_idx;
384         srgn = rgn->srgn_tbl + srgn_idx;
385
386         /* If the command is a WRITE or DISCARD, mark the covered bitmap as dirty */
387         if (ufshpb_is_write_or_discard(cmd)) {
388                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
389                                    transfer_len, true);
390                 return 0;
391         }
392
393         if (!ufshpb_is_supported_chunk(hpb, transfer_len))
394                 return 0;
395
396         if (hpb->is_hcm) {
397                 /*
398                  * in host control mode, reads are the main source for
399                  * activation trials.
400                  */
401                 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
402                                    transfer_len, false);
403
404                 /* keep those counters normalized */
405                 if (rgn->reads > hpb->entries_per_srgn)
406                         schedule_work(&hpb->ufshpb_normalization_work);
407         }
408
409         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
410         if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
411                                    transfer_len)) {
412                 hpb->stats.miss_cnt++;
413                 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
414                 return 0;
415         }
416
417         err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
418         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
419         if (unlikely(err < 0)) {
420                 /*
421                  * In this case, the region state is active,
422                  * but the ppn table is not allocated.
423                  * The ppn table must always be allocated while the
424                  * region is in the active state.
425                  */
426                 dev_err(hba->dev, "get ppn failed. err %d\n", err);
427                 return err;
428         }
429
430         ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
431
432         hpb->stats.hit_cnt++;
433         return 0;
434 }
435
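/*
 * Allocate an ufshpb_req and the passthrough block request backing it.
 * Unless @atomic, the allocation is retried a few times when no tag is
 * available.
 */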
436 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
437                                          int rgn_idx, enum req_opf dir,
438                                          bool atomic)
439 {
440         struct ufshpb_req *rq;
441         struct request *req;
442         int retries = HPB_MAP_REQ_RETRIES;
443
444         rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
445         if (!rq)
446                 return NULL;
447
448 retry:
449         req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
450                               BLK_MQ_REQ_NOWAIT);
451
452         if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
453                 usleep_range(3000, 3100);
454                 goto retry;
455         }
456
457         if (IS_ERR(req))
458                 goto free_rq;
459
460         rq->hpb = hpb;
461         rq->req = req;
462         rq->rb.rgn_idx = rgn_idx;
463
464         return rq;
465
466 free_rq:
467         kmem_cache_free(hpb->map_req_cache, rq);
468         return NULL;
469 }
470
471 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
472 {
473         blk_mq_free_request(rq->req);
474         kmem_cache_free(hpb->map_req_cache, rq);
475 }
476
477 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
478                                              struct ufshpb_subregion *srgn)
479 {
480         struct ufshpb_req *map_req;
481         struct bio *bio;
482         unsigned long flags;
483
484         if (hpb->is_hcm &&
485             hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
486                 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
487                          "map_req throttle. inflight %d throttle %d",
488                          hpb->num_inflight_map_req,
489                          hpb->params.inflight_map_req);
490                 return NULL;
491         }
492
493         map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
494         if (!map_req)
495                 return NULL;
496
497         bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
498         if (!bio) {
499                 ufshpb_put_req(hpb, map_req);
500                 return NULL;
501         }
502
503         map_req->bio = bio;
504
505         map_req->rb.srgn_idx = srgn->srgn_idx;
506         map_req->rb.mctx = srgn->mctx;
507
508         spin_lock_irqsave(&hpb->param_lock, flags);
509         hpb->num_inflight_map_req++;
510         spin_unlock_irqrestore(&hpb->param_lock, flags);
511
512         return map_req;
513 }
514
515 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
516                                struct ufshpb_req *map_req)
517 {
518         unsigned long flags;
519
520         bio_put(map_req->bio);
521         ufshpb_put_req(hpb, map_req);
522
523         spin_lock_irqsave(&hpb->param_lock, flags);
524         hpb->num_inflight_map_req--;
525         spin_unlock_irqrestore(&hpb->param_lock, flags);
526 }
527
528 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
529                                      struct ufshpb_subregion *srgn)
530 {
531         struct ufshpb_region *rgn;
532         u32 num_entries = hpb->entries_per_srgn;
533
534         if (!srgn->mctx) {
535                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
536                         "no mctx in region %d subregion %d.\n",
537                         srgn->rgn_idx, srgn->srgn_idx);
538                 return -1;
539         }
540
541         if (unlikely(srgn->is_last))
542                 num_entries = hpb->last_srgn_entries;
543
544         bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
545
546         rgn = hpb->rgn_tbl + srgn->rgn_idx;
547         clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
548
549         return 0;
550 }
551
552 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
553                                       int srgn_idx)
554 {
555         struct ufshpb_region *rgn;
556         struct ufshpb_subregion *srgn;
557
558         rgn = hpb->rgn_tbl + rgn_idx;
559         srgn = rgn->srgn_tbl + srgn_idx;
560
561         list_del_init(&rgn->list_inact_rgn);
562
563         if (list_empty(&srgn->list_act_srgn))
564                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
565
566         hpb->stats.rcmd_active_cnt++;
567 }
568
569 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
570 {
571         struct ufshpb_region *rgn;
572         struct ufshpb_subregion *srgn;
573         int srgn_idx;
574
575         rgn = hpb->rgn_tbl + rgn_idx;
576
577         for_each_sub_region(rgn, srgn_idx, srgn)
578                 list_del_init(&srgn->list_act_srgn);
579
580         if (list_empty(&rgn->list_inact_rgn))
581                 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
582
583         hpb->stats.rcmd_inactive_cnt++;
584 }
585
586 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
587                                       struct ufshpb_subregion *srgn)
588 {
589         struct ufshpb_region *rgn;
590
591         /*
592          * If the subregion has no mctx after the HPB_READ_BUFFER I/O
593          * has completed, the region to which the subregion belongs was
594          * evicted. The region must not be evicted while its I/O is in
595          * progress.
596          */
597         if (!srgn->mctx) {
598                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
599                         "no mctx in region %d subregion %d.\n",
600                         srgn->rgn_idx, srgn->srgn_idx);
601                 srgn->srgn_state = HPB_SRGN_INVALID;
602                 return;
603         }
604
605         rgn = hpb->rgn_tbl + srgn->rgn_idx;
606
607         if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
608                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
609                         "region %d subregion %d evicted\n",
610                         srgn->rgn_idx, srgn->srgn_idx);
611                 srgn->srgn_state = HPB_SRGN_INVALID;
612                 return;
613         }
614         srgn->srgn_state = HPB_SRGN_VALID;
615 }
616
617 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
618 {
619         struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
620
621         ufshpb_put_req(umap_req->hpb, umap_req);
622 }
623
624 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
625 {
626         struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
627         struct ufshpb_lu *hpb = map_req->hpb;
628         struct ufshpb_subregion *srgn;
629         unsigned long flags;
630
631         srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
632                 map_req->rb.srgn_idx;
633
634         ufshpb_clear_dirty_bitmap(hpb, srgn);
635         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
636         ufshpb_activate_subregion(hpb, srgn);
637         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
638
639         ufshpb_put_map_req(map_req->hpb, map_req);
640 }
641
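/*
 * Build an HPB WRITE BUFFER CDB that inactivates a single region (@rgn set)
 * or all regions (@rgn == NULL).
 */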
642 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
643 {
644         cdb[0] = UFSHPB_WRITE_BUFFER;
645         cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
646                           UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
647         if (rgn)
648                 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
649         cdb[9] = 0x00;
650 }
651
652 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
653                                     int srgn_idx, int srgn_mem_size)
654 {
655         cdb[0] = UFSHPB_READ_BUFFER;
656         cdb[1] = UFSHPB_READ_BUFFER_ID;
657
658         put_unaligned_be16(rgn_idx, &cdb[2]);
659         put_unaligned_be16(srgn_idx, &cdb[4]);
660         put_unaligned_be24(srgn_mem_size, &cdb[6]);
661
662         cdb[9] = 0x00;
663 }
664
665 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
666                                    struct ufshpb_req *umap_req,
667                                    struct ufshpb_region *rgn)
668 {
669         struct request *req = umap_req->req;
670         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
671
672         req->timeout = 0;
673         req->end_io_data = umap_req;
674
675         ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
676         scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
677
678         blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
679
680         hpb->stats.umap_req_cnt++;
681 }
682
683 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
684                                   struct ufshpb_req *map_req, bool last)
685 {
686         struct request_queue *q;
687         struct request *req;
688         struct scsi_cmnd *scmd;
689         int mem_size = hpb->srgn_mem_size;
690         int ret = 0;
691         int i;
692
693         q = hpb->sdev_ufs_lu->request_queue;
694         for (i = 0; i < hpb->pages_per_srgn; i++) {
695                 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
696                                       PAGE_SIZE, 0);
697                 if (ret != PAGE_SIZE) {
698                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
699                                    "bio_add_pc_page fail %d - %d\n",
700                                    map_req->rb.rgn_idx, map_req->rb.srgn_idx);
701                         return ret;
702                 }
703         }
704
705         req = map_req->req;
706
707         blk_rq_append_bio(req, map_req->bio);
708
709         req->end_io_data = map_req;
710
711         if (unlikely(last))
712                 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
713
714         scmd = blk_mq_rq_to_pdu(req);
715         ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
716                                 map_req->rb.srgn_idx, mem_size);
717         scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
718
719         blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
720
721         hpb->stats.map_req_cnt++;
722         return 0;
723 }
724
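/*
 * Allocate a map context: the ppn_dirty bitmap and the pages that hold one
 * subregion's L2P entries.
 */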
725 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
726                                                  bool last)
727 {
728         struct ufshpb_map_ctx *mctx;
729         u32 num_entries = hpb->entries_per_srgn;
730         int i, j;
731
732         mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
733         if (!mctx)
734                 return NULL;
735
736         mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
737         if (!mctx->m_page)
738                 goto release_mctx;
739
740         if (unlikely(last))
741                 num_entries = hpb->last_srgn_entries;
742
743         mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
744         if (!mctx->ppn_dirty)
745                 goto release_m_page;
746
747         for (i = 0; i < hpb->pages_per_srgn; i++) {
748                 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
749                 if (!mctx->m_page[i]) {
750                         for (j = 0; j < i; j++)
751                                 mempool_free(mctx->m_page[j], ufshpb_page_pool);
752                         goto release_ppn_dirty;
753                 }
754                 clear_page(page_address(mctx->m_page[i]));
755         }
756
757         return mctx;
758
759 release_ppn_dirty:
760         bitmap_free(mctx->ppn_dirty);
761 release_m_page:
762         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
763 release_mctx:
764         mempool_free(mctx, ufshpb_mctx_pool);
765         return NULL;
766 }
767
768 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
769                                struct ufshpb_map_ctx *mctx)
770 {
771         int i;
772
773         for (i = 0; i < hpb->pages_per_srgn; i++)
774                 mempool_free(mctx->m_page[i], ufshpb_page_pool);
775
776         bitmap_free(mctx->ppn_dirty);
777         kmem_cache_free(hpb->m_page_cache, mctx->m_page);
778         mempool_free(mctx, ufshpb_mctx_pool);
779 }
780
781 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
782                                           struct ufshpb_region *rgn)
783 {
784         struct ufshpb_subregion *srgn;
785         int srgn_idx;
786
787         for_each_sub_region(rgn, srgn_idx, srgn)
788                 if (srgn->srgn_state == HPB_SRGN_ISSUED)
789                         return -EPERM;
790
791         return 0;
792 }
793
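/*
 * Read-timeout worker (host control mode): expired LRU regions either get a
 * fresh timeout or, if dirty or out of expiries, are queued for inactivation.
 */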
794 static void ufshpb_read_to_handler(struct work_struct *work)
795 {
796         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
797                                              ufshpb_read_to_work.work);
798         struct victim_select_info *lru_info = &hpb->lru_info;
799         struct ufshpb_region *rgn, *next_rgn;
800         unsigned long flags;
801         unsigned int poll;
802         LIST_HEAD(expired_list);
803
804         if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
805                 return;
806
807         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
808
809         list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
810                                  list_lru_rgn) {
811                 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
812
813                 if (timedout) {
814                         rgn->read_timeout_expiries--;
815                         if (is_rgn_dirty(rgn) ||
816                             rgn->read_timeout_expiries == 0)
817                                 list_add(&rgn->list_expired_rgn, &expired_list);
818                         else
819                                 rgn->read_timeout = ktime_add_ms(ktime_get(),
820                                                 hpb->params.read_timeout_ms);
821                 }
822         }
823
824         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
825
826         list_for_each_entry_safe(rgn, next_rgn, &expired_list,
827                                  list_expired_rgn) {
828                 list_del_init(&rgn->list_expired_rgn);
829                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
830                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
831                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
832         }
833
834         ufshpb_kick_map_work(hpb);
835
836         clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
837
838         poll = hpb->params.timeout_polling_interval_ms;
839         schedule_delayed_work(&hpb->ufshpb_read_to_work,
840                               msecs_to_jiffies(poll));
841 }
842
843 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
844                                 struct ufshpb_region *rgn)
845 {
846         rgn->rgn_state = HPB_RGN_ACTIVE;
847         list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
848         atomic_inc(&lru_info->active_cnt);
849         if (rgn->hpb->is_hcm) {
850                 rgn->read_timeout =
851                         ktime_add_ms(ktime_get(),
852                                      rgn->hpb->params.read_timeout_ms);
853                 rgn->read_timeout_expiries =
854                         rgn->hpb->params.read_timeout_expiries;
855         }
856 }
857
858 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
859                                 struct ufshpb_region *rgn)
860 {
861         list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
862 }
863
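/*
 * Pick an eviction victim: the least recently used region with no subregion
 * in the ISSUED state and, in host control mode, a read count at or below
 * the eviction exit threshold.
 */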
864 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
865 {
866         struct victim_select_info *lru_info = &hpb->lru_info;
867         struct ufshpb_region *rgn, *victim_rgn = NULL;
868
869         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
870                 if (ufshpb_check_srgns_issue_state(hpb, rgn))
871                         continue;
872
873                 /*
874                  * in host control mode, only evict a region whose read
875                  * count is at or below the eviction exit threshold
876                  */
877                 if (hpb->is_hcm &&
878                     rgn->reads > hpb->params.eviction_thld_exit)
879                         continue;
880
881                 victim_rgn = rgn;
882                 break;
883         }
884
885         if (!victim_rgn)
886                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
887                         "%s: no region allocated\n",
888                         __func__);
889
890         return victim_rgn;
891 }
892
893 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
894                                     struct ufshpb_region *rgn)
895 {
896         list_del_init(&rgn->list_lru_rgn);
897         rgn->rgn_state = HPB_RGN_INACTIVE;
898         atomic_dec(&lru_info->active_cnt);
899 }
900
901 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
902                                           struct ufshpb_subregion *srgn)
903 {
904         if (srgn->srgn_state != HPB_SRGN_UNUSED) {
905                 ufshpb_put_map_ctx(hpb, srgn->mctx);
906                 srgn->srgn_state = HPB_SRGN_UNUSED;
907                 srgn->mctx = NULL;
908         }
909 }
910
911 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
912                                  struct ufshpb_region *rgn,
913                                  bool atomic)
914 {
915         struct ufshpb_req *umap_req;
916         int rgn_idx = rgn ? rgn->rgn_idx : 0;
917
918         umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
919         if (!umap_req)
920                 return -ENOMEM;
921
922         ufshpb_execute_umap_req(hpb, umap_req, rgn);
923
924         return 0;
925 }
926
927 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
928                                         struct ufshpb_region *rgn)
929 {
930         return ufshpb_issue_umap_req(hpb, rgn, true);
931 }
932
933 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
934                                  struct ufshpb_region *rgn)
935 {
936         struct victim_select_info *lru_info;
937         struct ufshpb_subregion *srgn;
938         int srgn_idx;
939
940         lru_info = &hpb->lru_info;
941
942         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
943
944         ufshpb_cleanup_lru_info(lru_info, rgn);
945
946         for_each_sub_region(rgn, srgn_idx, srgn)
947                 ufshpb_purge_active_subregion(hpb, srgn);
948 }
949
950 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
951 {
952         unsigned long flags;
953         int ret = 0;
954
955         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
956         if (rgn->rgn_state == HPB_RGN_PINNED) {
957                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
958                          "pinned region cannot drop-out. region %d\n",
959                          rgn->rgn_idx);
960                 goto out;
961         }
962
963         if (!list_empty(&rgn->list_lru_rgn)) {
964                 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
965                         ret = -EBUSY;
966                         goto out;
967                 }
968
969                 if (hpb->is_hcm) {
970                         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
971                         ret = ufshpb_issue_umap_single_req(hpb, rgn);
972                         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
973                         if (ret)
974                                 goto out;
975                 }
976
977                 __ufshpb_evict_region(hpb, rgn);
978         }
979 out:
980         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
981         return ret;
982 }
983
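/*
 * Move @srgn to the ISSUED state and send an HPB READ BUFFER to load its L2P
 * map data. Most failures return -EAGAIN so the activation can be retried.
 */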
984 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
985                                 struct ufshpb_region *rgn,
986                                 struct ufshpb_subregion *srgn)
987 {
988         struct ufshpb_req *map_req;
989         unsigned long flags;
990         int ret;
991         int err = -EAGAIN;
992         bool alloc_required = false;
993         enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
994
995         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
996
997         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
998                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
999                            "%s: ufshpb state is not PRESENT\n", __func__);
1000                 goto unlock_out;
1001         }
1002
1003         if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1004             (srgn->srgn_state == HPB_SRGN_INVALID)) {
1005                 err = 0;
1006                 goto unlock_out;
1007         }
1008
1009         if (srgn->srgn_state == HPB_SRGN_UNUSED)
1010                 alloc_required = true;
1011
1012         /*
1013          * If the subregion is already in the ISSUED state, a device-side
1014          * event (e.g. GC or wear-leveling) has occurred and an HPB
1015          * response requesting a map reload was received.
1016          * In this case, once the current HPB_READ_BUFFER finishes,
1017          * another HPB_READ_BUFFER is issued to obtain the latest
1018          * map data.
1019          */
1020         if (srgn->srgn_state == HPB_SRGN_ISSUED)
1021                 goto unlock_out;
1022
1023         srgn->srgn_state = HPB_SRGN_ISSUED;
1024         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1025
1026         if (alloc_required) {
1027                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1028                 if (!srgn->mctx) {
1029                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1030                             "get map_ctx failed. region %d - %d\n",
1031                             rgn->rgn_idx, srgn->srgn_idx);
1032                         state = HPB_SRGN_UNUSED;
1033                         goto change_srgn_state;
1034                 }
1035         }
1036
1037         map_req = ufshpb_get_map_req(hpb, srgn);
1038         if (!map_req)
1039                 goto change_srgn_state;
1040
1042         ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1043         if (ret) {
1044                 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1045                            "%s: issue map_req failed: %d, region %d - %d\n",
1046                            __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1047                 goto free_map_req;
1048         }
1049         return 0;
1050
1051 free_map_req:
1052         ufshpb_put_map_req(hpb, map_req);
1053 change_srgn_state:
1054         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1055         srgn->srgn_state = state;
1056 unlock_out:
1057         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1058         return err;
1059 }
1060
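/*
 * Make @rgn active: refresh its LRU position if it is already active,
 * otherwise add it to the LRU list, evicting a victim first when the
 * active-region limit has been reached.
 */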
1061 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1062 {
1063         struct ufshpb_region *victim_rgn = NULL;
1064         struct victim_select_info *lru_info = &hpb->lru_info;
1065         unsigned long flags;
1066         int ret = 0;
1067
1068         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1069         /*
1070          * If the region is already on the lru_list, it is already in the
1071          * active state; just refresh its position in the LRU list (mark it
1072          * most recently used).
1073          */
1074         if (!list_empty(&rgn->list_lru_rgn)) {
1075                 ufshpb_hit_lru_info(lru_info, rgn);
1076                 goto out;
1077         }
1078
1079         if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1080                 if (atomic_read(&lru_info->active_cnt) ==
1081                     lru_info->max_lru_active_cnt) {
1082                         /*
1083                          * If the maximum number of active regions has been
1084                          * reached, evict the least recently used region.
1085                          * This case may occur when the device reports its
1086                          * eviction information late.
1087                          * It is okay to evict the least recently used region,
1088                          * because the device can detect that it was evicted
1089                          * from the host no longer issuing HPB_READ for it.
1090                          *
1091                          * in host control mode, also verify that the entering
1092                          * region has accumulated enough reads
1093                          */
1094                         if (hpb->is_hcm &&
1095                             rgn->reads < hpb->params.eviction_thld_enter) {
1096                                 ret = -EACCES;
1097                                 goto out;
1098                         }
1099
1100                         victim_rgn = ufshpb_victim_lru_info(hpb);
1101                         if (!victim_rgn) {
1102                                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1103                                     "cannot get victim region %s\n",
1104                                     hpb->is_hcm ? "" : "error");
1105                                 ret = -ENOMEM;
1106                                 goto out;
1107                         }
1108
1109                         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1110                                 "LRU full (%d), choose victim %d\n",
1111                                 atomic_read(&lru_info->active_cnt),
1112                                 victim_rgn->rgn_idx);
1113
1114                         if (hpb->is_hcm) {
1115                                 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1116                                                        flags);
1117                                 ret = ufshpb_issue_umap_single_req(hpb,
1118                                                                 victim_rgn);
1119                                 spin_lock_irqsave(&hpb->rgn_state_lock,
1120                                                   flags);
1121                                 if (ret)
1122                                         goto out;
1123                         }
1124
1125                         __ufshpb_evict_region(hpb, victim_rgn);
1126                 }
1127
1128                 /*
1129                  * When a region is added to the lru_info list_head, it is
1130                  * guaranteed that its subregions have all been assigned an
1131                  * mctx. If that fails, mctx allocation is retried without
1132                  * the region being added to the lru_info list_head.
1133                  */
1134                 ufshpb_add_lru_info(lru_info, rgn);
1135         }
1136 out:
1137         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1138         return ret;
1139 }
1140 /**
1141  * ufshpb_submit_region_inactive() - submit a region to be inactivated later
1142  * @hpb: per-LU HPB instance
1143  * @region_index: the index associated with the region that will be inactivated later
1144  */
1145 static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index)
1146 {
1147         int subregion_index;
1148         struct ufshpb_region *rgn;
1149         struct ufshpb_subregion *srgn;
1150
1151         /*
1152          * Remove this region from active region list and add it to inactive list
1153          */
1154         spin_lock(&hpb->rsp_list_lock);
1155         ufshpb_update_inactive_info(hpb, region_index);
1156         spin_unlock(&hpb->rsp_list_lock);
1157
1158         rgn = hpb->rgn_tbl + region_index;
1159
1160         /*
1161          * Set the subregion state to HPB_SRGN_INVALID; there will be no HPB read on this subregion
1162          */
1163         spin_lock(&hpb->rgn_state_lock);
1164         if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1165                 for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) {
1166                         srgn = rgn->srgn_tbl + subregion_index;
1167                         if (srgn->srgn_state == HPB_SRGN_VALID)
1168                                 srgn->srgn_state = HPB_SRGN_INVALID;
1169                 }
1170         }
1171         spin_unlock(&hpb->rgn_state_lock);
1172 }
1173
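/*
 * Handle a REQ_REGION_UPDATE recommendation: queue the listed subregions for
 * activation and, in device control mode, the listed regions for
 * inactivation, then kick map_work.
 */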
1174 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1175                                          struct utp_hpb_rsp *rsp_field)
1176 {
1177         struct ufshpb_region *rgn;
1178         struct ufshpb_subregion *srgn;
1179         int i, rgn_i, srgn_i;
1180
1181         BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1182         /*
1183          * If the active region and the inactive region are the same,
1184          * we will inactivate this region.
1185          * The device can check this (the region was inactivated) and
1186          * will respond with the proper active region information.
1187          */
1188         for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1189                 rgn_i =
1190                         be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1191                 srgn_i =
1192                         be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1193
1194                 rgn = hpb->rgn_tbl + rgn_i;
1195                 if (hpb->is_hcm &&
1196                     (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1197                         /*
1198                          * in host control mode, subregion activation
1199                          * recommendations are only allowed for active regions.
1200                          * Also, ignore recommendations for dirty regions - the
1201                          * host makes decisions concerning those by itself
1202                          */
1203                         continue;
1204                 }
1205
1206                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1207                         "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1208
1209                 spin_lock(&hpb->rsp_list_lock);
1210                 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1211                 spin_unlock(&hpb->rsp_list_lock);
1212
1213                 srgn = rgn->srgn_tbl + srgn_i;
1214
1215                 /* blocking HPB_READ */
1216                 spin_lock(&hpb->rgn_state_lock);
1217                 if (srgn->srgn_state == HPB_SRGN_VALID)
1218                         srgn->srgn_state = HPB_SRGN_INVALID;
1219                 spin_unlock(&hpb->rgn_state_lock);
1220         }
1221
1222         if (hpb->is_hcm) {
1223                 /*
1224                  * in host control mode the device is not allowed to inactivate
1225                  * regions
1226                  */
1227                 goto out;
1228         }
1229
1230         for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1231                 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1232                 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i);
1233                 ufshpb_submit_region_inactive(hpb, rgn_i);
1234         }
1235
1236 out:
1237         dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1238                 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1239
1240         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1241                 queue_work(ufshpb_wq, &hpb->map_work);
1242 }
1243
1244 /*
1245  * Set RGN_FLAG_UPDATE on all active regions so that the host side reloads their L2P entries later
1246  */
1247 static void ufshpb_set_regions_update(struct ufshpb_lu *hpb)
1248 {
1249         struct victim_select_info *lru_info = &hpb->lru_info;
1250         struct ufshpb_region *rgn;
1251         unsigned long flags;
1252
1253         spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1254
1255         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1256                 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1257
1258         spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1259 }
1260
1261 static void ufshpb_dev_reset_handler(struct ufs_hba *hba)
1262 {
1263         struct scsi_device *sdev;
1264         struct ufshpb_lu *hpb;
1265
1266         __shost_for_each_device(sdev, hba->host) {
1267                 hpb = ufshpb_get_hpb_data(sdev);
1268                 if (!hpb)
1269                         continue;
1270
1271                 if (hpb->is_hcm) {
1272                         /*
1273                          * In HPB host control mode, if the device powered up and lost its HPB
1274                          * information, set RGN_FLAG_UPDATE on the active regions so that the host
1275                          * reloads its L2P entries (reactivates the regions in the UFS device).
1276                          */
1277                         ufshpb_set_regions_update(hpb);
1278                 } else {
1279                         /*
1280                          * In HPB device control mode, receiving 02h (HPB Operation) in the UPIU
1281                          * response means the device recommends that the host inactivate all active
1282                          * regions. Add all active regions to the inactive list here; they will be
1283                          * inactivated later in ufshpb_map_work_handler().
1284                          */
1285                         struct victim_select_info *lru_info = &hpb->lru_info;
1286                         struct ufshpb_region *rgn;
1287
1288                         list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1289                                 ufshpb_submit_region_inactive(hpb, rgn->rgn_idx);
1290
1291                         if (ufshpb_get_state(hpb) == HPB_PRESENT)
1292                                 queue_work(ufshpb_wq, &hpb->map_work);
1293                 }
1294         }
1295 }
1296
1297 /*
1298  * This function parses the recommended active subregion information in the
1299  * sense data field of a response UPIU with SAM_STAT_GOOD status.
1300  */
1301 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1302 {
1303         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1304         struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1305         int data_seg_len;
1306
1307         data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1308                 & MASK_RSP_UPIU_DATA_SEG_LEN;
1309
1310         /* If data segment length is zero, rsp_field is not valid */
1311         if (!data_seg_len)
1312                 return;
1313
1314         if (unlikely(lrbp->lun != rsp_field->lun)) {
1315                 struct scsi_device *sdev;
1316                 bool found = false;
1317
1318                 __shost_for_each_device(sdev, hba->host) {
1319                         hpb = ufshpb_get_hpb_data(sdev);
1320
1321                         if (!hpb)
1322                                 continue;
1323
1324                         if (rsp_field->lun == hpb->lun) {
1325                                 found = true;
1326                                 break;
1327                         }
1328                 }
1329
1330                 if (!found)
1331                         return;
1332         }
1333
1334         if (!hpb)
1335                 return;
1336
1337         if (ufshpb_get_state(hpb) == HPB_INIT)
1338                 return;
1339
1340         if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1341             (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1342                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1343                            "%s: ufshpb state is not PRESENT/SUSPEND\n",
1344                            __func__);
1345                 return;
1346         }
1347
1348         BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1349
1350         if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1351                 return;
1352
1353         hpb->stats.rcmd_noti_cnt++;
1354
1355         switch (rsp_field->hpb_op) {
1356         case HPB_RSP_REQ_REGION_UPDATE:
1357                 if (data_seg_len != DEV_DATA_SEG_LEN)
1358                         dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1359                                  "%s: data segment length mismatch.\n",
1360                                  __func__);
1361                 ufshpb_rsp_req_region_update(hpb, rsp_field);
1362                 break;
1363         case HPB_RSP_DEV_RESET:
1364                 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1365                          "UFS device lost HPB information during PM.\n");
1366                 ufshpb_dev_reset_handler(hba);
1367
1368                 break;
1369         default:
1370                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1371                            "hpb_op is not available: %d\n",
1372                            rsp_field->hpb_op);
1373                 break;
1374         }
1375 }
1376
1377 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1378                                    struct ufshpb_region *rgn,
1379                                    struct ufshpb_subregion *srgn)
1380 {
1381         if (!list_empty(&rgn->list_inact_rgn))
1382                 return;
1383
1384         if (!list_empty(&srgn->list_act_srgn)) {
1385                 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1386                 return;
1387         }
1388
1389         list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1390 }
1391
1392 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1393                                           struct ufshpb_region *rgn,
1394                                           struct list_head *pending_list)
1395 {
1396         struct ufshpb_subregion *srgn;
1397         int srgn_idx;
1398
1399         if (!list_empty(&rgn->list_inact_rgn))
1400                 return;
1401
1402         for_each_sub_region(rgn, srgn_idx, srgn)
1403                 if (!list_empty(&srgn->list_act_srgn))
1404                         return;
1405
1406         list_add_tail(&rgn->list_inact_rgn, pending_list);
1407 }
1408
1409 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1410 {
1411         struct ufshpb_region *rgn;
1412         struct ufshpb_subregion *srgn;
1413         unsigned long flags;
1414         int ret = 0;
1415
1416         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1417         while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1418                                                 struct ufshpb_subregion,
1419                                                 list_act_srgn))) {
1420                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1421                         break;
1422
1423                 list_del_init(&srgn->list_act_srgn);
1424                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1425
1426                 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1427                 ret = ufshpb_add_region(hpb, rgn);
1428                 if (ret)
1429                         goto active_failed;
1430
1431                 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1432                 if (ret) {
1433                         dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1434                             "issue map_req failed. ret %d, region %d - %d\n",
1435                             ret, rgn->rgn_idx, srgn->srgn_idx);
1436                         goto active_failed;
1437                 }
1438                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1439         }
1440         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1441         return;
1442
1443 active_failed:
1444         dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1445                    rgn->rgn_idx, srgn->srgn_idx);
1446         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1447         ufshpb_add_active_list(hpb, rgn, srgn);
1448         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1449 }
1450
1451 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1452 {
1453         struct ufshpb_region *rgn;
1454         unsigned long flags;
1455         int ret;
1456         LIST_HEAD(pending_list);
1457
1458         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1459         while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1460                                                struct ufshpb_region,
1461                                                list_inact_rgn))) {
1462                 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1463                         break;
1464
1465                 list_del_init(&rgn->list_inact_rgn);
1466                 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1467
1468                 ret = ufshpb_evict_region(hpb, rgn);
1469                 if (ret) {
1470                         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1471                         ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1472                         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1473                 }
1474
1475                 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1476         }
1477
1478         list_splice(&pending_list, &hpb->lh_inact_rgn);
1479         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1480 }
1481
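/*
 * Normalization worker (host control mode): scale down the per-subregion
 * read counters and queue for inactivation any active region left with no
 * reads.
 */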
1482 static void ufshpb_normalization_work_handler(struct work_struct *work)
1483 {
1484         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1485                                              ufshpb_normalization_work);
1486         int rgn_idx;
1487         u8 factor = hpb->params.normalization_factor;
1488
1489         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1490                 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1491                 int srgn_idx;
1492
1493                 spin_lock(&rgn->rgn_lock);
1494                 rgn->reads = 0;
1495                 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1496                         struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1497
1498                         srgn->reads >>= factor;
1499                         rgn->reads += srgn->reads;
1500                 }
1501                 spin_unlock(&rgn->rgn_lock);
1502
1503                 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1504                         continue;
1505
1506                 /* if region is active but has no reads - inactivate it */
1507                 spin_lock(&hpb->rsp_list_lock);
1508                 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1509                 spin_unlock(&hpb->rsp_list_lock);
1510         }
1511 }
1512
1513 static void ufshpb_map_work_handler(struct work_struct *work)
1514 {
1515         struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1516
1517         if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1518                 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1519                            "%s: ufshpb state is not PRESENT\n", __func__);
1520                 return;
1521         }
1522
1523         ufshpb_run_inactive_region_list(hpb);
1524         ufshpb_run_active_subregion_list(hpb);
1525 }
1526
1527 /*
1528  * This function does not need to hold any locks (rgn_state_lock,
1529  * rsp_list_lock, etc.) because it is only called during initialization.
1530  */
1531 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1532                                             struct ufshpb_lu *hpb,
1533                                             struct ufshpb_region *rgn)
1534 {
1535         struct ufshpb_subregion *srgn;
1536         int srgn_idx, i;
1537         int err = 0;
1538
1539         for_each_sub_region(rgn, srgn_idx, srgn) {
1540                 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1541                 srgn->srgn_state = HPB_SRGN_INVALID;
1542                 if (!srgn->mctx) {
1543                         err = -ENOMEM;
1544                         dev_err(hba->dev,
1545                                 "alloc mctx for pinned region failed\n");
1546                         goto release;
1547                 }
1548
1549                 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1550         }
1551
1552         rgn->rgn_state = HPB_RGN_PINNED;
1553         return 0;
1554
1555 release:
1556         for (i = 0; i < srgn_idx; i++) {
1557                 srgn = rgn->srgn_tbl + i;
1558                 ufshpb_put_map_ctx(hpb, srgn->mctx);
1559         }
1560         return err;
1561 }
1562
1563 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1564                                       struct ufshpb_region *rgn, bool last)
1565 {
1566         int srgn_idx;
1567         struct ufshpb_subregion *srgn;
1568
1569         for_each_sub_region(rgn, srgn_idx, srgn) {
1570                 INIT_LIST_HEAD(&srgn->list_act_srgn);
1571
1572                 srgn->rgn_idx = rgn->rgn_idx;
1573                 srgn->srgn_idx = srgn_idx;
1574                 srgn->srgn_state = HPB_SRGN_UNUSED;
1575         }
1576
1577         if (unlikely(last && hpb->last_srgn_entries))
1578                 srgn->is_last = true;
1579 }
1580
1581 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1582                                       struct ufshpb_region *rgn, int srgn_cnt)
1583 {
1584         rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1585                                  GFP_KERNEL);
1586         if (!rgn->srgn_tbl)
1587                 return -ENOMEM;
1588
1589         rgn->srgn_cnt = srgn_cnt;
1590         return 0;
1591 }
1592
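/*
 * Translate the descriptor geometry into per-LU parameters.  rgn_size and
 * srgn_size are exponents applied to HPB_RGN_SIZE_UNIT bytes of LBA space,
 * and every HPB_ENTRY_BLOCK_SIZE logical block needs one HPB_ENTRY_SIZE map
 * entry.  As a rough example, assuming the usual unit sizes (512-byte region
 * unit, 4KB entry block, 8-byte entry), a region size exponent of 16 would
 * describe a 32MB region whose map occupies 64KB (32MB / 4KB * 8 bytes).
 */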
1593 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1594                                      struct ufshpb_lu *hpb,
1595                                      struct ufshpb_dev_info *hpb_dev_info,
1596                                      struct ufshpb_lu_info *hpb_lu_info)
1597 {
1598         u32 entries_per_rgn;
1599         u64 rgn_mem_size, tmp;
1600
1601         if (ufshpb_is_legacy(hba))
1602                 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1603         else
1604                 hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
1605
1606         hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1607         hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1608                 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1609                 : PINNED_NOT_SET;
1610         hpb->lru_info.max_lru_active_cnt =
1611                 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1612
1613         rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1614                         * HPB_ENTRY_SIZE;
1615         do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1616         hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1617                 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1618
1619         tmp = rgn_mem_size;
1620         do_div(tmp, HPB_ENTRY_SIZE);
1621         entries_per_rgn = (u32)tmp;
1622         hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1623         hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1624
1625         hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1626         hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1627         hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1628
1629         tmp = rgn_mem_size;
1630         do_div(tmp, hpb->srgn_mem_size);
1631         hpb->srgns_per_rgn = (int)tmp;
1632
1633         hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1634                                 entries_per_rgn);
1635         hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1636                                 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1637         hpb->last_srgn_entries = hpb_lu_info->num_blocks
1638                                  % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1639
1640         hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1641
1642         if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1643                 hpb->is_hcm = true;
1644 }
1645
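/*
 * Allocate the per-LU region table and a subregion table for every region.
 * The last region may hold fewer subregions, and its final subregion may be
 * only partially filled (last_srgn_entries).  Pinned regions are activated
 * immediately; all other regions start out HPB_RGN_INACTIVE.  On failure,
 * every subregion table allocated so far is freed again.
 */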
1646 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1647 {
1648         struct ufshpb_region *rgn_table, *rgn;
1649         int rgn_idx, i;
1650         int ret = 0;
1651
1652         rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1653                             GFP_KERNEL);
1654         if (!rgn_table)
1655                 return -ENOMEM;
1656
1657         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1658                 int srgn_cnt = hpb->srgns_per_rgn;
1659                 bool last_srgn = false;
1660
1661                 rgn = rgn_table + rgn_idx;
1662                 rgn->rgn_idx = rgn_idx;
1663
1664                 spin_lock_init(&rgn->rgn_lock);
1665
1666                 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1667                 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1668                 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1669
1670                 if (rgn_idx == hpb->rgns_per_lu - 1) {
1671                         srgn_cnt = ((hpb->srgns_per_lu - 1) %
1672                                     hpb->srgns_per_rgn) + 1;
1673                         last_srgn = true;
1674                 }
1675
1676                 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1677                 if (ret)
1678                         goto release_srgn_table;
1679                 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1680
1681                 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1682                         ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1683                         if (ret)
1684                                 goto release_srgn_table;
1685                 } else {
1686                         rgn->rgn_state = HPB_RGN_INACTIVE;
1687                 }
1688
1689                 rgn->rgn_flags = 0;
1690                 rgn->hpb = hpb;
1691         }
1692
1693         hpb->rgn_tbl = rgn_table;
1694
1695         return 0;
1696
1697 release_srgn_table:
1698         for (i = 0; i <= rgn_idx; i++)
1699                 kvfree(rgn_table[i].srgn_tbl);
1700
1701         kvfree(rgn_table);
1702         return ret;
1703 }
1704
1705 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1706                                          struct ufshpb_region *rgn)
1707 {
1708         int srgn_idx;
1709         struct ufshpb_subregion *srgn;
1710
1711         for_each_sub_region(rgn, srgn_idx, srgn)
1712                 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1713                         srgn->srgn_state = HPB_SRGN_UNUSED;
1714                         ufshpb_put_map_ctx(hpb, srgn->mctx);
1715                 }
1716 }
1717
1718 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1719 {
1720         int rgn_idx;
1721
1722         for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1723                 struct ufshpb_region *rgn;
1724
1725                 rgn = hpb->rgn_tbl + rgn_idx;
1726                 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1727                         rgn->rgn_state = HPB_RGN_INACTIVE;
1728
1729                         ufshpb_destroy_subregion_tbl(hpb, rgn);
1730                 }
1731
1732                 kvfree(rgn->srgn_tbl);
1733         }
1734
1735         kvfree(hpb->rgn_tbl);
1736 }
1737
1738 /* SYSFS functions: HPB statistics */
1739 #define ufshpb_sysfs_attr_show_func(__name)                             \
1740 static ssize_t __name##_show(struct device *dev,                        \
1741         struct device_attribute *attr, char *buf)                       \
1742 {                                                                       \
1743         struct scsi_device *sdev = to_scsi_device(dev);                 \
1744         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1745                                                                         \
1746         if (!hpb)                                                       \
1747                 return -ENODEV;                                         \
1748                                                                         \
1749         return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
1750 }                                                                       \
1751 \
1752 static DEVICE_ATTR_RO(__name)
1753
1754 ufshpb_sysfs_attr_show_func(hit_cnt);
1755 ufshpb_sysfs_attr_show_func(miss_cnt);
1756 ufshpb_sysfs_attr_show_func(rcmd_noti_cnt);
1757 ufshpb_sysfs_attr_show_func(rcmd_active_cnt);
1758 ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt);
1759 ufshpb_sysfs_attr_show_func(map_req_cnt);
1760 ufshpb_sysfs_attr_show_func(umap_req_cnt);
1761
1762 static struct attribute *hpb_dev_stat_attrs[] = {
1763         &dev_attr_hit_cnt.attr,
1764         &dev_attr_miss_cnt.attr,
1765         &dev_attr_rcmd_noti_cnt.attr,
1766         &dev_attr_rcmd_active_cnt.attr,
1767         &dev_attr_rcmd_inactive_cnt.attr,
1768         &dev_attr_map_req_cnt.attr,
1769         &dev_attr_umap_req_cnt.attr,
1770         NULL,
1771 };
1772
1773 struct attribute_group ufs_sysfs_hpb_stat_group = {
1774         .name = "hpb_stats",
1775         .attrs = hpb_dev_stat_attrs,
1776 };
1777
1778 /* SYSFS functions: HPB parameters */
1779 #define ufshpb_sysfs_param_show_func(__name)                            \
1780 static ssize_t __name##_show(struct device *dev,                        \
1781         struct device_attribute *attr, char *buf)                       \
1782 {                                                                       \
1783         struct scsi_device *sdev = to_scsi_device(dev);                 \
1784         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
1785                                                                         \
1786         if (!hpb)                                                       \
1787                 return -ENODEV;                                         \
1788                                                                         \
1789         return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
1790 }
1791
1792 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
1793 static ssize_t
1794 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1795                          const char *buf, size_t count)
1796 {
1797         struct scsi_device *sdev = to_scsi_device(dev);
1798         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1799         int val;
1800
1801         if (!hpb)
1802                 return -ENODEV;
1803
1804         if (kstrtouint(buf, 0, &val))
1805                 return -EINVAL;
1806
1807         if (val < 0)
1808                 return -EINVAL;
1809
1810         hpb->params.requeue_timeout_ms = val;
1811
1812         return count;
1813 }
1814 static DEVICE_ATTR_RW(requeue_timeout_ms);
1815
1816 ufshpb_sysfs_param_show_func(activation_thld);
1817 static ssize_t
1818 activation_thld_store(struct device *dev, struct device_attribute *attr,
1819                       const char *buf, size_t count)
1820 {
1821         struct scsi_device *sdev = to_scsi_device(dev);
1822         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1823         int val;
1824
1825         if (!hpb)
1826                 return -ENODEV;
1827
1828         if (!hpb->is_hcm)
1829                 return -EOPNOTSUPP;
1830
1831         if (kstrtouint(buf, 0, &val))
1832                 return -EINVAL;
1833
1834         if (val <= 0)
1835                 return -EINVAL;
1836
1837         hpb->params.activation_thld = val;
1838
1839         return count;
1840 }
1841 static DEVICE_ATTR_RW(activation_thld);
1842
1843 ufshpb_sysfs_param_show_func(normalization_factor);
1844 static ssize_t
1845 normalization_factor_store(struct device *dev, struct device_attribute *attr,
1846                            const char *buf, size_t count)
1847 {
1848         struct scsi_device *sdev = to_scsi_device(dev);
1849         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1850         int val;
1851
1852         if (!hpb)
1853                 return -ENODEV;
1854
1855         if (!hpb->is_hcm)
1856                 return -EOPNOTSUPP;
1857
1858         if (kstrtouint(buf, 0, &val))
1859                 return -EINVAL;
1860
1861         if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
1862                 return -EINVAL;
1863
1864         hpb->params.normalization_factor = val;
1865
1866         return count;
1867 }
1868 static DEVICE_ATTR_RW(normalization_factor);
1869
1870 ufshpb_sysfs_param_show_func(eviction_thld_enter);
1871 static ssize_t
1872 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
1873                           const char *buf, size_t count)
1874 {
1875         struct scsi_device *sdev = to_scsi_device(dev);
1876         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1877         int val;
1878
1879         if (!hpb)
1880                 return -ENODEV;
1881
1882         if (!hpb->is_hcm)
1883                 return -EOPNOTSUPP;
1884
1885         if (kstrtouint(buf, 0, &val))
1886                 return -EINVAL;
1887
1888         if (val <= hpb->params.eviction_thld_exit)
1889                 return -EINVAL;
1890
1891         hpb->params.eviction_thld_enter = val;
1892
1893         return count;
1894 }
1895 static DEVICE_ATTR_RW(eviction_thld_enter);
1896
1897 ufshpb_sysfs_param_show_func(eviction_thld_exit);
1898 static ssize_t
1899 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
1900                          const char *buf, size_t count)
1901 {
1902         struct scsi_device *sdev = to_scsi_device(dev);
1903         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1904         int val;
1905
1906         if (!hpb)
1907                 return -ENODEV;
1908
1909         if (!hpb->is_hcm)
1910                 return -EOPNOTSUPP;
1911
1912         if (kstrtouint(buf, 0, &val))
1913                 return -EINVAL;
1914
1915         if (val <= hpb->params.activation_thld)
1916                 return -EINVAL;
1917
1918         hpb->params.eviction_thld_exit = val;
1919
1920         return count;
1921 }
1922 static DEVICE_ATTR_RW(eviction_thld_exit);
1923
1924 ufshpb_sysfs_param_show_func(read_timeout_ms);
1925 static ssize_t
1926 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
1927                       const char *buf, size_t count)
1928 {
1929         struct scsi_device *sdev = to_scsi_device(dev);
1930         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1931         int val;
1932
1933         if (!hpb)
1934                 return -ENODEV;
1935
1936         if (!hpb->is_hcm)
1937                 return -EOPNOTSUPP;
1938
1939         if (kstrtouint(buf, 0, &val))
1940                 return -EINVAL;
1941
1942         /* read_timeout must be much greater than timeout_polling_interval */
1943         if (val < hpb->params.timeout_polling_interval_ms * 2)
1944                 return -EINVAL;
1945
1946         hpb->params.read_timeout_ms = val;
1947
1948         return count;
1949 }
1950 static DEVICE_ATTR_RW(read_timeout_ms);
1951
1952 ufshpb_sysfs_param_show_func(read_timeout_expiries);
1953 static ssize_t
1954 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
1955                             const char *buf, size_t count)
1956 {
1957         struct scsi_device *sdev = to_scsi_device(dev);
1958         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1959         int val;
1960
1961         if (!hpb)
1962                 return -ENODEV;
1963
1964         if (!hpb->is_hcm)
1965                 return -EOPNOTSUPP;
1966
1967         if (kstrtouint(buf, 0, &val))
1968                 return -EINVAL;
1969
1970         if (val <= 0)
1971                 return -EINVAL;
1972
1973         hpb->params.read_timeout_expiries = val;
1974
1975         return count;
1976 }
1977 static DEVICE_ATTR_RW(read_timeout_expiries);
1978
1979 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
1980 static ssize_t
1981 timeout_polling_interval_ms_store(struct device *dev,
1982                                   struct device_attribute *attr,
1983                                   const char *buf, size_t count)
1984 {
1985         struct scsi_device *sdev = to_scsi_device(dev);
1986         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
1987         int val;
1988
1989         if (!hpb)
1990                 return -ENODEV;
1991
1992         if (!hpb->is_hcm)
1993                 return -EOPNOTSUPP;
1994
1995         if (kstrtouint(buf, 0, &val))
1996                 return -EINVAL;
1997
1998         /* timeout_polling_interval must be much smaller than read_timeout */
1999         if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
2000                 return -EINVAL;
2001
2002         hpb->params.timeout_polling_interval_ms = val;
2003
2004         return count;
2005 }
2006 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
2007
2008 ufshpb_sysfs_param_show_func(inflight_map_req);
2009 static ssize_t inflight_map_req_store(struct device *dev,
2010                                       struct device_attribute *attr,
2011                                       const char *buf, size_t count)
2012 {
2013         struct scsi_device *sdev = to_scsi_device(dev);
2014         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2015         int val;
2016
2017         if (!hpb)
2018                 return -ENODEV;
2019
2020         if (!hpb->is_hcm)
2021                 return -EOPNOTSUPP;
2022
2023         if (kstrtouint(buf, 0, &val))
2024                 return -EINVAL;
2025
2026         if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
2027                 return -EINVAL;
2028
2029         hpb->params.inflight_map_req = val;
2030
2031         return count;
2032 }
2033 static DEVICE_ATTR_RW(inflight_map_req);
2034
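/*
 * Default tuning for host control mode.  The defaults keep
 * eviction_thld_enter > eviction_thld_exit > activation_thld, which is the
 * same ordering the sysfs store handlers above enforce.
 */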
2035 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2036 {
2037         hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2038         hpb->params.normalization_factor = 1;
2039         hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2040         hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2041         hpb->params.read_timeout_ms = READ_TO_MS;
2042         hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2043         hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2044         hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2045 }
2046
2047 static struct attribute *hpb_dev_param_attrs[] = {
2048         &dev_attr_requeue_timeout_ms.attr,
2049         &dev_attr_activation_thld.attr,
2050         &dev_attr_normalization_factor.attr,
2051         &dev_attr_eviction_thld_enter.attr,
2052         &dev_attr_eviction_thld_exit.attr,
2053         &dev_attr_read_timeout_ms.attr,
2054         &dev_attr_read_timeout_expiries.attr,
2055         &dev_attr_timeout_polling_interval_ms.attr,
2056         &dev_attr_inflight_map_req.attr,
2057         NULL,
2058 };
2059
2060 struct attribute_group ufs_sysfs_hpb_param_group = {
2061         .name = "hpb_params",
2062         .attrs = hpb_dev_param_attrs,
2063 };
2064
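/*
 * Pre-allocate the pre-request pool, sized to half of the LU queue depth.
 * Each entry carries a bio and a zeroed page that is used as the data
 * payload of an HPB WRITE BUFFER pre-request; free entries sit on
 * lh_pre_req_free until a command claims them.
 */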
2065 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2066 {
2067         struct ufshpb_req *pre_req = NULL, *t;
2068         int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2069         int i;
2070
2071         INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2072
2073         hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2074         hpb->throttle_pre_req = qd;
2075         hpb->num_inflight_pre_req = 0;
2076
2077         if (!hpb->pre_req)
2078                 goto release_mem;
2079
2080         for (i = 0; i < qd; i++) {
2081                 pre_req = hpb->pre_req + i;
2082                 INIT_LIST_HEAD(&pre_req->list_req);
2083                 pre_req->req = NULL;
2084
2085                 pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
2086                 if (!pre_req->bio)
2087                         goto release_mem;
2088
2089                 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2090                 if (!pre_req->wb.m_page) {
2091                         bio_put(pre_req->bio);
2092                         goto release_mem;
2093                 }
2094
2095                 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2096         }
2097
2098         return 0;
2099 release_mem:
2100         list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2101                 list_del_init(&pre_req->list_req);
2102                 bio_put(pre_req->bio);
2103                 __free_page(pre_req->wb.m_page);
2104         }
2105
2106         kfree(hpb->pre_req);
2107         return -ENOMEM;
2108 }
2109
2110 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2111 {
2112         struct ufshpb_req *pre_req = NULL;
2113         int i;
2114
2115         for (i = 0; i < hpb->throttle_pre_req; i++) {
2116                 pre_req = hpb->pre_req + i;
2117                 bio_put(hpb->pre_req[i].bio);
2118                 if (pre_req->wb.m_page)
2119                         __free_page(hpb->pre_req[i].wb.m_page);
2120                 list_del_init(&pre_req->list_req);
2121         }
2122
2123         kfree(hpb->pre_req);
2124 }
2125
2126 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2127 {
2128         hpb->stats.hit_cnt = 0;
2129         hpb->stats.miss_cnt = 0;
2130         hpb->stats.rcmd_noti_cnt = 0;
2131         hpb->stats.rcmd_active_cnt = 0;
2132         hpb->stats.rcmd_inactive_cnt = 0;
2133         hpb->stats.map_req_cnt = 0;
2134         hpb->stats.umap_req_cnt = 0;
2135 }
2136
2137 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2138 {
2139         hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2140         if (hpb->is_hcm)
2141                 ufshpb_hcm_param_init(hpb);
2142 }
2143
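/*
 * Per-LU initialization: set up locks, lists and work items, create the
 * map-request and map-page caches, the pre-request pool and the region
 * table, then initialize statistics and tunables.  In host control mode the
 * read-timeout poller is started here as well.  Each error label unwinds
 * exactly the resources allocated before the failure.
 */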
2144 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2145 {
2146         int ret;
2147
2148         spin_lock_init(&hpb->rgn_state_lock);
2149         spin_lock_init(&hpb->rsp_list_lock);
2150         spin_lock_init(&hpb->param_lock);
2151
2152         INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2153         INIT_LIST_HEAD(&hpb->lh_act_srgn);
2154         INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2155         INIT_LIST_HEAD(&hpb->list_hpb_lu);
2156
2157         INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2158         if (hpb->is_hcm) {
2159                 INIT_WORK(&hpb->ufshpb_normalization_work,
2160                           ufshpb_normalization_work_handler);
2161                 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2162                                   ufshpb_read_to_handler);
2163         }
2164
2165         hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2166                           sizeof(struct ufshpb_req), 0, 0, NULL);
2167         if (!hpb->map_req_cache) {
2168                 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2169                         hpb->lun);
2170                 return -ENOMEM;
2171         }
2172
2173         hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2174                           sizeof(struct page *) * hpb->pages_per_srgn,
2175                           0, 0, NULL);
2176         if (!hpb->m_page_cache) {
2177                 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2178                         hpb->lun);
2179                 ret = -ENOMEM;
2180                 goto release_req_cache;
2181         }
2182
2183         ret = ufshpb_pre_req_mempool_init(hpb);
2184         if (ret) {
2185                 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2186                         hpb->lun);
2187                 goto release_m_page_cache;
2188         }
2189
2190         ret = ufshpb_alloc_region_tbl(hba, hpb);
2191         if (ret)
2192                 goto release_pre_req_mempool;
2193
2194         ufshpb_stat_init(hpb);
2195         ufshpb_param_init(hpb);
2196
2197         if (hpb->is_hcm) {
2198                 unsigned int poll;
2199
2200                 poll = hpb->params.timeout_polling_interval_ms;
2201                 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2202                                       msecs_to_jiffies(poll));
2203         }
2204
2205         return 0;
2206
2207 release_pre_req_mempool:
2208         ufshpb_pre_req_mempool_destroy(hpb);
2209 release_m_page_cache:
2210         kmem_cache_destroy(hpb->m_page_cache);
2211 release_req_cache:
2212         kmem_cache_destroy(hpb->map_req_cache);
2213         return ret;
2214 }
2215
2216 static struct ufshpb_lu *
2217 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2218                     struct ufshpb_dev_info *hpb_dev_info,
2219                     struct ufshpb_lu_info *hpb_lu_info)
2220 {
2221         struct ufshpb_lu *hpb;
2222         int ret;
2223
2224         hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2225         if (!hpb)
2226                 return NULL;
2227
2228         hpb->lun = sdev->lun;
2229         hpb->sdev_ufs_lu = sdev;
2230
2231         ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2232
2233         ret = ufshpb_lu_hpb_init(hba, hpb);
2234         if (ret) {
2235                 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2236                 goto release_hpb;
2237         }
2238
2239         sdev->hostdata = hpb;
2240         return hpb;
2241
2242 release_hpb:
2243         kfree(hpb);
2244         return NULL;
2245 }
2246
2247 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2248 {
2249         struct ufshpb_region *rgn, *next_rgn;
2250         struct ufshpb_subregion *srgn, *next_srgn;
2251         unsigned long flags;
2252
2253         /*
2254          * If a device reset occurred, the remaining HPB region information
2255          * may be stale. Discard the HPB response lists that were left over
2256          * from before the reset to avoid doing unnecessary work.
2257          */
2258         spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2259         list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2260                                  list_inact_rgn)
2261                 list_del_init(&rgn->list_inact_rgn);
2262
2263         list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2264                                  list_act_srgn)
2265                 list_del_init(&srgn->list_act_srgn);
2266         spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2267 }
2268
2269 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2270 {
2271         if (hpb->is_hcm) {
2272                 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2273                 cancel_work_sync(&hpb->ufshpb_normalization_work);
2274         }
2275         cancel_work_sync(&hpb->map_work);
2276 }
2277
2278 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2279 {
2280         int err = 0;
2281         bool flag_res = true;
2282         int try;
2283
2284         /* wait for the device to complete HPB reset query */
2285         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2286                 dev_dbg(hba->dev,
2287                         "%s start flag reset polling %d times\n",
2288                         __func__, try);
2289
2290                 /* Poll the fHpbReset flag until the device clears it */
2291                 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2292                                 QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2293
2294                 if (err) {
2295                         dev_err(hba->dev,
2296                                 "%s reading fHpbReset flag failed with error %d\n",
2297                                 __func__, err);
2298                         return flag_res;
2299                 }
2300
2301                 if (!flag_res)
2302                         goto out;
2303
2304                 usleep_range(1000, 1100);
2305         }
2306         if (flag_res) {
2307                 dev_err(hba->dev,
2308                         "%s fHpbReset was not cleared by the device\n",
2309                         __func__);
2310         }
2311 out:
2312         return flag_res;
2313 }
2314
2315 /**
2316  * ufshpb_toggle_state - switch HPB state of all LUs
2317  * @hba: per-adapter instance
2318  * @src: expected current HPB state
2319  * @dest: target HPB state to switch to
2320  */
2321 void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest)
2322 {
2323         struct ufshpb_lu *hpb;
2324         struct scsi_device *sdev;
2325
2326         shost_for_each_device(sdev, hba->host) {
2327                 hpb = ufshpb_get_hpb_data(sdev);
2328
2329                 if (!hpb || ufshpb_get_state(hpb) != src)
2330                         continue;
2331                 ufshpb_set_state(hpb, dest);
2332
2333                 if (dest == HPB_RESET) {
2334                         ufshpb_cancel_jobs(hpb);
2335                         ufshpb_discard_rsp_lists(hpb);
2336                 }
2337         }
2338 }
2339
2340 void ufshpb_suspend(struct ufs_hba *hba)
2341 {
2342         struct ufshpb_lu *hpb;
2343         struct scsi_device *sdev;
2344
2345         shost_for_each_device(sdev, hba->host) {
2346                 hpb = ufshpb_get_hpb_data(sdev);
2347                 if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT)
2348                         continue;
2349
2350                 ufshpb_set_state(hpb, HPB_SUSPEND);
2351                 ufshpb_cancel_jobs(hpb);
2352         }
2353 }
2354
2355 void ufshpb_resume(struct ufs_hba *hba)
2356 {
2357         struct ufshpb_lu *hpb;
2358         struct scsi_device *sdev;
2359
2360         shost_for_each_device(sdev, hba->host) {
2361                 hpb = ufshpb_get_hpb_data(sdev);
2362                 if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND)
2363                         continue;
2364
2365                 ufshpb_set_state(hpb, HPB_PRESENT);
2366                 ufshpb_kick_map_work(hpb);
2367                 if (hpb->is_hcm) {
2368                         unsigned int poll = hpb->params.timeout_polling_interval_ms;
2369
2370                         schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll));
2371                 }
2372         }
2373 }
2374
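/*
 * Read the unit descriptor for @lun and extract the HPB-related fields:
 * whether HPB is enabled on the LU, the logical block count, the pinned
 * region range and the maximum number of active regions.
 */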
2375 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2376                               struct ufshpb_lu_info *hpb_lu_info)
2377 {
2378         u16 max_active_rgns;
2379         u8 lu_enable;
2380         int size;
2381         int ret;
2382         char desc_buf[QUERY_DESC_MAX_SIZE];
2383
2384         ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2385
2386         ufshcd_rpm_get_sync(hba);
2387         ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2388                                             QUERY_DESC_IDN_UNIT, lun, 0,
2389                                             desc_buf, &size);
2390         ufshcd_rpm_put_sync(hba);
2391
2392         if (ret) {
2393                 dev_err(hba->dev,
2394                         "%s: idn: %d lun: %d  query request failed",
2395                         __func__, QUERY_DESC_IDN_UNIT, lun);
2396                 return ret;
2397         }
2398
2399         lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2400         if (lu_enable != LU_ENABLED_HPB_FUNC)
2401                 return -ENODEV;
2402
2403         max_active_rgns = get_unaligned_be16(
2404                         desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2405         if (!max_active_rgns) {
2406                 dev_err(hba->dev,
2407                         "lun %d wrong number of max active regions\n", lun);
2408                 return -ENODEV;
2409         }
2410
2411         hpb_lu_info->num_blocks = get_unaligned_be64(
2412                         desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2413         hpb_lu_info->pinned_start = get_unaligned_be16(
2414                         desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2415         hpb_lu_info->num_pinned = get_unaligned_be16(
2416                         desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2417         hpb_lu_info->max_active_rgns = max_active_rgns;
2418
2419         return 0;
2420 }
2421
2422 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2423 {
2424         struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2425
2426         if (!hpb)
2427                 return;
2428
2429         ufshpb_set_state(hpb, HPB_FAILED);
2430
2431         sdev = hpb->sdev_ufs_lu;
2432         sdev->hostdata = NULL;
2433
2434         ufshpb_cancel_jobs(hpb);
2435
2436         ufshpb_pre_req_mempool_destroy(hpb);
2437         ufshpb_destroy_region_tbl(hpb);
2438
2439         kmem_cache_destroy(hpb->map_req_cache);
2440         kmem_cache_destroy(hpb->m_page_cache);
2441
2442         list_del_init(&hpb->list_hpb_lu);
2443
2444         kfree(hpb);
2445 }
2446
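/*
 * Called once every LU has finished ufshpb_init_hpb_lu().  If no HPB LU was
 * configured, tear HPB down entirely.  Otherwise shrink the global mempools
 * to what the active subregions can actually use, then either mark each LU
 * HPB_PRESENT or destroy it if the fHpbReset handshake failed.
 */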
2447 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2448 {
2449         int pool_size;
2450         struct ufshpb_lu *hpb;
2451         struct scsi_device *sdev;
2452         bool init_success;
2453
2454         if (tot_active_srgn_pages == 0) {
2455                 ufshpb_remove(hba);
2456                 return;
2457         }
2458
2459         init_success = !ufshpb_check_hpb_reset_query(hba);
2460
2461         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2462         if (pool_size > tot_active_srgn_pages) {
2463                 mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2464                 mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2465         }
2466
2467         shost_for_each_device(sdev, hba->host) {
2468                 hpb = ufshpb_get_hpb_data(sdev);
2469                 if (!hpb)
2470                         continue;
2471
2472                 if (init_success) {
2473                         ufshpb_set_state(hpb, HPB_PRESENT);
2474                         if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2475                                 queue_work(ufshpb_wq, &hpb->map_work);
2476                 } else {
2477                         dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2478                         ufshpb_destroy_lu(hba, sdev);
2479                 }
2480         }
2481
2482         if (!init_success)
2483                 ufshpb_remove(hba);
2484 }
2485
2486 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2487 {
2488         struct ufshpb_lu *hpb;
2489         int ret;
2490         struct ufshpb_lu_info hpb_lu_info = { 0 };
2491         int lun = sdev->lun;
2492
2493         if (lun >= hba->dev_info.max_lu_supported)
2494                 goto out;
2495
2496         ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2497         if (ret)
2498                 goto out;
2499
2500         hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
2501                                   &hpb_lu_info);
2502         if (!hpb)
2503                 goto out;
2504
2505         tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2506                         hpb->srgns_per_rgn * hpb->pages_per_srgn;
2507
2508 out:
2509         /* All LUs are initialized */
2510         if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
2511                 ufshpb_hpb_lu_prepared(hba);
2512 }
2513
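/*
 * Set up the driver-global resources shared by all HPB LUs: the map-context
 * slab cache, the mctx and page mempools (sized from the
 * ufshpb_host_map_kbytes module parameter) and the unbound WQ_MEM_RECLAIM
 * workqueue that runs the map work.
 */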
2514 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2515 {
2516         int ret;
2517         unsigned int pool_size;
2518
2519         ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2520                                         sizeof(struct ufshpb_map_ctx),
2521                                         0, 0, NULL);
2522         if (!ufshpb_mctx_cache) {
2523                 dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2524                 return -ENOMEM;
2525         }
2526
2527         pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2528         dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2529                __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2530
2531         ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2532                                                     ufshpb_mctx_cache);
2533         if (!ufshpb_mctx_pool) {
2534                 dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2535                 ret = -ENOMEM;
2536                 goto release_mctx_cache;
2537         }
2538
2539         ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2540         if (!ufshpb_page_pool) {
2541                 dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2542                 ret = -ENOMEM;
2543                 goto release_mctx_pool;
2544         }
2545
2546         ufshpb_wq = alloc_workqueue("ufshpb-wq",
2547                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2548         if (!ufshpb_wq) {
2549                 dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2550                 ret = -ENOMEM;
2551                 goto release_page_pool;
2552         }
2553
2554         return 0;
2555
2556 release_page_pool:
2557         mempool_destroy(ufshpb_page_pool);
2558 release_mctx_pool:
2559         mempool_destroy(ufshpb_mctx_pool);
2560 release_mctx_cache:
2561         kmem_cache_destroy(ufshpb_mctx_cache);
2562         return ret;
2563 }
2564
2565 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2566 {
2567         struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
2568         int max_active_rgns = 0;
2569         int hpb_num_lu;
2570
2571         hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2572         if (hpb_num_lu == 0) {
2573                 dev_err(hba->dev, "No HPB LU supported\n");
2574                 hpb_info->hpb_disabled = true;
2575                 return;
2576         }
2577
2578         hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2579         hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2580         max_active_rgns = get_unaligned_be16(geo_buf +
2581                           GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2582
2583         if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2584             max_active_rgns == 0) {
2585                 dev_err(hba->dev, "No HPB supported device\n");
2586                 hpb_info->hpb_disabled = true;
2587                 return;
2588         }
2589 }
2590
2591 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2592 {
2593         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2594         int version, ret;
2595         int max_single_cmd;
2596
2597         hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2598
2599         version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2600         if ((version != HPB_SUPPORT_VERSION) &&
2601             (version != HPB_SUPPORT_LEGACY_VERSION)) {
2602                 dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2603                         __func__, version);
2604                 hpb_dev_info->hpb_disabled = true;
2605                 return;
2606         }
2607
2608         if (version == HPB_SUPPORT_LEGACY_VERSION)
2609                 hpb_dev_info->is_legacy = true;
2610
2611         /*
2612          * Get the number of user logical units so we can check whether all
2613          * scsi_device instances have finished initialization.
2614          */
2615         hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2616
2617         if (hpb_dev_info->is_legacy)
2618                 return;
2619
2620         ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2621                 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
2622
2623         if (ret)
2624                 hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
2625         else
2626                 hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
2627 }
2628
2629 void ufshpb_init(struct ufs_hba *hba)
2630 {
2631         struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
2632         int try;
2633         int ret;
2634
2635         if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2636                 return;
2637
2638         if (ufshpb_init_mem_wq(hba)) {
2639                 hpb_dev_info->hpb_disabled = true;
2640                 return;
2641         }
2642
2643         atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2644         tot_active_srgn_pages = 0;
2645         /* issue HPB reset query */
2646         for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2647                 ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2648                                         QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2649                 if (!ret)
2650                         break;
2651         }
2652 }
2653
2654 void ufshpb_remove(struct ufs_hba *hba)
2655 {
2656         mempool_destroy(ufshpb_page_pool);
2657         mempool_destroy(ufshpb_mctx_pool);
2658         kmem_cache_destroy(ufshpb_mctx_cache);
2659
2660         destroy_workqueue(ufshpb_wq);
2661 }
2662
2663 module_param(ufshpb_host_map_kbytes, uint, 0644);
2664 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2665         "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");