1 // SPDX-License-Identifier: GPL-2.0
3 * Universal Flash Storage Host Performance Booster
5 * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
8 * Yongmyung Lee <ymhungry.lee@samsung.com>
9 * Jinyoung Choi <j-young.choi@samsung.com>
12 #include <asm/unaligned.h>
13 #include <linux/async.h>
19 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
20 #define READ_TO_MS 1000
21 #define READ_TO_EXPIRIES 100
22 #define POLLING_INTERVAL_MS 200
23 #define THROTTLE_MAP_REQ_DEFAULT 1
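/*
 * Host control mode tuning defaults (see ufshpb_hcm_param_init() and the
 * hpb_params sysfs group below): a subregion that reaches
 * ACTIVATION_THRESHOLD reads becomes an activation candidate; the read
 * timeout worker runs every POLLING_INTERVAL_MS and queues an active
 * region for inactivation once it has gone READ_TO_MS without reads and
 * its READ_TO_EXPIRIES budget is spent (or it has become dirty);
 * THROTTLE_MAP_REQ_DEFAULT caps the number of in-flight HPB_READ_BUFFER
 * map requests.
 */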
25 /* memory management */
26 static struct kmem_cache *ufshpb_mctx_cache;
27 static mempool_t *ufshpb_mctx_pool;
28 static mempool_t *ufshpb_page_pool;
29 /* A 2MB cache can hold ppn entries covering a 1GB range. */
30 static unsigned int ufshpb_host_map_kbytes = 2048;
31 static int tot_active_srgn_pages;
33 static struct workqueue_struct *ufshpb_wq;
35 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
38 bool ufshpb_is_allowed(struct ufs_hba *hba)
40 return !(hba->ufshpb_dev.hpb_disabled);
43 /* HPB version 1.0 is referred to as the legacy version. */
44 bool ufshpb_is_legacy(struct ufs_hba *hba)
46 return hba->ufshpb_dev.is_legacy;
49 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
51 return sdev->hostdata;
54 static int ufshpb_get_state(struct ufshpb_lu *hpb)
56 return atomic_read(&hpb->hpb_state);
59 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
61 atomic_set(&hpb->hpb_state, state);
64 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
65 struct ufshpb_subregion *srgn)
67 return rgn->rgn_state != HPB_RGN_INACTIVE &&
68 srgn->srgn_state == HPB_SRGN_VALID;
71 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
73 return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
76 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
78 return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
79 op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
82 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
84 return transfer_len <= hpb->pre_req_max_tr_len;
88 * In this driver, the WRITE_BUFFER command supports 36KB (len=9) to 1MB (len=256)
89 * by default. The transfer_len range can be changed through sysfs.
91 static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
93 return len > hpb->pre_req_min_tr_len &&
94 len <= hpb->pre_req_max_tr_len;
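/*
 * On non-legacy (HPB 2.0) devices, reads in this size window take the
 * "pre-req" path: ufshpb_issue_pre_req() first sends an
 * UFSHPB_WRITE_BUFFER (prefetch buffer ID) carrying the L2P entries for
 * the whole LPN range, and only then is the HPB READ itself issued.
 */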
97 static bool ufshpb_is_general_lun(int lun)
99 return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
102 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
104 if (hpb->lu_pinned_end != PINNED_NOT_SET &&
105 rgn_idx >= hpb->lu_pinned_start &&
106 rgn_idx <= hpb->lu_pinned_end)
112 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
117 if (ufshpb_get_state(hpb) != HPB_PRESENT)
120 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
121 if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
123 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
126 queue_work(ufshpb_wq, &hpb->map_work);
129 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
130 struct ufshcd_lrb *lrbp,
131 struct utp_hpb_rsp *rsp_field)
133 /* Check HPB_UPDATE_ALERT */
134 if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
135 UPIU_HEADER_DWORD(0, 2, 0, 0)))
138 if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
139 rsp_field->desc_type != DEV_DES_TYPE ||
140 rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
141 rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
142 rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
143 rsp_field->hpb_op == HPB_RSP_NONE ||
144 (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
145 !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
148 if (!ufshpb_is_general_lun(rsp_field->lun)) {
149 dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
157 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
158 int srgn_offset, int cnt, bool set_dirty)
160 struct ufshpb_region *rgn;
161 struct ufshpb_subregion *srgn, *prev_srgn = NULL;
167 rgn = hpb->rgn_tbl + rgn_idx;
168 srgn = rgn->srgn_tbl + srgn_idx;
170 if (likely(!srgn->is_last))
171 bitmap_len = hpb->entries_per_srgn;
173 bitmap_len = hpb->last_srgn_entries;
175 if ((srgn_offset + cnt) > bitmap_len)
176 set_bit_len = bitmap_len - srgn_offset;
180 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
181 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
183 if (srgn->srgn_state == HPB_SRGN_VALID)
184 bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
186 } else if (hpb->is_hcm) {
187 /* rewind the read timeout for LRU regions */
188 rgn->read_timeout = ktime_add_ms(ktime_get(),
189 rgn->hpb->params.read_timeout_ms);
190 rgn->read_timeout_expiries =
191 rgn->hpb->params.read_timeout_expiries;
194 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
196 if (hpb->is_hcm && prev_srgn != srgn) {
197 bool activate = false;
199 spin_lock(&rgn->rgn_lock);
201 rgn->reads -= srgn->reads;
203 set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
207 if (srgn->reads == hpb->params.activation_thld)
210 spin_unlock(&rgn->rgn_lock);
213 test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
214 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
215 ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
216 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
217 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
218 "activate region %d-%d\n", rgn_idx, srgn_idx);
225 if (++srgn_idx == hpb->srgns_per_rgn) {
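/*
 * ufshpb_test_ppn_dirty() below reports whether any cached L2P entry in
 * the given LPN range was invalidated by a write or discard; if so,
 * ufshpb_prep() counts a miss and leaves the command as a normal READ.
 */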
235 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
236 int srgn_idx, int srgn_offset, int cnt)
238 struct ufshpb_region *rgn;
239 struct ufshpb_subregion *srgn;
244 rgn = hpb->rgn_tbl + rgn_idx;
245 srgn = rgn->srgn_tbl + srgn_idx;
247 if (likely(!srgn->is_last))
248 bitmap_len = hpb->entries_per_srgn;
250 bitmap_len = hpb->last_srgn_entries;
252 if (!ufshpb_is_valid_srgn(rgn, srgn))
256 * If the region state is active, an mctx must have been allocated.
257 * In this case, check whether the region was evicted or the
258 * mctx allocation failed.
260 if (unlikely(!srgn->mctx)) {
261 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
262 "no mctx in region %d subregion %d.\n",
263 srgn->rgn_idx, srgn->srgn_idx);
267 if ((srgn_offset + cnt) > bitmap_len)
268 bit_len = bitmap_len - srgn_offset;
272 if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
273 srgn_offset) < bit_len + srgn_offset)
277 if (++srgn_idx == hpb->srgns_per_rgn) {
289 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
291 return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
294 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
295 struct ufshpb_map_ctx *mctx, int pos,
296 int len, __be64 *ppn_buf)
302 index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
303 offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
305 if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
308 copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
310 page = mctx->m_page[index];
311 if (unlikely(!page)) {
312 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
313 "error. cannot find page in mctx\n");
317 memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
318 copied * HPB_ENTRY_SIZE);
324 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
325 int *srgn_idx, int *offset)
329 *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
330 rgn_offset = lpn & hpb->entries_per_rgn_mask;
331 *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
332 *offset = rgn_offset & hpb->entries_per_srgn_mask;
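/*
 * The decomposition above works because entries_per_rgn and
 * entries_per_srgn are powers of two (their shifts/masks come from
 * ilog2() in ufshpb_lu_parameter_init()), so a shift selects the
 * (sub)region and a mask yields the offset within it; e.g. with 1024
 * entries per subregion, lpn 5000 lands at subregion offset
 * 5000 & 1023 = 904.
 */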
336 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
337 struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
338 u8 transfer_len, int read_id)
340 unsigned char *cdb = lrbp->cmd->cmnd;
341 __be64 ppn_tmp = ppn;
342 cdb[0] = UFSHPB_READ;
344 if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
345 ppn_tmp = swab64(ppn);
347 /* ppn value is stored as big-endian in the host memory */
348 memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
349 cdb[14] = transfer_len;
352 lrbp->cmd->cmd_len = UFS_CDB_SIZE;
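/*
 * The CDB built above is an UFSHPB_READ: the cached physical page number
 * is stored big-endian in bytes 6..13 and the transfer length in byte 14,
 * letting the device skip its own L2P lookup for this range.  Devices
 * with UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ expect the entry
 * byte-swapped first.
 */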
355 static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
356 unsigned long lpn, unsigned int len,
359 cdb[0] = UFSHPB_WRITE_BUFFER;
360 cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
362 put_unaligned_be32(lpn, &cdb[2]);
364 put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
366 cdb[9] = 0x00; /* Control = 0x00 */
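/*
 * The UFSHPB_WRITE_BUFFER (prefetch ID) CDB above carries the starting
 * LPN in bytes 2..5 and the payload length (number of entries times
 * HPB_ENTRY_SIZE) in bytes 7..8; the entries themselves travel in the
 * data-out buffer assembled by ufshpb_pre_req_add_bio_page().
 */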
369 static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
371 struct ufshpb_req *pre_req;
373 if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
374 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
375 "pre_req throttle. inflight %d throttle %d",
376 hpb->num_inflight_pre_req, hpb->throttle_pre_req);
380 pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
381 struct ufshpb_req, list_req);
383 dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
387 list_del_init(&pre_req->list_req);
388 hpb->num_inflight_pre_req++;
393 static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
394 struct ufshpb_req *pre_req)
397 bio_reset(pre_req->bio);
398 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
399 hpb->num_inflight_pre_req--;
402 static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
404 struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
405 struct ufshpb_lu *hpb = pre_req->hpb;
409 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
410 struct scsi_sense_hdr sshdr;
412 dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
413 scsi_command_normalize_sense(cmd, &sshdr);
414 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
415 "code %x sense_key %x asc %x ascq %x",
417 sshdr.sense_key, sshdr.asc, sshdr.ascq);
418 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
419 "byte4 %x byte5 %x byte6 %x additional_len %x",
420 sshdr.byte4, sshdr.byte5,
421 sshdr.byte6, sshdr.additional_length);
424 blk_mq_free_request(req);
425 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
426 ufshpb_put_pre_req(pre_req->hpb, pre_req);
427 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
430 static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
432 struct ufshpb_lu *hpb = pre_req->hpb;
433 struct ufshpb_region *rgn;
434 struct ufshpb_subregion *srgn;
438 unsigned long lpn = pre_req->wb.lpn;
439 int rgn_idx, srgn_idx, srgn_offset;
442 addr = page_address(page);
443 ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
445 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
448 rgn = hpb->rgn_tbl + rgn_idx;
449 srgn = rgn->srgn_tbl + srgn_idx;
451 if (!ufshpb_is_valid_srgn(rgn, srgn))
457 copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
458 pre_req->wb.len - offset,
465 srgn_offset += copied;
467 if (srgn_offset == hpb->entries_per_srgn) {
470 if (++srgn_idx == hpb->srgns_per_rgn) {
476 if (offset < pre_req->wb.len)
479 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
482 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
486 static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
487 struct request_queue *q,
488 struct ufshpb_req *pre_req)
490 struct page *page = pre_req->wb.m_page;
491 struct bio *bio = pre_req->bio;
492 int entries_bytes, ret;
497 if (ufshpb_prep_entry(pre_req, page))
500 entries_bytes = pre_req->wb.len * sizeof(__be64);
502 ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
503 if (ret != entries_bytes) {
504 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
505 "bio_add_pc_page fail: %d", ret);
511 static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
513 if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
514 hpb->cur_read_id = 1;
515 return hpb->cur_read_id;
518 static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
519 struct ufshpb_req *pre_req, int read_id)
521 struct scsi_device *sdev = cmd->device;
522 struct request_queue *q = sdev->request_queue;
524 struct scsi_request *rq;
525 struct bio *bio = pre_req->bio;
528 pre_req->wb.lpn = sectors_to_logical(cmd->device,
529 blk_rq_pos(scsi_cmd_to_rq(cmd)));
530 pre_req->wb.len = sectors_to_logical(cmd->device,
531 blk_rq_sectors(scsi_cmd_to_rq(cmd)));
532 if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
537 /* 1. request setup */
538 blk_rq_append_bio(req, bio);
540 req->end_io_data = (void *)pre_req;
541 req->end_io = ufshpb_pre_req_compl_fn;
543 /* 2. scsi_request setup */
547 ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
549 rq->cmd_len = scsi_command_size(rq->cmd);
551 if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
554 hpb->stats.pre_req_cnt++;
559 static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
562 struct ufshpb_req *pre_req;
563 struct request *req = NULL;
568 req = blk_get_request(cmd->device->request_queue,
569 REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
573 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
574 pre_req = ufshpb_get_pre_req(hpb);
579 _read_id = ufshpb_get_read_id(hpb);
580 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
584 ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
592 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
593 ufshpb_put_pre_req(hpb, pre_req);
595 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
596 blk_put_request(req);
601 * This function sets up an HPB READ command using host-side L2P map data.
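 * Writes and discards mark the affected map entries dirty; in host control
 * mode, reads also bump the region read counters and can trigger activation
 * or normalization work.  The CDB is rewritten to UFSHPB_READ only when the
 * range is clean and its ppn can be fetched from the cached map; otherwise
 * a miss is counted and the command goes out as a normal READ.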
603 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
605 struct ufshpb_lu *hpb;
606 struct ufshpb_region *rgn;
607 struct ufshpb_subregion *srgn;
608 struct scsi_cmnd *cmd = lrbp->cmd;
612 int transfer_len, rgn_idx, srgn_idx, srgn_offset;
616 hpb = ufshpb_get_hpb_data(cmd->device);
620 if (ufshpb_get_state(hpb) == HPB_INIT)
623 if (ufshpb_get_state(hpb) != HPB_PRESENT) {
624 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
625 "%s: ufshpb state is not PRESENT", __func__);
629 if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
630 (!ufshpb_is_write_or_discard(cmd) &&
631 !ufshpb_is_read_cmd(cmd)))
634 transfer_len = sectors_to_logical(cmd->device,
635 blk_rq_sectors(scsi_cmd_to_rq(cmd)));
636 if (unlikely(!transfer_len))
639 lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
640 ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
641 rgn = hpb->rgn_tbl + rgn_idx;
642 srgn = rgn->srgn_tbl + srgn_idx;
644 /* If the command is WRITE or DISCARD, mark the bitmap dirty */
645 if (ufshpb_is_write_or_discard(cmd)) {
646 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
651 if (!ufshpb_is_supported_chunk(hpb, transfer_len))
654 WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
658 * in host control mode, reads are the main source for
661 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
662 transfer_len, false);
664 /* keep those counters normalized */
665 if (rgn->reads > hpb->entries_per_srgn)
666 schedule_work(&hpb->ufshpb_normalization_work);
669 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
670 if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
672 hpb->stats.miss_cnt++;
673 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
677 err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
678 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
679 if (unlikely(err < 0)) {
681 * In this case, the region state is active,
682 * but the ppn table is not allocated.
683 * Make sure that the ppn table is allocated in the
686 dev_err(hba->dev, "get ppn failed. err %d\n", err);
689 if (!ufshpb_is_legacy(hba) &&
690 ufshpb_is_required_wb(hpb, transfer_len)) {
691 err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
693 unsigned long timeout;
695 timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
696 hpb->params.requeue_timeout_ms);
698 if (time_before(jiffies, timeout))
701 hpb->stats.miss_cnt++;
706 ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
709 hpb->stats.hit_cnt++;
713 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
714 int rgn_idx, enum req_opf dir,
717 struct ufshpb_req *rq;
719 int retries = HPB_MAP_REQ_RETRIES;
721 rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
726 req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
729 if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
730 usleep_range(3000, 3100);
739 rq->rb.rgn_idx = rgn_idx;
744 kmem_cache_free(hpb->map_req_cache, rq);
748 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
750 blk_put_request(rq->req);
751 kmem_cache_free(hpb->map_req_cache, rq);
754 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
755 struct ufshpb_subregion *srgn)
757 struct ufshpb_req *map_req;
762 hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
763 dev_info(&hpb->sdev_ufs_lu->sdev_dev,
764 "map_req throttle. inflight %d throttle %d",
765 hpb->num_inflight_map_req,
766 hpb->params.inflight_map_req);
770 map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
774 bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
776 ufshpb_put_req(hpb, map_req);
782 map_req->rb.srgn_idx = srgn->srgn_idx;
783 map_req->rb.mctx = srgn->mctx;
785 spin_lock_irqsave(&hpb->param_lock, flags);
786 hpb->num_inflight_map_req++;
787 spin_unlock_irqrestore(&hpb->param_lock, flags);
792 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
793 struct ufshpb_req *map_req)
797 bio_put(map_req->bio);
798 ufshpb_put_req(hpb, map_req);
800 spin_lock_irqsave(&hpb->param_lock, flags);
801 hpb->num_inflight_map_req--;
802 spin_unlock_irqrestore(&hpb->param_lock, flags);
805 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
806 struct ufshpb_subregion *srgn)
808 struct ufshpb_region *rgn;
809 u32 num_entries = hpb->entries_per_srgn;
812 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
813 "no mctx in region %d subregion %d.\n",
814 srgn->rgn_idx, srgn->srgn_idx);
818 if (unlikely(srgn->is_last))
819 num_entries = hpb->last_srgn_entries;
821 bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
823 rgn = hpb->rgn_tbl + srgn->rgn_idx;
824 clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
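/*
 * Activation and inactivation are deferred: ufshpb_update_active_info()
 * and ufshpb_update_inactive_info() below only queue the target on
 * hpb->lh_act_srgn / hpb->lh_inact_rgn under rsp_list_lock;
 * ufshpb_map_work_handler() later drains both lists and issues the
 * actual HPB_READ_BUFFER / WRITE_BUFFER requests.
 */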
829 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
832 struct ufshpb_region *rgn;
833 struct ufshpb_subregion *srgn;
835 rgn = hpb->rgn_tbl + rgn_idx;
836 srgn = rgn->srgn_tbl + srgn_idx;
838 list_del_init(&rgn->list_inact_rgn);
840 if (list_empty(&srgn->list_act_srgn))
841 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
843 hpb->stats.rb_active_cnt++;
846 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
848 struct ufshpb_region *rgn;
849 struct ufshpb_subregion *srgn;
852 rgn = hpb->rgn_tbl + rgn_idx;
854 for_each_sub_region(rgn, srgn_idx, srgn)
855 list_del_init(&srgn->list_act_srgn);
857 if (list_empty(&rgn->list_inact_rgn))
858 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
860 hpb->stats.rb_inactive_cnt++;
863 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
864 struct ufshpb_subregion *srgn)
866 struct ufshpb_region *rgn;
869 * If the subregion has no mctx
870 * after the HPB_READ_BUFFER I/O has completed, the region to which the
871 * subregion belongs was evicted.
872 * Make sure the region is not evicted while I/O is in progress
875 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
876 "no mctx in region %d subregion %d.\n",
877 srgn->rgn_idx, srgn->srgn_idx);
878 srgn->srgn_state = HPB_SRGN_INVALID;
882 rgn = hpb->rgn_tbl + srgn->rgn_idx;
884 if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
885 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
886 "region %d subregion %d evicted\n",
887 srgn->rgn_idx, srgn->srgn_idx);
888 srgn->srgn_state = HPB_SRGN_INVALID;
891 srgn->srgn_state = HPB_SRGN_VALID;
894 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
896 struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
898 ufshpb_put_req(umap_req->hpb, umap_req);
901 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
903 struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
904 struct ufshpb_lu *hpb = map_req->hpb;
905 struct ufshpb_subregion *srgn;
908 srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
909 map_req->rb.srgn_idx;
911 ufshpb_clear_dirty_bitmap(hpb, srgn);
912 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
913 ufshpb_activate_subregion(hpb, srgn);
914 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
916 ufshpb_put_map_req(map_req->hpb, map_req);
919 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
921 cdb[0] = UFSHPB_WRITE_BUFFER;
922 cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
923 UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
925 put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
929 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
930 int srgn_idx, int srgn_mem_size)
932 cdb[0] = UFSHPB_READ_BUFFER;
933 cdb[1] = UFSHPB_READ_BUFFER_ID;
935 put_unaligned_be16(rgn_idx, &cdb[2]);
936 put_unaligned_be16(srgn_idx, &cdb[4]);
937 put_unaligned_be24(srgn_mem_size, &cdb[6]);
942 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
943 struct ufshpb_req *umap_req,
944 struct ufshpb_region *rgn)
947 struct scsi_request *rq;
951 req->end_io_data = (void *)umap_req;
953 ufshpb_set_unmap_cmd(rq->cmd, rgn);
954 rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
956 blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
958 hpb->stats.umap_req_cnt++;
961 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
962 struct ufshpb_req *map_req, bool last)
964 struct request_queue *q;
966 struct scsi_request *rq;
967 int mem_size = hpb->srgn_mem_size;
971 q = hpb->sdev_ufs_lu->request_queue;
972 for (i = 0; i < hpb->pages_per_srgn; i++) {
973 ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
975 if (ret != PAGE_SIZE) {
976 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
977 "bio_add_pc_page fail %d - %d\n",
978 map_req->rb.rgn_idx, map_req->rb.srgn_idx);
985 blk_rq_append_bio(req, map_req->bio);
987 req->end_io_data = map_req;
992 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
994 ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
995 map_req->rb.srgn_idx, mem_size);
996 rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
998 blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
1000 hpb->stats.map_req_cnt++;
1004 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
1007 struct ufshpb_map_ctx *mctx;
1008 u32 num_entries = hpb->entries_per_srgn;
1011 mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
1015 mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
1020 num_entries = hpb->last_srgn_entries;
1022 mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
1023 if (!mctx->ppn_dirty)
1024 goto release_m_page;
1026 for (i = 0; i < hpb->pages_per_srgn; i++) {
1027 mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
1028 if (!mctx->m_page[i]) {
1029 for (j = 0; j < i; j++)
1030 mempool_free(mctx->m_page[j], ufshpb_page_pool);
1031 goto release_ppn_dirty;
1033 clear_page(page_address(mctx->m_page[i]));
1039 bitmap_free(mctx->ppn_dirty);
1041 kmem_cache_free(hpb->m_page_cache, mctx->m_page);
1043 mempool_free(mctx, ufshpb_mctx_pool);
1047 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
1048 struct ufshpb_map_ctx *mctx)
1052 for (i = 0; i < hpb->pages_per_srgn; i++)
1053 mempool_free(mctx->m_page[i], ufshpb_page_pool);
1055 bitmap_free(mctx->ppn_dirty);
1056 kmem_cache_free(hpb->m_page_cache, mctx->m_page);
1057 mempool_free(mctx, ufshpb_mctx_pool);
1060 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
1061 struct ufshpb_region *rgn)
1063 struct ufshpb_subregion *srgn;
1066 for_each_sub_region(rgn, srgn_idx, srgn)
1067 if (srgn->srgn_state == HPB_SRGN_ISSUED)
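/*
 * Host control mode read timeout: every timeout_polling_interval_ms this
 * worker walks the LRU list; a region whose read_timeout has elapsed
 * loses one expiry credit and, once it is dirty or out of credits, is
 * queued for inactivation, otherwise its timer is simply rewound.
 */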
1073 static void ufshpb_read_to_handler(struct work_struct *work)
1075 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1076 ufshpb_read_to_work.work);
1077 struct victim_select_info *lru_info = &hpb->lru_info;
1078 struct ufshpb_region *rgn, *next_rgn;
1079 unsigned long flags;
1081 LIST_HEAD(expired_list);
1083 if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
1086 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1088 list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
1090 bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
1093 rgn->read_timeout_expiries--;
1094 if (is_rgn_dirty(rgn) ||
1095 rgn->read_timeout_expiries == 0)
1096 list_add(&rgn->list_expired_rgn, &expired_list);
1098 rgn->read_timeout = ktime_add_ms(ktime_get(),
1099 hpb->params.read_timeout_ms);
1103 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1105 list_for_each_entry_safe(rgn, next_rgn, &expired_list,
1107 list_del_init(&rgn->list_expired_rgn);
1108 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1109 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1110 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1113 ufshpb_kick_map_work(hpb);
1115 clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
1117 poll = hpb->params.timeout_polling_interval_ms;
1118 schedule_delayed_work(&hpb->ufshpb_read_to_work,
1119 msecs_to_jiffies(poll));
1122 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
1123 struct ufshpb_region *rgn)
1125 rgn->rgn_state = HPB_RGN_ACTIVE;
1126 list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
1127 atomic_inc(&lru_info->active_cnt);
1128 if (rgn->hpb->is_hcm) {
1130 ktime_add_ms(ktime_get(),
1131 rgn->hpb->params.read_timeout_ms);
1132 rgn->read_timeout_expiries =
1133 rgn->hpb->params.read_timeout_expiries;
1137 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
1138 struct ufshpb_region *rgn)
1140 list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
1143 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
1145 struct victim_select_info *lru_info = &hpb->lru_info;
1146 struct ufshpb_region *rgn, *victim_rgn = NULL;
1148 list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
1150 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1151 "%s: no region allocated\n",
1155 if (ufshpb_check_srgns_issue_state(hpb, rgn))
1159 * in host control mode, verify that the exiting region
1163 rgn->reads > hpb->params.eviction_thld_exit)
1173 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
1174 struct ufshpb_region *rgn)
1176 list_del_init(&rgn->list_lru_rgn);
1177 rgn->rgn_state = HPB_RGN_INACTIVE;
1178 atomic_dec(&lru_info->active_cnt);
1181 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
1182 struct ufshpb_subregion *srgn)
1184 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1185 ufshpb_put_map_ctx(hpb, srgn->mctx);
1186 srgn->srgn_state = HPB_SRGN_UNUSED;
1191 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
1192 struct ufshpb_region *rgn,
1195 struct ufshpb_req *umap_req;
1196 int rgn_idx = rgn ? rgn->rgn_idx : 0;
1198 umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
1202 ufshpb_execute_umap_req(hpb, umap_req, rgn);
1207 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
1208 struct ufshpb_region *rgn)
1210 return ufshpb_issue_umap_req(hpb, rgn, true);
1213 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
1215 return ufshpb_issue_umap_req(hpb, NULL, false);
1218 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
1219 struct ufshpb_region *rgn)
1221 struct victim_select_info *lru_info;
1222 struct ufshpb_subregion *srgn;
1225 lru_info = &hpb->lru_info;
1227 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
1229 ufshpb_cleanup_lru_info(lru_info, rgn);
1231 for_each_sub_region(rgn, srgn_idx, srgn)
1232 ufshpb_purge_active_subregion(hpb, srgn);
1235 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1237 unsigned long flags;
1240 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1241 if (rgn->rgn_state == HPB_RGN_PINNED) {
1242 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1243 "pinned region cannot drop-out. region %d\n",
1248 if (!list_empty(&rgn->list_lru_rgn)) {
1249 if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
1255 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1256 ret = ufshpb_issue_umap_single_req(hpb, rgn);
1257 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1262 __ufshpb_evict_region(hpb, rgn);
1265 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1269 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
1270 struct ufshpb_region *rgn,
1271 struct ufshpb_subregion *srgn)
1273 struct ufshpb_req *map_req;
1274 unsigned long flags;
1277 bool alloc_required = false;
1278 enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1280 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1282 if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1283 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1284 "%s: ufshpb state is not PRESENT\n", __func__);
1288 if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1289 (srgn->srgn_state == HPB_SRGN_INVALID)) {
1294 if (srgn->srgn_state == HPB_SRGN_UNUSED)
1295 alloc_required = true;
1298 * If the subregion is already in the ISSUED state,
1299 * a device-side event (e.g., GC or wear-leveling) occurred and
1300 * an HPB response requesting map loading was received.
1301 * In this case, after finishing the current HPB_READ_BUFFER,
1302 * another HPB_READ_BUFFER is issued to obtain the latest
1305 if (srgn->srgn_state == HPB_SRGN_ISSUED)
1308 srgn->srgn_state = HPB_SRGN_ISSUED;
1309 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1311 if (alloc_required) {
1312 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1314 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1315 "get map_ctx failed. region %d - %d\n",
1316 rgn->rgn_idx, srgn->srgn_idx);
1317 state = HPB_SRGN_UNUSED;
1318 goto change_srgn_state;
1322 map_req = ufshpb_get_map_req(hpb, srgn);
1324 goto change_srgn_state;
1327 ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1329 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1330 "%s: issue map_req failed: %d, region %d - %d\n",
1331 __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1337 ufshpb_put_map_req(hpb, map_req);
1339 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1340 srgn->srgn_state = state;
1342 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1346 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1348 struct ufshpb_region *victim_rgn = NULL;
1349 struct victim_select_info *lru_info = &hpb->lru_info;
1350 unsigned long flags;
1353 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1355 * If the region is already on the LRU list, just move it
1356 * to the most-recently-used end of the list because the region
1357 * is already in the active state.
1359 if (!list_empty(&rgn->list_lru_rgn)) {
1360 ufshpb_hit_lru_info(lru_info, rgn);
1364 if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1365 if (atomic_read(&lru_info->active_cnt) ==
1366 lru_info->max_lru_active_cnt) {
1368 * If the maximum number of active regions
1369 * is exceeded, evict the least recently used region.
1370 * This case may occur when the device responds
1371 * to the eviction information late.
1372 * It is safe to evict the least recently used region,
1373 * because the device can detect that it is no longer in use
1374 * once the host stops issuing HPB_READ for it.
1376 * In host control mode, also verify that the entering
1377 * region has enough reads
1380 rgn->reads < hpb->params.eviction_thld_enter) {
1385 victim_rgn = ufshpb_victim_lru_info(hpb);
1387 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1388 "cannot get victim region %s\n",
1389 hpb->is_hcm ? "" : "error");
1394 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1395 "LRU full (%d), choose victim %d\n",
1396 atomic_read(&lru_info->active_cnt),
1397 victim_rgn->rgn_idx);
1400 spin_unlock_irqrestore(&hpb->rgn_state_lock,
1402 ret = ufshpb_issue_umap_single_req(hpb,
1404 spin_lock_irqsave(&hpb->rgn_state_lock,
1410 __ufshpb_evict_region(hpb, victim_rgn);
1414 * When a region is added to the lru_info list_head,
1415 * it is guaranteed that all of its subregions have been
1416 * assigned an mctx. If that failed, the mctx is requested again
1417 * without the region being added to the lru_info list_head
1419 ufshpb_add_lru_info(lru_info, rgn);
1422 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1426 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1427 struct utp_hpb_rsp *rsp_field)
1429 struct ufshpb_region *rgn;
1430 struct ufshpb_subregion *srgn;
1431 int i, rgn_i, srgn_i;
1433 BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1435 * If the active region and the inactive region are the same,
1436 * we will inactivate this region.
1437 * The device can check this (region inactivated) and
1438 * will respond with the proper active region information
1440 for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1442 be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1444 be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1446 rgn = hpb->rgn_tbl + rgn_i;
1448 (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1450 * in host control mode, subregion activation
1451 * recommendations are only allowed for active regions.
1452 * Also, ignore recommendations for dirty regions - the
1453 * host makes decisions concerning those by itself
1458 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1459 "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1461 spin_lock(&hpb->rsp_list_lock);
1462 ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1463 spin_unlock(&hpb->rsp_list_lock);
1465 srgn = rgn->srgn_tbl + srgn_i;
1467 /* blocking HPB_READ */
1468 spin_lock(&hpb->rgn_state_lock);
1469 if (srgn->srgn_state == HPB_SRGN_VALID)
1470 srgn->srgn_state = HPB_SRGN_INVALID;
1471 spin_unlock(&hpb->rgn_state_lock);
1476 * in host control mode the device is not allowed to inactivate
1482 for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1483 rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1484 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1485 "inactivate(%d) region %d\n", i, rgn_i);
1487 spin_lock(&hpb->rsp_list_lock);
1488 ufshpb_update_inactive_info(hpb, rgn_i);
1489 spin_unlock(&hpb->rsp_list_lock);
1491 rgn = hpb->rgn_tbl + rgn_i;
1493 spin_lock(&hpb->rgn_state_lock);
1494 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1495 for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1496 srgn = rgn->srgn_tbl + srgn_i;
1497 if (srgn->srgn_state == HPB_SRGN_VALID)
1498 srgn->srgn_state = HPB_SRGN_INVALID;
1501 spin_unlock(&hpb->rgn_state_lock);
1506 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1507 rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1509 if (ufshpb_get_state(hpb) == HPB_PRESENT)
1510 queue_work(ufshpb_wq, &hpb->map_work);
1513 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1515 struct victim_select_info *lru_info = &hpb->lru_info;
1516 struct ufshpb_region *rgn;
1517 unsigned long flags;
1519 spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1521 list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1522 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1524 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1528 * This function parses the recommended active subregion information in the
1529 * sense data field of a response UPIU with SAM_STAT_GOOD status.
1531 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1533 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1534 struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1537 if (unlikely(lrbp->lun != rsp_field->lun)) {
1538 struct scsi_device *sdev;
1541 __shost_for_each_device(sdev, hba->host) {
1542 hpb = ufshpb_get_hpb_data(sdev);
1547 if (rsp_field->lun == hpb->lun) {
1560 if (ufshpb_get_state(hpb) == HPB_INIT)
1563 if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1564 (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1565 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1566 "%s: ufshpb state is not PRESENT/SUSPEND\n",
1571 data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1572 & MASK_RSP_UPIU_DATA_SEG_LEN;
1574 /* To flush the remaining rsp_list, queue the map_work task */
1575 if (!data_seg_len) {
1576 if (!ufshpb_is_general_lun(hpb->lun))
1579 ufshpb_kick_map_work(hpb);
1583 BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1585 if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1588 hpb->stats.rb_noti_cnt++;
1590 switch (rsp_field->hpb_op) {
1591 case HPB_RSP_REQ_REGION_UPDATE:
1592 if (data_seg_len != DEV_DATA_SEG_LEN)
1593 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1594 "%s: data seg length is not same.\n",
1596 ufshpb_rsp_req_region_update(hpb, rsp_field);
1598 case HPB_RSP_DEV_RESET:
1599 dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1600 "UFS device lost HPB information during PM.\n");
1603 struct scsi_device *sdev;
1605 __shost_for_each_device(sdev, hba->host) {
1606 struct ufshpb_lu *h = sdev->hostdata;
1609 ufshpb_dev_reset_handler(h);
1615 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1616 "hpb_op is not available: %d\n",
1622 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1623 struct ufshpb_region *rgn,
1624 struct ufshpb_subregion *srgn)
1626 if (!list_empty(&rgn->list_inact_rgn))
1629 if (!list_empty(&srgn->list_act_srgn)) {
1630 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1634 list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1637 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1638 struct ufshpb_region *rgn,
1639 struct list_head *pending_list)
1641 struct ufshpb_subregion *srgn;
1644 if (!list_empty(&rgn->list_inact_rgn))
1647 for_each_sub_region(rgn, srgn_idx, srgn)
1648 if (!list_empty(&srgn->list_act_srgn))
1651 list_add_tail(&rgn->list_inact_rgn, pending_list);
1654 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1656 struct ufshpb_region *rgn;
1657 struct ufshpb_subregion *srgn;
1658 unsigned long flags;
1661 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1662 while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1663 struct ufshpb_subregion,
1665 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1668 list_del_init(&srgn->list_act_srgn);
1669 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1671 rgn = hpb->rgn_tbl + srgn->rgn_idx;
1672 ret = ufshpb_add_region(hpb, rgn);
1676 ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1678 dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1679 "issue map_req failed. ret %d, region %d - %d\n",
1680 ret, rgn->rgn_idx, srgn->srgn_idx);
1683 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1685 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1689 dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1690 rgn->rgn_idx, srgn->srgn_idx);
1691 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1692 ufshpb_add_active_list(hpb, rgn, srgn);
1693 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1696 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1698 struct ufshpb_region *rgn;
1699 unsigned long flags;
1701 LIST_HEAD(pending_list);
1703 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1704 while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1705 struct ufshpb_region,
1707 if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1710 list_del_init(&rgn->list_inact_rgn);
1711 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1713 ret = ufshpb_evict_region(hpb, rgn);
1715 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1716 ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1717 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1720 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1723 list_splice(&pending_list, &hpb->lh_inact_rgn);
1724 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
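/*
 * Read counter normalization: each pass shifts every subregion counter
 * right by params.normalization_factor and rebuilds the region total;
 * with the default factor of 1, a subregion that accumulated 40 reads
 * decays to 20, then 10, and so on.  An active region whose total drops
 * to zero is queued for inactivation.
 */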
1727 static void ufshpb_normalization_work_handler(struct work_struct *work)
1729 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1730 ufshpb_normalization_work);
1732 u8 factor = hpb->params.normalization_factor;
1734 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1735 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1738 spin_lock(&rgn->rgn_lock);
1740 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1741 struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1743 srgn->reads >>= factor;
1744 rgn->reads += srgn->reads;
1746 spin_unlock(&rgn->rgn_lock);
1748 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1751 /* if region is active but has no reads - inactivate it */
1752 spin_lock(&hpb->rsp_list_lock);
1753 ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1754 spin_unlock(&hpb->rsp_list_lock);
1758 static void ufshpb_map_work_handler(struct work_struct *work)
1760 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1762 if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1763 dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1764 "%s: ufshpb state is not PRESENT\n", __func__);
1768 ufshpb_run_inactive_region_list(hpb);
1769 ufshpb_run_active_subregion_list(hpb);
1773 * This function does not need to hold any locks
1774 * (rgn_state_lock, rsp_list_lock, etc.) because it is called during init.
1776 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1777 struct ufshpb_lu *hpb,
1778 struct ufshpb_region *rgn)
1780 struct ufshpb_subregion *srgn;
1784 for_each_sub_region(rgn, srgn_idx, srgn) {
1785 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1786 srgn->srgn_state = HPB_SRGN_INVALID;
1790 "alloc mctx for pinned region failed\n");
1794 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1797 rgn->rgn_state = HPB_RGN_PINNED;
1801 for (i = 0; i < srgn_idx; i++) {
1802 srgn = rgn->srgn_tbl + i;
1803 ufshpb_put_map_ctx(hpb, srgn->mctx);
1808 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1809 struct ufshpb_region *rgn, bool last)
1812 struct ufshpb_subregion *srgn;
1814 for_each_sub_region(rgn, srgn_idx, srgn) {
1815 INIT_LIST_HEAD(&srgn->list_act_srgn);
1817 srgn->rgn_idx = rgn->rgn_idx;
1818 srgn->srgn_idx = srgn_idx;
1819 srgn->srgn_state = HPB_SRGN_UNUSED;
1822 if (unlikely(last && hpb->last_srgn_entries))
1823 srgn->is_last = true;
1826 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1827 struct ufshpb_region *rgn, int srgn_cnt)
1829 rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1834 rgn->srgn_cnt = srgn_cnt;
1838 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1839 struct ufshpb_lu *hpb,
1840 struct ufshpb_dev_info *hpb_dev_info,
1841 struct ufshpb_lu_info *hpb_lu_info)
1843 u32 entries_per_rgn;
1844 u64 rgn_mem_size, tmp;
1847 hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
1849 if (ufshpb_is_legacy(hba))
1850 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1852 hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
1854 hpb->cur_read_id = 0;
1856 hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1857 hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1858 (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1860 hpb->lru_info.max_lru_active_cnt =
1861 hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1863 rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1865 do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1866 hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1867 * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1870 do_div(tmp, HPB_ENTRY_SIZE);
1871 entries_per_rgn = (u32)tmp;
1872 hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1873 hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1875 hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1876 hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1877 hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1880 do_div(tmp, hpb->srgn_mem_size);
1881 hpb->srgns_per_rgn = (int)tmp;
1883 hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1885 hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1886 (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1887 hpb->last_srgn_entries = hpb_lu_info->num_blocks
1888 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1890 hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1892 if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
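/*
 * Worked example (illustrative values, not read from the device): with
 * 8-byte HPB entries describing 4KiB blocks, a 16MiB region and a 4MiB
 * subregion give entries_per_rgn = 4096 (shift 12), entries_per_srgn =
 * 1024 (shift 10), srgns_per_rgn = 4, srgn_mem_size = 8KiB and, with
 * 4KiB pages, pages_per_srgn = 2.
 */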
1896 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1898 struct ufshpb_region *rgn_table, *rgn;
1902 rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1907 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1908 int srgn_cnt = hpb->srgns_per_rgn;
1909 bool last_srgn = false;
1911 rgn = rgn_table + rgn_idx;
1912 rgn->rgn_idx = rgn_idx;
1914 spin_lock_init(&rgn->rgn_lock);
1916 INIT_LIST_HEAD(&rgn->list_inact_rgn);
1917 INIT_LIST_HEAD(&rgn->list_lru_rgn);
1918 INIT_LIST_HEAD(&rgn->list_expired_rgn);
1920 if (rgn_idx == hpb->rgns_per_lu - 1) {
1921 srgn_cnt = ((hpb->srgns_per_lu - 1) %
1922 hpb->srgns_per_rgn) + 1;
1926 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1928 goto release_srgn_table;
1929 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1931 if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1932 ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1934 goto release_srgn_table;
1936 rgn->rgn_state = HPB_RGN_INACTIVE;
1943 hpb->rgn_tbl = rgn_table;
1948 for (i = 0; i <= rgn_idx; i++)
1949 kvfree(rgn_table[i].srgn_tbl);
1955 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1956 struct ufshpb_region *rgn)
1959 struct ufshpb_subregion *srgn;
1961 for_each_sub_region(rgn, srgn_idx, srgn)
1962 if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1963 srgn->srgn_state = HPB_SRGN_UNUSED;
1964 ufshpb_put_map_ctx(hpb, srgn->mctx);
1968 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1972 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1973 struct ufshpb_region *rgn;
1975 rgn = hpb->rgn_tbl + rgn_idx;
1976 if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1977 rgn->rgn_state = HPB_RGN_INACTIVE;
1979 ufshpb_destroy_subregion_tbl(hpb, rgn);
1982 kvfree(rgn->srgn_tbl);
1985 kvfree(hpb->rgn_tbl);
1988 /* SYSFS functions - HPB statistics */
1989 #define ufshpb_sysfs_attr_show_func(__name) \
1990 static ssize_t __name##_show(struct device *dev, \
1991 struct device_attribute *attr, char *buf) \
1993 struct scsi_device *sdev = to_scsi_device(dev); \
1994 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
1999 return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
2002 static DEVICE_ATTR_RO(__name)
2004 ufshpb_sysfs_attr_show_func(hit_cnt);
2005 ufshpb_sysfs_attr_show_func(miss_cnt);
2006 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
2007 ufshpb_sysfs_attr_show_func(rb_active_cnt);
2008 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
2009 ufshpb_sysfs_attr_show_func(map_req_cnt);
2010 ufshpb_sysfs_attr_show_func(umap_req_cnt);
2012 static struct attribute *hpb_dev_stat_attrs[] = {
2013 &dev_attr_hit_cnt.attr,
2014 &dev_attr_miss_cnt.attr,
2015 &dev_attr_rb_noti_cnt.attr,
2016 &dev_attr_rb_active_cnt.attr,
2017 &dev_attr_rb_inactive_cnt.attr,
2018 &dev_attr_map_req_cnt.attr,
2019 &dev_attr_umap_req_cnt.attr,
2023 struct attribute_group ufs_sysfs_hpb_stat_group = {
2024 .name = "hpb_stats",
2025 .attrs = hpb_dev_stat_attrs,
2028 /* SYSFS functions - HPB parameters */
2029 #define ufshpb_sysfs_param_show_func(__name) \
2030 static ssize_t __name##_show(struct device *dev, \
2031 struct device_attribute *attr, char *buf) \
2033 struct scsi_device *sdev = to_scsi_device(dev); \
2034 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
2039 return sysfs_emit(buf, "%d\n", hpb->params.__name); \
2042 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
2044 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
2045 const char *buf, size_t count)
2047 struct scsi_device *sdev = to_scsi_device(dev);
2048 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2054 if (kstrtouint(buf, 0, &val))
2060 hpb->params.requeue_timeout_ms = val;
2064 static DEVICE_ATTR_RW(requeue_timeout_ms);
2066 ufshpb_sysfs_param_show_func(activation_thld);
2068 activation_thld_store(struct device *dev, struct device_attribute *attr,
2069 const char *buf, size_t count)
2071 struct scsi_device *sdev = to_scsi_device(dev);
2072 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2081 if (kstrtouint(buf, 0, &val))
2087 hpb->params.activation_thld = val;
2091 static DEVICE_ATTR_RW(activation_thld);
2093 ufshpb_sysfs_param_show_func(normalization_factor);
2095 normalization_factor_store(struct device *dev, struct device_attribute *attr,
2096 const char *buf, size_t count)
2098 struct scsi_device *sdev = to_scsi_device(dev);
2099 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2108 if (kstrtouint(buf, 0, &val))
2111 if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
2114 hpb->params.normalization_factor = val;
2118 static DEVICE_ATTR_RW(normalization_factor);
2120 ufshpb_sysfs_param_show_func(eviction_thld_enter);
2122 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
2123 const char *buf, size_t count)
2125 struct scsi_device *sdev = to_scsi_device(dev);
2126 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2135 if (kstrtouint(buf, 0, &val))
2138 if (val <= hpb->params.eviction_thld_exit)
2141 hpb->params.eviction_thld_enter = val;
2145 static DEVICE_ATTR_RW(eviction_thld_enter);
2147 ufshpb_sysfs_param_show_func(eviction_thld_exit);
2149 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
2150 const char *buf, size_t count)
2152 struct scsi_device *sdev = to_scsi_device(dev);
2153 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2162 if (kstrtouint(buf, 0, &val))
2165 if (val <= hpb->params.activation_thld)
2168 hpb->params.eviction_thld_exit = val;
2172 static DEVICE_ATTR_RW(eviction_thld_exit);
2174 ufshpb_sysfs_param_show_func(read_timeout_ms);
2176 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
2177 const char *buf, size_t count)
2179 struct scsi_device *sdev = to_scsi_device(dev);
2180 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2189 if (kstrtouint(buf, 0, &val))
2192 /* read_timeout >> timeout_polling_interval */
2193 if (val < hpb->params.timeout_polling_interval_ms * 2)
2196 hpb->params.read_timeout_ms = val;
2200 static DEVICE_ATTR_RW(read_timeout_ms);
2202 ufshpb_sysfs_param_show_func(read_timeout_expiries);
2204 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
2205 const char *buf, size_t count)
2207 struct scsi_device *sdev = to_scsi_device(dev);
2208 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2217 if (kstrtouint(buf, 0, &val))
2223 hpb->params.read_timeout_expiries = val;
2227 static DEVICE_ATTR_RW(read_timeout_expiries);
2229 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
2231 timeout_polling_interval_ms_store(struct device *dev,
2232 struct device_attribute *attr,
2233 const char *buf, size_t count)
2235 struct scsi_device *sdev = to_scsi_device(dev);
2236 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2245 if (kstrtouint(buf, 0, &val))
2248 /* timeout_polling_interval << read_timeout */
2249 if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
2252 hpb->params.timeout_polling_interval_ms = val;
2256 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
2258 ufshpb_sysfs_param_show_func(inflight_map_req);
2259 static ssize_t inflight_map_req_store(struct device *dev,
2260 struct device_attribute *attr,
2261 const char *buf, size_t count)
2263 struct scsi_device *sdev = to_scsi_device(dev);
2264 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2273 if (kstrtouint(buf, 0, &val))
2276 if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
2279 hpb->params.inflight_map_req = val;
2283 static DEVICE_ATTR_RW(inflight_map_req);
2285 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2287 hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2288 hpb->params.normalization_factor = 1;
2289 hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2290 hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2291 hpb->params.read_timeout_ms = READ_TO_MS;
2292 hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2293 hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2294 hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2297 static struct attribute *hpb_dev_param_attrs[] = {
2298 &dev_attr_requeue_timeout_ms.attr,
2299 &dev_attr_activation_thld.attr,
2300 &dev_attr_normalization_factor.attr,
2301 &dev_attr_eviction_thld_enter.attr,
2302 &dev_attr_eviction_thld_exit.attr,
2303 &dev_attr_read_timeout_ms.attr,
2304 &dev_attr_read_timeout_expiries.attr,
2305 &dev_attr_timeout_polling_interval_ms.attr,
2306 &dev_attr_inflight_map_req.attr,
2310 struct attribute_group ufs_sysfs_hpb_param_group = {
2311 .name = "hpb_params",
2312 .attrs = hpb_dev_param_attrs,
2315 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2317 struct ufshpb_req *pre_req = NULL, *t;
2318 int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2321 INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2323 hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2324 hpb->throttle_pre_req = qd;
2325 hpb->num_inflight_pre_req = 0;
2330 for (i = 0; i < qd; i++) {
2331 pre_req = hpb->pre_req + i;
2332 INIT_LIST_HEAD(&pre_req->list_req);
2333 pre_req->req = NULL;
2335 pre_req->bio = bio_alloc(GFP_KERNEL, 1);
2339 pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2340 if (!pre_req->wb.m_page) {
2341 bio_put(pre_req->bio);
2345 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2350 list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2351 list_del_init(&pre_req->list_req);
2352 bio_put(pre_req->bio);
2353 __free_page(pre_req->wb.m_page);
2356 kfree(hpb->pre_req);
2360 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2362 struct ufshpb_req *pre_req = NULL;
2365 for (i = 0; i < hpb->throttle_pre_req; i++) {
2366 pre_req = hpb->pre_req + i;
2367 bio_put(hpb->pre_req[i].bio);
2368 if (pre_req->wb.m_page)
2369 __free_page(hpb->pre_req[i].wb.m_page);
2370 list_del_init(&pre_req->list_req);
2373 kfree(hpb->pre_req);
2376 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2378 hpb->stats.hit_cnt = 0;
2379 hpb->stats.miss_cnt = 0;
2380 hpb->stats.rb_noti_cnt = 0;
2381 hpb->stats.rb_active_cnt = 0;
2382 hpb->stats.rb_inactive_cnt = 0;
2383 hpb->stats.map_req_cnt = 0;
2384 hpb->stats.umap_req_cnt = 0;
2387 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2389 hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2391 ufshpb_hcm_param_init(hpb);
2394 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2398 spin_lock_init(&hpb->rgn_state_lock);
2399 spin_lock_init(&hpb->rsp_list_lock);
2400 spin_lock_init(&hpb->param_lock);
2402 INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2403 INIT_LIST_HEAD(&hpb->lh_act_srgn);
2404 INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2405 INIT_LIST_HEAD(&hpb->list_hpb_lu);
2407 INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2409 INIT_WORK(&hpb->ufshpb_normalization_work,
2410 ufshpb_normalization_work_handler);
2411 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2412 ufshpb_read_to_handler);
2415 hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2416 sizeof(struct ufshpb_req), 0, 0, NULL);
2417 if (!hpb->map_req_cache) {
2418 dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2423 hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2424 sizeof(struct page *) * hpb->pages_per_srgn,
2426 if (!hpb->m_page_cache) {
2427 dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2430 goto release_req_cache;
2433 ret = ufshpb_pre_req_mempool_init(hpb);
2435 dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2437 goto release_m_page_cache;
2440 ret = ufshpb_alloc_region_tbl(hba, hpb);
2442 goto release_pre_req_mempool;
2444 ufshpb_stat_init(hpb);
2445 ufshpb_param_init(hpb);
2450 poll = hpb->params.timeout_polling_interval_ms;
2451 schedule_delayed_work(&hpb->ufshpb_read_to_work,
2452 msecs_to_jiffies(poll));
2457 release_pre_req_mempool:
2458 ufshpb_pre_req_mempool_destroy(hpb);
2459 release_m_page_cache:
2460 kmem_cache_destroy(hpb->m_page_cache);
2462 kmem_cache_destroy(hpb->map_req_cache);
2466 static struct ufshpb_lu *
2467 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2468 struct ufshpb_dev_info *hpb_dev_info,
2469 struct ufshpb_lu_info *hpb_lu_info)
2471 struct ufshpb_lu *hpb;
2474 hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2478 hpb->lun = sdev->lun;
2479 hpb->sdev_ufs_lu = sdev;
2481 ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2483 ret = ufshpb_lu_hpb_init(hba, hpb);
2485 dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2489 sdev->hostdata = hpb;
2497 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2499 struct ufshpb_region *rgn, *next_rgn;
2500 struct ufshpb_subregion *srgn, *next_srgn;
2501 unsigned long flags;
2504 * If a device reset occurred, the remaining HPB region information
2505 * may be stale. Therefore, discard the HPB response lists that
2506 * remained after the reset to avoid unnecessary work.
2508 spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2509 list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2511 list_del_init(&rgn->list_inact_rgn);
2513 list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2515 list_del_init(&srgn->list_act_srgn);
2516 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2519 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2522 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2523 cancel_work_sync(&hpb->ufshpb_normalization_work);
2525 cancel_work_sync(&hpb->map_work);
static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
{
	int err = 0;
	bool flag_res = true;
	int try;

	/* wait for the device to complete HPB reset query */
	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
		dev_dbg(hba->dev, "%s start flag reset polling %d times\n",
			__func__, try);
		/* Poll fHpbReset flag to be cleared */
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
		if (err) {
			dev_err(hba->dev,
				"%s reading fHpbReset flag failed with error %d\n",
				__func__, err);
			return flag_res;
		}
		if (!flag_res)
			goto out;
		usleep_range(1000, 1100);
	}
	if (flag_res)
		dev_err(hba->dev, "%s fHpbReset was not cleared by the device\n",
			__func__);
out:
	return flag_res;
}
void ufshpb_reset(struct ufs_hba *hba)
{
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb)
			continue;
		if (ufshpb_get_state(hpb) != HPB_RESET)
			continue;
		ufshpb_set_state(hpb, HPB_PRESENT);
	}
}
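
/* Host reset entry: park active LUs in HPB_RESET, stop their work and drop stale response lists. */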
void ufshpb_reset_host(struct ufs_hba *hba)
{
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb)
			continue;
		if (ufshpb_get_state(hpb) != HPB_PRESENT)
			continue;
		ufshpb_set_state(hpb, HPB_RESET);
		ufshpb_cancel_jobs(hpb);
		ufshpb_discard_rsp_lists(hpb);
	}
}
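
/* Suspend: move active LUs to HPB_SUSPEND and cancel their work items. */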
void ufshpb_suspend(struct ufs_hba *hba)
{
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb)
			continue;
		if (ufshpb_get_state(hpb) != HPB_PRESENT)
			continue;
		ufshpb_set_state(hpb, HPB_SUSPEND);
		ufshpb_cancel_jobs(hpb);
	}
}
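
/*
 * Resume: bring present/suspended LUs back to HPB_PRESENT, kick the map
 * worker and restart the read-timeout poller in host control mode.
 */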
void ufshpb_resume(struct ufs_hba *hba)
{
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb)
			continue;
		if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
		    (ufshpb_get_state(hpb) != HPB_SUSPEND))
			continue;
		ufshpb_set_state(hpb, HPB_PRESENT);
		ufshpb_kick_map_work(hpb);
		if (hpb->is_hcm) {
			unsigned int poll =
				hpb->params.timeout_polling_interval_ms;

			schedule_delayed_work(&hpb->ufshpb_read_to_work,
				msecs_to_jiffies(poll));
		}
	}
}
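
/*
 * Read the unit descriptor of @lun and extract its HPB parameters
 * (block count, pinned region range, max active regions) into @hpb_lu_info.
 */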
static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
			      struct ufshpb_lu_info *hpb_lu_info)
{
	u16 max_active_rgns;
	u8 lu_enable;
	int size;
	int ret;
	char desc_buf[QUERY_DESC_MAX_SIZE];

	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);

	pm_runtime_get_sync(hba->dev);
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    QUERY_DESC_IDN_UNIT, lun, 0,
					    desc_buf, &size);
	pm_runtime_put_sync(hba->dev);

	if (ret) {
		dev_err(hba->dev,
			"%s: idn: %d lun: %d query request failed",
			__func__, QUERY_DESC_IDN_UNIT, lun);
		return ret;
	}

	lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
	if (lu_enable != LU_ENABLED_HPB_FUNC)
		return -ENODEV;

	max_active_rgns = get_unaligned_be16(
			desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
	if (!max_active_rgns) {
		dev_err(hba->dev,
			"lun %d wrong number of max active regions\n", lun);
		return -ENODEV;
	}

	hpb_lu_info->num_blocks = get_unaligned_be64(
			desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
	hpb_lu_info->pinned_start = get_unaligned_be16(
			desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
	hpb_lu_info->num_pinned = get_unaligned_be16(
			desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
	hpb_lu_info->max_active_rgns = max_active_rgns;

	return 0;
}
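
/* Tear down per-LU HPB state: mark it failed, detach it from the sdev, stop its work and free its memory. */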
void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (!hpb)
		return;

	ufshpb_set_state(hpb, HPB_FAILED);

	sdev = hpb->sdev_ufs_lu;
	sdev->hostdata = NULL;

	ufshpb_cancel_jobs(hpb);
	ufshpb_pre_req_mempool_destroy(hpb);
	ufshpb_destroy_region_tbl(hpb);

	kmem_cache_destroy(hpb->map_req_cache);
	kmem_cache_destroy(hpb->m_page_cache);

	list_del_init(&hpb->list_hpb_lu);
	kfree(hpb);
}
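
/*
 * Runs once after the last LU finishes slave configuration: trim the global
 * pools to what is actually needed, then either activate every LU or tear
 * everything down if the device-side HPB reset did not complete.
 */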
static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
{
	int pool_size;
	struct ufshpb_lu *hpb;
	struct scsi_device *sdev;
	bool init_success;

	if (tot_active_srgn_pages == 0) {
		ufshpb_remove(hba);
		return;
	}

	init_success = !ufshpb_check_hpb_reset_query(hba);

	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
	if (pool_size > tot_active_srgn_pages) {
		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
	}

	shost_for_each_device(sdev, hba->host) {
		hpb = ufshpb_get_hpb_data(sdev);
		if (!hpb)
			continue;
		if (init_success) {
			ufshpb_set_state(hpb, HPB_PRESENT);
			if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
				queue_work(ufshpb_wq, &hpb->map_work);
			if (!hpb->is_hcm)
				ufshpb_issue_umap_all_req(hpb);
		} else {
			dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
			ufshpb_destroy_lu(hba, sdev);
		}
	}
	if (!init_success)
		ufshpb_remove(hba);
}
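
/*
 * Per-LU hook from slave configuration: read the unit descriptor, allocate
 * the LU state and account its map pages. The last LU to finish triggers
 * ufshpb_hpb_lu_prepared().
 */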
void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct ufshpb_lu *hpb;
	int ret;
	struct ufshpb_lu_info hpb_lu_info = { 0 };
	int lun = sdev->lun;

	if (lun >= hba->dev_info.max_lu_supported)
		goto out;

	ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
	if (ret)
		goto out;

	hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
				  &hpb_lu_info);
	if (!hpb)
		goto out;

	tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
			hpb->srgns_per_rgn * hpb->pages_per_srgn;
out:
	/* All LUs are initialized */
	if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
		ufshpb_hpb_lu_prepared(hba);
}
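
/* Create the global slab caches, mempools and workqueue shared by all HPB LUs. */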
static int ufshpb_init_mem_wq(struct ufs_hba *hba)
{
	int ret;
	unsigned int pool_size;

	ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
					sizeof(struct ufshpb_map_ctx),
					0, 0, NULL);
	if (!ufshpb_mctx_cache) {
		dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
		return -ENOMEM;
	}

	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
	dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
	       __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);

	ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
						    ufshpb_mctx_cache);
	if (!ufshpb_mctx_pool) {
		dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
		ret = -ENOMEM;
		goto release_mctx_cache;
	}

	ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
	if (!ufshpb_page_pool) {
		dev_err(hba->dev, "ufshpb: cannot init page pool\n");
		ret = -ENOMEM;
		goto release_mctx_pool;
	}

	ufshpb_wq = alloc_workqueue("ufshpb-wq",
					WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!ufshpb_wq) {
		dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
		ret = -ENOMEM;
		goto release_page_pool;
	}

	return 0;

release_page_pool:
	mempool_destroy(ufshpb_page_pool);
release_mctx_pool:
	mempool_destroy(ufshpb_mctx_pool);
release_mctx_cache:
	kmem_cache_destroy(ufshpb_mctx_cache);
	return ret;
}
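
/* Parse the HPB fields of the geometry descriptor; disable HPB if no HPB LU or invalid geometry is reported. */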
void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
{
	struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
	int max_active_rgns = 0;
	int hpb_num_lu;

	hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
	if (hpb_num_lu == 0) {
		dev_err(hba->dev, "No HPB LU supported\n");
		hpb_info->hpb_disabled = true;
		return;
	}

	hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
	hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
	max_active_rgns = get_unaligned_be16(geo_buf +
			  GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);

	if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
	    max_active_rgns == 0) {
		dev_err(hba->dev, "No HPB supported device\n");
		hpb_info->hpb_disabled = true;
		return;
	}
}
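
/*
 * Parse the HPB fields of the device descriptor: control mode, HPB version
 * (flagging the 1.0 legacy case), max single-command size and number of LUs.
 */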
void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
	int version, ret;
	u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;

	hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];

	version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
	if ((version != HPB_SUPPORT_VERSION) &&
	    (version != HPB_SUPPORT_LEGACY_VERSION)) {
		dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
			__func__, version);
		hpb_dev_info->hpb_disabled = true;
		return;
	}

	if (version == HPB_SUPPORT_LEGACY_VERSION)
		hpb_dev_info->is_legacy = true;

	pm_runtime_get_sync(hba->dev);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
	pm_runtime_put_sync(hba->dev);

	if (ret)
		dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
			__func__);
	hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;

	/*
	 * Get the number of user logical units so we can tell when every
	 * scsi_device has finished initialization.
	 */
	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
}
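
/* Device-level bring-up: allocate the shared pools and workqueue, then issue the fHpbReset query so the device rebuilds its HPB state. */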
void ufshpb_init(struct ufs_hba *hba)
{
	struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
	int try;
	int ret;

	if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
		return;

	if (ufshpb_init_mem_wq(hba)) {
		hpb_dev_info->hpb_disabled = true;
		return;
	}

	atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
	tot_active_srgn_pages = 0;
	/* issue HPB reset query */
	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
					QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
		if (!ret)
			break;
	}
}
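
/* Undo ufshpb_init_mem_wq(): release the global pools, cache and workqueue. */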
void ufshpb_remove(struct ufs_hba *hba)
{
	mempool_destroy(ufshpb_page_pool);
	mempool_destroy(ufshpb_mctx_pool);
	kmem_cache_destroy(ufshpb_mctx_cache);

	destroy_workqueue(ufshpb_wq);
}
module_param(ufshpb_host_map_kbytes, uint, 0644);
MODULE_PARM_DESC(ufshpb_host_map_kbytes,
	"ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");