scsi: ufs: core: Do not open code SZ_x
author		Avri Altman <avri.altman@wdc.com>
		Wed, 31 May 2023 07:00:09 +0000 (10:00 +0300)
committer	Martin K. Petersen <martin.petersen@oracle.com>
		Wed, 31 May 2023 15:49:28 +0000 (11:49 -0400)
Do not open code SZ_x: replace the open-coded size constants (1024, 4096, 256 * 1024, 1 << 20, and the 8191/32767/65535 power-of-two-minus-one values) with the equivalent SZ_x macros. No functional change intended.
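
For reference, the SZ_x macros used by this patch are defined in include/linux/sizes.h as plain hexadecimal constants:

    #define SZ_1K    0x00000400
    #define SZ_2K    0x00000800
    #define SZ_4K    0x00001000
    #define SZ_8K    0x00002000
    #define SZ_32K   0x00008000
    #define SZ_64K   0x00010000
    #define SZ_256K  0x00040000
    #define SZ_1M    0x00100000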

Signed-off-by: Avri Altman <avri.altman@wdc.com>
Link: https://lore.kernel.org/r/20230531070009.4593-1-avri.altman@wdc.com
Reviewed-by: Bean Huo <beanhuo@micron.com>
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Reviewed-by: Keoseong Park <keosung.park@samsung.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/ufs/core/ufshcd.c
drivers/ufs/core/ufshpb.c
drivers/ufs/core/ufshpb.h
drivers/ufs/host/ufs-exynos.c
drivers/ufs/host/ufs-hisi.c
include/ufs/ufshci.h

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 941e613548daa5ec2708ce51b2410e855b009549..0b2dc692d1b8c1fdf07b77c09f753b2628a67e03 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2501,7 +2501,7 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
                         * 11b to indicate Dword granularity. A value of '3'
                         * indicates 4 bytes, '7' indicates 8 bytes, etc."
                         */
-                       WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
+                       WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
                        prd->size = cpu_to_le32(len - 1);
                        prd->addr = cpu_to_le64(sg->dma_address);
                        prd->reserved = 0;
@@ -3733,7 +3733,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 
        /*
         * Allocate memory for UTP Transfer descriptors
-        * UFSHCI requires 1024 byte alignment of UTRD
+        * UFSHCI requires 1KB alignment of UTRD
         */
        utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
        hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
@@ -3741,7 +3741,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
                                                   &hba->utrdl_dma_addr,
                                                   GFP_KERNEL);
        if (!hba->utrdl_base_addr ||
-           WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
+           WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
                dev_err(hba->dev,
                        "Transfer Descriptor Memory allocation failed\n");
                goto out;
@@ -3757,7 +3757,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
                goto skip_utmrdl;
        /*
         * Allocate memory for UTP Task Management descriptors
-        * UFSHCI requires 1024 byte alignment of UTMRD
+        * UFSHCI requires 1KB alignment of UTMRD
         */
        utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
        hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
@@ -3765,7 +3765,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
                                                    &hba->utmrdl_dma_addr,
                                                    GFP_KERNEL);
        if (!hba->utmrdl_base_addr ||
-           WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
+           WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
                dev_err(hba->dev,
                "Task Management Descriptor Memory allocation failed\n");
                goto out;
@@ -5102,7 +5102,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
 
        blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
        if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
-               blk_queue_update_dma_alignment(q, 4096 - 1);
+               blk_queue_update_dma_alignment(q, SZ_4K - 1);
        /*
         * Block runtime-pm until all consumers are added.
         * Refer ufshcd_setup_links().
@@ -8728,7 +8728,7 @@ static const struct scsi_host_template ufshcd_driver_template = {
        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
        .can_queue              = UFSHCD_CAN_QUEUE,
        .max_segment_size       = PRDT_DATA_BYTE_COUNT_MAX,
-       .max_sectors            = (1 << 20) / SECTOR_SIZE, /* 1 MiB */
+       .max_sectors            = SZ_1M / SECTOR_SIZE,
        .max_host_blocked       = 1,
        .track_queue_depth      = 1,
        .skip_settle_delay      = 1,
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index a46a7666c891b49a8ff7a50f20137367943e919a..255f8b38d0c2d29f2ad016e10229e1039de6e21c 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -30,7 +30,7 @@ static struct kmem_cache *ufshpb_mctx_cache;
 static mempool_t *ufshpb_mctx_pool;
 static mempool_t *ufshpb_page_pool;
 /* A cache size of 2MB can cache ppn in the 1GB range. */
-static unsigned int ufshpb_host_map_kbytes = 2048;
+static unsigned int ufshpb_host_map_kbytes = SZ_2K;
 static int tot_active_srgn_pages;
 
 static struct workqueue_struct *ufshpb_wq;
@@ -2461,7 +2461,7 @@ static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
 
        init_success = !ufshpb_check_hpb_reset_query(hba);
 
-       pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+       pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
        if (pool_size > tot_active_srgn_pages) {
                mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
                mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
@@ -2527,7 +2527,7 @@ static int ufshpb_init_mem_wq(struct ufs_hba *hba)
                return -ENOMEM;
        }
 
-       pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+       pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
        dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
               __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
 
diff --git a/drivers/ufs/core/ufshpb.h b/drivers/ufs/core/ufshpb.h
index 0d6e6004d7837b7b3c1c0508c6c76c0f5d1d7ffd..b428bbdd27992db14011a986ed98118a5d5bd1de 100644
--- a/drivers/ufs/core/ufshpb.h
+++ b/drivers/ufs/core/ufshpb.h
@@ -25,7 +25,7 @@
 
 /* hpb map & entries macro */
 #define HPB_RGN_SIZE_UNIT                      512
-#define HPB_ENTRY_BLOCK_SIZE                   4096
+#define HPB_ENTRY_BLOCK_SIZE                   SZ_4K
 #define HPB_ENTRY_SIZE                         0x8
 #define PINNED_NOT_SET                         U32_MAX
 
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 0bf5390739e1f0200d4b1b8609c86082a0470cc4..d56840447bd02a1268a31896fbe3325fbb9a2aa4 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1306,7 +1306,7 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
                 * (ufshcd_async_scan()). Note: this callback may also be called
                 * from other functions than ufshcd_init().
                 */
-               hba->host->max_segment_size = 4096;
+               hba->host->max_segment_size = SZ_4K;
 
                if (ufs->drv_data->pre_hce_enable) {
                        ret = ufs->drv_data->pre_hce_enable(ufs);
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 4c423eba8aa929ec5fd8c156e1862ef68277da76..1e1d388f359a6f356d317c3ac8df79846cf42c81 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -335,29 +335,29 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
        /* PA_TxSkip */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
        /*PA_PWRModeUserData0 = 8191, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), SZ_8K - 1);
        /*PA_PWRModeUserData1 = 65535, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), SZ_64K - 1);
        /*PA_PWRModeUserData2 = 32767, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), SZ_32K - 1);
        /*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), SZ_8K - 1);
        /*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), SZ_64K - 1);
        /*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), SZ_32K - 1);
        /*PA_PWRModeUserData3 = 8191, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), SZ_8K - 1);
        /*PA_PWRModeUserData4 = 65535, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), SZ_64K - 1);
        /*PA_PWRModeUserData5 = 32767, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), SZ_32K - 1);
        /*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), SZ_8K - 1);
        /*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), SZ_64K - 1);
        /*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
-       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), SZ_32K - 1);
 }
 
 static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 11424bb038141bb5a08f485187deec5b31fa9f3b..db2d5db5c88e0a1be23d33273667ad1c9e7439e2 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -453,7 +453,7 @@ enum {
 };
 
 /* The maximum length of the data byte count field in the PRDT is 256KB */
-#define PRDT_DATA_BYTE_COUNT_MAX       (256 * 1024)
+#define PRDT_DATA_BYTE_COUNT_MAX       SZ_256K
 /* The granularity of the data byte count field in the PRDT is 32-bit */
 #define PRDT_DATA_BYTE_COUNT_PAD       4
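
A note on the recurring SZ_x - 1 pattern above: since every SZ_x constant is a power of two, SZ_x - 1 serves both as the low-bits mask for alignment checks and as the all-ones maximum of a field of that width. A minimal sketch (illustrative only; dma_addr is a placeholder name, not an identifier from this patch):

    #include <linux/sizes.h>

    /* Alignment check: SZ_1K - 1 == 0x3ff masks the low ten bits,
     * so a nonzero result means dma_addr is not 1 KiB aligned.
     */
    if (dma_addr & (SZ_1K - 1))
        pr_warn("descriptor list is not 1K aligned\n");

    /* Maximum field value: SZ_8K - 1 == 8191 == 0x1fff, the largest
     * value representable in 13 bits; likewise SZ_64K - 1 == 65535
     * (16 bits) and SZ_32K - 1 == 32767 (15 bits).
     */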