diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs index ae0191295d29..e7b49bc894f5 100644 --- a/Documentation/ABI/testing/sysfs-driver-ufs +++ b/Documentation/ABI/testing/sysfs-driver-ufs @@ -1604,3 +1604,52 @@ Description: prevent the UFS from frequently performing clock gating/ungating. The attribute is read/write. + +What: /sys/bus/platform/drivers/ufshcd/*/wb_resize_enable +What: /sys/bus/platform/devices/*.ufs/wb_resize_enable +Date: April 2025 +Contact: Huan Tang +Description: + The host can initiate a WriteBooster buffer resize by writing one of + the following values to this attribute. + + ======== ====================================== + idle There is no resize operation + decrease Decrease WriteBooster buffer size + increase Increase WriteBooster buffer size + ======== ====================================== + + The file is write only. + +What: /sys/bus/platform/drivers/ufshcd/*/attributes/wb_resize_hint +What: /sys/bus/platform/devices/*.ufs/attributes/wb_resize_hint +Date: April 2025 +Contact: Huan Tang +Description: + wb_resize_hint indicates which type of WriteBooster buffer resize + is recommended by the device. + + ========= ====================================== + keep Recommend to keep the buffer size + decrease Recommend to decrease the buffer size + increase Recommend to increase the buffer size + ========= ====================================== + + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/attributes/wb_resize_status +What: /sys/bus/platform/devices/*.ufs/attributes/wb_resize_status +Date: April 2025 +Contact: Huan Tang +Description: + The host can check the resize operation status of the WriteBooster + buffer by reading this attribute. + + ================ ======================================== + idle Resize operation is not issued + in_progress Resize operation in progress + complete_success Resize operation completed successfully + general_failure Resize operation general failure + ================ ======================================== + + The file is read only. diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 57bd49eea777..66c0d1ba2a33 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1882,6 +1882,11 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, if (IS_ERR_OR_NULL(ice)) return PTR_ERR_OR_ZERO(ice); + if (qcom_ice_get_supported_key_type(ice) != BLK_CRYPTO_KEY_TYPE_RAW) { + dev_warn(dev, "Wrapped keys not supported. Disabling inline encryption support.\n"); + return 0; + } + msm_host->ice = ice; /* Initialize the blk_crypto_profile */ @@ -1962,16 +1967,7 @@ static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile, struct sdhci_msm_host *msm_host = sdhci_msm_host_from_crypto_profile(profile); - /* Only AES-256-XTS has been tested so far. 
*/ - if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS) - return -EOPNOTSUPP; - - return qcom_ice_program_key(msm_host->ice, - QCOM_ICE_CRYPTO_ALG_AES_XTS, - QCOM_ICE_CRYPTO_KEY_SIZE_256, - key->bytes, - key->crypto_cfg.data_unit_size / 512, - slot); + return qcom_ice_program_key(msm_host->ice, slot, key); } static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile, diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c index 5e7fb110bc3f..d9a231fc0e0d 100644 --- a/drivers/scsi/elx/libefc_sli/sli4.c +++ b/drivers/scsi/elx/libefc_sli/sli4.c @@ -3804,7 +3804,7 @@ sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc, wr_obj->desired_write_len_dword = cpu_to_le32(dwflags); wr_obj->write_offset = cpu_to_le32(offset); - strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1); + strscpy(wr_obj->object_name, obj_name); wr_obj->host_buffer_descriptor_count = cpu_to_le32(1); bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor; @@ -3833,7 +3833,7 @@ sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name) SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_delete_object)); - strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1); + strscpy(req->object_name, obj_name); return 0; } @@ -3856,7 +3856,7 @@ sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len, cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN); rd_obj->read_offset = cpu_to_le32(offset); - strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1); + strscpy(rd_obj->object_name, obj_name); rd_obj->host_buffer_descriptor_count = cpu_to_le32(1); bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6574f9e74476..a335d34070d3 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -6003,9 +6003,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); - memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); - strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, + memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); + phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0'; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 85ff95c6543a..7618f9cc9986 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -644,7 +644,7 @@ static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); #define FLASH_CMD_SET_NVMD 0x02 struct flash_command { - u8 command[8]; + u8 command[8] __nonstring; int code; }; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index f0eec4708ddd..8cad54a682d5 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -294,6 +294,14 @@ struct tape_block { #define FF_SA (F_SA_HIGH | F_SA_LOW) #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY) +/* Device selection bit mask */ +#define DS_ALL 0xffffffff +#define DS_SBC (1 << TYPE_DISK) +#define DS_SSC (1 << TYPE_TAPE) +#define DS_ZBC (1 << TYPE_ZBC) + +#define DS_NO_SSC (DS_ALL & ~DS_SSC) + #define SDEBUG_MAX_PARTS 4 #define SDEBUG_MAX_CMD_LEN 32 @@ -472,6 +480,7 @@ struct opcode_info_t { /* for terminating element */ u8 opcode; /* if num_attached > 0, 
preferred */ u16 sa; /* service action */ + u32 devsel; /* device type mask for this definition */ u32 flags; /* OR-ed set of SDEB_F_* */ int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); const struct opcode_info_t *arrp; /* num_attached elements or NULL */ @@ -519,7 +528,8 @@ enum sdeb_opcode_index { SDEB_I_WRITE_FILEMARKS = 35, SDEB_I_SPACE = 36, SDEB_I_FORMAT_MEDIUM = 37, - SDEB_I_LAST_ELEM_P1 = 38, /* keep this last (previous + 1) */ + SDEB_I_ERASE = 38, + SDEB_I_LAST_ELEM_P1 = 39, /* keep this last (previous + 1) */ }; @@ -530,7 +540,7 @@ static const unsigned char opcode_ind_arr[256] = { SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0, SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE, - 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG, + 0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG, SDEB_I_ALLOW_REMOVAL, 0, /* 0x20; 0x20->0x3f: 10 byte cdbs */ 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0, @@ -585,7 +595,9 @@ static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *); @@ -613,8 +625,10 @@ static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *); static int sdebug_do_add_host(bool mk_new_store); static int sdebug_add_host_helper(int per_host_idx); @@ -629,113 +643,121 @@ static void sdebug_erase_all_stores(bool apart_from_first); * should be placed in opcode_info_arr[], the others should be placed here. 
*/ static const struct opcode_info_t msense_iarr[] = { - {0, 0x1a, 0, F_D_IN, NULL, NULL, + {0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL, {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t mselect_iarr[] = { - {0, 0x15, 0, F_D_OUT, NULL, NULL, + {0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL, {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t read_iarr[] = { - {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */ + {0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */ + {0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */ {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */ + {0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */ + {6, 0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7, 0, 0, 0, 0} }, }; static const struct opcode_info_t write_iarr[] = { - {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */ + {0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */ NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */ + {0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */ NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */ + {0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */ + NULL, {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0} }, + {0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */ NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7, 0, 0, 0, 0} }, }; static const struct opcode_info_t verify_iarr[] = { - {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */ + {0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */ NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t sa_in_16_iarr[] = { - {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, + {0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */ - {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL, + {0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL, {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0} }, /* GET STREAM STATUS */ }; static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */ - {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0, + {0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0, NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa, 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */ - {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat, + {0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat, 
NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8, 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */ }; static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */ - {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, + {0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */ - {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, + {0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */ }; static const struct opcode_info_t write_same_iarr[] = { - {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL, + {0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL, {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */ }; static const struct opcode_info_t reserve_iarr[] = { - {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */ + {0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RESERVE(6) */ {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t release_iarr[] = { - {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */ + {0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RELEASE(6) */ {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t sync_cache_iarr[] = { - {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL, + {0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL, {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */ }; static const struct opcode_info_t pre_fetch_iarr[] = { - {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL, + {0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL, {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */ + {0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL, + {10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, /* READ POSITION (10) */ }; static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */ - {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL, + {0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL, {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */ - {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL, + {0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL, {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */ - {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL, + {0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL, {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */ }; static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */ - {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL, + {0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL, {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */ }; @@ -746,130 +768,132 @@ static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */ * REPORT 
SUPPORTED OPERATION CODES. */ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = { /* 0 */ - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */ + {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */ + {0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */ {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL, + {0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL, {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORT LUNS */ - {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL, + {0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL, {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ + {0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, /* 5 */ - {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */ + {ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN, /* MODE SENSE(10) */ resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */ + {ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT, /* MODE SELECT(10) */ resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */ + {0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */ {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */ + {0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */ {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */ + {ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */ resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* 10 */ - {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO, + {ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, write_iarr, /* WRITE(16) */ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, - {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ + {0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN, + {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} }, - {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat, + {0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat, NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */ - {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN, + {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN, 
resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */ maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* 15 */ - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ + {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(verify_iarr), 0x8f, 0, + {ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */ verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, - {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO, + {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO, resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */ {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff, 0xff, 0xff} }, - {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT, + {ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */ {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT, + {ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */ {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* 20 */ - {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */ + {0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */ {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x1, 0, 0, resp_rewind, NULL, + {0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL, {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ + {0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */ + {0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL, /* SEND DIAGNOSTIC */ {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */ + {0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* 25 */ - {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL, + {0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* WRITE_BUFFER */ - {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, + {ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS, + {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, sync_cache_iarr, {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* SYNC_CACHE (10) */ - {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL, + {0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL, {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */ - {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO, + {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, pre_fetch_iarr, {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* PRE-FETCH (10) */ /* READ POSITION (10) */ /* 
30 */ - {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS, + {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */ {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} }, - {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS, + {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */ {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} }, /* 32 */ - {0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO, + {0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_atomic_write, NULL, /* ATOMIC WRITE 16 */ {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }, - {0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */ + {0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x2b, 0, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */ + {0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */ {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */ + {0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */ {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x11, 0, F_D_IN, resp_space, NULL, /* SPACE (6) */ + {0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL, /* SPACE (6) */ {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x4, 0, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */ + {0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */ {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, -/* 38 */ + {0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL, /* ERASE (6) */ + {6, 0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +/* 39 */ /* sentinel */ - {0xff, 0, 0, 0, NULL, NULL, /* terminating element */ + {0xff, 0, 0, 0, 0, NULL, NULL, /* terminating element */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; @@ -1015,6 +1039,19 @@ static const int condition_met_result = SAM_STAT_CONDITION_MET; static struct dentry *sdebug_debugfs_root; static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain); +static u32 sdebug_get_devsel(struct scsi_device *sdp) +{ + unsigned char devtype = sdp->type; + u32 devsel; + + if (devtype < 32) + devsel = (1 << devtype); + else + devsel = DS_ALL; + + return devsel; +} + static void sdebug_err_free(struct rcu_head *head) { struct sdebug_err_inject *inject = @@ -2032,13 +2069,19 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) unsigned char *cmd = scp->cmnd; u32 alloc_len, n; int ret; - bool have_wlun, is_disk, is_zbc, is_disk_zbc; + bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape; alloc_len = get_unaligned_be16(cmd + 3); arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); if (! 
arr) return DID_REQUEUE << 16; - is_disk = (sdebug_ptype == TYPE_DISK); + if (scp->device->type >= 32) { + is_disk = (sdebug_ptype == TYPE_DISK); + is_tape = (sdebug_ptype == TYPE_TAPE); + } else { + is_disk = (scp->device->type == TYPE_DISK); + is_tape = (scp->device->type == TYPE_TAPE); + } is_zbc = devip->zoned; is_disk_zbc = (is_disk || is_zbc); have_wlun = scsi_is_wlun(scp->device->lun); @@ -2047,7 +2090,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */ else - pq_pdt = (sdebug_ptype & 0x1f); + pq_pdt = ((scp->device->type >= 32 ? + sdebug_ptype : scp->device->type) & 0x1f); arr[0] = pq_pdt; if (0x2 & cmd[1]) { /* CMDDT bit set */ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); @@ -2170,7 +2214,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (is_disk) { /* SBC-4 no version claimed */ put_unaligned_be16(0x600, arr + n); n += 2; - } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ + } else if (is_tape) { /* SSC-4 rev 3 */ put_unaligned_be16(0x525, arr + n); n += 2; } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */ @@ -2279,7 +2323,7 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) changing = (stopped_state != want_stop); if (changing) atomic_xchg(&devip->stopped, want_stop); - if (sdebug_ptype == TYPE_TAPE && !want_stop) { + if (scp->device->type == TYPE_TAPE && !want_stop) { int i; set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */ @@ -2454,11 +2498,12 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, u8 reporting_opts, req_opcode, sdeb_i, supp; u16 req_sa, u; u32 alloc_len, a_len; - int k, offset, len, errsts, count, bump, na; + int k, offset, len, errsts, bump, na; const struct opcode_info_t *oip; const struct opcode_info_t *r_oip; u8 *arr; u8 *cmd = scp->cmnd; + u32 devsel = sdebug_get_devsel(scp->device); rctd = !!(cmd[2] & 0x80); reporting_opts = cmd[2] & 0x7; @@ -2481,34 +2526,30 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, } switch (reporting_opts) { case 0: /* all commands */ - /* count number of commands */ - for (count = 0, oip = opcode_info_arr; - oip->num_attached != 0xff; ++oip) { - if (F_INV_OP & oip->flags) - continue; - count += (oip->num_attached + 1); - } bump = rctd ? 
20 : 8; - put_unaligned_be32(count * bump, arr); for (offset = 4, oip = opcode_info_arr; oip->num_attached != 0xff && offset < a_len; ++oip) { if (F_INV_OP & oip->flags) continue; + if ((devsel & oip->devsel) != 0) { + arr[offset] = oip->opcode; + put_unaligned_be16(oip->sa, arr + offset + 2); + if (rctd) + arr[offset + 5] |= 0x2; + if (FF_SA & oip->flags) + arr[offset + 5] |= 0x1; + put_unaligned_be16(oip->len_mask[0], arr + offset + 6); + if (rctd) + put_unaligned_be16(0xa, arr + offset + 8); + offset += bump; + } na = oip->num_attached; - arr[offset] = oip->opcode; - put_unaligned_be16(oip->sa, arr + offset + 2); - if (rctd) - arr[offset + 5] |= 0x2; - if (FF_SA & oip->flags) - arr[offset + 5] |= 0x1; - put_unaligned_be16(oip->len_mask[0], arr + offset + 6); - if (rctd) - put_unaligned_be16(0xa, arr + offset + 8); r_oip = oip; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { if (F_INV_OP & oip->flags) continue; - offset += bump; + if ((devsel & oip->devsel) == 0) + continue; arr[offset] = oip->opcode; put_unaligned_be16(oip->sa, arr + offset + 2); if (rctd) @@ -2516,14 +2557,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, if (FF_SA & oip->flags) arr[offset + 5] |= 0x1; put_unaligned_be16(oip->len_mask[0], - arr + offset + 6); + arr + offset + 6); if (rctd) put_unaligned_be16(0xa, arr + offset + 8); + offset += bump; } oip = r_oip; - offset += bump; } + put_unaligned_be32(offset - 4, arr); break; case 1: /* one command: opcode only */ case 2: /* one command: opcode plus service action */ @@ -2549,13 +2591,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, return check_condition_result; } if (0 == (FF_SA & oip->flags) && - req_opcode == oip->opcode) + (devsel & oip->devsel) != 0 && + req_opcode == oip->opcode) supp = 3; else if (0 == (FF_SA & oip->flags)) { na = oip->num_attached; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { - if (req_opcode == oip->opcode) + if (req_opcode == oip->opcode && + (devsel & oip->devsel) != 0) break; } supp = (k >= na) ? 1 : 3; @@ -2563,7 +2607,8 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, na = oip->num_attached; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { - if (req_sa == oip->sa) + if (req_sa == oip->sa && + (devsel & oip->devsel) != 0) break; } supp = (k >= na) ? 1 : 3; @@ -2914,9 +2959,9 @@ static int resp_mode_sense(struct scsi_cmnd *scp, subpcode = cmd[3]; msense_6 = (MODE_SENSE == cmd[0]); llbaa = msense_6 ? false : !!(cmd[1] & 0x10); - is_disk = (sdebug_ptype == TYPE_DISK); + is_disk = (scp->device->type == TYPE_DISK); is_zbc = devip->zoned; - is_tape = (sdebug_ptype == TYPE_TAPE); + is_tape = (scp->device->type == TYPE_TAPE); if ((is_disk || is_zbc || is_tape) && !dbd) bd_len = llbaa ? 16 : 8; else @@ -3131,7 +3176,7 @@ static int resp_mode_select(struct scsi_cmnd *scp, md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); off = (mselect6 ? 
4 : 8); - if (sdebug_ptype == TYPE_TAPE) { + if (scp->device->type == TYPE_TAPE) { int blksize; if (bd_len != 8) { @@ -3196,7 +3241,7 @@ static int resp_mode_select(struct scsi_cmnd *scp, } break; case 0xf: /* Compression mode page */ - if (sdebug_ptype != TYPE_TAPE) + if (scp->device->type != TYPE_TAPE) goto bad_pcode; if ((arr[off + 2] & 0x40) != 0) { devip->tape_dce = (arr[off + 2] & 0x80) != 0; @@ -3204,7 +3249,7 @@ static int resp_mode_select(struct scsi_cmnd *scp, } break; case 0x11: /* Medium Partition Mode Page (tape) */ - if (sdebug_ptype == TYPE_TAPE) { + if (scp->device->type == TYPE_TAPE) { int fld; fld = process_medium_part_m_pg(devip, &arr[off], pg_len); @@ -3563,6 +3608,30 @@ static int resp_space(struct scsi_cmnd *scp, return check_condition_result; } +enum {SDEBUG_READ_POSITION_ARR_SZ = 20}; +static int resp_read_position(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + int all_length; + unsigned char arr[20]; + unsigned int pos; + + all_length = get_unaligned_be16(cmd + 7); + if ((cmd[1] & 0xfe) != 0 || + all_length != 0) { /* only short form */ + mk_sense_invalid_fld(scp, SDEB_IN_CDB, + all_length ? 7 : 1, 0); + return check_condition_result; + } + memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ); + arr[1] = devip->tape_partition; + pos = devip->tape_location[devip->tape_partition]; + put_unaligned_be32(pos, arr + 4); + put_unaligned_be32(pos, arr + 8); + return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ); +} + static int resp_rewind(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { @@ -3604,10 +3673,6 @@ static int resp_format_medium(struct scsi_cmnd *scp, int res = 0; unsigned char *cmd = scp->cmnd; - if (sdebug_ptype != TYPE_TAPE) { - mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1); - return check_condition_result; - } if (cmd[2] > 2) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1); return check_condition_result; @@ -3631,6 +3696,19 @@ static int resp_format_medium(struct scsi_cmnd *scp, return 0; } +static int resp_erase(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int partition = devip->tape_partition; + int pos = devip->tape_location[partition]; + struct tape_block *blp; + + blp = devip->tape_blocks[partition] + pos; + blp->fl_size = TAPE_BLOCK_EOD_FLAG; + + return 0; +} + static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip) { return devip->nr_zones != 0; @@ -4467,9 +4545,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) u8 *cmd = scp->cmnd; bool meta_data_locked = false; - if (sdebug_ptype == TYPE_TAPE) - return resp_read_tape(scp, devip); - switch (cmd[0]) { case READ_16: ei_lba = 0; @@ -4839,9 +4914,6 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) u8 *cmd = scp->cmnd; bool meta_data_locked = false; - if (sdebug_ptype == TYPE_TAPE) - return resp_write_tape(scp, devip); - switch (cmd[0]) { case WRITE_16: ei_lba = 0; @@ -5573,7 +5645,6 @@ static int resp_sync_cache(struct scsi_cmnd *scp, * * The pcode 0x34 is also used for READ POSITION by tape devices. 
*/ -enum {SDEBUG_READ_POSITION_ARR_SZ = 20}; static int resp_pre_fetch(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { @@ -5585,31 +5656,6 @@ static int resp_pre_fetch(struct scsi_cmnd *scp, struct sdeb_store_info *sip = devip2sip(devip, true); u8 *fsp = sip->storep; - if (sdebug_ptype == TYPE_TAPE) { - if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */ - int all_length; - unsigned char arr[20]; - unsigned int pos; - - all_length = get_unaligned_be16(cmd + 7); - if ((cmd[1] & 0xfe) != 0 || - all_length != 0) { /* only short form */ - mk_sense_invalid_fld(scp, SDEB_IN_CDB, - all_length ? 7 : 1, 0); - return check_condition_result; - } - memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ); - arr[1] = devip->tape_partition; - pos = devip->tape_location[devip->tape_partition]; - put_unaligned_be32(pos, arr + 4); - put_unaligned_be32(pos, arr + 8); - return fill_from_dev_buffer(scp, arr, - SDEBUG_READ_POSITION_ARR_SZ); - } - mk_sense_invalid_opcode(scp); - return check_condition_result; - } - if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */ lba = get_unaligned_be32(cmd + 2); nblks = get_unaligned_be16(cmd + 7); @@ -6645,7 +6691,7 @@ static void scsi_debug_sdev_destroy(struct scsi_device *sdp) debugfs_remove(devip->debugfs_entry); - if (sdebug_ptype == TYPE_TAPE) { + if (sdp->type == TYPE_TAPE) { kfree(devip->tape_blocks[0]); devip->tape_blocks[0] = NULL; } @@ -6833,18 +6879,16 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd) static void scsi_tape_reset_clear(struct sdebug_dev_info *devip) { - if (sdebug_ptype == TYPE_TAPE) { - int i; + int i; - devip->tape_blksize = TAPE_DEF_BLKSIZE; - devip->tape_density = TAPE_DEF_DENSITY; - devip->tape_partition = 0; - devip->tape_dce = 0; - for (i = 0; i < TAPE_MAX_PARTITIONS; i++) - devip->tape_location[i] = 0; - devip->tape_pending_nbr_partitions = -1; - /* Don't reset partitioning? */ - } + devip->tape_blksize = TAPE_DEF_BLKSIZE; + devip->tape_density = TAPE_DEF_DENSITY; + devip->tape_partition = 0; + devip->tape_dce = 0; + for (i = 0; i < TAPE_MAX_PARTITIONS; i++) + devip->tape_location[i] = 0; + devip->tape_pending_nbr_partitions = -1; + /* Don't reset partitioning? 
*/ } static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) @@ -6862,7 +6906,8 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) scsi_debug_stop_all_queued(sdp); if (devip) { set_bit(SDEBUG_UA_POR, devip->uas_bm); - scsi_tape_reset_clear(devip); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); } if (sdebug_fail_lun_reset(SCpnt)) { @@ -6901,7 +6946,8 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { if (devip->target == sdp->id) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); - scsi_tape_reset_clear(devip); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } } @@ -6933,7 +6979,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); - scsi_tape_reset_clear(devip); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } @@ -6957,7 +7004,8 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); - scsi_tape_reset_clear(devip); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } } @@ -9173,6 +9221,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, u32 flags; u16 sa; u8 opcode = cmd[0]; + u32 devsel = sdebug_get_devsel(scp->device); bool has_wlun_rl; bool inject_now; int ret = 0; @@ -9252,12 +9301,14 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, else sa = get_unaligned_be16(cmd + 8); for (k = 0; k <= na; oip = r_oip->arrp + k++) { - if (opcode == oip->opcode && sa == oip->sa) + if (opcode == oip->opcode && sa == oip->sa && + (devsel & oip->devsel) != 0) break; } } else { /* since no service action only check opcode */ for (k = 0; k <= na; oip = r_oip->arrp + k++) { - if (opcode == oip->opcode) + if (opcode == oip->opcode && + (devsel & oip->devsel) != 0) break; } } diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c index 2310afa77b76..c467b55b4174 100644 --- a/drivers/soc/qcom/ice.c +++ b/drivers/soc/qcom/ice.c @@ -21,20 +21,63 @@ #include -#define AES_256_XTS_KEY_SIZE 64 +#define AES_256_XTS_KEY_SIZE 64 /* for raw keys only */ +#define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE 100 /* assuming HWKM v2 */ /* QCOM ICE registers */ + +#define QCOM_ICE_REG_CONTROL 0x0000 +#define QCOM_ICE_LEGACY_MODE_ENABLED BIT(0) + #define QCOM_ICE_REG_VERSION 0x0008 + #define QCOM_ICE_REG_FUSE_SETTING 0x0010 +#define QCOM_ICE_FUSE_SETTING_MASK BIT(0) +#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK BIT(1) +#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK BIT(2) + #define QCOM_ICE_REG_BIST_STATUS 0x0070 +#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28) + #define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 -/* BIST ("built-in self-test") status flags */ -#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28) +#define QCOM_ICE_REG_CRYPTOCFG_BASE 0x4040 +#define QCOM_ICE_REG_CRYPTOCFG_SIZE 0x80 +#define QCOM_ICE_REG_CRYPTOCFG(slot) (QCOM_ICE_REG_CRYPTOCFG_BASE + \ + QCOM_ICE_REG_CRYPTOCFG_SIZE * (slot)) +union crypto_cfg { + __le32 regval; + struct { + u8 dusize; + u8 capidx; + u8 reserved; +#define QCOM_ICE_HWKM_CFG_ENABLE_VAL BIT(7) + u8 cfge; + }; +}; -#define QCOM_ICE_FUSE_SETTING_MASK 0x1 -#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 -#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 +/* QCOM ICE HWKM (Hardware Key Manager) registers */ + +#define HWKM_OFFSET 0x8000 + +#define 
QCOM_ICE_REG_HWKM_TZ_KM_CTL (HWKM_OFFSET + 0x1000) +#define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL (BIT(1) | BIT(2)) + +#define QCOM_ICE_REG_HWKM_TZ_KM_STATUS (HWKM_OFFSET + 0x1004) +#define QCOM_ICE_HWKM_KT_CLEAR_DONE BIT(0) +#define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE BIT(1) +#define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE BIT(2) +#define QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 BIT(7) +#define QCOM_ICE_HWKM_BIST_DONE_V2 BIT(9) + +#define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008) +#define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL BIT(3) + +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_0 (HWKM_OFFSET + 0x5000) +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_1 (HWKM_OFFSET + 0x5004) +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_2 (HWKM_OFFSET + 0x5008) +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_3 (HWKM_OFFSET + 0x500C) +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_4 (HWKM_OFFSET + 0x5010) #define qcom_ice_writel(engine, val, reg) \ writel((val), (engine)->base + (reg)) @@ -42,11 +85,18 @@ #define qcom_ice_readl(engine, reg) \ readl((engine)->base + (reg)) +static bool qcom_ice_use_wrapped_keys; +module_param_named(use_wrapped_keys, qcom_ice_use_wrapped_keys, bool, 0660); +MODULE_PARM_DESC(use_wrapped_keys, + "Support wrapped keys instead of raw keys, if available on the platform"); + struct qcom_ice { struct device *dev; void __iomem *base; struct clk *core_clk; + bool use_hwkm; + bool hwkm_init_complete; }; static bool qcom_ice_check_supported(struct qcom_ice *ice) @@ -76,6 +126,35 @@ static bool qcom_ice_check_supported(struct qcom_ice *ice) return false; } + /* + * Check for HWKM support and decide whether to use it or not. ICE + * v3.2.1 and later have HWKM v2. ICE v3.2.0 has HWKM v1. Earlier ICE + * versions don't have HWKM at all. However, for HWKM to be fully + * usable by Linux, the TrustZone software also needs to support certain + * SCM calls including the ones to generate and prepare keys. That + * effectively makes the earliest supported SoC be SM8650, which has + * HWKM v2. Therefore, this driver doesn't include support for HWKM v1, + * and it checks for the SCM call support before it decides to use HWKM. + * + * Also, since HWKM and legacy mode are mutually exclusive, and + * ICE-capable storage driver(s) need to know early on whether to + * advertise support for raw keys or wrapped keys, HWKM cannot be used + * unconditionally. A module parameter is used to opt into using it. + */ + if ((major >= 4 || + (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)))) && + qcom_scm_has_wrapped_key_support()) { + if (qcom_ice_use_wrapped_keys) { + dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n"); + ice->use_hwkm = true; + } else { + dev_info(dev, "Not using HWKM. Supporting raw keys only.\n"); + } + } else if (qcom_ice_use_wrapped_keys) { + dev_warn(dev, "A supported HWKM is not present. Ignoring qcom_ice.use_wrapped_keys=1.\n"); + } else { + dev_info(dev, "A supported HWKM is not present. 
Supporting raw keys only.\n"); + } return true; } @@ -123,17 +202,71 @@ static int qcom_ice_wait_bist_status(struct qcom_ice *ice) err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS, regval, !(regval & QCOM_ICE_BIST_STATUS_MASK), 50, 5000); - if (err) + if (err) { dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n"); + return err; + } - return err; + if (ice->use_hwkm && + qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_STATUS) != + (QCOM_ICE_HWKM_KT_CLEAR_DONE | + QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE | + QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE | + QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 | + QCOM_ICE_HWKM_BIST_DONE_V2)) { + dev_err(ice->dev, "HWKM self-test error!\n"); + /* + * Too late to revoke use_hwkm here, as it was already + * propagated up the stack into the crypto capabilities. + */ + } + return 0; +} + +static void qcom_ice_hwkm_init(struct qcom_ice *ice) +{ + u32 regval; + + if (!ice->use_hwkm) + return; + + BUILD_BUG_ON(QCOM_ICE_HWKM_WRAPPED_KEY_SIZE > + BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE); + /* + * When ICE is in HWKM mode, it only supports wrapped keys. + * When ICE is in legacy mode, it only supports raw keys. + * + * Put ICE in HWKM mode. ICE defaults to legacy mode. + */ + regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL); + regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED; + qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL); + + /* Disable CRC checks. This HWKM feature is not used. */ + qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL, + QCOM_ICE_REG_HWKM_TZ_KM_CTL); + + /* + * Allow the HWKM slave to read and write the keyslots in the ICE HWKM + * slave. Without this, TrustZone cannot program keys into ICE. + */ + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_0); + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_1); + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_2); + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_3); + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_4); + + /* Clear the HWKM response FIFO. 
*/ + qcom_ice_writel(ice, QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL, + QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS); + ice->hwkm_init_complete = true; } int qcom_ice_enable(struct qcom_ice *ice) { qcom_ice_low_power_mode_enable(ice); qcom_ice_optimization_enable(ice); - + qcom_ice_hwkm_init(ice); return qcom_ice_wait_bist_status(ice); } EXPORT_SYMBOL_GPL(qcom_ice_enable); @@ -149,7 +282,7 @@ int qcom_ice_resume(struct qcom_ice *ice) err); return err; } - + qcom_ice_hwkm_init(ice); return qcom_ice_wait_bist_status(ice); } EXPORT_SYMBOL_GPL(qcom_ice_resume); @@ -157,15 +290,58 @@ EXPORT_SYMBOL_GPL(qcom_ice_resume); int qcom_ice_suspend(struct qcom_ice *ice) { clk_disable_unprepare(ice->core_clk); + ice->hwkm_init_complete = false; return 0; } EXPORT_SYMBOL_GPL(qcom_ice_suspend); -int qcom_ice_program_key(struct qcom_ice *ice, - u8 algorithm_id, u8 key_size, - const u8 crypto_key[], u8 data_unit_size, - int slot) +static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot) +{ + return slot * 2; +} + +static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot, + const struct blk_crypto_key *bkey) +{ + struct device *dev = ice->dev; + union crypto_cfg cfg = { + .dusize = bkey->crypto_cfg.data_unit_size / 512, + .capidx = QCOM_SCM_ICE_CIPHER_AES_256_XTS, + .cfge = QCOM_ICE_HWKM_CFG_ENABLE_VAL, + }; + int err; + + if (!ice->use_hwkm) { + dev_err_ratelimited(dev, "Got wrapped key when not using HWKM\n"); + return -EINVAL; + } + if (!ice->hwkm_init_complete) { + dev_err_ratelimited(dev, "HWKM not yet initialized\n"); + return -EINVAL; + } + + /* Clear CFGE before programming the key. */ + qcom_ice_writel(ice, 0x0, QCOM_ICE_REG_CRYPTOCFG(slot)); + + /* Call into TrustZone to program the wrapped key using HWKM. */ + err = qcom_scm_ice_set_key(translate_hwkm_slot(ice, slot), bkey->bytes, + bkey->size, cfg.capidx, cfg.dusize); + if (err) { + dev_err_ratelimited(dev, + "qcom_scm_ice_set_key failed; err=%d, slot=%u\n", + err, slot); + return err; + } + + /* Set CFGE after programming the key. */ + qcom_ice_writel(ice, le32_to_cpu(cfg.regval), + QCOM_ICE_REG_CRYPTOCFG(slot)); + return 0; +} + +int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot, + const struct blk_crypto_key *blk_key) { struct device *dev = ice->dev; union { @@ -176,15 +352,26 @@ int qcom_ice_program_key(struct qcom_ice *ice, int err; /* Only AES-256-XTS has been tested so far. 
*/ - if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS || - key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) { - dev_err_ratelimited(dev, - "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", - algorithm_id, key_size); + if (blk_key->crypto_cfg.crypto_mode != + BLK_ENCRYPTION_MODE_AES_256_XTS) { + dev_err_ratelimited(dev, "Unsupported crypto mode: %d\n", + blk_key->crypto_cfg.crypto_mode); + return -EINVAL; + } + + if (blk_key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) + return qcom_ice_program_wrapped_key(ice, slot, blk_key); + + if (ice->use_hwkm) { + dev_err_ratelimited(dev, "Got raw key when using HWKM\n"); return -EINVAL; } - memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE); + if (blk_key->size != AES_256_XTS_KEY_SIZE) { + dev_err_ratelimited(dev, "Incorrect key size\n"); + return -EINVAL; + } + memcpy(key.bytes, blk_key->bytes, AES_256_XTS_KEY_SIZE); /* The SCM call requires that the key words are encoded in big endian */ for (i = 0; i < ARRAY_SIZE(key.words); i++) @@ -192,7 +379,7 @@ int qcom_ice_program_key(struct qcom_ice *ice, err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, QCOM_SCM_ICE_CIPHER_AES_256_XTS, - data_unit_size); + blk_key->crypto_cfg.data_unit_size / 512); memzero_explicit(&key, sizeof(key)); @@ -202,10 +389,131 @@ EXPORT_SYMBOL_GPL(qcom_ice_program_key); int qcom_ice_evict_key(struct qcom_ice *ice, int slot) { + if (ice->hwkm_init_complete) + slot = translate_hwkm_slot(ice, slot); return qcom_scm_ice_invalidate_key(slot); } EXPORT_SYMBOL_GPL(qcom_ice_evict_key); +/** + * qcom_ice_get_supported_key_type() - Get the supported key type + * @ice: ICE driver data + * + * Return: the blk-crypto key type that the ICE driver is configured to use. + * This is the key type that ICE-capable storage drivers should advertise as + * supported in the crypto capabilities of any disks they register. + */ +enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice) +{ + if (ice->use_hwkm) + return BLK_CRYPTO_KEY_TYPE_HW_WRAPPED; + return BLK_CRYPTO_KEY_TYPE_RAW; +} +EXPORT_SYMBOL_GPL(qcom_ice_get_supported_key_type); + +/** + * qcom_ice_derive_sw_secret() - Derive software secret from wrapped key + * @ice: ICE driver data + * @eph_key: an ephemerally-wrapped key + * @eph_key_size: size of @eph_key in bytes + * @sw_secret: output buffer for the software secret + * + * Use HWKM to derive the "software secret" from a hardware-wrapped key that is + * given in ephemerally-wrapped form. + * + * Return: 0 on success; -EBADMSG if the given ephemerally-wrapped key is + * invalid; or another -errno value. + */ +int qcom_ice_derive_sw_secret(struct qcom_ice *ice, + const u8 *eph_key, size_t eph_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + int err = qcom_scm_derive_sw_secret(eph_key, eph_key_size, + sw_secret, + BLK_CRYPTO_SW_SECRET_SIZE); + if (err == -EIO || err == -EINVAL) + err = -EBADMSG; /* probably invalid key */ + return err; +} +EXPORT_SYMBOL_GPL(qcom_ice_derive_sw_secret); + +/** + * qcom_ice_generate_key() - Generate a wrapped key for inline encryption + * @ice: ICE driver data + * @lt_key: output buffer for the long-term wrapped key + * + * Use HWKM to generate a new key and return it as a long-term wrapped key. + * + * Return: the size of the resulting wrapped key on success; -errno on failure. 
+ */ +int qcom_ice_generate_key(struct qcom_ice *ice, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + int err; + + err = qcom_scm_generate_ice_key(lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); + if (err) + return err; + + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; +} +EXPORT_SYMBOL_GPL(qcom_ice_generate_key); + +/** + * qcom_ice_prepare_key() - Prepare a wrapped key for inline encryption + * @ice: ICE driver data + * @lt_key: a long-term wrapped key + * @lt_key_size: size of @lt_key in bytes + * @eph_key: output buffer for the ephemerally-wrapped key + * + * Use HWKM to re-wrap a long-term wrapped key with the per-boot ephemeral key. + * + * Return: the size of the resulting wrapped key on success; -EBADMSG if the + * given long-term wrapped key is invalid; or another -errno value. + */ +int qcom_ice_prepare_key(struct qcom_ice *ice, + const u8 *lt_key, size_t lt_key_size, + u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + int err; + + err = qcom_scm_prepare_ice_key(lt_key, lt_key_size, + eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); + if (err == -EIO || err == -EINVAL) + err = -EBADMSG; /* probably invalid key */ + if (err) + return err; + + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; +} +EXPORT_SYMBOL_GPL(qcom_ice_prepare_key); + +/** + * qcom_ice_import_key() - Import a raw key for inline encryption + * @ice: ICE driver data + * @raw_key: the raw key to import + * @raw_key_size: size of @raw_key in bytes + * @lt_key: output buffer for the long-term wrapped key + * + * Use HWKM to import a raw key and return it as a long-term wrapped key. + * + * Return: the size of the resulting wrapped key on success; -errno on failure. + */ +int qcom_ice_import_key(struct qcom_ice *ice, + const u8 *raw_key, size_t raw_key_size, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + int err; + + err = qcom_scm_import_ice_key(raw_key, raw_key_size, + lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); + if (err) + return err; + + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; +} +EXPORT_SYMBOL_GPL(qcom_ice_import_key); + static struct qcom_ice *qcom_ice_create(struct device *dev, void __iomem *base) { diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 66804bf1ee32..0904ecae253a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -673,12 +673,10 @@ static ssize_t emulate_model_alias_store(struct config_item *item, return ret; BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); - if (flag) { + if (flag) dev_set_t10_wwn_model_alias(dev); - } else { - strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod, - sizeof(dev->t10_wwn.model)); - } + else + strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod); da->emulate_model_alias = flag; return count; } @@ -1433,7 +1431,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item, ssize_t len; ssize_t ret; - len = strscpy(buf, page, sizeof(buf)); + len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. 
*/ stripped = strstrip(buf); @@ -1464,7 +1462,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1); - strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor)); + strscpy(dev->t10_wwn.vendor, stripped); pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:" " %s\n", dev->t10_wwn.vendor); @@ -1489,7 +1487,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item, ssize_t len; ssize_t ret; - len = strscpy(buf, page, sizeof(buf)); + len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. */ stripped = strstrip(buf); @@ -1520,7 +1518,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); - strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model)); + strscpy(dev->t10_wwn.model, stripped); pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n", dev->t10_wwn.model); @@ -1545,7 +1543,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item, ssize_t len; ssize_t ret; - len = strscpy(buf, page, sizeof(buf)); + len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. */ stripped = strstrip(buf); @@ -1576,7 +1574,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1); - strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision)); + strscpy(dev->t10_wwn.revision, stripped); pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n", dev->t10_wwn.revision); diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 90b5ab60f5ae..a27a3980480e 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -57,6 +57,36 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear) } } +static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint) +{ + switch (hint) { + case WB_RESIZE_HINT_KEEP: + return "keep"; + case WB_RESIZE_HINT_DECREASE: + return "decrease"; + case WB_RESIZE_HINT_INCREASE: + return "increase"; + default: + return "unknown"; + } +} + +static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status) +{ + switch (status) { + case WB_RESIZE_STATUS_IDLE: + return "idle"; + case WB_RESIZE_STATUS_IN_PROGRESS: + return "in_progress"; + case WB_RESIZE_STATUS_COMPLETE_SUCCESS: + return "complete_success"; + case WB_RESIZE_STATUS_GENERAL_FAILURE: + return "general_failure"; + default: + return "unknown"; + } +} + static const char *ufshcd_uic_link_state_to_string( enum uic_link_state state) { @@ -411,6 +441,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev, return count; } +static const char * const wb_resize_en_mode[] = { + [WB_RESIZE_EN_IDLE] = "idle", + [WB_RESIZE_EN_DECREASE] = "decrease", + [WB_RESIZE_EN_INCREASE] = "increase", +}; + +static ssize_t wb_resize_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + ssize_t res; + + if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled + || !hba->dev_info.b_presrv_uspc_en + || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE)) + return -EOPNOTSUPP; + + mode = sysfs_match_string(wb_resize_en_mode, buf); + if (mode < 0) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + res = -EBUSY; + 
goto out; + } + + ufshcd_rpm_get_sync(hba); + res = ufshcd_wb_set_resize_en(hba, mode); + ufshcd_rpm_put_sync(hba); + +out: + up(&hba->host_sem); + return res < 0 ? res : count; +} + /** * pm_qos_enable_show - sysfs handler to show pm qos enable value * @dev: device associated with the UFS controller @@ -476,6 +544,7 @@ static DEVICE_ATTR_RW(auto_hibern8); static DEVICE_ATTR_RW(wb_on); static DEVICE_ATTR_RW(enable_wb_buf_flush); static DEVICE_ATTR_RW(wb_flush_threshold); +static DEVICE_ATTR_WO(wb_resize_enable); static DEVICE_ATTR_RW(rtc_update_ms); static DEVICE_ATTR_RW(pm_qos_enable); static DEVICE_ATTR_RO(critical_health); @@ -491,6 +560,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_wb_on.attr, &dev_attr_enable_wb_buf_flush.attr, &dev_attr_wb_flush_threshold.attr, + &dev_attr_wb_resize_enable.attr, &dev_attr_rtc_update_ms.attr, &dev_attr_pm_qos_enable.attr, &dev_attr_critical_health.attr, @@ -1495,6 +1565,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn) idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE; } +static int wb_read_resize_attrs(struct ufs_hba *hba, + enum attr_idn idn, u32 *attr_val) +{ + u8 index = 0; + int ret; + + if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled + || !hba->dev_info.b_presrv_uspc_en + || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE)) + return -EOPNOTSUPP; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + index = ufshcd_wb_get_query_index(hba); + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + idn, index, 0, attr_val); + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + return ret; +} + +static ssize_t wb_resize_hint_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + u32 value; + + ret = wb_read_resize_attrs(hba, + QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value)); +} + +static DEVICE_ATTR_RO(wb_resize_hint); + +static ssize_t wb_resize_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + u32 value; + + ret = wb_read_resize_attrs(hba, + QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value)); +} + +static DEVICE_ATTR_RO(wb_resize_status); + #define UFS_ATTRIBUTE(_name, _uname) \ static ssize_t _name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ @@ -1568,6 +1699,8 @@ static struct attribute *ufs_sysfs_attributes[] = { &dev_attr_wb_avail_buf.attr, &dev_attr_wb_life_time_est.attr, &dev_attr_wb_cur_buf.attr, + &dev_attr_wb_resize_hint.attr, + &dev_attr_wb_resize_status.attr, NULL, }; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 0534390c2a35..be65fc4b5ccd 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -643,9 +643,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba) "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n", div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000), hba->ufs_stats.hibern8_exit_cnt); - dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", - div_u64(hba->ufs_stats.last_intr_ts, 1000), - hba->ufs_stats.last_intr_status); dev_err(hba->dev, "error handling flags=0x%x, req. 
abort count=%d\n", hba->eh_flags, hba->req_abort_count); dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", @@ -6083,6 +6080,21 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) return ret; } +int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode) +{ + int ret; + u8 index; + + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode); + if (ret) + dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n", + __func__, ret); + + return ret; +} + static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, u32 avail_buf) { @@ -6974,7 +6986,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) } /** - * ufshcd_intr - Main interrupt service routine + * ufshcd_threaded_intr - Threaded interrupt service routine * @irq: irq number * @__hba: pointer to adapter instance * @@ -6982,16 +6994,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) * IRQ_HANDLED - If interrupt is valid * IRQ_NONE - If invalid interrupt */ -static irqreturn_t ufshcd_intr(int irq, void *__hba) +static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba) { - u32 intr_status, enabled_intr_status = 0; + u32 last_intr_status, intr_status, enabled_intr_status = 0; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; int retries = hba->nutrs; - intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - hba->ufs_stats.last_intr_status = intr_status; - hba->ufs_stats.last_intr_ts = local_clock(); + last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); /* * There could be max of hba->nutrs reqs in flight and in worst case @@ -7015,7 +7025,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", __func__, intr_status, - hba->ufs_stats.last_intr_status, + last_intr_status, enabled_intr_status); ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); } @@ -7023,6 +7033,29 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) return retval; } +/** + * ufshcd_intr - Main interrupt service routine + * @irq: irq number + * @__hba: pointer to adapter instance + * + * Return: + * IRQ_HANDLED - If interrupt is valid + * IRQ_WAKE_THREAD - If handling is moved to threaded handled + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_intr(int irq, void *__hba) +{ + struct ufs_hba *hba = __hba; + + /* Move interrupt handling to thread when MCQ & ESI are not enabled */ + if (!hba->mcq_enabled || !hba->mcq_esi_enabled) + return IRQ_WAKE_THREAD; + + /* Directly handle interrupts since MCQ ESI handlers does the hard job */ + return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) & + ufshcd_readl(hba, REG_INTERRUPT_ENABLE)); +} + static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) { int err = 0; @@ -8082,6 +8115,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) */ dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; + dev_info->ext_wb_sup = get_unaligned_be16(desc_buf + + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); + dev_info->b_presrv_uspc_en = desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; @@ -8722,6 +8758,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba) u32 intrs; ret = ufshcd_mcq_vops_config_esi(hba); + hba->mcq_esi_enabled = !ret; dev_info(hba->dev, "ESI %sconfigured\n", ret ? 
"is not " : ""); intrs = UFSHCD_ENABLE_MCQ_INTRS; @@ -10581,7 +10618,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_readl(hba, REG_INTERRUPT_ENABLE); /* IRQ registration */ - err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); + err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr, + IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba); if (err) { dev_err(hba->dev, "request irq failed\n"); goto out_disable; diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 1b37449fbffc..46cca52aa6f1 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -169,7 +169,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host) profile->ll_ops = ufs_qcom_crypto_ops; profile->max_dun_bytes_supported = 8; - profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW; + profile->key_types_supported = qcom_ice_get_supported_key_type(ice); profile->dev = dev; /* @@ -217,17 +217,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile, struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err; - /* Only AES-256-XTS has been tested so far. */ - if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS) - return -EOPNOTSUPP; - ufshcd_hold(hba); - err = qcom_ice_program_key(host->ice, - QCOM_ICE_CRYPTO_ALG_AES_XTS, - QCOM_ICE_CRYPTO_KEY_SIZE_256, - key->bytes, - key->crypto_cfg.data_unit_size / 512, - slot); + err = qcom_ice_program_key(host->ice, slot, key); ufshcd_release(hba); return err; } @@ -246,9 +237,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile, return err; } +static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile, + const u8 *eph_key, size_t eph_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size, + sw_secret); +} + +static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile, + const u8 *raw_key, size_t raw_key_size, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key); +} + +static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_generate_key(host->ice, lt_key); +} + +static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile, + const u8 *lt_key, size_t lt_key_size, + u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key); +} + static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = { .keyslot_program = ufs_qcom_ice_keyslot_program, .keyslot_evict = ufs_qcom_ice_keyslot_evict, + .derive_sw_secret = ufs_qcom_ice_derive_sw_secret, + .import_key = ufs_qcom_ice_import_key, + .generate_key = ufs_qcom_ice_generate_key, + .prepare_key = ufs_qcom_ice_prepare_key, }; #else diff --git a/include/soc/qcom/ice.h b/include/soc/qcom/ice.h index fdf1b5c21eb9..4bee553f0a59 100644 --- a/include/soc/qcom/ice.h +++ 
b/include/soc/qcom/ice.h @@ -6,33 +6,29 @@ #ifndef __QCOM_ICE_H__ #define __QCOM_ICE_H__ +#include #include struct qcom_ice; -enum qcom_ice_crypto_key_size { - QCOM_ICE_CRYPTO_KEY_SIZE_INVALID = 0x0, - QCOM_ICE_CRYPTO_KEY_SIZE_128 = 0x1, - QCOM_ICE_CRYPTO_KEY_SIZE_192 = 0x2, - QCOM_ICE_CRYPTO_KEY_SIZE_256 = 0x3, - QCOM_ICE_CRYPTO_KEY_SIZE_512 = 0x4, -}; - -enum qcom_ice_crypto_alg { - QCOM_ICE_CRYPTO_ALG_AES_XTS = 0x0, - QCOM_ICE_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1, - QCOM_ICE_CRYPTO_ALG_AES_ECB = 0x2, - QCOM_ICE_CRYPTO_ALG_ESSIV_AES_CBC = 0x3, -}; - int qcom_ice_enable(struct qcom_ice *ice); int qcom_ice_resume(struct qcom_ice *ice); int qcom_ice_suspend(struct qcom_ice *ice); -int qcom_ice_program_key(struct qcom_ice *ice, - u8 algorithm_id, u8 key_size, - const u8 crypto_key[], u8 data_unit_size, - int slot); +int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot, + const struct blk_crypto_key *blk_key); int qcom_ice_evict_key(struct qcom_ice *ice, int slot); +enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice); +int qcom_ice_derive_sw_secret(struct qcom_ice *ice, + const u8 *eph_key, size_t eph_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]); +int qcom_ice_generate_key(struct qcom_ice *ice, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]); +int qcom_ice_prepare_key(struct qcom_ice *ice, + const u8 *lt_key, size_t lt_key_size, + u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]); +int qcom_ice_import_key(struct qcom_ice *ice, + const u8 *raw_key, size_t raw_key_size, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]); struct qcom_ice *devm_of_qcom_ice_get(struct device *dev); #endif /* __QCOM_ICE_H__ */ diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index 8a24ed59ec46..9188f7aa99da 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -180,7 +180,10 @@ enum attr_idn { QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D, QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E, QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F, - QUERY_ATTR_IDN_TIMESTAMP = 0x30 + QUERY_ATTR_IDN_TIMESTAMP = 0x30, + QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT = 0x3C, + QUERY_ATTR_IDN_WB_BUF_RESIZE_EN = 0x3D, + QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS = 0x3E, }; /* Descriptor idn for Query requests */ @@ -289,6 +292,7 @@ enum device_desc_param { DEVICE_DESC_PARAM_PRDCT_REV = 0x2A, DEVICE_DESC_PARAM_HPB_VER = 0x40, DEVICE_DESC_PARAM_HPB_CONTROL = 0x42, + DEVICE_DESC_PARAM_EXT_WB_SUP = 0x4D, DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F, DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53, DEVICE_DESC_PARAM_WB_TYPE = 0x54, @@ -383,6 +387,11 @@ enum { UFSHCD_AMP = 3, }; +/* Possible values for wExtendedWriteBoosterSupport */ +enum { + UFS_DEV_WB_BUF_RESIZE = BIT(0), +}; + /* Possible values for dExtendedUFSFeaturesSupport */ enum { UFS_DEV_HIGH_TEMP_NOTIF = BIT(4), @@ -454,6 +463,28 @@ enum ufs_ref_clk_freq { REF_CLK_FREQ_INVAL = -1, }; +/* bWriteBoosterBufferResizeEn attribute */ +enum wb_resize_en { + WB_RESIZE_EN_IDLE = 0, + WB_RESIZE_EN_DECREASE = 1, + WB_RESIZE_EN_INCREASE = 2, +}; + +/* bWriteBoosterBufferResizeHint attribute */ +enum wb_resize_hint { + WB_RESIZE_HINT_KEEP = 0, + WB_RESIZE_HINT_DECREASE = 1, + WB_RESIZE_HINT_INCREASE = 2, +}; + +/* bWriteBoosterBufferResizeStatus attribute */ +enum wb_resize_status { + WB_RESIZE_STATUS_IDLE = 0, + WB_RESIZE_STATUS_IN_PROGRESS = 1, + WB_RESIZE_STATUS_COMPLETE_SUCCESS = 2, + WB_RESIZE_STATUS_GENERAL_FAILURE = 3, +}; + /* Query response result code */ enum { QUERY_RESULT_SUCCESS = 0x00, @@ -578,6 +609,7 @@ struct ufs_dev_info { bool wb_buf_flush_enabled; u8 
wb_dedicated_lu; u8 wb_buffer_type; + u16 ext_wb_sup; bool b_rpm_dev_flush_capable; u8 b_presrv_uspc_en; diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index e3909cc691b2..a2ffdc0b1044 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -501,8 +501,6 @@ struct ufs_event_hist { /** * struct ufs_stats - keeps usage/err statistics - * @last_intr_status: record the last interrupt status. - * @last_intr_ts: record the last interrupt timestamp. * @hibern8_exit_cnt: Counter to keep track of number of exits, * reset this after link-startup. * @last_hibern8_exit_tstamp: Set time after the hibern8 exit. @@ -510,9 +508,6 @@ struct ufs_event_hist { * @event: array with event history. */ struct ufs_stats { - u32 last_intr_status; - u64 last_intr_ts; - u32 hibern8_exit_cnt; u64 last_hibern8_exit_tstamp; struct ufs_event_hist event[UFS_EVT_CNT]; @@ -959,6 +954,7 @@ enum ufshcd_mcq_opr { * ufshcd_resume_complete() * @mcq_sup: is mcq supported by UFSHC * @mcq_enabled: is mcq ready to accept requests + * @mcq_esi_enabled: is mcq ESI configured * @res: array of resource info of MCQ registers * @mcq_base: Multi circular queue registers base address * @uhq: array of supported hardware queues @@ -1127,6 +1123,7 @@ struct ufs_hba { bool mcq_sup; bool lsdb_sup; bool mcq_enabled; + bool mcq_esi_enabled; struct ufshcd_res_info res[RES_MAX]; void __iomem *mcq_base; struct ufs_hw_queue *uhq; @@ -1471,6 +1468,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r struct scatterlist *sg_list, enum dma_data_direction dir); int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable); +int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode); int ufshcd_suspend_prepare(struct device *dev); int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm); void ufshcd_resume_complete(struct device *dev);
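
The qcom_ice wrapped-key helpers introduced above share one calling convention: on success they return the size of the wrapped key they produced, otherwise a negative errno (with qcom_ice_prepare_key() folding SCM's -EIO/-EINVAL into -EBADMSG for a key that does not unwrap). The sketch below strings them together the way a consumer would, assuming an already-probed struct qcom_ice handle; the function and variable names are illustrative, since in the tree these helpers are only reached through the blk_crypto_ll_ops hooks wired up in ufs-qcom.c.

#include <linux/blk-crypto.h>
#include <soc/qcom/ice.h>

/* Illustrative only: mirrors the import -> prepare flow of the new API. */
static int example_wrap_and_prepare(struct qcom_ice *ice,
				    const u8 *raw_key, size_t raw_key_size)
{
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int lt_size, eph_size;

	/* Turn a raw key into its storable, long-term wrapped form. */
	lt_size = qcom_ice_import_key(ice, raw_key, raw_key_size, lt_key);
	if (lt_size < 0)
		return lt_size;

	/* Re-wrap with the per-boot ephemeral key before it can be programmed. */
	eph_size = qcom_ice_prepare_key(ice, lt_key, lt_size, eph_key);
	if (eph_size < 0)
		return eph_size;	/* -EBADMSG: the long-term key was invalid */

	/*
	 * In the real flow, blk-crypto carries this prepared key into the
	 * keyslot_program hook, which ends up in qcom_ice_program_key().
	 */
	return 0;
}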
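
The target_core_configfs changes all switch to the two-argument form of strscpy(), a macro that takes the bound from sizeof() of the destination, so it is only usable when the destination is a fixed-size character array, as the t10_wwn vendor/model/revision fields are. A minimal illustration; the struct and function below are made up for the example.

#include <linux/string.h>

struct demo_wwn {
	char vendor[9];			/* fixed-size array destination */
};

static void demo_set_vendor(struct demo_wwn *wwn, const char *src)
{
	/*
	 * Equivalent to strscpy(wwn->vendor, src, sizeof(wwn->vendor)):
	 * the two-argument form infers the bound at compile time and
	 * still NUL-terminates and reports truncation the same way.
	 */
	strscpy(wwn->vendor, src);
}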
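
Together, the new sysfs nodes give userspace a simple request/poll loop for WriteBooster buffer resizing: read wb_resize_hint, write "decrease" or "increase" into wb_resize_enable when the device recommends a change, then watch wb_resize_status for in_progress/complete_success. A rough userspace sketch follows; the sysfs directory is a placeholder to be replaced with the UFS controller's real device directory, and everything apart from the attribute names and their accepted values is illustrative.

#include <stdio.h>
#include <string.h>

/* Placeholder: point this at the UFS controller's sysfs device directory. */
#define UFS_DIR "/sys/devices/platform/ufshc"

static int read_line(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	char hint[32], status[32];

	if (read_line(UFS_DIR "/attributes/wb_resize_hint", hint, sizeof(hint)))
		return 1;

	/* Only request a resize when the device recommends one. */
	if (!strcmp(hint, "increase") || !strcmp(hint, "decrease")) {
		FILE *f = fopen(UFS_DIR "/wb_resize_enable", "w");

		if (!f)
			return 1;
		fputs(hint, f);		/* accepted: idle, decrease, increase */
		fclose(f);
	}

	if (!read_line(UFS_DIR "/attributes/wb_resize_status", status,
		       sizeof(status)))
		printf("wb_resize_status: %s\n", status);	/* e.g. in_progress */
	return 0;
}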
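
The interrupt rework splits ufshcd's handler into a hard-IRQ part, which either finishes the work inline when MCQ with ESI is active or returns IRQ_WAKE_THREAD, and a threaded part that runs the heavier retry loop in process context. The same pattern in isolation, with illustrative names; only the genirq API, flags and return values are real here.

#include <linux/interrupt.h>

struct demo_host {
	bool fast_path_ok;	/* stands in for mcq_enabled && mcq_esi_enabled */
};

static irqreturn_t demo_hardirq(int irq, void *data)
{
	struct demo_host *host = data;

	if (!host->fast_path_ok)
		return IRQ_WAKE_THREAD;		/* defer to demo_thread_fn() */

	/* Cheap work only: this runs in hard interrupt context. */
	return IRQ_HANDLED;
}

static irqreturn_t demo_thread_fn(int irq, void *data)
{
	/* Heavy lifting runs here, in a sleepable kernel thread. */
	return IRQ_HANDLED;
}

/*
 * Registration, mirroring ufshcd_init():
 *
 *	devm_request_threaded_irq(dev, irq, demo_hardirq, demo_thread_fn,
 *				  IRQF_ONESHOT | IRQF_SHARED, "demo", host);
 *
 * IRQF_ONESHOT keeps the interrupt line masked from the moment the
 * hard-IRQ handler returns IRQ_WAKE_THREAD until the thread finishes.
 */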