All of lore.kernel.org
 help / color / mirror / Atom feed
From: Martin Wilck <mwilck@suse.com>
To: Don Brace <don.brace@microchip.com>,
	Kevin.Barnett@microchip.com, scott.teel@microchip.com,
	Justin.Lindley@microchip.com, scott.benesh@microchip.com,
	gerry.morong@microchip.com, mahesh.rajashekhara@microchip.com,
	hch@infradead.org, jejb@linux.vnet.ibm.com,
	joseph.szczypek@hpe.com, POSWALD@suse.com
Cc: linux-scsi@vger.kernel.org
Subject: Re: [PATCH V3 04/25] smartpqi: add support for raid5 and raid6 writes
Date: Thu, 07 Jan 2021 17:44:20 +0100	[thread overview]
Message-ID: <15d80793c64ffd044da1e40334acfd8ad8988fb9.camel@suse.com> (raw)
In-Reply-To: <160763248354.26927.303366932249508542.stgit@brunhilda>

On Thu, 2020-12-10 at 14:34 -0600, Don Brace wrote:
> * Add in new IU definition.
> * Add in support raid5 and raid6 writes.
> 
> Reviewed-by: Scott Benesh <scott.benesh@microchip.com>
> Reviewed-by: Scott Teel <scott.teel@microchip.com>
> Reviewed-by: Kevin Barnett <kevin.barnett@microchip.com>
> Signed-off-by: Don Brace <don.brace@microchip.com>
> ---
>  drivers/scsi/smartpqi/smartpqi.h      |   39 +++++
>  drivers/scsi/smartpqi/smartpqi_init.c |  247
> ++++++++++++++++++++++++++++++++-
>  2 files changed, 278 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/scsi/smartpqi/smartpqi.h
> b/drivers/scsi/smartpqi/smartpqi.h
> index d486a2ec3045..e9844210c4a0 100644
> --- a/drivers/scsi/smartpqi/smartpqi.h
> +++ b/drivers/scsi/smartpqi/smartpqi.h
> @@ -257,6 +257,7 @@ struct pqi_device_capability {
>  };
>  
>  #define PQI_MAX_EMBEDDED_SG_DESCRIPTORS                4
> +#define PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS    3
>  
>  struct pqi_raid_path_request {
>         struct pqi_iu_header header;
> @@ -312,6 +313,39 @@ struct pqi_aio_path_request {
>                 sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
>  };
>  
> +#define PQI_RAID56_XFER_LIMIT_4K       0x1000 /* 4Kib */
> +#define PQI_RAID56_XFER_LIMIT_8K       0x2000 /* 8Kib */

You don't seem to use these, and you'll remove them again in patch
06/25.

> +struct pqi_aio_r56_path_request {
> +       struct pqi_iu_header header;
> +       __le16  request_id;
> +       __le16  volume_id;              /* ID of the RAID volume */
> +       __le32  data_it_nexus;          /* IT nexus for the data
> drive */
> +       __le32  p_parity_it_nexus;      /* IT nexus for the P parity
> drive */
> +       __le32  q_parity_it_nexus;      /* IT nexus for the Q parity
> drive */
> +       __le32  data_length;            /* total bytes to read/write
> */
> +       u8      data_direction : 2;
> +       u8      partial : 1;
> +       u8      mem_type : 1;           /* 0b: PCIe, 1b: DDR */
> +       u8      fence : 1;
> +       u8      encryption_enable : 1;
> +       u8      reserved : 2;
> +       u8      task_attribute : 3;
> +       u8      command_priority : 4;
> +       u8      reserved1 : 1;
> +       __le16  data_encryption_key_index;
> +       u8      cdb[16];
> +       __le16  error_index;
> +       u8      num_sg_descriptors;
> +       u8      cdb_length;
> +       u8      xor_multiplier;
> +       u8      reserved2[3];
> +       __le32  encrypt_tweak_lower;
> +       __le32  encrypt_tweak_upper;
> +       u8      row;                    /* row = logical lba/blocks
> per row */
> +       u8      reserved3[8];
> +       struct pqi_sg_descriptor
> sg_descriptors[PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS];
> +};
> +
>  struct pqi_io_response {
>         struct pqi_iu_header header;
>         __le16  request_id;
> @@ -484,6 +518,8 @@ struct pqi_raid_error_info {
>  #define PQI_REQUEST_IU_TASK_MANAGEMENT                 0x13
>  #define PQI_REQUEST_IU_RAID_PATH_IO                    0x14
>  #define PQI_REQUEST_IU_AIO_PATH_IO                     0x15
> +#define PQI_REQUEST_IU_AIO_PATH_RAID5_IO               0x18
> +#define PQI_REQUEST_IU_AIO_PATH_RAID6_IO               0x19
>  #define PQI_REQUEST_IU_GENERAL_ADMIN                   0x60
>  #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG      0x72
>  #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG         0x73
> @@ -1179,6 +1215,7 @@ struct pqi_ctrl_info {
>         u16             max_inbound_iu_length_per_firmware;
>         u16             max_inbound_iu_length;
>         unsigned int    max_sg_per_iu;
> +       unsigned int    max_sg_per_r56_iu;
>         void            *admin_queue_memory_base;
>         u32             admin_queue_memory_length;
>         dma_addr_t      admin_queue_memory_base_dma_handle;
> @@ -1210,6 +1247,8 @@ struct pqi_ctrl_info {
>         u8              soft_reset_handshake_supported : 1;
>         u8              raid_iu_timeout_supported: 1;
>         u8              tmf_iu_timeout_supported: 1;
> +       u8              enable_r5_writes : 1;
> +       u8              enable_r6_writes : 1;
>  
>         struct list_head scsi_device_list;
>         spinlock_t      scsi_device_list_lock;
> diff --git a/drivers/scsi/smartpqi/smartpqi_init.c
> b/drivers/scsi/smartpqi/smartpqi_init.c
> index 6bcb037ae9d7..c813cec10003 100644
> --- a/drivers/scsi/smartpqi/smartpqi_init.c
> +++ b/drivers/scsi/smartpqi/smartpqi_init.c
> @@ -67,6 +67,10 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info
> *ctrl_info,
>         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
>         unsigned int cdb_length, struct pqi_queue_group *queue_group,
>         struct pqi_encryption_info *encryption_info, bool
> raid_bypass);
> +static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info
> *ctrl_info,
> +       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
> +       struct pqi_encryption_info *encryption_info, struct
> pqi_scsi_dev *device,
> +       struct pqi_scsi_dev_raid_map_data *rmd);
>  static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
>  static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
>  static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
> @@ -2237,7 +2241,8 @@ static inline void pqi_set_encryption_info(
>   * Attempt to perform RAID bypass mapping for a logical volume I/O.
>   */
>  
> -static bool pqi_aio_raid_level_supported(struct
> pqi_scsi_dev_raid_map_data *rmd)
> +static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info
> *ctrl_info,
> +       struct pqi_scsi_dev_raid_map_data *rmd)
>  {
>         bool is_supported = true;
>  
> @@ -2245,13 +2250,14 @@ static bool
> pqi_aio_raid_level_supported(struct pqi_scsi_dev_raid_map_data *rmd)
>         case SA_RAID_0:
>                 break;
>         case SA_RAID_1:
> -               if (rmd->is_write)
> -                       is_supported = false;
> +               is_supported = false;

You disable RAID1 READs with this patch. I can see you fix it again in
05/25, but it still looks wrong.

>                 break;
>         case SA_RAID_5:
> -               fallthrough;
> +               if (rmd->is_write && !ctrl_info->enable_r5_writes)
> +                       is_supported = false;
> +               break;
>         case SA_RAID_6:
> -               if (rmd->is_write)
> +               if (rmd->is_write && !ctrl_info->enable_r6_writes)
>                         is_supported = false;
>                 break;
>         case SA_RAID_ADM:
> @@ -2526,6 +2532,26 @@ static int pqi_calc_aio_r5_or_r6(struct
> pqi_scsi_dev_raid_map_data *rmd,
>                 rmd->total_disks_per_row)) +
>                 (rmd->map_row * rmd->total_disks_per_row) + rmd-
> > first_column;
>  
> +       if (rmd->is_write) {
> +               rmd->p_index = (rmd->map_row * rmd-
> > total_disks_per_row) + rmd->data_disks_per_row;
> +               rmd->p_parity_it_nexus = raid_map->disk_data[rmd-
> > p_index].aio_handle;

I suppose you have made sure rmd->p_index can't be larger than the
size of raid_map->disk_data. A comment explaining that would be helpful
for the reader though.


> +               if (rmd->raid_level == SA_RAID_6) {
> +                       rmd->q_index = (rmd->map_row * rmd-
> > total_disks_per_row) +
> +                               (rmd->data_disks_per_row + 1);
> +                       rmd->q_parity_it_nexus = raid_map-
> > disk_data[rmd->q_index].aio_handle;
> +                       rmd->xor_mult = raid_map->disk_data[rmd-
> > map_index].xor_mult[1];

See above.

> +               }
> +               if (rmd->blocks_per_row == 0)
> +                       return PQI_RAID_BYPASS_INELIGIBLE;
> +#if BITS_PER_LONG == 32
> +               tmpdiv = rmd->first_block;
> +               do_div(tmpdiv, rmd->blocks_per_row);
> +               rmd->row = tmpdiv;
> +#else
> +               rmd->row = rmd->first_block / rmd->blocks_per_row;
> +#endif

Why not always use do_div()?

> +       }
> +
>         return 0;
>  }
>  
> @@ -2567,7 +2593,7 @@ static int
> pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
>  
>         rmd.raid_level = device->raid_level;
>  
> -       if (!pqi_aio_raid_level_supported(&rmd))
> +       if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
>                 return PQI_RAID_BYPASS_INELIGIBLE;
>  
>         if (unlikely(rmd.block_cnt == 0))
> @@ -2587,7 +2613,8 @@ static int
> pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
>         } else if (device->raid_level == SA_RAID_ADM) {
>                 rc = pqi_calc_aio_raid_adm(&rmd, device);
>         } else if ((device->raid_level == SA_RAID_5 ||
> -               device->raid_level == SA_RAID_6) &&
> rmd.layout_map_count > 1) {
> +               device->raid_level == SA_RAID_6) &&
> +               (rmd.layout_map_count > 1 || rmd.is_write)) {
>                 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
>                 if (rc)
>                         return PQI_RAID_BYPASS_INELIGIBLE;
> @@ -2622,9 +2649,27 @@ static int
> pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
>                 encryption_info_ptr = NULL;
>         }
>  
> -       return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
> +       if (rmd.is_write) {
> +               switch (device->raid_level) {
> +               case SA_RAID_0:
> +                       return pqi_aio_submit_io(ctrl_info, scmd,
> rmd.aio_handle,
>                                 rmd.cdb, rmd.cdb_length, queue_group,
>                                 encryption_info_ptr, true);
> +               case SA_RAID_5:
> +               case SA_RAID_6:
> +                       return pqi_aio_submit_r56_write_io(ctrl_info,
> scmd, queue_group,
> +                                       encryption_info_ptr, device,
> &rmd);
> +               default:
> +                       return pqi_aio_submit_io(ctrl_info, scmd,
> rmd.aio_handle,
> +                               rmd.cdb, rmd.cdb_length, queue_group,
> +                               encryption_info_ptr, true);
> +               }
> +       } else {
> +               return pqi_aio_submit_io(ctrl_info, scmd,
> rmd.aio_handle,
> +                       rmd.cdb, rmd.cdb_length, queue_group,
> +                       encryption_info_ptr, true);
> +       }
> +
>  }
>  
>  #define PQI_STATUS_IDLE                0x0
> @@ -4844,6 +4889,12 @@ static void
> pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
>                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
>                 sizeof(struct pqi_sg_descriptor)) +
>                 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
> +
> +       ctrl_info->max_sg_per_r56_iu =
> +               ((ctrl_info->max_inbound_iu_length -
> +               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
> +               sizeof(struct pqi_sg_descriptor)) +
> +               PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
>  }
>  
>  static inline void pqi_set_sg_descriptor(
> @@ -4931,6 +4982,44 @@ static int pqi_build_raid_sg_list(struct
> pqi_ctrl_info *ctrl_info,
>         return 0;
>  }
>  
> +static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info
> *ctrl_info,
> +       struct pqi_aio_r56_path_request *request, struct scsi_cmnd
> *scmd,
> +       struct pqi_io_request *io_request)
> +{
> +       u16 iu_length;
> +       int sg_count;
> +       bool chained;
> +       unsigned int num_sg_in_iu;
> +       struct scatterlist *sg;
> +       struct pqi_sg_descriptor *sg_descriptor;
> +
> +       sg_count = scsi_dma_map(scmd);
> +       if (sg_count < 0)
> +               return sg_count;
> +
> +       iu_length = offsetof(struct pqi_aio_r56_path_request,
> sg_descriptors) -
> +               PQI_REQUEST_HEADER_LENGTH;
> +       num_sg_in_iu = 0;
> +
> +       if (sg_count == 0)
> +               goto out;

An if {} block would be more readable here.

> +
> +       sg = scsi_sglist(scmd);
> +       sg_descriptor = request->sg_descriptors;
> +
> +       num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count,
> io_request,
> +               ctrl_info->max_sg_per_r56_iu, &chained);
> +
> +       request->partial = chained;
> +       iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
> +
> +out:
> +       put_unaligned_le16(iu_length, &request->header.iu_length);
> +       request->num_sg_descriptors = num_sg_in_iu;
> +
> +       return 0;
> +}
> +
>  static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
>         struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
>         struct pqi_io_request *io_request)
> @@ -5335,6 +5424,88 @@ static int pqi_aio_submit_io(struct
> pqi_ctrl_info *ctrl_info,
>         return 0;
>  }
>  
> +static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info
> *ctrl_info,
> +       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
> +       struct pqi_encryption_info *encryption_info, struct
> pqi_scsi_dev *device,
> +       struct pqi_scsi_dev_raid_map_data *rmd)
> +{
> +       int rc;
> +       struct pqi_io_request *io_request;
> +       struct pqi_aio_r56_path_request *r56_request;
> +
> +       io_request = pqi_alloc_io_request(ctrl_info);
> +       io_request->io_complete_callback = pqi_aio_io_complete;
> +       io_request->scmd = scmd;
> +       io_request->raid_bypass = true;
> +
> +       r56_request = io_request->iu;
> +       memset(r56_request, 0, offsetof(struct
> pqi_aio_r56_path_request, sg_descriptors));
> +
> +       if (device->raid_level == SA_RAID_5 || device->raid_level ==
> SA_RAID_51)
> +               r56_request->header.iu_type =
> PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
> +       else
> +               r56_request->header.iu_type =
> PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
> +
> +       put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff,
> &r56_request->volume_id);
> +       put_unaligned_le32(rmd->aio_handle, &r56_request-
> > data_it_nexus);
> +       put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request-
> > p_parity_it_nexus);
> +       if (rmd->raid_level == SA_RAID_6) {
> +               put_unaligned_le32(rmd->q_parity_it_nexus,
> &r56_request->q_parity_it_nexus);
> +               r56_request->xor_multiplier = rmd->xor_mult;
> +       }
> +       put_unaligned_le32(scsi_bufflen(scmd), &r56_request-
> > data_length);
> +       r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
> +       put_unaligned_le64(rmd->row, &r56_request->row);
> +
> +       put_unaligned_le16(io_request->index, &r56_request-
> > request_id);
> +       r56_request->error_index = r56_request->request_id;
> +
> +       if (rmd->cdb_length > sizeof(r56_request->cdb))
> +               rmd->cdb_length = sizeof(r56_request->cdb);
> +       r56_request->cdb_length = rmd->cdb_length;
> +       memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
> +
> +       switch (scmd->sc_data_direction) {
> +       case DMA_TO_DEVICE:
> +               r56_request->data_direction = SOP_READ_FLAG;
> +               break;

I wonder how it would be possible for sc_data_direction to be anything
other than DMA_TO_DEVICE here. AFAICS we will only reach this code for
WRITE commands. Please add a comment.


> +       case DMA_FROM_DEVICE:
> +               r56_request->data_direction = SOP_WRITE_FLAG;
> +               break;
> +       case DMA_NONE:
> +               r56_request->data_direction = SOP_NO_DIRECTION_FLAG;
> +               break;
> +       case DMA_BIDIRECTIONAL:
> +               r56_request->data_direction = SOP_BIDIRECTIONAL;
> +               break;
> +       default:
> +               dev_err(&ctrl_info->pci_dev->dev,
> +                       "unknown data direction: %d\n",
> +                       scmd->sc_data_direction);
> +               break;
> +       }
> +
> +       if (encryption_info) {
> +               r56_request->encryption_enable = true;
> +               put_unaligned_le16(encryption_info-
> > data_encryption_key_index,
> +                               &r56_request-
> > data_encryption_key_index);
> +               put_unaligned_le32(encryption_info-
> > encrypt_tweak_lower,
> +                               &r56_request->encrypt_tweak_lower);
> +               put_unaligned_le32(encryption_info-
> > encrypt_tweak_upper,
> +                               &r56_request->encrypt_tweak_upper);
> +       }
> +
> +       rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd,
> io_request);
> +       if (rc) {
> +               pqi_free_io_request(io_request);
> +               return SCSI_MLQUEUE_HOST_BUSY;
> +       }
> +
> +       pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
> +
> +       return 0;
> +}
> +
>  static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
>         struct scsi_cmnd *scmd)
>  {
> @@ -6298,6 +6469,60 @@ static ssize_t pqi_lockup_action_store(struct
> device *dev,
>         return -EINVAL;
>  }
>  
> +static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
> +       struct device_attribute *attr, char *buffer)
> +{
> +       struct Scsi_Host *shost = class_to_shost(dev);
> +       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
> +
> +       return scnprintf(buffer, 10, "%hhx\n", ctrl_info-
> > enable_r5_writes);

"%hhx" is deprecated, see
https://lore.kernel.org/lkml/20190914015858.7c76e036@lwn.net/T/


> +}
> +
> +static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
> +       struct device_attribute *attr, const char *buffer, size_t
> count)
> +{
> +       struct Scsi_Host *shost = class_to_shost(dev);
> +       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
> +       u8 set_r5_writes = 0;
> +
> +       if (kstrtou8(buffer, 0, &set_r5_writes))
> +               return -EINVAL;
> +
> +       if (set_r5_writes > 0)
> +               set_r5_writes = 1;
> +
> +       ctrl_info->enable_r5_writes = set_r5_writes;
> +
> +       return count;
> +}
> +
> +static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
> +       struct device_attribute *attr, char *buffer)
> +{
> +       struct Scsi_Host *shost = class_to_shost(dev);
> +       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
> +
> +       return scnprintf(buffer, 10, "%hhx\n", ctrl_info-
> > enable_r6_writes);

See above

> +}
> +
> +static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
> +       struct device_attribute *attr, const char *buffer, size_t
> count)
> +{
> +       struct Scsi_Host *shost = class_to_shost(dev);
> +       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
> +       u8 set_r6_writes = 0;
> +
> +       if (kstrtou8(buffer, 0, &set_r6_writes))
> +               return -EINVAL;
> +
> +       if (set_r6_writes > 0)
> +               set_r6_writes = 1;
> +
> +       ctrl_info->enable_r6_writes = set_r6_writes;
> +
> +       return count;
> +}
> +
>  static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show,
> NULL);
>  static DEVICE_ATTR(firmware_version, 0444,
> pqi_firmware_version_show, NULL);
>  static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
> @@ -6306,6 +6531,10 @@ static DEVICE_ATTR(vendor, 0444,
> pqi_vendor_show, NULL);
>  static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
>  static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
>         pqi_lockup_action_store);
> +static DEVICE_ATTR(enable_r5_writes, 0644,
> +       pqi_host_enable_r5_writes_show,
> pqi_host_enable_r5_writes_store);
> +static DEVICE_ATTR(enable_r6_writes, 0644,
> +       pqi_host_enable_r6_writes_show,
> pqi_host_enable_r6_writes_store);
>  
>  static struct device_attribute *pqi_shost_attrs[] = {
>         &dev_attr_driver_version,
> @@ -6315,6 +6544,8 @@ static struct device_attribute
> *pqi_shost_attrs[] = {
>         &dev_attr_vendor,
>         &dev_attr_rescan,
>         &dev_attr_lockup_action,
> +       &dev_attr_enable_r5_writes,
> +       &dev_attr_enable_r6_writes,
>         NULL
>  };
>  
> 






  reply	other threads:[~2021-01-07 16:45 UTC|newest]

Thread overview: 91+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-12-10 20:34 [PATCH V3 00/25] smartpqi updates Don Brace
2020-12-10 20:34 ` [PATCH V3 01/25] smartpqi: add support for product id Don Brace
2021-01-07 16:43   ` Martin Wilck
2020-12-10 20:34 ` [PATCH V3 02/25] smartpqi: refactor aio submission code Don Brace
2021-01-07 16:43   ` Martin Wilck
2020-12-10 20:34 ` [PATCH V3 03/25] smartpqi: refactor build sg list code Don Brace
2021-01-07 16:43   ` Martin Wilck
2020-12-10 20:34 ` [PATCH V3 04/25] smartpqi: add support for raid5 and raid6 writes Don Brace
2021-01-07 16:44   ` Martin Wilck [this message]
2021-01-08 22:56     ` Don.Brace
2021-01-13 10:26       ` Martin Wilck
2020-12-10 20:34 ` [PATCH V3 05/25] smartpqi: add support for raid1 writes Don Brace
2021-01-07 16:44   ` Martin Wilck
2021-01-09 16:56     ` Don.Brace
2020-12-10 20:34 ` [PATCH V3 06/25] smartpqi: add support for BMIC sense feature cmd and feature bits Don Brace
2021-01-07 16:44   ` Martin Wilck
2021-01-11 17:22     ` Don.Brace
2021-01-22 16:45     ` Don.Brace
2021-01-22 19:04       ` Martin Wilck
2020-12-10 20:35 ` [PATCH V3 07/25] smartpqi: update AIO Sub Page 0x02 support Don Brace
2021-01-07 16:44   ` Martin Wilck
2021-01-11 20:53     ` Don.Brace
2020-12-10 20:35 ` [PATCH V3 08/25] smartpqi: add support for long firmware version Don Brace
2021-01-07 16:45   ` Martin Wilck
2021-01-11 22:25     ` Don.Brace
2021-01-22 20:01     ` Don.Brace
2020-12-10 20:35 ` [PATCH V3 09/25] smartpqi: align code with oob driver Don Brace
2021-01-08  0:13   ` Martin Wilck
2020-12-10 20:35 ` [PATCH V3 10/25] smartpqi: add stream detection Don Brace
2021-01-08  0:14   ` Martin Wilck
2021-01-15 21:58     ` Don.Brace
2020-12-10 20:35 ` [PATCH V3 11/25] smartpqi: add host level stream detection enable Don Brace
2021-01-08  0:13   ` Martin Wilck
2021-01-12 20:28     ` Don.Brace
2020-12-10 20:35 ` [PATCH V3 12/25] smartpqi: enable support for NVMe encryption Don Brace
2021-01-08  0:14   ` Martin Wilck
2020-12-10 20:35 ` [PATCH V3 13/25] smartpqi: disable write_same for nvme hba disks Don Brace
2021-01-08  0:13   ` Martin Wilck
2020-12-10 20:35 ` [PATCH V3 14/25] smartpqi: fix driver synchronization issues Don Brace
2021-01-07 23:32   ` Martin Wilck
2021-01-08  4:13     ` Martin K. Petersen
2021-01-15 21:13     ` Don.Brace
2021-01-27 23:01     ` Don.Brace
     [not found]       ` <c1e6b199f5ccda5ccec5223dfcbd1fba22171c86.camel@suse.com>
2021-02-01 22:47         ` Don.Brace
2020-12-10 20:35 ` [PATCH V3 15/25] smartpqi: fix host qdepth limit Don Brace
2020-12-14 17:54   ` Paul Menzel
2020-12-15 20:23     ` Don.Brace
2021-01-07 23:43       ` Martin Wilck
2021-01-15 21:17         ` Don.Brace
2021-01-19 10:33           ` John Garry
2021-01-19 14:12             ` Martin Wilck
2021-01-19 17:43               ` Paul Menzel
2021-01-20 16:42               ` Donald Buczek
2021-01-20 17:03                 ` Don.Brace
2021-01-20 18:35                 ` Martin Wilck
2021-02-10 15:27             ` Don.Brace
2021-02-10 15:42               ` John Garry
2021-02-10 16:29                 ` Don.Brace
2021-03-29 21:15                   ` Paul Menzel
2021-03-29 21:16                     ` Paul Menzel
2021-03-30 14:37                       ` Donald Buczek
2020-12-10 20:35 ` [PATCH V3 16/25] smartpqi: convert snprintf to scnprintf Don Brace
2021-01-07 23:51   ` Martin Wilck
2020-12-10 20:35 ` [PATCH V3 17/25] smartpqi: change timing of release of QRM memory during OFA Don Brace
2021-01-08  0:14   ` Martin Wilck
2021-01-27 17:46     ` Don.Brace
2020-12-10 20:36 ` [PATCH V3 18/25] smartpqi: return busy indication for IOCTLs when ofa is active Don Brace
2020-12-10 20:36 ` [PATCH V3 19/25] smartpqi: add phy id support for the physical drives Don Brace
2021-01-08  0:03   ` Martin Wilck
2020-12-10 20:36 ` [PATCH V3 20/25] smartpqi: update sas initiator_port_protocols and target_port_protocols Don Brace
2021-01-08  0:12   ` Martin Wilck
2020-12-10 20:36 ` [PATCH V3 21/25] smartpqi: add additional logging for LUN resets Don Brace
2021-01-08  0:27   ` Martin Wilck
2021-01-25 17:09     ` Don.Brace
2020-12-10 20:36 ` [PATCH V3 22/25] smartpqi: update enclosure identifier in sysf Don Brace
2021-01-08  0:30   ` Martin Wilck
2021-01-25 17:13     ` Don.Brace
2021-01-25 19:44       ` Martin Wilck
2021-01-25 20:36         ` Don.Brace
2020-12-10 20:36 ` [PATCH V3 23/25] smartpqi: correct system hangs when resuming from hibernation Don Brace
2021-01-08  0:34   ` Martin Wilck
2021-01-27 17:39     ` Don.Brace
2021-01-27 17:45       ` Martin Wilck
2020-12-10 20:36 ` [PATCH V3 24/25] smartpqi: add new pci ids Don Brace
2021-01-08  0:35   ` Martin Wilck
2020-12-10 20:36 ` [PATCH V3 25/25] smartpqi: update version to 2.1.6-005 Don Brace
2020-12-21 14:31 ` [PATCH V3 00/25] smartpqi updates Donald Buczek
     [not found]   ` <SN6PR11MB2848D8C9DF9856A2B7AA69ACE1C00@SN6PR11MB2848.namprd11.prod.outlook.com>
2020-12-22 13:13     ` Donald Buczek
2020-12-28 15:57       ` Don.Brace
2020-12-28 19:25         ` Don.Brace
2020-12-28 22:36           ` Donald Buczek

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=15d80793c64ffd044da1e40334acfd8ad8988fb9.camel@suse.com \
    --to=mwilck@suse.com \
    --cc=Justin.Lindley@microchip.com \
    --cc=Kevin.Barnett@microchip.com \
    --cc=POSWALD@suse.com \
    --cc=don.brace@microchip.com \
    --cc=gerry.morong@microchip.com \
    --cc=hch@infradead.org \
    --cc=jejb@linux.vnet.ibm.com \
    --cc=joseph.szczypek@hpe.com \
    --cc=linux-scsi@vger.kernel.org \
    --cc=mahesh.rajashekhara@microchip.com \
    --cc=scott.benesh@microchip.com \
    --cc=scott.teel@microchip.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.