From: Martin Wilck <mwilck@suse.com>
To: Don Brace <don.brace@microchip.com>,
	Kevin.Barnett@microchip.com, scott.teel@microchip.com,
	Justin.Lindley@microchip.com, scott.benesh@microchip.com,
	gerry.morong@microchip.com, mahesh.rajashekhara@microchip.com,
	hch@infradead.org, jejb@linux.vnet.ibm.com,
	joseph.szczypek@hpe.com, POSWALD@suse.com
Cc: linux-scsi@vger.kernel.org
Subject: Re: [PATCH V3 02/25] smartpqi: refactor aio submission code
Date: Thu, 07 Jan 2021 17:43:49 +0100
Message-ID: <2b8ec404c0270a66d0f8b33ccf6422acb174555e.camel@suse.com>
In-Reply-To: <160763247186.26927.2277580027307471315.stgit@brunhilda>

On Thu, 2020-12-10 at 14:34 -0600, Don Brace wrote:
> * No functional changes.
>   * Refactor aio submission code:
>     1. break-up function into smaller functions.
>     2. add common block of data to carry around
>        into newly added functions.
>     3. Prepare for new AIO functionality.
> 
> Reviewed-by: Scott Benesh <scott.benesh@microchip.com>
> Reviewed-by: Scott Teel <scott.teel@microchip.com>
> Reviewed-by: Kevin Barnett <kevin.barnett@microchip.com>
> Signed-off-by: Don Brace <don.brace@microchip.com>

I've got a few nitpicks, see below. But they should rather go
into a separate patch, so

Reviewed-by: Martin Wilck <mwilck@suse.com>

> ---
>  drivers/scsi/smartpqi/smartpqi.h      |   52 +++
>  drivers/scsi/smartpqi/smartpqi_init.c |  554 ++++++++++++++++++---------------
>  2 files changed, 360 insertions(+), 246 deletions(-)
> 
> diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
> index 7d3f956e949f..d486a2ec3045 100644
> --- a/drivers/scsi/smartpqi/smartpqi.h
> +++ b/drivers/scsi/smartpqi/smartpqi.h
> @@ -908,6 +908,58 @@ struct raid_map {
>  
>  #pragma pack()
>  
> +struct pqi_scsi_dev_raid_map_data {
> +       bool    is_write;
> +       u8      raid_level;
> +       u32     map_index;
> +       u64     first_block;
> +       u64     last_block;
> +       u32     data_length;
> +       u32     block_cnt;
> +       u32     blocks_per_row;
> +       u64     first_row;
> +       u64     last_row;
> +       u32     first_row_offset;
> +       u32     last_row_offset;
> +       u32     first_column;
> +       u32     last_column;
> +       u64     r5or6_first_row;
> +       u64     r5or6_last_row;
> +       u32     r5or6_first_row_offset;
> +       u32     r5or6_last_row_offset;
> +       u32     r5or6_first_column;
> +       u32     r5or6_last_column;
> +       u16     data_disks_per_row;
> +       u32     total_disks_per_row;
> +       u16     layout_map_count;
> +       u32     stripesize;
> +       u16     strip_size;
> +       u32     first_group;
> +       u32     last_group;
> +       u32     current_group;
> +       u32     map_row;
> +       u32     aio_handle;
> +       u64     disk_block;
> +       u32     disk_block_cnt;
> +       u8      cdb[16];
> +       u8      cdb_length;
> +       int     offload_to_mirror;
> +
> +       /* RAID1 specific */
> +#define NUM_RAID1_MAP_ENTRIES 3
> +       u32     num_it_nexus_entries;
> +       u32     it_nexus[NUM_RAID1_MAP_ENTRIES];
> +
> +       /* RAID5 RAID6 specific */
> +       u32     p_parity_it_nexus; /* aio_handle */
> +       u32     q_parity_it_nexus; /* aio_handle */
> +       u8      xor_mult;
> +       u64     row;
> +       u64     stripe_lba;
> +       u32     p_index;
> +       u32     q_index;
> +};

There seem to be more RAID 5/6-specific fields above (the r5or6_*
ones, for instance). Have you considered using a union?
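
Untested sketch of what I have in mind - which fields can actually
overlap is an assumption on my part and would need checking against
how the submission paths use them:

	struct pqi_scsi_dev_raid_map_data {
		/* ... fields common to all RAID levels ... */
		union {
			struct {
				/* RAID1 specific */
				u32	num_it_nexus_entries;
				u32	it_nexus[NUM_RAID1_MAP_ENTRIES];
			};
			struct {
				/* RAID5 RAID6 specific */
				u32	p_parity_it_nexus; /* aio_handle */
				u32	q_parity_it_nexus; /* aio_handle */
				u8	xor_mult;
				u64	row;
				u64	stripe_lba;
				u32	p_index;
				u32	q_index;
			};
		};
	};

The r5or6_* fields could then move into the second struct as well.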


> +
>  #define RAID_CTLR_LUNID                "\0\0\0\0\0\0\0\0"
>  
>  struct pqi_scsi_dev {
> diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
> index 68fc4327944e..2348b9f24d8c 100644
> --- a/drivers/scsi/smartpqi/smartpqi_init.c
> +++ b/drivers/scsi/smartpqi/smartpqi_init.c
> @@ -2237,332 +2237,394 @@ static inline void pqi_set_encryption_info(
>   * Attempt to perform RAID bypass mapping for a logical volume I/O.
>   */
>  
> +static bool pqi_aio_raid_level_supported(struct pqi_scsi_dev_raid_map_data *rmd)
> +{
> +       bool is_supported = true;
> +
> +       switch (rmd->raid_level) {
> +       case SA_RAID_0:
> +               break;
> +       case SA_RAID_1:
> +               if (rmd->is_write)
> +                       is_supported = false;
> +               break;
> +       case SA_RAID_5:
> +               fallthrough;

I don't think "fallthrough" is necessary here.

> +       case SA_RAID_6:
> +               if (rmd->is_write)
> +                       is_supported = false;
> +               break;
> +       case SA_RAID_ADM:
> +               if (rmd->is_write)
> +                       is_supported = false;
> +               break;
> +       default:
> +               is_supported = false;
> +       }
> +
> +       return is_supported;
> +}
> +
>  #define PQI_RAID_BYPASS_INELIGIBLE     1
>  
> -static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
> -       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
> -       struct pqi_queue_group *queue_group)
> +static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
> +                       struct pqi_scsi_dev_raid_map_data *rmd)
>  {
> -       struct raid_map *raid_map;
> -       bool is_write = false;
> -       u32 map_index;
> -       u64 first_block;
> -       u64 last_block;
> -       u32 block_cnt;
> -       u32 blocks_per_row;
> -       u64 first_row;
> -       u64 last_row;
> -       u32 first_row_offset;
> -       u32 last_row_offset;
> -       u32 first_column;
> -       u32 last_column;
> -       u64 r0_first_row;
> -       u64 r0_last_row;
> -       u32 r5or6_blocks_per_row;
> -       u64 r5or6_first_row;
> -       u64 r5or6_last_row;
> -       u32 r5or6_first_row_offset;
> -       u32 r5or6_last_row_offset;
> -       u32 r5or6_first_column;
> -       u32 r5or6_last_column;
> -       u16 data_disks_per_row;
> -       u32 total_disks_per_row;
> -       u16 layout_map_count;
> -       u32 stripesize;
> -       u16 strip_size;
> -       u32 first_group;
> -       u32 last_group;
> -       u32 current_group;
> -       u32 map_row;
> -       u32 aio_handle;
> -       u64 disk_block;
> -       u32 disk_block_cnt;
> -       u8 cdb[16];
> -       u8 cdb_length;
> -       int offload_to_mirror;
> -       struct pqi_encryption_info *encryption_info_ptr;
> -       struct pqi_encryption_info encryption_info;
> -#if BITS_PER_LONG == 32
> -       u64 tmpdiv;
> -#endif
> -
>         /* Check for valid opcode, get LBA and block count. */
>         switch (scmd->cmnd[0]) {
>         case WRITE_6:
> -               is_write = true;
> +               rmd->is_write = true;
>                 fallthrough;
>         case READ_6:
> -               first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
> +               rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
>                         (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
> -               block_cnt = (u32)scmd->cmnd[4];
> -               if (block_cnt == 0)
> -                       block_cnt = 256;
> +               rmd->block_cnt = (u32)scmd->cmnd[4];
> +               if (rmd->block_cnt == 0)
> +                       rmd->block_cnt = 256;
>                 break;
>         case WRITE_10:
> -               is_write = true;
> +               rmd->is_write = true;
>                 fallthrough;
>         case READ_10:
> -               first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
> -               block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
> +               rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
> +               rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
>                 break;
>         case WRITE_12:
> -               is_write = true;
> +               rmd->is_write = true;
>                 fallthrough;
>         case READ_12:
> -               first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
> -               block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
> +               rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
> +               rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
>                 break;
>         case WRITE_16:
> -               is_write = true;
> +               rmd->is_write = true;
>                 fallthrough;
>         case READ_16:
> -               first_block = get_unaligned_be64(&scmd->cmnd[2]);
> -               block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
> +               rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
> +               rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
>                 break;
>         default:
>                 /* Process via normal I/O path. */
>                 return PQI_RAID_BYPASS_INELIGIBLE;
>         }
>  
> -       /* Check for write to non-RAID-0. */
> -       if (is_write && device->raid_level != SA_RAID_0)
> -               return PQI_RAID_BYPASS_INELIGIBLE;
> +       put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
>  
> -       if (unlikely(block_cnt == 0))
> -               return PQI_RAID_BYPASS_INELIGIBLE;
> +       return 0;
> +}
>  
> -       last_block = first_block + block_cnt - 1;
> -       raid_map = device->raid_map;
> +static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
> +                                       struct pqi_scsi_dev_raid_map_data *rmd,
> +                                       struct raid_map *raid_map)
> +{
> +#if BITS_PER_LONG == 32
> +       u64 tmpdiv;
> +#endif
> +
> +       rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
>  
>         /* Check for invalid block or wraparound. */
> -       if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
> -               last_block < first_block)
> +       if (rmd->last_block >=
> +               get_unaligned_le64(&raid_map->volume_blk_cnt) ||
> +               rmd->last_block < rmd->first_block)
>                 return PQI_RAID_BYPASS_INELIGIBLE;
>  
> -       data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
> -       strip_size = get_unaligned_le16(&raid_map->strip_size);
> -       layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
> +       rmd->data_disks_per_row =
> +                       get_unaligned_le16(&raid_map->data_disks_per_row);
> +       rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
> +       rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
>  
>         /* Calculate stripe information for the request. */
> -       blocks_per_row = data_disks_per_row * strip_size;
> +       rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
>  #if BITS_PER_LONG == 32

Just wondering - why don't you use do_div() on 64-bit, too?
Same question for pqi_calc_aio_r5_or_r6() below.
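
An untested sketch of what I mean, using div_u64() from
linux/math64.h instead (which compiles to a plain division on 64-bit
anyway), so both #if branches could go away:

	rmd->first_row = div_u64(rmd->first_block, rmd->blocks_per_row);
	rmd->last_row = div_u64(rmd->last_block, rmd->blocks_per_row);
	rmd->first_row_offset = (u32)(rmd->first_block -
				(rmd->first_row * rmd->blocks_per_row));
	rmd->last_row_offset = (u32)(rmd->last_block -
				(rmd->last_row * rmd->blocks_per_row));
	rmd->first_column = rmd->first_row_offset / rmd->strip_size;
	rmd->last_column = rmd->last_row_offset / rmd->strip_size;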

> -       tmpdiv = first_block;
> -       do_div(tmpdiv, blocks_per_row);
> -       first_row = tmpdiv;
> -       tmpdiv = last_block;
> -       do_div(tmpdiv, blocks_per_row);
> -       last_row = tmpdiv;
> -       first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
> -       last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
> -       tmpdiv = first_row_offset;
> -       do_div(tmpdiv, strip_size);
> -       first_column = tmpdiv;
> -       tmpdiv = last_row_offset;
> -       do_div(tmpdiv, strip_size);
> -       last_column = tmpdiv;
> +       tmpdiv = rmd->first_block;
> +       do_div(tmpdiv, rmd->blocks_per_row);
> +       rmd->first_row = tmpdiv;
> +       tmpdiv = rmd->last_block;
> +       do_div(tmpdiv, rmd->blocks_per_row);
> +       rmd->last_row = tmpdiv;
> +       rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
> +       rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
> +       tmpdiv = rmd->first_row_offset;
> +       do_div(tmpdiv, rmd->strip_size);
> +       rmd->first_column = tmpdiv;
> +       tmpdiv = rmd->last_row_offset;
> +       do_div(tmpdiv, rmd->strip_size);
> +       rmd->last_column = tmpdiv;
>  #else
> -       first_row = first_block / blocks_per_row;
> -       last_row = last_block / blocks_per_row;
> -       first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
> -       last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
> -       first_column = first_row_offset / strip_size;
> -       last_column = last_row_offset / strip_size;
> +       rmd->first_row = rmd->first_block / rmd->blocks_per_row;
> +       rmd->last_row = rmd->last_block / rmd->blocks_per_row;
> +       rmd->first_row_offset = (u32)(rmd->first_block -
> +                               (rmd->first_row * rmd->blocks_per_row));
> +       rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
> +                               rmd->blocks_per_row));
> +       rmd->first_column = rmd->first_row_offset / rmd->strip_size;
> +       rmd->last_column = rmd->last_row_offset / rmd->strip_size;
>  #endif
>  
>         /* If this isn't a single row/column then give to the controller. */
> -       if (first_row != last_row || first_column != last_column)
> +       if (rmd->first_row != rmd->last_row ||
> +                       rmd->first_column != rmd->last_column)
>                 return PQI_RAID_BYPASS_INELIGIBLE;

You could save a few cycles here by testing rows and columns
separately.
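
I.e. something like this (sketch), with the row check moved up so
that the column calculation is skipped entirely for requests
spanning multiple rows:

	if (rmd->first_row != rmd->last_row)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* ... calculate first_column/last_column ... */

	if (rmd->first_column != rmd->last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;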

>  
>         /* Proceeding with driver mapping. */
> -       total_disks_per_row = data_disks_per_row +
> +       rmd->total_disks_per_row = rmd->data_disks_per_row +
>                 get_unaligned_le16(&raid_map->metadata_disks_per_row);
> -       map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
> +       rmd->map_row = ((u32)(rmd->first_row >>
> +               raid_map->parity_rotation_shift)) %
>                 get_unaligned_le16(&raid_map->row_cnt);
> -       map_index = (map_row * total_disks_per_row) + first_column;
> +       rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
> +                       rmd->first_column;
>  
> -       /* RAID 1 */
> -       if (device->raid_level == SA_RAID_1) {
> -               if (device->offload_to_mirror)
> -                       map_index += data_disks_per_row;
> -               device->offload_to_mirror = !device->offload_to_mirror;
> -       } else if (device->raid_level == SA_RAID_ADM) {
> -               /* RAID ADM */
> -               /*
> -                * Handles N-way mirrors  (R1-ADM) and R10 with # of drives
> -                * divisible by 3.
> -                */
> -               offload_to_mirror = device->offload_to_mirror;
> -               if (offload_to_mirror == 0)  {
> -                       /* use physical disk in the first mirrored group. */
> -                       map_index %= data_disks_per_row;
> -               } else {
> -                       do {
> -                               /*
> -                                * Determine mirror group that map_index
> -                                * indicates.
> -                                */
> -                               current_group = map_index / data_disks_per_row;
> -
> -                               if (offload_to_mirror != current_group) {
> -                                       if (current_group <
> -                                               layout_map_count - 1) {
> -                                               /*
> -                                                * Select raid index from
> -                                                * next group.
> -                                                */
> -                                               map_index += data_disks_per_row;
> -                                               current_group++;
> -                                       } else {
> -                                               /*
> -                                                * Select raid index from first
> -                                                * group.
> -                                                */
> -                                               map_index %= data_disks_per_row;
> -                                               current_group = 0;
> -                                       }
> +       return 0;
> +}
> +
> +static int pqi_calc_aio_raid_adm(struct pqi_scsi_dev_raid_map_data *rmd,
> +                               struct pqi_scsi_dev *device)
> +{
> +       /* RAID ADM */
> +       /*
> +        * Handles N-way mirrors  (R1-ADM) and R10 with # of drives
> +        * divisible by 3.
> +        */
> +       rmd->offload_to_mirror = device->offload_to_mirror;
> +
> +       if (rmd->offload_to_mirror == 0)  {
> +               /* use physical disk in the first mirrored group. */
> +               rmd->map_index %= rmd->data_disks_per_row;
> +       } else {
> +               do {
> +                       /*
> +                        * Determine mirror group that map_index
> +                        * indicates.
> +                        */
> +                       rmd->current_group =
> +                               rmd->map_index / rmd->data_disks_per_row;
> +
> +                       if (rmd->offload_to_mirror !=
> +                                       rmd->current_group) {
> +                               if (rmd->current_group <
> +                                       rmd->layout_map_count - 1) {
> +                                       /*
> +                                        * Select raid index from
> +                                        * next group.
> +                                        */
> +                                       rmd->map_index += rmd->data_disks_per_row;
> +                                       rmd->current_group++;
> +                               } else {
> +                                       /*
> +                                        * Select raid index from first
> +                                        * group.
> +                                        */
> +                                       rmd->map_index %= rmd->data_disks_per_row;
> +                                       rmd->current_group = 0;
>                                 }
> -                       } while (offload_to_mirror != current_group);
> -               }
> +                       }
> +               } while (rmd->offload_to_mirror != rmd->current_group);
> +       }
>  
> -               /* Set mirror group to use next time. */
> -               offload_to_mirror =
> -                       (offload_to_mirror >= layout_map_count - 1) ?
> -                               0 : offload_to_mirror + 1;
> -               device->offload_to_mirror = offload_to_mirror;
> -               /*
> -                * Avoid direct use of device->offload_to_mirror within this
> -                * function since multiple threads might simultaneously
> -                * increment it beyond the range of device->layout_map_count -1.
> -                */
> -       } else if ((device->raid_level == SA_RAID_5 ||
> -               device->raid_level == SA_RAID_6) && layout_map_count > 1) {
> -               /* RAID 50/60 */
> -               /* Verify first and last block are in same RAID group */
> -               r5or6_blocks_per_row = strip_size * data_disks_per_row;
> -               stripesize = r5or6_blocks_per_row * layout_map_count;
> +       /* Set mirror group to use next time. */
> +       rmd->offload_to_mirror =
> +               (rmd->offload_to_mirror >= rmd->layout_map_count - 1) ?
> +                       0 : rmd->offload_to_mirror + 1;
> +       device->offload_to_mirror = rmd->offload_to_mirror;
> +       /*
> +        * Avoid direct use of device->offload_to_mirror within this
> +        * function since multiple threads might simultaneously
> +        * increment it beyond the range of device->layout_map_count -1.
> +        */
> +
> +       return 0;
> +}
> +
> +static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
> +                               struct raid_map *raid_map)
> +{
> +#if BITS_PER_LONG == 32
> +       u64 tmpdiv;
> +#endif
> +       /* RAID 50/60 */
> +       /* Verify first and last block are in same RAID group */
> +       rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
>  #if BITS_PER_LONG == 32
> -               tmpdiv = first_block;
> -               first_group = do_div(tmpdiv, stripesize);
> -               tmpdiv = first_group;
> -               do_div(tmpdiv, r5or6_blocks_per_row);
> -               first_group = tmpdiv;
> -               tmpdiv = last_block;
> -               last_group = do_div(tmpdiv, stripesize);
> -               tmpdiv = last_group;
> -               do_div(tmpdiv, r5or6_blocks_per_row);
> -               last_group = tmpdiv;
> +       tmpdiv = rmd->first_block;
> +       rmd->first_group = do_div(tmpdiv, rmd->stripesize);
> +       tmpdiv = rmd->first_group;
> +       do_div(tmpdiv, rmd->blocks_per_row);
> +       rmd->first_group = tmpdiv;
> +       tmpdiv = rmd->last_block;
> +       rmd->last_group = do_div(tmpdiv, rmd->stripesize);
> +       tmpdiv = rmd->last_group;
> +       do_div(tmpdiv, rmd->blocks_per_row);
> +       rmd->last_group = tmpdiv;
>  #else
> -               first_group = (first_block % stripesize) / r5or6_blocks_per_row;
> -               last_group = (last_block % stripesize) / r5or6_blocks_per_row;
> +       rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
> +       rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
>  #endif
> -               if (first_group != last_group)
> -                       return PQI_RAID_BYPASS_INELIGIBLE;
> +       if (rmd->first_group != rmd->last_group)
> +               return PQI_RAID_BYPASS_INELIGIBLE;
>  
> -               /* Verify request is in a single row of RAID 5/6 */
> +       /* Verify request is in a single row of RAID 5/6 */
>  #if BITS_PER_LONG == 32
> -               tmpdiv = first_block;
> -               do_div(tmpdiv, stripesize);
> -               first_row = r5or6_first_row = r0_first_row = tmpdiv;
> -               tmpdiv = last_block;
> -               do_div(tmpdiv, stripesize);
> -               r5or6_last_row = r0_last_row = tmpdiv;
> +       tmpdiv = rmd->first_block;
> +       do_div(tmpdiv, rmd->stripesize);
> +       rmd->first_row = tmpdiv;
> +       rmd->r5or6_first_row = tmpdiv;
> +       tmpdiv = rmd->last_block;
> +       do_div(tmpdiv, rmd->stripesize);
> +       rmd->r5or6_last_row = tmpdiv;
>  #else
> -               first_row = r5or6_first_row = r0_first_row =
> -                       first_block / stripesize;
> -               r5or6_last_row = r0_last_row = last_block / stripesize;
> +       rmd->first_row = rmd->r5or6_first_row =
> +               rmd->first_block / rmd->stripesize;
> +       rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
>  #endif
> -               if (r5or6_first_row != r5or6_last_row)
> -                       return PQI_RAID_BYPASS_INELIGIBLE;
> +       if (rmd->r5or6_first_row != rmd->r5or6_last_row)
> +               return PQI_RAID_BYPASS_INELIGIBLE;
>  
> -               /* Verify request is in a single column */
> +       /* Verify request is in a single column */
>  #if BITS_PER_LONG == 32
> -               tmpdiv = first_block;
> -               first_row_offset = do_div(tmpdiv, stripesize);
> -               tmpdiv = first_row_offset;
> -               first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
> -               r5or6_first_row_offset = first_row_offset;
> -               tmpdiv = last_block;
> -               r5or6_last_row_offset = do_div(tmpdiv, stripesize);
> -               tmpdiv = r5or6_last_row_offset;
> -               r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
> -               tmpdiv = r5or6_first_row_offset;
> -               do_div(tmpdiv, strip_size);
> -               first_column = r5or6_first_column = tmpdiv;
> -               tmpdiv = r5or6_last_row_offset;
> -               do_div(tmpdiv, strip_size);
> -               r5or6_last_column = tmpdiv;
> +       tmpdiv = rmd->first_block;
> +       rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
> +       tmpdiv = rmd->first_row_offset;
> +       rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
> +       rmd->r5or6_first_row_offset = rmd->first_row_offset;
> +       tmpdiv = rmd->last_block;
> +       rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
> +       tmpdiv = rmd->r5or6_last_row_offset;
> +       rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
> +       tmpdiv = rmd->r5or6_first_row_offset;
> +       do_div(tmpdiv, rmd->strip_size);
> +       rmd->first_column = rmd->r5or6_first_column = tmpdiv;
> +       tmpdiv = rmd->r5or6_last_row_offset;
> +       do_div(tmpdiv, rmd->strip_size);
> +       rmd->r5or6_last_column = tmpdiv;
>  #else
> -               first_row_offset = r5or6_first_row_offset =
> -                       (u32)((first_block % stripesize) %
> -                       r5or6_blocks_per_row);
> +       rmd->first_row_offset = rmd->r5or6_first_row_offset =
> +               (u32)((rmd->first_block %
> +                               rmd->stripesize) %
> +                               rmd->blocks_per_row);
> +
> +       rmd->r5or6_last_row_offset =
> +               (u32)((rmd->last_block % rmd->stripesize) %
> +               rmd->blocks_per_row);
> +
> +       rmd->first_column =
> +                       rmd->r5or6_first_row_offset / rmd->strip_size;
> +       rmd->r5or6_first_column = rmd->first_column;
> +       rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
> +#endif
> +       if (rmd->r5or6_first_column != rmd->r5or6_last_column)
> +               return PQI_RAID_BYPASS_INELIGIBLE;
> +
> +       /* Request is eligible */
> +       rmd->map_row =
> +               ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
> +               get_unaligned_le16(&raid_map->row_cnt);
>  
> -               r5or6_last_row_offset =
> -                       (u32)((last_block % stripesize) %
> -                       r5or6_blocks_per_row);
> +       rmd->map_index = (rmd->first_group *
> +               (get_unaligned_le16(&raid_map->row_cnt) *
> +               rmd->total_disks_per_row)) +
> +               (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
>  
> -               first_column = r5or6_first_row_offset / strip_size;
> -               r5or6_first_column = first_column;
> -               r5or6_last_column = r5or6_last_row_offset / strip_size;
> -#endif
> -               if (r5or6_first_column != r5or6_last_column)
> -                       return PQI_RAID_BYPASS_INELIGIBLE;
> +       return 0;
> +}
> +
> +static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
> +{
> +       /* Build the new CDB for the physical disk I/O. */
> +       if (rmd->disk_block > 0xffffffff) {
> +               rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
> +               rmd->cdb[1] = 0;
> +               put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
> +               put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
> +               rmd->cdb[14] = 0;
> +               rmd->cdb[15] = 0;
> +               rmd->cdb_length = 16;
> +       } else {
> +               rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
> +               rmd->cdb[1] = 0;
> +               put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
> +               rmd->cdb[6] = 0;
> +               put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
> +               rmd->cdb[9] = 0;
> +               rmd->cdb_length = 10;
> +       }
> +}
> +
> +static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
> +       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
> +       struct pqi_queue_group *queue_group)
> +{
> +       struct raid_map *raid_map;
> +       int rc;
> +       struct pqi_encryption_info *encryption_info_ptr;
> +       struct pqi_encryption_info encryption_info;
> +       struct pqi_scsi_dev_raid_map_data rmd = {0};
> +
> +       rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
> +       if (rc)
> +               return PQI_RAID_BYPASS_INELIGIBLE;
> +
> +       rmd.raid_level = device->raid_level;
> +
> +       if (!pqi_aio_raid_level_supported(&rmd))
> +               return PQI_RAID_BYPASS_INELIGIBLE;
> +
> +       if (unlikely(rmd.block_cnt == 0))
> +               return PQI_RAID_BYPASS_INELIGIBLE;
> +
> +       raid_map = device->raid_map;
>  
> -               /* Request is eligible */
> -               map_row =
> -                       ((u32)(first_row >> raid_map-
> > parity_rotation_shift)) %
> -                       get_unaligned_le16(&raid_map->row_cnt);
> +       rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
> +       if (rc)
> +               return PQI_RAID_BYPASS_INELIGIBLE;
>  
> -               map_index = (first_group *
> -                       (get_unaligned_le16(&raid_map->row_cnt) *
> -                       total_disks_per_row)) +
> -                       (map_row * total_disks_per_row) + first_column;
> +       /* RAID 1 */
> +       if (device->raid_level == SA_RAID_1) {
> +               if (device->offload_to_mirror)
> +                       rmd.map_index += rmd.data_disks_per_row;
> +               device->offload_to_mirror = !device->offload_to_mirror;
> +       } else if (device->raid_level == SA_RAID_ADM) {
> +               rc = pqi_calc_aio_raid_adm(&rmd, device);

You don't use this return value. Actually, pqi_calc_aio_raid_adm()
could be a void function.
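I.e. (sketch):

	static void pqi_calc_aio_raid_adm(struct pqi_scsi_dev_raid_map_data *rmd,
					struct pqi_scsi_dev *device)

with the "return 0;" dropped and the call site reduced to

	pqi_calc_aio_raid_adm(&rmd, device);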



