* [PATCH 10/12] megaraid_sas : Add support for Uneven Span PRL11
From: Sumit.Saxena @ 2013-05-22  7:05 UTC
  To: linux-scsi; +Cc: jbottomley, kashyap.desai, aradford

Add support for Uneven Span PRL11.

Older MegaRAID firmware does not support uneven span configurations for PRL11.
For example, a user could not create a 34-drive PRL11 configuration with the old
firmware, since it was not a supported configuration.

The old firmware expects an even number of drives in each span and the same number
of physical drives in every span. With that design, 17 drives at span-0 and 17
drives at span-1 was not possible.

Supporting uneven spans requires changes in both the firmware and the driver.
The new firmware allows the user to create 16 drives at span-0 and 18 drives at
span-1, so a 34-drive uneven span PRL11 configuration becomes possible.
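
To make the layout concrete, here is a minimal userspace sketch (not driver code;
the 16+18 split and the assumption that each mirrored span contributes drives/2
data arms are illustrative) that accumulates per-span data widths and strip
offsets the same way mr_update_span_set() below builds spanRowDataSize and
strip_offset[]:

#include <stdio.h>

int main(void)
{
	unsigned int drives_per_span[] = { 16, 18 };	/* uneven spans */
	unsigned int strip_offset[2];
	unsigned int span_row_data_width = 0;
	unsigned int span;

	for (span = 0; span < 2; span++) {
		/* offset of this span's strips within one span row */
		strip_offset[span] = span_row_data_width;
		/* assume drives/2 data arms per mirrored span */
		span_row_data_width += drives_per_span[span] / 2;
		printf("span %u: data arms %u, strip offset %u\n",
		       span, drives_per_span[span] / 2, strip_offset[span]);
	}
	printf("span row data width = %u strips\n", span_row_data_width);
	return 0;
}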

The RAID map is the interface between the driver and the firmware for fetching all
required fields (attributes) for each virtual drive. Since the legacy RAID map
assumes an even span design, there is no place in it to keep uneven span
information. Because of this limitation, the driver cannot use the RAID map
directly for an uneven span VD.
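
Instead, the driver derives a per-LD span set from the RAID map quads and does the
strip/row arithmetic itself. The sketch below is a simplified userspace model of
that arithmetic (field names follow the LD_SPAN_SET structure added in this patch;
it deliberately omits the per-span offset correction that the real
get_row_from_strip() applies, and the example values are made up):

#include <stdint.h>
#include <stdio.h>

struct span_set_sketch {
	uint64_t data_strip_start;	/* first data strip covered by this set */
	uint64_t data_row_start;	/* first data row covered by this set */
	uint32_t span_row_data_width;	/* data strips per span row */
	uint32_t diff;			/* rows advanced per quad step */
};

/* strip -> row, modelled on get_row_from_strip() (simplified) */
static uint64_t row_from_strip(const struct span_set_sketch *s, uint64_t strip)
{
	uint64_t rel = strip - s->data_strip_start;

	return s->data_row_start + (rel / s->span_row_data_width) * s->diff;
}

int main(void)
{
	/* example values: 8 + 9 data arms -> 17 data strips per span row */
	struct span_set_sketch s = {
		.data_strip_start = 0,
		.data_row_start = 0,
		.span_row_data_width = 17,
		.diff = 1,
	};

	printf("strip 40 maps to row %llu\n",
	       (unsigned long long)row_from_strip(&s, 40));
	return 0;
}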

This patch adds the driver changes required to support uneven span PRL11:
1. The driver detects whether the firmware has UnevenSpanSupport by reading the
   controller info.
2. If the firmware supports uneven span PRL11, the driver informs the firmware of
   its own capability to handle uneven span PRL11.
3. The driver updates its copy of the span info every time the RAID map update is
   called.
4. The driver follows a different IO path for an uneven span: it uses the span set
   info to find the relevant fields for that particular virtual disk (see the
   sketch below).
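
The following compact userspace model illustrates the decisions in steps 1 and 4
(the types are hypothetical stand-ins; the real code uses megasas_instance,
MR_LD_RAID and IO_REQUEST_INFO from the diff below, and step 3's span-set rebuild
lives in mr_update_span_set()):

#include <stdbool.h>
#include <stdio.h>

struct ctrl_caps { bool support_uneven_spans; };	/* adapterOperations2 bit */
struct ld_raid   { unsigned int row_data_size; };	/* 0 for uneven-span LDs */

static bool uneven_span_support;

/* Step 1: latch the firmware capability once, at init time. */
static void read_ctrl_info(const struct ctrl_caps *caps)
{
	uneven_span_support = caps->support_uneven_spans;
}

/* Step 4: per-IO path selection, mirroring the check in MR_BuildRaidContext(). */
static const char *pick_io_path(const struct ld_raid *raid)
{
	if (raid->row_data_size != 0)
		return "legacy even-span fast path";
	if (uneven_span_support)
		return "span-set (uneven span) fast path";
	return "firmware LD IO (no fast path)";
}

int main(void)
{
	struct ctrl_caps caps = { .support_uneven_spans = true };
	struct ld_raid even = { .row_data_size = 8 };
	struct ld_raid uneven = { .row_data_size = 0 };

	read_ctrl_info(&caps);
	printf("even-span LD   -> %s\n", pick_io_path(&even));
	printf("uneven-span LD -> %s\n", pick_io_path(&uneven));
	return 0;
}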


More verbose prints can be enabled by setting SPAN_DEBUG to 1 at compile time.
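
The flag is defined near the top of megaraid_sas_fp.c in the hunk below; enabling
the extra prints is a one-line change:

#define SPAN_DEBUG 1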

Signed-off-by: Sumit Saxena <sumit.saxena@lsi.com>
Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
---
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index d430618..dab46c2 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -190,6 +190,12 @@
 #define MR_DCMD_PD_LIST_QUERY                   0x02010100
 
 /*
+ * Global functions
+ */
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+
+
+/*
  * MFI command completion codes
  */
 enum MFI_STAT {
@@ -729,8 +735,126 @@ struct megasas_ctrl_info {
 	 */
 	char package_version[0x60];
 
-	u8 pad[0x800 - 0x6a0];
 
+	/*
+	* If adapterOperations.supportMoreThan8Phys is set,
+	* and deviceInterface.portCount is greater than 8,
+	* SAS Addrs for first 8 ports shall be populated in
+	* deviceInterface.portAddr, and the rest shall be
+	* populated in deviceInterfacePortAddr2.
+	*/
+	u64         deviceInterfacePortAddr2[8]; /*6a0h */
+	u8          reserved3[128];              /*6e0h */
+
+	struct {                                /*760h */
+		u16 minPdRaidLevel_0:4;
+		u16 maxPdRaidLevel_0:12;
+
+		u16 minPdRaidLevel_1:4;
+		u16 maxPdRaidLevel_1:12;
+
+		u16 minPdRaidLevel_5:4;
+		u16 maxPdRaidLevel_5:12;
+
+		u16 minPdRaidLevel_1E:4;
+		u16 maxPdRaidLevel_1E:12;
+
+		u16 minPdRaidLevel_6:4;
+		u16 maxPdRaidLevel_6:12;
+
+		u16 minPdRaidLevel_10:4;
+		u16 maxPdRaidLevel_10:12;
+
+		u16 minPdRaidLevel_50:4;
+		u16 maxPdRaidLevel_50:12;
+
+		u16 minPdRaidLevel_60:4;
+		u16 maxPdRaidLevel_60:12;
+
+		u16 minPdRaidLevel_1E_RLQ0:4;
+		u16 maxPdRaidLevel_1E_RLQ0:12;
+
+		u16 minPdRaidLevel_1E0_RLQ0:4;
+		u16 maxPdRaidLevel_1E0_RLQ0:12;
+
+		u16 reserved[6];
+	} pdsForRaidLevels;
+
+	u16 maxPds;                             /*780h */
+	u16 maxDedHSPs;                         /*782h */
+	u16 maxGlobalHSPs;                      /*784h */
+	u16 ddfSize;                            /*786h */
+	u8  maxLdsPerArray;                     /*788h */
+	u8  partitionsInDDF;                    /*789h */
+	u8  lockKeyBinding;                     /*78ah */
+	u8  maxPITsPerLd;                       /*78bh */
+	u8  maxViewsPerLd;                      /*78ch */
+	u8  maxTargetId;                        /*78dh */
+	u16 maxBvlVdSize;                       /*78eh */
+
+	u16 maxConfigurableSSCSize;             /*790h */
+	u16 currentSSCsize;                     /*792h */
+
+	char    expanderFwVersion[12];          /*794h */
+
+	u16 PFKTrialTimeRemaining;              /*7A0h */
+
+	u16 cacheMemorySize;                    /*7A2h */
+
+	struct {                                /*7A4h */
+		u32     supportPIcontroller:1;
+		u32     supportLdPIType1:1;
+		u32     supportLdPIType2:1;
+		u32     supportLdPIType3:1;
+		u32     supportLdBBMInfo:1;
+		u32     supportShieldState:1;
+		u32     blockSSDWriteCacheChange:1;
+		u32     supportSuspendResumeBGops:1;
+		u32     supportEmergencySpares:1;
+		u32     supportSetLinkSpeed:1;
+		u32     supportBootTimePFKChange:1;
+		u32     supportJBOD:1;
+		u32     disableOnlinePFKChange:1;
+		u32     supportPerfTuning:1;
+		u32     supportSSDPatrolRead:1;
+		u32     realTimeScheduler:1;
+
+		u32     supportResetNow:1;
+		u32     supportEmulatedDrives:1;
+		u32     headlessMode:1;
+		u32     dedicatedHotSparesLimited:1;
+
+
+		u32     supportUnevenSpans:1;
+		u32     reserved:11;
+	} adapterOperations2;
+
+	u8  driverVersion[32];                  /*7A8h */
+	u8  maxDAPdCountSpinup60;               /*7C8h */
+	u8  temperatureROC;                     /*7C9h */
+	u8  temperatureCtrl;                    /*7CAh */
+	u8  reserved4;                          /*7CBh */
+	u16 maxConfigurablePds;                 /*7CCh */
+
+
+	u8  reserved5[2];                       /*0x7CDh */
+
+	/*
+	* HA cluster information
+	*/
+	struct {
+		u32     peerIsPresent:1;
+		u32     peerIsIncompatible:1;
+		u32     hwIncompatible:1;
+		u32     fwVersionMismatch:1;
+		u32     ctrlPropIncompatible:1;
+		u32     premiumFeatureMismatch:1;
+		u32     reserved:26;
+	} cluster;
+
+	char clusterId[16];                     /*7D4h */
+
+	u8          pad[0x800-0x7E4];           /*7E4 */
 } __packed;
 
 /*
@@ -1389,6 +1513,7 @@ struct megasas_instance {
 	u8 flag_ieee;
 	u8 issuepend_done;
 	u8 disableOnlineCtrlReset;
+	u8 UnevenSpanSupport;
 	u8 adprecovery;
 	unsigned long last_time;
 	u32 mfiStatus;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 771a0f9..21f0434 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -171,8 +171,6 @@ megasas_sync_map_info(struct megasas_instance *instance);
 int
 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
 void megasas_reset_reply_desc(struct megasas_instance *instance);
-u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
-		      struct LD_LOAD_BALANCE_INFO *lbInfo);
 int megasas_reset_fusion(struct Scsi_Host *shost);
 void megasas_fusion_ocr_wq(struct work_struct *work);
 
@@ -2295,6 +2293,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 		/* Check for LD map update */
 		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
 		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
+			fusion->fast_path_io = 0;
 			spin_lock_irqsave(instance->host->host_lock, flags);
 			if (cmd->frame->hdr.cmd_status != 0) {
 				if (cmd->frame->hdr.cmd_status !=
@@ -2312,9 +2311,13 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 			} else
 				instance->map_id++;
 			megasas_return_cmd(instance, cmd);
-			if (MR_ValidateMapInfo(
-				    fusion->ld_map[(instance->map_id & 1)],
-				    fusion->load_balance_info))
+
+			/*
+			 * Set fast path IO to ZERO.
+			 * Validate Map will set proper value.
+			 * Meanwhile all IOs will go as LD IO.
+			 */
+			if (MR_ValidateMapInfo(instance))
 				fusion->fast_path_io = 1;
 			else
 				fusion->fast_path_io = 0;
@@ -3661,6 +3664,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
 		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
 		instance->disableOnlineCtrlReset =
 		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+		instance->UnevenSpanSupport =
+			ctrl_info->adapterOperations2.supportUnevenSpans;
+		if (instance->UnevenSpanSupport) {
+			struct fusion_context *fusion = instance->ctrl_context;
+			dev_info(&instance->pdev->dev, "FW supports: "
+			"UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
+			if (MR_ValidateMapInfo(instance))
+				fusion->fast_path_io = 1;
+			else
+				fusion->fast_path_io = 0;
+
+		}
 	}
 
 	instance->max_sectors_per_req = instance->max_num_sge *
@@ -4202,6 +4217,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 	instance->unload = 1;
 	instance->last_time = 0;
 	instance->disableOnlineCtrlReset = 1;
+	instance->UnevenSpanSupport = 0;
 
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 8e62b87..ede1edb 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -60,10 +60,22 @@
 #define FALSE 0
 #define TRUE 1
 
+#define SPAN_DEBUG 0
+#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize)
+#define SPAN_INVALID  0xff
+
 /* Prototypes */
-void
-mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
-			      struct LD_LOAD_BALANCE_INFO *lbInfo);
+void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
+	struct LD_LOAD_BALANCE_INFO *lbInfo);
+
+static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+	PLD_SPAN_INFO ldSpanInfo);
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+	struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
+static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
+	u64 strip, struct MR_FW_RAID_MAP_ALL *map);
 
 u32 mega_mod64(u64 dividend, u32 divisor)
 {
@@ -148,9 +160,12 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
 /*
  * This function will validate Map info data provided by FW
  */
-u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
-		      struct LD_LOAD_BALANCE_INFO *lbInfo)
+u8 MR_ValidateMapInfo(struct megasas_instance *instance)
 {
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
+	struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
 	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
 
 	if (pFwRaidMap->totalSize !=
@@ -167,13 +182,16 @@ u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
 		return 0;
 	}
 
+	if (instance->UnevenSpanSupport)
+		mr_update_span_set(map, ldSpanInfo);
+
 	mr_update_load_balance_params(map, lbInfo);
 
 	return 1;
 }
 
 u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
-		    struct MR_FW_RAID_MAP_ALL *map, int *div_error)
+		    struct MR_FW_RAID_MAP_ALL *map)
 {
 	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
 	struct MR_QUAD_ELEMENT    *quad;
@@ -185,10 +203,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
 		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
 			quad = &pSpanBlock->block_span_info.quad[j];
 
-			if (quad->diff == 0) {
-				*div_error = 1;
-				return span;
-			}
+			if (quad->diff == 0)
+				return SPAN_INVALID;
 			if (quad->logStart <= row  &&  row <= quad->logEnd  &&
 			    (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
 				if (span_blk != NULL) {
@@ -207,7 +223,456 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
 			}
 		}
 	}
-	return span;
+	return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* Function to print info about span set created in driver from FW raid map
+*
+* Inputs :
+* map    - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*/
+#if SPAN_DEBUG
+static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+{
+
+	u8   span;
+	u32    element;
+	struct MR_LD_RAID *raid;
+	LD_SPAN_SET *span_set;
+	struct MR_QUAD_ELEMENT    *quad;
+	int ldCount;
+	u16 ld;
+
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, map);
+			if (ld >= MAX_LOGICAL_DRIVES)
+				continue;
+		raid = MR_LdRaidGet(ld, map);
+		dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
+			ld, raid->spanDepth);
+		for (span = 0; span < raid->spanDepth; span++)
+			dev_dbg(&instance->pdev->dev, "Span=%x,"
+			" number of quads=%x\n", span,
+			map->raidMap.ldSpanMap[ld].spanBlock[span].
+			block_span_info.noElements);
+		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+			span_set = &(ldSpanInfo[ld].span_set[element]);
+			if (span_set->span_row_data_width == 0)
+				break;
+
+			dev_dbg(&instance->pdev->dev, "Span Set %x:"
+				"width=%x, diff=%x\n", element,
+				(unsigned int)span_set->span_row_data_width,
+				(unsigned int)span_set->diff);
+			dev_dbg(&instance->pdev->dev, "logical LBA"
+				"start=0x%08lx, end=0x%08lx\n",
+				(long unsigned int)span_set->log_start_lba,
+				(long unsigned int)span_set->log_end_lba);
+			dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
+				" end=0x%08lx\n",
+				(long unsigned int)span_set->span_row_start,
+				(long unsigned int)span_set->span_row_end);
+			dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
+				" end=0x%08lx\n",
+				(long unsigned int)span_set->data_row_start,
+				(long unsigned int)span_set->data_row_end);
+			dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
+				" end=0x%08lx\n",
+				(long unsigned int)span_set->data_strip_start,
+				(long unsigned int)span_set->data_strip_end);
+
+			for (span = 0; span < raid->spanDepth; span++) {
+				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+					block_span_info.noElements >=
+					element + 1) {
+					quad = &map->raidMap.ldSpanMap[ld].
+						spanBlock[span].block_span_info.
+						quad[element];
+				dev_dbg(&instance->pdev->dev, "Span=%x,"
+					"Quad=%x, diff=%x\n", span,
+					element, quad->diff);
+				dev_dbg(&instance->pdev->dev,
+					"offset_in_span=0x%08lx\n",
+					(long unsigned int)quad->offsetInSpan);
+				dev_dbg(&instance->pdev->dev,
+					"logical start=0x%08lx, end=0x%08lx\n",
+					(long unsigned int)quad->logStart,
+					(long unsigned int)quad->logEnd);
+				}
+			}
+		}
+	}
+	return 0;
+}
+#endif
+
+/*
+******************************************************************************
+*
+* This routine calculates the Span block for given row using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    row        - Row number
+*    map    - LD map
+*
+* Outputs :
+*
+*    span          - Span number
+*    block         - Absolute Block number in the physical disk
+*    div_error	   - Devide error code.
+*/
+
+u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+		u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	struct MR_QUAD_ELEMENT    *quad;
+	u32    span, info;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+
+		if (row > span_set->data_row_end)
+			continue;
+
+		for (span = 0; span < raid->spanDepth; span++)
+			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements >= info+1) {
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].
+					block_span_info.quad[info];
+				if (quad->diff == 0)
+					return SPAN_INVALID;
+				if (quad->logStart <= row  &&
+					row <= quad->logEnd  &&
+					(mega_mod64(row - quad->logStart,
+						quad->diff)) == 0) {
+					if (span_blk != NULL) {
+						u64  blk;
+						blk = mega_div64_32
+						    ((row - quad->logStart),
+						    quad->diff);
+						blk = (blk + quad->offsetInSpan)
+							 << raid->stripeShift;
+						*span_blk = blk;
+					}
+					return span;
+				}
+			}
+	}
+	return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the row for given strip using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    Strip        - Strip
+*    map    - LD map
+*
+* Outputs :
+*
+*    row         - row associated with strip
+*/
+
+static u64  get_row_from_strip(struct megasas_instance *instance,
+	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET	*span_set;
+	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
+	u32		info, strip_offset, span, span_offset;
+	u64		span_set_Strip, span_set_Row, retval;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (strip > span_set->data_strip_end)
+			continue;
+
+		span_set_Strip = strip - span_set->data_strip_start;
+		strip_offset = mega_mod64(span_set_Strip,
+				span_set->span_row_data_width);
+		span_set_Row = mega_div64_32(span_set_Strip,
+				span_set->span_row_data_width) * span_set->diff;
+		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements >= info+1) {
+				if (strip_offset >=
+					span_set->strip_offset[span])
+					span_offset++;
+				else
+					break;
+			}
+#if SPAN_DEBUG
+		dev_info(&instance->pdev->dev, "Strip 0x%llx,"
+			"span_set_Strip 0x%llx, span_set_Row 0x%llx"
+			"data width 0x%llx span offset 0x%x\n", strip,
+			(unsigned long long)span_set_Strip,
+			(unsigned long long)span_set_Row,
+			(unsigned long long)span_set->span_row_data_width,
+			span_offset);
+		dev_info(&instance->pdev->dev, "For strip 0x%llx"
+			"row is 0x%llx\n", strip,
+			(unsigned long long) span_set->data_row_start +
+			(unsigned long long) span_set_Row + (span_offset - 1));
+#endif
+		retval = (span_set->data_row_start + span_set_Row +
+				(span_offset - 1));
+		return retval;
+	}
+	return -1LLU;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the Start Strip for given row using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    row        - Row number
+*    map    - LD map
+*
+* Outputs :
+*
+*    Strip         - Start strip associated with row
+*/
+
+static u64 get_strip_from_row(struct megasas_instance *instance,
+		u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	struct MR_QUAD_ELEMENT    *quad;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+	u32    span, info;
+	u64  strip;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (row > span_set->data_row_end)
+			continue;
+
+		for (span = 0; span < raid->spanDepth; span++)
+			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements >= info+1) {
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].block_span_info.quad[info];
+				if (quad->logStart <= row  &&
+					row <= quad->logEnd  &&
+					mega_mod64((row - quad->logStart),
+					quad->diff) == 0) {
+					strip = mega_div64_32
+						(((row - span_set->data_row_start)
+							- quad->logStart),
+							quad->diff);
+					strip *= span_set->span_row_data_width;
+					strip += span_set->data_strip_start;
+					strip += span_set->strip_offset[span];
+					return strip;
+				}
+			}
+	}
+	dev_err(&instance->pdev->dev, "get_strip_from_row"
+		"returns invalid strip for ld=%x, row=%lx\n",
+		ld, (long unsigned int)row);
+	return -1;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the Physical Arm for given strip using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    strip      - Strip
+*    map    - LD map
+*
+* Outputs :
+*
+*    Phys Arm         - Phys Arm associated with strip
+*/
+
+static u32 get_arm_from_strip(struct megasas_instance *instance,
+	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+	u32    info, strip_offset, span, span_offset, retval;
+
+	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (strip > span_set->data_strip_end)
+			continue;
+
+		strip_offset = (uint)mega_mod64
+				((strip - span_set->data_strip_start),
+				span_set->span_row_data_width);
+
+		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+			if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements >= info+1) {
+				if (strip_offset >=
+					span_set->strip_offset[span])
+					span_offset =
+						span_set->strip_offset[span];
+				else
+					break;
+			}
+#if SPAN_DEBUG
+		dev_info(&instance->pdev->dev, "get_arm_from_strip:"
+			"for ld=0x%x strip=0x%lx arm is  0x%x\n", ld,
+			(long unsigned int)strip, (strip_offset - span_offset));
+#endif
+		retval = (strip_offset - span_offset);
+		return retval;
+	}
+
+	dev_err(&instance->pdev->dev, "get_arm_from_strip"
+		"returns invalid arm for ld=%x strip=%lx\n",
+		ld, (long unsigned int)strip);
+
+	return -1;
+}
+
+/* This Function will return Phys arm */
+u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+		struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	/* Need to check correct default value */
+	u32    arm = 0;
+
+	switch (raid->level) {
+	case 0:
+	case 5:
+	case 6:
+		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+		break;
+	case 1:
+		/* start with logical arm */
+		arm = get_arm_from_strip(instance, ld, stripe, map);
+		if (arm != -1UL)
+			arm *= 2;
+		break;
+	}
+
+	return arm;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe using spanset
+*
+* Inputs :
+*
+*    ld   - Logical drive number
+*    stripRow        - Stripe number
+*    stripRef    - Reference in stripe
+*
+* Outputs :
+*
+*    span          - Span number
+*    block         - Absolute Block number in the physical disk
+*/
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+		struct RAID_CONTEXT *pRAID_Context,
+		struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	u32     pd, arRef;
+	u8      physArm, span;
+	u64     row;
+	u8	retval = TRUE;
+	u8	do_invader = 0;
+	u64	*pdBlock = &io_info->pdBlock;
+	u16	*pDevHandle = &io_info->devHandle;
+	u32	logArm, rowMod, armQ, arm;
+
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+		do_invader = 1;
+
+	/*Get row and span from io_info for Uneven Span IO.*/
+	row	    = io_info->start_row;
+	span	    = io_info->start_span;
+
+
+	if (raid->level == 6) {
+		logArm = get_arm_from_strip(instance, ld, stripRow, map);
+		if (logArm == -1UL)
+			return FALSE;
+		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
+		arm = armQ + 1 + logArm;
+		if (arm >= SPAN_ROW_SIZE(map, ld, span))
+			arm -= SPAN_ROW_SIZE(map, ld, span);
+		physArm = (u8)arm;
+	} else
+		/* Calculate the arm */
+		physArm = get_arm(instance, ld, span, stripRow, map);
+	if (physArm == 0xFF)
+		return FALSE;
+
+	arRef       = MR_LdSpanArrayGet(ld, span, map);
+	pd          = MR_ArPdGet(arRef, physArm, map);
+
+	if (pd != MR_PD_INVALID)
+		*pDevHandle = MR_PdDevHandleGet(pd, map);
+	else {
+		*pDevHandle = MR_PD_INVALID;
+		if ((raid->level >= 5) &&
+			(!do_invader  || (do_invader &&
+			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+		else if (raid->level == 1) {
+			pd = MR_ArPdGet(arRef, physArm + 1, map);
+			if (pd != MR_PD_INVALID)
+				*pDevHandle = MR_PdDevHandleGet(pd, map);
+		}
+	}
+
+	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+					physArm;
+	return retval;
 }
 
 /*
@@ -228,17 +693,18 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
 *    block         - Absolute Block number in the physical disk
 */
 u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
-		   u16 stripRef, u64 *pdBlock, u16 *pDevHandle,
-		   struct RAID_CONTEXT *pRAID_Context,
-		   struct MR_FW_RAID_MAP_ALL *map)
+		u16 stripRef, struct IO_REQUEST_INFO *io_info,
+		struct RAID_CONTEXT *pRAID_Context,
+		struct MR_FW_RAID_MAP_ALL *map)
 {
 	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
 	u32         pd, arRef;
 	u8          physArm, span;
 	u64         row;
 	u8	    retval = TRUE;
-	int	    error_code = 0;
 	u8          do_invader = 0;
+	u64	    *pdBlock = &io_info->pdBlock;
+	u16	    *pDevHandle = &io_info->devHandle;
 
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
 		instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
@@ -272,8 +738,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 		span = 0;
 		*pdBlock = row << raid->stripeShift;
 	} else {
-		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
-		if (error_code == 1)
+		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
+		if (span == SPAN_INVALID)
 			return FALSE;
 	}
 
@@ -331,17 +797,42 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	u32         numBlocks, ldTgtId;
 	u8          isRead;
 	u8	    retval = 0;
+	u8	    startlba_span = SPAN_INVALID;
+	u64 *pdBlock = &io_info->pdBlock;
 
 	ldStartBlock = io_info->ldStartBlock;
 	numBlocks = io_info->numBlocks;
 	ldTgtId = io_info->ldTgtId;
 	isRead = io_info->isRead;
+	io_info->IoforUnevenSpan = 0;
+	io_info->start_span	= SPAN_INVALID;
 
 	ld = MR_TargetIdToLdGet(ldTgtId, map);
 	raid = MR_LdRaidGet(ld, map);
 
+	/*
+	 * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
+	 * return FALSE
+	 */
+	if (raid->rowDataSize == 0) {
+		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+			return FALSE;
+		else if (instance->UnevenSpanSupport) {
+			io_info->IoforUnevenSpan = 1;
+		} else {
+			dev_info(&instance->pdev->dev,
+				"raid->rowDataSize is 0, but has SPAN[0]"
+				"rowDataSize = 0x%0x,"
+				"but there is _NO_ UnevenSpanSupport\n",
+				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+			return FALSE;
+		}
+	}
+
 	stripSize = 1 << raid->stripeShift;
 	stripe_mask = stripSize-1;
+
+
 	/*
 	 * calculate starting row and stripe, and number of strips and rows
 	 */
@@ -351,11 +842,50 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
 	endStrip            = endLba >> raid->stripeShift;
 	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */
-	if (raid->rowDataSize == 0)
-		return FALSE;
-	start_row           =  mega_div64_32(start_strip, raid->rowDataSize);
-	endRow              =  mega_div64_32(endStrip, raid->rowDataSize);
-	numRows             = (u8)(endRow - start_row + 1);
+
+	if (io_info->IoforUnevenSpan) {
+		start_row = get_row_from_strip(instance, ld, start_strip, map);
+		endRow	  = get_row_from_strip(instance, ld, endStrip, map);
+		if (start_row == -1ULL || endRow == -1ULL) {
+			dev_info(&instance->pdev->dev, "return from %s %d."
+				"Send IO w/o region lock.\n",
+				__func__, __LINE__);
+			return FALSE;
+		}
+
+		if (raid->spanDepth == 1) {
+			startlba_span = 0;
+			*pdBlock = start_row << raid->stripeShift;
+		} else
+			startlba_span = (u8)mr_spanset_get_span_block(instance,
+						ld, start_row, pdBlock, map);
+		if (startlba_span == SPAN_INVALID) {
+			dev_info(&instance->pdev->dev, "return from %s %d"
+				"for row 0x%llx,start strip %llx"
+				"endSrip %llx\n", __func__, __LINE__,
+				(unsigned long long)start_row,
+				(unsigned long long)start_strip,
+				(unsigned long long)endStrip);
+			return FALSE;
+		}
+		io_info->start_span	= startlba_span;
+		io_info->start_row	= start_row;
+#if SPAN_DEBUG
+		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
+			"for row 0x%llx, start strip 0x%llx end strip 0x%llx"
+			" span 0x%x\n", __func__, __LINE__,
+			(unsigned long long)start_row,
+			(unsigned long long)start_strip,
+			(unsigned long long)endStrip, startlba_span);
+		dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
+			"Start span 0x%x\n", (unsigned long long)start_row,
+			(unsigned long long)endRow, startlba_span);
+#endif
+	} else {
+		start_row = mega_div64_32(start_strip, raid->rowDataSize);
+		endRow    = mega_div64_32(endStrip, raid->rowDataSize);
+	}
+	numRows = (u8)(endRow - start_row + 1);
 
 	/*
 	 * calculate region info.
@@ -388,22 +918,49 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 			regSize = numBlocks;
 		}
 		/* multi-strip IOs always need to full stripe locked */
-	} else {
+	} else if (io_info->IoforUnevenSpan == 0) {
+		/*
+		 * For Even span region lock optimization.
+		 * If the start strip is the last in the start row
+		 */
 		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
-			/* If the start strip is the last in the start row */
 			regStart += ref_in_start_stripe;
-			regSize = stripSize - ref_in_start_stripe;
 			/* initialize count to sectors from startref to end
 			   of strip */
+			regSize = stripSize - ref_in_start_stripe;
+		}
+
+		/* add complete rows in the middle of the transfer */
+		if (numRows > 2)
+			regSize += (numRows-2) << raid->stripeShift;
+
+		/* if IO ends within first strip of last row*/
+		if (endStrip == endRow*raid->rowDataSize)
+			regSize += ref_in_end_stripe+1;
+		else
+			regSize += stripSize;
+	} else {
+		/*
+		 * For Uneven span region lock optimization.
+		 * If the start strip is the last in the start row
+		 */
+		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
+				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+			regStart += ref_in_start_stripe;
+			/* initialize count to sectors from
+			 * startRef to end of strip
+			 */
+			regSize = stripSize - ref_in_start_stripe;
 		}
+		/* Add complete rows in the middle of the transfer*/
 
 		if (numRows > 2)
-			/* Add complete rows in the middle of the transfer */
+			/* Add complete rows in the middle of the transfer*/
 			regSize += (numRows-2) << raid->stripeShift;
 
 		/* if IO ends within first strip of last row */
-		if (endStrip == endRow*raid->rowDataSize)
-			regSize += ref_in_end_stripe+1;
+		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
+			regSize += ref_in_end_stripe + 1;
 		else
 			regSize += stripSize;
 	}
@@ -424,30 +981,161 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	/*Get Phy Params only if FP capable, or else leave it to MR firmware
 	  to do the calculation.*/
 	if (io_info->fpOkForIo) {
-		retval = MR_GetPhyParams(instance, ld, start_strip,
-					 ref_in_start_stripe,
-					 &io_info->pdBlock,
-					 &io_info->devHandle, pRAID_Context,
-					 map);
-		/* If IO on an invalid Pd, then FP i snot possible */
+		retval = io_info->IoforUnevenSpan ?
+				mr_spanset_get_phy_params(instance, ld,
+					start_strip, ref_in_start_stripe,
+					io_info, pRAID_Context, map) :
+				MR_GetPhyParams(instance, ld, start_strip,
+					ref_in_start_stripe, io_info,
+					pRAID_Context, map);
+		/* If IO on an invalid Pd, then FP is not possible.*/
 		if (io_info->devHandle == MR_PD_INVALID)
 			io_info->fpOkForIo = FALSE;
 		return retval;
 	} else if (isRead) {
 		uint stripIdx;
 		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
-			if (!MR_GetPhyParams(instance, ld,
-					     start_strip + stripIdx,
-					     ref_in_start_stripe,
-					     &io_info->pdBlock,
-					     &io_info->devHandle,
-					     pRAID_Context, map))
+			retval = io_info->IoforUnevenSpan ?
+				mr_spanset_get_phy_params(instance, ld,
+				    start_strip + stripIdx,
+				    ref_in_start_stripe, io_info,
+				    pRAID_Context, map) :
+				MR_GetPhyParams(instance, ld,
+				    start_strip + stripIdx, ref_in_start_stripe,
+				    io_info, pRAID_Context, map);
+			if (!retval)
 				return TRUE;
 		}
 	}
+
+#if SPAN_DEBUG
+	/* Just for testing what arm we get for strip.*/
+	if (io_info->IoforUnevenSpan)
+		get_arm_from_strip(instance, ld, start_strip, map);
+#endif
 	return TRUE;
 }
 
+/*
+******************************************************************************
+*
+* This routine pepare spanset info from Valid Raid map and store it into
+* local copy of ldSpanInfo per instance data structure.
+*
+* Inputs :
+* map    - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*
+*/
+void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+			PLD_SPAN_INFO ldSpanInfo)
+{
+	u8   span, count;
+	u32  element, span_row_width;
+	u64  span_row;
+	struct MR_LD_RAID *raid;
+	LD_SPAN_SET *span_set, *span_set_prev;
+	struct MR_QUAD_ELEMENT    *quad;
+	int ldCount;
+	u16 ld;
+
+
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, map);
+		if (ld >= MAX_LOGICAL_DRIVES)
+			continue;
+		raid = MR_LdRaidGet(ld, map);
+		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+			for (span = 0; span < raid->spanDepth; span++) {
+				if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+					block_span_info.noElements <
+					element + 1)
+					continue;
+				span_set = &(ldSpanInfo[ld].span_set[element]);
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].block_span_info.
+					quad[element];
+
+				span_set->diff = quad->diff;
+
+				for (count = 0, span_row_width = 0;
+					count < raid->spanDepth; count++) {
+					if (map->raidMap.ldSpanMap[ld].
+						spanBlock[count].
+						block_span_info.
+						noElements >= element + 1) {
+						span_set->strip_offset[count] =
+							span_row_width;
+						span_row_width +=
+							MR_LdSpanPtrGet
+							(ld, count, map)->spanRowDataSize;
+						printk(KERN_INFO "megasas:"
+							"span %x rowDataSize %x\n",
+							count, MR_LdSpanPtrGet
+							(ld, count, map)->spanRowDataSize);
+					}
+				}
+
+				span_set->span_row_data_width = span_row_width;
+				span_row = mega_div64_32(((quad->logEnd -
+					quad->logStart) + quad->diff),
+					quad->diff);
+
+				if (element == 0) {
+					span_set->log_start_lba = 0;
+					span_set->log_end_lba =
+						((span_row << raid->stripeShift)
+						* span_row_width) - 1;
+
+					span_set->span_row_start = 0;
+					span_set->span_row_end = span_row - 1;
+
+					span_set->data_strip_start = 0;
+					span_set->data_strip_end =
+						(span_row * span_row_width) - 1;
+
+					span_set->data_row_start = 0;
+					span_set->data_row_end =
+						(span_row * quad->diff) - 1;
+				} else {
+					span_set_prev = &(ldSpanInfo[ld].
+							span_set[element - 1]);
+					span_set->log_start_lba =
+						span_set_prev->log_end_lba + 1;
+					span_set->log_end_lba =
+						span_set->log_start_lba +
+						((span_row << raid->stripeShift)
+						* span_row_width) - 1;
+
+					span_set->span_row_start =
+						span_set_prev->span_row_end + 1;
+					span_set->span_row_end =
+					span_set->span_row_start + span_row - 1;
+
+					span_set->data_strip_start =
+					span_set_prev->data_strip_end + 1;
+					span_set->data_strip_end =
+						span_set->data_strip_start +
+						(span_row * span_row_width) - 1;
+
+					span_set->data_row_start =
+						span_set_prev->data_row_end + 1;
+					span_set->data_row_end =
+						span_set->data_row_start +
+						(span_row * quad->diff) - 1;
+				}
+				break;
+		}
+		if (span == raid->spanDepth)
+			break;
+	    }
+	}
+#if SPAN_DEBUG
+	getSpanInfo(map, ldSpanInfo);
+#endif
+
+}
+
 void
 mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
 			      struct LD_LOAD_BALANCE_INFO *lbInfo)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 228400a..235185d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -86,8 +86,6 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
 void
 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
 
-u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
-		      struct LD_LOAD_BALANCE_INFO *lbInfo);
 u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
 			   struct IO_REQUEST_INFO *in_info);
 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
@@ -782,8 +780,7 @@ megasas_get_map_info(struct megasas_instance *instance)
 
 	fusion->fast_path_io = 0;
 	if (!megasas_get_ld_map_info(instance)) {
-		if (MR_ValidateMapInfo(fusion->ld_map[(instance->map_id & 1)],
-				       fusion->load_balance_info)) {
+		if (MR_ValidateMapInfo(instance)) {
 			fusion->fast_path_io = 1;
 			return 0;
 		}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 004c18e..12ff01c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -463,6 +463,7 @@ struct MPI2_IOC_INIT_REQUEST {
 /* mrpriv defines */
 #define MR_PD_INVALID 0xFFFF
 #define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH	MAX_SPAN_DEPTH
 #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
 #define MAX_ROW_SIZE 32
 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
@@ -504,7 +505,9 @@ struct MR_LD_SPAN {
 	u64      startBlk;
 	u64      numBlks;
 	u16      arrayRef;
-	u8       reserved[6];
+	u8       spanRowSize;
+	u8       spanRowDataSize;
+	u8       reserved[4];
 };
 
 struct MR_SPAN_BLOCK_INFO {
@@ -590,6 +593,10 @@ struct IO_REQUEST_INFO {
 	u16 devHandle;
 	u64 pdBlock;
 	u8 fpOkForIo;
+	u8 IoforUnevenSpan;
+	u8 start_span;
+	u8 reserved;
+	u64 start_row;
 };
 
 struct MR_LD_TARGET_SYNC {
@@ -651,6 +658,26 @@ struct LD_LOAD_BALANCE_INFO {
 	u64     last_accessed_block[2];
 };
 
+/* SPAN_SET is info caclulated from span info from Raid map per LD */
+typedef struct _LD_SPAN_SET {
+	u64  log_start_lba;
+	u64  log_end_lba;
+	u64  span_row_start;
+	u64  span_row_end;
+	u64  data_strip_start;
+	u64  data_strip_end;
+	u64  data_row_start;
+	u64  data_row_end;
+	u8   strip_offset[MAX_SPAN_DEPTH];
+	u32    span_row_data_width;
+	u32    diff;
+	u32    reserved[2];
+} LD_SPAN_SET, *PLD_SPAN_SET;
+
+typedef struct LOG_BLOCK_SPAN_INFO {
+	LD_SPAN_SET  span_set[MAX_SPAN_DEPTH];
+} LD_SPAN_INFO, *PLD_SPAN_INFO;
+
 struct MR_FW_RAID_MAP_ALL {
 	struct MR_FW_RAID_MAP raidMap;
 	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
@@ -695,6 +722,7 @@ struct fusion_context {
 	u32 map_sz;
 	u8 fast_path_io;
 	struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
+	LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
 };
 
 union desc_value {

