From: Dinh Nguyen <dinguyen@opensource.altera.com>
To: u-boot@lists.denx.de
Subject: [U-Boot] [PATCHv3 01/17] arm: socfpga: spl: Add main sdram code
Date: Mon, 30 Mar 2015 17:01:02 -0500
Message-ID: <1427752878-18426-2-git-send-email-dinguyen@opensource.altera.com>
In-Reply-To: <1427752878-18426-1-git-send-email-dinguyen@opensource.altera.com>

From: Dinh Nguyen <dinguyen@opensource.altera.com>

This adds the code to configure the SDRAM controller that is found in the
SoCFPGA Cyclone5 and Arria5 platforms.
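
A rough sketch of how an SPL board file might drive the new interface (the
SPL hook-up itself is not part of this patch; the argument value and error
handling below are assumptions based on the declarations in sdram.h):

	/* assumed SPL flow - illustrative only, not part of this patch */
	if (sdram_mmr_init_full(0) != 0)	/* 0 is a placeholder sdr_phy_reg value */
		hang();				/* nonzero means a controller register write failed */
	sdram_calibration_full();		/* run the DDR PHY calibration sequencer */
	gd->ram_size = sdram_calculate_size();	/* size derived from the DRAMADDRW settings */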

Signed-off-by: Dinh Nguyen <dinguyen@opensource.altera.com>
---
v2: moved to drivers/ddr/altera/
    only supports DDR3 at FULLRATE
---
 Makefile                                         |    1 +
 arch/arm/include/asm/arch-socfpga/sdram.h        |  434 +++
 arch/arm/include/asm/arch-socfpga/sdram_config.h |  100 +
 drivers/ddr/altera/Makefile                      |   12 +
 drivers/ddr/altera/sdram.c                       | 1307 +++++++
 drivers/ddr/altera/sequencer.c                   | 3970 ++++++++++++++++++++++
 drivers/ddr/altera/sequencer.h                   |  359 ++
 drivers/ddr/altera/sequencer_auto.h              |  216 ++
 drivers/ddr/altera/sequencer_auto_ac_init.c      |   85 +
 drivers/ddr/altera/sequencer_auto_inst_init.c    |  270 ++
 drivers/ddr/altera/sequencer_defines.h           |  121 +
 include/configs/socfpga_cyclone5.h               |    1 +
 scripts/Makefile.spl                             |    1 +
 13 files changed, 6877 insertions(+)
 create mode 100644 arch/arm/include/asm/arch-socfpga/sdram.h
 create mode 100644 arch/arm/include/asm/arch-socfpga/sdram_config.h
 create mode 100644 drivers/ddr/altera/Makefile
 create mode 100644 drivers/ddr/altera/sdram.c
 create mode 100644 drivers/ddr/altera/sequencer.c
 create mode 100644 drivers/ddr/altera/sequencer.h
 create mode 100644 drivers/ddr/altera/sequencer_auto.h
 create mode 100644 drivers/ddr/altera/sequencer_auto_ac_init.c
 create mode 100644 drivers/ddr/altera/sequencer_auto_inst_init.c
 create mode 100644 drivers/ddr/altera/sequencer_defines.h

diff --git a/Makefile b/Makefile
index 1b3ebe7..e58e023 100644
--- a/Makefile
+++ b/Makefile
@@ -636,6 +636,7 @@ libs-y += drivers/power/ \
 libs-y += drivers/spi/
 libs-$(CONFIG_FMAN_ENET) += drivers/net/fm/
 libs-$(CONFIG_SYS_FSL_DDR) += drivers/ddr/fsl/
+libs-$(CONFIG_ALTERA_SDRAM) += drivers/ddr/altera/
 libs-y += drivers/serial/
 libs-y += drivers/usb/eth/
 libs-y += drivers/usb/gadget/
diff --git a/arch/arm/include/asm/arch-socfpga/sdram.h b/arch/arm/include/asm/arch-socfpga/sdram.h
new file mode 100644
index 0000000..83e45d2
--- /dev/null
+++ b/arch/arm/include/asm/arch-socfpga/sdram.h
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2014 Altera Corporation <www.altera.com>
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+#ifndef	_SDRAM_H_
+#define	_SDRAM_H_
+
+#ifndef __ASSEMBLY__
+
+/* function declaration */
+unsigned long sdram_calculate_size(void);
+unsigned sdram_mmr_init_full(unsigned int sdr_phy_reg);
+int sdram_calibration_full(void);
+
+extern int sdram_calibration(void);
+
+/* Group: sdr.phygrp.sccgrp                                                */
+#define SDR_PHYGRP_SCCGRP_ADDRESS 0x0
+/* Group: sdr.phygrp.phymgrgrp                                             */
+#define SDR_PHYGRP_PHYMGRGRP_ADDRESS 0x1000
+/* Group: sdr.phygrp.rwmgrgrp                                              */
+#define SDR_PHYGRP_RWMGRGRP_ADDRESS 0x2000
+/* Group: sdr.phygrp.datamgrgrp                                            */
+#define SDR_PHYGRP_DATAMGRGRP_ADDRESS 0x4000
+/* Group: sdr.phygrp.regfilegrp                                            */
+#define SDR_PHYGRP_REGFILEGRP_ADDRESS 0x4800
+/* Group: sdr.ctrlgrp                                                      */
+#define SDR_CTRLGRP_ADDRESS 0x5000
+/* Register: sdr.ctrlgrp.ctrlcfg                                           */
+#define SDR_CTRLGRP_CTRLCFG_ADDRESS 0x5000
+/* Register: sdr.ctrlgrp.dramtiming1                                       */
+#define SDR_CTRLGRP_DRAMTIMING1_ADDRESS 0x5004
+/* Register: sdr.ctrlgrp.dramtiming2                                       */
+#define SDR_CTRLGRP_DRAMTIMING2_ADDRESS 0x5008
+/* Register: sdr.ctrlgrp.dramtiming3                                       */
+#define SDR_CTRLGRP_DRAMTIMING3_ADDRESS 0x500c
+/* Register: sdr.ctrlgrp.dramtiming4                                       */
+#define SDR_CTRLGRP_DRAMTIMING4_ADDRESS 0x5010
+/* Register: sdr.ctrlgrp.lowpwrtiming                                      */
+#define SDR_CTRLGRP_LOWPWRTIMING_ADDRESS 0x5014
+/* Register: sdr.ctrlgrp.dramodt                                           */
+#define SDR_CTRLGRP_DRAMODT_ADDRESS 0x5018
+/* Register: sdr.ctrlgrp.dramaddrw                                         */
+#define SDR_CTRLGRP_DRAMADDRW_ADDRESS 0x502c
+/* Register: sdr.ctrlgrp.dramifwidth                                       */
+#define SDR_CTRLGRP_DRAMIFWIDTH_ADDRESS 0x5030
+/* Register: sdr.ctrlgrp.dramdevwidth                                      */
+#define SDR_CTRLGRP_DRAMDEVWIDTH_ADDRESS 0x5034
+/* Register: sdr.ctrlgrp.dramsts                                           */
+#define SDR_CTRLGRP_DRAMSTS_ADDRESS 0x5038
+/* Register: sdr.ctrlgrp.dramintr                                          */
+#define SDR_CTRLGRP_DRAMINTR_ADDRESS 0x503c
+/* Register: sdr.ctrlgrp.sbecount                                          */
+#define SDR_CTRLGRP_SBECOUNT_ADDRESS 0x5040
+/* Register: sdr.ctrlgrp.dbecount                                          */
+#define SDR_CTRLGRP_DBECOUNT_ADDRESS 0x5044
+/* Register: sdr.ctrlgrp.erraddr                                           */
+#define SDR_CTRLGRP_ERRADDR_ADDRESS 0x5048
+/* Register: sdr.ctrlgrp.dropcount                                         */
+#define SDR_CTRLGRP_DROPCOUNT_ADDRESS 0x504c
+/* Register: sdr.ctrlgrp.dropaddr                                          */
+#define SDR_CTRLGRP_DROPADDR_ADDRESS 0x5050
+/* Register: sdr.ctrlgrp.lowpwreq                                          */
+#define SDR_CTRLGRP_LOWPWREQ_ADDRESS 0x5054
+/* Register: sdr.ctrlgrp.lowpwrack                                         */
+#define SDR_CTRLGRP_LOWPWRACK_ADDRESS 0x5058
+/* Register: sdr.ctrlgrp.staticcfg                                         */
+#define SDR_CTRLGRP_STATICCFG_ADDRESS 0x505c
+/* Register: sdr.ctrlgrp.ctrlwidth                                         */
+#define SDR_CTRLGRP_CTRLWIDTH_ADDRESS 0x5060
+/* Register: sdr.ctrlgrp.cportwidth                                        */
+#define SDR_CTRLGRP_CPORTWIDTH_ADDRESS 0x5064
+/* Register: sdr.ctrlgrp.cportwmap                                         */
+#define SDR_CTRLGRP_CPORTWMAP_ADDRESS 0x5068
+/* Register: sdr.ctrlgrp.cportrmap                                         */
+#define SDR_CTRLGRP_CPORTRMAP_ADDRESS 0x506c
+/* Register: sdr.ctrlgrp.rfifocmap                                         */
+#define SDR_CTRLGRP_RFIFOCMAP_ADDRESS 0x5070
+/* Register: sdr.ctrlgrp.wfifocmap                                         */
+#define SDR_CTRLGRP_WFIFOCMAP_ADDRESS 0x5074
+/* Register: sdr.ctrlgrp.cportrdwr                                         */
+#define SDR_CTRLGRP_CPORTRDWR_ADDRESS 0x5078
+/* Register: sdr.ctrlgrp.portcfg                                           */
+#define SDR_CTRLGRP_PORTCFG_ADDRESS 0x507c
+/* Register: sdr.ctrlgrp.fpgaportrst                                       */
+#define SDR_CTRLGRP_FPGAPORTRST_ADDRESS 0x5080
+/* Register: sdr.ctrlgrp.fifocfg                                           */
+#define SDR_CTRLGRP_FIFOCFG_ADDRESS 0x5088
+/* Register: sdr.ctrlgrp.protportdefault				   */
+#define SDR_CTRLGRP_PROTPORTDEFAULT_ADDRESS 0x508c
+/* Register: sdr.ctrlgrp.protruleaddr					   */
+#define SDR_CTRLGRP_PROTRULEADDR_ADDRESS 0x5090
+/* Register: sdr.ctrlgrp.protruleid					   */
+#define SDR_CTRLGRP_PROTRULEID_ADDRESS 0x5094
+/* Register: sdr.ctrlgrp.protruledata					   */
+#define SDR_CTRLGRP_PROTRULEDATA_ADDRESS 0x5098
+/* Register: sdr.ctrlgrp.protrulerdwr					   */
+#define SDR_CTRLGRP_PROTRULERDWR_ADDRESS 0x509c
+/* Register: sdr.ctrlgrp.mppriority                                        */
+#define SDR_CTRLGRP_MPPRIORITY_ADDRESS 0x50ac
+/* Wide Register: sdr.ctrlgrp.mpweight                                     */
+#define SDR_CTRLGRP_MPWEIGHT_ADDRESS 0x50b0
+/* Register: sdr.ctrlgrp.mpweight.mpweight_0                               */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_ADDRESS 0x50b0
+/* Register: sdr.ctrlgrp.mpweight.mpweight_1                               */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_ADDRESS 0x50b4
+/* Register: sdr.ctrlgrp.mpweight.mpweight_2                               */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_ADDRESS 0x50b8
+/* Register: sdr.ctrlgrp.mpweight.mpweight_3                               */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_ADDRESS 0x50bc
+/* Register: sdr.ctrlgrp.mppacing.mppacing_0                               */
+#define SDR_CTRLGRP_MPPACING_MPPACING_0_ADDRESS 0x50c0
+/* Register: sdr.ctrlgrp.mppacing.mppacing_1                               */
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_ADDRESS 0x50c4
+/* Register: sdr.ctrlgrp.mppacing.mppacing_2                               */
+#define SDR_CTRLGRP_MPPACING_MPPACING_2_ADDRESS 0x50c8
+/* Register: sdr.ctrlgrp.mppacing.mppacing_3                               */
+#define SDR_CTRLGRP_MPPACING_MPPACING_3_ADDRESS 0x50cc
+/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_0                   */
+#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_ADDRESS 0x50d0
+/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_1                   */
+#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_ADDRESS 0x50d4
+/* Register: sdr.ctrlgrp.mpthresholdrst.mpthresholdrst_2                   */
+#define SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_ADDRESS 0x50d8
+/* Wide Register: sdr.ctrlgrp.phyctrl                                      */
+#define SDR_CTRLGRP_PHYCTRL_ADDRESS 0x5150
+/* Register: sdr.ctrlgrp.phyctrl.phyctrl_0                                 */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDRESS 0x5150
+/* Register: sdr.ctrlgrp.phyctrl.phyctrl_1                                 */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_ADDRESS 0x5154
+/* Register: sdr.ctrlgrp.phyctrl.phyctrl_2                                 */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_ADDRESS 0x5158
+/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_0                      */
+/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_0          */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_OFFSET 0x150
+/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_1                      */
+/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_1          */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_OFFSET 0x154
+/* Register instance: sdr::ctrlgrp::phyctrl.phyctrl_2                      */
+/* Register template referenced: sdr::ctrlgrp::phyctrl::phyctrl_2          */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_OFFSET 0x158
+
+/* Register template: sdr::ctrlgrp::ctrlcfg                                */
+#define SDR_CTRLGRP_CTRLCFG_OUTPUTREG_LSB 26
+#define SDR_CTRLGRP_CTRLCFG_OUTPUTREG_MASK 0x04000000
+#define SDR_CTRLGRP_CTRLCFG_BURSTTERMEN_LSB 25
+#define SDR_CTRLGRP_CTRLCFG_BURSTTERMEN_MASK 0x02000000
+#define SDR_CTRLGRP_CTRLCFG_BURSTINTREN_LSB 24
+#define SDR_CTRLGRP_CTRLCFG_BURSTINTREN_MASK 0x01000000
+#define SDR_CTRLGRP_CTRLCFG_NODMPINS_LSB 23
+#define SDR_CTRLGRP_CTRLCFG_NODMPINS_MASK 0x00800000
+#define SDR_CTRLGRP_CTRLCFG_DQSTRKEN_LSB 22
+#define SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK 0x00400000
+#define SDR_CTRLGRP_CTRLCFG_STARVELIMIT_LSB 16
+#define SDR_CTRLGRP_CTRLCFG_STARVELIMIT_MASK 0x003f0000
+#define SDR_CTRLGRP_CTRLCFG_REORDEREN_LSB 15
+#define SDR_CTRLGRP_CTRLCFG_REORDEREN_MASK 0x00008000
+#define SDR_CTRLGRP_CTRLCFG_GENDBE_LSB 14
+#define SDR_CTRLGRP_CTRLCFG_GENDBE_MASK 0x00004000
+#define SDR_CTRLGRP_CTRLCFG_GENSBE_LSB 13
+#define SDR_CTRLGRP_CTRLCFG_GENSBE_MASK 0x00002000
+#define SDR_CTRLGRP_CTRLCFG_CFG_ENABLE_ECC_CODE_OVERWRITES_LSB 12
+#define SDR_CTRLGRP_CTRLCFG_CFG_ENABLE_ECC_CODE_OVERWRITES_MASK 0x00001000
+#define SDR_CTRLGRP_CTRLCFG_ECCCORREN_LSB 11
+#define SDR_CTRLGRP_CTRLCFG_ECCCORREN_MASK 0x00000800
+#define SDR_CTRLGRP_CTRLCFG_ECCEN_LSB 10
+#define SDR_CTRLGRP_CTRLCFG_ECCEN_MASK 0x00000400
+#define SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB 8
+#define SDR_CTRLGRP_CTRLCFG_ADDRORDER_MASK 0x00000300
+#define SDR_CTRLGRP_CTRLCFG_MEMBL_LSB 3
+#define SDR_CTRLGRP_CTRLCFG_MEMBL_MASK 0x000000f8
+#define SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB 0
+#define SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK 0x00000007
+/* Register template: sdr::ctrlgrp::dramtiming1                            */
+#define SDR_CTRLGRP_DRAMTIMING1_TRFC_LSB 24
+#define SDR_CTRLGRP_DRAMTIMING1_TRFC_MASK 0xff000000
+#define SDR_CTRLGRP_DRAMTIMING1_TFAW_LSB 18
+#define SDR_CTRLGRP_DRAMTIMING1_TFAW_MASK 0x00fc0000
+#define SDR_CTRLGRP_DRAMTIMING1_TRRD_LSB 14
+#define SDR_CTRLGRP_DRAMTIMING1_TRRD_MASK 0x0003c000
+#define SDR_CTRLGRP_DRAMTIMING1_TCL_LSB 9
+#define SDR_CTRLGRP_DRAMTIMING1_TCL_MASK 0x00003e00
+#define SDR_CTRLGRP_DRAMTIMING1_TAL_LSB 4
+#define SDR_CTRLGRP_DRAMTIMING1_TAL_MASK 0x000001f0
+#define SDR_CTRLGRP_DRAMTIMING1_TCWL_LSB 0
+#define SDR_CTRLGRP_DRAMTIMING1_TCWL_MASK 0x0000000f
+/* Register template: sdr::ctrlgrp::dramtiming2                            */
+#define SDR_CTRLGRP_DRAMTIMING2_TWTR_LSB 25
+#define SDR_CTRLGRP_DRAMTIMING2_TWTR_MASK 0x1e000000
+#define SDR_CTRLGRP_DRAMTIMING2_TWR_LSB 21
+#define SDR_CTRLGRP_DRAMTIMING2_TWR_MASK 0x01e00000
+#define SDR_CTRLGRP_DRAMTIMING2_TRP_LSB 17
+#define SDR_CTRLGRP_DRAMTIMING2_TRP_MASK 0x001e0000
+#define SDR_CTRLGRP_DRAMTIMING2_TRCD_LSB 13
+#define SDR_CTRLGRP_DRAMTIMING2_TRCD_MASK 0x0001e000
+#define SDR_CTRLGRP_DRAMTIMING2_TREFI_LSB 0
+#define SDR_CTRLGRP_DRAMTIMING2_TREFI_MASK 0x00001fff
+/* Register template: sdr::ctrlgrp::dramtiming3                            */
+#define SDR_CTRLGRP_DRAMTIMING3_TCCD_LSB 19
+#define SDR_CTRLGRP_DRAMTIMING3_TCCD_MASK 0x00780000
+#define SDR_CTRLGRP_DRAMTIMING3_TMRD_LSB 15
+#define SDR_CTRLGRP_DRAMTIMING3_TMRD_MASK 0x00078000
+#define SDR_CTRLGRP_DRAMTIMING3_TRC_LSB 9
+#define SDR_CTRLGRP_DRAMTIMING3_TRC_MASK 0x00007e00
+#define SDR_CTRLGRP_DRAMTIMING3_TRAS_LSB 4
+#define SDR_CTRLGRP_DRAMTIMING3_TRAS_MASK 0x000001f0
+#define SDR_CTRLGRP_DRAMTIMING3_TRTP_LSB 0
+#define SDR_CTRLGRP_DRAMTIMING3_TRTP_MASK 0x0000000f
+/* Register template: sdr::ctrlgrp::dramtiming4                            */
+#define SDR_CTRLGRP_DRAMTIMING4_MINPWRSAVECYCLES_LSB 20
+#define SDR_CTRLGRP_DRAMTIMING4_MINPWRSAVECYCLES_MASK 0x00f00000
+#define SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_LSB 10
+#define SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_MASK 0x000ffc00
+#define SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_LSB 0
+#define SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_MASK 0x000003ff
+/* Register template: sdr::ctrlgrp::lowpwrtiming                           */
+#define SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_LSB 16
+#define SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_MASK 0x000f0000
+#define SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_LSB 0
+#define SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_MASK 0x0000ffff
+/* Register template: sdr::ctrlgrp::dramaddrw                              */
+#define SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB 13
+#define SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK 0x0000e000
+#define SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB 10
+#define SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK 0x00001c00
+#define SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB 5
+#define SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK 0x000003e0
+#define SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB 0
+#define SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK 0x0000001f
+/* Register template: sdr::ctrlgrp::dramifwidth                            */
+#define SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_LSB 0
+#define SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_MASK 0x000000ff
+/* Register template: sdr::ctrlgrp::dramdevwidth                           */
+#define SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_LSB 0
+#define SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_MASK 0x0000000f
+/* Register template: sdr::ctrlgrp::dramintr                               */
+#define SDR_CTRLGRP_DRAMINTR_INTRCLR_LSB 4
+#define SDR_CTRLGRP_DRAMINTR_INTRCLR_MASK 0x00000010
+#define SDR_CTRLGRP_DRAMINTR_CORRDROPMASK_LSB 3
+#define SDR_CTRLGRP_DRAMINTR_CORRDROPMASK_MASK 0x00000008
+#define SDR_CTRLGRP_DRAMINTR_DBEMASK_LSB 2
+#define SDR_CTRLGRP_DRAMINTR_DBEMASK_MASK 0x00000004
+#define SDR_CTRLGRP_DRAMINTR_SBEMASK_LSB 1
+#define SDR_CTRLGRP_DRAMINTR_SBEMASK_MASK 0x00000002
+#define SDR_CTRLGRP_DRAMINTR_INTREN_LSB 0
+#define SDR_CTRLGRP_DRAMINTR_INTREN_MASK 0x00000001
+/* Register template: sdr::ctrlgrp::sbecount                               */
+#define SDR_CTRLGRP_SBECOUNT_COUNT_LSB 0
+#define SDR_CTRLGRP_SBECOUNT_COUNT_MASK 0x000000ff
+/* Register template: sdr::ctrlgrp::dbecount                               */
+#define SDR_CTRLGRP_DBECOUNT_COUNT_LSB 0
+#define SDR_CTRLGRP_DBECOUNT_COUNT_MASK 0x000000ff
+/* Register template: sdr.ctrlgrp.lowpwreq                                 */
+#define SDR_CTRLGRP_LOWPWREQ_DEEPPWRDNREQ_LSB 0
+#define SDR_CTRLGRP_LOWPWREQ_DEEPPWRDNREQ_MASK 0x00000001
+#define SDR_CTRLGRP_LOWPWREQ_DEEPPWRDNMASK_LSB 1
+#define SDR_CTRLGRP_LOWPWREQ_DEEPPWRDNMASK_MASK 0x00000006
+#define SDR_CTRLGRP_LOWPWREQ_SELFRSHREQ_LSB 3
+#define SDR_CTRLGRP_LOWPWREQ_SELFRSHREQ_MASK 0x00000008
+#define SDR_CTRLGRP_LOWPWREQ_SELFRSHREQ_ENABLED 0x1
+#define SDR_CTRLGRP_LOWPWREQ_SELFRSHREQ_DISABLED 0x0
+#define SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_LSB 4
+#define SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_MASK 0x00000030
+#define SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_BOTH_CHIPS 0x3
+/* Register template: sdr::ctrlgrp::lowpwrack                              */
+#define SDR_CTRLGRP_LOWPWRACK_SELFRFSHACK_LSB 1
+#define SDR_CTRLGRP_LOWPWRACK_SELFRFSHACK_MASK 0x00000002
+/* Register template: sdr::ctrlgrp::staticcfg                              */
+#define SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB 3
+#define SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK 0x00000008
+#define SDR_CTRLGRP_STATICCFG_USEECCASDATA_LSB 2
+#define SDR_CTRLGRP_STATICCFG_USEECCASDATA_MASK 0x00000004
+#define SDR_CTRLGRP_STATICCFG_MEMBL_LSB 0
+#define SDR_CTRLGRP_STATICCFG_MEMBL_MASK 0x00000003
+/* Register template: sdr::ctrlgrp::ctrlwidth                              */
+#define SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_LSB 0
+#define SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_MASK 0x00000003
+/* Register template: sdr::ctrlgrp::cportwidth                             */
+#define SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_LSB 0
+#define SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_MASK 0x000fffff
+/* Register template: sdr::ctrlgrp::cportwmap                              */
+#define SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_LSB 0
+#define SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_MASK 0x3fffffff
+/* Register template: sdr::ctrlgrp::cportrmap                              */
+#define SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_LSB 0
+#define SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_MASK 0x3fffffff
+/* Register template: sdr::ctrlgrp::rfifocmap                              */
+#define SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_LSB 0
+#define SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_MASK 0x00ffffff
+/* Register template: sdr::ctrlgrp::wfifocmap                              */
+#define SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_LSB 0
+#define SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_MASK 0x00ffffff
+/* Register template: sdr::ctrlgrp::cportrdwr                              */
+#define SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_LSB 0
+#define SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_MASK 0x000fffff
+/* Register template: sdr::ctrlgrp::portcfg                                */
+#define SDR_CTRLGRP_PORTCFG_AUTOPCHEN_LSB 10
+#define SDR_CTRLGRP_PORTCFG_AUTOPCHEN_MASK 0x000ffc00
+#define SDR_CTRLGRP_PORTCFG_PORTPROTOCOL_LSB 0
+#define SDR_CTRLGRP_PORTCFG_PORTPROTOCOL_MASK 0x000003ff
+/* Register template: sdr::ctrlgrp::fifocfg                                */
+#define SDR_CTRLGRP_FIFOCFG_INCSYNC_LSB 10
+#define SDR_CTRLGRP_FIFOCFG_INCSYNC_MASK 0x00000400
+#define SDR_CTRLGRP_FIFOCFG_SYNCMODE_LSB 0
+#define SDR_CTRLGRP_FIFOCFG_SYNCMODE_MASK 0x000003ff
+/* Register template: sdr::ctrlgrp::mppriority                             */
+#define SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_LSB 0
+#define SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_MASK 0x3fffffff
+/* Wide Register template: sdr::ctrlgrp::mpweight                          */
+/* Register template: sdr::ctrlgrp::mpweight::mpweight_0                   */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_LSB 0
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mpweight::mpweight_1                   */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_LSB 18
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_MASK 0xfffc0000
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_LSB 0
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_MASK 0x0003ffff
+/* Register template: sdr::ctrlgrp::mpweight::mpweight_2                   */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_LSB 0
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mpweight::mpweight_3                   */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_LSB 0
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_MASK 0x0003ffff
+/* Wide Register template: sdr::ctrlgrp::mppacing                          */
+/* Register template: sdr::ctrlgrp::mppacing::mppacing_0                   */
+#define SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_LSB 0
+#define SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mppacing::mppacing_1                   */
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_LSB 28
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_MASK 0xf0000000
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_LSB 0
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_MASK 0x0fffffff
+/* Register template: sdr::ctrlgrp::mppacing::mppacing_2                   */
+#define SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_LSB 0
+#define SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mppacing::mppacing_3                   */
+#define SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_LSB 0
+#define SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_MASK 0x00ffffff
+/* Wide Register template: sdr::ctrlgrp::mpthresholdrst                    */
+/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_0       */
+#define \
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB 0
+#define  \
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK \
+0xffffffff
+/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_1       */
+#define \
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB 0
+#define \
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK \
+0xffffffff
+/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_2       */
+#define \
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB 0
+#define \
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK \
+0x0000ffff
+/* Register template: sdr::ctrlgrp::remappriority                          */
+#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_LSB 0
+#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_MASK 0x000000ff
+/* Wide Register template: sdr::ctrlgrp::phyctrl                           */
+/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_0                     */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_LSB 12
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH 20
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_MASK 0xfffff000
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(x) \
+ (((x) << 12) & 0xfffff000)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_LSB 10
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_MASK 0x00000c00
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(x) \
+ (((x) << 10) & 0x00000c00)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_LSB 9
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_MASK 0x00000200
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(x) \
+ (((x) << 9) & 0x00000200)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_LSB 8
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_MASK 0x00000100
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(x) \
+ (((x) << 8) & 0x00000100)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_LSB 6
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_MASK 0x000000c0
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(x) \
+ (((x) << 6) & 0x000000c0)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_LSB 4
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_MASK 0x00000030
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(x) \
+ (((x) << 4) & 0x00000030)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_LSB 2
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_MASK 0x0000000c
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(x) \
+ (((x) << 2) & 0x0000000c)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_LSB 0
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_MASK 0x00000003
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(x) \
+ (((x) << 0) & 0x00000003)
+/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_1                     */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_LSB 12
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH 20
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_MASK 0xfffff000
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(x) \
+ (((x) << 12) & 0xfffff000)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_LSB 0
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_MASK 0x00000fff
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(x) \
+ (((x) << 0) & 0x00000fff)
+/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_2                     */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_LSB 0
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_MASK 0x00000fff
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(x) \
+ (((x) << 0) & 0x00000fff)
+/* Register template: sdr::ctrlgrp::dramodt                                */
+#define SDR_CTRLGRP_DRAMODT_READ_LSB 4
+#define SDR_CTRLGRP_DRAMODT_READ_MASK 0x000000f0
+#define SDR_CTRLGRP_DRAMODT_WRITE_LSB 0
+#define SDR_CTRLGRP_DRAMODT_WRITE_MASK 0x0000000f
+/* Field instance: sdr::ctrlgrp::dramsts                                   */
+#define SDR_CTRLGRP_DRAMSTS_DBEERR_MASK 0x00000008
+#define SDR_CTRLGRP_DRAMSTS_SBEERR_MASK 0x00000004
+
+/* Options to control the duration of the SDRAM test */
+/* quick test, runs for around 5 s */
+#define SDRAM_TEST_FAST		0
+/* normal test, runs for around 30 s */
+#define SDRAM_TEST_NORMAL	1
+/* long test, runs for minutes */
+#define SDRAM_TEST_LONG		2
+
+/* SDRAM width macros for configurations with ECC */
+#define SDRAM_WIDTH_32BIT_WITH_ECC	40
+#define SDRAM_WIDTH_16BIT_WITH_ECC	24
+
+#endif
+#endif /* _SDRAM_H_ */
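
The *_LSB/*_MASK pairs above are meant to be used together for read-modify-write
updates of individual controller fields. A minimal sketch of the idiom (the
helper and function names here are illustrative only; the driver below uses its
own sdram_write_register_field() in sdram.c):

	static inline u32 sdr_set_field(u32 reg, u32 val, u32 lsb, u32 mask)
	{
		/* clear the field, then insert the new value */
		return (reg & ~mask) | ((val << lsb) & mask);
	}

	/* e.g. program a CAS latency of 7 into DRAMTIMING1 */
	static void example_set_tcl(void)
	{
		u32 reg = readl(SOCFPGA_SDR_ADDRESS + SDR_CTRLGRP_DRAMTIMING1_ADDRESS);

		reg = sdr_set_field(reg, 7, SDR_CTRLGRP_DRAMTIMING1_TCL_LSB,
				    SDR_CTRLGRP_DRAMTIMING1_TCL_MASK);
		writel(reg, SOCFPGA_SDR_ADDRESS + SDR_CTRLGRP_DRAMTIMING1_ADDRESS);
	}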
diff --git a/arch/arm/include/asm/arch-socfpga/sdram_config.h b/arch/arm/include/asm/arch-socfpga/sdram_config.h
new file mode 100644
index 0000000..5c3ba86
--- /dev/null
+++ b/arch/arm/include/asm/arch-socfpga/sdram_config.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#ifndef __SDRAM_CONFIG_H
+#define __SDRAM_CONFIG_H
+
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMTYPE			(2)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMBL			(8)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER		(0)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCEN			(1)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCCORREN		(1)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_REORDEREN		(1)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_STARVELIMIT		(10)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_DQSTRKEN			(0)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_NODMPINS			(0)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCWL			(6)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_AL			(0)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL			(7)
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The #if/#else is not required when this file is generated by the tools */
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD			(4)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW			(19)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC			(139)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI		(4160)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD		(8)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP		(8)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR		(8)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR		(4)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP			(4)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS			(19)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC			(26)
+#else
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD			(3)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW			(14)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC			(104)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI		(3120)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD		(6)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP		(6)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR		(6)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR		(4)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP			(3)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS			(14)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC			(20)
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TMRD			(4)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TCCD			(4)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_SELFRFSHEXIT		(512)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_PWRDOWNEXIT		(3)
+#define CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_AUTOPDCYCLES	(0)
+#define CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_CLKDISABLECYCLES	(8)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS		(10)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS		(15)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS		(3)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS			(1)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMIFWIDTH_IFWIDTH		(40)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMDEVWIDTH_DEVWIDTH		(8)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMINTR_INTREN			(0)
+#define CONFIG_HPS_SDR_CTRLCFG_LOWPWREQ_SELFRFSHMASK		(3)
+#define CONFIG_HPS_SDR_CTRLCFG_STATICCFG_MEMBL			(2)
+#define CONFIG_HPS_SDR_CTRLCFG_STATICCFG_USEECCASDATA		(0)
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLWIDTH_CTRLWIDTH		(2)
+#define CONFIG_HPS_SDR_CTRLCFG_PORTCFG_AUTOPCHEN		(0)
+#define CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_SYNCMODE			(0)
+#define CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_INCSYNC			(0)
+#define CONFIG_HPS_SDR_CTRLCFG_MPPRIORITY_USERPRIORITY		(0x3FFD1088)
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_0_STATICWEIGHT_31_0	(0x21084210)
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_STATICWEIGHT_49_32	(0x1EF84)
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_SUMOFWEIGHT_13_0	(0x2020)
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_2_SUMOFWEIGHT_45_14	(0x0)
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_3_SUMOFWEIGHT_63_46	(0xF800)
+#define CONFIG_HPS_SDR_CTRLCFG_PHYCTRL_PHYCTRL_0		(0x200)
+
+#define CONFIG_HPS_SDR_CTRLCFG_CPORTWIDTH_CPORTWIDTH		(0x44555)
+#define CONFIG_HPS_SDR_CTRLCFG_CPORTWMAP_CPORTWMAP		(0x2C011000)
+#define CONFIG_HPS_SDR_CTRLCFG_CPORTRMAP_CPORTRMAP		(0xB00088)
+#define CONFIG_HPS_SDR_CTRLCFG_RFIFOCMAP_RFIFOCMAP		(0x760210)
+#define CONFIG_HPS_SDR_CTRLCFG_WFIFOCMAP_WFIFOCMAP		(0x980543)
+#define CONFIG_HPS_SDR_CTRLCFG_CPORTRDWR_CPORTRDWR		(0x5A56A)
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_0_THRESHOLD1_31_0	(0x20820820)
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD1_59_32	(0x8208208)
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0	(0)
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4	(0x41041041)
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36	(0x410410)
+#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0 \
+(0x01010101)
+#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32 \
+(0x01010101)
+#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64 \
+(0x0101)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMODT_READ			(0)
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMODT_WRITE			(1)
+#define CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST_READ_PORT_USED	(0)
+#define CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST_WRITE_PORT_USED	(0)
+#define CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST_COMMAND_PORT_USED	(0)
+#define CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST			(0)
+
+#endif	/* __SDRAM_CONFIG_H */
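
For orientation, the address-width handoffs above fix the memory size that
sdram_calculate_size() in sdram.c is expected to report. Assuming the default
values in this file (1 chip select, 15 row bits, 3 bank bits, 10 column bits,
and a 40-bit interface, i.e. 32 data bits plus 8 ECC bits), the usual size
formula works out to:

	size = cs * 2^(rows + banks + cols) * data_width_bytes
	     = 1  * 2^(15 + 3 + 10)         * 4
	     = 1 GiB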
diff --git a/drivers/ddr/altera/Makefile b/drivers/ddr/altera/Makefile
new file mode 100644
index 0000000..38fba57
--- /dev/null
+++ b/drivers/ddr/altera/Makefile
@@ -0,0 +1,12 @@
+#
+# (C) Copyright 2000-2003
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# (C) Copyright 2010, Thomas Chou <thomas@wytron.com.tw>
+# Copyright (C) 2014 Altera Corporation <www.altera.com>
+#
+# SPDX-License-Identifier:	GPL-2.0+
+#
+
+obj-$(CONFIG_ALTERA_SDRAM) += sdram.o sequencer.o sequencer_auto_ac_init.o \
+			  sequencer_auto_inst_init.o
diff --git a/drivers/ddr/altera/sdram.c b/drivers/ddr/altera/sdram.c
new file mode 100644
index 0000000..546daff
--- /dev/null
+++ b/drivers/ddr/altera/sdram.c
@@ -0,0 +1,1307 @@
+/*
+ * Copyright (C) 2014 Altera Corporation <www.altera.com>
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+#include <common.h>
+#include <div64.h>
+#include <watchdog.h>
+#include <asm/arch/fpga_manager.h>
+#include <asm/arch/sdram.h>
+#include <asm/arch/sdram_config.h>
+#include <asm/arch/system_manager.h>
+#include <asm/io.h>
+
+#define COMPARE_FAIL_ACTION	return 1;
+
+#define ADDRORDER2_INFO \
+	"INFO: Changing address order to 2 (row, chip, bank, column)\n"
+#define ADDRORDER0_INFO \
+	"INFO: Changing address order to 0 (chip, row, bank, column)\n"
+
+/* define constant for 4G memory - used for SDRAM errata workaround */
+#define MEMSIZE_4G (4ULL * 1024ULL * 1024ULL * 1024ULL)
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static struct socfpga_system_manager *sysmgr_regs =
+	(struct socfpga_system_manager *)SOCFPGA_SYSMGR_ADDRESS;
+
+/* The function below is only applicable to SPL */
+static int compute_errata_rows(unsigned long long memsize, int cs, int width,
+			       int rows, int banks, int cols)
+{
+	unsigned long long newrows;
+	int inewrowslog2;
+	int bits;
+
+	debug("workaround rows - memsize %lld\n", memsize);
+	debug("workaround rows - cs        %d\n", cs);
+	debug("workaround rows - width     %d\n", width);
+	debug("workaround rows - rows      %d\n", rows);
+	debug("workaround rows - banks     %d\n", banks);
+	debug("workaround rows - cols      %d\n", cols);
+
+	newrows = lldiv(memsize, (cs * (width / 8)));
+	debug("rows workaround - term1 %lld\n", newrows);
+
+	newrows = lldiv(newrows, ((1 << banks) * (1 << cols)));
+	debug("rows workaround - term2 %lld\n", newrows);
+
+	/* Compute the Hamming weight - same as the number of bits set.
+	 * We need to check that the result is a power of 2 before
+	 * attempting to take log2 of the result.
+	 */
+	bits = hweight32(newrows);
+
+	debug("rows workaround - bits %d\n", bits);
+
+	if (bits != 1) {
+		printf("SDRAM workaround failed, bits set %d\n", bits);
+		return rows;
+	}
+
+	if (newrows > UINT_MAX) {
+		printf("SDRAM workaround rangecheck failed, %lld\n", newrows);
+		return rows;
+	}
+
+	inewrowslog2 = __ilog2((unsigned int)newrows);
+
+	debug("rows workaround - ilog2 %d, %d\n", inewrowslog2,
+	       (int)newrows);
+
+	if (inewrowslog2 == -1) {
+		printf("SDRAM workaround failed, newrows %d\n", (int)newrows);
+		return rows;
+	}
+
+	return inewrowslog2;
+}
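
As a worked example of the computation above, take the fixed 4 GiB workaround
size, the hard-coded width of 8 used by sdram_mmr_init_full() below, and the
default cs = 1, banks = 3, cols = 10 from sdram_config.h:

	newrows = 4 GiB / (1 * (8 / 8))         = 2^32
	newrows = 2^32 / ((1 << 3) * (1 << 10)) = 2^19
	rows    = __ilog2(2^19)                 = 19

so ROWBITS gets programmed as 19 rather than the real 15, which is what makes
the controller present a 4 GiB address space for the errata workaround.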
+
+typedef struct _sdram_prot_rule {
+	uint32_t	rule;		/* SDRAM protection rule number: 0-19 */
+	uint64_t	sdram_start;	/* SDRAM start address */
+	uint64_t	sdram_end;	/* SDRAM end address */
+	int		valid;		/* Rule valid or not? 1 - valid, 0 not*/
+
+	uint32_t	security;
+	uint32_t	portmask;
+	uint32_t	result;
+	uint32_t	lo_prot_id;
+	uint32_t	hi_prot_id;
+} sdram_prot_rule, *psdram_prot_rule;
+
+/* SDRAM protection rules are numbered 0-19, for a total of 20 rules. */
+
+static void sdram_set_rule(psdram_prot_rule prule)
+{
+	int regoffs;
+	uint32_t lo_addr_bits;
+	uint32_t hi_addr_bits;
+	int ruleno = prule->rule;
+
+	/* Select the rule */
+	regoffs = SDR_CTRLGRP_PROTRULERDWR_ADDRESS;
+	writel(ruleno, (SOCFPGA_SDR_ADDRESS + regoffs));
+
+	/* Obtain the address bits */
+	lo_addr_bits = (uint32_t)(((prule->sdram_start) >> 20ULL) & 0xFFF);
+	hi_addr_bits = (uint32_t)((((prule->sdram_end-1) >> 20ULL)) & 0xFFF);
+
+	debug("sdram set rule start %x, %lld\n", lo_addr_bits,
+	      prule->sdram_start);
+	debug("sdram set rule end   %x, %lld\n", hi_addr_bits,
+	      prule->sdram_end);
+
+	/* Set rule addresses */
+	regoffs = SDR_CTRLGRP_PROTRULEADDR_ADDRESS;
+	writel(lo_addr_bits | (hi_addr_bits << 12),
+	       (SOCFPGA_SDR_ADDRESS + regoffs));
+
+	/* Set rule protection ids */
+	regoffs = SDR_CTRLGRP_PROTRULEID_ADDRESS;
+	writel(prule->lo_prot_id | (prule->hi_prot_id << 12),
+	       (SOCFPGA_SDR_ADDRESS + regoffs));
+
+	/* Set the rule data */
+	regoffs = SDR_CTRLGRP_PROTRULEDATA_ADDRESS;
+	writel(prule->security | (prule->valid << 2) |
+	       (prule->portmask << 3) | (prule->result << 13),
+	       (SOCFPGA_SDR_ADDRESS + regoffs));
+
+	/* write the rule */
+	regoffs = SDR_CTRLGRP_PROTRULERDWR_ADDRESS;
+	writel(ruleno | (1L << 5),
+	       (SOCFPGA_SDR_ADDRESS + regoffs));
+
+	/* Set rule number to 0 by default */
+	writel(0, (SOCFPGA_SDR_ADDRESS + regoffs));
+}
+
+static void sdram_get_rule(psdram_prot_rule prule)
+{
+	int regoffs;
+	uint32_t protruleaddr;
+	uint32_t protruleid;
+	uint32_t protruledata;
+	int ruleno = prule->rule;
+
+	/* Read the rule */
+	regoffs = SDR_CTRLGRP_PROTRULERDWR_ADDRESS;
+	writel(ruleno, (SOCFPGA_SDR_ADDRESS + regoffs));
+	writel(ruleno | (1L << 6),
+	       (SOCFPGA_SDR_ADDRESS + regoffs));
+
+	/* Get the addresses */
+	regoffs = SDR_CTRLGRP_PROTRULEADDR_ADDRESS;
+	protruleaddr = readl(SOCFPGA_SDR_ADDRESS + regoffs);
+	prule->sdram_start = (protruleaddr & 0xFFF) << 20;
+	prule->sdram_end = ((protruleaddr >> 12) & 0xFFF) << 20;
+
+	/* Get the configured protection IDs */
+	regoffs = SDR_CTRLGRP_PROTRULEID_ADDRESS;
+	protruleid = readl(SOCFPGA_SDR_ADDRESS + regoffs);
+	prule->lo_prot_id = protruleid & 0xFFF;
+	prule->hi_prot_id = (protruleid >> 12) & 0xFFF;
+
+	/* Get protection data */
+	regoffs = SDR_CTRLGRP_PROTRULEDATA_ADDRESS;
+	protruledata = readl(SOCFPGA_SDR_ADDRESS + regoffs);
+
+	prule->security = protruledata & 0x3;
+	prule->valid = (protruledata >> 2) & 0x1;
+	prule->portmask = (protruledata >> 3) & 0x3FF;
+	prule->result = (protruledata >> 13) & 0x1;
+}
+
+static void sdram_set_protection_config(uint64_t sdram_start, uint64_t sdram_end)
+{
+	sdram_prot_rule rule;
+	int rules;
+	int regoffs = SDR_CTRLGRP_PROTPORTDEFAULT_ADDRESS;
+
+	/* Start by accepting all SDRAM transactions */
+	writel(0x0, (SOCFPGA_SDR_ADDRESS + regoffs));
+
+	/* Clear all protection rules for warm boot case */
+
+	rule.sdram_start = 0;
+	rule.sdram_end = 0;
+	rule.lo_prot_id = 0;
+	rule.hi_prot_id = 0;
+	rule.portmask = 0;
+	rule.security = 0;
+	rule.result = 0;
+	rule.valid = 0;
+	rule.rule = 0;
+
+	for (rules = 0; rules < 20; rules++) {
+		rule.rule = rules;
+		sdram_set_rule(&rule);
+	}
+
+	/* new rule: accept SDRAM */
+	rule.sdram_start = sdram_start;
+	rule.sdram_end = sdram_end;
+	rule.lo_prot_id = 0x0;
+	rule.hi_prot_id = 0xFFF;
+	rule.portmask = 0x3FF;
+	rule.security = 0x3;
+	rule.result = 0;
+	rule.valid = 1;
+	rule.rule = 0;
+
+	/* set new rule */
+	sdram_set_rule(&rule);
+
+	/* default rule: reject everything */
+	writel(0x3ff, (SOCFPGA_SDR_ADDRESS + regoffs));
+}
+
+static void sdram_dump_protection_config(void)
+{
+	sdram_prot_rule rule;
+	int rules;
+	int regoffs = SDR_CTRLGRP_PROTPORTDEFAULT_ADDRESS;
+
+	debug("SDRAM Prot rule, default %x\n",
+	      readl(SOCFPGA_SDR_ADDRESS + regoffs));
+
+	for (rules = 0; rules < 20; rules++) {
+		sdram_get_rule(&rule);
+		debug("Rule %d, rules ...\n", rules);
+		debug("    sdram start %llx\n", rule.sdram_start);
+		debug("    sdram end   %llx\n", rule.sdram_end);
+		debug("    low prot id %d, hi prot id %d\n",
+		      rule.lo_prot_id,
+		      rule.hi_prot_id);
+		debug("    portmask %x\n", rule.portmask);
+		debug("    security %d\n", rule.security);
+		debug("    result %d\n", rule.result);
+		debug("    valid %d\n", rule.valid);
+	}
+}
+
+/* Function to update a bit field within a register value */
+static unsigned sdram_write_register_field(unsigned masked_value,
+	unsigned data, unsigned shift, unsigned mask)
+{
+	masked_value &= ~(mask);
+	masked_value |= (data << shift) & mask;
+	return masked_value;
+}
+
+/* Function to write to a register and verify the write */
+static unsigned sdram_write_verify(unsigned register_offset, unsigned reg_value)
+{
+#ifndef SDRAM_MMR_SKIP_VERIFY
+	unsigned reg_value1;
+#endif
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(SOCFPGA_SDR_ADDRESS+register_offset), reg_value);
+	/* Write to register */
+	writel(reg_value, (SOCFPGA_SDR_ADDRESS + register_offset));
+#ifndef SDRAM_MMR_SKIP_VERIFY
+	debug("   Read and verify...");
+	/* Read back the written value */
+	reg_value1 = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	/* Indicate failure if the value does not match */
+	if (reg_value1 != reg_value) {
+		debug("FAIL - Address 0x%08x Expected 0x%08x Data 0x%08x\n",
+			(SOCFPGA_SDR_ADDRESS+register_offset),
+			reg_value, reg_value1);
+		return 1;
+	}
+	debug("correct!\n");
+#endif	/* SDRAM_MMR_SKIP_VERIFY */
+	return 0;
+}
+
+/* Function to initialize SDRAM MMR */
+unsigned sdram_mmr_init_full(unsigned int sdr_phy_reg)
+{
+	unsigned long register_offset, reg_value;
+	unsigned long status = 0;
+	int addrorder;
+	char *paddrorderinfo = NULL;
+
+#if defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS) && \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS) && \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS) && \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS) && \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS)
+
+	int cs = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS;
+	int width = 8;
+	int rows = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS;
+	int banks = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS;
+	int cols = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS;
+	unsigned long long workaround_memsize = MEMSIZE_4G;
+
+	writel(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS,
+	       &sysmgr_regs->iswgrp_handoff[4]);
+#endif
+
+	/***** CTRLCFG *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMTYPE) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMBL) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCEN) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCCORREN) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_REORDEREN) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_STARVELIMIT) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_DQSTRKEN) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_NODMPINS)
+	debug("\nConfiguring CTRLCFG\n");
+	register_offset = SDR_CTRLGRP_CTRLCFG_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMTYPE
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMTYPE,
+			SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB,
+			SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMBL
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMBL,
+			SDR_CTRLGRP_CTRLCFG_MEMBL_LSB,
+			SDR_CTRLGRP_CTRLCFG_MEMBL_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER
+
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Set the addrorder field of the SDRAM control register
+	 * based on the CSBITs setting.
+	 */
+
+	switch (CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS) {
+	case 1:
+		addrorder = 0; /* chip, row, bank, column */
+		if (CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER != 0)
+			paddrorderinfo = ADDRORDER0_INFO;
+		break;
+	case 2:
+		addrorder = 2; /* row, chip, bank, column */
+		if (CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER != 2)
+			paddrorderinfo = ADDRORDER2_INFO;
+		break;
+	default:
+		addrorder = CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER;
+		break;
+	}
+	if (paddrorderinfo)
+		printf(paddrorderinfo);
+
+	reg_value = sdram_write_register_field(reg_value,
+			addrorder,
+			SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB,
+			SDR_CTRLGRP_CTRLCFG_ADDRORDER_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCEN
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCEN,
+			SDR_CTRLGRP_CTRLCFG_ECCEN_LSB,
+			SDR_CTRLGRP_CTRLCFG_ECCEN_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCCORREN
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCCORREN,
+			SDR_CTRLGRP_CTRLCFG_ECCCORREN_LSB,
+			SDR_CTRLGRP_CTRLCFG_ECCCORREN_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_REORDEREN
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_REORDEREN,
+			SDR_CTRLGRP_CTRLCFG_REORDEREN_LSB,
+			SDR_CTRLGRP_CTRLCFG_REORDEREN_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_STARVELIMIT
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_STARVELIMIT,
+			SDR_CTRLGRP_CTRLCFG_STARVELIMIT_LSB,
+			SDR_CTRLGRP_CTRLCFG_STARVELIMIT_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_DQSTRKEN
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_DQSTRKEN,
+			SDR_CTRLGRP_CTRLCFG_DQSTRKEN_LSB,
+			SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_NODMPINS
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_NODMPINS,
+			SDR_CTRLGRP_CTRLCFG_NODMPINS_LSB,
+			SDR_CTRLGRP_CTRLCFG_NODMPINS_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** DRAMTIMING1 *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCWL) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_AL) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC)
+	debug("Configuring DRAMTIMING1\n");
+	register_offset = SDR_CTRLGRP_DRAMTIMING1_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCWL
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCWL,
+			SDR_CTRLGRP_DRAMTIMING1_TCWL_LSB,
+			SDR_CTRLGRP_DRAMTIMING1_TCWL_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_AL
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_AL,
+			SDR_CTRLGRP_DRAMTIMING1_TAL_LSB,
+			SDR_CTRLGRP_DRAMTIMING1_TAL_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL,
+			SDR_CTRLGRP_DRAMTIMING1_TCL_LSB,
+			SDR_CTRLGRP_DRAMTIMING1_TCL_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD,
+			SDR_CTRLGRP_DRAMTIMING1_TRRD_LSB,
+			SDR_CTRLGRP_DRAMTIMING1_TRRD_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW,
+			SDR_CTRLGRP_DRAMTIMING1_TFAW_LSB,
+			SDR_CTRLGRP_DRAMTIMING1_TFAW_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC,
+			SDR_CTRLGRP_DRAMTIMING1_TRFC_LSB,
+			SDR_CTRLGRP_DRAMTIMING1_TRFC_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** DRAMTIMING2 *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR)
+	debug("Configuring DRAMTIMING2\n");
+	register_offset = SDR_CTRLGRP_DRAMTIMING2_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI,
+			SDR_CTRLGRP_DRAMTIMING2_TREFI_LSB,
+			SDR_CTRLGRP_DRAMTIMING2_TREFI_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD,
+			SDR_CTRLGRP_DRAMTIMING2_TRCD_LSB,
+			SDR_CTRLGRP_DRAMTIMING2_TRCD_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP,
+			SDR_CTRLGRP_DRAMTIMING2_TRP_LSB,
+			SDR_CTRLGRP_DRAMTIMING2_TRP_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR,
+			SDR_CTRLGRP_DRAMTIMING2_TWR_LSB,
+			SDR_CTRLGRP_DRAMTIMING2_TWR_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR,
+			SDR_CTRLGRP_DRAMTIMING2_TWTR_LSB,
+			SDR_CTRLGRP_DRAMTIMING2_TWTR_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** DRAMTIMING3 *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TMRD) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TCCD)
+	debug("Configuring DRAMTIMING3\n");
+	register_offset = SDR_CTRLGRP_DRAMTIMING3_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP,
+			SDR_CTRLGRP_DRAMTIMING3_TRTP_LSB,
+			SDR_CTRLGRP_DRAMTIMING3_TRTP_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS,
+			SDR_CTRLGRP_DRAMTIMING3_TRAS_LSB,
+			SDR_CTRLGRP_DRAMTIMING3_TRAS_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC,
+			SDR_CTRLGRP_DRAMTIMING3_TRC_LSB,
+			SDR_CTRLGRP_DRAMTIMING3_TRC_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TMRD
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TMRD,
+			SDR_CTRLGRP_DRAMTIMING3_TMRD_LSB,
+			SDR_CTRLGRP_DRAMTIMING3_TMRD_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TCCD
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TCCD,
+			SDR_CTRLGRP_DRAMTIMING3_TCCD_LSB,
+			SDR_CTRLGRP_DRAMTIMING3_TCCD_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** DRAMTIMING4 *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_SELFRFSHEXIT) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_PWRDOWNEXIT)
+	debug("Configuring DRAMTIMING4\n");
+	register_offset = SDR_CTRLGRP_DRAMTIMING4_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_SELFRFSHEXIT
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_SELFRFSHEXIT,
+			SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_LSB,
+			SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_PWRDOWNEXIT
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_PWRDOWNEXIT,
+			SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_LSB,
+			SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** LOWPWRTIMING *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_AUTOPDCYCLES) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_CLKDISABLECYCLES)
+	debug("Configuring LOWPWRTIMING\n");
+	register_offset = SDR_CTRLGRP_LOWPWRTIMING_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_AUTOPDCYCLES
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_AUTOPDCYCLES,
+			SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_LSB,
+			SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_CLKDISABLECYCLES
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_CLKDISABLECYCLES,
+			SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_LSB,
+			SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+	/***** DRAMADDRW *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS)
+	debug("Configuring DRAMADDRW\n");
+	register_offset = SDR_CTRLGRP_DRAMADDRW_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS,
+			SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB,
+			SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Update Preloader to artificially increase the number of rows so
+	 * that the memory thinks it has 4GB of RAM.
+	 */
+	rows = compute_errata_rows(workaround_memsize, cs, width, rows, banks,
+				   cols);
+
+	reg_value = sdram_write_register_field(reg_value,
+			rows,
+			SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB,
+			SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS,
+			SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB,
+			SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Set SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB to
+	 * log2(number of chip select bits). Since there's only
+	 * 1 or 2 chip selects, log2(1) => 0, and log2(2) => 1,
+	 * which is the same as "chip selects" - 1.
+	 */
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS - 1,
+			SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB,
+			SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+	/***** DRAMIFWIDTH *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMIFWIDTH_IFWIDTH
+	debug("Configuring DRAMIFWIDTH\n");
+	register_offset = SDR_CTRLGRP_DRAMIFWIDTH_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMIFWIDTH_IFWIDTH,
+			SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_LSB,
+			SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** DRAMDEVWIDTH *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMDEVWIDTH_DEVWIDTH
+	debug("Configuring DRAMDEVWIDTH\n");
+	register_offset = SDR_CTRLGRP_DRAMDEVWIDTH_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMDEVWIDTH_DEVWIDTH,
+			SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_LSB,
+			SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** LOWPWREQ *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_LOWPWREQ_SELFRFSHMASK
+	debug("Configuring LOWPWREQ\n");
+	register_offset = SDR_CTRLGRP_LOWPWREQ_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_LOWPWREQ_SELFRFSHMASK,
+			SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_LSB,
+			SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** DRAMINTR *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMINTR_INTREN
+	debug("Configuring DRAMINTR\n");
+	register_offset = SDR_CTRLGRP_DRAMINTR_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMINTR_INTREN,
+			SDR_CTRLGRP_DRAMINTR_INTREN_LSB,
+			SDR_CTRLGRP_DRAMINTR_INTREN_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** STATICCFG *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_STATICCFG_MEMBL) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_STATICCFG_USEECCASDATA)
+	debug("Configuring STATICCFG\n");
+	register_offset = SDR_CTRLGRP_STATICCFG_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_STATICCFG_MEMBL
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_STATICCFG_MEMBL,
+			SDR_CTRLGRP_STATICCFG_MEMBL_LSB,
+			SDR_CTRLGRP_STATICCFG_MEMBL_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_STATICCFG_USEECCASDATA
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_STATICCFG_USEECCASDATA,
+			SDR_CTRLGRP_STATICCFG_USEECCASDATA_LSB,
+			SDR_CTRLGRP_STATICCFG_USEECCASDATA_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** CTRLWIDTH *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CTRLWIDTH_CTRLWIDTH
+	debug("Configuring CTRLWIDTH\n");
+	register_offset = SDR_CTRLGRP_CTRLWIDTH_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLWIDTH_CTRLWIDTH,
+			SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_LSB,
+			SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** PORTCFG *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_PORTCFG_AUTOPCHEN
+	debug("Configuring PORTCFG\n");
+	register_offset = SDR_CTRLGRP_PORTCFG_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_PORTCFG_AUTOPCHEN,
+			SDR_CTRLGRP_PORTCFG_AUTOPCHEN_LSB,
+			SDR_CTRLGRP_PORTCFG_AUTOPCHEN_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** FIFOCFG *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_SYNCMODE) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_INCSYNC)
+	debug("Configuring FIFOCFG\n");
+	register_offset = SDR_CTRLGRP_FIFOCFG_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_SYNCMODE
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_SYNCMODE,
+			SDR_CTRLGRP_FIFOCFG_SYNCMODE_LSB,
+			SDR_CTRLGRP_FIFOCFG_SYNCMODE_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_INCSYNC
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_INCSYNC,
+			SDR_CTRLGRP_FIFOCFG_INCSYNC_LSB,
+			SDR_CTRLGRP_FIFOCFG_INCSYNC_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPPRIORITY *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPPRIORITY_USERPRIORITY
+	debug("Configuring MPPRIORITY\n");
+	register_offset = SDR_CTRLGRP_MPPRIORITY_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_MPPRIORITY_USERPRIORITY,
+			SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_LSB,
+			SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPWEIGHT_MPWEIGHT_0 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_0_STATICWEIGHT_31_0
+	debug("Configuring MPWEIGHT_MPWEIGHT_0\n");
+	register_offset = SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+		CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_0_STATICWEIGHT_31_0,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_LSB,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPWEIGHT_MPWEIGHT_1 *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_STATICWEIGHT_49_32) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_SUMOFWEIGHT_13_0)
+	debug("Configuring MPWEIGHT_MPWEIGHT_1\n");
+	register_offset = SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_STATICWEIGHT_49_32
+	reg_value = sdram_write_register_field(reg_value,
+		CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_STATICWEIGHT_49_32,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_LSB,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_SUMOFWEIGHT_13_0
+	reg_value = sdram_write_register_field(reg_value,
+		CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_SUMOFWEIGHT_13_0,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_LSB,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPWEIGHT_MPWEIGHT_2 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_2_SUMOFWEIGHT_45_14
+	debug("Configuring MPWEIGHT_MPWEIGHT_2\n");
+	register_offset = SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+		CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_2_SUMOFWEIGHT_45_14,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_LSB,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPWEIGHT_MPWEIGHT_3 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_3_SUMOFWEIGHT_63_46
+	debug("Configuring MPWEIGHT_MPWEIGHT_3\n");
+	register_offset = SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+		CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_3_SUMOFWEIGHT_63_46,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_LSB,
+		SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPPACING_MPPACING_0 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPPACING_0_THRESHOLD1_31_0
+	debug("Configuring MPPACING_MPPACING_0\n");
+	register_offset = SDR_CTRLGRP_MPPACING_MPPACING_0_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_0_THRESHOLD1_31_0,
+			SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_LSB,
+			SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+	/***** MPPACING_MPPACING_1 *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD1_59_32) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0)
+	debug("Configuring MPPACING_MPPACING_1\n");
+	register_offset = SDR_CTRLGRP_MPPACING_MPPACING_1_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD1_59_32
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD1_59_32,
+			SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_LSB,
+			SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0,
+			SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_LSB,
+			SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+	/***** MPPACING_MPPACING_2 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4
+	debug("Configuring MPPACING_MPPACING_2\n");
+	register_offset = SDR_CTRLGRP_MPPACING_MPPACING_2_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4,
+			SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_LSB,
+			SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPPACING_MPPACING_3 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36
+	debug("Configuring MPPACING_MPPACING_3\n");
+	register_offset = SDR_CTRLGRP_MPPACING_MPPACING_3_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36,
+			SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_LSB,
+			SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPTHRESHOLDRST_MPTHRESHOLDRST_0 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0
+	debug("Configuring MPTHRESHOLDRST_MPTHRESHOLDRST_0\n");
+	register_offset = SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+	CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0,
+	SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB,
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPTHRESHOLDRST_MPTHRESHOLDRST_1 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32
+	debug("Configuring MPTHRESHOLDRST_MPTHRESHOLDRST_1\n");
+	register_offset = SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+	CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32,
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB,
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+
+	/***** MPTHRESHOLDRST_MPTHRESHOLDRST_2 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64
+	debug("Configuring MPTHRESHOLDRST_MPTHRESHOLDRST_2\n");
+	register_offset = SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+	CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64,
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB,
+SDR_CTRLGRP_MPTHRESHOLDRST_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK);
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+	/***** PHYCTRL_PHYCTRL_0 *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_PHYCTRL_PHYCTRL_0
+	debug("Configuring PHYCTRL_PHYCTRL_0\n");
+	register_offset = SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDRESS;
+	/* Read original register value */
+	reg_value = CONFIG_HPS_SDR_CTRLCFG_PHYCTRL_PHYCTRL_0;
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+/* newly added registers */
+	/***** CPORTWIDTH_CPORTWIDTH *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CPORTWIDTH_CPORTWIDTH
+	debug("Configuring CPORTWIDTH\n");
+	register_offset = SDR_CTRLGRP_CPORTWIDTH_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CPORTWIDTH_CPORTWIDTH,
+			SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_LSB,
+			SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_MASK);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(SOCFPGA_SDR_ADDRESS+register_offset),
+		(unsigned)reg_value);
+	writel(reg_value, (SOCFPGA_SDR_ADDRESS + register_offset));
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+#endif
+
+	/***** CPORTWMAP_CPORTWMAP *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CPORTWMAP_CPORTWMAP
+	debug("Configuring CPORTWMAP\n");
+	register_offset = SDR_CTRLGRP_CPORTWMAP_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CPORTWMAP_CPORTWMAP,
+			SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_LSB,
+			SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_MASK);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(SOCFPGA_SDR_ADDRESS+register_offset),
+		(unsigned)reg_value);
+	writel(reg_value, (SOCFPGA_SDR_ADDRESS + register_offset));
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+#endif
+
+	/***** CPORTRMAP_CPORTRMAP *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CPORTRMAP_CPORTRMAP
+	debug("Configuring CPORTRMAP\n");
+	register_offset = SDR_CTRLGRP_CPORTRMAP_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CPORTRMAP_CPORTRMAP,
+			SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_LSB,
+			SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_MASK);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(SOCFPGA_SDR_ADDRESS+register_offset),
+		(unsigned)reg_value);
+	writel(reg_value, (SOCFPGA_SDR_ADDRESS + register_offset));
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+#endif
+
+	/***** RFIFOCMAP_RFIFOCMAP *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_RFIFOCMAP_RFIFOCMAP
+	debug("Configuring RFIFOCMAP\n");
+	register_offset = SDR_CTRLGRP_RFIFOCMAP_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_RFIFOCMAP_RFIFOCMAP,
+			SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_LSB,
+			SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_MASK);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(SOCFPGA_SDR_ADDRESS+register_offset),
+		(unsigned)reg_value);
+	writel(reg_value, (SOCFPGA_SDR_ADDRESS + register_offset));
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+#endif
+
+	/***** WFIFOCMAP_WFIFOCMAP *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_WFIFOCMAP_WFIFOCMAP
+	debug("Configuring WFIFOCMAP\n");
+	register_offset = SDR_CTRLGRP_WFIFOCMAP_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_WFIFOCMAP_WFIFOCMAP,
+			SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_LSB,
+			SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_MASK);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(SOCFPGA_SDR_ADDRESS+register_offset),
+		(unsigned)reg_value);
+	writel(reg_value, (SOCFPGA_SDR_ADDRESS + register_offset));
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+#endif
+
+	/***** CPORTRDWR_CPORTRDWR *****/
+#ifdef CONFIG_HPS_SDR_CTRLCFG_CPORTRDWR_CPORTRDWR
+	debug("Configuring CPORTRDWR\n");
+	register_offset = SDR_CTRLGRP_CPORTRDWR_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_CPORTRDWR_CPORTRDWR,
+			SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_LSB,
+			SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_MASK);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(SOCFPGA_SDR_ADDRESS+register_offset),
+		(unsigned)reg_value);
+	writel(reg_value, (SOCFPGA_SDR_ADDRESS + register_offset));
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+#endif
+/* end of newly added registers */
+
+
+	/***** DRAMODT *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_DRAMODT_READ) || \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMODT_WRITE)
+	debug("Configuring DRAMODT\n");
+	register_offset = SDR_CTRLGRP_DRAMODT_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMODT_READ
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMODT_READ,
+			SDR_CTRLGRP_DRAMODT_READ_LSB,
+			SDR_CTRLGRP_DRAMODT_READ_MASK);
+#endif
+#ifdef CONFIG_HPS_SDR_CTRLCFG_DRAMODT_WRITE
+	reg_value = sdram_write_register_field(reg_value,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMODT_WRITE,
+			SDR_CTRLGRP_DRAMODT_WRITE_LSB,
+			SDR_CTRLGRP_DRAMODT_WRITE_MASK);
+#endif
+	if (sdram_write_verify(register_offset,	reg_value) == 1) {
+		status = 1;
+		COMPARE_FAIL_ACTION
+	}
+#endif
+
+	/***** FPGAPORTRST *****/
+#if defined(CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST_READ_PORT_USED)
+	debug("Configuring FPGAPORTRST\n");
+	register_offset = SDR_CTRLGRP_FPGAPORTRST_ADDRESS;
+	/* The whole register value will be provided by the configuration */
+	reg_value = CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST;
+
+	/* saving this value to SYSMGR.ISWGRP.HANDOFF.FPGA2SDR */
+	writel(reg_value, &sysmgr_regs->iswgrp_handoff[3]);
+
+	/* only enable if the FPGA is programmed */
+	if (fpgamgr_test_fpga_ready()) {
+		if (sdram_write_verify(register_offset,	reg_value) == 1) {
+			/*
+			 * Set status to 1 so that a failed status is returned
+			 * even if COMPARE_FAIL_ACTION is defined to do nothing.
+			 * This caters for the scenario where the user wishes
+			 * to continue initialization even though verification
+			 * failed.
+			 */
+			status = 1;
+			COMPARE_FAIL_ACTION
+		}
+	}
+#endif
+
+	/* Restore the SDR PHY Register if valid */
+	if (sdr_phy_reg != 0xffffffff)
+		writel(sdr_phy_reg, SOCFPGA_SDR_ADDRESS +
+			SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDRESS);
+
+/***** Final step - apply configuration changes *****/
+	debug("Configuring STATICCFG_\n");
+	register_offset = SDR_CTRLGRP_STATICCFG_ADDRESS;
+	/* Read original register value */
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	reg_value = sdram_write_register_field(reg_value, 1,
+			SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB,
+			SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(SOCFPGA_SDR_ADDRESS+register_offset),
+		(unsigned)reg_value);
+	writel(reg_value, (SOCFPGA_SDR_ADDRESS + register_offset));
+	reg_value = readl(SOCFPGA_SDR_ADDRESS + register_offset);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+
+	sdram_set_protection_config(0, sdram_calculate_size());
+
+	sdram_dump_protection_config();
+
+	return status;
+}
+
+/* Calculate the SDRAM device size based on the SDRAM controller parameters.
+ * The size is returned in bytes.
+ *
+ * NOTE:
+ * This function is compiled and linked into both the preloader and
+ * U-Boot (and possibly others), so if this function changes, the preloader
+ * and U-Boot must be updated simultaneously.
+ */
+unsigned long sdram_calculate_size(void)
+{
+	unsigned long temp;
+	unsigned long row, bank, col, cs, width;
+
+	temp = readl(SOCFPGA_SDR_ADDRESS +
+		SDR_CTRLGRP_DRAMADDRW_ADDRESS);
+	col = (temp & SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK) >>
+		SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB;
+
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Use ROWBITS from Quartus/QSys to calculate the SDRAM size,
+	 * since the FB specifies we modify ROWBITs to work around the
+	 * SDRAM controller issue.
+	 *
+	 * If the stored handoff value for rows is 0, it probably means
+	 * the preloader is older than U-Boot. Use the #define from the
+	 * SOCEDS tools per Crucible review uboot-socfpga-204. Note that
+	 * this is not a supported configuration and is not tested; the
+	 * customer should be using a preloader and U-Boot built from the
+	 * same tag.
+	 */
+	row = readl(&sysmgr_regs->iswgrp_handoff[4]);
+	if (row == 0)
+		row = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS;
+	/* If the stored handoff value for rows is greater than
+	 * the field width in the sdr.dramaddrw register then
+	 * something is very wrong. Revert to using the #define
+	 * value handed off by the SOCEDS tool chain instead of
+	 * using a broken value.
+	 */
+	if (row > 31)
+		row = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS;
+
+	bank = (temp & SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK) >>
+		SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB;
+
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Use CSBITs from Quartus/QSys to calculate the SDRAM size,
+	 * since the FB specifies we modify CSBITs to work around the
+	 * SDRAM controller issue.
+	 */
+	cs = (temp & SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK) >>
+	      SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB;
+	cs += 1;
+
+	cs = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS;
+
+	width = readl(SOCFPGA_SDR_ADDRESS +
+		SDR_CTRLGRP_DRAMIFWIDTH_ADDRESS);
+	/* ECC is not counted towards the size as it is not addressable */
+	if (width == SDRAM_WIDTH_32BIT_WITH_ECC)
+		width = 32;
+	if (width == SDRAM_WIDTH_16BIT_WITH_ECC)
+		width = 16;
+
+	/* calculate the SDRAM size based on this info */
+	temp = 1 << (row + bank + col);
+	temp = temp * cs * (width  / 8);
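+	/*
+	 * Illustrative example (values not taken from any particular board):
+	 * with row = 15, bank = 3, col = 10, cs = 1 and width = 32 this
+	 * gives (1 << 28) * 1 * 4 bytes = 1 GiB.
+	 */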
+
+	debug("sdram_calculate_size returns %ld\n", temp);
+
+	return temp;
+}
diff --git a/drivers/ddr/altera/sequencer.c b/drivers/ddr/altera/sequencer.c
new file mode 100644
index 0000000..bf4feec
--- /dev/null
+++ b/drivers/ddr/altera/sequencer.c
@@ -0,0 +1,3970 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/sdram.h>
+#include "sequencer.h"
+#include "sequencer_auto.h"
+#include "sequencer_defines.h"
+
+static void scc_mgr_load_dqs_for_write_group(uint32_t write_group);
+
+static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
+	(struct socfpga_sdr_rw_load_manager *)(BASE_RW_MGR + 0x800);
+
+static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
+	(struct socfpga_sdr_rw_load_jump_manager *)(BASE_RW_MGR + 0xC00);
+
+static struct socfpga_sdr_reg_file *sdr_reg_file =
+	(struct socfpga_sdr_reg_file *)(BASE_REG_FILE);
+/******************************************************************************
+ ******************************************************************************
+ ** NOTE: Special Rules for Global Variables                                 **
+ **                                                                          **
+ ** All global variables that are explicitly initialized (including those    **
+ ** explicitly initialized to zero) are only initialized once, during        **
+ ** configuration time, and not again on reset.  This means that they        **
+ ** preserve their current contents across resets, which is needed for some  **
+ ** special cases involving communication with external modules.  In         **
+ ** addition, this avoids paying the price to have the memory initialized,   **
+ ** even for zeroed data, provided it is explicitly set to zero in the code, **
+ ** and doesn't rely on implicit initialization.                             **
+ ******************************************************************************
+ ******************************************************************************/
+
+/*
+ * Temporary workaround to place the initial stack pointer at a safe
+ * offset from end
+ */
+asm(".global __alt_stack_pointer");
+asm("__alt_stack_pointer = " __stringify(STACK_POINTER));
+
+/* Just to make the debugging code more uniform */
+#ifndef RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM
+#define RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM 0
+#endif
+
+#define SDR_WRITE	1
+#define SDR_READ	0
+
+#define DELTA_D		1
+#define MGR_SELECT_MASK		0xf8000
+
+/*
+ * In order to reduce ROM size, most of the selectable calibration steps are
+ * decided at compile time based on the user's calibration mode selection,
+ * as captured by the STATIC_CALIB_STEPS selection below.
+ *
+ * However, to support simulation-time selection of fast simulation mode, where
+ * we skip everything except the bare minimum, we need a few of the steps to
+ * be dynamic.  In those cases, we either use the DYNAMIC_CALIB_STEPS for the
+ * check, which is based on the rtl-supplied value, or we dynamically compute
+ * the value to use based on the dynamically-chosen calibration mode
+ */
+
+#define BTFLD_FMT "%u"
+
+/* For HPS running on actual hardware */
+
+#define DLEVEL 0
+/*
+ * space around comma is required for varargs macro to remove comma if args
+ * is empty
+ */
+#define BFM_GBL_SET(field, value)
+#define BFM_GBL_GET(field)		((long unsigned int)0)
+#define BFM_STAGE(stage)
+#define BFM_INC_VFIFO
+#define COV(label)
+
+#define DYNAMIC_CALIB_STEPS (dyn_calib_steps)
+#define STATIC_IN_RTL_SIM 0
+
+#define STATIC_SKIP_DELAY_LOOPS 0
+
+#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
+	STATIC_SKIP_DELAY_LOOPS)
+
+/* calibration steps requested by the rtl */
+uint16_t dyn_calib_steps;
+
+/*
+ * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
+ * instead of static, we use boolean logic to select between
+ * non-skip and skip values
+ *
+ * The mask is set to include all bits when not-skipping, but is
+ * zero when skipping
+ */
+
+uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */
+
+#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
+	((non_skip_value) & skip_delay_mask)
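+
+/*
+ * For example: with skip_delay_mask = 0xffff the macro passes the value
+ * through unchanged, while with skip_delay_mask = 0 it always yields 0.
+ */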
+
+struct gbl_type *gbl;
+struct param_type *param;
+uint32_t curr_shadow_reg;
+
+static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
+	uint32_t write_group, uint32_t use_dm,
+	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);
+
+static int sdr_ops(u32 *base, u32 offset, u32 data, bool write)
+{
+	u32 addr = (u32)base & MGR_SELECT_MASK;
+	u32 temp;
+
+	switch (addr) {
+	case BASE_PHY_MGR:
+		addr = (((u32)base >> 8) & (1 << 6)) | ((u32)base & 0x3f) |
+			SDR_PHYGRP_PHYMGRGRP_ADDRESS;
+		break;
+	case BASE_RW_MGR:
+		addr = ((u32)base & 0x1fff) | SDR_PHYGRP_RWMGRGRP_ADDRESS;
+		break;
+	case BASE_DATA_MGR:
+		addr = ((u32)base & 0x7ff) | SDR_PHYGRP_DATAMGRGRP_ADDRESS;
+		break;
+	case BASE_SCC_MGR:
+		addr = ((u32)base & 0xfff) | SDR_PHYGRP_SCCGRP_ADDRESS;
+		break;
+	case BASE_REG_FILE:
+		addr = ((u32)base & 0x7ff) | SDR_PHYGRP_REGFILEGRP_ADDRESS;
+		break;
+	case BASE_MMR:
+		addr = ((u32)base & 0xfff) | SDR_CTRLGRP_ADDRESS;
+		break;
+	default:
+		return -1;
+	}
+
+	if (write) {
+		writel(data, SOCFPGA_SDR_ADDRESS + addr + offset);
+	} else {
+		temp = readl(SOCFPGA_SDR_ADDRESS + addr + offset);
+		return temp;
+	}
+
+	return 0;
+}
+
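+/*
+ * Example use of the accessor above (illustrative only, not part of the
+ * calibration flow): a read-modify-write of a register-file word.
+ *
+ *	u32 val = sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_READ);
+ *	sdr_ops(&sdr_reg_file->cur_stage, 0, val | 0x1, SDR_WRITE);
+ */
+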
+static void set_failing_group_stage(uint32_t group, uint32_t stage,
+	uint32_t substage)
+{
+	/*
+	 * Only set the global stage if there has not been any other
+	 * failing group
+	 */
+	if (gbl->error_stage == CAL_STAGE_NIL)	{
+		gbl->error_substage = substage;
+		gbl->error_stage = stage;
+		gbl->error_group = group;
+	}
+}
+
+static inline void reg_file_set_group(uint32_t set_group)
+{
+	/* Read the current group and stage */
+	uint32_t cur_stage_group = sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_READ);
+
+	/* Clear the group */
+	cur_stage_group &= 0x0000FFFF;
+
+	/* Set the group */
+	cur_stage_group |= (set_group << 16);
+
+	/* Write the data back */
+	sdr_ops(&sdr_reg_file->cur_stage, 0, cur_stage_group, SDR_WRITE);
+}
+
+static inline void reg_file_set_stage(uint32_t set_stage)
+{
+	/* Read the current group and stage */
+	uint32_t cur_stage_group = sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_READ);
+
+	/* Clear the stage and substage */
+	cur_stage_group &= 0xFFFF0000;
+
+	/* Set the stage */
+	cur_stage_group |= (set_stage & 0x000000FF);
+
+	/* Write the data back */
+	sdr_ops(&sdr_reg_file->cur_stage, 0, cur_stage_group, SDR_WRITE);
+}
+
+static inline void reg_file_set_sub_stage(uint32_t set_sub_stage)
+{
+	/* Read the current group and stage */
+	uint32_t cur_stage_group = sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_READ);
+
+	/* Clear the substage */
+	cur_stage_group &= 0xFFFF00FF;
+
+	/* Set the sub stage */
+	cur_stage_group |= ((set_sub_stage << 8) & 0x0000FF00);
+
+	/* Write the data back */
+	sdr_ops(&sdr_reg_file->cur_stage, 0, cur_stage_group, SDR_WRITE);
+}
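+
+/*
+ * Note (derived from the masks above): the cur_stage register-file word is
+ * packed as bits [31:16] = group, bits [15:8] = substage and
+ * bits [7:0] = stage.
+ */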
+
+static void initialize(void)
+{
+	debug("%s:%d\n", __func__, __LINE__);
+	/* USER calibration has control over path to memory */
+	/*
+	 * In Hard PHY this is a 2-bit control:
+	 * 0: AFI Mux Select
+	 * 1: DDIO Mux Select
+	 */
+	sdr_ops((u32 *)PHY_MGR_MUX_SEL, 0, 0x3, SDR_WRITE);
+
+	/* USER memory clock is not stable, so we begin initialization */
+
+	sdr_ops((u32 *)PHY_MGR_RESET_MEM_STBL, 0, 0, SDR_WRITE);
+
+	/* USER calibration status all set to zero */
+
+	sdr_ops((u32 *)PHY_MGR_CAL_STATUS, 0, 0, SDR_WRITE);
+	sdr_ops((u32 *)PHY_MGR_CAL_DEBUG_INFO, 0, 0, SDR_WRITE);
+
+	if (((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
+		param->read_correct_mask_vg  = ((uint32_t)1 <<
+			(RW_MGR_MEM_DQ_PER_READ_DQS /
+			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
+		param->write_correct_mask_vg = ((uint32_t)1 <<
+			(RW_MGR_MEM_DQ_PER_READ_DQS /
+			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
+		param->read_correct_mask     = ((uint32_t)1 <<
+			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
+		param->write_correct_mask    = ((uint32_t)1 <<
+			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
+		param->dm_correct_mask       = ((uint32_t)1 <<
+			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
+			- 1;
+	}
+}
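+
+/*
+ * For instance (illustrative parameters): with RW_MGR_MEM_DQ_PER_READ_DQS = 8
+ * and RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS = 1, both read_correct_mask and
+ * read_correct_mask_vg evaluate to 0xff.
+ */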
+
+static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
+{
+	uint32_t odt_mask_0 = 0;
+	uint32_t odt_mask_1 = 0;
+	uint32_t cs_and_odt_mask;
+
+	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
+		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
+			/*
+			 * 1 Rank
+			 * Read: ODT = 0
+			 * Write: ODT = 1
+			 */
+			odt_mask_0 = 0x0;
+			odt_mask_1 = 0x1;
+		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
+			/* 2 Ranks */
+			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
+				/* - Dual-Slot, Single-Rank
+				 * (1 chip-select per DIMM)
+				 * OR
+				 * - RDIMM, 4 total CS (2 CS per DIMM)
+				 * means 2 DIMM
+				 * Since MEM_NUMBER_OF_RANKS is 2 they are
+				 * both single rank
+				 * with 2 CS each (special for RDIMM)
+				 * Read: Turn on ODT on the opposite rank
+				 * Write: Turn on ODT on all ranks
+				 */
+				odt_mask_0 = 0x3 & ~(1 << rank);
+				odt_mask_1 = 0x3;
+			} else {
+				/*
+				 * USER - Single-Slot, Dual-Rank DIMMs
+				 * (2 chip-selects per DIMM)
+				 * USER Read: Turn off ODT on all ranks
+				 * USER Write: Turn on ODT on active rank
+				 */
+				odt_mask_0 = 0x0;
+				odt_mask_1 = 0x3 & (1 << rank);
+			}
+		} else {
+			/* 4 Ranks
+			 * Read:
+			 * ----------+-----------------------+
+			 *           |                       |
+			 *           |         ODT           |
+			 * Read From +-----------------------+
+			 *   Rank    |  3  |  2  |  1  |  0  |
+			 * ----------+-----+-----+-----+-----+
+			 *     0     |  0  |  1  |  0  |  0  |
+			 *     1     |  1  |  0  |  0  |  0  |
+			 *     2     |  0  |  0  |  0  |  1  |
+			 *     3     |  0  |  0  |  1  |  0  |
+			 * ----------+-----+-----+-----+-----+
+			 *
+			 * Write:
+			 * ----------+-----------------------+
+			 *           |                       |
+			 *           |         ODT           |
+			 * Write To  +-----------------------+
+			 *   Rank    |  3  |  2  |  1  |  0  |
+			 * ----------+-----+-----+-----+-----+
+			 *     0     |  0  |  1  |  0  |  1  |
+			 *     1     |  1  |  0  |  1  |  0  |
+			 *     2     |  0  |  1  |  0  |  1  |
+			 *     3     |  1  |  0  |  1  |  0  |
+			 * ----------+-----+-----+-----+-----+
+			 */
+			switch (rank) {
+			case 0:
+				odt_mask_0 = 0x4;
+				odt_mask_1 = 0x5;
+				break;
+			case 1:
+				odt_mask_0 = 0x8;
+				odt_mask_1 = 0xA;
+				break;
+			case 2:
+				odt_mask_0 = 0x1;
+				odt_mask_1 = 0x5;
+				break;
+			case 3:
+				odt_mask_0 = 0x2;
+				odt_mask_1 = 0xA;
+				break;
+			}
+		}
+	} else {
+		odt_mask_0 = 0x0;
+		odt_mask_1 = 0x0;
+	}
+
+	cs_and_odt_mask =
+		(0xFF & ~(1 << rank)) |
+		((0xFF & odt_mask_0) << 8) |
+		((0xFF & odt_mask_1) << 16);
+	sdr_ops((u32 *)RW_MGR_SET_CS_AND_ODT_MASK, 0, cs_and_odt_mask, SDR_WRITE);
+}
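+
+/*
+ * Worked example of the packing above (illustrative, single-rank read/write
+ * mode): rank 0 gives odt_mask_0 = 0x0 and odt_mask_1 = 0x1, so
+ * cs_and_odt_mask = 0xFE | (0x0 << 8) | (0x1 << 16) = 0x100FE.
+ */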
+
+static void scc_mgr_initialize(void)
+{
+	/*
+	 * Clear register file for HPS
+	 * 16 (2^4) is the size of the full register file in the scc mgr:
+	 *	RFILE_DEPTH = log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
+	 * MEM_IF_READ_DQS_WIDTH - 1) + 1;
+	 */
+	uint32_t i;
+	for (i = 0; i < 16; i++) {
+		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
+			   __func__, __LINE__, i);
+		sdr_ops((u32 *)SCC_MGR_HHP_RFILE, i << 2, 0, SDR_WRITE);
+	}
+}
+
+static inline void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group,
+						uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_DQS_IN_DELAY, read_group << 2, delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dqs_io_in_delay(uint32_t write_group,
+	uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY, RW_MGR_MEM_DQ_PER_WRITE_DQS << 2,
+		delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_DQS_EN_PHASE, read_group << 2, phase, SDR_WRITE);
+}
+
+static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group,
+					       uint32_t phase)
+{
+	uint32_t r;
+	uint32_t update_scan_chains;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+	     r += NUM_RANKS_PER_SHADOW_REG) {
+		/*
+		 * USER although the h/w doesn't support different phases per
+		 * shadow register, for simplicity our scc manager modeling
+		 * keeps different phase settings per shadow reg, and it's
+		 * important for us to keep them in sync to match h/w.
+		 * for efficiency, the scan chain update should occur only
+		 * once to sr0.
+		 */
+		update_scan_chains = (r == 0) ? 1 : 0;
+
+		scc_mgr_set_dqs_en_phase(read_group, phase);
+
+		if (update_scan_chains) {
+			sdr_ops((u32 *)SCC_MGR_DQS_ENA, 0, read_group, SDR_WRITE);
+			sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+		}
+	}
+}
+
+static inline void scc_mgr_set_dqdqs_output_phase(uint32_t write_group,
+						  uint32_t phase)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_DQDQS_OUT_PHASE, write_group << 2, phase, SDR_WRITE);
+}
+
+static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
+						     uint32_t phase)
+{
+	uint32_t r;
+	uint32_t update_scan_chains;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+	     r += NUM_RANKS_PER_SHADOW_REG) {
+		/*
+		 * USER although the h/w doesn't support different phases per
+		 * shadow register, for simplicity our scc manager modeling
+		 * keeps different phase settings per shadow reg, and it's
+		 * important for us to keep them in sync to match h/w.
+		 * for efficiency, the scan chain update should occur only
+		 * once to sr0.
+		 */
+		update_scan_chains = (r == 0) ? 1 : 0;
+
+		scc_mgr_set_dqdqs_output_phase(write_group, phase);
+
+		if (update_scan_chains) {
+			sdr_ops((u32 *)SCC_MGR_DQS_ENA, 0, write_group, SDR_WRITE);
+			sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+		}
+	}
+}
+
+static inline void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_DQS_EN_DELAY, read_group << 2, delay +
+		IO_DQS_EN_DELAY_OFFSET, SDR_WRITE);
+}
+
+static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
+					       uint32_t delay)
+{
+	uint32_t r;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		scc_mgr_set_dqs_en_delay(read_group, delay);
+
+		sdr_ops((u32 *)SCC_MGR_DQS_ENA, 0, read_group, SDR_WRITE);
+
+		/*
+		 * In shadow register mode, the T11 settings are stored in
+		 * registers in the core, which are updated by the DQS_ENA
+		 * signals. Not issuing the SCC_MGR_UPD command allows us to
+		 * save lots of rank switching overhead, by calling
+		 * select_shadow_regs_for_update with update_scan_chains
+		 * set to 0.
+		 */
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	}
+}
+
+static void scc_mgr_set_oct_out1_delay(uint32_t write_group, uint32_t delay)
+{
+	uint32_t read_group;
+
+	/*
+	 * Load the setting in the SCC manager
+	 * Although OCT affects only write data, the OCT delay is controlled
+	 * by the DQS logic block which is instantiated once per read group.
+	 * For protocols where a write group consists of multiple read groups,
+	 * the setting must be set multiple times.
+	 */
+	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
+		sdr_ops((u32 *)SCC_MGR_OCT_OUT1_DELAY, read_group << 2,
+			delay, SDR_WRITE);
+}
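+
+/*
+ * For example (illustrative widths): with RW_MGR_MEM_IF_READ_DQS_WIDTH = 8
+ * and RW_MGR_MEM_IF_WRITE_DQS_WIDTH = 4, write group 1 maps to read groups
+ * 2 and 3, so the OCT delay above is scanned into both.
+ */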
+
+static void scc_mgr_set_oct_out2_delay(uint32_t write_group, uint32_t delay)
+{
+	uint32_t read_group;
+
+	/*
+	 * Load the setting in the SCC manager
+	 * Although OCT affects only write data, the OCT delay is controlled
+	 * by the DQS logic block which is instantiated once per read group.
+	 * For protocols where a write group consists
+	 * of multiple read groups, the setting must be set multiple times.
+	 */
+	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
+		WRITE_SCC_OCT_OUT2_DELAY(read_group, delay);
+}
+
+static inline void scc_mgr_set_dqs_bypass(uint32_t write_group, uint32_t bypass)
+{
+	/* Load the setting in the SCC manager */
+	WRITE_SCC_DQS_BYPASS(write_group, bypass);
+}
+
+inline void scc_mgr_set_dq_out1_delay(uint32_t write_group,
+				      uint32_t dq_in_group, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY, dq_in_group << 2, delay, SDR_WRITE);
+}
+
+inline void scc_mgr_set_dq_out2_delay(uint32_t write_group,
+				      uint32_t dq_in_group, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	WRITE_SCC_DQ_OUT2_DELAY(dq_in_group, delay);
+}
+
+inline void scc_mgr_set_dq_in_delay(uint32_t write_group,
+	uint32_t dq_in_group, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY, dq_in_group << 2, delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dq_bypass(uint32_t write_group,
+					 uint32_t dq_in_group, uint32_t bypass)
+{
+	/* Load the setting in the SCC manager */
+	WRITE_SCC_DQ_BYPASS(dq_in_group, bypass);
+}
+
+static inline void scc_mgr_set_rfifo_mode(uint32_t write_group,
+					  uint32_t dq_in_group, uint32_t mode)
+{
+	/* Load the setting in the SCC manager */
+	WRITE_SCC_RFIFO_MODE(dq_in_group, mode);
+}
+
+static inline void scc_mgr_set_hhp_extras(void)
+{
+	/*
+	 * Load the fixed setting in the SCC manager
+	 * bits: 0:0 = 1'b1   - dqs bypass
+	 * bits: 1:1 = 1'b1   - dq bypass
+	 * bits: 4:2 = 3'b001   - rfifo_mode
+	 * bits: 6:5 = 2'b01  - rfifo clock_select
+	 * bits: 7:7 = 1'b0  - separate gating from ungating setting
+	 * bits: 8:8 = 1'b0  - separate OE from Output delay setting
+	 */
+	uint32_t value = (0<<8) | (0<<7) | (1<<5) | (1<<2) | (1<<1) | (1<<0);
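+	/* With the field values listed above this evaluates to 0x27. */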
+	sdr_ops((u32 *)SCC_MGR_HHP_GLOBALS, SCC_MGR_HHP_EXTRAS_OFFSET,
+		value, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_hhp_dqse_map(void)
+{
+	/* Load the fixed setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_HHP_GLOBALS, SCC_MGR_HHP_DQSE_MAP_OFFSET, 0,
+		SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dqs_out1_delay(uint32_t write_group,
+					      uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY, RW_MGR_MEM_DQ_PER_WRITE_DQS << 2,
+		delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dqs_out2_delay(uint32_t write_group,
+					      uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	WRITE_SCC_DQS_IO_OUT2_DELAY(delay);
+}
+
+static inline void scc_mgr_set_dm_out1_delay(uint32_t write_group,
+					     uint32_t dm, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY,
+		(RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm) << 2, delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dm_out2_delay(uint32_t write_group, uint32_t dm,
+					     uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	WRITE_SCC_DM_IO_OUT2_DELAY(dm, delay);
+}
+
+static inline void scc_mgr_set_dm_in_delay(uint32_t write_group,
+					   uint32_t dm, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY,
+		(RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm) << 2, delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dm_bypass(uint32_t write_group, uint32_t dm,
+					 uint32_t bypass)
+{
+	/* Load the setting in the SCC manager */
+	WRITE_SCC_DM_BYPASS(dm, bypass);
+}
+
+/*
+ * USER Zero all DQS config
+ * TODO: maybe rename to scc_mgr_zero_dqs_config (or something)
+ */
+static void scc_mgr_zero_all(void)
+{
+	uint32_t i, r;
+
+	/*
+	 * USER Zero all DQS config settings, across all groups and all
+	 * shadow registers
+	 */
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
+	     NUM_RANKS_PER_SHADOW_REG) {
+		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+			/*
+			 * The phases actually don't exist on a per-rank basis,
+			 * but there's no harm updating them several times, so
+			 * let's keep the code simple.
+			 */
+			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
+			scc_mgr_set_dqs_en_phase(i, 0);
+			scc_mgr_set_dqs_en_delay(i, 0);
+		}
+
+		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
+			scc_mgr_set_dqdqs_output_phase(i, 0);
+			/* av/cv don't have out2 */
+			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
+		}
+
+		/* multicast to all DQS group enables */
+		sdr_ops((u32 *)SCC_MGR_DQS_ENA, 0, 0xff, SDR_WRITE);
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	}
+}
+
+static void scc_set_bypass_mode(uint32_t write_group, uint32_t mode)
+{
+	/* mode = 0 : Do NOT bypass - Half Rate Mode */
+	/* mode = 1 : Bypass - Full Rate Mode */
+
+	/* only need to set once for all groups, pins, dq, dqs, dm */
+	if (write_group == 0) {
+		debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n", __func__,
+			   __LINE__);
+		scc_mgr_set_hhp_extras();
+		debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
+			  __func__, __LINE__);
+	}
+	/* multicast to all DQ enables */
+	sdr_ops((u32 *)SCC_MGR_DQ_ENA, 0, 0xff, SDR_WRITE);
+	sdr_ops((u32 *)SCC_MGR_DM_ENA, 0, 0xff, SDR_WRITE);
+
+	/* update current DQS IO enable */
+	sdr_ops((u32 *)SCC_MGR_DQS_IO_ENA, 0, 0, SDR_WRITE);
+
+	/* update the DQS logic */
+	sdr_ops((u32 *)SCC_MGR_DQS_ENA, 0, write_group, SDR_WRITE);
+
+	/* hit update */
+	sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+}
+
+static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin,
+			       int32_t out_only)
+{
+	uint32_t i, r;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
+		NUM_RANKS_PER_SHADOW_REG) {
+		/* Zero all DQ config settings */
+		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+			scc_mgr_set_dq_out1_delay(write_group, i, 0);
+			scc_mgr_set_dq_out2_delay(write_group, i,
+						  IO_DQ_OUT_RESERVE);
+			if (!out_only)
+				scc_mgr_set_dq_in_delay(write_group, i, 0);
+		}
+
+		/* multicast to all DQ enables */
+		sdr_ops((u32 *)SCC_MGR_DQ_ENA, 0, 0xff, SDR_WRITE);
+
+		/* Zero all DM config settings */
+		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
+			scc_mgr_set_dm_out1_delay(write_group, i, 0);
+			scc_mgr_set_dm_out2_delay(write_group, i,
+						  IO_DM_OUT_RESERVE);
+		}
+
+		/* multicast to all DM enables */
+		sdr_ops((u32 *)SCC_MGR_DM_ENA, 0, 0xff, SDR_WRITE);
+
+		/* zero all DQS io settings */
+		if (!out_only)
+			scc_mgr_set_dqs_io_in_delay(write_group, 0);
+		/* av/cv don't have out2 */
+		scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE);
+		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
+		scc_mgr_load_dqs_for_write_group(write_group);
+		/* multicast to all DQS IO enables (only 1) */
+		sdr_ops((u32 *)SCC_MGR_DQS_IO_ENA, 0, 0, SDR_WRITE);
+		/* hit update to zero everything */
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	}
+}
+
+/* load up dqs config settings */
+static void scc_mgr_load_dqs(uint32_t dqs)
+{
+	sdr_ops((u32 *)SCC_MGR_DQS_ENA, 0, dqs, SDR_WRITE);
+}
+
+static void scc_mgr_load_dqs_for_write_group(uint32_t write_group)
+{
+	uint32_t read_group;
+
+	/*
+	 * Although OCT affects only write data, the OCT delay is controlled
+	 * by the DQS logic block which is instantiated once per read group.
+	 * For protocols where a write group consists of multiple read groups,
+	 * the setting must be scanned multiple times.
+	 */
+	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
+		sdr_ops((u32 *)SCC_MGR_DQS_ENA, 0, read_group, SDR_WRITE);
+}
+
+/* load up dqs io config settings */
+static void scc_mgr_load_dqs_io(void)
+{
+	sdr_ops((u32 *)SCC_MGR_DQS_IO_ENA, 0, 0, SDR_WRITE);
+}
+
+/* load up dq config settings */
+static void scc_mgr_load_dq(uint32_t dq_in_group)
+{
+	sdr_ops((u32 *)SCC_MGR_DQ_ENA, 0, dq_in_group, SDR_WRITE);
+}
+
+/* load up dm config settings */
+static void scc_mgr_load_dm(uint32_t dm)
+{
+	sdr_ops((u32 *)SCC_MGR_DM_ENA, 0, dm, SDR_WRITE);
+}
+
+/*
+ * apply and load a particular input delay for the DQ pins in a group
+ * group_bgn is the index of the first dq pin (in the write group)
+ */
+static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group,
+					    uint32_t group_bgn, uint32_t delay)
+{
+	uint32_t i, p;
+
+	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
+		scc_mgr_set_dq_in_delay(write_group, p, delay);
+		scc_mgr_load_dq(p);
+	}
+}
+
+/* apply and load a particular output delay for the DQ pins in a group */
+static void scc_mgr_apply_group_dq_out1_delay(uint32_t write_group,
+					      uint32_t group_bgn,
+					      uint32_t delay1)
+{
+	uint32_t i, p;
+
+	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
+		scc_mgr_set_dq_out1_delay(write_group, i, delay1);
+		scc_mgr_load_dq(i);
+	}
+}
+
+/* apply and load a particular output delay for the DM pins in a group */
+static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group,
+					      uint32_t delay1)
+{
+	uint32_t i;
+
+	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
+		scc_mgr_set_dm_out1_delay(write_group, i, delay1);
+		scc_mgr_load_dm(i);
+	}
+}
+
+
+/* apply and load delay on both DQS and OCT out1 */
+static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
+						    uint32_t delay)
+{
+	scc_mgr_set_dqs_out1_delay(write_group, delay);
+	scc_mgr_load_dqs_io();
+
+	scc_mgr_set_oct_out1_delay(write_group, delay);
+	scc_mgr_load_dqs_for_write_group(write_group);
+}
+
+/* apply a delay to the entire output side: DQ, DM, DQS, OCT */
+static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group,
+						  uint32_t group_bgn,
+						  uint32_t delay)
+{
+	uint32_t i, p, new_delay;
+
+	/* dq shift */
+	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
+		new_delay = READ_SCC_DQ_OUT2_DELAY(i);
+		new_delay += delay;
+
+		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+			debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQ[%u,%u]:"
+				   " %u > %lu => %lu\n", __func__, __LINE__,
+				   write_group, group_bgn, delay, i, p, new_delay,
+				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
+				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
+			new_delay = IO_IO_OUT2_DELAY_MAX;
+		}
+
+		scc_mgr_set_dq_out2_delay(write_group, i, new_delay);
+		scc_mgr_load_dq(i);
+	}
+
+	/* dm shift */
+	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
+		new_delay = READ_SCC_DM_IO_OUT2_DELAY(i);
+		new_delay += delay;
+
+		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+			debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DM[%u]:"
+				   " %u > %lu => %lu\n", __func__, __LINE__,
+				   write_group, group_bgn, delay, i, new_delay,
+				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
+				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
+			new_delay = IO_IO_OUT2_DELAY_MAX;
+		}
+
+		scc_mgr_set_dm_out2_delay(write_group, i, new_delay);
+		scc_mgr_load_dm(i);
+	}
+
+	/* dqs shift */
+	new_delay = READ_SCC_DQS_IO_OUT2_DELAY();
+	new_delay += delay;
+
+	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+		debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;"
+			   " adding %u to OUT1\n", __func__, __LINE__,
+			   write_group, group_bgn, delay, new_delay,
+			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
+			   new_delay - IO_IO_OUT2_DELAY_MAX);
+		scc_mgr_set_dqs_out1_delay(write_group, new_delay -
+					   IO_IO_OUT2_DELAY_MAX);
+		new_delay = IO_IO_OUT2_DELAY_MAX;
+	}
+
+	scc_mgr_set_dqs_out2_delay(write_group, new_delay);
+	scc_mgr_load_dqs_io();
+
+	/* oct shift */
+	new_delay = READ_SCC_OCT_OUT2_DELAY(write_group);
+	new_delay += delay;
+
+	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+		debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;"
+			   " adding %u to OUT1\n", __func__, __LINE__,
+			   write_group, group_bgn, delay, new_delay,
+			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
+			   new_delay - IO_IO_OUT2_DELAY_MAX);
+		scc_mgr_set_oct_out1_delay(write_group, new_delay -
+					   IO_IO_OUT2_DELAY_MAX);
+		new_delay = IO_IO_OUT2_DELAY_MAX;
+	}
+
+	scc_mgr_set_oct_out2_delay(write_group, new_delay);
+	scc_mgr_load_dqs_for_write_group(write_group);
+}
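+
+/*
+ * Illustrative example of the DQS/OCT spill-over above (delay values made
+ * up): if the current OUT2 delay is 28, the added delay is 10 and
+ * IO_IO_OUT2_DELAY_MAX is 31, OUT2 is capped at 31 and the overflow of
+ * 7 taps is placed on OUT1 instead.
+ */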
+
+/*
+ * USER apply a delay to the entire output side (DQ, DM, DQS, OCT)
+ * and to all ranks
+ */
+static void scc_mgr_apply_group_all_out_delay_add_all_ranks(
+	uint32_t write_group, uint32_t group_bgn, uint32_t delay)
+{
+	uint32_t r;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		scc_mgr_apply_group_all_out_delay_add(write_group,
+						      group_bgn, delay);
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	}
+}
+
+/* optimization used to recover some slots in ddr3 inst_rom */
+/* could be applied to other protocols if we wanted to */
+static void set_jump_as_return(void)
+{
+	/*
+	 * To save space, we replace the return with a jump to a special
+	 * shared RETURN instruction, and we set the counter to a large
+	 * value so that we always take the jump.
+	 */
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0xFF, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+		__RW_MGR_RETURN, SDR_WRITE);
+}
+
+/*
+ * should always use constants as arguments to ensure all computations are
+ * performed at compile time
+ */
+static inline void delay_for_n_mem_clocks(const uint32_t clocks)
+{
+	uint32_t afi_clocks;
+	uint8_t inner = 0;
+	uint8_t outer = 0;
+	uint16_t c_loop = 0;
+
+	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);
+
+
+	afi_clocks = (clocks + AFI_RATE_RATIO-1) / AFI_RATE_RATIO;
+	/* scale (rounding up) to get afi clocks */
+
+	/*
+	 * Note, we don't bother accounting for being off a little bit
+	 * because of a few extra instructions in the outer loops.
+	 * Note, the loops have a test at the end and do the test before
+	 * the decrement, so they always perform the loop
+	 * one more time than the counter value.
+	 */
+	if (afi_clocks == 0) {
+		;
+	} else if (afi_clocks <= 0x100) {
+		inner = afi_clocks-1;
+		outer = 0;
+		c_loop = 0;
+	} else if (afi_clocks <= 0x10000) {
+		inner = 0xff;
+		outer = (afi_clocks-1) >> 8;
+		c_loop = 0;
+	} else {
+		inner = 0xff;
+		outer = 0xff;
+		c_loop = (afi_clocks-1) >> 16;
+	}
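+	/*
+	 * Illustrative decomposition: afi_clocks = 0x20000 yields
+	 * inner = 0xff, outer = 0xff and c_loop = (0x20000 - 1) >> 16 = 1.
+	 */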
+
+	/*
+	 * rom instructions are structured as follows:
+	 *
+	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
+	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
+	 *                return
+	 *
+	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
+	 * TARGET_B is set to IDLE_LOOP2 as well
+	 *
+	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
+	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
+	 *
+	 * a little confusing, but it helps save precious space in the inst_rom
+	 * and sequencer rom and keeps the delays more accurate and reduces
+	 * overhead
+	 */
+	if (afi_clocks <= 0x100) {
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0,
+			SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			__RW_MGR_IDLE_LOOP1, SDR_WRITE);
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_IDLE_LOOP1,
+			SDR_WRITE);
+	} else {
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0,
+			SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), SDR_WRITE);
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0,
+			SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer), SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			__RW_MGR_IDLE_LOOP2, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			__RW_MGR_IDLE_LOOP2, SDR_WRITE);
+
+		/* hack to get around compiler not being smart enough */
+		if (afi_clocks <= 0x10000) {
+			/* only need to run once */
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_IDLE_LOOP2, SDR_WRITE);
+		} else {
+			do {
+				sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+					__RW_MGR_IDLE_LOOP2, SDR_WRITE);
+			} while (c_loop-- != 0);
+		}
+	}
+	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
+}
+
+/*
+ * should always use constants as arguments to ensure all computations are
+ * performed at compile time
+ */
+static inline void delay_for_n_ns(const uint32_t nanoseconds)
+{
+	debug("%s:%d nanoseconds=%u ... end", __func__, __LINE__, nanoseconds);
+	delay_for_n_mem_clocks((1000*nanoseconds) /
+			       (1000000/AFI_CLK_FREQ) * AFI_RATE_RATIO);
+}
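+
+/*
+ * Rough sanity check of the conversion above, assuming AFI_CLK_FREQ is
+ * expressed in MHz (e.g. 266): delay_for_n_ns(500000) works out to roughly
+ * (1000 * 500000) / (1000000 / 266) = ~133000 clocks (before the
+ * AFI_RATE_RATIO scaling), in line with the "500us @ 266MHz ~ 134000
+ * clock cycles" figure quoted below.
+ */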
+
+static void rw_mgr_mem_initialize(void)
+{
+	uint32_t r;
+
+	debug("%s:%d\n", __func__, __LINE__);
+
+	/* The reset / cke part of initialization is broadcasted to all ranks */
+	sdr_ops((u32 *)RW_MGR_SET_CS_AND_ODT_MASK, 0, RW_MGR_RANK_ALL, SDR_WRITE);
+
+	/*
+	 * Here's how you load a register for a loop:
+	 * Counters are located @ 0x800
+	 * Jump addresses are located @ 0xC00
+	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
+	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
+	 * I know this isn't pretty, but the Avalon bus throws away the 2 least
+	 * significant bits
+	 */
+
+	/* start with memory RESET activated */
+
+	/* tINIT = 200us */
+
+	/*
+	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
+	 * If a and b are the numbers of iterations in the 2 nested loops
+	 * it takes the following number of cycles to complete the operation:
+	 * number_of_cycles = ((2 + n) * a + 2) * b
+	 * where n is the number of instruction in the inner loop
+	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
+	 * b = 6A
+	 */
+
+	/* Load counters */
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL), SDR_WRITE);
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL), SDR_WRITE);
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL), SDR_WRITE);
+
+	/* Load jump address */
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+		__RW_MGR_INIT_RESET_0_CKE_0, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+		__RW_MGR_INIT_RESET_0_CKE_0, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+		__RW_MGR_INIT_RESET_0_CKE_0, SDR_WRITE);
+
+	/* Execute count instruction */
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_INIT_RESET_0_CKE_0,
+		SDR_WRITE);
+
+	/* indicate that memory is stable */
+	sdr_ops((u32 *)PHY_MGR_RESET_MEM_STBL, 0, 1, SDR_WRITE);
+
+	/*
+	 * transition the RESET to high
+	 * Wait for 500us
+	 */
+
+	/*
+	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles.
+	 * If a and b are the numbers of iterations of the two nested loops,
+	 * it takes the following number of cycles to complete the operation:
+	 * number_of_cycles = ((2 + n) * a + 2) * b
+	 * where n is the number of instructions in the inner loop.
+	 * One possible solution is n = 2, a = 131, b = 256 => a = 0x83,
+	 * b = 0xFF.
+	 */
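+
+	/*
+	 * Sanity check on those values: ((2 + 2) * 131 + 2) * 256 = 134656
+	 * cycles, i.e. roughly 505us at 3.75ns per cycle, which comfortably
+	 * covers the 500us requirement.
+	 */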
+
+	/* Load counters */
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL), SDR_WRITE);
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL), SDR_WRITE);
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL), SDR_WRITE);
+
+	/* Load jump address */
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+		__RW_MGR_INIT_RESET_1_CKE_0, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+		__RW_MGR_INIT_RESET_1_CKE_0, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+		__RW_MGR_INIT_RESET_1_CKE_0, SDR_WRITE);
+
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_INIT_RESET_1_CKE_0,
+		SDR_WRITE);
+
+	/* bring up clock enable */
+
+	/* tXRP < 250 ck cycles */
+	delay_for_n_mem_clocks(250);
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
+		if (param->skip_ranks[r]) {
+			/* request to skip the rank */
+
+			continue;
+		}
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
+
+		/*
+		 * Use mirrored commands for odd ranks if address
+		 * mirroring is on.
+		 */
+		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS2_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS3_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS1_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS0_DLL_RESET_MIRR, SDR_WRITE);
+		} else {
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS2, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS3, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS1, SDR_WRITE);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS0_DLL_RESET, SDR_WRITE);
+		}
+		set_jump_as_return();
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0, __RW_MGR_ZQCL,
+			SDR_WRITE);
+
+		/* tZQinit = tDLLK = 512 ck cycles */
+		delay_for_n_mem_clocks(512);
+	}
+}
+
+/*
+ * At the end of calibration we have to program the user settings in and
+ * hand off the memory to the user.
+ */
+static void rw_mgr_mem_handoff(void)
+{
+	uint32_t r;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
+		if (param->skip_ranks[r])
+			/* request to skip the rank */
+			continue;
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
+
+		/* precharge all banks ... */
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+			__RW_MGR_PRECHARGE_ALL, SDR_WRITE);
+
+		/* load up MR settings specified by user */
+
+		/*
+		 * Use mirrored commands for odd ranks if address
+		 * mirroring is on.
+		 */
+		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS2_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS3_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS1_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS0_USER_MIRR, SDR_WRITE);
+		} else {
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS2, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS3, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS1, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				__RW_MGR_MRS0_USER, SDR_WRITE);
+		}
+		/*
+		 * Need to wait tMOD (12CK or 15ns) before issuing other
+		 * commands, but we will have plenty of NIOS cycles before
+		 * the actual handoff, so it's okay.
+		 */
+	}
+}
+
+/*
+ * Perform a guaranteed read on the patterns we are going to use during a
+ * read test to ensure memory works.
+ */
+uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
+	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
+	uint32_t all_ranks)
+{
+	uint32_t r, vg;
+	uint32_t correct_mask_vg;
+	uint32_t tmp_bit_chk;
+	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+
+	*bit_chk = param->read_correct_mask;
+	correct_mask_vg = param->read_correct_mask_vg;
+
+	for (r = rank_bgn; r < rank_end; r++) {
+		if (param->skip_ranks[r])
+			/* request to skip the rank */
+			continue;
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+		/* Load up a constant burst of read commands */
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x20, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			__RW_MGR_GUARANTEED_READ, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x20, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			__RW_MGR_GUARANTEED_READ_CONT, SDR_WRITE);
+
+		tmp_bit_chk = 0;
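+		/*
+		 * Walk the virtual groups from the highest index down to 0;
+		 * each pass shifts the accumulated result and ORs in the
+		 * per-group pass bits (the inverted RW manager status masked
+		 * by correct_mask_vg).
+		 */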
+		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
+			/* reset the fifos to get pointers to known state */
+
+			sdr_ops((u32 *)PHY_MGR_CMD_FIFO_RESET, 0, 0, SDR_WRITE);
+			sdr_ops((u32 *)RW_MGR_RESET_READ_DATAPATH, 0, 0, SDR_WRITE);
+
+			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
+				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
+
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP,
+				      ((group *
+				      RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
+				      vg) << 2), __RW_MGR_GUARANTEED_READ, SDR_WRITE);
+			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg &
+				~(sdr_ops((u32 *)BASE_RW_MGR, 0, 0, SDR_READ)));
+
+			if (vg == 0)
+				break;
+		}
+		*bit_chk &= tmp_bit_chk;
+	}
+
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, (group << 2),
+		__RW_MGR_CLEAR_DQS_ENABLE, SDR_WRITE);
+
+	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+	debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) =>\
+		   %lu\n", __func__, __LINE__, group, *bit_chk, param->read_correct_mask,
+		   (long unsigned int)(*bit_chk == param->read_correct_mask));
+	return *bit_chk == param->read_correct_mask;
+}
+
+static inline uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
+	(uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
+{
+	return rw_mgr_mem_calibrate_read_test_patterns(0, group,
+		num_tries, bit_chk, 1);
+}
+
+/* load up the patterns we are going to use during a read test */
+static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
+	uint32_t all_ranks)
+{
+	uint32_t r;
+	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+
+	debug("%s:%d\n", __func__, __LINE__);
+	for (r = rank_bgn; r < rank_end; r++) {
+		if (param->skip_ranks[r])
+			/* request to skip the rank */
+			continue;
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+		/* Load up a constant burst */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x20, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			__RW_MGR_GUARANTEED_WRITE_WAIT0, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x20, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			__RW_MGR_GUARANTEED_WRITE_WAIT1, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0x04, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+			__RW_MGR_GUARANTEED_WRITE_WAIT2, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr3, 0, 0x04, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3, 0,
+			__RW_MGR_GUARANTEED_WRITE_WAIT3, SDR_WRITE);
+
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+			__RW_MGR_GUARANTEED_WRITE, SDR_WRITE);
+	}
+
+	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+}
+
+/*
+ * Try a read and see if it returns correct data back. Has dummy reads
+ * inserted into the mix, used to align DQS enable. Has more thorough checks
+ * than the regular read test.
+ */
+static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
+	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
+	uint32_t all_groups, uint32_t all_ranks)
+{
+	uint32_t r, vg;
+	uint32_t correct_mask_vg;
+	uint32_t tmp_bit_chk;
+	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+
+	*bit_chk = param->read_correct_mask;
+	correct_mask_vg = param->read_correct_mask_vg;
+
+	uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
+		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);
+
+	for (r = rank_bgn; r < rank_end; r++) {
+		if (param->skip_ranks[r])
+			/* request to skip the rank */
+			continue;
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
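+		/*
+		 * Program the back-to-back read loop: cntr1/cntr2 provide
+		 * the wait states around the reads, cntr0 sets how many
+		 * back-to-back reads are issued per group and cntr3 selects
+		 * how many groups are covered when all_groups is set.
+		 */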
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x10, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			__RW_MGR_READ_B2B_WAIT1, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0x10, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+			__RW_MGR_READ_B2B_WAIT2, SDR_WRITE);
+
+		if (quick_read_mode)
+			/* need at least two (1+1) reads to capture failures */
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x1, SDR_WRITE);
+		else if (all_groups)
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x06, SDR_WRITE);
+		else
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x32, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			__RW_MGR_READ_B2B, SDR_WRITE);
+		if (all_groups)
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr3, 0,
+				      RW_MGR_MEM_IF_READ_DQS_WIDTH *
+				      RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
+				      SDR_WRITE);
+		else
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr3, 0, 0x0, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3, 0,
+			__RW_MGR_READ_B2B, SDR_WRITE);
+
+		tmp_bit_chk = 0;
+		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
+			/* reset the fifos to get pointers to known state */
+			sdr_ops((u32 *)PHY_MGR_CMD_FIFO_RESET, 0, 0, SDR_WRITE);
+			sdr_ops((u32 *)RW_MGR_RESET_READ_DATAPATH, 0, 0, SDR_WRITE);
+
+			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
+				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
+
+			sdr_ops((u32 *)(all_groups ? RW_MGR_RUN_ALL_GROUPS :
+				      RW_MGR_RUN_SINGLE_GROUP), ((group *
+				      RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS
+				      + vg) << 2), __RW_MGR_READ_B2B, SDR_WRITE);
+			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg &
+				~(sdr_ops((u32 *)BASE_RW_MGR, 0, 0, SDR_READ)));
+
+			if (vg == 0)
+				break;
+		}
+		*bit_chk &= tmp_bit_chk;
+	}
+
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, (group << 2),
+		__RW_MGR_CLEAR_DQS_ENABLE, SDR_WRITE);
+
+	if (all_correct) {
+		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) =>\
+			   (%u == %u) => %lu", __func__, __LINE__, group,
+			   all_groups, *bit_chk, param->read_correct_mask,
+			   (long unsigned int)(*bit_chk ==
+			   param->read_correct_mask));
+		return *bit_chk == param->read_correct_mask;
+	} else	{
+		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) =>\
+			   (%u != %lu) => %lu\n", __func__, __LINE__,
+			   group, all_groups, *bit_chk, (long unsigned int)0,
+			   (long unsigned int)(*bit_chk != 0x00));
+		return *bit_chk != 0x00;
+	}
+}
+
+static inline uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
+	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
+	uint32_t all_groups)
+{
+	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
+					      bit_chk, all_groups, 1);
+}
+
+static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
+{
+	sdr_ops((u32 *)PHY_MGR_CMD_INC_VFIFO_HARD_PHY, 0, grp, SDR_WRITE);
+
+	(*v)++;
+	BFM_INC_VFIFO;
+}
+
+/*
+ * Used in quick cal to properly loop through the duplicated VFIFOs in
+ * AV QDRII/RLDRAM.
+ */
+static inline void rw_mgr_incr_vfifo_all(uint32_t grp, uint32_t *v)
+{
+	rw_mgr_incr_vfifo(grp, v);
+}
+
+static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
+{
+	uint32_t i;
+
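+	/*
+	 * A VFIFO decrement is implemented by wrapping around: VFIFO_SIZE - 1
+	 * increments bring the pointer back to one position earlier.
+	 */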
+	for (i = 0; i < VFIFO_SIZE-1; i++)
+		rw_mgr_incr_vfifo(grp, v);
+}
+
+/* find a good dqs enable to use */
+static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
+{
+	uint32_t i, d, v, p;
+	uint32_t max_working_cnt;
+	uint32_t fail_cnt;
+	uint32_t bit_chk;
+	uint32_t dtaps_per_ptap;
+	uint32_t found_begin, found_end;
+	uint32_t work_bgn, work_mid, work_end, tmp_delay;
+	uint32_t test_status;
+	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
+
+	debug("%s:%d %u\n", __func__, __LINE__, grp);
+	BFM_STAGE("find_dqs_en_phase");
+
+	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
+
+	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
+	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
+
+	fail_cnt = 0;
+
+	/* ************************************************************** */
+	/* * Step 0 : Determine number of delay taps for each phase tap * */
+
+	dtaps_per_ptap = 0;
+	tmp_delay = 0;
+	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
+		dtaps_per_ptap++;
+		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+	}
+	dtaps_per_ptap--;
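+	/*
+	 * dtaps_per_ptap now holds the largest number of delay-chain taps
+	 * whose combined delay is still strictly less than one phase tap.
+	 */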
+	tmp_delay = 0;
+
+	/* ********************************************************* */
+	/* * Step 1 : First push vfifo until we get a failing read * */
+	for (v = 0; v < VFIFO_SIZE; ) {
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %lu\n",
+			   __func__, __LINE__, BFM_GBL_GET(vfifo_idx));
+		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
+			(grp, 1, PASS_ONE_BIT, &bit_chk, 0);
+		if (!test_status) {
+			fail_cnt++;
+
+			if (fail_cnt == 2)
+				break;
+		}
+
+		/* fiddle with FIFO */
+		rw_mgr_incr_vfifo(grp, &v);
+	}
+
+	if (v >= VFIFO_SIZE) {
+		/* no failing read found!! Something must have gone wrong */
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
+			   __func__, __LINE__);
+		return 0;
+	}
+
+	max_working_cnt = 0;
+
+	/* ******************************************************** */
+	/* * step 2: find first working phase, increment in ptaps * */
+	found_begin = 0;
+	work_bgn = 0;
+	for (d = 0; d <= dtaps_per_ptap; d++, tmp_delay +=
+		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
+		work_bgn = tmp_delay;
+		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+		for (i = 0; i < VFIFO_SIZE; i++) {
+			for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++, work_bgn +=
+				IO_DELAY_PER_OPA_TAP) {
+				scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+				test_status =
+				rw_mgr_mem_calibrate_read_test_all_ranks
+				(grp, 1, PASS_ONE_BIT, &bit_chk, 0);
+
+				if (test_status) {
+					max_working_cnt = 1;
+					found_begin = 1;
+					break;
+				}
+			}
+
+			if (found_begin)
+				break;
+
+			if (p > IO_DQS_EN_PHASE_MAX)
+				/* fiddle with FIFO */
+				rw_mgr_incr_vfifo(grp, &v);
+		}
+
+		if (found_begin)
+			break;
+	}
+
+	if (i >= VFIFO_SIZE) {
+		/* cannot find working solution */
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\
+			   ptap/dtap\n", __func__, __LINE__);
+		return 0;
+	}
+
+	work_end = work_bgn;
+
+	/*
+	 * If d is 0 then the working window covers a phase tap and we can
+	 * follow the old procedure; otherwise, we've found the beginning and
+	 * we need to increment the dtaps until we find the end.
+	 */
+	if (d == 0) {
+		/* ********************************************************* */
+		/* * step 3a: if we have room, back off by one and
+		increment in dtaps * */
+		COV(EN_PHASE_PTAP_OVERLAP);
+
+		/* Special case code for backing up a phase */
+		if (p == 0) {
+			p = IO_DQS_EN_PHASE_MAX;
+			rw_mgr_decr_vfifo(grp, &v);
+		} else {
+			p = p - 1;
+		}
+		tmp_delay = work_bgn - IO_DELAY_PER_OPA_TAP;
+		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+		found_begin = 0;
+		for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < work_bgn;
+			d++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
+			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+			if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+								     PASS_ONE_BIT,
+								     &bit_chk, 0)) {
+				found_begin = 1;
+				work_bgn = tmp_delay;
+				break;
+			}
+		}
+
+		/* We have found a working dtap before the ptap found above */
+		if (found_begin == 1)
+			max_working_cnt++;
+
+		/* Restore VFIFO to old state before we decremented it
+		(if needed) */
+		p = p + 1;
+		if (p > IO_DQS_EN_PHASE_MAX) {
+			p = 0;
+			rw_mgr_incr_vfifo(grp, &v);
+		}
+
+		scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
+
+		/* ********************************************************* */
+		/* * step 4a: go forward from working phase to non working
+		phase, increment in ptaps * */
+		p = p + 1;
+		work_end += IO_DELAY_PER_OPA_TAP;
+		if (p > IO_DQS_EN_PHASE_MAX) {
+			/* fiddle with FIFO */
+			p = 0;
+			rw_mgr_incr_vfifo(grp, &v);
+		}
+
+		found_end = 0;
+		for (; i < VFIFO_SIZE + 1; i++) {
+			for (; p <= IO_DQS_EN_PHASE_MAX; p++, work_end
+				+= IO_DELAY_PER_OPA_TAP) {
+				scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+				if (!rw_mgr_mem_calibrate_read_test_all_ranks
+					(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
+					found_end = 1;
+					break;
+				} else {
+					max_working_cnt++;
+				}
+			}
+
+			if (found_end)
+				break;
+
+			if (p > IO_DQS_EN_PHASE_MAX) {
+				/* fiddle with FIFO */
+				rw_mgr_incr_vfifo(grp, &v);
+				p = 0;
+			}
+		}
+
+		if (i >= VFIFO_SIZE + 1) {
+			/* cannot see edge of failing read */
+			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end:\
+				   failed\n", __func__, __LINE__);
+			return 0;
+		}
+
+		/* ********************************************************* */
+		/* * step 5a:  back off one from last, increment in dtaps  * */
+
+		/* Special case code for backing up a phase */
+		if (p == 0) {
+			p = IO_DQS_EN_PHASE_MAX;
+			rw_mgr_decr_vfifo(grp, &v);
+		} else {
+			p = p - 1;
+		}
+
+		work_end -= IO_DELAY_PER_OPA_TAP;
+		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+		/* * The actual increment of dtaps is done outside of
+		the if/else loop to share code */
+		d = 0;
+
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \
+			   vfifo=%lu ptap=%u\n", __func__, __LINE__,
+			   BFM_GBL_GET(vfifo_idx), p);
+	} else {
+		/* ******************************************************* */
+		/* * step 3-5b:  Find the right edge of the window using
+		delay taps   * */
+		COV(EN_PHASE_PTAP_NO_OVERLAP);
+
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%lu \
+			   ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__,
+			   BFM_GBL_GET(vfifo_idx), p, d, work_bgn);
+		BFM_GBL_SET(dqs_enable_left_edge[grp].v,
+			    BFM_GBL_GET(vfifo_idx));
+		BFM_GBL_SET(dqs_enable_left_edge[grp].p, p);
+		BFM_GBL_SET(dqs_enable_left_edge[grp].d, d);
+		BFM_GBL_SET(dqs_enable_left_edge[grp].ps, work_bgn);
+
+		work_end = work_bgn;
+
+		/* * The actual increment of dtaps is done outside of the
+		if/else loop to share code */
+
+		/* Only here to counterbalance a subtract later on which is
+		not needed if this branch of the algorithm is taken */
+		max_working_cnt++;
+	}
+
+	/* The dtap increment to find the failing edge is done here */
+	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
+		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
+			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
+				   end-2: dtap=%u\n", __func__, __LINE__, d);
+			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+			if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+								      PASS_ONE_BIT,
+								      &bit_chk, 0)) {
+				break;
+			}
+		}
+
+	/* Go back to working dtap */
+	if (d != 0)
+		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%lu \
+		   ptap=%u dtap=%u end=%u\n", __func__, __LINE__,
+		   BFM_GBL_GET(vfifo_idx), p, d-1, work_end);
+	BFM_GBL_SET(dqs_enable_right_edge[grp].v, BFM_GBL_GET(vfifo_idx));
+	BFM_GBL_SET(dqs_enable_right_edge[grp].p, p);
+	BFM_GBL_SET(dqs_enable_right_edge[grp].d, d-1);
+	BFM_GBL_SET(dqs_enable_right_edge[grp].ps, work_end);
+
+	if (work_end < work_bgn) {
+		/* nil range */
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \
+			   failed\n", __func__, __LINE__);
+		return 0;
+	}
+	/* we have a working range */
+
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
+		   __func__, __LINE__, work_bgn, work_end);
+
+	/* *************************************************************** */
+	/*
+	 * * We need to calculate the number of dtaps that equal a ptap
+	 * * To do that we'll back up a ptap and re-find the edge of the
+	 * * window using dtaps
+	 */
+
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \
+		   for tracking\n", __func__, __LINE__);
+
+	/* Special case code for backing up a phase */
+	if (p == 0) {
+		p = IO_DQS_EN_PHASE_MAX;
+		rw_mgr_decr_vfifo(grp, &v);
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
+			   cycle/phase: v=%lu p=%u\n", __func__, __LINE__,
+			   BFM_GBL_GET(vfifo_idx), p);
+	} else {
+		p = p - 1;
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
+			   phase only: v=%lu p=%u", __func__, __LINE__,
+			   BFM_GBL_GET(vfifo_idx), p);
+	}
+
+	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+	/*
+	 * Increase dtap until we first see a passing read (in case the
+	 * window is smaller than a ptap),
+	 * and then a failing read to mark the edge of the window again
+	 */
+
+	/* Find a passing read */
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
+		   __func__, __LINE__);
+	found_passing_read = 0;
+	found_failing_read = 0;
+	initial_failing_dtap = d;
+	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \
+			   read d=%u\n", __func__, __LINE__, d);
+		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+							     PASS_ONE_BIT,
+							     &bit_chk, 0)) {
+			found_passing_read = 1;
+			break;
+		}
+	}
+
+	if (found_passing_read) {
+		/* Find a failing read */
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \
+			   read\n", __func__, __LINE__);
+		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
+			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
+				   testing read d=%u\n", __func__, __LINE__, d);
+			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+			if (!rw_mgr_mem_calibrate_read_test_all_ranks
+				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
+				found_failing_read = 1;
+				break;
+			}
+		}
+	} else {
+		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \
+			   calculate dtaps", __func__, __LINE__);
+		debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
+	}
+
+	/*
+	 * The dynamically calculated dtaps_per_ptap is only valid if we
+	 * found both a passing and a failing read. If we didn't, it means d
+	 * hit the max (IO_DQS_EN_DELAY_MAX) and dtaps_per_ptap retains its
+	 * statically calculated value.
+	 */
+	if (found_passing_read && found_failing_read)
+		dtaps_per_ptap = d - initial_failing_dtap;
+
+	sdr_ops(&sdr_reg_file->dtaps_per_ptap, 0, dtaps_per_ptap, SDR_WRITE);
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \
+		   - %u = %u",  __func__, __LINE__, d,
+		   initial_failing_dtap, dtaps_per_ptap);
+
+	/* ******************************************** */
+	/* * step 6:  Find the centre of the window   * */
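+	/*
+	 * The centre, work_mid, is an absolute delay; below it is reduced
+	 * modulo one full VFIFO cycle worth of phase taps and then decomposed
+	 * into whole phase taps plus the remaining delay-chain taps.
+	 */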
+
+	work_mid = (work_bgn + work_end) / 2;
+	tmp_delay = 0;
+
+	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
+		   work_bgn, work_end, work_mid);
+	/* Get the middle delay to be less than a VFIFO delay */
+	for (p = 0; p <= IO_DQS_EN_PHASE_MAX;
+		p++, tmp_delay += IO_DELAY_PER_OPA_TAP)
+		;
+	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
+	while (work_mid > tmp_delay)
+		work_mid -= tmp_delay;
+	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
+	tmp_delay = 0;
+	for (p = 0; p <= IO_DQS_EN_PHASE_MAX && tmp_delay < work_mid;
+		p++, tmp_delay += IO_DELAY_PER_OPA_TAP)
+		;
+	tmp_delay -= IO_DELAY_PER_OPA_TAP;
+	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p-1, tmp_delay);
+	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < work_mid; d++,
+		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
+		;
+	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
+
+	scc_mgr_set_dqs_en_phase_all_ranks(grp, p-1);
+	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+	/*
+	 * Push the VFIFO until we can successfully calibrate. We can do this
+	 * because the largest possible margin is 1 VFIFO cycle.
+	 */
+	for (i = 0; i < VFIFO_SIZE; i++) {
+		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%lu\n",
+		       BFM_GBL_GET(vfifo_idx));
+		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+							     PASS_ONE_BIT,
+							     &bit_chk, 0)) {
+			break;
+		}
+
+		/* fiddle with FIFO */
+		rw_mgr_incr_vfifo(grp, &v);
+	}
+
+	if (i >= VFIFO_SIZE) {
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \
+			   failed\n", __func__, __LINE__);
+		return 0;
+	}
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
+		   vfifo=%lu ptap=%u dtap=%u\n", __func__, __LINE__,
+		   BFM_GBL_GET(vfifo_idx), p-1, d);
+	BFM_GBL_SET(dqs_enable_mid[grp].v, BFM_GBL_GET(vfifo_idx));
+	BFM_GBL_SET(dqs_enable_mid[grp].p, p-1);
+	BFM_GBL_SET(dqs_enable_mid[grp].d, d);
+	BFM_GBL_SET(dqs_enable_mid[grp].ps, work_mid);
+	return 1;
+}
+
+/*
+ * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
+ * dq_in_delay values.
+ */
+static inline uint32_t
+rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
+(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
+{
+	uint32_t found;
+	uint32_t i;
+	uint32_t p;
+	uint32_t d;
+	uint32_t r;
+
+	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
+		(RW_MGR_MEM_DQ_PER_READ_DQS-1);
+		/* we start at zero, so have one less dq to divide among */
+
+	debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
+	      test_bgn);
+
+	/* try different dq_in_delays since the dq path is shorter than dqs */
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS;
+			i++, p++, d += delay_step) {
+			debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
+				   vfifo_find_dqs_", __func__, __LINE__);
+			debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
+			       write_group, read_group);
+			debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i, p, d);
+			scc_mgr_set_dq_in_delay(write_group, p, d);
+			scc_mgr_load_dq(p);
+		}
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	}
+
+	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);
+
+	debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
+		   en_phase_sweep_dq", __func__, __LINE__);
+	debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay \
+		   chain to zero\n", write_group, read_group, found);
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
+			i++, p++) {
+			scc_mgr_set_dq_in_delay(write_group, p, 0);
+			scc_mgr_load_dq(p);
+		}
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	}
+
+	return found;
+}
+
+/* per-bit deskew DQ and center */
+static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
+	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
+	uint32_t use_read_test, uint32_t update_fom)
+{
+	uint32_t i, p, d, min_index;
+	/*
+	 * Store these as signed since there are comparisons with
+	 * signed numbers.
+	 */
+	uint32_t bit_chk;
+	uint32_t sticky_bit_chk;
+	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
+	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
+	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
+	int32_t mid;
+	int32_t orig_mid_min, mid_min;
+	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
+		final_dqs_en;
+	int32_t dq_margin, dqs_margin;
+	uint32_t stop;
+	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
+
+	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
+
+	start_dqs = sdr_ops((u32 *)SCC_MGR_DQS_IN_DELAY, read_group << 2, 0,
+			    SDR_READ);
+	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
+		start_dqs_en = sdr_ops((u32 *)SCC_MGR_DQS_EN_DELAY,
+				       (read_group << 2) - IO_DQS_EN_DELAY_OFFSET,
+				       0, SDR_READ);
+
+	/* per-bit deskew */
+
+	/* set the left and right edge of each bit to an illegal value */
+	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
+	sticky_bit_chk = 0;
+	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+		left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
+		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
+	}
+
+	/* Search for the left edge of the window for each bit */
+	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
+		scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);
+
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+		/*
+		 * Stop searching when the read test doesn't pass AND when
+		 * we've seen a passing read on every bit.
+		 */
+		if (use_read_test) {
+			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
+				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
+				&bit_chk, 0, 0);
+		} else {
+			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+							0, PASS_ONE_BIT,
+							&bit_chk, 0);
+			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
+				(read_group - (write_group *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
+			stop = (bit_chk == 0);
+		}
+		sticky_bit_chk = sticky_bit_chk | bit_chk;
+		stop = stop && (sticky_bit_chk == param->read_correct_mask);
+		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => " BTFLD_FMT " == "
+			   BTFLD_FMT " && %u", __func__, __LINE__, d,
+			   sticky_bit_chk,
+			param->read_correct_mask, stop);
+
+		if (stop == 1) {
+			break;
+		} else {
+			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+				if (bit_chk & 1) {
+					/* Remember a passing test as the
+					left_edge */
+					left_edge[i] = d;
+				} else {
+					/* If a left edge has not been seen yet,
+					then a future passing test will mark
+					this edge as the right edge */
+					if (left_edge[i] ==
+						IO_IO_IN_DELAY_MAX + 1) {
+						right_edge[i] = -(d + 1);
+					}
+				}
+				bit_chk = bit_chk >> 1;
+			}
+		}
+	}
+
+	/* Reset DQ delay chains to 0 */
+	scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0);
+	sticky_bit_chk = 0;
+	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
+		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
+			   %d right_edge[%u]: %d\n", __func__, __LINE__,
+			   i, left_edge[i], i, right_edge[i]);
+
+		/*
+		 * Check for cases where we haven't found the left edge,
+		 * which makes our assignment of the right edge invalid.
+		 * Reset it to the illegal value.
+		 */
+		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
+			right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
+			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
+			debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \
+				   right_edge[%u]: %d\n", __func__, __LINE__,
+				   i, right_edge[i]);
+		}
+
+		/*
+		 * Reset sticky bit (except for bits where we have seen
+		 * both the left and right edge).
+		 */
+		sticky_bit_chk = sticky_bit_chk << 1;
+		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
+		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
+			sticky_bit_chk = sticky_bit_chk | 1;
+		}
+
+		if (i == 0)
+			break;
+	}
+
+	/* Search for the right edge of the window for each bit */
+	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
+		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
+		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+			uint32_t delay = d + start_dqs_en;
+			if (delay > IO_DQS_EN_DELAY_MAX)
+				delay = IO_DQS_EN_DELAY_MAX;
+			scc_mgr_set_dqs_en_delay(read_group, delay);
+		}
+		scc_mgr_load_dqs(read_group);
+
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+		/*
+		 * Stop searching when the read test doesn't pass AND when
+		 * we've seen a passing read on every bit.
+		 */
+		if (use_read_test) {
+			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
+				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
+				&bit_chk, 0, 0);
+		} else {
+			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+							0, PASS_ONE_BIT,
+							&bit_chk, 0);
+			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
+				(read_group - (write_group *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
+			stop = (bit_chk == 0);
+		}
+		sticky_bit_chk = sticky_bit_chk | bit_chk;
+		stop = stop && (sticky_bit_chk == param->read_correct_mask);
+
+		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => " BTFLD_FMT " == "
+			   BTFLD_FMT " && %u", __func__, __LINE__, d,
+			   sticky_bit_chk, param->read_correct_mask, stop);
+
+		if (stop == 1) {
+			break;
+		} else {
+			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+				if (bit_chk & 1) {
+					/* Remember a passing test as
+					the right_edge */
+					right_edge[i] = d;
+				} else {
+					if (d != 0) {
+						/* If a right edge has not been
+						seen yet, then a future passing
+						test will mark this edge as the
+						left edge */
+						if (right_edge[i] ==
+						IO_IO_IN_DELAY_MAX + 1) {
+							left_edge[i] = -(d + 1);
+						}
+					} else {
+						/* d = 0 failed, but it passed
+						when testing the left edge,
+						so it must be marginal,
+						set it to -1 */
+						if (right_edge[i] ==
+							IO_IO_IN_DELAY_MAX + 1 &&
+							left_edge[i] !=
+							IO_IO_IN_DELAY_MAX
+							+ 1) {
+							right_edge[i] = -1;
+						}
+						/* If a right edge has not been
+						seen yet, then a future passing
+						test will mark this edge as the
+						left edge */
+						else if (right_edge[i] ==
+							IO_IO_IN_DELAY_MAX +
+							1) {
+							left_edge[i] = -(d + 1);
+						}
+					}
+				}
+
+				debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\
+					   d=%u]: ", __func__, __LINE__, d);
+				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
+					   (int)(bit_chk & 1), i, left_edge[i]);
+				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
+					   right_edge[i]);
+				bit_chk = bit_chk >> 1;
+			}
+		}
+	}
+
+	/* Check that all bits have a window */
+	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
+			   %d right_edge[%u]: %d", __func__, __LINE__,
+			   i, left_edge[i], i, right_edge[i]);
+		BFM_GBL_SET(dq_read_left_edge[read_group][i], left_edge[i]);
+		BFM_GBL_SET(dq_read_right_edge[read_group][i], right_edge[i]);
+		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
+			== IO_IO_IN_DELAY_MAX + 1)) {
+			/*
+			 * Restore delay chain settings before letting the loop
+			 * in rw_mgr_mem_calibrate_vfifo to retry different
+			 * dqs/ck relationships.
+			 */
+			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
+			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+				scc_mgr_set_dqs_en_delay(read_group,
+							 start_dqs_en);
+			}
+			scc_mgr_load_dqs(read_group);
+			sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+			debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \
+				   find edge [%u]: %d %d", __func__, __LINE__,
+				   i, left_edge[i], right_edge[i]);
+			if (use_read_test) {
+				set_failing_group_stage(read_group *
+					RW_MGR_MEM_DQ_PER_READ_DQS + i,
+					CAL_STAGE_VFIFO,
+					CAL_SUBSTAGE_VFIFO_CENTER);
+			} else {
+				set_failing_group_stage(read_group *
+					RW_MGR_MEM_DQ_PER_READ_DQS + i,
+					CAL_STAGE_VFIFO_AFTER_WRITES,
+					CAL_SUBSTAGE_VFIFO_CENTER);
+			}
+			return 0;
+		}
+	}
+
+	/* Find middle of window for each DQ bit */
+	mid_min = left_edge[0] - right_edge[0];
+	min_index = 0;
+	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+		mid = left_edge[i] - right_edge[i];
+		if (mid < mid_min) {
+			mid_min = mid;
+			min_index = i;
+		}
+	}
+
+	/*
+	 * -mid_min/2 represents the amount that we need to move DQS.
+	 * If mid_min is odd and positive we'll need to add one to
+	 * make sure the rounding in further calculations is correct
+	 * (always bias to the right), so just add 1 for all positive values.
+	 */
+	if (mid_min > 0)
+		mid_min++;
+
+	mid_min = mid_min / 2;
+
+	debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
+		   __func__, __LINE__, mid_min, min_index);
+
+	/* Determine the amount we can change DQS (which is -mid_min) */
+	orig_mid_min = mid_min;
+	new_dqs = start_dqs - mid_min;
+	if (new_dqs > IO_DQS_IN_DELAY_MAX)
+		new_dqs = IO_DQS_IN_DELAY_MAX;
+	else if (new_dqs < 0)
+		new_dqs = 0;
+
+	mid_min = start_dqs - new_dqs;
+	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
+		   mid_min, new_dqs);
+
+	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
+			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
+		else if (start_dqs_en - mid_min < 0)
+			mid_min += start_dqs_en - mid_min;
+	}
+	new_dqs = start_dqs - mid_min;
+
+	debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
+		   new_dqs=%d mid_min=%d\n", start_dqs,
+		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
+		   new_dqs, mid_min);
+
+	/* Initialize data for export structures */
+	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
+	dq_margin  = IO_IO_IN_DELAY_MAX + 1;
+
+	/* add delay to bring centre of all DQ windows to the same "level" */
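+	/*
+	 * shift_dq gives each DQ the extra input delay needed to line up the
+	 * centre of its window with that of the reference bit (min_index),
+	 * adjusted by any clamping applied to the DQS move
+	 * (orig_mid_min - mid_min).
+	 */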
+	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
+		/* Use values before divide by 2 to reduce round off error */
+		shift_dq = (left_edge[i] - right_edge[i] -
+			(left_edge[min_index] - right_edge[min_index]))/2  +
+			(orig_mid_min - mid_min);
+
+		debug_cond(DLEVEL == 2, "vfifo_center: before: \
+			   shift_dq[%u]=%d\n", i, shift_dq);
+
+		temp_dq_in_delay1 = sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY, p << 2,
+					    0, SDR_READ);
+		temp_dq_in_delay2 = sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY, i << 2,
+					    0, SDR_READ);
+		if (shift_dq + (int32_t)temp_dq_in_delay1 >
+			(int32_t)IO_IO_IN_DELAY_MAX) {
+			shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
+		} else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
+			shift_dq = -(int32_t)temp_dq_in_delay1;
+		}
+		debug_cond(DLEVEL == 2, "vfifo_center: after: \
+			   shift_dq[%u]=%d\n", i, shift_dq);
+		final_dq[i] = temp_dq_in_delay1 + shift_dq;
+		scc_mgr_set_dq_in_delay(write_group, p, final_dq[i]);
+		scc_mgr_load_dq(p);
+
+		debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
+			   left_edge[i] - shift_dq + (-mid_min),
+			   right_edge[i] + shift_dq - (-mid_min));
+		/* To determine values for export structures */
+		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
+			dq_margin = left_edge[i] - shift_dq + (-mid_min);
+
+		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
+			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
+	}
+
+	final_dqs = new_dqs;
+	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
+		final_dqs_en = start_dqs_en - mid_min;
+
+	/* Move DQS-en */
+	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+		scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
+		scc_mgr_load_dqs(read_group);
+	}
+
+	/* Move DQS */
+	scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
+	scc_mgr_load_dqs(read_group);
+	debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
+		   dqs_margin=%d", __func__, __LINE__,
+		   dq_margin, dqs_margin);
+
+	/*
+	 * Do not remove this line as it makes sure all of our decisions
+	 * have been applied.
+	 */
+	sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	return (dq_margin >= 0) && (dqs_margin >= 0);
+}
+
+/*
+ * calibrate the read valid prediction FIFO.
+ *
+ *  - read valid prediction will consist of finding a good DQS enable phase,
+ * DQS enable delay, DQS input phase, and DQS input delay.
+ *  - we also do a per-bit deskew on the DQ lines.
+ */
+static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
+					   uint32_t test_bgn)
+{
+	uint32_t p, d, rank_bgn, sr;
+	uint32_t dtaps_per_ptap;
+	uint32_t tmp_delay;
+	uint32_t bit_chk;
+	uint32_t grp_calibrated;
+	uint32_t write_group, write_test_bgn;
+	uint32_t failed_substage;
+
+	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
+
+	/* update info for sims */
+	reg_file_set_stage(CAL_STAGE_VFIFO);
+
+	write_group = read_group;
+	write_test_bgn = test_bgn;
+
+	/* Determine number of delay taps for each phase tap */
+	dtaps_per_ptap = 0;
+	tmp_delay = 0;
+	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
+		dtaps_per_ptap++;
+		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+	}
+	dtaps_per_ptap--;
+	tmp_delay = 0;
+
+	/* update info for sims */
+	reg_file_set_group(read_group);
+
+	grp_calibrated = 0;
+
+	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
+	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
+
+	for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
+		/*
+		 * In RLDRAMX we may be messing with the delay of pins in
+		 * the same write group but outside of the current read
+		 * group, but that's OK because we haven't calibrated the
+		 * output side yet.
+		 */
+		if (d > 0) {
+			scc_mgr_apply_group_all_out_delay_add_all_ranks
+			(write_group, write_test_bgn, d);
+		}
+
+		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
+			p++) {
+			/* set a particular dqdqs phase */
+			scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);
+
+			debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
+				   p=%u d=%u\n", __func__, __LINE__,
+				   read_group, p, d);
+			BFM_GBL_SET(gwrite_pos[read_group].p, p);
+			BFM_GBL_SET(gwrite_pos[read_group].d, d);
+
+			/*
+			 * Load up the patterns used by read calibration
+			 * using current DQDQS phase.
+			 */
+			rw_mgr_mem_calibrate_read_load_patterns(0, 1);
+			if (!(gbl->phy_debug_mode_flags &
+				PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
+				if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
+				    (read_group, 1, &bit_chk)) {
+					debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
+						   __func__, __LINE__);
+					debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
+						   read_group, p, d);
+					break;
+				}
+			}
+
+			/* case:56390 */
+			grp_calibrated = 1;
+	if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
+		(write_group, read_group, test_bgn)) {
+				/*
+				 * Read per-bit deskew can be done on a
+				 * per-shadow-register basis.
+				 */
+				for (rank_bgn = 0, sr = 0;
+					rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+					rank_bgn += NUM_RANKS_PER_SHADOW_REG,
+					++sr) {
+					/*
+					 * Determine if this set of ranks
+					 * should be skipped entirely.
+					 */
+					if (!param->skip_shadow_regs[sr]) {
+						/*
+						 * If doing read after write
+						 * calibration, do not update
+						 * FOM now - do it then.
+						 */
+					if (!rw_mgr_mem_calibrate_vfifo_center
+						(rank_bgn, write_group,
+						read_group, test_bgn, 1, 0)) {
+							grp_calibrated = 0;
+							failed_substage =
+						CAL_SUBSTAGE_VFIFO_CENTER;
+						}
+					}
+				}
+			} else {
+				grp_calibrated = 0;
+				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
+			}
+		}
+	}
+
+	if (grp_calibrated == 0) {
+		set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
+					failed_substage);
+		return 0;
+	}
+
+	/*
+	 * Reset the delay chains back to zero if they have moved > 1
+	 * (check for > 1 because loop will increase d even when pass in
+	 * first case).
+	 */
+	if (d > 2)
+		scc_mgr_zero_group(write_group, write_test_bgn, 1);
+
+	return 1;
+}
+
+/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
+static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
+					       uint32_t test_bgn)
+{
+	uint32_t rank_bgn, sr;
+	uint32_t grp_calibrated;
+	uint32_t write_group;
+
+	debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
+
+	/* update info for sims */
+
+	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
+
+	write_group = read_group;
+
+	/* update info for sims */
+	reg_file_set_group(read_group);
+
+	grp_calibrated = 1;
+	/* Read per-bit deskew can be done on a per shadow register basis */
+	for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+		rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
+		/* Determine if this set of ranks should be skipped entirely */
+		if (!param->skip_shadow_regs[sr]) {
+		/* This is the last calibration round, update FOM here */
+			if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
+								write_group,
+								read_group,
+								test_bgn, 0,
+								1)) {
+				grp_calibrated = 0;
+			}
+		}
+	}
+
+	if (grp_calibrated == 0) {
+		set_failing_group_stage(write_group,
+					CAL_STAGE_VFIFO_AFTER_WRITES,
+					CAL_SUBSTAGE_VFIFO_CENTER);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Calibrate LFIFO to find smallest read latency */
+static uint32_t rw_mgr_mem_calibrate_lfifo(void)
+{
+	uint32_t found_one;
+	uint32_t bit_chk;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	BFM_STAGE("lfifo");
+
+	/* update info for sims */
+	reg_file_set_stage(CAL_STAGE_LFIFO);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
+
+	/* Load up the patterns used by read calibration for all ranks */
+	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
+	found_one = 0;
+
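+	/*
+	 * Walk the read latency down one step at a time until the read test
+	 * fails, then add a fudge factor to the last value tried and program
+	 * that below.
+	 */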
+	do {
+		sdr_ops((u32 *)PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat, SDR_WRITE);
+		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
+			   __func__, __LINE__, gbl->curr_read_lat);
+
+		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
+							      NUM_READ_TESTS,
+							      PASS_ALL_BITS,
+							      &bit_chk, 1)) {
+			break;
+		}
+
+		found_one = 1;
+		/* reduce read latency and see if things are working correctly */
+		gbl->curr_read_lat--;
+	} while (gbl->curr_read_lat > 0);
+
+	/* reset the fifos to get pointers to known state */
+
+	sdr_ops((u32 *)PHY_MGR_CMD_FIFO_RESET, 0, 0, SDR_WRITE);
+
+	if (found_one) {
+		/* add a fudge factor to the read latency that was determined */
+		gbl->curr_read_lat += 2;
+		sdr_ops((u32 *)PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat, SDR_WRITE);
+		debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
+			   read_lat=%u\n", __func__, __LINE__,
+			   gbl->curr_read_lat);
+		return 1;
+	} else {
+		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
+					CAL_SUBSTAGE_READ_LATENCY);
+
+		debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
+			   read_lat=%u\n", __func__, __LINE__,
+			   gbl->curr_read_lat);
+		return 0;
+	}
+}
+
+/*
+ * Issue a write test command.
+ * Two variants are provided: one that just tests a write pattern and
+ * another that tests data mask functionality.
+ */
+
+static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
+						  uint32_t test_dm)
+{
+	uint32_t mcc_instruction;
+	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
+		ENABLE_SUPER_QUICK_CALIBRATION);
+	uint32_t rw_wl_nop_cycles;
+
+	/*
+	 * Set counter and jump addresses for the right
+	 * number of NOP cycles.
+	 * The number of supported NOP cycles can range from -1 to infinity.
+	 * Three different cases are handled:
+	 *
+	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
+	 *    mechanism will be used to insert the right number of NOPs.
+	 *
+	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
+	 *    issuing the write command will jump straight to the
+	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
+	 *    data (for RLD), skipping the NOP micro-instruction altogether.
+	 *
+	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
+	 *    turned on in the same micro-instruction that issues the write
+	 *    command. Then we need to directly jump to the micro-instruction
+	 *    that sends out the data.
+	 *
+	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
+	 *       (2 and 3). One jump-counter (0) is used to perform multiple
+	 *       write-read operations, leaving one counter to issue this
+	 *       command in "multiple-group" mode.
+	 */
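+
+	/*
+	 * For example, with rw_wl_nop_cycles == 3 the third case below
+	 * programs CNTR 3 with 2; since the jump is taken one more time than
+	 * the counter value, the NOP micro-instruction runs for the requested
+	 * three cycles.
+	 */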
+
+	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
+
+	if (rw_wl_nop_cycles == -1) {
+		/*
+		 * CNTR 2 - We want to execute the special write operation that
+		 * turns on DQS right away and then skip directly to the
+		 * instruction that sends out the data. We set the counter to a
+		 * large number so that the jump is always taken.
+		 */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0xFF, SDR_WRITE);
+
+		/* CNTR 3 - Not used */
+		if (test_dm) {
+			mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2,
+				0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA, SDR_WRITE);
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3,
+				0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, SDR_WRITE);
+		} else {
+			mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2,
+				0, __RW_MGR_LFSR_WR_RD_BANK_0_DATA, SDR_WRITE);
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3,
+				0, __RW_MGR_LFSR_WR_RD_BANK_0_NOP, SDR_WRITE);
+		}
+	} else if (rw_wl_nop_cycles == 0) {
+		/*
+		 * CNTR 2 - We want to skip the NOP operation and go straight
+		 * to the DQS enable instruction. We set the counter to a large
+		 * number so that the jump is always taken.
+		 */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0xFF, SDR_WRITE);
+
+		/* CNTR 3 - Not used */
+		if (test_dm) {
+			mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2,
+				0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS, SDR_WRITE);
+		} else {
+			mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2,
+				0, __RW_MGR_LFSR_WR_RD_BANK_0_DQS, SDR_WRITE);
+		}
+	} else {
+		/*
+		 * CNTR 2 - In this case we want to execute the next
+		 * instruction and NOT take the jump, so we set the counter
+		 * to 0. The jump address doesn't matter.
+		 */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0x0, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+			0x0, SDR_WRITE);
+
+		/*
+		 * CNTR 3 - Set the NOP counter to the number of cycles we
+		 * need to loop for, minus 1.
+		 */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr3, 0,
+			rw_wl_nop_cycles - 1, SDR_WRITE);
+		if (test_dm) {
+			mcc_instruction = __RW_MGR_LFSR_WR_RD_DM_BANK_0;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3,
+				0, __RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, SDR_WRITE);
+		} else {
+			mcc_instruction = __RW_MGR_LFSR_WR_RD_BANK_0;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3,
+				0, __RW_MGR_LFSR_WR_RD_BANK_0_NOP, SDR_WRITE);
+		}
+	}
+
+	sdr_ops((u32 *)RW_MGR_RESET_READ_DATAPATH, 0, 0, SDR_WRITE);
+
+	if (quick_write_mode)
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x08, SDR_WRITE);
+	else
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x40, SDR_WRITE);
+
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+		mcc_instruction, SDR_WRITE);
+
+	/*
+	 * CNTR 1 - This is used to ensure enough time elapses
+	 * for read data to come back.
+	 */
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x30, SDR_WRITE);
+
+	if (test_dm) {
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			__RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT, SDR_WRITE);
+	} else {
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			__RW_MGR_LFSR_WR_RD_BANK_0_WAIT, SDR_WRITE);
+	}
+
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, (group << 2), mcc_instruction,
+		SDR_WRITE);
+}
+
+/* Test writes, can check for a single bit pass or multiple bit pass */
+static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
+	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
+	uint32_t *bit_chk, uint32_t all_ranks)
+{
+	uint32_t r;
+	uint32_t correct_mask_vg;
+	uint32_t tmp_bit_chk;
+	uint32_t vg;
+	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+
+	*bit_chk = param->write_correct_mask;
+	correct_mask_vg = param->write_correct_mask_vg;
+
+	for (r = rank_bgn; r < rank_end; r++) {
+		if (param->skip_ranks[r]) {
+			/* request to skip the rank */
+
+			continue;
+		}
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+		tmp_bit_chk = 0;
+		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
+			/* reset the fifos to get pointers to known state */
+			sdr_ops((u32 *)PHY_MGR_CMD_FIFO_RESET, 0, 0, SDR_WRITE);
+
+			tmp_bit_chk = tmp_bit_chk <<
+				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
+				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
+			rw_mgr_mem_calibrate_write_test_issue(write_group *
+				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
+				use_dm);
+
+			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg &
+				~(sdr_ops((u32 *)BASE_RW_MGR, 0, 0, SDR_READ)));
+			if (vg == 0)
+				break;
+		}
+		*bit_chk &= tmp_bit_chk;
+	}
+
+	if (all_correct) {
+		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+		debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : " BTFLD_FMT " == "
+			BTFLD_FMT " => %lu", write_group, use_dm,
+			*bit_chk, param->write_correct_mask,
+			(long unsigned int)(*bit_chk ==
+			param->write_correct_mask));
+		return *bit_chk == param->write_correct_mask;
+	} else {
+		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : " BTFLD_FMT " != ",
+		       write_group, use_dm, *bit_chk);
+		debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
+			(long unsigned int)(*bit_chk != 0));
+		return *bit_chk != 0x00;
+	}
+}
+
+static inline uint32_t rw_mgr_mem_calibrate_write_test_all_ranks
+(uint32_t write_group, uint32_t use_dm, uint32_t all_correct, uint32_t *bit_chk)
+{
+	return rw_mgr_mem_calibrate_write_test(0, write_group,
+		use_dm, all_correct, bit_chk, 1);
+}
+
+/*
+ * Center all windows. Do per-bit deskew to possibly increase the size of
+ * certain windows.
+ */
+static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
+	uint32_t write_group, uint32_t test_bgn)
+{
+	uint32_t i, p, min_index;
+	int32_t d;
+	/*
+	 * Store these as signed since there are comparisons with
+	 * signed numbers.
+	 */
+	uint32_t bit_chk;
+	uint32_t sticky_bit_chk;
+	int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
+	int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
+	int32_t mid;
+	int32_t mid_min, orig_mid_min;
+	int32_t new_dqs, start_dqs, shift_dq;
+	int32_t dq_margin, dqs_margin, dm_margin;
+	uint32_t stop;
+	uint32_t temp_dq_out1_delay;
+
+	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
+	BFM_STAGE("writes_center");
+
+	dm_margin = 0;
+
+	start_dqs = sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY,
+			    (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2), 0, SDR_READ);
+
+	/* per-bit deskew */
+
+	/*
+	 * set the left and right edge of each bit to an illegal value
+	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
+	 */
+	sticky_bit_chk = 0;
+	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+		left_edge[i]  = IO_IO_OUT1_DELAY_MAX + 1;
+		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
+	}
+
+	/* Search for the left edge of the window for each bit */
+	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
+		scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d);
+
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+		/*
+		 * Stop searching when the write test doesn't pass AND when
+		 * we've seen a pass on every bit.
+		 */
+		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+			0, PASS_ONE_BIT, &bit_chk, 0);
+		sticky_bit_chk = sticky_bit_chk | bit_chk;
+		stop = stop && (sticky_bit_chk == param->write_correct_mask);
+		debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => " BTFLD_FMT
+			" == " BTFLD_FMT " && %u [bit_chk=" BTFLD_FMT "]\n",
+			d, sticky_bit_chk, param->write_correct_mask,
+			stop, bit_chk);
+
+		if (stop == 1) {
+			break;
+		} else {
+			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+				if (bit_chk & 1) {
+					/*
+					 * Remember a passing test as the
+					 * left_edge.
+					 */
+					left_edge[i] = d;
+				} else {
+					/*
+					 * If a left edge has not been seen
+					 * yet, then a future passing test will
+					 * mark this edge as the right edge.
+					 */
+					if (left_edge[i] ==
+						IO_IO_OUT1_DELAY_MAX + 1) {
+						right_edge[i] = -(d + 1);
+					}
+				}
+				debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d);
+				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
+					   (int)(bit_chk & 1), i, left_edge[i]);
+				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
+				       right_edge[i]);
+				bit_chk = bit_chk >> 1;
+			}
+		}
+	}
+
+	/* Reset DQ delay chains to 0 */
+	scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0);
+	sticky_bit_chk = 0;
+	for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
+		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
+			   %d right_edge[%u]: %d\n", __func__, __LINE__,
+			   i, left_edge[i], i, right_edge[i]);
+
+		/*
+		 * Check for cases where we haven't found the left edge,
+		 * which makes our assignment of the right edge invalid.
+		 * Reset it to the illegal value.
+		 */
+		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
+		    (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
+			right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
+			debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
+				   right_edge[%u]: %d\n", __func__, __LINE__,
+				   i, right_edge[i]);
+		}
+
+		/*
+		 * Reset sticky bit (except for bits where we have
+		 * seen the left edge).
+		 */
+		sticky_bit_chk = sticky_bit_chk << 1;
+		if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
+			sticky_bit_chk = sticky_bit_chk | 1;
+
+		if (i == 0)
+			break;
+	}
+
+	/* Search for the right edge of the window for each bit */
+	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
+		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
+							d + start_dqs);
+
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+		/*
+		 * Stop searching when the write test doesn't pass AND when
+		 * we've seen a pass on every bit.
+		 */
+		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+			0, PASS_ONE_BIT, &bit_chk, 0);
+
+		sticky_bit_chk = sticky_bit_chk | bit_chk;
+		stop = stop && (sticky_bit_chk == param->write_correct_mask);
+
+		debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => " BTFLD_FMT " == "
+			   BTFLD_FMT " && %u\n", d, sticky_bit_chk,
+			   param->write_correct_mask, stop);
+
+		if (stop == 1) {
+			if (d == 0) {
+				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
+					i++) {
+					/* d = 0 failed, but it passed when
+					testing the left edge, so it must be
+					marginal, set it to -1 */
+					if (right_edge[i] ==
+						IO_IO_OUT1_DELAY_MAX + 1 &&
+						left_edge[i] !=
+						IO_IO_OUT1_DELAY_MAX + 1) {
+						right_edge[i] = -1;
+					}
+				}
+			}
+			break;
+		} else {
+			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+				if (bit_chk & 1) {
+					/*
+					 * Remember a passing test as
+					 * the right_edge.
+					 */
+					right_edge[i] = d;
+				} else {
+					if (d != 0) {
+						/*
+						 * If a right edge has not
+						 * been seen yet, then a future
+						 * passing test will mark this
+						 * edge as the left edge.
+						 */
+						if (right_edge[i] ==
+						    IO_IO_OUT1_DELAY_MAX + 1)
+							left_edge[i] = -(d + 1);
+					} else {
+						/*
+						 * d = 0 failed, but it passed
+						 * when testing the left edge,
+						 * so it must be marginal, set
+						 * it to -1.
+						 */
+						if (right_edge[i] ==
+						    IO_IO_OUT1_DELAY_MAX + 1 &&
+						    left_edge[i] !=
+						    IO_IO_OUT1_DELAY_MAX + 1)
+							right_edge[i] = -1;
+						/*
+						 * If a right edge has not been
+						 * seen yet, then a future
+						 * passing test will mark this
+						 * edge as the left edge.
+						 */
+						else if (right_edge[i] ==
+							IO_IO_OUT1_DELAY_MAX +
+							1)
+							left_edge[i] = -(d + 1);
+					}
+				}
+				debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d);
+				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
+					   (int)(bit_chk & 1), i, left_edge[i]);
+				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
+					   right_edge[i]);
+				bit_chk = bit_chk >> 1;
+			}
+		}
+	}
+
+	/* Check that all bits have a window */
+	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
+			   %d right_edge[%u]: %d", __func__, __LINE__,
+			   i, left_edge[i], i, right_edge[i]);
+		BFM_GBL_SET(dq_write_left_edge[write_group][i], left_edge[i]);
+		BFM_GBL_SET(dq_write_right_edge[write_group][i], right_edge[i]);
+		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
+		    (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
+			set_failing_group_stage(test_bgn + i,
+						CAL_STAGE_WRITES,
+						CAL_SUBSTAGE_WRITES_CENTER);
+			return 0;
+		}
+	}
+
+	/* Find middle of window for each DQ bit */
+	mid_min = left_edge[0] - right_edge[0];
+	min_index = 0;
+	for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+		mid = left_edge[i] - right_edge[i];
+		if (mid < mid_min) {
+			mid_min = mid;
+			min_index = i;
+		}
+	}
+
+	/*
+	 * -mid_min/2 represents the amount that we need to move DQS.
+	 * If mid_min is odd and positive we'll need to add one to
+	 * make sure the rounding in further calculations is correct
+	 * (always bias to the right), so just add 1 for all positive values.
+	 */
+	if (mid_min > 0)
+		mid_min++;
+	mid_min = mid_min / 2;
+	debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
+		   __LINE__, mid_min);
+
+	/* Determine the amount we can change DQS (which is -mid_min) */
+	orig_mid_min = mid_min;
+	new_dqs = start_dqs;
+	mid_min = 0;
+	debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
+		   mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
+	/* Initialize data for export structures */
+	dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
+	dq_margin  = IO_IO_OUT1_DELAY_MAX + 1;
+
+	/* add delay to bring centre of all DQ windows to the same "level" */
+	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
+		/* Use values before divide by 2 to reduce round off error */
+		shift_dq = (left_edge[i] - right_edge[i] -
+			(left_edge[min_index] - right_edge[min_index]))/2  +
+		(orig_mid_min - mid_min);
+
+		debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
+			   [%u]=%d\n", __func__, __LINE__, i, shift_dq);
+
+		temp_dq_out1_delay = sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY, i << 2,
+					     0, SDR_READ);
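+		/*
+		 * Clamp the shift so the resulting output delay stays within
+		 * 0..IO_IO_OUT1_DELAY_MAX.
+		 */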
+		if (shift_dq + (int32_t)temp_dq_out1_delay >
+			(int32_t)IO_IO_OUT1_DELAY_MAX) {
+			shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
+		} else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
+			shift_dq = -(int32_t)temp_dq_out1_delay;
+		}
+		debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
+			   i, shift_dq);
+		scc_mgr_set_dq_out1_delay(write_group, i, temp_dq_out1_delay +
+					  shift_dq);
+		scc_mgr_load_dq(i);
+
+		debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
+			   left_edge[i] - shift_dq + (-mid_min),
+			   right_edge[i] + shift_dq - (-mid_min));
+		/* To determine values for export structures */
+		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
+			dq_margin = left_edge[i] - shift_dq + (-mid_min);
+
+		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
+			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
+	}
+
+	/* Move DQS */
+	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
+	sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+	/* Centre DM */
+	BFM_STAGE("dm_center");
+	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
+
+	/*
+	 * Set the left and right edge of each bit to an illegal value;
+	 * use (IO_IO_OUT1_DELAY_MAX + 1) as the illegal value.
+	 */
+	left_edge[0]  = IO_IO_OUT1_DELAY_MAX + 1;
+	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t win_best = 0;
+
+	/* Search for the window (or part of it) with the DM shift */
+	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
+		scc_mgr_apply_group_dm_out1_delay(write_group, d);
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
+						    PASS_ALL_BITS, &bit_chk,
+						    0)) {
+			/* USER Set current end of the window */
+			end_curr = -d;
+			/*
+			 * If a starting edge of our window has not been seen,
+			 * this is our current start of the DM window.
+			 */
+			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
+				bgn_curr = -d;
+
+			/*
+			 * If the current window is bigger than the best seen
+			 * so far, make it the new best.
+			 */
+			if ((end_curr-bgn_curr+1) > win_best) {
+				win_best = end_curr-bgn_curr+1;
+				bgn_best = bgn_curr;
+				end_best = end_curr;
+			}
+		} else {
+			/* We just saw a failing test. Reset temp edge */
+			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+		}
+	}
+
+	/* Reset DM delay chains to 0 */
+	scc_mgr_apply_group_dm_out1_delay(write_group, 0);
+
+	/*
+	 * Check to see if the current window nudges up against 0 delay.
+	 * If so, we need to continue the search by shifting DQS; otherwise
+	 * the DQS search begins as a new search.
+	 */
+	if (end_curr != 0) {
+		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+	}
+
+	/* Search for the window (or part of it) with DQS shifts */
+	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
+		/*
+		 * Note: This only shifts DQS, so we may be limiting ourselves
+		 * to the width of the DQ window unnecessarily.
+		 */
+		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
+							d + new_dqs);
+
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
+						    PASS_ALL_BITS, &bit_chk,
+						    0)) {
+			/* USER Set current end of the window */
+			end_curr = d;
+			/*
+			 * If a beginning edge of our window has not been seen,
+			 * this is the current beginning of the DM window.
+			 */
+			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
+				bgn_curr = d;
+
+			/*
+			 * If the current window is bigger than the best seen
+			 * so far, make it the new best.
+			 */
+			if ((end_curr-bgn_curr+1) > win_best) {
+				win_best = end_curr-bgn_curr+1;
+				bgn_best = bgn_curr;
+				end_best = end_curr;
+			}
+		} else {
+			/* We just saw a failing test. Reset temp edge */
+			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+
+			/*
+			 * Early exit optimization: if the remaining delay
+			 * chain space is less than the largest window seen
+			 * so far, we can exit.
+			 */
+			if ((win_best - 1) >
+			    (IO_IO_OUT1_DELAY_MAX - new_dqs - d))
+				break;
+		}
+	}
+
+	/* Assign the left and right edge for calibration and reporting */
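+	/*
+	 * Edges found during the DM-shift sweep were recorded as negative
+	 * delays, which is why bgn_best is negated here.
+	 */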
+	left_edge[0] = -1*bgn_best;
+	right_edge[0] = end_best;
+
+	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
+		   __LINE__, left_edge[0], right_edge[0]);
+	BFM_GBL_SET(dm_left_edge[write_group][0], left_edge[0]);
+	BFM_GBL_SET(dm_right_edge[write_group][0], right_edge[0]);
+
+	/* Move DQS (back to orig) */
+	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
+
+	/* Move DM */
+
+	/* Find middle of window for the DM bit */
+	mid = (left_edge[0] - right_edge[0]) / 2;
+
+	/* only move right, since we are not moving DQS/DQ */
+	if (mid < 0)
+		mid = 0;
+
+	/* dm_margin should fail if we never find a window */
+	if (win_best == 0)
+		dm_margin = -1;
+	else
+		dm_margin = left_edge[0] - mid;
+
+	scc_mgr_apply_group_dm_out1_delay(write_group, mid);
+	sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
+		   dm_margin=%d\n", __func__, __LINE__, left_edge[0],
+		   right_edge[0], mid, dm_margin);
+	/* Export values */
+	gbl->fom_out += dq_margin + dqs_margin;
+
+	debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
+		   dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
+		   dq_margin, dqs_margin, dm_margin);
+
+	/*
+	 * Do not remove this line as it makes sure all of our
+	 * decisions have been applied.
+	 */
+	sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
+}
+
+/* calibrate the write operations */
+static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
+	uint32_t test_bgn)
+{
+	/* update info for sims */
+	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);
+
+	reg_file_set_stage(CAL_STAGE_WRITES);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
+
+	reg_file_set_group(g);
+
+	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
+		set_failing_group_stage(g, CAL_STAGE_WRITES,
+					CAL_SUBSTAGE_WRITES_CENTER);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
+static void mem_precharge_and_activate(void)
+{
+	uint32_t r;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
+		if (param->skip_ranks[r]) {
+			/* request to skip the rank */
+
+			continue;
+		}
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
+
+		/* precharge all banks ... */
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+			__RW_MGR_PRECHARGE_ALL, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x0F, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			__RW_MGR_ACTIVATE_0_AND_1_WAIT1, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x0F, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			__RW_MGR_ACTIVATE_0_AND_1_WAIT2, SDR_WRITE);
+
+		/* activate rows */
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+			__RW_MGR_ACTIVATE_0_AND_1, SDR_WRITE);
+	}
+}
+
+/* Configure various memory related parameters. */
+static void mem_config(void)
+{
+	uint32_t rlat, wlat;
+	uint32_t rw_wl_nop_cycles;
+	uint32_t max_latency;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	/* read in write and read latency */
+	wlat = sdr_ops((u32 *)MEM_T_WL_ADD, 0, 0, SDR_READ);
+	wlat += sdr_ops((u32 *)DATA_MGR_MEM_T_ADD, 0, 0, SDR_READ);
+	/* WL for hard phy does not include additive latency */
+
+	/*
+	 * Add additional write latency to offset the address/command extra
+	 * clock cycle. We change the AC mux setting, causing AC to be delayed
+	 * by one mem clock cycle. Only do this for DDR3.
+	 */
+	wlat = wlat + 1;
+
+	rlat = sdr_ops((u32 *)MEM_T_RL_ADD, 0, 0, SDR_READ);
+
+	rw_wl_nop_cycles = wlat - 2;
+	gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;
+
+	/*
+	 * For AV/CV, lfifo is hardened and always runs at full rate so
+	 * max latency in AFI clocks, used here, is correspondingly smaller.
+	 */
+	max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1;
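+	/*
+	 * This is the largest value that a MAX_LATENCY_COUNT_WIDTH-bit
+	 * counter can hold.
+	 */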
+	/* configure for a burst length of 8 */
+
+	/* write latency */
+	/* Adjust Write Latency for Hard PHY */
+	wlat = wlat + 1;
+	/* set a pretty high read latency initially */
+	gbl->curr_read_lat = rlat + 16;
+
+	if (gbl->curr_read_lat > max_latency)
+		gbl->curr_read_lat = max_latency;
+
+	sdr_ops((u32 *)PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat, SDR_WRITE);
+
+	/* advertise write latency */
+	gbl->curr_write_lat = wlat;
+	sdr_ops((u32 *)PHY_MGR_AFI_WLAT, 0, wlat - 2, SDR_WRITE);
+	/* initialize bit slips */
+	mem_precharge_and_activate();
+}
+
+/* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
+static void mem_skip_calibrate(void)
+{
+	uint32_t vfifo_offset;
+	uint32_t i, j, r;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	/* Need to update every shadow register set used by the interface */
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		/*
+		 * Set output phase alignment settings appropriate for
+		 * skip calibration.
+		 */
+		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+			scc_mgr_set_dqs_en_phase(i, 0);
+#if IO_DLL_CHAIN_LENGTH == 6
+			scc_mgr_set_dqdqs_output_phase(i, 6);
+#else
+			scc_mgr_set_dqdqs_output_phase(i, 7);
+#endif
+			/*
+			 * Case:33398
+			 *
+			 * Write data arrives to the I/O two cycles before write
+			 * latency is reached (720 deg).
+			 *   -> due to bit-slip in a/c bus
+			 *   -> to allow board skew where dqs is longer than ck
+			 *      -> how often can this happen!?
+			 *      -> can claim back some ptaps for high freq
+			 *       support if we can relax this, but i digress...
+			 *
+			 * The write_clk leads mem_ck by 90 deg
+			 * The minimum ptap of the OPA is 180 deg
+			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
+			 * The write_clk is always delayed by 2 ptaps
+			 *
+			 * Hence, to make DQS aligned to CK, we need to delay
+			 * DQS by:
+			 *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
+			 *
+			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
+			 * gives us the number of ptaps, which simplifies to:
+			 *
+			 *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
+			 */
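+			/*
+			 * For example, IO_DLL_CHAIN_LENGTH = 8 gives
+			 * 1.25 * 8 - 2 = 8 ptaps of delay.
+			 */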
+			scc_mgr_set_dqdqs_output_phase(i, (1.25 *
+				IO_DLL_CHAIN_LENGTH - 2));
+		}
+		sdr_ops((u32 *)SCC_MGR_DQS_ENA, 0, 0xff, SDR_WRITE);
+		sdr_ops((u32 *)SCC_MGR_DQS_IO_ENA, 0, 0xff, SDR_WRITE);
+
+		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
+			sdr_ops((u32 *)SCC_MGR_GROUP_COUNTER, 0, i, SDR_WRITE);
+			sdr_ops((u32 *)SCC_MGR_DQ_ENA, 0, 0xff, SDR_WRITE);
+			sdr_ops((u32 *)SCC_MGR_DM_ENA, 0, 0xff, SDR_WRITE);
+		}
+		sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	}
+
+	/* Compensate for simulation model behaviour */
+	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+		scc_mgr_set_dqs_bus_in_delay(i, 10);
+		scc_mgr_load_dqs(i);
+	}
+	sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+
+	/*
+	 * ArriaV has hard FIFOs that can only be initialized by incrementing
+	 * them in the sequencer.
+	 */
+	vfifo_offset = CALIB_VFIFO_OFFSET;
+	for (j = 0; j < vfifo_offset; j++) {
+		sdr_ops((u32 *)PHY_MGR_CMD_INC_VFIFO_HARD_PHY, 0, 0xff, SDR_WRITE);
+	}
+	sdr_ops((u32 *)PHY_MGR_CMD_FIFO_RESET, 0, 0, SDR_WRITE);
+
+	/*
+	 * For ACV with hard lfifo, we get the skip-cal setting from
+	 * a generation-time constant.
+	 */
+	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
+	sdr_ops((u32 *)PHY_MGR_PHY_RLAT, 0, gbl->curr_read_lat, SDR_WRITE);
+}
+
+/* Memory calibration entry point */
+static uint32_t mem_calibrate(void)
+{
+	uint32_t i;
+	uint32_t rank_bgn, sr;
+	uint32_t write_group, write_test_bgn;
+	uint32_t read_group, read_test_bgn;
+	uint32_t run_groups, current_run;
+	uint32_t failing_groups = 0;
+	uint32_t group_failed = 0;
+	uint32_t sr_failed = 0;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	/* Initialize the data settings */
+
+	gbl->error_substage = CAL_SUBSTAGE_NIL;
+	gbl->error_stage = CAL_STAGE_NIL;
+	gbl->error_group = 0xff;
+	gbl->fom_in = 0;
+	gbl->fom_out = 0;
+
+	mem_config();
+
+	uint32_t bypass_mode = 0x1;
+	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+		sdr_ops((u32 *)SCC_MGR_GROUP_COUNTER, 0, i, SDR_WRITE);
+		scc_set_bypass_mode(i, bypass_mode);
+	}
+
+	if (((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
+		/*
+		 * Set VFIFO and LFIFO to instant-on settings in skip
+		 * calibration mode.
+		 */
+		mem_skip_calibrate();
+	} else {
+		for (i = 0; i < NUM_CALIB_REPEAT; i++) {
+			/*
+			 * Zero all delay chain/phase settings for all
+			 * groups and all shadow register sets.
+			 */
+			scc_mgr_zero_all();
+
+			run_groups = ~param->skip_groups;
+
+			for (write_group = 0, write_test_bgn = 0; write_group
+				< RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
+				write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
+				/* Initialize the group failure flag */
+				group_failed = 0;
+
+				BFM_GBL_SET(vfifo_idx, 0);
+				current_run = run_groups & ((1 <<
+					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
+				run_groups = run_groups >>
+					RW_MGR_NUM_DQS_PER_WRITE_GROUP;
+
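+				/*
+				 * Skip this write group if none of its bits
+				 * are set in run_groups.
+				 */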
+				if (current_run == 0)
+					continue;
+
+				sdr_ops((u32 *)SCC_MGR_GROUP_COUNTER, 0,
+					write_group, SDR_WRITE);
+				scc_mgr_zero_group(write_group, write_test_bgn,
+						   0);
+
+				for (read_group = write_group *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
+					read_test_bgn = 0;
+					read_group < (write_group + 1) *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
+					group_failed == 0;
+					read_group++, read_test_bgn +=
+					RW_MGR_MEM_DQ_PER_READ_DQS) {
+					/* Calibrate the VFIFO */
+					if (!((STATIC_CALIB_STEPS) &
+						CALIB_SKIP_VFIFO)) {
+						if (!rw_mgr_mem_calibrate_vfifo
+							(read_group,
+							read_test_bgn)) {
+							group_failed = 1;
+
+							if (!(gbl->
+							phy_debug_mode_flags &
+						PHY_DEBUG_SWEEP_ALL_GROUPS)) {
+								return 0;
+							}
+						}
+					}
+				}
+
+				/* Calibrate the output side */
+				if (group_failed == 0)	{
+					for (rank_bgn = 0, sr = 0; rank_bgn
+						< RW_MGR_MEM_NUMBER_OF_RANKS;
+						rank_bgn +=
+						NUM_RANKS_PER_SHADOW_REG,
+						++sr) {
+						sr_failed = 0;
+						if (!((STATIC_CALIB_STEPS) &
+						CALIB_SKIP_WRITES)) {
+							if ((STATIC_CALIB_STEPS)
+						& CALIB_SKIP_DELAY_SWEEPS) {
+						/* not needed in quick mode! */
+							} else {
+						/*
+						 * Determine if this set of
+						 * ranks should be skipped
+						 * entirely.
+						 */
+					if (!param->skip_shadow_regs[sr]) {
+						if (!rw_mgr_mem_calibrate_writes
+						(rank_bgn, write_group,
+						write_test_bgn)) {
+							sr_failed = 1;
+							if (!(gbl->
+							phy_debug_mode_flags &
+						PHY_DEBUG_SWEEP_ALL_GROUPS)) {
+								return 0;
+									}
+									}
+								}
+							}
+						}
+						if (sr_failed != 0)
+							group_failed = 1;
+					}
+				}
+
+				if (group_failed == 0) {
+					for (read_group = write_group *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
+					read_test_bgn = 0;
+						read_group < (write_group + 1)
+						* RW_MGR_MEM_IF_READ_DQS_WIDTH
+						/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
+						group_failed == 0;
+						read_group++, read_test_bgn +=
+						RW_MGR_MEM_DQ_PER_READ_DQS) {
+						if (!((STATIC_CALIB_STEPS) &
+							CALIB_SKIP_WRITES)) {
+					if (!rw_mgr_mem_calibrate_vfifo_end
+						(read_group, read_test_bgn)) {
+							group_failed = 1;
+
+						if (!(gbl->phy_debug_mode_flags
+						& PHY_DEBUG_SWEEP_ALL_GROUPS)) {
+								return 0;
+								}
+							}
+						}
+					}
+				}
+
+				if (group_failed != 0)
+					failing_groups++;
+			}
+
+			/*
+			 * USER If there are any failing groups then report
+			 * the failure.
+			 */
+			if (failing_groups != 0)
+				return 0;
+
+			/* Calibrate the LFIFO */
+			if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
+				/*
+				 * If we're skipping groups as part of debug,
+				 * don't calibrate LFIFO.
+				 */
+				if (param->skip_groups == 0) {
+					if (!rw_mgr_mem_calibrate_lfifo())
+						return 0;
+				}
+			}
+		}
+	}
+
+	/*
+	 * Do not remove this line as it makes sure all of our decisions
+	 * have been applied.
+	 */
+	sdr_ops((u32 *)SCC_MGR_UPD, 0, 0, SDR_WRITE);
+	return 1;
+}
+
+static uint32_t run_mem_calibrate(void)
+{
+	uint32_t pass;
+	uint32_t debug_info;
+
+	/* Reset pass/fail status shown on afi_cal_success/fail */
+	sdr_ops((u32 *)PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_RESET, SDR_WRITE);
+
+	debug("%s:%d\n", __func__, __LINE__);
+	BFM_STAGE("calibrate");
+
+	/* Stop the tracking manager */
+	uint32_t ctrlcfg = sdr_ops((u32 *)BASE_MMR, 0, 0, SDR_READ);
+
+	sdr_ops((u32 *)BASE_MMR, 0, ctrlcfg & 0xFFBFFFFF, SDR_WRITE);
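+	/*
+	 * Clearing bit 22 of ctrlcfg stops the tracking manager; the saved
+	 * value is written back once calibration is done.
+	 */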
+
+	initialize();
+	rw_mgr_mem_initialize();
+
+	pass = mem_calibrate();
+
+	mem_precharge_and_activate();
+	sdr_ops((u32 *)PHY_MGR_CMD_FIFO_RESET, 0, 0, SDR_WRITE);
+
+	if (pass) {
+		BFM_STAGE("handoff");
+	}
+
+	/*
+	 * Handoff:
+	 * Don't return control of the PHY back to AFI when in debug mode.
+	 */
+	if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
+		rw_mgr_mem_handoff();
+		/*
+		 * In Hard PHY this is a 2-bit control:
+		 * 0: AFI Mux Select
+		 * 1: DDIO Mux Select
+		 */
+		sdr_ops((u32 *)PHY_MGR_MUX_SEL, 0, 0x2, SDR_WRITE);
+	}
+
+	sdr_ops((u32 *)BASE_MMR, 0, ctrlcfg, SDR_WRITE);
+
+	if (pass) {
+		printf("%s: CALIBRATION PASSED\n", __FILE__);
+
+		gbl->fom_in /= 2;
+		gbl->fom_out /= 2;
+
+		if (gbl->fom_in > 0xff)
+			gbl->fom_in = 0xff;
+
+		if (gbl->fom_out > 0xff)
+			gbl->fom_out = 0xff;
+
+		/* Update the FOM in the register file */
+		debug_info = gbl->fom_in;
+		debug_info |= gbl->fom_out << 8;
+		sdr_ops(&sdr_reg_file->fom, 0, debug_info, SDR_WRITE);
+
+		sdr_ops((u32 *)PHY_MGR_CAL_DEBUG_INFO, 0, debug_info, SDR_WRITE);
+		sdr_ops((u32 *)PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_SUCCESS,
+			SDR_WRITE);
+	} else {
+		printf("%s: CALIBRATION FAILED\n", __FILE__);
+
+		debug_info = gbl->error_stage;
+		debug_info |= gbl->error_substage << 8;
+		debug_info |= gbl->error_group << 16;
+
+		sdr_ops(&sdr_reg_file->failing_stage, 0, debug_info, SDR_WRITE);
+		sdr_ops((u32 *)PHY_MGR_CAL_DEBUG_INFO, 0, debug_info, SDR_WRITE);
+		sdr_ops((u32 *)PHY_MGR_CAL_STATUS, 0, PHY_MGR_CAL_FAIL, SDR_WRITE);
+
+		/* Update the failing group/stage in the register file */
+		debug_info = gbl->error_stage;
+		debug_info |= gbl->error_substage << 8;
+		debug_info |= gbl->error_group << 16;
+		sdr_ops(&sdr_reg_file->failing_stage, 0, debug_info, SDR_WRITE);
+	}
+
+	return pass;
+}
+
+static void hc_initialize_rom_data(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < inst_rom_init_size; i++) {
+		uint32_t data = inst_rom_init[i];
+		sdr_ops((u32 *)(RW_MGR_INST_ROM_WRITE), (i << 2), data, SDR_WRITE);
+	}
+
+	for (i = 0; i < ac_rom_init_size; i++) {
+		uint32_t data = ac_rom_init[i];
+		sdr_ops((u32 *)(RW_MGR_AC_ROM_WRITE), (i << 2), data, SDR_WRITE);
+	}
+}
+
+static void initialize_reg_file(void)
+{
+	/* Initialize the register file with the correct data */
+	sdr_ops(&sdr_reg_file->signature, 0, REG_FILE_INIT_SEQ_SIGNATURE, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->debug_data_addr, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->fom, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->failing_stage, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->debug1, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->debug2, 0, 0, SDR_WRITE);
+}
+
+static void initialize_hps_phy(void)
+{
+	/* These may need to be included also: */
+	/* wrap_back_en (false) */
+	/* atpg_en (false) */
+	/* pipelineglobalenable (true) */
+
+	uint32_t reg;
+	/*
+	 * Tracking also gets configured here because it's in the
+	 * same register.
+	 */
+	uint32_t trk_sample_count = 7500;
+	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
+	/*
+	 * Format is number of outer loops in the 16 MSB, sample
+	 * count in 16 LSB.
+	 */
+
+	reg = 0;
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
+	/*
+	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
+	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
+	 */
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
+		trk_sample_count);
+	sdr_ops((u32 *)BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_OFFSET, reg,
+		SDR_WRITE);
+
+	reg = 0;
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
+		trk_sample_count >>
+		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
+		trk_long_idle_sample_count);
+	sdr_ops((u32 *)BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_OFFSET, reg,
+		SDR_WRITE);
+
+	reg = 0;
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
+		trk_long_idle_sample_count >>
+		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
+	sdr_ops((u32 *)BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_OFFSET, reg,
+		SDR_WRITE);
+}
+
+static void initialize_tracking(void)
+{
+	uint32_t concatenated_longidle = 0x0;
+	uint32_t concatenated_delays = 0x0;
+	uint32_t concatenated_rw_addr = 0x0;
+	uint32_t concatenated_refresh = 0x0;
+	uint32_t dtaps_per_ptap;
+	uint32_t tmp_delay;
+
+	/*
+	 * Compute a usable version of the value in case we skip the
+	 * full computation later.
+	 */
+	dtaps_per_ptap = 0;
+	tmp_delay = 0;
+	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
+		dtaps_per_ptap++;
+		tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
+	}
+	dtaps_per_ptap--;
+
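+	/*
+	 * Pack the long-idle value: outer loop count in bits [31:16],
+	 * sample count in bits [15:0].
+	 */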
+	concatenated_longidle = concatenated_longidle ^ 10;
+		/*longidle outer loop */
+	concatenated_longidle = concatenated_longidle << 16;
+	concatenated_longidle = concatenated_longidle ^ 100;
+		/*longidle sample count */
+	concatenated_delays = concatenated_delays ^ 243;
+		/* trfc, worst case of 933Mhz 4Gb */
+	concatenated_delays = concatenated_delays << 8;
+	concatenated_delays = concatenated_delays ^ 14;
+		/* trcd, worst case */
+	concatenated_delays = concatenated_delays << 8;
+	concatenated_delays = concatenated_delays ^ 10;
+		/* vfifo wait */
+	concatenated_delays = concatenated_delays << 8;
+	concatenated_delays = concatenated_delays ^ 4;
+		/* mux delay */
+
+	concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_IDLE;
+	concatenated_rw_addr = concatenated_rw_addr << 8;
+	concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_ACTIVATE_1;
+	concatenated_rw_addr = concatenated_rw_addr << 8;
+	concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_SGLE_READ;
+	concatenated_rw_addr = concatenated_rw_addr << 8;
+	concatenated_rw_addr = concatenated_rw_addr ^ __RW_MGR_PRECHARGE_ALL;
+
+	concatenated_refresh = concatenated_refresh ^ __RW_MGR_REFRESH_ALL;
+	concatenated_refresh = concatenated_refresh << 24;
+	concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
+
+	/* Initialize the register file with the correct data */
+	sdr_ops(&sdr_reg_file->dtaps_per_ptap, 0, dtaps_per_ptap, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_sample_count, 0, 7500, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_longidle, 0, concatenated_longidle, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->delays, 0, concatenated_delays, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_rw_mgr_addr, 0, concatenated_rw_addr, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_read_dqs_width, 0,
+		RW_MGR_MEM_IF_READ_DQS_WIDTH, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_rfsh, 0, concatenated_refresh, SDR_WRITE);
+}
+
+int sdram_calibration_full(void)
+{
+	struct param_type my_param;
+	struct gbl_type my_gbl;
+	uint32_t pass;
+	uint32_t i;
+
+	param = &my_param;
+	gbl = &my_gbl;
+
+	/* Initialize the debug mode flags */
+	gbl->phy_debug_mode_flags = 0;
+	/* Enable the calibration report by default */
+	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
+	/*
+	 * Only sweep all groups (regardless of fail state) by default.
+	 * Set the read test enabled by default.
+	 */
+#if DISABLE_GUARANTEED_READ
+	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
+#endif
+	/* Initialize the register file */
+	initialize_reg_file();
+
+	/* Initialize any PHY CSR */
+	initialize_hps_phy();
+
+	scc_mgr_initialize();
+
+	initialize_tracking();
+
+	/* USER Enable all ranks, groups */
+	for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
+		param->skip_ranks[i] = 0;
+	for (i = 0; i < NUM_SHADOW_REGS; ++i)
+		param->skip_shadow_regs[i] = 0;
+	param->skip_groups = 0;
+
+	printf("%s: Preparing to start memory calibration\n", __FILE__);
+
+	debug("%s:%d\n", __func__, __LINE__);
+	debug_cond(DLEVEL == 1, "DDR3 FULL_RATE ranks=%lu cs/dimm=%lu dq/dqs=%lu,%lu vg/dqs=%lu,%lu",
+	       (long unsigned int)RW_MGR_MEM_NUMBER_OF_RANKS,
+	       (long unsigned int)RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
+	       (long unsigned int)RW_MGR_MEM_DQ_PER_READ_DQS,
+	       (long unsigned int)RW_MGR_MEM_DQ_PER_WRITE_DQS,
+	       (long unsigned int)RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
+	       (long unsigned int)RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
+	debug_cond(DLEVEL == 1, "dqs=%lu,%lu dq=%lu dm=%lu ptap_delay=%lu dtap_delay=%lu",
+	       (long unsigned int)RW_MGR_MEM_IF_READ_DQS_WIDTH,
+	       (long unsigned int)RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
+	       (long unsigned int)RW_MGR_MEM_DATA_WIDTH,
+	       (long unsigned int)RW_MGR_MEM_DATA_MASK_WIDTH,
+	       (long unsigned int)IO_DELAY_PER_OPA_TAP,
+	       (long unsigned int)IO_DELAY_PER_DCHAIN_TAP);
+	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%lu, dll=%lu",
+	       (long unsigned int)IO_DELAY_PER_DQS_EN_DCHAIN_TAP,
+	       (long unsigned int)IO_DLL_CHAIN_LENGTH);
+	debug_cond(DLEVEL == 1, "max values: en_p=%lu dqdqs_p=%lu en_d=%lu dqs_in_d=%lu",
+	       (long unsigned int)IO_DQS_EN_PHASE_MAX,
+	       (long unsigned int)IO_DQDQS_OUT_PHASE_MAX,
+	       (long unsigned int)IO_DQS_EN_DELAY_MAX,
+	       (long unsigned int)IO_DQS_IN_DELAY_MAX);
+	debug_cond(DLEVEL == 1, "io_in_d=%lu io_out1_d=%lu io_out2_d=%lu",
+	       (long unsigned int)IO_IO_IN_DELAY_MAX,
+	       (long unsigned int)IO_IO_OUT1_DELAY_MAX,
+	       (long unsigned int)IO_IO_OUT2_DELAY_MAX);
+	debug_cond(DLEVEL == 1, "dqs_in_reserve=%lu dqs_out_reserve=%lu",
+	       (long unsigned int)IO_DQS_IN_RESERVE,
+	       (long unsigned int)IO_DQS_OUT_RESERVE);
+
+	hc_initialize_rom_data();
+
+	/* update info for sims */
+	reg_file_set_stage(CAL_STAGE_NIL);
+	reg_file_set_group(0);
+
+	/*
+	 * Load global needed for those actions that require
+	 * some dynamic calibration support.
+	 */
+	dyn_calib_steps = STATIC_CALIB_STEPS;
+	/*
+	 * Load global to allow dynamic selection of delay loop settings
+	 * based on calibration mode.
+	 */
+	if (!((DYNAMIC_CALIB_STEPS) & CALIB_SKIP_DELAY_LOOPS))
+		skip_delay_mask = 0xff;
+	else
+		skip_delay_mask = 0x0;
+
+	pass = run_mem_calibrate();
+
+	printf("%s: Calibration complete\n", __FILE__);
+	return pass;
+}
diff --git a/drivers/ddr/altera/sequencer.h b/drivers/ddr/altera/sequencer.h
new file mode 100644
index 0000000..fd106ea
--- /dev/null
+++ b/drivers/ddr/altera/sequencer.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#ifndef _SEQUENCER_H_
+#define _SEQUENCER_H_
+
+extern int32_t inst_rom_init_size;
+extern uint32_t inst_rom_init[];
+extern uint32_t ac_rom_init_size;
+extern uint32_t ac_rom_init[];
+
+#if RLDRAMII
+#define RW_MGR_NUM_DM_PER_WRITE_GROUP (1)
+#define RW_MGR_NUM_TRUE_DM_PER_WRITE_GROUP (1)
+#else
+#define RW_MGR_NUM_DM_PER_WRITE_GROUP (RW_MGR_MEM_DATA_MASK_WIDTH \
+	/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH)
+#define RW_MGR_NUM_TRUE_DM_PER_WRITE_GROUP (RW_MGR_TRUE_MEM_DATA_MASK_WIDTH \
+	/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH)
+#endif
+
+#define RW_MGR_NUM_DQS_PER_WRITE_GROUP (RW_MGR_MEM_IF_READ_DQS_WIDTH \
+	/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH)
+#define NUM_RANKS_PER_SHADOW_REG (RW_MGR_MEM_NUMBER_OF_RANKS / NUM_SHADOW_REGS)
+
+#define RW_MGR_RUN_SINGLE_GROUP	(BASE_RW_MGR)
+#define RW_MGR_RUN_ALL_GROUPS	(BASE_RW_MGR + 0x0400)
+
+#define RW_MGR_DI_BASE		(BASE_RW_MGR + 0x0020)
+
+#if DDR3
+#define DDR3_MR1_ODT_MASK	0xFFFFFD99
+#define DDR3_MR2_ODT_MASK	0xFFFFF9FF
+#define DDR3_AC_MIRR_MASK	0x020A8
+#endif /* DDR3 */
+
+#define RW_MGR_MEM_NUMBER_OF_RANKS	1
+#define NUM_SHADOW_REGS			1
+
+struct socfpga_sdr_rw_load_manager {
+	u32	load_cntr0;
+	u32	load_cntr1;
+	u32	load_cntr2;
+	u32	load_cntr3;
+};
+
+struct socfpga_sdr_rw_load_jump_manager {
+	u32	load_jump_add0;
+	u32	load_jump_add1;
+	u32	load_jump_add2;
+	u32	load_jump_add3;
+};
+
+#define RW_MGR_RESET_READ_DATAPATH	(BASE_RW_MGR + 0x1000)
+#define RW_MGR_SET_CS_AND_ODT_MASK	(BASE_RW_MGR + 0x1400)
+
+#define RW_MGR_RANK_NONE		0xFF
+#define RW_MGR_RANK_ALL			0x00
+
+#define RW_MGR_ODT_MODE_OFF		0
+#define RW_MGR_ODT_MODE_READ_WRITE	1
+
+#define NUM_CALIB_REPEAT		1
+
+#define NUM_READ_TESTS			7
+#define NUM_READ_PB_TESTS		7
+#define NUM_WRITE_TESTS			15
+#define NUM_WRITE_PB_TESTS		31
+
+#define PASS_ALL_BITS			1
+#define PASS_ONE_BIT			0
+
+/* calibration stages */
+
+#define CAL_STAGE_NIL			0
+#define CAL_STAGE_VFIFO			1
+#define CAL_STAGE_WLEVEL		2
+#define CAL_STAGE_LFIFO			3
+#define CAL_STAGE_WRITES		4
+#define CAL_STAGE_FULLTEST		5
+#define CAL_STAGE_REFRESH		6
+#define CAL_STAGE_CAL_SKIPPED		7
+#define CAL_STAGE_CAL_ABORTED		8
+#define CAL_STAGE_VFIFO_AFTER_WRITES	9
+
+/* calibration substages */
+
+#define CAL_SUBSTAGE_NIL		0
+#define CAL_SUBSTAGE_GUARANTEED_READ	1
+#define CAL_SUBSTAGE_DQS_EN_PHASE	2
+#define CAL_SUBSTAGE_VFIFO_CENTER	3
+#define CAL_SUBSTAGE_WORKING_DELAY	1
+#define CAL_SUBSTAGE_LAST_WORKING_DELAY	2
+#define CAL_SUBSTAGE_WLEVEL_COPY	3
+#define CAL_SUBSTAGE_WRITES_CENTER	1
+#define CAL_SUBSTAGE_READ_LATENCY	1
+#define CAL_SUBSTAGE_REFRESH		1
+
+#define MAX_RANKS			(RW_MGR_MEM_NUMBER_OF_RANKS)
+#define MAX_DQS				(RW_MGR_MEM_IF_WRITE_DQS_WIDTH > \
+					RW_MGR_MEM_IF_READ_DQS_WIDTH ? \
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH : \
+					RW_MGR_MEM_IF_READ_DQS_WIDTH)
+#define MAX_DQ				(RW_MGR_MEM_DATA_WIDTH)
+#define MAX_DM				(RW_MGR_MEM_DATA_MASK_WIDTH)
+
+/* length of VFIFO, from SW_MACROS */
+#define VFIFO_SIZE			(READ_VALID_FIFO_SIZE)
+
+/* Memory for data transfer between TCL scripts and NIOS.
+ *
+ * - First word is a command request.
+ * - The remaining words are part of the transfer.
+ */
+
+/* Define the base address of each manager. */
+
+/* MarkW: how should these base addresses be done for A-V? */
+#define BASE_PTR_MGR			(0x00040000)
+#define BASE_SCC_MGR			(0x00058000)
+#define BASE_REG_FILE			(0x00070000)
+#define BASE_TIMER			(0x00078000)
+#define BASE_PHY_MGR			(0x00088000)
+#define BASE_RW_MGR			(0x00090000)
+#define BASE_DATA_MGR			(0x00098000)
+#define BASE_MMR                        (0x000C0000)
+#define BASE_TRK_MGR			(0x000D0000)
+
+/* Register file addresses. */
+
+struct socfpga_sdr_reg_file {
+	u32 signature;
+	u32 debug_data_addr;
+	u32 cur_stage;
+	u32 fom;
+	u32 failing_stage;
+	u32 debug1;
+	u32 debug2;
+	u32 dtaps_per_ptap;
+	u32 trk_sample_count;
+	u32 trk_longidle;
+	u32 delays;
+	u32 trk_rw_mgr_addr;
+	u32 trk_read_dqs_width;
+	u32 trk_rfsh;
+};
+
+/* PHY manager configuration registers. */
+
+#define PHY_MGR_PHY_RLAT		(BASE_PHY_MGR + 0x4000)
+#define PHY_MGR_RESET_MEM_STBL		(BASE_PHY_MGR + 0x4004)
+#define PHY_MGR_MUX_SEL			(BASE_PHY_MGR + 0x4008)
+#define PHY_MGR_CAL_STATUS		(BASE_PHY_MGR + 0x400c)
+#define PHY_MGR_CAL_DEBUG_INFO		(BASE_PHY_MGR + 0x4010)
+#define PHY_MGR_VFIFO_RD_EN_OVRD	(BASE_PHY_MGR + 0x4014)
+#define PHY_MGR_AFI_WLAT		(BASE_PHY_MGR + 0x4018)
+#define PHY_MGR_AFI_RLAT		(BASE_PHY_MGR + 0x401c)
+
+#define PHY_MGR_CAL_RESET		(0)
+#define PHY_MGR_CAL_SUCCESS		(1)
+#define PHY_MGR_CAL_FAIL		(2)
+
+/* PHY manager command addresses. */
+
+#define PHY_MGR_CMD_INC_VFIFO_FR	(BASE_PHY_MGR + 0x0000)
+#define PHY_MGR_CMD_INC_VFIFO_HR	(BASE_PHY_MGR + 0x0004)
+#define PHY_MGR_CMD_INC_VFIFO_HARD_PHY	(BASE_PHY_MGR + 0x0004)
+#define PHY_MGR_CMD_FIFO_RESET		(BASE_PHY_MGR + 0x0008)
+#define PHY_MGR_CMD_INC_VFIFO_FR_HR	(BASE_PHY_MGR + 0x000C)
+#define PHY_MGR_CMD_INC_VFIFO_QR	(BASE_PHY_MGR + 0x0010)
+
+#define DATA_MGR_MEM_T_WL		(BASE_DATA_MGR + 0x0004)
+#define DATA_MGR_MEM_T_ADD		(BASE_DATA_MGR + 0x0008)
+#define DATA_MGR_MEM_T_RL		(BASE_DATA_MGR + 0x000C)
+
+#define MEM_T_WL_ADD			DATA_MGR_MEM_T_WL
+#define MEM_T_RL_ADD			DATA_MGR_MEM_T_RL
+
+#define CALIB_SKIP_DELAY_LOOPS		(1 << 0)
+#define CALIB_SKIP_ALL_BITS_CHK		(1 << 1)
+#define CALIB_SKIP_DELAY_SWEEPS		(1 << 2)
+#define CALIB_SKIP_VFIFO		(1 << 3)
+#define CALIB_SKIP_LFIFO		(1 << 4)
+#define CALIB_SKIP_WLEVEL		(1 << 5)
+#define CALIB_SKIP_WRITES		(1 << 6)
+#define CALIB_SKIP_FULL_TEST		(1 << 7)
+#define CALIB_SKIP_ALL			(CALIB_SKIP_VFIFO | \
+				CALIB_SKIP_LFIFO | CALIB_SKIP_WLEVEL | \
+				CALIB_SKIP_WRITES | CALIB_SKIP_FULL_TEST)
+#define CALIB_IN_RTL_SIM			(1 << 8)
+
+/* Scan chain manager command addresses */
+
+#define WRITE_SCC_OCT_OUT2_DELAY(group, delay)
+#define WRITE_SCC_DQS_BYPASS(group, bypass)
+
+#define WRITE_SCC_DQ_OUT2_DELAY(pin, delay)
+
+#define WRITE_SCC_DQ_BYPASS(pin, bypass)
+
+#define WRITE_SCC_RFIFO_MODE(pin, mode)
+
+#define WRITE_SCC_DQS_IO_OUT2_DELAY(delay)
+
+#define WRITE_SCC_DM_IO_OUT2_DELAY(pin, delay)
+
+#define WRITE_SCC_DM_BYPASS(pin, bypass)
+
+#define READ_SCC_OCT_OUT2_DELAY(group)	0
+#define READ_SCC_DQS_BYPASS(group)		0
+#define READ_SCC_DQS_BYPASS(group)		0
+
+#define READ_SCC_DQ_OUT2_DELAY(pin)		0
+#define READ_SCC_DQ_BYPASS(pin)			0
+#define READ_SCC_RFIFO_MODE(pin)		0
+
+#define READ_SCC_DQS_IO_OUT2_DELAY()	0
+
+#define READ_SCC_DM_IO_OUT2_DELAY(pin)	0
+#define READ_SCC_DM_BYPASS(pin)		0
+
+#define SCC_MGR_GROUP_COUNTER			(BASE_SCC_MGR + 0x0000)
+#define SCC_MGR_DQS_IN_DELAY			(BASE_SCC_MGR + 0x0100)
+#define SCC_MGR_DQS_EN_PHASE			(BASE_SCC_MGR + 0x0200)
+#define SCC_MGR_DQS_EN_DELAY			(BASE_SCC_MGR + 0x0300)
+#define SCC_MGR_DQDQS_OUT_PHASE			(BASE_SCC_MGR + 0x0400)
+#define SCC_MGR_OCT_OUT1_DELAY			(BASE_SCC_MGR + 0x0500)
+#define SCC_MGR_IO_OUT1_DELAY			(BASE_SCC_MGR + 0x0700)
+#define SCC_MGR_IO_IN_DELAY			(BASE_SCC_MGR + 0x0900)
+
+/* HHP-HPS-specific versions of some commands */
+#define SCC_MGR_DQS_EN_DELAY_GATE		(BASE_SCC_MGR + 0x0600)
+#define SCC_MGR_IO_OE_DELAY			(BASE_SCC_MGR + 0x0800)
+#define SCC_MGR_HHP_GLOBALS			(BASE_SCC_MGR + 0x0A00)
+#define SCC_MGR_HHP_RFILE			(BASE_SCC_MGR + 0x0B00)
+
+/* HHP-HPS-specific values */
+#define SCC_MGR_HHP_EXTRAS_OFFSET			0
+#define SCC_MGR_HHP_DQSE_MAP_OFFSET			1
+
+#define SCC_MGR_DQS_ENA				(BASE_SCC_MGR + 0x0E00)
+#define SCC_MGR_DQS_IO_ENA			(BASE_SCC_MGR + 0x0E04)
+#define SCC_MGR_DQ_ENA				(BASE_SCC_MGR + 0x0E08)
+#define SCC_MGR_DM_ENA				(BASE_SCC_MGR + 0x0E0C)
+#define SCC_MGR_UPD				(BASE_SCC_MGR + 0x0E20)
+#define SCC_MGR_ACTIVE_RANK			(BASE_SCC_MGR + 0x0E40)
+#define SCC_MGR_AFI_CAL_INIT			(BASE_SCC_MGR + 0x0D00)
+
+/* PHY Debug mode flag constants */
+#define PHY_DEBUG_IN_DEBUG_MODE 0x00000001
+#define PHY_DEBUG_ENABLE_CAL_RPT 0x00000002
+#define PHY_DEBUG_ENABLE_MARGIN_RPT 0x00000004
+#define PHY_DEBUG_SWEEP_ALL_GROUPS 0x00000008
+#define PHY_DEBUG_DISABLE_GUARANTEED_READ 0x00000010
+#define PHY_DEBUG_ENABLE_NON_DESTRUCTIVE_CALIBRATION 0x00000020
+
+/* Init and Reset delay constants - Only use if defined by sequencer_defines.h,
+ * otherwise revert to the defaults.
+ * Default for Tinit = (0+1) * ((202+1) * (2 * 131 + 1) + 1) = 53532 =
+ * 200.75us @ 266MHz
+ */
+#ifdef TINIT_CNTR0_VAL
+#define SEQ_TINIT_CNTR0_VAL TINIT_CNTR0_VAL
+#else
+#define SEQ_TINIT_CNTR0_VAL 0
+#endif
+
+#ifdef TINIT_CNTR1_VAL
+#define SEQ_TINIT_CNTR1_VAL TINIT_CNTR1_VAL
+#else
+#define SEQ_TINIT_CNTR1_VAL 202
+#endif
+
+#ifdef TINIT_CNTR2_VAL
+#define SEQ_TINIT_CNTR2_VAL TINIT_CNTR2_VAL
+#else
+#define SEQ_TINIT_CNTR2_VAL 131
+#endif
+
+
+/* Default for Treset = (2+1) * ((252+1) * (2 * 131 + 1) + 1) = 133563 =
+ * 500.86us @ 266MHz
+ */
+#ifdef TRESET_CNTR0_VAL
+#define SEQ_TRESET_CNTR0_VAL TRESET_CNTR0_VAL
+#else
+#define SEQ_TRESET_CNTR0_VAL 2
+#endif
+
+#ifdef TRESET_CNTR1_VAL
+#define SEQ_TRESET_CNTR1_VAL TRESET_CNTR1_VAL
+#else
+#define SEQ_TRESET_CNTR1_VAL 252
+#endif
+
+#ifdef TRESET_CNTR2_VAL
+#define SEQ_TRESET_CNTR2_VAL TRESET_CNTR2_VAL
+#else
+#define SEQ_TRESET_CNTR2_VAL 131
+#endif
+
+#define RW_MGR_INST_ROM_WRITE	(BASE_RW_MGR + 0x1800)
+#define RW_MGR_AC_ROM_WRITE	(BASE_RW_MGR + 0x1C00)
+
+/* parameter variable holder */
+struct param_type {
+	uint32_t dm_correct_mask;
+	uint32_t read_correct_mask;
+	uint32_t read_correct_mask_vg;
+	uint32_t write_correct_mask;
+	uint32_t write_correct_mask_vg;
+
+	/* set a particular entry to 1 if we need to skip a particular rank */
+
+	uint32_t skip_ranks[MAX_RANKS];
+
+	/* set a particular entry to 1 if we need to skip a particular group */
+
+	uint32_t skip_groups;
+
+	/* set a particular entry to 1 if the shadow register
+	(which represents a set of ranks) needs to be skipped */
+
+	uint32_t skip_shadow_regs[NUM_SHADOW_REGS];
+
+};
+
+
+/* global variable holder */
+struct gbl_type {
+	uint32_t phy_debug_mode_flags;
+
+	/* current read latency */
+
+	uint32_t curr_read_lat;
+
+	/* current write latency */
+
+	uint32_t curr_write_lat;
+
+	/* error code */
+
+	uint32_t error_substage;
+	uint32_t error_stage;
+	uint32_t error_group;
+
+	/* figure-of-merit in, figure-of-merit out */
+
+	uint32_t fom_in;
+	uint32_t fom_out;
+
+	/*USER Number of RW Mgr NOP cycles between
+	write command and write data */
+	uint32_t rw_wl_nop_cycles;
+};
+#endif /* _SEQUENCER_H_ */
diff --git a/drivers/ddr/altera/sequencer_auto.h b/drivers/ddr/altera/sequencer_auto.h
new file mode 100644
index 0000000..de26b6b
--- /dev/null
+++ b/drivers/ddr/altera/sequencer_auto.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#define __RW_MGR_ac_mrs1 0x04
+#define __RW_MGR_ac_mrs3 0x06
+#define __RW_MGR_ac_write_bank_0_col_0_nodata_wl_1 0x1C
+#define __RW_MGR_ac_act_1 0x11
+#define __RW_MGR_ac_write_postdata 0x1A
+#define __RW_MGR_ac_act_0 0x10
+#define __RW_MGR_ac_des 0x0D
+#define __RW_MGR_ac_init_reset_1_cke_0 0x01
+#define __RW_MGR_ac_write_data 0x19
+#define __RW_MGR_ac_init_reset_0_cke_0 0x00
+#define __RW_MGR_ac_read_bank_0_1_norden 0x22
+#define __RW_MGR_ac_pre_all 0x12
+#define __RW_MGR_ac_mrs0_user 0x02
+#define __RW_MGR_ac_mrs0_dll_reset 0x03
+#define __RW_MGR_ac_read_bank_0_0 0x1D
+#define __RW_MGR_ac_write_bank_0_col_1 0x16
+#define __RW_MGR_ac_read_bank_0_1 0x1F
+#define __RW_MGR_ac_write_bank_1_col_0 0x15
+#define __RW_MGR_ac_write_bank_1_col_1 0x17
+#define __RW_MGR_ac_write_bank_0_col_0 0x14
+#define __RW_MGR_ac_read_bank_1_0 0x1E
+#define __RW_MGR_ac_mrs1_mirr 0x0A
+#define __RW_MGR_ac_read_bank_1_1 0x20
+#define __RW_MGR_ac_des_odt_1 0x0E
+#define __RW_MGR_ac_mrs0_dll_reset_mirr 0x09
+#define __RW_MGR_ac_zqcl 0x07
+#define __RW_MGR_ac_write_predata 0x18
+#define __RW_MGR_ac_mrs0_user_mirr 0x08
+#define __RW_MGR_ac_ref 0x13
+#define __RW_MGR_ac_nop 0x0F
+#define __RW_MGR_ac_rdimm 0x23
+#define __RW_MGR_ac_mrs2_mirr 0x0B
+#define __RW_MGR_ac_write_bank_0_col_0_nodata 0x1B
+#define __RW_MGR_ac_read_en 0x21
+#define __RW_MGR_ac_mrs3_mirr 0x0C
+#define __RW_MGR_ac_mrs2 0x05
+#define __RW_MGR_CONTENT_ac_mrs1 0x10090004
+#define __RW_MGR_CONTENT_ac_mrs3 0x100B0000
+#define __RW_MGR_CONTENT_ac_write_bank_0_col_0_nodata_wl_1 0x18980000
+#define __RW_MGR_CONTENT_ac_act_1 0x106B0000
+#define __RW_MGR_CONTENT_ac_write_postdata 0x38780000
+#define __RW_MGR_CONTENT_ac_act_0 0x10680000
+#define __RW_MGR_CONTENT_ac_des 0x30780000
+#define __RW_MGR_CONTENT_ac_init_reset_1_cke_0 0x20780000
+#define __RW_MGR_CONTENT_ac_write_data 0x3CF80000
+#define __RW_MGR_CONTENT_ac_init_reset_0_cke_0 0x20700000
+#define __RW_MGR_CONTENT_ac_read_bank_0_1_norden 0x10580008
+#define __RW_MGR_CONTENT_ac_pre_all 0x10280400
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define __RW_MGR_CONTENT_ac_mrs0_user 0x10080831
+#define __RW_MGR_CONTENT_ac_mrs0_dll_reset 0x10080930
+#else
+#define __RW_MGR_CONTENT_ac_mrs0_user 0x10080431
+#define __RW_MGR_CONTENT_ac_mrs0_dll_reset 0x10080530
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define __RW_MGR_CONTENT_ac_read_bank_0_0 0x13580000
+#define __RW_MGR_CONTENT_ac_write_bank_0_col_1 0x1C980008
+#define __RW_MGR_CONTENT_ac_read_bank_0_1 0x13580008
+#define __RW_MGR_CONTENT_ac_write_bank_1_col_0 0x1C9B0000
+#define __RW_MGR_CONTENT_ac_write_bank_1_col_1 0x1C9B0008
+#define __RW_MGR_CONTENT_ac_write_bank_0_col_0 0x1C980000
+#define __RW_MGR_CONTENT_ac_read_bank_1_0 0x135B0000
+#define __RW_MGR_CONTENT_ac_mrs1_mirr 0x100A0004
+#define __RW_MGR_CONTENT_ac_read_bank_1_1 0x135B0008
+#define __RW_MGR_CONTENT_ac_des_odt_1 0x38780000
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define __RW_MGR_CONTENT_ac_mrs0_dll_reset_mirr 0x100808C8
+#else
+#define __RW_MGR_CONTENT_ac_mrs0_dll_reset_mirr 0x100804C8
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define __RW_MGR_CONTENT_ac_zqcl 0x10380400
+#define __RW_MGR_CONTENT_ac_write_predata 0x38F80000
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define __RW_MGR_CONTENT_ac_mrs0_user_mirr 0x10080849
+#else
+#define __RW_MGR_CONTENT_ac_mrs0_user_mirr 0x10080449
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define __RW_MGR_CONTENT_ac_ref 0x10480000
+#define __RW_MGR_CONTENT_ac_nop 0x30780000
+#define __RW_MGR_CONTENT_ac_rdimm 0x10780000
+#define __RW_MGR_CONTENT_ac_mrs2_mirr 0x10090010
+#define __RW_MGR_CONTENT_ac_write_bank_0_col_0_nodata 0x18180000
+#define __RW_MGR_CONTENT_ac_read_en 0x33780000
+#define __RW_MGR_CONTENT_ac_mrs3_mirr 0x100B0000
+#define __RW_MGR_CONTENT_ac_mrs2 0x100A0008
+
+#define __RW_MGR_READ_B2B_WAIT2 0x6A
+#define __RW_MGR_LFSR_WR_RD_BANK_0_WAIT 0x31
+#define __RW_MGR_REFRESH_ALL 0x14
+#define __RW_MGR_ZQCL 0x06
+#define __RW_MGR_LFSR_WR_RD_BANK_0_NOP 0x22
+#define __RW_MGR_LFSR_WR_RD_BANK_0_DQS 0x23
+#define __RW_MGR_ACTIVATE_0_AND_1 0x0D
+#define __RW_MGR_MRS2_MIRR 0x0A
+#define __RW_MGR_INIT_RESET_0_CKE_0 0x6E
+#define __RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT 0x45
+#define __RW_MGR_ACTIVATE_1 0x0F
+#define __RW_MGR_MRS2 0x04
+#define __RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1 0x34
+#define __RW_MGR_MRS1 0x03
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define __RW_MGR_IDLE_LOOP1 0x7A
+#else
+#define __RW_MGR_IDLE_LOOP1 0x7C
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define __RW_MGR_GUARANTEED_WRITE_WAIT2 0x18
+#define __RW_MGR_MRS3 0x05
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define __RW_MGR_IDLE_LOOP2 0x79
+#else
+#define __RW_MGR_IDLE_LOOP2 0x7B
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define __RW_MGR_GUARANTEED_WRITE_WAIT1 0x1E
+#define __RW_MGR_LFSR_WR_RD_BANK_0_DATA 0x24
+#define __RW_MGR_GUARANTEED_WRITE_WAIT3 0x1C
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define __RW_MGR_RDIMM_CMD 0x78
+#else
+#define __RW_MGR_RDIMM_CMD 0x7A
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define __RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP 0x36
+#define __RW_MGR_GUARANTEED_WRITE_WAIT0 0x1A
+#define __RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA 0x38
+#define __RW_MGR_GUARANTEED_READ_CONT 0x53
+#define __RW_MGR_MRS3_MIRR 0x0B
+#define __RW_MGR_IDLE 0x00
+#define __RW_MGR_READ_B2B 0x58
+#define __RW_MGR_INIT_RESET_0_CKE_0_inloop 0x6F
+#define __RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS 0x37
+#define __RW_MGR_GUARANTEED_WRITE 0x17
+#define __RW_MGR_PRECHARGE_ALL 0x12
+#define __RW_MGR_INIT_RESET_1_CKE_0_inloop_1 0x74
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define __RW_MGR_SGLE_READ 0x7C
+#else
+#define __RW_MGR_SGLE_READ 0x7E
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define __RW_MGR_MRS0_USER_MIRR 0x0C
+#define __RW_MGR_RETURN 0x01
+#define __RW_MGR_LFSR_WR_RD_DM_BANK_0 0x35
+#define __RW_MGR_MRS0_USER 0x07
+#define __RW_MGR_GUARANTEED_READ 0x4B
+#define __RW_MGR_MRS0_DLL_RESET_MIRR 0x08
+#define __RW_MGR_INIT_RESET_1_CKE_0 0x73
+#define __RW_MGR_ACTIVATE_0_AND_1_WAIT2 0x10
+#define __RW_MGR_LFSR_WR_RD_BANK_0_WL_1 0x20
+#define __RW_MGR_MRS0_DLL_RESET 0x02
+#define __RW_MGR_ACTIVATE_0_AND_1_WAIT1 0x0E
+#define __RW_MGR_LFSR_WR_RD_BANK_0 0x21
+#define __RW_MGR_CLEAR_DQS_ENABLE 0x48
+#define __RW_MGR_MRS1_MIRR 0x09
+#define __RW_MGR_READ_B2B_WAIT1 0x60
+#define __RW_MGR_CONTENT_READ_B2B_WAIT2 0x00C680
+#define __RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_WAIT 0x00A680
+#define __RW_MGR_CONTENT_REFRESH_ALL 0x000980
+#define __RW_MGR_CONTENT_ZQCL 0x008380
+#define __RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_NOP 0x00E700
+#define __RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_DQS 0x000C00
+#define __RW_MGR_CONTENT_ACTIVATE_0_AND_1 0x000800
+#define __RW_MGR_CONTENT_MRS2_MIRR 0x008580
+#define __RW_MGR_CONTENT_INIT_RESET_0_CKE_0 0x000000
+#define __RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_WAIT 0x00A680
+#define __RW_MGR_CONTENT_ACTIVATE_1 0x000880
+#define __RW_MGR_CONTENT_MRS2 0x008280
+#define __RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_WL_1 0x00CE00
+#define __RW_MGR_CONTENT_MRS1 0x008200
+#define __RW_MGR_CONTENT_IDLE_LOOP1 0x00A680
+#define __RW_MGR_CONTENT_GUARANTEED_WRITE_WAIT2 0x00CCE8
+#define __RW_MGR_CONTENT_MRS3 0x008300
+#define __RW_MGR_CONTENT_IDLE_LOOP2 0x008680
+#define __RW_MGR_CONTENT_GUARANTEED_WRITE_WAIT1 0x00AC88
+#define __RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_DATA 0x020CE0
+#define __RW_MGR_CONTENT_GUARANTEED_WRITE_WAIT3 0x00EC88
+#define __RW_MGR_CONTENT_RDIMM_CMD 0x009180
+#define __RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_NOP 0x00E700
+#define __RW_MGR_CONTENT_GUARANTEED_WRITE_WAIT0 0x008CE8
+#define __RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_DATA 0x030CE0
+#define __RW_MGR_CONTENT_GUARANTEED_READ_CONT 0x001168
+#define __RW_MGR_CONTENT_MRS3_MIRR 0x008600
+#define __RW_MGR_CONTENT_IDLE 0x080000
+#define __RW_MGR_CONTENT_READ_B2B 0x040E88
+#define __RW_MGR_CONTENT_INIT_RESET_0_CKE_0_inloop 0x000000
+#define __RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_DQS 0x000C00
+#define __RW_MGR_CONTENT_GUARANTEED_WRITE 0x000B68
+#define __RW_MGR_CONTENT_PRECHARGE_ALL 0x000900
+#define __RW_MGR_CONTENT_INIT_RESET_1_CKE_0_inloop_1 0x000080
+#define __RW_MGR_CONTENT_SGLE_READ 0x040F08
+#define __RW_MGR_CONTENT_MRS0_USER_MIRR 0x008400
+#define __RW_MGR_CONTENT_RETURN 0x080680
+#define __RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0 0x00CD80
+#define __RW_MGR_CONTENT_MRS0_USER 0x008100
+#define __RW_MGR_CONTENT_GUARANTEED_READ 0x001168
+#define __RW_MGR_CONTENT_MRS0_DLL_RESET_MIRR 0x008480
+#define __RW_MGR_CONTENT_INIT_RESET_1_CKE_0 0x000080
+#define __RW_MGR_CONTENT_ACTIVATE_0_AND_1_WAIT2 0x00A680
+#define __RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_WL_1 0x00CE00
+#define __RW_MGR_CONTENT_MRS0_DLL_RESET 0x008180
+#define __RW_MGR_CONTENT_ACTIVATE_0_AND_1_WAIT1 0x008680
+#define __RW_MGR_CONTENT_LFSR_WR_RD_BANK_0 0x00CD80
+#define __RW_MGR_CONTENT_CLEAR_DQS_ENABLE 0x001158
+#define __RW_MGR_CONTENT_MRS1_MIRR 0x008500
+#define __RW_MGR_CONTENT_READ_B2B_WAIT1 0x00A680
+
diff --git a/drivers/ddr/altera/sequencer_auto_ac_init.c b/drivers/ddr/altera/sequencer_auto_ac_init.c
new file mode 100644
index 0000000..c05af98
--- /dev/null
+++ b/drivers/ddr/altera/sequencer_auto_ac_init.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+const unsigned long ac_rom_init_size = 36;
+const unsigned long ac_rom_init[36] = {
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+	0x20700000,
+	0x20780000,
+	0x10080831,
+	0x10080930,
+	0x10090004,
+	0x100a0008,
+	0x100b0000,
+	0x10380400,
+	0x10080849,
+	0x100808c8,
+	0x100a0004,
+	0x10090010,
+	0x100b0000,
+	0x30780000,
+	0x38780000,
+	0x30780000,
+	0x10680000,
+	0x106b0000,
+	0x10280400,
+	0x10480000,
+	0x1c980000,
+	0x1c9b0000,
+	0x1c980008,
+	0x1c9b0008,
+	0x38f80000,
+	0x3cf80000,
+	0x38780000,
+	0x18180000,
+	0x18980000,
+	0x13580000,
+	0x135b0000,
+	0x13580008,
+	0x135b0008,
+	0x33780000,
+	0x10580008,
+	0x10780000
+#else
+	0x20700000,
+	0x20780000,
+	0x10080431,
+	0x10080530,
+	0x10090004,
+	0x100a0008,
+	0x100b0000,
+	0x10380400,
+	0x10080449,
+	0x100804c8,
+	0x100a0004,
+	0x10090010,
+	0x100b0000,
+	0x30780000,
+	0x38780000,
+	0x30780000,
+	0x10680000,
+	0x106b0000,
+	0x10280400,
+	0x10480000,
+	0x1c980000,
+	0x1c9b0000,
+	0x1c980008,
+	0x1c9b0008,
+	0x38f80000,
+	0x3cf80000,
+	0x38780000,
+	0x18180000,
+	0x18980000,
+	0x13580000,
+	0x135b0000,
+	0x13580008,
+	0x135b0008,
+	0x33780000,
+	0x10580008,
+	0x10780000
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+};
diff --git a/drivers/ddr/altera/sequencer_auto_inst_init.c b/drivers/ddr/altera/sequencer_auto_inst_init.c
new file mode 100644
index 0000000..e91e3be
--- /dev/null
+++ b/drivers/ddr/altera/sequencer_auto_inst_init.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+const alt_u32 inst_rom_init_size = 126;
+const alt_u32 inst_rom_init[126] = {
+	0x80000,
+	0x80680,
+	0x8180,
+	0x8200,
+	0x8280,
+	0x8300,
+	0x8380,
+	0x8100,
+	0x8480,
+	0x8500,
+	0x8580,
+	0x8600,
+	0x8400,
+	0x800,
+	0x8680,
+	0x880,
+	0xa680,
+	0x80680,
+	0x900,
+	0x80680,
+	0x980,
+	0x8680,
+	0x80680,
+	0xb68,
+	0xcce8,
+	0xae8,
+	0x8ce8,
+	0xb88,
+	0xec88,
+	0xa08,
+	0xac88,
+	0x80680,
+	0xce00,
+	0xcd80,
+	0xe700,
+	0xc00,
+	0x20ce0,
+	0x20ce0,
+	0x20ce0,
+	0x20ce0,
+	0xd00,
+	0x680,
+	0x680,
+	0x680,
+	0x680,
+	0x60e80,
+	0x61080,
+	0x61080,
+	0x61080,
+	0xa680,
+	0x8680,
+	0x80680,
+	0xce00,
+	0xcd80,
+	0xe700,
+	0xc00,
+	0x30ce0,
+	0x30ce0,
+	0x30ce0,
+	0x30ce0,
+	0xd00,
+	0x680,
+	0x680,
+	0x680,
+	0x680,
+	0x70e80,
+	0x71080,
+	0x71080,
+	0x71080,
+	0xa680,
+	0x8680,
+	0x80680,
+	0x1158,
+	0x6d8,
+	0x80680,
+	0x1168,
+	0x7e8,
+	0x7e8,
+	0x87e8,
+	0x40fe8,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0x1168,
+	0x7e8,
+	0x7e8,
+	0xa7e8,
+	0x80680,
+	0x40e88,
+	0x41088,
+	0x41088,
+	0x41088,
+	0x40f68,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0xa680,
+	0x40fe8,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0x41008,
+	0x41088,
+	0x41088,
+	0x41088,
+	0x1100,
+	0xc680,
+	0x8680,
+	0xe680,
+	0x80680,
+	0x0,
+	0x8000,
+	0xa000,
+	0xc000,
+	0x80000,
+	0x80,
+	0x8080,
+	0xa080,
+	0xc080,
+	0x80080,
+	0x9180,
+	0x8680,
+	0xa680,
+	0x80680,
+	0x40f08,
+	0x80680
+};
+#else
+const unsigned long inst_rom_init_size = 128;
+const unsigned long inst_rom_init[128] = {
+	0x80000,
+	0x80680,
+	0x8180,
+	0x8200,
+	0x8280,
+	0x8300,
+	0x8380,
+	0x8100,
+	0x8480,
+	0x8500,
+	0x8580,
+	0x8600,
+	0x8400,
+	0x800,
+	0x8680,
+	0x880,
+	0xa680,
+	0x80680,
+	0x900,
+	0x80680,
+	0x980,
+	0x8680,
+	0x80680,
+	0xb68,
+	0xcce8,
+	0xae8,
+	0x8ce8,
+	0xb88,
+	0xec88,
+	0xa08,
+	0xac88,
+	0x80680,
+	0xce00,
+	0xcd80,
+	0xe700,
+	0xc00,
+	0x20ce0,
+	0x20ce0,
+	0x20ce0,
+	0x20ce0,
+	0xd00,
+	0x680,
+	0x680,
+	0x680,
+	0x680,
+	0x60e80,
+	0x61080,
+	0x61080,
+	0x61080,
+	0xa680,
+	0x8680,
+	0x80680,
+	0xce00,
+	0xcd80,
+	0xe700,
+	0xc00,
+	0x30ce0,
+	0x30ce0,
+	0x30ce0,
+	0x30ce0,
+	0xd00,
+	0x680,
+	0x680,
+	0x680,
+	0x680,
+	0x70e80,
+	0x71080,
+	0x71080,
+	0x71080,
+	0xa680,
+	0x8680,
+	0x80680,
+	0x1158,
+	0x6d8,
+	0x80680,
+	0x1168,
+	0x7e8,
+	0x7e8,
+	0x87e8,
+	0x40fe8,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0x1168,
+	0x7e8,
+	0x7e8,
+	0xa7e8,
+	0x80680,
+	0x40e88,
+	0x41088,
+	0x41088,
+	0x41088,
+	0x40f68,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0xa680,
+	0x40fe8,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0x41008,
+	0x41088,
+	0x41088,
+	0x41088,
+	0x1100,
+	0xc680,
+	0x8680,
+	0xe680,
+	0x80680,
+	0x0,
+	0x0,
+	0xa000,
+	0x8000,
+	0x80000,
+	0x80,
+	0x80,
+	0x80,
+	0x80,
+	0xa080,
+	0x8080,
+	0x80080,
+	0x9180,
+	0x8680,
+	0xa680,
+	0x80680,
+	0x40f08,
+	0x80680
+};
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
diff --git a/drivers/ddr/altera/sequencer_defines.h b/drivers/ddr/altera/sequencer_defines.h
new file mode 100644
index 0000000..c38e862
--- /dev/null
+++ b/drivers/ddr/altera/sequencer_defines.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#ifndef _SEQUENCER_DEFINES_H_
+#define _SEQUENCER_DEFINES_H_
+
+#define AC_ROM_MR1_MIRR 0000000000100
+#define AC_ROM_MR1_OCD_ENABLE
+#define AC_ROM_MR2_MIRR 0000000010000
+#define AC_ROM_MR3_MIRR 0000000000000
+#define AC_ROM_MR0_CALIB
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define AC_ROM_MR0_DLL_RESET_MIRR 0100011001000
+#define AC_ROM_MR0_DLL_RESET 0100100110000
+#define AC_ROM_MR0_MIRR 0100001001001
+#define AC_ROM_MR0 0100000110001
+#else
+#define AC_ROM_MR0_DLL_RESET_MIRR 0010011001000
+#define AC_ROM_MR0_DLL_RESET 0010100110000
+#define AC_ROM_MR0_MIRR 0010001001001
+#define AC_ROM_MR0 0010000110001
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define AC_ROM_MR1 0000000000100
+#define AC_ROM_MR2 0000000001000
+#define AC_ROM_MR3 0000000000000
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define AFI_CLK_FREQ 534
+#else
+#define AFI_CLK_FREQ 401
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define AFI_RATE_RATIO 1
+#define AVL_CLK_FREQ 67
+#define BFM_MODE 0
+#define BURST2 0
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define CALIB_LFIFO_OFFSET 8
+#define CALIB_VFIFO_OFFSET 6
+#else
+#define CALIB_LFIFO_OFFSET 7
+#define CALIB_VFIFO_OFFSET 5
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define ENABLE_EXPORT_SEQ_DEBUG_BRIDGE 0
+#define ENABLE_SUPER_QUICK_CALIBRATION 0
+#define GUARANTEED_READ_BRINGUP_TEST 0
+#define HARD_PHY 1
+#define HARD_VFIFO 1
+#define HPS_HW 1
+#define HR_DDIO_OUT_HAS_THREE_REGS 0
+#define IO_DELAY_PER_DCHAIN_TAP 25
+#define IO_DELAY_PER_DQS_EN_DCHAIN_TAP 25
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define IO_DELAY_PER_OPA_TAP 234
+#else
+#define IO_DELAY_PER_OPA_TAP 312
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define IO_DLL_CHAIN_LENGTH 8
+#define IO_DM_OUT_RESERVE 0
+#define IO_DQDQS_OUT_PHASE_MAX 0
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define IO_DQS_EN_DELAY_MAX 15
+#define IO_DQS_EN_DELAY_OFFSET 16
+#else
+#define IO_DQS_EN_DELAY_MAX 31
+#define IO_DQS_EN_DELAY_OFFSET 0
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define IO_DQS_EN_PHASE_MAX 7
+#define IO_DQS_IN_DELAY_MAX 31
+#define IO_DQS_IN_RESERVE 4
+#define IO_DQS_OUT_RESERVE 6
+#define IO_DQ_OUT_RESERVE 0
+#define IO_IO_IN_DELAY_MAX 31
+#define IO_IO_OUT1_DELAY_MAX 31
+#define IO_IO_OUT2_DELAY_MAX 0
+#define IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS 0
+#define MARGIN_VARIATION_TEST 0
+#define MAX_LATENCY_COUNT_WIDTH 5
+#define MEM_ADDR_WIDTH 13
+#define READ_VALID_FIFO_SIZE 16
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define REG_FILE_INIT_SEQ_SIGNATURE 0x5555048c
+#else
+#define REG_FILE_INIT_SEQ_SIGNATURE 0x55550483
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define RW_MGR_MEM_ADDRESS_MIRRORING 0
+#define RW_MGR_MEM_ADDRESS_WIDTH 15
+#define RW_MGR_MEM_BANK_WIDTH 3
+#define RW_MGR_MEM_CHIP_SELECT_WIDTH 1
+#define RW_MGR_MEM_CLK_EN_WIDTH 1
+#define RW_MGR_MEM_CONTROL_WIDTH 1
+#define RW_MGR_MEM_DATA_MASK_WIDTH 5
+#define RW_MGR_MEM_DATA_WIDTH 40
+#define RW_MGR_MEM_DQ_PER_READ_DQS 8
+#define RW_MGR_MEM_DQ_PER_WRITE_DQS 8
+#define RW_MGR_MEM_IF_READ_DQS_WIDTH 5
+#define RW_MGR_MEM_IF_WRITE_DQS_WIDTH 5
+#define RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM 1
+#define RW_MGR_MEM_ODT_WIDTH 1
+#define RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS 1
+#define RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS 1
+#define RW_MGR_MR0_BL 1
+#define RW_MGR_MR0_CAS_LATENCY 3
+#define RW_MGR_TRUE_MEM_DATA_MASK_WIDTH 5
+#define RW_MGR_WRITE_TO_DEBUG_READ 1.0
+#define SKEW_CALIBRATION 0
+#define TINIT_CNTR1_VAL 32
+#define TINIT_CNTR2_VAL 32
+#define TINIT_CNTR0_VAL 132
+#define TRESET_CNTR1_VAL 99
+#define TRESET_CNTR2_VAL 10
+#define TRESET_CNTR0_VAL 132
+
+#endif /* _SEQUENCER_DEFINES_H_ */
diff --git a/include/configs/socfpga_cyclone5.h b/include/configs/socfpga_cyclone5.h
index 676144a..8c01b88 100644
--- a/include/configs/socfpga_cyclone5.h
+++ b/include/configs/socfpga_cyclone5.h
@@ -39,6 +39,7 @@
 
 #define CONFIG_REGEX			/* Enable regular expression support */
 
+#define CONFIG_ALTERA_SDRAM
 /* Memory configurations */
 #define PHYS_SDRAM_1_SIZE		0x40000000	/* 1GiB on SoCDK */
 
diff --git a/scripts/Makefile.spl b/scripts/Makefile.spl
index fcacb7f..a04b286 100644
--- a/scripts/Makefile.spl
+++ b/scripts/Makefile.spl
@@ -60,6 +60,7 @@ libs-$(CONFIG_SPL_GPIO_SUPPORT) += drivers/gpio/
 libs-$(CONFIG_SPL_MMC_SUPPORT) += drivers/mmc/
 libs-$(CONFIG_SPL_MPC8XXX_INIT_DDR_SUPPORT) += drivers/ddr/fsl/
 libs-$(CONFIG_SYS_MVEBU_DDR) += drivers/ddr/mvebu/
+libs-$(CONFIG_ALTERA_SDRAM) += drivers/ddr/altera/
 libs-$(CONFIG_SPL_SERIAL_SUPPORT) += drivers/serial/
 libs-$(CONFIG_SPL_SPI_FLASH_SUPPORT) += drivers/mtd/spi/
 libs-$(CONFIG_SPL_SPI_SUPPORT) += drivers/spi/
-- 
2.2.1
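
For anyone wiring the new driver into another board: the socfpga_cyclone5.h and
scripts/Makefile.spl hunks above are the only glue needed. A minimal sketch of the
board side, assuming a hypothetical board header name (the CONFIG_ALTERA_SDRAM
symbol and the drivers/ddr/altera/ path are the ones this patch adds):

	/* include/configs/socfpga_myboard.h -- hypothetical board header */
	#define CONFIG_ALTERA_SDRAM	/* pull drivers/ddr/altera/ into the build */

With that symbol defined, the libs-$(CONFIG_ALTERA_SDRAM) line added to
scripts/Makefile.spl pulls drivers/ddr/altera/ into the SPL build.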


Thread overview: 85+ messages
2015-03-30 22:01 [U-Boot] [PATCHv3 00/17] Add SPL support for SoCFPGA dinguyen at opensource.altera.com
2015-03-30 22:01 ` dinguyen at opensource.altera.com [this message]
2015-03-31  6:41   ` [U-Boot] [PATCHv3 01/17] arm: socfpga: spl: Add main sdram code Wolfgang Denk
2015-04-03  2:00     ` Marek Vasut
2015-04-03  4:55       ` Dinh Nguyen
2015-04-03 23:31         ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 02/17] arm: socfpga: spl: Add CONFIG_SPL_MAX_SIZE to be 64KB dinguyen at opensource.altera.com
2015-04-03  1:44   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 03/17] arm: socfpga: add functions to bring sdram, timer, and uart out of reset dinguyen at opensource.altera.com
2015-04-03  1:45   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 04/17] arm: socfpga: spl: enable sdram, timer and uart dinguyen at opensource.altera.com
2015-04-03  1:45   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 05/17] arm: socfpga: spl: Add call to timer_init dinguyen at opensource.altera.com
2015-04-03  1:45   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 06/17] arm: socfpga: spl: allow bootrom to enable IOs after warm reset dinguyen at opensource.altera.com
2015-04-03  1:46   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 07/17] arm: socfpga: spl: add sdram init and calibration dinguyen at opensource.altera.com
2015-03-31 21:00   ` Pavel Machek
2015-04-03  1:47     ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 08/17] arm: socfpga: spl: printout sdram size dinguyen at opensource.altera.com
2015-04-03  1:47   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 09/17] arm: socfpga: spl: Use common lowlevel_init dinguyen at opensource.altera.com
2015-04-03  1:48   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 10/17] arm: socfpga: spl: Add s_init stub dinguyen at opensource.altera.com
2015-04-03  1:49   ` Marek Vasut
2015-04-07 14:31     ` Dinh Nguyen
2015-04-11 16:57       ` Marek Vasut
2015-04-13 15:20         ` Dinh Nguyen
2015-04-13 15:33           ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 11/17] arm: socfpga: spl: add CONFIG_SPL_STACK to socfpga_common.h dinguyen at opensource.altera.com
2015-04-03  1:50   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 12/17] arm: socfpga: spl: Adjust the SYS_INIT_RAM_SIZE to have room for the spl malloc dinguyen at opensource.altera.com
2015-04-03  1:51   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 13/17] arm: socfpga: spl: add board_init_f to SPL dinguyen at opensource.altera.com
2015-03-31 21:07   ` Pavel Machek
2015-04-03  1:52     ` Marek Vasut
2015-04-07 14:34       ` Dinh Nguyen
2015-04-11 16:57         ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 14/17] arm: socfpga: spl: Add SDRAM check dinguyen at opensource.altera.com
2015-03-31 21:11   ` Pavel Machek
2015-04-03  1:53     ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 15/17] arm: socfpga: spl: update pll_config for dev kit dinguyen at opensource.altera.com
2015-04-03  1:54   ` Marek Vasut
2015-04-15 20:49     ` Dinh Nguyen
2015-04-16  6:24       ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 16/17] arm: socfpga: remove the need to map sdram in arch_early_init dinguyen at opensource.altera.com
2015-03-31 21:13   ` Pavel Machek
2015-04-03  1:55   ` Marek Vasut
2015-03-30 22:01 ` [U-Boot] [PATCHv3 17/17] arm: socfpga: fix uart0 pin mux configuration dinguyen at opensource.altera.com
2015-03-31 20:48   ` Pavel Machek
2015-03-31 20:59     ` Dinh Nguyen
2015-03-31 21:13       ` Pavel Machek
2015-04-03  1:57         ` Marek Vasut
2015-04-03  1:56   ` Marek Vasut
2015-03-31 21:14 ` [U-Boot] [PATCHv3 00/17] Add SPL support for SoCFPGA Pavel Machek
2015-04-06 14:40 ` [U-Boot] printf("%d") breaks u-boot 2015.01+ Pavel Machek
2015-04-06 14:59   ` Marek Vasut
2015-04-06 18:14     ` Pavel Machek
2015-04-06 18:48       ` Marek Vasut
2015-04-06 19:23         ` Pavel Machek
2015-04-06 21:02           ` Marek Vasut
2015-04-07  7:53             ` Pavel Machek
2015-04-07 12:23     ` [U-Boot] u-boot 2015.04 on socfpga was " Pavel Machek
2015-04-07  5:13   ` [U-Boot] " Heiko Schocher
2015-04-07  7:56     ` Pavel Machek
2015-04-07  8:16       ` Heiko Schocher
2015-04-08 12:09         ` Pavel Machek
2015-04-08 13:13           ` Marek Vasut
2015-04-08 14:00             ` Pavel Machek
2015-04-13 11:12             ` [U-Boot] "socfpga sdram_applycfg" in mainline u-boot Pavel Machek
2015-04-13 11:24               ` Marek Vasut
2015-04-13 11:29                 ` Stefan Roese
2015-04-08 13:49           ` [U-Boot] printf("%d") breaks u-boot 2015.01+ Tom Rini
2015-04-08 14:06             ` Pavel Machek
2015-04-08 15:53               ` Tom Rini
2015-04-08 16:06                 ` Pavel Machek
2015-04-08 16:43                   ` Tom Rini
2015-04-08 16:08                 ` Tom Rini
2015-04-13 12:45                   ` Pavel Machek
2015-04-13 12:49                   ` [U-Boot] [patch] break build if it would produce broken binary Pavel Machek
2015-04-13 12:52                     ` Tom Rini
2015-04-13 20:38                       ` Pavel Machek
2015-06-02 17:11                         ` Simon Glass
2015-08-13 15:06                           ` Pavel Machek
2015-08-13 17:41                             ` Marek Vasut
