* [U-Boot] [PATCH RESEND 0/3] drivers/ddr/altera: Add the DDR controller driver for SoCFPGA
@ 2015-04-16 14:11 dinguyen at opensource.altera.com
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller dinguyen at opensource.altera.com
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: dinguyen at opensource.altera.com @ 2015-04-16 14:11 UTC (permalink / raw)
  To: u-boot

From: Dinh Nguyen <dinguyen@opensource.altera.com>

Hi,

This is a resend of the patch series that adds the DDR controller driver for
Altera's SoCFPGA platform. This resend contains a new patch:

arm: socfpga: enable the Altera SDRAM controller driver

This new patch is necessary for the driver to build against u-boot-socfpga/master.

Thanks,

Dinh Nguyen (3):
  driver/ddr/altera: Add DDR driver for Altera's SDRAM controller
  driver/ddr/altera/: Add the sdram calibration portion
  arm: socfpga: enable the Altera SDRAM controller driver

 Makefile                                         |    1 +
 arch/arm/include/asm/arch-socfpga/sdram.h        |  294 ++
 arch/arm/include/asm/arch-socfpga/sdram_config.h |  100 +
 drivers/ddr/altera/Makefile                      |   12 +
 drivers/ddr/altera/sdram.c                       |  827 +++++
 drivers/ddr/altera/sequencer.c                   | 3907 ++++++++++++++++++++++
 drivers/ddr/altera/sequencer.h                   |  328 ++
 drivers/ddr/altera/sequencer_auto.h              |  128 +
 drivers/ddr/altera/sequencer_auto_ac_init.c      |   85 +
 drivers/ddr/altera/sequencer_auto_inst_init.c    |  270 ++
 drivers/ddr/altera/sequencer_defines.h           |  121 +
 include/configs/socfpga_common.h                 |    5 +
 scripts/Makefile.spl                             |    1 +
 13 files changed, 6079 insertions(+)
 create mode 100644 arch/arm/include/asm/arch-socfpga/sdram.h
 create mode 100644 arch/arm/include/asm/arch-socfpga/sdram_config.h
 create mode 100644 drivers/ddr/altera/Makefile
 create mode 100644 drivers/ddr/altera/sdram.c
 create mode 100644 drivers/ddr/altera/sequencer.c
 create mode 100644 drivers/ddr/altera/sequencer.h
 create mode 100644 drivers/ddr/altera/sequencer_auto.h
 create mode 100644 drivers/ddr/altera/sequencer_auto_ac_init.c
 create mode 100644 drivers/ddr/altera/sequencer_auto_inst_init.c
 create mode 100644 drivers/ddr/altera/sequencer_defines.h

-- 
2.2.1


* [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller
  2015-04-16 14:11 [U-Boot] [PATCH RESEND 0/3] drivers/ddr/altera: Add the DDR controller driver for SoCFPGA dinguyen at opensource.altera.com
@ 2015-04-16 14:11 ` dinguyen at opensource.altera.com
  2015-04-17 12:31   ` Pavel Machek
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 2/3] driver/ddr/altera/: Add the sdram calibration portion dinguyen at opensource.altera.com
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 3/3] arm: socfpga: enable the Altera SDRAM controller driver dinguyen at opensource.altera.com
  2 siblings, 1 reply; 8+ messages in thread
From: dinguyen at opensource.altera.com @ 2015-04-16 14:11 UTC (permalink / raw)
  To: u-boot

From: Dinh Nguyen <dinguyen@opensource.altera.com>

This patch adds support for the SDRAM controller used on Altera's SoCFPGA
family. The controller is configured from sdram_config.h, a configuration
file generated by the Quartus tool.
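
For illustration only (not something this patch adds): each Quartus-generated
CONFIG_HPS_* value is written into its register field through the matching
_MASK/_LSB pair from sdram.h. A minimal sketch of that pattern, assuming
U-Boot's clrsetbits_le32() from <asm/io.h> and a hypothetical sdr_set_field()
helper name:

	/* Hypothetical helper; the driver below open-codes this per field. */
	static inline void sdr_set_field(u32 *reg, u32 mask, u32 lsb, u32 val)
	{
		clrsetbits_le32(reg, mask, (val << lsb) & mask);
	}

	/* e.g. programming the CAS latency field of DRAMTIMING1 becomes:
	 * sdr_set_field(&sdr_ctrl->dram_timing1,
	 *		 SDR_CTRLGRP_DRAMTIMING1_TCL_MASK,
	 *		 SDR_CTRLGRP_DRAMTIMING1_TCL_LSB,
	 *		 CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL);
	 */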

Signed-off-by: Dinh Nguyen <dinguyen@opensource.altera.com>
---
 Makefile                                         |   1 +
 arch/arm/include/asm/arch-socfpga/sdram.h        | 294 ++++++++
 arch/arm/include/asm/arch-socfpga/sdram_config.h | 100 +++
 drivers/ddr/altera/sdram.c                       | 827 +++++++++++++++++++++++
 scripts/Makefile.spl                             |   1 +
 5 files changed, 1223 insertions(+)
 create mode 100644 arch/arm/include/asm/arch-socfpga/sdram.h
 create mode 100644 arch/arm/include/asm/arch-socfpga/sdram_config.h
 create mode 100644 drivers/ddr/altera/sdram.c

diff --git a/Makefile b/Makefile
index 0f7d583..163d530 100644
--- a/Makefile
+++ b/Makefile
@@ -649,6 +649,7 @@ libs-y += drivers/power/ \
 libs-y += drivers/spi/
 libs-$(CONFIG_FMAN_ENET) += drivers/net/fm/
 libs-$(CONFIG_SYS_FSL_DDR) += drivers/ddr/fsl/
+libs-$(CONFIG_ALTERA_SDRAM) += drivers/ddr/altera/
 libs-y += drivers/serial/
 libs-y += drivers/usb/dwc3/
 libs-y += drivers/usb/eth/
diff --git a/arch/arm/include/asm/arch-socfpga/sdram.h b/arch/arm/include/asm/arch-socfpga/sdram.h
new file mode 100644
index 0000000..f46c5f4
--- /dev/null
+++ b/arch/arm/include/asm/arch-socfpga/sdram.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright Altera Corporation (C) 2014-2015
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+#ifndef	_SDRAM_H_
+#define	_SDRAM_H_
+
+#ifndef __ASSEMBLY__
+
+/* function declarations */
+unsigned long sdram_calculate_size(void);
+unsigned sdram_mmr_init_full(unsigned int sdr_phy_reg);
+int sdram_calibration_full(void);
+
+extern int sdram_calibration(void);
+
+#define SDR_CTRLGRP_ADDRESS 0x5000
+
+struct socfpga_sdr_ctrl {
+	u32	ctrl_cfg;
+	u32	dram_timing1;
+	u32	dram_timing2;
+	u32	dram_timing3;
+	u32	dram_timing4;	/* 0x10 */
+	u32	lowpwr_timing;
+	u32	dram_odt;
+	u32	__padding0[4];
+	u32	dram_addrw;	/* 0x2c */
+	u32	dram_if_width;	/* 0x30 */
+	u32	dram_dev_width;
+	u32	dram_sts;
+	u32	dram_intr;
+	u32	sbe_count;	/* 0x40 */
+	u32	dbe_count;
+	u32	err_addr;
+	u32	drop_count;
+	u32	drop_addr;	/* 0x50 */
+	u32	lowpwr_eq;
+	u32	lowpwr_ack;
+	u32	static_cfg;
+	u32	ctrl_width;	/* 0x60 */
+	u32	cport_width;
+	u32	cport_wmap;
+	u32	cport_rmap;
+	u32	rfifo_cmap;	/* 0x70 */
+	u32	wfifo_cmap;
+	u32	cport_rdwr;
+	u32	port_cfg;
+	u32	fpgaport_rst;	/* 0x80 */
+	u32	__padding1;
+	u32	fifo_cfg;
+	u32	protport_default;
+	u32	prot_rule_addr;	/* 0x90 */
+	u32	prot_rule_id;
+	u32	prot_rule_data;
+	u32	prot_rule_rdwr;
+	u32	__padding2[3];
+	u32	mp_priority;	/* 0xac */
+	u32	mp_weight0;	/* 0xb0 */
+	u32	mp_weight1;
+	u32	mp_weight2;
+	u32	mp_weight3;
+	u32	mp_pacing0;	/* 0xc0 */
+	u32	mp_pacing1;
+	u32	mp_pacing2;
+	u32	mp_pacing3;
+	u32	mp_threshold0;	/* 0xd0 */
+	u32	mp_threshold1;
+	u32	mp_threshold2;
+	u32	__padding3[29];
+	u32	phy_ctrl0;	/* 0x150 */
+	u32	phy_ctrl1;
+	u32	phy_ctrl2;
+};
+
+#define SDR_CTRLGRP_CTRLCFG_NODMPINS_LSB 23
+#define SDR_CTRLGRP_CTRLCFG_NODMPINS_MASK 0x00800000
+#define SDR_CTRLGRP_CTRLCFG_DQSTRKEN_LSB 22
+#define SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK 0x00400000
+#define SDR_CTRLGRP_CTRLCFG_STARVELIMIT_LSB 16
+#define SDR_CTRLGRP_CTRLCFG_STARVELIMIT_MASK 0x003f0000
+#define SDR_CTRLGRP_CTRLCFG_REORDEREN_LSB 15
+#define SDR_CTRLGRP_CTRLCFG_REORDEREN_MASK 0x00008000
+#define SDR_CTRLGRP_CTRLCFG_ECCCORREN_LSB 11
+#define SDR_CTRLGRP_CTRLCFG_ECCCORREN_MASK 0x00000800
+#define SDR_CTRLGRP_CTRLCFG_ECCEN_LSB 10
+#define SDR_CTRLGRP_CTRLCFG_ECCEN_MASK 0x00000400
+#define SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB 8
+#define SDR_CTRLGRP_CTRLCFG_ADDRORDER_MASK 0x00000300
+#define SDR_CTRLGRP_CTRLCFG_MEMBL_LSB 3
+#define SDR_CTRLGRP_CTRLCFG_MEMBL_MASK 0x000000f8
+#define SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB 0
+#define SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK 0x00000007
+/* Register template: sdr::ctrlgrp::dramtiming1                            */
+#define SDR_CTRLGRP_DRAMTIMING1_TRFC_LSB 24
+#define SDR_CTRLGRP_DRAMTIMING1_TRFC_MASK 0xff000000
+#define SDR_CTRLGRP_DRAMTIMING1_TFAW_LSB 18
+#define SDR_CTRLGRP_DRAMTIMING1_TFAW_MASK 0x00fc0000
+#define SDR_CTRLGRP_DRAMTIMING1_TRRD_LSB 14
+#define SDR_CTRLGRP_DRAMTIMING1_TRRD_MASK 0x0003c000
+#define SDR_CTRLGRP_DRAMTIMING1_TCL_LSB 9
+#define SDR_CTRLGRP_DRAMTIMING1_TCL_MASK 0x00003e00
+#define SDR_CTRLGRP_DRAMTIMING1_TAL_LSB 4
+#define SDR_CTRLGRP_DRAMTIMING1_TAL_MASK 0x000001f0
+#define SDR_CTRLGRP_DRAMTIMING1_TCWL_LSB 0
+#define SDR_CTRLGRP_DRAMTIMING1_TCWL_MASK 0x0000000f
+/* Register template: sdr::ctrlgrp::dramtiming2                            */
+#define SDR_CTRLGRP_DRAMTIMING2_TWTR_LSB 25
+#define SDR_CTRLGRP_DRAMTIMING2_TWTR_MASK 0x1e000000
+#define SDR_CTRLGRP_DRAMTIMING2_TWR_LSB 21
+#define SDR_CTRLGRP_DRAMTIMING2_TWR_MASK 0x01e00000
+#define SDR_CTRLGRP_DRAMTIMING2_TRP_LSB 17
+#define SDR_CTRLGRP_DRAMTIMING2_TRP_MASK 0x001e0000
+#define SDR_CTRLGRP_DRAMTIMING2_TRCD_LSB 13
+#define SDR_CTRLGRP_DRAMTIMING2_TRCD_MASK 0x0001e000
+#define SDR_CTRLGRP_DRAMTIMING2_TREFI_LSB 0
+#define SDR_CTRLGRP_DRAMTIMING2_TREFI_MASK 0x00001fff
+/* Register template: sdr::ctrlgrp::dramtiming3                            */
+#define SDR_CTRLGRP_DRAMTIMING3_TCCD_LSB 19
+#define SDR_CTRLGRP_DRAMTIMING3_TCCD_MASK 0x00780000
+#define SDR_CTRLGRP_DRAMTIMING3_TMRD_LSB 15
+#define SDR_CTRLGRP_DRAMTIMING3_TMRD_MASK 0x00078000
+#define SDR_CTRLGRP_DRAMTIMING3_TRC_LSB 9
+#define SDR_CTRLGRP_DRAMTIMING3_TRC_MASK 0x00007e00
+#define SDR_CTRLGRP_DRAMTIMING3_TRAS_LSB 4
+#define SDR_CTRLGRP_DRAMTIMING3_TRAS_MASK 0x000001f0
+#define SDR_CTRLGRP_DRAMTIMING3_TRTP_LSB 0
+#define SDR_CTRLGRP_DRAMTIMING3_TRTP_MASK 0x0000000f
+/* Register template: sdr::ctrlgrp::dramtiming4                            */
+#define SDR_CTRLGRP_DRAMTIMING4_MINPWRSAVECYCLES_LSB 20
+#define SDR_CTRLGRP_DRAMTIMING4_MINPWRSAVECYCLES_MASK 0x00f00000
+#define SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_LSB 10
+#define SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_MASK 0x000ffc00
+#define SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_LSB 0
+#define SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_MASK 0x000003ff
+/* Register template: sdr::ctrlgrp::lowpwrtiming                           */
+#define SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_LSB 16
+#define SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_MASK 0x000f0000
+#define SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_LSB 0
+#define SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_MASK 0x0000ffff
+/* Register template: sdr::ctrlgrp::dramaddrw                              */
+#define SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB 13
+#define SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK 0x0000e000
+#define SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB 10
+#define SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK 0x00001c00
+#define SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB 5
+#define SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK 0x000003e0
+#define SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB 0
+#define SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK 0x0000001f
+/* Register template: sdr::ctrlgrp::dramifwidth                            */
+#define SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_LSB 0
+#define SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_MASK 0x000000ff
+/* Register template: sdr::ctrlgrp::dramdevwidth                           */
+#define SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_LSB 0
+#define SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_MASK 0x0000000f
+/* Register template: sdr::ctrlgrp::dramintr                               */
+#define SDR_CTRLGRP_DRAMINTR_INTREN_LSB 0
+#define SDR_CTRLGRP_DRAMINTR_INTREN_MASK 0x00000001
+#define SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_LSB 4
+#define SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_MASK 0x00000030
+/* Register template: sdr::ctrlgrp::staticcfg                              */
+#define SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB 3
+#define SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK 0x00000008
+#define SDR_CTRLGRP_STATICCFG_USEECCASDATA_LSB 2
+#define SDR_CTRLGRP_STATICCFG_USEECCASDATA_MASK 0x00000004
+#define SDR_CTRLGRP_STATICCFG_MEMBL_LSB 0
+#define SDR_CTRLGRP_STATICCFG_MEMBL_MASK 0x00000003
+/* Register template: sdr::ctrlgrp::ctrlwidth                              */
+#define SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_LSB 0
+#define SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_MASK 0x00000003
+/* Register template: sdr::ctrlgrp::cportwidth                             */
+#define SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_LSB 0
+#define SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_MASK 0x000fffff
+/* Register template: sdr::ctrlgrp::cportwmap                              */
+#define SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_LSB 0
+#define SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_MASK 0x3fffffff
+/* Register template: sdr::ctrlgrp::cportrmap                              */
+#define SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_LSB 0
+#define SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_MASK 0x3fffffff
+/* Register template: sdr::ctrlgrp::rfifocmap                              */
+#define SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_LSB 0
+#define SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_MASK 0x00ffffff
+/* Register template: sdr::ctrlgrp::wfifocmap                              */
+#define SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_LSB 0
+#define SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_MASK 0x00ffffff
+/* Register template: sdr::ctrlgrp::cportrdwr                              */
+#define SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_LSB 0
+#define SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_MASK 0x000fffff
+/* Register template: sdr::ctrlgrp::portcfg                                */
+#define SDR_CTRLGRP_PORTCFG_AUTOPCHEN_LSB 10
+#define SDR_CTRLGRP_PORTCFG_AUTOPCHEN_MASK 0x000ffc00
+#define SDR_CTRLGRP_PORTCFG_PORTPROTOCOL_LSB 0
+#define SDR_CTRLGRP_PORTCFG_PORTPROTOCOL_MASK 0x000003ff
+/* Register template: sdr::ctrlgrp::fifocfg                                */
+#define SDR_CTRLGRP_FIFOCFG_INCSYNC_LSB 10
+#define SDR_CTRLGRP_FIFOCFG_INCSYNC_MASK 0x00000400
+#define SDR_CTRLGRP_FIFOCFG_SYNCMODE_LSB 0
+#define SDR_CTRLGRP_FIFOCFG_SYNCMODE_MASK 0x000003ff
+/* Register template: sdr::ctrlgrp::mppriority                             */
+#define SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_LSB 0
+#define SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_MASK 0x3fffffff
+/* Register template: sdr::ctrlgrp::mpweight::mpweight_0                   */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_LSB 0
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mpweight::mpweight_1                   */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_LSB 18
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_MASK 0xfffc0000
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_LSB 0
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_MASK 0x0003ffff
+/* Register template: sdr::ctrlgrp::mpweight::mpweight_2                   */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_LSB 0
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mpweight::mpweight_3                   */
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_LSB 0
+#define SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_MASK 0x0003ffff
+/* Register template: sdr::ctrlgrp::mppacing::mppacing_0                   */
+#define SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_LSB 0
+#define SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mppacing::mppacing_1                   */
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_LSB 28
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_MASK 0xf0000000
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_LSB 0
+#define SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_MASK 0x0fffffff
+/* Register template: sdr::ctrlgrp::mppacing::mppacing_2                   */
+#define SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_LSB 0
+#define SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mppacing::mppacing_3                   */
+#define SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_LSB 0
+#define SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_MASK 0x00ffffff
+/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_0       */
+#define SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB 0
+#define SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_1       */
+#define SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB 0
+#define SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK 0xffffffff
+/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_2       */
+#define SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB 0
+#define SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK 0x0000ffff
+/* Register template: sdr::ctrlgrp::remappriority                          */
+#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_LSB 0
+#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_MASK 0x000000ff
+/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_0                     */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_LSB 12
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH 20
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(x) \
+ (((x) << 12) & 0xfffff000)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(x) \
+ (((x) << 10) & 0x00000c00)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(x) \
+ (((x) << 6) & 0x000000c0)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(x) \
+ (((x) << 8) & 0x00000100)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(x) \
+ (((x) << 9) & 0x00000200)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(x) \
+ (((x) << 4) & 0x00000030)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(x) \
+ (((x) << 2) & 0x0000000c)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(x) \
+ (((x) << 0) & 0x00000003)
+/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_1                     */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH 20
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(x) \
+ (((x) << 12) & 0xfffff000)
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(x) \
+ (((x) << 0) & 0x00000fff)
+/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_2                     */
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(x) \
+ (((x) << 0) & 0x00000fff)
+/* Register template: sdr::ctrlgrp::dramodt                                */
+#define SDR_CTRLGRP_DRAMODT_READ_LSB 4
+#define SDR_CTRLGRP_DRAMODT_READ_MASK 0x000000f0
+#define SDR_CTRLGRP_DRAMODT_WRITE_LSB 0
+#define SDR_CTRLGRP_DRAMODT_WRITE_MASK 0x0000000f
+/* Field instance: sdr::ctrlgrp::dramsts                                   */
+#define SDR_CTRLGRP_DRAMSTS_DBEERR_MASK 0x00000008
+#define SDR_CTRLGRP_DRAMSTS_SBEERR_MASK 0x00000004
+
+/* SDRAM width macros for configurations with ECC */
+#define SDRAM_WIDTH_32BIT_WITH_ECC	40
+#define SDRAM_WIDTH_16BIT_WITH_ECC	24
+
+#endif
+#endif /* _SDRAM_H_ */
diff --git a/arch/arm/include/asm/arch-socfpga/sdram_config.h b/arch/arm/include/asm/arch-socfpga/sdram_config.h
new file mode 100644
index 0000000..93687c3
--- /dev/null
+++ b/arch/arm/include/asm/arch-socfpga/sdram_config.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2015
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#ifndef __SDRAM_CONFIG_H
+#define __SDRAM_CONFIG_H
+
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMTYPE			2
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMBL			8
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER		0
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCEN			1
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCCORREN		1
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_REORDEREN		1
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_STARVELIMIT		10
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_DQSTRKEN			0
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_NODMPINS			0
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCWL			6
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_AL			0
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL			7
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The #ifdef..#else is not needed when this file is generated by the tools */
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD			4
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW			19
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC			139
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI		4160
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD		8
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP		8
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR		8
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR		4
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP			4
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS			19
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC			26
+#else
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD			3
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW			14
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC			104
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI		3120
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD		6
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP		6
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR		6
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR		4
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP			3
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS			14
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC			20
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TMRD			4
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TCCD			4
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_SELFRFSHEXIT		512
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_PWRDOWNEXIT		3
+#define CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_AUTOPDCYCLES	0
+#define CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_CLKDISABLECYCLES	8
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS		10
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS		15
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS		3
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS			1
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMIFWIDTH_IFWIDTH		40
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMDEVWIDTH_DEVWIDTH		8
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMINTR_INTREN			0
+#define CONFIG_HPS_SDR_CTRLCFG_LOWPWREQ_SELFRFSHMASK		3
+#define CONFIG_HPS_SDR_CTRLCFG_STATICCFG_MEMBL			2
+#define CONFIG_HPS_SDR_CTRLCFG_STATICCFG_USEECCASDATA		0
+#define CONFIG_HPS_SDR_CTRLCFG_CTRLWIDTH_CTRLWIDTH		2
+#define CONFIG_HPS_SDR_CTRLCFG_PORTCFG_AUTOPCHEN		0
+#define CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_SYNCMODE			0
+#define CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_INCSYNC			0
+#define CONFIG_HPS_SDR_CTRLCFG_MPPRIORITY_USERPRIORITY		0x3FFD1088
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_0_STATICWEIGHT_31_0	0x21084210
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_STATICWEIGHT_49_32	0x1EF84
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_SUMOFWEIGHT_13_0	0x2020
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_2_SUMOFWEIGHT_45_14	0x0
+#define CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_3_SUMOFWEIGHT_63_46	0xF800
+#define CONFIG_HPS_SDR_CTRLCFG_PHYCTRL_PHYCTRL_0		0x200
+
+#define CONFIG_HPS_SDR_CTRLCFG_CPORTWIDTH_CPORTWIDTH		0x44555
+#define CONFIG_HPS_SDR_CTRLCFG_CPORTWMAP_CPORTWMAP		0x2C011000
+#define CONFIG_HPS_SDR_CTRLCFG_CPORTRMAP_CPORTRMAP		0xB00088
+#define CONFIG_HPS_SDR_CTRLCFG_RFIFOCMAP_RFIFOCMAP		0x760210
+#define CONFIG_HPS_SDR_CTRLCFG_WFIFOCMAP_WFIFOCMAP		0x980543
+#define CONFIG_HPS_SDR_CTRLCFG_CPORTRDWR_CPORTRDWR		0x5A56A
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_0_THRESHOLD1_31_0	0x20820820
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD1_59_32	0x8208208
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0	0
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4	0x41041041
+#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36	0x410410
+#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0 \
+0x01010101
+#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32 \
+0x01010101
+#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64 \
+0x0101
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMODT_READ			0
+#define CONFIG_HPS_SDR_CTRLCFG_DRAMODT_WRITE			1
+#define CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST_READ_PORT_USED	0
+#define CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST_WRITE_PORT_USED	0
+#define CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST_COMMAND_PORT_USED	0
+#define CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST			0
+
+#endif	/* __SDRAM_CONFIG_H */
diff --git a/drivers/ddr/altera/sdram.c b/drivers/ddr/altera/sdram.c
new file mode 100644
index 0000000..ff0d7bc
--- /dev/null
+++ b/drivers/ddr/altera/sdram.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright Altera Corporation (C) 2014-2015
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+#include <common.h>
+#include <div64.h>
+#include <watchdog.h>
+#include <asm/arch/fpga_manager.h>
+#include <asm/arch/sdram.h>
+#include <asm/arch/sdram_config.h>
+#include <asm/arch/system_manager.h>
+#include <asm/io.h>
+
+#define COMPARE_FAIL_ACTION	return 1;
+
+/* define constant for 4G memory - used for SDRAM errata workaround */
+#define MEMSIZE_4G (4ULL * 1024ULL * 1024ULL * 1024ULL)
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static struct socfpga_system_manager *sysmgr_regs =
+	(struct socfpga_system_manager *)SOCFPGA_SYSMGR_ADDRESS;
+static struct socfpga_sdr_ctrl *sdr_ctrl =
+	(struct socfpga_sdr_ctrl *)(SOCFPGA_SDR_ADDRESS + SDR_CTRLGRP_ADDRESS);
+
+/* The function below is only applicable to SPL */
+static int compute_errata_rows(unsigned long long memsize, int cs, int width,
+			       int rows, int banks, int cols)
+{
+	unsigned long long newrows;
+	int inewrowslog2;
+	int bits;
+
+	debug("workaround rows - memsize %lld\n", memsize);
+	debug("workaround rows - cs        %d\n", cs);
+	debug("workaround rows - width     %d\n", width);
+	debug("workaround rows - rows      %d\n", rows);
+	debug("workaround rows - banks     %d\n", banks);
+	debug("workaround rows - cols      %d\n", cols);
+
+	newrows = lldiv(memsize, (cs * (width / 8)));
+	debug("rows workaround - term1 %lld\n", newrows);
+
+	newrows = lldiv(newrows, ((1 << banks) * (1 << cols)));
+	debug("rows workaround - term2 %lld\n", newrows);
+
+	/* Compute the Hamming weight, i.e. the number of bits set. The
+	 * result must be a power of 2 before we attempt to take its log2.
+	 */
+	bits = hweight32(newrows);
+
+	debug("rows workaround - bits %d\n", bits);
+
+	if (bits != 1) {
+		printf("SDRAM workaround failed, bits set %d\n", bits);
+		return rows;
+	}
+
+	if (newrows > UINT_MAX) {
+		printf("SDRAM workaround rangecheck failed, %lld\n", newrows);
+		return rows;
+	}
+
+	inewrowslog2 = __ilog2((unsigned int)newrows);
+
+	debug("rows workaround - ilog2 %d, %d\n", inewrowslog2,
+	       (int)newrows);
+
+	if (inewrowslog2 == -1) {
+		printf("SDRAM workaround failed, newrows %d\n", (int)newrows);
+		return rows;
+	}
+
+	return inewrowslog2;
+}
+
+typedef struct _sdram_prot_rule {
+	uint64_t	sdram_start;	/* SDRAM start address */
+	uint64_t	sdram_end;	/* SDRAM end address */
+	uint32_t	rule;		/* SDRAM protection rule number: 0-19 */
+	int		valid;		/* Rule valid or not? 1 - valid, 0 not*/
+
+	uint32_t	security;
+	uint32_t	portmask;
+	uint32_t	result;
+	uint32_t	lo_prot_id;
+	uint32_t	hi_prot_id;
+} sdram_prot_rule, *psdram_prot_rule;
+
+/* SDRAM protection rules are numbered 0-19, for a total of 20 rules. */
+
+static void sdram_set_rule(psdram_prot_rule prule)
+{
+	uint32_t lo_addr_bits;
+	uint32_t hi_addr_bits;
+	int ruleno = prule->rule;
+
+	/* Select the rule */
+	writel(ruleno, &sdr_ctrl->prot_rule_rdwr);
+
+	/* Obtain the address bits */
+	lo_addr_bits = (uint32_t)(((prule->sdram_start) >> 20ULL) & 0xFFF);
+	hi_addr_bits = (uint32_t)((((prule->sdram_end-1) >> 20ULL)) & 0xFFF);
+
+	debug("sdram set rule start %x, %lld\n", lo_addr_bits,
+	      prule->sdram_start);
+	debug("sdram set rule end   %x, %lld\n", hi_addr_bits,
+	      prule->sdram_end);
+
+	/* Set rule addresses */
+	writel(lo_addr_bits | (hi_addr_bits << 12), &sdr_ctrl->prot_rule_addr);
+
+	/* Set rule protection ids */
+	writel(prule->lo_prot_id | (prule->hi_prot_id << 12),
+	       &sdr_ctrl->prot_rule_id);
+
+	/* Set the rule data */
+	writel(prule->security | (prule->valid << 2) |
+	       (prule->portmask << 3) | (prule->result << 13),
+	       &sdr_ctrl->prot_rule_data);
+
+	/* write the rule */
+	writel(ruleno | (1L << 5), &sdr_ctrl->prot_rule_rdwr);
+
+	/* Set rule number to 0 by default */
+	writel(0, &sdr_ctrl->prot_rule_rdwr);
+}
+
+static void sdram_get_rule(psdram_prot_rule prule)
+{
+	uint32_t protruleaddr;
+	uint32_t protruleid;
+	uint32_t protruledata;
+	int ruleno = prule->rule;
+
+	/* Read the rule */
+	writel(ruleno, &sdr_ctrl->prot_rule_rdwr);
+	writel(ruleno | (1L << 6), &sdr_ctrl->prot_rule_rdwr);
+
+	/* Get the addresses */
+	protruleaddr = readl(&sdr_ctrl->prot_rule_addr);
+	prule->sdram_start = (protruleaddr & 0xFFF) << 20;
+	prule->sdram_end = ((protruleaddr >> 12) & 0xFFF) << 20;
+
+	/* Get the configured protection IDs */
+	protruleid = readl(&sdr_ctrl->prot_rule_id);
+	prule->lo_prot_id = protruleid & 0xFFF;
+	prule->hi_prot_id = (protruleid >> 12) & 0xFFF;
+
+	/* Get protection data */
+	protruledata = readl(&sdr_ctrl->prot_rule_data);
+
+	prule->security = protruledata & 0x3;
+	prule->valid = (protruledata >> 2) & 0x1;
+	prule->portmask = (protruledata >> 3) & 0x3FF;
+	prule->result = (protruledata >> 13) & 0x1;
+}
+
+static void sdram_set_protection_config(uint64_t sdram_start, uint64_t sdram_end)
+{
+	sdram_prot_rule rule;
+	int rules;
+
+	/* Start by accepting all SDRAM transactions */
+	writel(0x0, &sdr_ctrl->protport_default);
+
+	/* Clear all protection rules for warm boot case */
+
+	rule.sdram_start = 0;
+	rule.sdram_end = 0;
+	rule.lo_prot_id = 0;
+	rule.hi_prot_id = 0;
+	rule.portmask = 0;
+	rule.security = 0;
+	rule.result = 0;
+	rule.valid = 0;
+	rule.rule = 0;
+
+	for (rules = 0; rules < 20; rules++) {
+		rule.rule = rules;
+		sdram_set_rule(&rule);
+	}
+
+	/* new rule: accept SDRAM */
+	rule.sdram_start = sdram_start;
+	rule.sdram_end = sdram_end;
+	rule.lo_prot_id = 0x0;
+	rule.hi_prot_id = 0xFFF;
+	rule.portmask = 0x3FF;
+	rule.security = 0x3;
+	rule.result = 0;
+	rule.valid = 1;
+	rule.rule = 0;
+
+	/* set new rule */
+	sdram_set_rule(&rule);
+
+	/* default rule: reject everything */
+	writel(0x3ff, &sdr_ctrl->protport_default);
+}
+
+static void sdram_dump_protection_config(void)
+{
+	sdram_prot_rule rule;
+	int rules;
+
+	debug("SDRAM Prot rule, default %x\n",
+	      readl(&sdr_ctrl->protport_default));
+
+	for (rules = 0; rules < 20; rules++) {
+		rule.rule = rules;
+		sdram_get_rule(&rule);
+		debug("Rule %d, rules ...\n", rules);
+		debug("    sdram start %llx\n", rule.sdram_start);
+		debug("    sdram end   %llx\n", rule.sdram_end);
+		debug("    low prot id %d, hi prot id %d\n",
+		      rule.lo_prot_id,
+		      rule.hi_prot_id);
+		debug("    portmask %x\n", rule.portmask);
+		debug("    security %d\n", rule.security);
+		debug("    result %d\n", rule.result);
+		debug("    valid %d\n", rule.valid);
+	}
+}
+
+/* Function to write to a register and verify the write */
+static unsigned sdram_write_verify(unsigned int *addr, unsigned reg_value)
+{
+#ifndef SDRAM_MMR_SKIP_VERIFY
+	unsigned reg_value1;
+#endif
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n", (u32)addr, reg_value);
+	/* Write to register */
+	writel(reg_value, addr);
+#ifndef SDRAM_MMR_SKIP_VERIFY
+	debug("   Read and verify...");
+	/* Read back the written value */
+	reg_value1 = readl(addr);
+	/* Indicate failure if the value does not match */
+	if (reg_value1 != reg_value) {
+		debug("FAIL - Address 0x%08x Expected 0x%08x Data 0x%08x\n",
+		      (u32)addr, reg_value, reg_value1);
+		return 1;
+	}
+	debug("correct!\n");
+#endif	/* SDRAM_MMR_SKIP_VERIFY */
+	return 0;
+}
+
+static void set_sdr_ctrlcfg(void)
+{
+	int addrorder;
+
+	debug("\nConfiguring CTRLCFG\n");
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK,
+		   CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMTYPE <<
+		   SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB);
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_MEMBL_MASK,
+		   CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_MEMBL <<
+		   SDR_CTRLGRP_CTRLCFG_MEMBL_LSB);
+
+
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Set the addrorder field of the SDRAM control register
+	 * based on the CSBITs setting.
+	 */
+	switch (CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS) {
+	case 1:
+		addrorder = 0; /* chip, row, bank, column */
+		if (CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER != 0)
+			debug("INFO: Changing address order to 0 (chip, row, \
+			      bank, column)\n");
+		break;
+	case 2:
+		addrorder = 2; /* row, chip, bank, column */
+		if (CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER != 2)
+			debug("INFO: Changing address order to 2 (row, chip, \
+			      bank, column)\n");
+		break;
+	default:
+		addrorder = CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ADDRORDER;
+		break;
+	}
+
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_ADDRORDER_MASK,
+			addrorder << SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_ECCEN_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCEN <<
+			SDR_CTRLGRP_CTRLCFG_ECCEN_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_ECCCORREN_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_ECCCORREN <<
+			SDR_CTRLGRP_CTRLCFG_ECCCORREN_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_REORDEREN_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_REORDEREN <<
+			SDR_CTRLGRP_CTRLCFG_REORDEREN_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_STARVELIMIT_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_STARVELIMIT <<
+			SDR_CTRLGRP_CTRLCFG_STARVELIMIT_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_DQSTRKEN <<
+			SDR_CTRLGRP_CTRLCFG_DQSTRKEN_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->ctrl_cfg, SDR_CTRLGRP_CTRLCFG_NODMPINS_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLCFG_NODMPINS <<
+			SDR_CTRLGRP_CTRLCFG_NODMPINS_LSB);
+}
+
+static void set_sdr_dram_timing1(void)
+{
+	debug("Configuring DRAMTIMING1\n");
+	clrsetbits_le32(&sdr_ctrl->dram_timing1, SDR_CTRLGRP_DRAMTIMING1_TCWL_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCWL <<
+			SDR_CTRLGRP_DRAMTIMING1_TCWL_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing1, SDR_CTRLGRP_DRAMTIMING1_TAL_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_AL <<
+			SDR_CTRLGRP_DRAMTIMING1_TAL_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing1, SDR_CTRLGRP_DRAMTIMING1_TCL_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TCL <<
+			SDR_CTRLGRP_DRAMTIMING1_TCL_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing1, SDR_CTRLGRP_DRAMTIMING1_TRRD_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRRD <<
+			SDR_CTRLGRP_DRAMTIMING1_TRRD_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing1, SDR_CTRLGRP_DRAMTIMING1_TFAW_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TFAW <<
+			SDR_CTRLGRP_DRAMTIMING1_TFAW_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing1, SDR_CTRLGRP_DRAMTIMING1_TRFC_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING1_TRFC <<
+			SDR_CTRLGRP_DRAMTIMING1_TRFC_LSB);
+}
+
+static void set_sdr_dram_timing2(void)
+{
+	debug("Configuring DRAMTIMING2\n");
+	clrsetbits_le32(&sdr_ctrl->dram_timing2, SDR_CTRLGRP_DRAMTIMING2_TREFI_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TREFI <<
+			SDR_CTRLGRP_DRAMTIMING2_TREFI_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing2, SDR_CTRLGRP_DRAMTIMING2_TRCD_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRCD <<
+			SDR_CTRLGRP_DRAMTIMING2_TRCD_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing2, SDR_CTRLGRP_DRAMTIMING2_TRP_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TRP <<
+			SDR_CTRLGRP_DRAMTIMING2_TRP_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing2, SDR_CTRLGRP_DRAMTIMING2_TWR_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWR <<
+			SDR_CTRLGRP_DRAMTIMING2_TWR_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing2, SDR_CTRLGRP_DRAMTIMING2_TWTR_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING2_IF_TWTR <<
+			SDR_CTRLGRP_DRAMTIMING2_TWTR_LSB);
+}
+
+static void set_sdr_dram_timing3(void)
+{
+	debug("Configuring DRAMTIMING3\n");
+	clrsetbits_le32(&sdr_ctrl->dram_timing3, SDR_CTRLGRP_DRAMTIMING3_TRTP_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRTP <<
+			SDR_CTRLGRP_DRAMTIMING3_TRTP_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing3, SDR_CTRLGRP_DRAMTIMING3_TRAS_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRAS <<
+			SDR_CTRLGRP_DRAMTIMING3_TRAS_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing3, SDR_CTRLGRP_DRAMTIMING3_TRC_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TRC <<
+			SDR_CTRLGRP_DRAMTIMING3_TRC_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing3, SDR_CTRLGRP_DRAMTIMING3_TMRD_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TMRD <<
+			SDR_CTRLGRP_DRAMTIMING3_TMRD_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing3, SDR_CTRLGRP_DRAMTIMING3_TCCD_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING3_TCCD <<
+			SDR_CTRLGRP_DRAMTIMING3_TCCD_LSB);
+}
+
+static void set_sdr_dram_timing4(void)
+{
+	debug("Configuring DRAMTIMING4\n");
+	clrsetbits_le32(&sdr_ctrl->dram_timing4,
+			SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_SELFRFSHEXIT <<
+			SDR_CTRLGRP_DRAMTIMING4_SELFRFSHEXIT_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_timing4,
+			SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMTIMING4_PWRDOWNEXIT <<
+			SDR_CTRLGRP_DRAMTIMING4_PWRDOWNEXIT_LSB);
+}
+
+static void set_sdr_dram_lowpwr_timing(void)
+{
+	debug("Configuring LOWPWRTIMING\n");
+	clrsetbits_le32(&sdr_ctrl->lowpwr_timing,
+			SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_AUTOPDCYCLES <<
+			SDR_CTRLGRP_LOWPWRTIMING_AUTOPDCYCLES_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->lowpwr_timing,
+			SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_LOWPWRTIMING_CLKDISABLECYCLES <<
+			SDR_CTRLGRP_LOWPWRTIMING_CLKDISABLECYCLES_LSB);
+}
+
+static void set_sdr_addr_rw(void)
+{
+	int cs = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS;
+	int width = 8;
+	int rows = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS;
+	int banks = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS;
+	int cols = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS;
+	unsigned long long workaround_memsize = MEMSIZE_4G;
+
+	debug("Configuring DRAMADDRW\n");
+	clrsetbits_le32(&sdr_ctrl->dram_addrw, SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS <<
+			SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB);
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Update Preloader to artificially increase the number of rows so
+	 * that the memory thinks it has 4GB of RAM.
+	 */
+	rows = compute_errata_rows(workaround_memsize, cs, width, rows, banks,
+				   cols);
+
+	clrsetbits_le32(&sdr_ctrl->dram_addrw, SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK,
+			rows << SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_addrw, SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS <<
+			SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB);
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Set SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB to
+	 * log2(number of chip select bits). Since there's only
+	 * 1 or 2 chip selects, log2(1) => 0, and log2(2) => 1,
+	 * which is the same as "chip selects" - 1.
+	 */
+	clrsetbits_le32(&sdr_ctrl->dram_addrw, SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK,
+			(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS - 1) <<
+			SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB);
+}
+
+static void set_sdr_static_cfg(void)
+{
+	debug("Configuring STATICCFG\n");
+	clrsetbits_le32(&sdr_ctrl->static_cfg, SDR_CTRLGRP_STATICCFG_MEMBL_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_STATICCFG_MEMBL <<
+			SDR_CTRLGRP_STATICCFG_MEMBL_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->static_cfg,
+			SDR_CTRLGRP_STATICCFG_USEECCASDATA_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_STATICCFG_USEECCASDATA <<
+			SDR_CTRLGRP_STATICCFG_USEECCASDATA_LSB);
+}
+
+static void set_sdr_fifo_cfg(void)
+{
+	debug("Configuring FIFOCFG\n");
+	clrsetbits_le32(&sdr_ctrl->fifo_cfg, SDR_CTRLGRP_FIFOCFG_SYNCMODE_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_SYNCMODE <<
+			SDR_CTRLGRP_FIFOCFG_SYNCMODE_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->fifo_cfg, SDR_CTRLGRP_FIFOCFG_INCSYNC_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_FIFOCFG_INCSYNC <<
+			SDR_CTRLGRP_FIFOCFG_INCSYNC_LSB);
+}
+
+static void set_sdr_mp_weight(void)
+{
+	debug("Configuring MPWEIGHT_MPWEIGHT_0\n");
+	clrsetbits_le32(&sdr_ctrl->mp_weight0,
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_0_STATICWEIGHT_31_0 <<
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_0_STATICWEIGHT_31_0_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_weight1,
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_STATICWEIGHT_49_32 <<
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_STATICWEIGHT_49_32_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_weight1,
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_1_SUMOFWEIGHT_13_0 <<
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_1_SUMOFWEIGHTS_13_0_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_weight2,
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_2_SUMOFWEIGHT_45_14 <<
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_2_SUMOFWEIGHTS_45_14_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_weight3,
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPWIEIGHT_3_SUMOFWEIGHT_63_46 <<
+			SDR_CTRLGRP_MPWEIGHT_MPWEIGHT_3_SUMOFWEIGHTS_63_46_LSB);
+}
+
+static void set_sdr_mp_pacing(void)
+{
+	debug("Configuring MPPACING_MPPACING_0\n");
+	clrsetbits_le32(&sdr_ctrl->mp_pacing0,
+			SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_0_THRESHOLD1_31_0 <<
+			SDR_CTRLGRP_MPPACING_MPPACING_0_THRESHOLD1_31_0_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_pacing1,
+			SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD1_59_32 <<
+			SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD1_59_32_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_pacing1,
+			SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0 <<
+			SDR_CTRLGRP_MPPACING_MPPACING_1_THRESHOLD2_3_0_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_pacing2,
+			SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4 <<
+			SDR_CTRLGRP_MPPACING_MPPACING_2_THRESHOLD2_35_4_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_pacing3,
+			SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36 <<
+			SDR_CTRLGRP_MPPACING_MPPACING_3_THRESHOLD2_59_36_LSB);
+}
+
+static void set_sdr_mp_threshold(void)
+{
+	debug("Configuring MPTHRESHOLDRST_MPTHRESHOLDRST_0\n");
+	clrsetbits_le32(&sdr_ctrl->mp_threshold0,
+			SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0 <<
+			SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_threshold1,
+			SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32 <<
+			SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->mp_threshold2,
+			SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64 <<
+			SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB);
+}
+
+
+/* Function to initialize SDRAM MMR */
+unsigned sdram_mmr_init_full(unsigned int sdr_phy_reg)
+{
+	unsigned long reg_value;
+	unsigned long status = 0;
+
+#if defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS) && \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS) && \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS) && \
+defined(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS)
+
+	writel(CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS,
+	       &sysmgr_regs->iswgrp_handoff[4]);
+#endif
+	set_sdr_ctrlcfg();
+	set_sdr_dram_timing1();
+	set_sdr_dram_timing2();
+	set_sdr_dram_timing3();
+	set_sdr_dram_timing4();
+	set_sdr_dram_lowpwr_timing();
+	set_sdr_addr_rw();
+
+	debug("Configuring DRAMIFWIDTH\n");
+	clrsetbits_le32(&sdr_ctrl->dram_if_width,
+			SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMIFWIDTH_IFWIDTH <<
+			SDR_CTRLGRP_DRAMIFWIDTH_IFWIDTH_LSB);
+
+	debug("Configuring DRAMDEVWIDTH\n");
+	clrsetbits_le32(&sdr_ctrl->dram_dev_width,
+			SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMDEVWIDTH_DEVWIDTH <<
+			SDR_CTRLGRP_DRAMDEVWIDTH_DEVWIDTH_LSB);
+
+	debug("Configuring LOWPWREQ\n");
+	clrsetbits_le32(&sdr_ctrl->lowpwr_eq,
+			SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_LOWPWREQ_SELFRFSHMASK <<
+			SDR_CTRLGRP_LOWPWREQ_SELFRFSHMASK_LSB);
+
+	debug("Configuring DRAMINTR\n");
+	clrsetbits_le32(&sdr_ctrl->dram_intr, SDR_CTRLGRP_DRAMINTR_INTREN_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMINTR_INTREN <<
+			SDR_CTRLGRP_DRAMINTR_INTREN_LSB);
+
+	set_sdr_static_cfg();
+
+	debug("Configuring CTRLWIDTH\n");
+	clrsetbits_le32(&sdr_ctrl->ctrl_width,
+			SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CTRLWIDTH_CTRLWIDTH <<
+			SDR_CTRLGRP_CTRLWIDTH_CTRLWIDTH_LSB);
+
+	debug("Configuring PORTCFG\n");
+	clrsetbits_le32(&sdr_ctrl->port_cfg, SDR_CTRLGRP_PORTCFG_AUTOPCHEN_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_PORTCFG_AUTOPCHEN <<
+			SDR_CTRLGRP_PORTCFG_AUTOPCHEN_LSB);
+
+	set_sdr_fifo_cfg();
+
+	debug("Configuring MPPRIORITY\n");
+	clrsetbits_le32(&sdr_ctrl->mp_priority,
+			SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_MPPRIORITY_USERPRIORITY <<
+			SDR_CTRLGRP_MPPRIORITY_USERPRIORITY_LSB);
+
+	set_sdr_mp_weight();
+	set_sdr_mp_pacing();
+	set_sdr_mp_threshold();
+
+	debug("Configuring PHYCTRL_PHYCTRL_0\n");
+	setbits_le32(&sdr_ctrl->phy_ctrl0,
+		     CONFIG_HPS_SDR_CTRLCFG_PHYCTRL_PHYCTRL_0);
+
+	debug("Configuring CPORTWIDTH\n");
+	clrsetbits_le32(&sdr_ctrl->cport_width,
+			SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CPORTWIDTH_CPORTWIDTH <<
+			SDR_CTRLGRP_CPORTWIDTH_CMDPORTWIDTH_LSB);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(&sdr_ctrl->cport_width),
+		(unsigned)reg_value);
+	reg_value = readl(&sdr_ctrl->cport_width);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+
+	debug("Configuring CPORTWMAP\n");
+	clrsetbits_le32(&sdr_ctrl->cport_wmap,
+			SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CPORTWMAP_CPORTWMAP <<
+			SDR_CTRLGRP_CPORTWMAP_CPORTWFIFOMAP_LSB);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(&sdr_ctrl->cport_wmap),
+		(unsigned)reg_value);
+	reg_value = readl(&sdr_ctrl->cport_wmap);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+
+	debug("Configuring CPORTRMAP\n");
+	clrsetbits_le32(&sdr_ctrl->cport_rmap,
+			SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CPORTRMAP_CPORTRMAP <<
+			SDR_CTRLGRP_CPORTRMAP_CPORTRFIFOMAP_LSB);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(&sdr_ctrl->cport_rmap),
+		(unsigned)reg_value);
+	reg_value = readl(&sdr_ctrl->cport_rmap);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+
+	debug("Configuring RFIFOCMAP\n");
+	clrsetbits_le32(&sdr_ctrl->rfifo_cmap,
+			SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_RFIFOCMAP_RFIFOCMAP <<
+			SDR_CTRLGRP_RFIFOCMAP_RFIFOCPORTMAP_LSB);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(&sdr_ctrl->rfifo_cmap),
+		(unsigned)reg_value);
+	reg_value = readl(&sdr_ctrl->rfifo_cmap);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+
+	debug("Configuring WFIFOCMAP\n");
+	reg_value = readl(&sdr_ctrl->wfifo_cmap);
+	clrsetbits_le32(&sdr_ctrl->wfifo_cmap,
+			SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_WFIFOCMAP_WFIFOCMAP <<
+			SDR_CTRLGRP_WFIFOCMAP_WFIFOCPORTMAP_LSB);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(&sdr_ctrl->wfifo_cmap),
+		(unsigned)reg_value);
+	reg_value = readl(&sdr_ctrl->wfifo_cmap);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+
+	debug("Configuring CPORTRDWR\n");
+	clrsetbits_le32(&sdr_ctrl->cport_rdwr,
+			SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_CPORTRDWR_CPORTRDWR <<
+			SDR_CTRLGRP_CPORTRDWR_CPORTRDWR_LSB);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(&sdr_ctrl->cport_rdwr),
+		(unsigned)reg_value);
+	reg_value = readl(&sdr_ctrl->cport_rdwr);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+
+	debug("Configuring DRAMODT\n");
+	clrsetbits_le32(&sdr_ctrl->dram_odt,
+			SDR_CTRLGRP_DRAMODT_READ_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMODT_READ <<
+			SDR_CTRLGRP_DRAMODT_READ_LSB);
+
+	clrsetbits_le32(&sdr_ctrl->dram_odt,
+			SDR_CTRLGRP_DRAMODT_WRITE_MASK,
+			CONFIG_HPS_SDR_CTRLCFG_DRAMODT_WRITE <<
+			SDR_CTRLGRP_DRAMODT_WRITE_LSB);
+
+	/* saving this value to SYSMGR.ISWGRP.HANDOFF.FPGA2SDR */
+	writel(CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST,
+	       &sysmgr_regs->iswgrp_handoff[3]);
+
+	/* only enable if the FPGA is programmed */
+	if (fpgamgr_test_fpga_ready()) {
+		if (sdram_write_verify(&sdr_ctrl->fpgaport_rst,
+		    CONFIG_HPS_SDR_CTRLCFG_FPGAPORTRST) == 1) {
+			/* Set status to 1 so that a failed status is returned
+			 * even if COMPARE_FAIL_ACTION is defined to do nothing,
+			 * i.e. when the user wishes to continue initialization
+			 * even though the verify failed. */
+			status = 1;
+			COMPARE_FAIL_ACTION
+		}
+	}
+
+	/* Restore the SDR PHY Register if valid */
+	if (sdr_phy_reg != 0xffffffff)
+		writel(sdr_phy_reg, &sdr_ctrl->phy_ctrl0);
+
+	/* Final step - apply the configuration changes */
+	debug("Configuring STATICCFG_\n");
+	clrsetbits_le32(&sdr_ctrl->static_cfg, SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK,
+			1 << SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB);
+	debug("   Write - Address ");
+	debug("0x%08x Data 0x%08x\n",
+		(unsigned)(&sdr_ctrl->static_cfg),
+		(unsigned)reg_value);
+	reg_value = readl(&sdr_ctrl->static_cfg);
+	debug("   Read value without verify 0x%08x\n", (unsigned)reg_value);
+
+	sdram_set_protection_config(0, sdram_calculate_size());
+
+	sdram_dump_protection_config();
+
+	return status;
+}
+
+/* Calculate the SDRAM device size based on the SDRAM controller parameters.
+ * The size is returned in bytes.
+ *
+ * NOTE: this function is compiled and linked into both the preloader and
+ * U-Boot (and possibly others), so if this function changes, the preloader
+ * and U-Boot must be updated simultaneously.
+ */
+unsigned long sdram_calculate_size(void)
+{
+	unsigned long temp;
+	unsigned long row, bank, col, cs, width;
+
+	temp = readl(&sdr_ctrl->dram_addrw);
+	col = (temp & SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK) >>
+		SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB;
+
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Use ROWBITS from Quartus/QSys to calculate SDRAM size
+	 * since the FB specifies we modify ROWBITs to work around SDRAM
+	 * controller issue.
+	 *
+	 * If the stored handoff value for rows is 0, it probably means
+	 * the preloader is older than U-Boot. Use the #define from the
+	 * SoC EDS tools per Crucible review uboot-socfpga-204. Note that
+	 * this is not a supported configuration and is not tested. The
+	 * customer should be using a preloader and U-Boot built from the
+	 * same tag.
+	 */
+	row = readl(&sysmgr_regs->iswgrp_handoff[4]);
+	if (row == 0)
+		row = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS;
+	/* If the stored handoff value for rows is greater than
+	 * the field width in the sdr.dramaddrw register then
+	 * something is very wrong. Revert to using the #define
+	 * value handed off by the SoC EDS tool chain instead of
+	 * using a broken value.
+	 */
+	if (row > 31)
+		row = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS;
+
+	bank = (temp & SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK) >>
+		SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB;
+
+	/* SDRAM Failure When Accessing Non-Existent Memory
+	 * Use CSBITs from Quartus/QSys to calculate SDRAM size
+	 * since the FB specifies we modify CSBITs to work around SDRAM
+	 * controller issue.
+	 */
+	cs = (temp & SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK) >>
+	      SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB;
+	cs += 1;
+
+	cs = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS;
+
+	width = readl(&sdr_ctrl->dram_if_width);
+	/* ECC is not counted since it is not addressable */
+	if (width == SDRAM_WIDTH_32BIT_WITH_ECC)
+		width = 32;
+	if (width == SDRAM_WIDTH_16BIT_WITH_ECC)
+		width = 16;
+
+	/* calculate the SDRAM size based on this info */
+	temp = 1 << (row + bank + col);
+	temp = temp * cs * (width  / 8);
+
+	debug("sdram_calculate_memory returns %ld\n", temp);
+
+	return temp;
+}
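
As a rough sanity check (not part of the patch), plugging the values from the
sdram_config.h above (COLBITS 10, ROWBITS 15, BANKBITS 3, CSBITS 1 and an
IFWIDTH of 40, i.e. 32 data bits plus ECC) into the two calculations gives 19
workaround row bits and a 1 GiB device size. A standalone sketch of the same
arithmetic, written as plain C outside U-Boot:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long memsize = 4ULL << 30;	/* 4 GiB errata size */
		unsigned int cs = 1, width = 8, banks = 3, cols = 10, rows = 15;

		/* compute_errata_rows(): 4 GiB / 1 / (2^3 * 2^10) = 2^19 */
		unsigned long long newrows =
			memsize / (cs * (width / 8)) / ((1 << banks) * (1 << cols));
		printf("workaround rows = log2(%llu) = 19\n", newrows);

		/* sdram_calculate_size(): 2^(15 + 3 + 10) * 1 * (32 / 8) = 1 GiB */
		unsigned long long size =
			(1ULL << (rows + banks + cols)) * cs * (32 / 8);
		printf("size = 0x%llx bytes (1 GiB)\n", size);
		return 0;
	}
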
diff --git a/scripts/Makefile.spl b/scripts/Makefile.spl
index fcacb7f..a04b286 100644
--- a/scripts/Makefile.spl
+++ b/scripts/Makefile.spl
@@ -60,6 +60,7 @@ libs-$(CONFIG_SPL_GPIO_SUPPORT) += drivers/gpio/
 libs-$(CONFIG_SPL_MMC_SUPPORT) += drivers/mmc/
 libs-$(CONFIG_SPL_MPC8XXX_INIT_DDR_SUPPORT) += drivers/ddr/fsl/
 libs-$(CONFIG_SYS_MVEBU_DDR) += drivers/ddr/mvebu/
+libs-$(CONFIG_ALTERA_SDRAM) += drivers/ddr/altera/
 libs-$(CONFIG_SPL_SERIAL_SUPPORT) += drivers/serial/
 libs-$(CONFIG_SPL_SPI_FLASH_SUPPORT) += drivers/mtd/spi/
 libs-$(CONFIG_SPL_SPI_SUPPORT) += drivers/spi/
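
For context: libs-$(CONFIG_ALTERA_SDRAM) only pulls drivers/ddr/altera/ into
the SPL and main builds once CONFIG_ALTERA_SDRAM is enabled. That is
presumably what patch 3/3 ("arm: socfpga: enable the Altera SDRAM controller
driver", not shown in full here) does from the board configuration, roughly:

	/* include/configs/socfpga_common.h -- assumed, see patch 3/3 */
	#define CONFIG_ALTERA_SDRAM
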
-- 
2.2.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [U-Boot] [PATCH RESEND 2/3] driver/ddr/altera/: Add the sdram calibration portion
  2015-04-16 14:11 [U-Boot] [PATCH RESEND 0/3] drivers/ddr/altera: Add the DDR controller driver for SoCFPGA dinguyen at opensource.altera.com
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller dinguyen at opensource.altera.com
@ 2015-04-16 14:11 ` dinguyen at opensource.altera.com
  2015-04-25 17:27   ` Pavel Machek
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 3/3] arm: socfpga: enable the Altera SDRAM controller driver dinguyen at opensource.altera.com
  2 siblings, 1 reply; 8+ messages in thread
From: dinguyen at opensource.altera.com @ 2015-04-16 14:11 UTC (permalink / raw)
  To: u-boot

From: Dinh Nguyen <dinguyen@opensource.altera.com>

This patch adds the DDR calibration portion of the Altera SDRAM driver.

Signed-off-by: Dinh Nguyen <dinguyen@opensource.altera.com>
---
 drivers/ddr/altera/Makefile                   |   12 +
 drivers/ddr/altera/sequencer.c                | 3907 +++++++++++++++++++++++++
 drivers/ddr/altera/sequencer.h                |  328 +++
 drivers/ddr/altera/sequencer_auto.h           |  128 +
 drivers/ddr/altera/sequencer_auto_ac_init.c   |   85 +
 drivers/ddr/altera/sequencer_auto_inst_init.c |  270 ++
 drivers/ddr/altera/sequencer_defines.h        |  121 +
 7 files changed, 4851 insertions(+)
 create mode 100644 drivers/ddr/altera/Makefile
 create mode 100644 drivers/ddr/altera/sequencer.c
 create mode 100644 drivers/ddr/altera/sequencer.h
 create mode 100644 drivers/ddr/altera/sequencer_auto.h
 create mode 100644 drivers/ddr/altera/sequencer_auto_ac_init.c
 create mode 100644 drivers/ddr/altera/sequencer_auto_inst_init.c
 create mode 100644 drivers/ddr/altera/sequencer_defines.h

diff --git a/drivers/ddr/altera/Makefile b/drivers/ddr/altera/Makefile
new file mode 100644
index 0000000..38fba57
--- /dev/null
+++ b/drivers/ddr/altera/Makefile
@@ -0,0 +1,12 @@
+#
+# (C) Copyright 2000-2003
+# Wolfgang Denk, DENX Software Engineering, wd at denx.de.
+#
+# (C) Copyright 2010, Thomas Chou <thomas@wytron.com.tw>
+# Copyright (C) 2014 Altera Corporation <www.altera.com>
+#
+# SPDX-License-Identifier:	GPL-2.0+
+#
+
+obj-$(CONFIG_ALTERA_SDRAM) += sdram.o sequencer.o sequencer_auto_ac_init.o \
+			  sequencer_auto_inst_init.o
diff --git a/drivers/ddr/altera/sequencer.c b/drivers/ddr/altera/sequencer.c
new file mode 100644
index 0000000..701e167
--- /dev/null
+++ b/drivers/ddr/altera/sequencer.c
@@ -0,0 +1,3907 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2015
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/sdram.h>
+#include "sequencer.h"
+#include "sequencer_auto.h"
+#include "sequencer_defines.h"
+
+static void scc_mgr_load_dqs_for_write_group(uint32_t write_group);
+
+static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
+	(struct socfpga_sdr_rw_load_manager *)(BASE_RW_MGR + 0x800);
+
+static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
+	(struct socfpga_sdr_rw_load_jump_manager *)(BASE_RW_MGR + 0xC00);
+
+static struct socfpga_sdr_reg_file *sdr_reg_file =
+	(struct socfpga_sdr_reg_file *)(BASE_REG_FILE);
+
+static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
+	(struct socfpga_sdr_scc_mgr *)(BASE_SCC_MGR + 0x0E00);
+
+static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
+	(struct socfpga_phy_mgr_cmd *)(BASE_PHY_MGR);
+
+static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
+	(struct socfpga_phy_mgr_cfg *)(BASE_PHY_MGR + 0x4000);
+
+static struct socfpga_data_mgr *data_mgr =
+	(struct socfpga_data_mgr *)(BASE_DATA_MGR);
+/******************************************************************************
+ ******************************************************************************
+ ** NOTE: Special Rules for Global Variables                                 **
+ **                                                                          **
+ ** All global variables that are explicitly initialized (including          **
+ ** explicitly initialized to zero), are only initialized once, during       **
+ ** configuration time, and not again on reset.  This means that they        **
+ ** preserve their current contents across resets, which is needed for some  **
+ ** special cases involving communication with external modules.  In         **
+ ** addition, this avoids paying the price to have the memory initialized,   **
+ ** even for zeroed data, provided it is explicitly set to zero in the code, **
+ ** and doesn't rely on implicit initialization.                             **
+ ******************************************************************************
+ ******************************************************************************/
+
+/*
+ * Temporary workaround to place the initial stack pointer at a safe
+ * offset from end
+ */
+asm(".global __alt_stack_pointer");
+asm("__alt_stack_pointer = " __stringify(STACK_POINTER));
+
+/* Just to make the debugging code more uniform */
+#ifndef RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM
+#define RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM 0
+#endif
+
+#define SDR_WRITE	1
+#define SDR_READ	0
+
+#define DELTA_D		1
+#define MGR_SELECT_MASK		0xf8000
+
+/*
+ * In order to reduce ROM size, most of the selectable calibration steps are
+ * decided at compile time based on the user's calibration mode selection,
+ * as captured by the STATIC_CALIB_STEPS selection below.
+ *
+ * However, to support simulation-time selection of fast simulation mode, where
+ * we skip everything except the bare minimum, we need a few of the steps to
+ * be dynamic.  In those cases, we either use the DYNAMIC_CALIB_STEPS for the
+ * check, which is based on the rtl-supplied value, or we dynamically compute
+ * the value to use based on the dynamically-chosen calibration mode
+ */
+
+#define BTFLD_FMT "%u"
+
+/* For HPS running on actual hardware */
+
+#define DLEVEL 0
+/*
+ * space around comma is required for varargs macro to remove comma if args
+ * is empty
+ */
+#define BFM_GBL_SET(field, value)
+#define BFM_GBL_GET(field)		((long unsigned int)0)
+#define BFM_STAGE(stage)
+#define BFM_INC_VFIFO
+#define COV(label)
+
+#define STATIC_IN_RTL_SIM 0
+
+#define STATIC_SKIP_DELAY_LOOPS 0
+
+#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
+	STATIC_SKIP_DELAY_LOOPS)
+
+/* calibration steps requested by the rtl */
+uint16_t dyn_calib_steps;
+
+/*
+ * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
+ * instead of static, we use boolean logic to select between
+ * non-skip and skip values
+ *
+ * The mask is set to include all bits when not-skipping, but is
+ * zero when skipping
+ */
+
+uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */
+
+#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
+	((non_skip_value) & skip_delay_mask)
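+/*
+ * Illustration only: with skip_delay_mask set to all ones the macro passes
+ * non_skip_value through unchanged; with the mask set to zero it collapses
+ * to 0, so the delay loops are effectively skipped, matching the
+ * CALIB_SKIP_DELAY_LOOPS behaviour described above.
+ */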
+
+struct gbl_type *gbl;
+struct param_type *param;
+uint32_t curr_shadow_reg;
+
+static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
+	uint32_t write_group, uint32_t use_dm,
+	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);
+
+static int sdr_ops(u32 *base, u32 offset, u32 data, bool write)
+{
+	u32 addr = (u32)base & MGR_SELECT_MASK;
+	u32 temp;
+
+	switch (addr) {
+	case BASE_PHY_MGR:
+		addr = (((u32)base >> 8) & (1 << 6)) | ((u32)base & 0x3f) |
+			SDR_PHYGRP_PHYMGRGRP_ADDRESS;
+		break;
+	case BASE_RW_MGR:
+		addr = ((u32)base & 0x1fff) | SDR_PHYGRP_RWMGRGRP_ADDRESS;
+		break;
+	case BASE_DATA_MGR:
+		addr = ((u32)base & 0x7ff) | SDR_PHYGRP_DATAMGRGRP_ADDRESS;
+		break;
+	case BASE_SCC_MGR:
+		addr = ((u32)base & 0xfff) | SDR_PHYGRP_SCCGRP_ADDRESS;
+		break;
+	case BASE_REG_FILE:
+		addr = ((u32)base & 0x7ff) | SDR_PHYGRP_REGFILEGRP_ADDRESS;
+		break;
+	case BASE_MMR:
+		addr = ((u32)base & 0xfff) | SDR_CTRLGRP_ADDRESS;
+		break;
+	default:
+		return -1;
+	}
+
+	if (write) {
+		writel(data, SOCFPGA_SDR_ADDRESS + addr + offset);
+	} else {
+		temp = readl(SOCFPGA_SDR_ADDRESS + addr + offset);
+		return temp;
+	}
+
+	return 0;
+}
+
+static void set_failing_group_stage(uint32_t group, uint32_t stage,
+	uint32_t substage)
+{
+	/*
+	 * Only set the global stage if there has not been any other
+	 * failing group
+	 */
+	if (gbl->error_stage == CAL_STAGE_NIL)	{
+		gbl->error_substage = substage;
+		gbl->error_stage = stage;
+		gbl->error_group = group;
+	}
+}
+
+static inline void reg_file_set_group(uint32_t set_group)
+{
+	/* Read the current group and stage */
+	uint32_t cur_stage_group = sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_READ);
+
+	/* Clear the group */
+	cur_stage_group &= 0x0000FFFF;
+
+	/* Set the group */
+	cur_stage_group |= (set_group << 16);
+
+	/* Write the data back */
+	sdr_ops(&sdr_reg_file->cur_stage, 0, cur_stage_group, SDR_WRITE);
+}
+
+static inline void reg_file_set_stage(uint32_t set_stage)
+{
+	/* Read the current group and stage */
+	uint32_t cur_stage_group = sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_READ);
+
+	/* Clear the stage and substage */
+	cur_stage_group &= 0xFFFF0000;
+
+	/* Set the stage */
+	cur_stage_group |= (set_stage & 0x000000FF);
+
+	/* Write the data back */
+	sdr_ops(&sdr_reg_file->cur_stage, 0, cur_stage_group, SDR_WRITE);
+}
+
+static inline void reg_file_set_sub_stage(uint32_t set_sub_stage)
+{
+	/* Read the current group and stage */
+	uint32_t cur_stage_group = sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_READ);
+
+	/* Clear the substage */
+	cur_stage_group &= 0xFFFF00FF;
+
+	/* Set the sub stage */
+	cur_stage_group |= ((set_sub_stage << 8) & 0x0000FF00);
+
+	/* Write the data back */
+	sdr_ops(&sdr_reg_file->cur_stage, 0, cur_stage_group, SDR_WRITE);
+}
+
+static void initialize(void)
+{
+	debug("%s:%d\n", __func__, __LINE__);
+	/* USER calibration has control over path to memory */
+	/*
+	 * In Hard PHY this is a 2-bit control:
+	 * 0: AFI Mux Select
+	 * 1: DDIO Mux Select
+	 */
+	sdr_ops(&phy_mgr_cfg->mux_sel, 0, 0x3, SDR_WRITE);
+
+	/* USER memory clock is not stable, we begin initialization */
+	sdr_ops(&phy_mgr_cfg->reset_mem_stbl, 0, 0, SDR_WRITE);
+
+	/* USER calibration status all set to zero */
+
+	sdr_ops(&phy_mgr_cfg->cal_status, 0, 0, SDR_WRITE);
+	sdr_ops(&phy_mgr_cfg->cal_debug_info, 0, 0, SDR_WRITE);
+
+	if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
+		param->read_correct_mask_vg  = ((uint32_t)1 <<
+			(RW_MGR_MEM_DQ_PER_READ_DQS /
+			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
+		param->write_correct_mask_vg = ((uint32_t)1 <<
+			(RW_MGR_MEM_DQ_PER_READ_DQS /
+			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
+		param->read_correct_mask     = ((uint32_t)1 <<
+			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
+		param->write_correct_mask    = ((uint32_t)1 <<
+			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
+		param->dm_correct_mask       = ((uint32_t)1 <<
+			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
+			- 1;
+	}
+}
+
+static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
+{
+	uint32_t odt_mask_0 = 0;
+	uint32_t odt_mask_1 = 0;
+	uint32_t cs_and_odt_mask;
+
+	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
+		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
+			/*
+			 * 1 Rank
+			 * Read: ODT = 0
+			 * Write: ODT = 1
+			 */
+			odt_mask_0 = 0x0;
+			odt_mask_1 = 0x1;
+		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
+			/* 2 Ranks */
+			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
+				/* - Dual-Slot, Single-Rank
+				 * (1 chip-select per DIMM)
+				 * OR
+				 * - RDIMM, 4 total CS (2 CS per DIMM),
+				 * which means 2 DIMMs.
+				 * Since MEM_NUMBER_OF_RANKS is 2, they are
+				 * both single-rank with 2 CS each
+				 * (special for RDIMM).
+				 * Read: Turn on ODT on the opposite rank
+				 * Write: Turn on ODT on all ranks
+				 */
+				odt_mask_0 = 0x3 & ~(1 << rank);
+				odt_mask_1 = 0x3;
+			} else {
+				/*
+				 * USER - Single-Slot, Dual-Rank DIMMs
+				 * (2 chip-selects per DIMM)
+				 * USER Read: Turn off ODT on all ranks
+				 * USER Write: Turn on ODT on active rank
+				 */
+				odt_mask_0 = 0x0;
+				odt_mask_1 = 0x3 & (1 << rank);
+			}
+		} else {
+			/* 4 Ranks
+			 * Read:
+			 * ----------+-----------------------+
+			 *           |                       |
+			 *           |         ODT           |
+			 * Read From +-----------------------+
+			 *   Rank    |  3  |  2  |  1  |  0  |
+			 * ----------+-----+-----+-----+-----+
+			 *     0     |  0  |  1  |  0  |  0  |
+			 *     1     |  1  |  0  |  0  |  0  |
+			 *     2     |  0  |  0  |  0  |  1  |
+			 *     3     |  0  |  0  |  1  |  0  |
+			 * ----------+-----+-----+-----+-----+
+			 *
+			 * Write:
+			 * ----------+-----------------------+
+			 *           |                       |
+			 *           |         ODT           |
+			 * Write To  +-----------------------+
+			 *   Rank    |  3  |  2  |  1  |  0  |
+			 * ----------+-----+-----+-----+-----+
+			 *     0     |  0  |  1  |  0  |  1  |
+			 *     1     |  1  |  0  |  1  |  0  |
+			 *     2     |  0  |  1  |  0  |  1  |
+			 *     3     |  1  |  0  |  1  |  0  |
+			 * ----------+-----+-----+-----+-----+
+			 */
+			switch (rank) {
+			case 0:
+				odt_mask_0 = 0x4;
+				odt_mask_1 = 0x5;
+				break;
+			case 1:
+				odt_mask_0 = 0x8;
+				odt_mask_1 = 0xA;
+				break;
+			case 2:
+				odt_mask_0 = 0x1;
+				odt_mask_1 = 0x5;
+				break;
+			case 3:
+				odt_mask_0 = 0x2;
+				odt_mask_1 = 0xA;
+				break;
+			}
+		}
+	} else {
+		odt_mask_0 = 0x0;
+		odt_mask_1 = 0x0;
+	}
+
+	cs_and_odt_mask =
+		(0xFF & ~(1 << rank)) |
+		((0xFF & odt_mask_0) << 8) |
+		((0xFF & odt_mask_1) << 16);
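+	/*
+	 * Worked example: for rank 0 in read/write ODT mode on a single-rank
+	 * configuration, the CS field is 0xFE, odt_mask_0 is 0x0 and
+	 * odt_mask_1 is 0x1, so the value written below is 0x000100FE.
+	 */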
+	sdr_ops((u32 *)RW_MGR_SET_CS_AND_ODT_MASK, 0, cs_and_odt_mask, SDR_WRITE);
+}
+
+static void scc_mgr_initialize(void)
+{
+	/*
+	 * Clear register file for HPS
+	 * 16 (2^4) is the size of the full register file in the scc mgr:
+	 *	RFILE_DEPTH = log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
+	 * MEM_IF_READ_DQS_WIDTH - 1) + 1;
+	 */
+	uint32_t i;
+	for (i = 0; i < 16; i++) {
+		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u",
+			   __func__, __LINE__, i);
+		sdr_ops((u32 *)SCC_MGR_HHP_RFILE, i << 2, 0, SDR_WRITE);
+	}
+}
+
+static inline void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group,
+						uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_DQS_IN_DELAY, read_group << 2, delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dqs_io_in_delay(uint32_t write_group,
+	uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY, RW_MGR_MEM_DQ_PER_WRITE_DQS << 2,
+		delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_DQS_EN_PHASE, read_group << 2, phase, SDR_WRITE);
+}
+
+static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group,
+					       uint32_t phase)
+{
+	uint32_t r;
+	uint32_t update_scan_chains;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+	     r += NUM_RANKS_PER_SHADOW_REG) {
+		/*
+		 * USER Although the h/w doesn't support different phases per
+		 * shadow register, for simplicity our scc manager modeling
+		 * keeps different phase settings per shadow reg, and it's
+		 * important for us to keep them in sync to match h/w.
+		 * For efficiency, the scan chain update should occur only
+		 * once, to sr0.
+		 */
+		update_scan_chains = (r == 0) ? 1 : 0;
+
+		scc_mgr_set_dqs_en_phase(read_group, phase);
+
+		if (update_scan_chains) {
+			sdr_ops(&sdr_scc_mgr->dqs_ena, 0, read_group, SDR_WRITE);
+			sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+		}
+	}
+}
+
+static inline void scc_mgr_set_dqdqs_output_phase(uint32_t write_group,
+						  uint32_t phase)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_DQDQS_OUT_PHASE, write_group << 2, phase, SDR_WRITE);
+}
+
+static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
+						     uint32_t phase)
+{
+	uint32_t r;
+	uint32_t update_scan_chains;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+	     r += NUM_RANKS_PER_SHADOW_REG) {
+		/*
+		 * USER Although the h/w doesn't support different phases per
+		 * shadow register, for simplicity our scc manager modeling
+		 * keeps different phase settings per shadow reg, and it's
+		 * important for us to keep them in sync to match h/w.
+		 * For efficiency, the scan chain update should occur only
+		 * once, to sr0.
+		 */
+		update_scan_chains = (r == 0) ? 1 : 0;
+
+		scc_mgr_set_dqdqs_output_phase(write_group, phase);
+
+		if (update_scan_chains) {
+			sdr_ops(&sdr_scc_mgr->dqs_ena, 0, write_group, SDR_WRITE);
+			sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+		}
+	}
+}
+
+static inline void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_DQS_EN_DELAY, read_group << 2, delay +
+		IO_DQS_EN_DELAY_OFFSET, SDR_WRITE);
+}
+
+static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
+					       uint32_t delay)
+{
+	uint32_t r;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		scc_mgr_set_dqs_en_delay(read_group, delay);
+
+		sdr_ops(&sdr_scc_mgr->dqs_ena, 0, read_group, SDR_WRITE);
+
+		/*
+		 * In shadow register mode, the T11 settings are stored in
+		 * registers in the core, which are updated by the DQS_ENA
+		 * signals. Not issuing the SCC_MGR_UPD command allows us to
+		 * save lots of rank switching overhead, by calling
+		 * select_shadow_regs_for_update with update_scan_chains
+		 * set to 0.
+		 */
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	}
+}
+
+static void scc_mgr_set_oct_out1_delay(uint32_t write_group, uint32_t delay)
+{
+	uint32_t read_group;
+
+	/*
+	 * Load the setting in the SCC manager
+	 * Although OCT affects only write data, the OCT delay is controlled
+	 * by the DQS logic block which is instantiated once per read group.
+	 * For protocols where a write group consists of multiple read groups,
+	 * the setting must be set multiple times.
+	 */
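+	/*
+	 * Example with assumed widths (not values from this patch): if
+	 * RW_MGR_MEM_IF_READ_DQS_WIDTH were 8 and
+	 * RW_MGR_MEM_IF_WRITE_DQS_WIDTH were 4, write group 1 would scan
+	 * read groups 2 and 3.
+	 */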
+	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
+		sdr_ops((u32 *)SCC_MGR_OCT_OUT1_DELAY, read_group << 2,
+			delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dq_out1_delay(uint32_t write_group,
+				      uint32_t dq_in_group, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY, dq_in_group << 2, delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dq_in_delay(uint32_t write_group,
+	uint32_t dq_in_group, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY, dq_in_group << 2, delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_hhp_extras(void)
+{
+	/*
+	 * Load the fixed setting in the SCC manager
+	 * bits: 0:0 = 1'b1   - dqs bypass
+	 * bits: 1:1 = 1'b1   - dq bypass
+	 * bits: 4:2 = 3'b001   - rfifo_mode
+	 * bits: 6:5 = 2'b01  - rfifo clock_select
+	 * bits: 7:7 = 1'b0  - separate gating from ungating setting
+	 * bits: 8:8 = 1'b0  - separate OE from Output delay setting
+	 */
+	uint32_t value = (0<<8) | (0<<7) | (1<<5) | (1<<2) | (1<<1) | (1<<0);
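+	/* i.e. value == 0x27 for the bit layout documented above */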
+	sdr_ops((u32 *)SCC_MGR_HHP_GLOBALS, SCC_MGR_HHP_EXTRAS_OFFSET,
+		value, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_hhp_dqse_map(void)
+{
+	/* Load the fixed setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_HHP_GLOBALS, SCC_MGR_HHP_DQSE_MAP_OFFSET, 0,
+		SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dqs_out1_delay(uint32_t write_group,
+					      uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY, RW_MGR_MEM_DQ_PER_WRITE_DQS << 2,
+		delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dm_out1_delay(uint32_t write_group,
+					     uint32_t dm, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY,
+		(RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm) << 2, delay, SDR_WRITE);
+}
+
+static inline void scc_mgr_set_dm_in_delay(uint32_t write_group,
+					   uint32_t dm, uint32_t delay)
+{
+	/* Load the setting in the SCC manager */
+	sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY,
+		(RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm) << 2, delay, SDR_WRITE);
+}
+
+/*
+ * USER Zero all DQS config
+ * TODO: maybe rename to scc_mgr_zero_dqs_config (or something)
+ */
+static void scc_mgr_zero_all(void)
+{
+	uint32_t i, r;
+
+	/*
+	 * USER Zero all DQS config settings, across all groups and all
+	 * shadow registers
+	 */
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
+	     NUM_RANKS_PER_SHADOW_REG) {
+		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+			/*
+			 * The phases actually don't exist on a per-rank basis,
+			 * but there's no harm updating them several times, so
+			 * let's keep the code simple.
+			 */
+			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
+			scc_mgr_set_dqs_en_phase(i, 0);
+			scc_mgr_set_dqs_en_delay(i, 0);
+		}
+
+		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
+			scc_mgr_set_dqdqs_output_phase(i, 0);
+			/* av/cv don't have out2 */
+			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
+		}
+
+		/* multicast to all DQS group enables */
+		sdr_ops(&sdr_scc_mgr->dqs_ena, 0, 0xff, SDR_WRITE);
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	}
+}
+
+static void scc_set_bypass_mode(uint32_t write_group, uint32_t mode)
+{
+	/* mode = 0 : Do NOT bypass - Half Rate Mode */
+	/* mode = 1 : Bypass - Full Rate Mode */
+
+	/* only need to set once for all groups, pins, dq, dqs, dm */
+	if (write_group == 0) {
+		debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n", __func__,
+			   __LINE__);
+		scc_mgr_set_hhp_extras();
+		debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
+			  __func__, __LINE__);
+	}
+	/* multicast to all DQ enables */
+	sdr_ops(&sdr_scc_mgr->dq_ena, 0, 0xff, SDR_WRITE);
+	sdr_ops(&sdr_scc_mgr->dm_ena, 0, 0xff, SDR_WRITE);
+
+	/* update current DQS IO enable */
+	sdr_ops(&sdr_scc_mgr->dqs_io_ena, 0, 0, SDR_WRITE);
+
+	/* update the DQS logic */
+	sdr_ops(&sdr_scc_mgr->dqs_ena, 0, write_group, SDR_WRITE);
+
+	/* hit update */
+	sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+}
+
+static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin,
+			       int32_t out_only)
+{
+	uint32_t i, r;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
+		NUM_RANKS_PER_SHADOW_REG) {
+		/* Zero all DQ config settings */
+		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+			scc_mgr_set_dq_out1_delay(write_group, i, 0);
+			if (!out_only)
+				scc_mgr_set_dq_in_delay(write_group, i, 0);
+		}
+
+		/* multicast to all DQ enables */
+		sdr_ops(&sdr_scc_mgr->dq_ena, 0, 0xff, SDR_WRITE);
+
+		/* Zero all DM config settings */
+		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
+			scc_mgr_set_dm_out1_delay(write_group, i, 0);
+		}
+
+		/* multicast to all DM enables */
+		sdr_ops(&sdr_scc_mgr->dm_ena, 0, 0xff, SDR_WRITE);
+
+		/* zero all DQS io settings */
+		if (!out_only)
+			scc_mgr_set_dqs_io_in_delay(write_group, 0);
+		/* av/cv don't have out2 */
+		scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE);
+		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
+		scc_mgr_load_dqs_for_write_group(write_group);
+		/* multicast to all DQS IO enables (only 1) */
+		sdr_ops(&sdr_scc_mgr->dqs_io_ena, 0, 0, SDR_WRITE);
+		/* hit update to zero everything */
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	}
+}
+
+/* load up dqs config settings */
+static void scc_mgr_load_dqs(uint32_t dqs)
+{
+	sdr_ops(&sdr_scc_mgr->dqs_ena, 0, dqs, SDR_WRITE);
+}
+
+static void scc_mgr_load_dqs_for_write_group(uint32_t write_group)
+{
+	uint32_t read_group;
+
+	/*
+	 * Although OCT affects only write data, the OCT delay is controlled
+	 * by the DQS logic block which is instantiated once per read group.
+	 * For protocols where a write group consists of multiple read groups,
+	 * the setting must be scanned multiple times.
+	 */
+	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
+	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
+		sdr_ops(&sdr_scc_mgr->dqs_ena, 0, read_group, SDR_WRITE);
+}
+
+/* load up dqs io config settings */
+static void scc_mgr_load_dqs_io(void)
+{
+	sdr_ops(&sdr_scc_mgr->dqs_io_ena, 0, 0, SDR_WRITE);
+}
+
+/* load up dq config settings */
+static void scc_mgr_load_dq(uint32_t dq_in_group)
+{
+	sdr_ops(&sdr_scc_mgr->dq_ena, 0, dq_in_group, SDR_WRITE);
+}
+
+/* load up dm config settings */
+static void scc_mgr_load_dm(uint32_t dm)
+{
+	sdr_ops(&sdr_scc_mgr->dm_ena, 0, dm, SDR_WRITE);
+}
+
+/*
+ * apply and load a particular input delay for the DQ pins in a group
+ * group_bgn is the index of the first dq pin (in the write group)
+ */
+static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group,
+					    uint32_t group_bgn, uint32_t delay)
+{
+	uint32_t i, p;
+
+	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
+		scc_mgr_set_dq_in_delay(write_group, p, delay);
+		scc_mgr_load_dq(p);
+	}
+}
+
+/* apply and load a particular output delay for the DQ pins in a group */
+static void scc_mgr_apply_group_dq_out1_delay(uint32_t write_group,
+					      uint32_t group_bgn,
+					      uint32_t delay1)
+{
+	uint32_t i, p;
+
+	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
+		scc_mgr_set_dq_out1_delay(write_group, i, delay1);
+		scc_mgr_load_dq(i);
+	}
+}
+
+/* apply and load a particular output delay for the DM pins in a group */
+static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group,
+					      uint32_t delay1)
+{
+	uint32_t i;
+
+	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
+		scc_mgr_set_dm_out1_delay(write_group, i, delay1);
+		scc_mgr_load_dm(i);
+	}
+}
+
+
+/* apply and load delay on both DQS and OCT out1 */
+static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
+						    uint32_t delay)
+{
+	scc_mgr_set_dqs_out1_delay(write_group, delay);
+	scc_mgr_load_dqs_io();
+
+	scc_mgr_set_oct_out1_delay(write_group, delay);
+	scc_mgr_load_dqs_for_write_group(write_group);
+}
+
+/* apply a delay to the entire output side: DQ, DM, DQS, OCT */
+static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group,
+						  uint32_t group_bgn,
+						  uint32_t delay)
+{
+	uint32_t i, p, new_delay;
+
+	/* dq shift */
+	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
+		new_delay = READ_SCC_DQ_OUT2_DELAY;
+		new_delay += delay;
+
+		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+			debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQ[%u,%u]:\
+				   %u > %lu => %lu", __func__, __LINE__,
+				   write_group, group_bgn, delay, i, p, new_delay,
+				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
+				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
+			new_delay = IO_IO_OUT2_DELAY_MAX;
+		}
+
+		scc_mgr_load_dq(i);
+	}
+
+	/* dm shift */
+	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
+		new_delay = READ_SCC_DM_IO_OUT2_DELAY;
+		new_delay += delay;
+
+		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+			debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DM[%u]:\
+				   %u > %lu => %lu\n",  __func__, __LINE__,
+				   write_group, group_bgn, delay, i, new_delay,
+				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
+				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
+			new_delay = IO_IO_OUT2_DELAY_MAX;
+		}
+
+		scc_mgr_load_dm(i);
+	}
+
+	/* dqs shift */
+	new_delay = READ_SCC_DQS_IO_OUT2_DELAY;
+	new_delay += delay;
+
+	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+		debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) DQS: %u > %d => %d;"
+			   " adding %u to OUT1\n", __func__, __LINE__,
+			   write_group, group_bgn, delay, new_delay,
+			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
+			   new_delay - IO_IO_OUT2_DELAY_MAX);
+		scc_mgr_set_dqs_out1_delay(write_group, new_delay -
+					   IO_IO_OUT2_DELAY_MAX);
+		new_delay = IO_IO_OUT2_DELAY_MAX;
+	}
+
+	scc_mgr_load_dqs_io();
+
+	/* oct shift */
+	new_delay = READ_SCC_OCT_OUT2_DELAY;
+	new_delay += delay;
+
+	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
+		debug_cond(DLEVEL == 1, "%s:%d (%u, %u, %u) OCT: %u > %d => %d;"
+			   " adding %u to OUT1\n", __func__, __LINE__,
+			   write_group, group_bgn, delay, new_delay,
+			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
+			   new_delay - IO_IO_OUT2_DELAY_MAX);
+		scc_mgr_set_oct_out1_delay(write_group, new_delay -
+					   IO_IO_OUT2_DELAY_MAX);
+		new_delay = IO_IO_OUT2_DELAY_MAX;
+	}
+
+	scc_mgr_load_dqs_for_write_group(write_group);
+}
+
+/*
+ * USER apply a delay to the entire output side (DQ, DM, DQS, OCT)
+ * and to all ranks
+ */
+static void scc_mgr_apply_group_all_out_delay_add_all_ranks(
+	uint32_t write_group, uint32_t group_bgn, uint32_t delay)
+{
+	uint32_t r;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		scc_mgr_apply_group_all_out_delay_add(write_group,
+						      group_bgn, delay);
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	}
+}
+
+/*
+ * Optimization used to recover some slots in ddr3 inst_rom;
+ * could be applied to other protocols if we wanted to.
+ */
+static void set_jump_as_return(void)
+{
+	/*
+	 * To save space, we replace return with a jump to the special shared
+	 * RETURN instruction, and we set the counter to a large value so
+	 * that we always jump.
+	 */
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0xFF, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+		RW_MGR_RETURN, SDR_WRITE);
+}
+
+/*
+ * should always use constants as argument to ensure all computations are
+ * performed at compile time
+ */
+static inline void delay_for_n_mem_clocks(const uint32_t clocks)
+{
+	uint32_t afi_clocks;
+	uint8_t inner = 0;
+	uint8_t outer = 0;
+	uint16_t c_loop = 0;
+
+	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);
+
+	/* scale (rounding up) to get afi clocks */
+	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;
+
+	/*
+	 * Note: we don't bother accounting for being off a little bit
+	 * because of a few extra instructions in outer loops.
+	 * Note: the loops have a test at the end, and do the test before
+	 * the decrement, and so always perform the loop
+	 * 1 time more than the counter value.
+	 */
+	if (afi_clocks == 0) {
+		;
+	} else if (afi_clocks <= 0x100) {
+		inner = afi_clocks-1;
+		outer = 0;
+		c_loop = 0;
+	} else if (afi_clocks <= 0x10000) {
+		inner = 0xff;
+		outer = (afi_clocks-1) >> 8;
+		c_loop = 0;
+	} else {
+		inner = 0xff;
+		outer = 0xff;
+		c_loop = (afi_clocks-1) >> 16;
+	}
+
+	/*
+	 * rom instructions are structured as follows:
+	 *
+	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
+	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
+	 *                return
+	 *
+	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
+	 * TARGET_B is set to IDLE_LOOP2 as well
+	 *
+	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
+	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
+	 *
+	 * a little confusing, but it helps save precious space in the inst_rom
+	 * and sequencer rom and keeps the delays more accurate and reduces
+	 * overhead
+	 */
+	if (afi_clocks <= 0x100) {
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0,
+			SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			RW_MGR_IDLE_LOOP1, SDR_WRITE);
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0, RW_MGR_IDLE_LOOP1,
+			SDR_WRITE);
+	} else {
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0,
+			SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), SDR_WRITE);
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0,
+			SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer), SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			RW_MGR_IDLE_LOOP2, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			RW_MGR_IDLE_LOOP2, SDR_WRITE);
+
+		/* hack to get around compiler not being smart enough */
+		if (afi_clocks <= 0x10000) {
+			/* only need to run once */
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_IDLE_LOOP2, SDR_WRITE);
+		} else {
+			do {
+				sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+					RW_MGR_IDLE_LOOP2, SDR_WRITE);
+			} while (c_loop-- != 0);
+		}
+	}
+	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
+}
+
+/*
+ * should always use constants as argument to ensure all computations are
+ * performed at compile time
+ */
+static inline void delay_for_n_ns(const uint32_t nanoseconds)
+{
+	debug("%s:%d nanoseconds=%u ... end", __func__, __LINE__, nanoseconds);
+	delay_for_n_mem_clocks((1000*nanoseconds) /
+			       (1000000/AFI_CLK_FREQ) * AFI_RATE_RATIO);
+}
+
+static void rw_mgr_mem_initialize(void)
+{
+	uint32_t r;
+
+	debug("%s:%d\n", __func__, __LINE__);
+
+	/* The reset / cke part of initialization is broadcasted to all ranks */
+	sdr_ops((u32 *)RW_MGR_SET_CS_AND_ODT_MASK, 0, RW_MGR_RANK_ALL, SDR_WRITE);
+
+	/*
+	 * Here's how you load the registers for a loop:
+	 * Counters are located @ 0x800
+	 * Jump addresses are located @ 0xC00
+	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
+	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
+	 * I know this ain't pretty, but the Avalon bus throws away the 2 least
+	 * significant bits
+	 */
+
+	/* start with memory RESET activated */
+
+	/* tINIT = 200us */
+
+	/*
+	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
+	 * If a and b are the numbers of iterations in 2 nested loops,
+	 * it takes the following number of cycles to complete the operation:
+	 * number_of_cycles = ((2 + n) * a + 2) * b
+	 * where n is the number of instructions in the inner loop
+	 * One possible solution is n = 0, a = 256, b = 106 => a = FF,
+	 * b = 6A
+	 */
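+	/*
+	 * Worked check of the formula above: with n = 0, a = 256, b = 106,
+	 * ((2 + 0) * 256 + 2) * 106 = 54484 cycles, which covers the ~53334
+	 * cycles that 200us takes at 3.75ns per cycle.
+	 */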
+
+	/* Load counters */
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL), SDR_WRITE);
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL), SDR_WRITE);
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL), SDR_WRITE);
+
+	/* Load jump address */
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+		RW_MGR_INIT_RESET_0_CKE_0, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+		RW_MGR_INIT_RESET_0_CKE_0, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+		RW_MGR_INIT_RESET_0_CKE_0, SDR_WRITE);
+
+	/* Execute count instruction */
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0, RW_MGR_INIT_RESET_0_CKE_0,
+		SDR_WRITE);
+
+	/* indicate that memory is stable */
+	sdr_ops(&phy_mgr_cfg->reset_mem_stbl, 0, 1, SDR_WRITE);
+
+	/*
+	 * transition the RESET to high
+	 * Wait for 500us
+	 */
+
+	/*
+	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
+	 * If a and b are the numbers of iterations in 2 nested loops,
+	 * it takes the following number of cycles to complete the operation:
+	 * number_of_cycles = ((2 + n) * a + 2) * b
+	 * where n is the number of instructions in the inner loop
+	 * One possible solution is n = 2, a = 131, b = 256 => a = 83,
+	 * b = FF
+	 */
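+	/*
+	 * Worked check of the formula above: with n = 2, a = 131, b = 256,
+	 * ((2 + 2) * 131 + 2) * 256 = 134656 cycles, which covers the
+	 * ~133334 cycles that 500us takes at 3.75ns per cycle.
+	 */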
+
+	/* Load counters */
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL), SDR_WRITE);
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL), SDR_WRITE);
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0,
+		SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL), SDR_WRITE);
+
+	/* Load jump address */
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+		RW_MGR_INIT_RESET_1_CKE_0, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+		RW_MGR_INIT_RESET_1_CKE_0, SDR_WRITE);
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+		RW_MGR_INIT_RESET_1_CKE_0, SDR_WRITE);
+
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0, RW_MGR_INIT_RESET_1_CKE_0,
+		SDR_WRITE);
+
+	/* bring up clock enable */
+
+	/* tXRP < 250 ck cycles */
+	delay_for_n_mem_clocks(250);
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
+		if (param->skip_ranks[r]) {
+			/* request to skip the rank */
+
+			continue;
+		}
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
+
+		/*
+		 * USER Use mirrored commands for odd ranks if address
+		 * mirroring is on
+		 */
+		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS2_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS3_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS1_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS0_DLL_RESET_MIRR, SDR_WRITE);
+		} else {
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS2, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS3, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS1, SDR_WRITE);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS0_DLL_RESET, SDR_WRITE);
+		}
+		set_jump_as_return();
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0, RW_MGR_ZQCL,
+			SDR_WRITE);
+
+		/* tZQinit = tDLLK = 512 ck cycles */
+		delay_for_n_mem_clocks(512);
+	}
+}
+
+/*
+ * At the end of calibration we have to program the user settings in
+ * and hand off the memory to the user.
+ */
+static void rw_mgr_mem_handoff(void)
+{
+	uint32_t r;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
+		if (param->skip_ranks[r])
+			/* request to skip the rank */
+			continue;
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
+
+		/* precharge all banks ... */
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+			RW_MGR_PRECHARGE_ALL, SDR_WRITE);
+
+		/* load up MR settings specified by user */
+
+		/*
+		 * Use mirrored commands for odd ranks if address
+		 * mirroring is on
+		 */
+		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS2_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS3_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS1_MIRR, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS0_USER_MIRR, SDR_WRITE);
+		} else {
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS2, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS3, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS1, SDR_WRITE);
+			delay_for_n_mem_clocks(4);
+			set_jump_as_return();
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+				RW_MGR_MRS0_USER, SDR_WRITE);
+		}
+		/*
+		 * USER We need to wait tMOD (12CK or 15ns) before issuing
+		 * other commands, but we will have plenty of NIOS cycles before
+		 * actual handoff, so it's okay.
+		 */
+	}
+}
+
+/*
+ * performs a guaranteed read on the patterns we are going to use during a
+ * read test to ensure memory works
+ */
+static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
+	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
+	uint32_t all_ranks)
+{
+	uint32_t r, vg;
+	uint32_t correct_mask_vg;
+	uint32_t tmp_bit_chk;
+	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+
+	*bit_chk = param->read_correct_mask;
+	correct_mask_vg = param->read_correct_mask_vg;
+
+	for (r = rank_bgn; r < rank_end; r++) {
+		if (param->skip_ranks[r])
+			/* request to skip the rank */
+			continue;
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+		/* Load up a constant burst of read commands */
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x20, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			RW_MGR_GUARANTEED_READ, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x20, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			RW_MGR_GUARANTEED_READ_CONT, SDR_WRITE);
+
+		tmp_bit_chk = 0;
+		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
+			/* reset the fifos to get pointers to known state */
+
+			sdr_ops(&phy_mgr_cmd->fifo_reset, 0, 0, SDR_WRITE);
+			sdr_ops((u32 *)RW_MGR_RESET_READ_DATAPATH, 0, 0, SDR_WRITE);
+
+			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
+				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
+
+			sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP,
+				      ((group *
+				      RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
+				      vg) << 2), RW_MGR_GUARANTEED_READ, SDR_WRITE);
+			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg &
+				~(sdr_ops((u32 *)BASE_RW_MGR, 0, 0, SDR_READ)));
+
+			if (vg == 0)
+				break;
+		}
+		*bit_chk &= tmp_bit_chk;
+	}
+
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, (group << 2),
+		RW_MGR_CLEAR_DQS_ENABLE, SDR_WRITE);
+
+	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+	debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) =>\
+		   %lu\n", __func__, __LINE__, group, *bit_chk, param->read_correct_mask,
+		   (long unsigned int)(*bit_chk == param->read_correct_mask));
+	return *bit_chk == param->read_correct_mask;
+}
+
+static inline uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
+	(uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
+{
+	return rw_mgr_mem_calibrate_read_test_patterns(0, group,
+		num_tries, bit_chk, 1);
+}
+
+/* load up the patterns we are going to use during a read test */
+static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
+	uint32_t all_ranks)
+{
+	uint32_t r;
+	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+
+	debug("%s:%d\n", __func__, __LINE__);
+	for (r = rank_bgn; r < rank_end; r++) {
+		if (param->skip_ranks[r])
+			/* request to skip the rank */
+			continue;
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+		/* Load up a constant burst */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x20, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			RW_MGR_GUARANTEED_WRITE_WAIT0, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x20, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			RW_MGR_GUARANTEED_WRITE_WAIT1, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0x04, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+			RW_MGR_GUARANTEED_WRITE_WAIT2, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr3, 0, 0x04, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3, 0,
+			RW_MGR_GUARANTEED_WRITE_WAIT3, SDR_WRITE);
+
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+			RW_MGR_GUARANTEED_WRITE, SDR_WRITE);
+	}
+
+	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+}
+
+/*
+ * Try a read and see if it returns correct data back. Has dummy reads
+ * inserted into the mix (used to align the dqs enable), and has more
+ * thorough checks than the regular read test.
+ */
+static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
+	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
+	uint32_t all_groups, uint32_t all_ranks)
+{
+	uint32_t r, vg;
+	uint32_t correct_mask_vg;
+	uint32_t tmp_bit_chk;
+	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+
+	*bit_chk = param->read_correct_mask;
+	correct_mask_vg = param->read_correct_mask_vg;
+
+	uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
+		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);
+
+	for (r = rank_bgn; r < rank_end; r++) {
+		if (param->skip_ranks[r])
+			/* request to skip the rank */
+			continue;
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x10, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			RW_MGR_READ_B2B_WAIT1, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0x10, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+			RW_MGR_READ_B2B_WAIT2, SDR_WRITE);
+
+		if (quick_read_mode)
+			/* need at least two (1+1) reads to capture failures */
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x1, SDR_WRITE);
+		else if (all_groups)
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x06, SDR_WRITE);
+		else
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x32, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			RW_MGR_READ_B2B, SDR_WRITE);
+		if (all_groups)
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr3, 0,
+				      RW_MGR_MEM_IF_READ_DQS_WIDTH *
+				      RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
+				      SDR_WRITE);
+		else
+			sdr_ops(&sdr_rw_load_mgr_regs->load_cntr3, 0, 0x0, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3, 0,
+			RW_MGR_READ_B2B, SDR_WRITE);
+
+		tmp_bit_chk = 0;
+		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
+			/* reset the fifos to get pointers to known state */
+			sdr_ops(&phy_mgr_cmd->fifo_reset, 0, 0, SDR_WRITE);
+			sdr_ops((u32 *)RW_MGR_RESET_READ_DATAPATH, 0, 0, SDR_WRITE);
+
+			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
+				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
+
+			sdr_ops((u32 *)(all_groups ? RW_MGR_RUN_ALL_GROUPS :
+				      RW_MGR_RUN_SINGLE_GROUP), ((group *
+				      RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS
+				      + vg) << 2), RW_MGR_READ_B2B, SDR_WRITE);
+			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg &
+				~(sdr_ops((u32 *)BASE_RW_MGR, 0, 0, SDR_READ)));
+
+			if (vg == 0)
+				break;
+		}
+		*bit_chk &= tmp_bit_chk;
+	}
+
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, (group << 2),
+		RW_MGR_CLEAR_DQS_ENABLE, SDR_WRITE);
+
+	if (all_correct) {
+		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) =>\
+			   (%u == %u) => %lu", __func__, __LINE__, group,
+			   all_groups, *bit_chk, param->read_correct_mask,
+			   (long unsigned int)(*bit_chk ==
+			   param->read_correct_mask));
+		return *bit_chk == param->read_correct_mask;
+	} else	{
+		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) =>\
+			   (%u != %lu) => %lu\n", __func__, __LINE__,
+			   group, all_groups, *bit_chk, (long unsigned int)0,
+			   (long unsigned int)(*bit_chk != 0x00));
+		return *bit_chk != 0x00;
+	}
+}
+
+static inline uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
+	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
+	uint32_t all_groups)
+{
+	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
+					      bit_chk, all_groups, 1);
+}
+
+static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
+{
+	sdr_ops(&phy_mgr_cmd->inc_vfifo_hard_phy, 0, grp, SDR_WRITE);
+
+	(*v)++;
+	BFM_INC_VFIFO;
+}
+
+/*
+ * Used in quick cal to properly loop through the duplicated VFIFOs in
+ * AV QDRII/RLDRAM
+ */
+static inline void rw_mgr_incr_vfifo_all(uint32_t grp, uint32_t *v)
+{
+	rw_mgr_incr_vfifo(grp, v);
+}
+
+static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
+{
+	uint32_t i;
+
+	for (i = 0; i < VFIFO_SIZE-1; i++)
+		rw_mgr_incr_vfifo(grp, v);
+}
+
+/* find a good dqs enable to use */
+static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
+{
+	uint32_t i, d, v, p;
+	uint32_t max_working_cnt;
+	uint32_t fail_cnt;
+	uint32_t bit_chk;
+	uint32_t dtaps_per_ptap;
+	uint32_t found_begin, found_end;
+	uint32_t work_bgn, work_mid, work_end, tmp_delay;
+	uint32_t test_status;
+	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
+
+	debug("%s:%d %u\n", __func__, __LINE__, grp);
+	BFM_STAGE("find_dqs_en_phase");
+
+	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
+
+	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
+	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
+
+	fail_cnt = 0;
+
+	/* ************************************************************** */
+	/* * Step 0 : Determine number of delay taps for each phase tap * */
+
+	dtaps_per_ptap = 0;
+	tmp_delay = 0;
+	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
+		dtaps_per_ptap++;
+		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+	}
+	dtaps_per_ptap--;
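+	/*
+	 * After the decrement, dtaps_per_ptap is the number of whole
+	 * delay-chain taps that fit strictly below one phase tap; e.g. with
+	 * hypothetical tap delays of 416 ps (OPA) and 25 ps (delay chain)
+	 * this gives dtaps_per_ptap = 16.
+	 */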
+	tmp_delay = 0;
+
+	/* ********************************************************* */
+	/* * Step 1 : First push vfifo until we get a failing read * */
+	for (v = 0; v < VFIFO_SIZE; ) {
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %lu\n",
+			   __func__, __LINE__, BFM_GBL_GET(vfifo_idx));
+		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
+			(grp, 1, PASS_ONE_BIT, &bit_chk, 0);
+		if (!test_status) {
+			fail_cnt++;
+
+			if (fail_cnt == 2)
+				break;
+		}
+
+		/* fiddle with FIFO */
+		rw_mgr_incr_vfifo(grp, &v);
+	}
+
+	if (v >= VFIFO_SIZE) {
+		/* no failing read found!! Something must have gone wrong */
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
+			   __func__, __LINE__);
+		return 0;
+	}
+
+	max_working_cnt = 0;
+
+	/* ******************************************************** */
+	/* * step 2: find first working phase, increment in ptaps * */
+	found_begin = 0;
+	work_bgn = 0;
+	for (d = 0; d <= dtaps_per_ptap; d++, tmp_delay +=
+		IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
+		work_bgn = tmp_delay;
+		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+		for (i = 0; i < VFIFO_SIZE; i++) {
+			for (p = 0; p <= IO_DQS_EN_PHASE_MAX; p++, work_bgn +=
+				IO_DELAY_PER_OPA_TAP) {
+				scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+				test_status =
+				rw_mgr_mem_calibrate_read_test_all_ranks
+				(grp, 1, PASS_ONE_BIT, &bit_chk, 0);
+
+				if (test_status) {
+					max_working_cnt = 1;
+					found_begin = 1;
+					break;
+				}
+			}
+
+			if (found_begin)
+				break;
+
+			if (p > IO_DQS_EN_PHASE_MAX)
+				/* fiddle with FIFO */
+				rw_mgr_incr_vfifo(grp, &v);
+		}
+
+		if (found_begin)
+			break;
+	}
+
+	if (i >= VFIFO_SIZE) {
+		/* cannot find working solution */
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\
+			   ptap/dtap\n", __func__, __LINE__);
+		return 0;
+	}
+
+	work_end = work_bgn;
+
+	/*
+	 * If d is 0 then the working window covers a phase tap and we can
+	 * follow the old procedure; otherwise, we've found the beginning
+	 * and we need to increment the dtaps until we find the end.
+	 */
+	if (d == 0) {
+		/* ********************************************************* */
+		/* * step 3a: if we have room, back off by one and
+		increment in dtaps * */
+		COV(EN_PHASE_PTAP_OVERLAP);
+
+		/* Special case code for backing up a phase */
+		if (p == 0) {
+			p = IO_DQS_EN_PHASE_MAX;
+			rw_mgr_decr_vfifo(grp, &v);
+		} else {
+			p = p - 1;
+		}
+		tmp_delay = work_bgn - IO_DELAY_PER_OPA_TAP;
+		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+		found_begin = 0;
+		for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < work_bgn;
+			d++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
+			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+			if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+								     PASS_ONE_BIT,
+								     &bit_chk, 0)) {
+				found_begin = 1;
+				work_bgn = tmp_delay;
+				break;
+			}
+		}
+
+		/* We have found a working dtap before the ptap found above */
+		if (found_begin == 1)
+			max_working_cnt++;
+
+		/*
+		 * Restore VFIFO to old state before we decremented it
+		 * (if needed)
+		 */
+		p = p + 1;
+		if (p > IO_DQS_EN_PHASE_MAX) {
+			p = 0;
+			rw_mgr_incr_vfifo(grp, &v);
+		}
+
+		scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
+
+		/* ********************************************************* */
+		/* * step 4a: go forward from working phase to non working
+		phase, increment in ptaps * */
+		p = p + 1;
+		work_end += IO_DELAY_PER_OPA_TAP;
+		if (p > IO_DQS_EN_PHASE_MAX) {
+			/* fiddle with FIFO */
+			p = 0;
+			rw_mgr_incr_vfifo(grp, &v);
+		}
+
+		found_end = 0;
+		for (; i < VFIFO_SIZE + 1; i++) {
+			for (; p <= IO_DQS_EN_PHASE_MAX; p++, work_end
+				+= IO_DELAY_PER_OPA_TAP) {
+				scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+				if (!rw_mgr_mem_calibrate_read_test_all_ranks
+					(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
+					found_end = 1;
+					break;
+				} else {
+					max_working_cnt++;
+				}
+			}
+
+			if (found_end)
+				break;
+
+			if (p > IO_DQS_EN_PHASE_MAX) {
+				/* fiddle with FIFO */
+				rw_mgr_incr_vfifo(grp, &v);
+				p = 0;
+			}
+		}
+
+		if (i >= VFIFO_SIZE + 1) {
+			/* cannot see edge of failing read */
+			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end:\
+				   failed\n", __func__, __LINE__);
+			return 0;
+		}
+
+		/* ********************************************************* */
+		/* * step 5a:  back off one from last, increment in dtaps  * */
+
+		/* Special case code for backing up a phase */
+		if (p == 0) {
+			p = IO_DQS_EN_PHASE_MAX;
+			rw_mgr_decr_vfifo(grp, &v);
+		} else {
+			p = p - 1;
+		}
+
+		work_end -= IO_DELAY_PER_OPA_TAP;
+		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+		/*
+		 * The actual increment of dtaps is done outside of
+		 * the if/else loop to share code
+		 */
+		d = 0;
+
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \
+			   vfifo=%lu ptap=%u\n", __func__, __LINE__,
+			   BFM_GBL_GET(vfifo_idx), p);
+	} else {
+		/* ******************************************************* */
+		/* * step 3-5b:  Find the right edge of the window using
+		delay taps   * */
+		COV(EN_PHASE_PTAP_NO_OVERLAP);
+
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%lu \
+			   ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__,
+			   BFM_GBL_GET(vfifo_idx), p, d, work_bgn);
+		BFM_GBL_SET(dqs_enable_left_edge[grp].v,
+			    BFM_GBL_GET(vfifo_idx));
+		BFM_GBL_SET(dqs_enable_left_edge[grp].p, p);
+		BFM_GBL_SET(dqs_enable_left_edge[grp].d, d);
+		BFM_GBL_SET(dqs_enable_left_edge[grp].ps, work_bgn);
+
+		work_end = work_bgn;
+
+		/*
+		 * The actual increment of dtaps is done outside of the
+		 * if/else loop to share code
+		 */
+
+		/*
+		 * Only here to counterbalance a subtract later on which is
+		 * not needed if this branch of the algorithm is taken
+		 */
+		max_working_cnt++;
+	}
+
+	/* The dtap increment to find the failing edge is done here */
+	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
+	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
+			   end-2: dtap=%u\n", __func__, __LINE__, d);
+		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+							      PASS_ONE_BIT,
+							      &bit_chk, 0)) {
+			break;
+		}
+	}
+
+	/* Go back to working dtap */
+	if (d != 0)
+		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%lu \
+		   ptap=%u dtap=%u end=%u\n", __func__, __LINE__,
+		   BFM_GBL_GET(vfifo_idx), p, d-1, work_end);
+	BFM_GBL_SET(dqs_enable_right_edge[grp].v, BFM_GBL_GET(vfifo_idx));
+	BFM_GBL_SET(dqs_enable_right_edge[grp].p, p);
+	BFM_GBL_SET(dqs_enable_right_edge[grp].d, d-1);
+	BFM_GBL_SET(dqs_enable_right_edge[grp].ps, work_end);
+
+	if (work_end >= work_bgn) {
+		/* we have a working range */
+	} else {
+		/* nil range */
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \
+			   failed\n", __func__, __LINE__);
+		return 0;
+	}
+
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
+		   __func__, __LINE__, work_bgn, work_end);
+
+	/* *************************************************************** */
+	/*
+	 * * We need to calculate the number of dtaps that equal a ptap
+	 * * To do that we'll back up a ptap and re-find the edge of the
+	 * * window using dtaps
+	 */
+
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \
+		   for tracking\n", __func__, __LINE__);
+
+	/* Special case code for backing up a phase */
+	if (p == 0) {
+		p = IO_DQS_EN_PHASE_MAX;
+		rw_mgr_decr_vfifo(grp, &v);
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
+			   cycle/phase: v=%lu p=%u\n", __func__, __LINE__,
+			   BFM_GBL_GET(vfifo_idx), p);
+	} else {
+		p = p - 1;
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
+			   phase only: v=%lu p=%u", __func__, __LINE__,
+			   BFM_GBL_GET(vfifo_idx), p);
+	}
+
+	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+
+	/*
+	 * Increase dtap until we first see a passing read (in case the
+	 * window is smaller than a ptap),
+	 * and then a failing read to mark the edge of the window again
+	 */
+
+	/* Find a passing read */
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
+		   __func__, __LINE__);
+	found_passing_read = 0;
+	found_failing_read = 0;
+	initial_failing_dtap = d;
+	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \
+			   read d=%u\n", __func__, __LINE__, d);
+		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+							     PASS_ONE_BIT,
+							     &bit_chk, 0)) {
+			found_passing_read = 1;
+			break;
+		}
+	}
+
+	if (found_passing_read) {
+		/* Find a failing read */
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \
+			   read\n", __func__, __LINE__);
+		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
+			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
+				   testing read d=%u\n", __func__, __LINE__, d);
+			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+			if (!rw_mgr_mem_calibrate_read_test_all_ranks
+				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
+				found_failing_read = 1;
+				break;
+			}
+		}
+	} else {
+		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \
+			   calculate dtaps", __func__, __LINE__);
+		debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
+	}
+
+	/*
+	 * The dynamically calculated dtaps_per_ptap is only valid if we
+	 * found both a passing and a failing read. If we didn't, it means
+	 * d hit the max (IO_DQS_EN_DELAY_MAX) and dtaps_per_ptap retains
+	 * its statically calculated value from step 0.
+	 */
+	if (found_passing_read && found_failing_read)
+		dtaps_per_ptap = d - initial_failing_dtap;
+
+	sdr_ops(&sdr_reg_file->dtaps_per_ptap, 0, dtaps_per_ptap, SDR_WRITE);
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \
+		   - %u = %u",  __func__, __LINE__, d,
+		   initial_failing_dtap, dtaps_per_ptap);
+
+	/* ******************************************** */
+	/* * step 6:  Find the centre of the window   * */
+
+	work_mid = (work_bgn + work_end) / 2;
+	tmp_delay = 0;
+
+	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
+		   work_bgn, work_end, work_mid);
+	/* Get the middle delay to be less than a VFIFO delay */
+	for (p = 0; p <= IO_DQS_EN_PHASE_MAX;
+		p++, tmp_delay += IO_DELAY_PER_OPA_TAP)
+		;
+	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
+	while (work_mid > tmp_delay)
+		work_mid -= tmp_delay;
+	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
+	tmp_delay = 0;
+	for (p = 0; p <= IO_DQS_EN_PHASE_MAX && tmp_delay < work_mid;
+		p++, tmp_delay += IO_DELAY_PER_OPA_TAP)
+		;
+	tmp_delay -= IO_DELAY_PER_OPA_TAP;
+	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p-1, tmp_delay);
+	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < work_mid; d++,
+		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
+		;
+	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
+
+	scc_mgr_set_dqs_en_phase_all_ranks(grp, p-1);
+	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
+
+	/*
+	 * Push vfifo until we can successfully calibrate. We can do this
+	 * because the largest possible margin is 1 VFIFO cycle.
+	 */
+	for (i = 0; i < VFIFO_SIZE; i++) {
+		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%lu\n",
+		       BFM_GBL_GET(vfifo_idx));
+		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+							     PASS_ONE_BIT,
+							     &bit_chk, 0)) {
+			break;
+		}
+
+		/* fiddle with FIFO */
+		rw_mgr_incr_vfifo(grp, &v);
+	}
+
+	if (i >= VFIFO_SIZE) {
+		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \
+			   failed\n", __func__, __LINE__);
+		return 0;
+	}
+	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
+		   vfifo=%lu ptap=%u dtap=%u\n", __func__, __LINE__,
+		   BFM_GBL_GET(vfifo_idx), p-1, d);
+	BFM_GBL_SET(dqs_enable_mid[grp].v, BFM_GBL_GET(vfifo_idx));
+	BFM_GBL_SET(dqs_enable_mid[grp].p, p-1);
+	BFM_GBL_SET(dqs_enable_mid[grp].d, d);
+	BFM_GBL_SET(dqs_enable_mid[grp].ps, work_mid);
+	return 1;
+}
+
+/*
+ * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
+ * dq_in_delay values
+ */
+static inline uint32_t
+rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
+(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
+{
+	uint32_t found;
+	uint32_t i;
+	uint32_t p;
+	uint32_t d;
+	uint32_t r;
+
+	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
+		(RW_MGR_MEM_DQ_PER_READ_DQS-1);
+		/* we start at zero, so have one less dq to divide among */
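+	/*
+	 * For illustration only (hypothetical values): with
+	 * IO_IO_IN_DELAY_MAX = 31 and RW_MGR_MEM_DQ_PER_READ_DQS = 8,
+	 * delay_step = 31 / 7 = 4, so successive DQ pins are swept with
+	 * input delays of 0, 4, 8, ... 28 while searching for a DQS
+	 * enable phase.
+	 */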
+
+	debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
+	      test_bgn);
+
+	/* try different dq_in_delays since the dq path is shorter than dqs */
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS;
+			i++, p++, d += delay_step) {
+			debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
+				   vfifo_find_dqs_", __func__, __LINE__);
+			debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
+			       write_group, read_group);
+			debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i, p, d);
+			scc_mgr_set_dq_in_delay(write_group, p, d);
+			scc_mgr_load_dq(p);
+		}
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	}
+
+	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);
+
+	debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
+		   en_phase_sweep_dq", __func__, __LINE__);
+	debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay \
+		   chain to zero\n", write_group, read_group, found);
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
+			i++, p++) {
+			scc_mgr_set_dq_in_delay(write_group, p, 0);
+			scc_mgr_load_dq(p);
+		}
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	}
+
+	return found;
+}
+
+/* per-bit deskew DQ and center */
+static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
+	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
+	uint32_t use_read_test, uint32_t update_fom)
+{
+	uint32_t i, p, d, min_index;
+	/* Store these as signed since there are comparisons with
+	signed numbers */
+	uint32_t bit_chk;
+	uint32_t sticky_bit_chk;
+	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
+	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
+	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
+	int32_t mid;
+	int32_t orig_mid_min, mid_min;
+	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
+		final_dqs_en;
+	int32_t dq_margin, dqs_margin;
+	uint32_t stop;
+	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
+
+	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
+
+	start_dqs = sdr_ops((u32 *)SCC_MGR_DQS_IN_DELAY, read_group << 2, 0,
+			    SDR_READ);
+	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
+		start_dqs_en = sdr_ops((u32 *)SCC_MGR_DQS_EN_DELAY,
+				       (read_group << 2) - IO_DQS_EN_DELAY_OFFSET,
+				       0, SDR_READ);
+
+	/* per-bit deskew */
+
+	/* set the left and right edge of each bit to an illegal value */
+	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
+	sticky_bit_chk = 0;
+	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+		left_edge[i]  = IO_IO_IN_DELAY_MAX + 1;
+		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
+	}
+
+	/* Search for the left edge of the window for each bit */
+	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
+		scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);
+
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+		/*
+		 * Stop searching when the read test doesn't pass AND when
+		 * we've seen a passing read on every bit.
+		 */
+		if (use_read_test) {
+			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
+				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
+				&bit_chk, 0, 0);
+		} else {
+			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+							0, PASS_ONE_BIT,
+							&bit_chk, 0);
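+			/*
+			 * The write test returns a write-group-wide result;
+			 * the shift below drops the bits of the read groups
+			 * below this one so that bit 0 of bit_chk lines up
+			 * with the first DQ of the read group under test.
+			 */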
+			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
+				(read_group - (write_group *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
+			stop = (bit_chk == 0);
+		}
+		sticky_bit_chk = sticky_bit_chk | bit_chk;
+		stop = stop && (sticky_bit_chk == param->read_correct_mask);
+		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => " BTFLD_FMT " == "
+			   BTFLD_FMT " && %u", __func__, __LINE__, d,
+			   sticky_bit_chk,
+			param->read_correct_mask, stop);
+
+		if (stop == 1) {
+			break;
+		} else {
+			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+				if (bit_chk & 1) {
+					/* Remember a passing test as the
+					left_edge */
+					left_edge[i] = d;
+				} else {
+					/* If a left edge has not been seen yet,
+					then a future passing test will mark
+					this edge as the right edge */
+					if (left_edge[i] ==
+						IO_IO_IN_DELAY_MAX + 1) {
+						right_edge[i] = -(d + 1);
+					}
+				}
+				bit_chk = bit_chk >> 1;
+			}
+		}
+	}
+
+	/* Reset DQ delay chains to 0 */
+	scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0);
+	sticky_bit_chk = 0;
+	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
+		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
+			   %d right_edge[%u]: %d\n", __func__, __LINE__,
+			   i, left_edge[i], i, right_edge[i]);
+
+		/*
+		 * Check for cases where we haven't found the left edge,
+		 * which makes our assignment of the right edge invalid.
+		 * Reset it to the illegal value.
+		 */
+		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
+			right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
+			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
+			debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \
+				   right_edge[%u]: %d\n", __func__, __LINE__,
+				   i, right_edge[i]);
+		}
+
+		/*
+		 * Reset sticky bit (except for bits where we have seen
+		 * both the left and right edge).
+		 */
+		sticky_bit_chk = sticky_bit_chk << 1;
+		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
+		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
+			sticky_bit_chk = sticky_bit_chk | 1;
+		}
+
+		if (i == 0)
+			break;
+	}
+
+	/* Search for the right edge of the window for each bit */
+	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
+		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
+		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+			uint32_t delay = d + start_dqs_en;
+			if (delay > IO_DQS_EN_DELAY_MAX)
+				delay = IO_DQS_EN_DELAY_MAX;
+			scc_mgr_set_dqs_en_delay(read_group, delay);
+		}
+		scc_mgr_load_dqs(read_group);
+
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+		/*
+		 * Stop searching when the read test doesn't pass AND when
+		 * we've seen a passing read on every bit.
+		 */
+		if (use_read_test) {
+			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
+				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
+				&bit_chk, 0, 0);
+		} else {
+			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+							0, PASS_ONE_BIT,
+							&bit_chk, 0);
+			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
+				(read_group - (write_group *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
+			stop = (bit_chk == 0);
+		}
+		sticky_bit_chk = sticky_bit_chk | bit_chk;
+		stop = stop && (sticky_bit_chk == param->read_correct_mask);
+
+		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => " BTFLD_FMT " == "
+			   BTFLD_FMT " && %u", __func__, __LINE__, d,
+			   sticky_bit_chk, param->read_correct_mask, stop);
+
+		if (stop == 1) {
+			break;
+		} else {
+			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+				if (bit_chk & 1) {
+					/* Remember a passing test as
+					the right_edge */
+					right_edge[i] = d;
+				} else {
+					if (d != 0) {
+						/* If a right edge has not been
+						seen yet, then a future passing
+						test will mark this edge as the
+						left edge */
+						if (right_edge[i] ==
+						IO_IO_IN_DELAY_MAX + 1) {
+							left_edge[i] = -(d + 1);
+						}
+					} else {
+						/* d = 0 failed, but it passed
+						when testing the left edge,
+						so it must be marginal,
+						set it to -1 */
+						if (right_edge[i] ==
+							IO_IO_IN_DELAY_MAX + 1 &&
+							left_edge[i] !=
+							IO_IO_IN_DELAY_MAX
+							+ 1) {
+							right_edge[i] = -1;
+						}
+						/* If a right edge has not been
+						seen yet, then a future passing
+						test will mark this edge as the
+						left edge */
+						else if (right_edge[i] ==
+							IO_IO_IN_DELAY_MAX +
+							1) {
+							left_edge[i] = -(d + 1);
+						}
+					}
+				}
+
+				debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\
+					   d=%u]: ", __func__, __LINE__, d);
+				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
+					   (int)(bit_chk & 1), i, left_edge[i]);
+				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
+					   right_edge[i]);
+				bit_chk = bit_chk >> 1;
+			}
+		}
+	}
+
+	/* Check that all bits have a window */
+	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
+			   %d right_edge[%u]: %d", __func__, __LINE__,
+			   i, left_edge[i], i, right_edge[i]);
+		BFM_GBL_SET(dq_read_left_edge[read_group][i], left_edge[i]);
+		BFM_GBL_SET(dq_read_right_edge[read_group][i], right_edge[i]);
+		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
+			== IO_IO_IN_DELAY_MAX + 1)) {
+			/*
+			 * Restore delay chain settings before letting the loop
+			 * in rw_mgr_mem_calibrate_vfifo retry different
+			 * dqs/ck relationships.
+			 */
+			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
+			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+				scc_mgr_set_dqs_en_delay(read_group,
+							 start_dqs_en);
+			}
+			scc_mgr_load_dqs(read_group);
+			sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+			debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \
+				   find edge [%u]: %d %d", __func__, __LINE__,
+				   i, left_edge[i], right_edge[i]);
+			if (use_read_test) {
+				set_failing_group_stage(read_group *
+					RW_MGR_MEM_DQ_PER_READ_DQS + i,
+					CAL_STAGE_VFIFO,
+					CAL_SUBSTAGE_VFIFO_CENTER);
+			} else {
+				set_failing_group_stage(read_group *
+					RW_MGR_MEM_DQ_PER_READ_DQS + i,
+					CAL_STAGE_VFIFO_AFTER_WRITES,
+					CAL_SUBSTAGE_VFIFO_CENTER);
+			}
+			return 0;
+		}
+	}
+
+	/* Find middle of window for each DQ bit */
+	mid_min = left_edge[0] - right_edge[0];
+	min_index = 0;
+	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+		mid = left_edge[i] - right_edge[i];
+		if (mid < mid_min) {
+			mid_min = mid;
+			min_index = i;
+		}
+	}
+
+	/*
+	 * -mid_min/2 represents the amount that we need to move DQS.
+	 * If mid_min is odd and positive we'll need to add one to
+	 * make sure the rounding in further calculations is correct
+	 * (always bias to the right), so just add 1 for all positive values.
+	 */
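+	/*
+	 * E.g. mid_min = 5 becomes 6 and halves to 3, while mid_min = -5
+	 * simply truncates to -2; the DQS move below is start_dqs - mid_min.
+	 */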
+	if (mid_min > 0)
+		mid_min++;
+
+	mid_min = mid_min / 2;
+
+	debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
+		   __func__, __LINE__, mid_min, min_index);
+
+	/* Determine the amount we can change DQS (which is -mid_min) */
+	orig_mid_min = mid_min;
+	new_dqs = start_dqs - mid_min;
+	if (new_dqs > IO_DQS_IN_DELAY_MAX)
+		new_dqs = IO_DQS_IN_DELAY_MAX;
+	else if (new_dqs < 0)
+		new_dqs = 0;
+
+	mid_min = start_dqs - new_dqs;
+	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
+		   mid_min, new_dqs);
+
+	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
+			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
+		else if (start_dqs_en - mid_min < 0)
+			mid_min += start_dqs_en - mid_min;
+	}
+	new_dqs = start_dqs - mid_min;
+
+	debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
+		   new_dqs=%d mid_min=%d\n", start_dqs,
+		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
+		   new_dqs, mid_min);
+
+	/* Initialize data for export structures */
+	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
+	dq_margin  = IO_IO_IN_DELAY_MAX + 1;
+
+	/* add delay to bring centre of all DQ windows to the same "level" */
+	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
+		/* Use values before divide by 2 to reduce round off error */
+		shift_dq = (left_edge[i] - right_edge[i] -
+			(left_edge[min_index] - right_edge[min_index]))/2  +
+			(orig_mid_min - mid_min);
+
+		debug_cond(DLEVEL == 2, "vfifo_center: before: \
+			   shift_dq[%u]=%d\n", i, shift_dq);
+
+		temp_dq_in_delay1 = sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY, p << 2,
+					    0, SDR_READ);
+		temp_dq_in_delay2 = sdr_ops((u32 *)SCC_MGR_IO_IN_DELAY, i << 2,
+					    0, SDR_READ);
+		if (shift_dq + (int32_t)temp_dq_in_delay1 >
+			(int32_t)IO_IO_IN_DELAY_MAX) {
+			shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
+		} else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
+			shift_dq = -(int32_t)temp_dq_in_delay1;
+		}
+		debug_cond(DLEVEL == 2, "vfifo_center: after: \
+			   shift_dq[%u]=%d\n", i, shift_dq);
+		final_dq[i] = temp_dq_in_delay1 + shift_dq;
+		scc_mgr_set_dq_in_delay(write_group, p, final_dq[i]);
+		scc_mgr_load_dq(p);
+
+		debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
+			   left_edge[i] - shift_dq + (-mid_min),
+			   right_edge[i] + shift_dq - (-mid_min));
+		/* To determine values for export structures */
+		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
+			dq_margin = left_edge[i] - shift_dq + (-mid_min);
+
+		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
+			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
+	}
+
+	final_dqs = new_dqs;
+	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
+		final_dqs_en = start_dqs_en - mid_min;
+
+	/* Move DQS-en */
+	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+		scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
+		scc_mgr_load_dqs(read_group);
+	}
+
+	/* Move DQS */
+	scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
+	scc_mgr_load_dqs(read_group);
+	debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
+		   dqs_margin=%d", __func__, __LINE__,
+		   dq_margin, dqs_margin);
+
+	/* Do not remove this line as it makes sure all of our decisions
+	have been applied */
+	sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	return (dq_margin >= 0) && (dqs_margin >= 0);
+}
+
+/*
+ * calibrate the read valid prediction FIFO.
+ *
+ *  - read valid prediction will consist of finding a good DQS enable phase,
+ * DQS enable delay, DQS input phase, and DQS input delay.
+ *  - we also do a per-bit deskew on the DQ lines.
+ */
+static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
+					   uint32_t test_bgn)
+{
+	uint32_t p, d, rank_bgn, sr;
+	uint32_t dtaps_per_ptap;
+	uint32_t tmp_delay;
+	uint32_t bit_chk;
+	uint32_t grp_calibrated;
+	uint32_t write_group, write_test_bgn;
+	uint32_t failed_substage;
+
+	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
+
+	/* update info for sims */
+	reg_file_set_stage(CAL_STAGE_VFIFO);
+
+	write_group = read_group;
+	write_test_bgn = test_bgn;
+
+	/* USER Determine number of delay taps for each phase tap */
+	dtaps_per_ptap = 0;
+	tmp_delay = 0;
+	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
+		dtaps_per_ptap++;
+		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+	}
+	dtaps_per_ptap--;
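+	/*
+	 * Example with hypothetical generated constants: IO_DELAY_PER_OPA_TAP
+	 * = 312 and IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 leave the loop with
+	 * dtaps_per_ptap = 13, then 12 after the decrement, i.e. the largest
+	 * number of dtaps that still fits within one ptap.
+	 */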
+	tmp_delay = 0;
+
+	/* update info for sims */
+	reg_file_set_group(read_group);
+
+	grp_calibrated = 0;
+
+	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
+	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
+
+	for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
+		/*
+		 * In RLDRAMX we may be messing with the delay of pins in
+		 * the same write group but outside of the current read
+		 * group, but that's OK because we haven't calibrated the
+		 * output side yet.
+		 */
+		if (d > 0) {
+			scc_mgr_apply_group_all_out_delay_add_all_ranks
+			(write_group, write_test_bgn, d);
+		}
+
+		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
+			p++) {
+			/* set a particular dqdqs phase */
+			scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);
+
+			debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
+				   p=%u d=%u\n", __func__, __LINE__,
+				   read_group, p, d);
+			BFM_GBL_SET(gwrite_pos[read_group].p, p);
+			BFM_GBL_SET(gwrite_pos[read_group].d, d);
+
+			/*
+			 * Load up the patterns used by read calibration
+			 * using current DQDQS phase.
+			 */
+			rw_mgr_mem_calibrate_read_load_patterns(0, 1);
+			if (!(gbl->phy_debug_mode_flags &
+				PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
+				if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
+				    (read_group, 1, &bit_chk)) {
+					debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
+						   __func__, __LINE__);
+					debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
+						   read_group, p, d);
+					break;
+				}
+			}
+
+			/* case:56390 */
+			grp_calibrated = 1;
+	if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
+		(write_group, read_group, test_bgn)) {
+				/*
+				 * USER Read per-bit deskew can be done on a
+				 * per shadow register basis.
+				 */
+				for (rank_bgn = 0, sr = 0;
+					rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+					rank_bgn += NUM_RANKS_PER_SHADOW_REG,
+					++sr) {
+					/*
+					 * Determine if this set of ranks
+					 * should be skipped entirely.
+					 */
+					if (!param->skip_shadow_regs[sr]) {
+						/*
+						 * If doing read-after-write
+						 * calibration, do not update
+						 * the FOM now; do it then.
+						 */
+					if (!rw_mgr_mem_calibrate_vfifo_center
+						(rank_bgn, write_group,
+						read_group, test_bgn, 1, 0)) {
+							grp_calibrated = 0;
+							failed_substage =
+						CAL_SUBSTAGE_VFIFO_CENTER;
+						}
+					}
+				}
+			} else {
+				grp_calibrated = 0;
+				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
+			}
+		}
+	}
+
+	if (grp_calibrated == 0) {
+		set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
+					failed_substage);
+		return 0;
+	}
+
+	/*
+	 * Reset the delay chains back to zero if they have moved > 1
+	 * (check for > 1 because the loop will increase d even when it
+	 * passes in the first case).
+	 */
+	if (d > 2)
+		scc_mgr_zero_group(write_group, write_test_bgn, 1);
+
+	return 1;
+}
+
+/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
+static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
+					       uint32_t test_bgn)
+{
+	uint32_t rank_bgn, sr;
+	uint32_t grp_calibrated;
+	uint32_t write_group;
+
+	debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
+
+	/* update info for sims */
+
+	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
+
+	write_group = read_group;
+
+	/* update info for sims */
+	reg_file_set_group(read_group);
+
+	grp_calibrated = 1;
+	/* Read per-bit deskew can be done on a per shadow register basis */
+	for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+		rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
+		/* Determine if this set of ranks should be skipped entirely */
+		if (!param->skip_shadow_regs[sr]) {
+		/* This is the last calibration round, update FOM here */
+			if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
+								write_group,
+								read_group,
+								test_bgn, 0,
+								1)) {
+				grp_calibrated = 0;
+			}
+		}
+	}
+
+	if (grp_calibrated == 0) {
+		set_failing_group_stage(write_group,
+					CAL_STAGE_VFIFO_AFTER_WRITES,
+					CAL_SUBSTAGE_VFIFO_CENTER);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Calibrate LFIFO to find smallest read latency */
+static uint32_t rw_mgr_mem_calibrate_lfifo(void)
+{
+	uint32_t found_one;
+	uint32_t bit_chk;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	BFM_STAGE("lfifo");
+
+	/* update info for sims */
+	reg_file_set_stage(CAL_STAGE_LFIFO);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
+
+	/* Load up the patterns used by read calibration for all ranks */
+	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
+	found_one = 0;
+
+	do {
+		sdr_ops(&phy_mgr_cfg->phy_rlat, 0, gbl->curr_read_lat, SDR_WRITE);
+		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
+			   __func__, __LINE__, gbl->curr_read_lat);
+
+		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
+							      NUM_READ_TESTS,
+							      PASS_ALL_BITS,
+							      &bit_chk, 1)) {
+			break;
+		}
+
+		found_one = 1;
+		/* reduce read latency and see if things are working */
+		/* correctly */
+		gbl->curr_read_lat--;
+	} while (gbl->curr_read_lat > 0);
+
+	/* reset the fifos to get pointers to known state */
+
+	sdr_ops(&phy_mgr_cmd->fifo_reset, 0, 0, SDR_WRITE);
+
+	if (found_one) {
+		/* add a fudge factor to the read latency that was determined */
+		gbl->curr_read_lat += 2;
+		sdr_ops(&phy_mgr_cfg->phy_rlat, 0, gbl->curr_read_lat, SDR_WRITE);
+		debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
+			   read_lat=%u\n", __func__, __LINE__,
+			   gbl->curr_read_lat);
+		return 1;
+	} else {
+		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
+					CAL_SUBSTAGE_READ_LATENCY);
+
+		debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
+			   read_lat=%u\n", __func__, __LINE__,
+			   gbl->curr_read_lat);
+		return 0;
+	}
+}
+
+/*
+ * Issue a write test command.
+ * Two variants are provided: one that just tests a write pattern and
+ * another that tests datamask functionality.
+ */
+static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
+						  uint32_t test_dm)
+{
+	uint32_t mcc_instruction;
+	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
+		ENABLE_SUPER_QUICK_CALIBRATION);
+	uint32_t rw_wl_nop_cycles;
+
+	/*
+	 * Set counter and jump addresses for the right
+	 * number of NOP cycles.
+	 * The number of supported NOP cycles can range from -1 to infinity
+	 * Three different cases are handled:
+	 *
+	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
+	 *    mechanism will be used to insert the right number of NOPs
+	 *
+	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
+	 *    issuing the write command will jump straight to the
+	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
+	 *    data (for RLD), skipping the NOP micro-instruction altogether.
+	 *
+	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
+	 *    turned on in the same micro-instruction that issues the write
+	 *    command. Then we need
+	 *    to directly jump to the micro-instruction that sends out the data
+	 *
+	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
+	 *       (2 and 3). One jump-counter (0) is used to perform multiple
+	 *       write-read operations.
+	 *       One counter is left to issue this command in "multiple-group" mode.
+	 */
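+	/*
+	 * Illustrative example: rw_wl_nop_cycles = 3 falls into case 1 above;
+	 * CNTR 3 is loaded with 3 - 1 = 2 below, so the NOP micro-instruction
+	 * loops for the required three cycles before the DQS/data
+	 * micro-instruction runs.
+	 */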
+
+	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
+
+	if (rw_wl_nop_cycles == -1) {
+		/*
+		 * CNTR 2 - We want to execute the special write operation that
+		 * turns on DQS right away and then skip directly to the
+		 * instruction that sends out the data. We set the counter to a
+		 * large number so that the jump is always taken.
+		 */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0xFF, SDR_WRITE);
+
+		/* CNTR 3 - Not used */
+		if (test_dm) {
+			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2,
+				0, RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA, SDR_WRITE);
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3,
+				0, RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, SDR_WRITE);
+		} else {
+			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2,
+				0, RW_MGR_LFSR_WR_RD_BANK_0_DATA, SDR_WRITE);
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3,
+				0, RW_MGR_LFSR_WR_RD_BANK_0_NOP, SDR_WRITE);
+		}
+	} else if (rw_wl_nop_cycles == 0) {
+		/*
+		 * CNTR 2 - We want to skip the NOP operation and go straight
+		 * to the DQS enable instruction. We set the counter to a large
+		 * number so that the jump is always taken.
+		 */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0xFF, SDR_WRITE);
+
+		/* CNTR 3 - Not used */
+		if (test_dm) {
+			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2,
+				0, RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS, SDR_WRITE);
+		} else {
+			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2,
+				0, RW_MGR_LFSR_WR_RD_BANK_0_DQS, SDR_WRITE);
+		}
+	} else {
+		/*
+		 * CNTR 2 - In this case we want to execute the next
+		 * instruction and NOT take the jump. So we set the counter
+		 * to 0. The jump address doesn't matter.
+		 */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr2, 0, 0x0, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add2, 0,
+			0x0, SDR_WRITE);
+
+		/* CNTR 3 - Set the nop counter to the number of cycles we
+		need to loop for, minus 1 */
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr3, 0,
+			rw_wl_nop_cycles - 1, SDR_WRITE);
+		if (test_dm) {
+			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3,
+				0, RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP, SDR_WRITE);
+		} else {
+			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
+			sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add3,
+				0, RW_MGR_LFSR_WR_RD_BANK_0_NOP, SDR_WRITE);
+		}
+	}
+
+	sdr_ops((u32 *)RW_MGR_RESET_READ_DATAPATH, 0, 0, SDR_WRITE);
+
+	if (quick_write_mode)
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x08, SDR_WRITE);
+	else
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x40, SDR_WRITE);
+
+	sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+		mcc_instruction, SDR_WRITE);
+
+	/*
+	 * CNTR 1 - This is used to ensure enough time elapses
+	 * for read data to come back.
+	 */
+	sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x30, SDR_WRITE);
+
+	if (test_dm) {
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT, SDR_WRITE);
+	} else {
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			RW_MGR_LFSR_WR_RD_BANK_0_WAIT, SDR_WRITE);
+	}
+
+	sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, (group << 2), mcc_instruction,
+		SDR_WRITE);
+}
+
+/* Test writes, can check for a single bit pass or multiple bit pass */
+static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
+	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
+	uint32_t *bit_chk, uint32_t all_ranks)
+{
+	uint32_t r;
+	uint32_t correct_mask_vg;
+	uint32_t tmp_bit_chk;
+	uint32_t vg;
+	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+
+	*bit_chk = param->write_correct_mask;
+	correct_mask_vg = param->write_correct_mask_vg;
+
+	for (r = rank_bgn; r < rank_end; r++) {
+		if (param->skip_ranks[r]) {
+			/* request to skip the rank */
+
+			continue;
+		}
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+		tmp_bit_chk = 0;
+		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
+			/* reset the fifos to get pointers to known state */
+			sdr_ops(&phy_mgr_cmd->fifo_reset, 0, 0, SDR_WRITE);
+
+			tmp_bit_chk = tmp_bit_chk <<
+				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
+				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
+			rw_mgr_mem_calibrate_write_test_issue(write_group *
+				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
+				use_dm);
+
+			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg &
+				~(sdr_ops((u32 *)BASE_RW_MGR, 0, 0, SDR_READ)));
+			if (vg == 0)
+				break;
+		}
+		*bit_chk &= tmp_bit_chk;
+	}
+
+	if (all_correct) {
+		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+		debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : " BTFLD_FMT " == "
+			BTFLD_FMT " => %lu", write_group, use_dm,
+			*bit_chk, param->write_correct_mask,
+			(long unsigned int)(*bit_chk ==
+			param->write_correct_mask));
+		return *bit_chk == param->write_correct_mask;
+	} else {
+		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : " BTFLD_FMT " != ",
+		       write_group, use_dm, *bit_chk);
+		debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
+			(long unsigned int)(*bit_chk != 0));
+		return *bit_chk != 0x00;
+	}
+}
+
+static inline uint32_t rw_mgr_mem_calibrate_write_test_all_ranks
+(uint32_t write_group, uint32_t use_dm, uint32_t all_correct, uint32_t *bit_chk)
+{
+	return rw_mgr_mem_calibrate_write_test(0, write_group,
+		use_dm, all_correct, bit_chk, 1);
+}
+
+/*
+ * Center all windows. Do per-bit deskew to possibly increase the size of
+ * certain windows.
+ */
+static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
+	uint32_t write_group, uint32_t test_bgn)
+{
+	uint32_t i, p, min_index;
+	int32_t d;
+	/*
+	 * Store these as signed since there are comparisons with
+	 * signed numbers.
+	 */
+	uint32_t bit_chk;
+	uint32_t sticky_bit_chk;
+	int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
+	int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
+	int32_t mid;
+	int32_t mid_min, orig_mid_min;
+	int32_t new_dqs, start_dqs, shift_dq;
+	int32_t dq_margin, dqs_margin, dm_margin;
+	uint32_t stop;
+	uint32_t temp_dq_out1_delay;
+
+	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
+	BFM_STAGE("writes_center");
+
+	dm_margin = 0;
+
+	start_dqs = sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY,
+			    (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2), 0, SDR_READ);
+
+	/* per-bit deskew */
+
+	/*
+	 * set the left and right edge of each bit to an illegal value
+	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
+	 */
+	sticky_bit_chk = 0;
+	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+		left_edge[i]  = IO_IO_OUT1_DELAY_MAX + 1;
+		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
+	}
+
+	/* Search for the left edge of the window for each bit */
+	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
+		scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d);
+
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+		/*
+		 * Stop searching when the read test doesn't pass AND when
+		 * we've seen a passing read on every bit.
+		 */
+		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+			0, PASS_ONE_BIT, &bit_chk, 0);
+		sticky_bit_chk = sticky_bit_chk | bit_chk;
+		stop = stop && (sticky_bit_chk == param->write_correct_mask);
+		debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => " BTFLD_FMT
+			" == " BTFLD_FMT " && %u [bit_chk=" BTFLD_FMT "]\n",
+			d, sticky_bit_chk, param->write_correct_mask,
+			stop, bit_chk);
+
+		if (stop == 1) {
+			break;
+		} else {
+			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+				if (bit_chk & 1) {
+					/*
+					 * Remember a passing test as the
+					 * left_edge.
+					 */
+					left_edge[i] = d;
+				} else {
+					/*
+					 * If a left edge has not been seen
+					 * yet, then a future passing test will
+					 * mark this edge as the right edge.
+					 */
+					if (left_edge[i] ==
+						IO_IO_OUT1_DELAY_MAX + 1) {
+						right_edge[i] = -(d + 1);
+					}
+				}
+				debug_cond(DLEVEL == 2, "write_center[l,d=%d]:", d);
+				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
+					   (int)(bit_chk & 1), i, left_edge[i]);
+				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
+				       right_edge[i]);
+				bit_chk = bit_chk >> 1;
+			}
+		}
+	}
+
+	/* Reset DQ delay chains to 0 */
+	scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0);
+	sticky_bit_chk = 0;
+	for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
+		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
+			   %d right_edge[%u]: %d\n", __func__, __LINE__,
+			   i, left_edge[i], i, right_edge[i]);
+
+		/*
+		 * Check for cases where we haven't found the left edge,
+		 * which makes our assignment of the right edge invalid.
+		 * Reset it to the illegal value.
+		 */
+		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
+		    (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
+			right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
+			debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
+				   right_edge[%u]: %d\n", __func__, __LINE__,
+				   i, right_edge[i]);
+		}
+
+		/*
+		 * Reset sticky bit (except for bits where we have
+		 * seen the left edge).
+		 */
+		sticky_bit_chk = sticky_bit_chk << 1;
+		if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
+			sticky_bit_chk = sticky_bit_chk | 1;
+
+		if (i == 0)
+			break;
+	}
+
+	/* Search for the right edge of the window for each bit */
+	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
+		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
+							d + start_dqs);
+
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+		/*
+		 * Stop searching when the read test doesn't pass AND when
+		 * we've seen a passing read on every bit.
+		 */
+		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+			0, PASS_ONE_BIT, &bit_chk, 0);
+
+		sticky_bit_chk = sticky_bit_chk | bit_chk;
+		stop = stop && (sticky_bit_chk == param->write_correct_mask);
+
+		debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => " BTFLD_FMT " == "
+			   BTFLD_FMT " && %u\n", d, sticky_bit_chk,
+			   param->write_correct_mask, stop);
+
+		if (stop == 1) {
+			if (d == 0) {
+				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
+					i++) {
+					/* d = 0 failed, but it passed when
+					testing the left edge, so it must be
+					marginal, set it to -1 */
+					if (right_edge[i] ==
+						IO_IO_OUT1_DELAY_MAX + 1 &&
+						left_edge[i] !=
+						IO_IO_OUT1_DELAY_MAX + 1) {
+						right_edge[i] = -1;
+					}
+				}
+			}
+			break;
+		} else {
+			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+				if (bit_chk & 1) {
+					/*
+					 * Remember a passing test as
+					 * the right_edge.
+					 */
+					right_edge[i] = d;
+				} else {
+					if (d != 0) {
+						/*
+						 * If a right edge has not
+						 * been seen yet, then a future
+						 * passing test will mark this
+						 * edge as the left edge.
+						 */
+						if (right_edge[i] ==
+						    IO_IO_OUT1_DELAY_MAX + 1)
+							left_edge[i] = -(d + 1);
+					} else {
+						/*
+						 * d = 0 failed, but it passed
+						 * when testing the left edge,
+						 * so it must be marginal, set
+						 * it to -1.
+						 */
+						if (right_edge[i] ==
+						    IO_IO_OUT1_DELAY_MAX + 1 &&
+						    left_edge[i] !=
+						    IO_IO_OUT1_DELAY_MAX + 1)
+							right_edge[i] = -1;
+						/*
+						 * If a right edge has not been
+						 * seen yet, then a future
+						 * passing test will mark this
+						 * edge as the left edge.
+						 */
+						else if (right_edge[i] ==
+							IO_IO_OUT1_DELAY_MAX +
+							1)
+							left_edge[i] = -(d + 1);
+					}
+				}
+				debug_cond(DLEVEL == 2, "write_center[r,d=%d]:", d);
+				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
+					   (int)(bit_chk & 1), i, left_edge[i]);
+				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
+					   right_edge[i]);
+				bit_chk = bit_chk >> 1;
+			}
+		}
+	}
+
+	/* Check that all bits have a window */
+	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
+			   %d right_edge[%u]: %d", __func__, __LINE__,
+			   i, left_edge[i], i, right_edge[i]);
+		BFM_GBL_SET(dq_write_left_edge[write_group][i], left_edge[i]);
+		BFM_GBL_SET(dq_write_right_edge[write_group][i], right_edge[i]);
+		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
+		    (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
+			set_failing_group_stage(test_bgn + i,
+						CAL_STAGE_WRITES,
+						CAL_SUBSTAGE_WRITES_CENTER);
+			return 0;
+		}
+	}
+
+	/* Find middle of window for each DQ bit */
+	mid_min = left_edge[0] - right_edge[0];
+	min_index = 0;
+	for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+		mid = left_edge[i] - right_edge[i];
+		if (mid < mid_min) {
+			mid_min = mid;
+			min_index = i;
+		}
+	}
+
+	/*
+	 * -mid_min/2 represents the amount that we need to move DQS.
+	 * If mid_min is odd and positive we'll need to add one to
+	 * make sure the rounding in further calculations is correct
+	 * (always bias to the right), so just add 1 for all positive values.
+	 */
+	if (mid_min > 0)
+		mid_min++;
+	mid_min = mid_min / 2;
+	debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
+		   __LINE__, mid_min);
+
+	/* Determine the amount we can change DQS (which is -mid_min) */
+	orig_mid_min = mid_min;
+	new_dqs = start_dqs;
+	mid_min = 0;
+	debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
+		   mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
+	/* Initialize data for export structures */
+	dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
+	dq_margin  = IO_IO_OUT1_DELAY_MAX + 1;
+
+	/* add delay to bring centre of all DQ windows to the same "level" */
+	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
+		/* Use values before divide by 2 to reduce round off error */
+		shift_dq = (left_edge[i] - right_edge[i] -
+			(left_edge[min_index] - right_edge[min_index]))/2  +
+		(orig_mid_min - mid_min);
+
+		debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
+			   [%u]=%d\n", __func__, __LINE__, i, shift_dq);
+
+		temp_dq_out1_delay = sdr_ops((u32 *)SCC_MGR_IO_OUT1_DELAY, i << 2,
+					     0, SDR_READ);
+		if (shift_dq + (int32_t)temp_dq_out1_delay >
+			(int32_t)IO_IO_OUT1_DELAY_MAX) {
+			shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
+		} else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
+			shift_dq = -(int32_t)temp_dq_out1_delay;
+		}
+		debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
+			   i, shift_dq);
+		scc_mgr_set_dq_out1_delay(write_group, i, temp_dq_out1_delay +
+					  shift_dq);
+		scc_mgr_load_dq(i);
+
+		debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
+			   left_edge[i] - shift_dq + (-mid_min),
+			   right_edge[i] + shift_dq - (-mid_min));
+		/* To determine values for export structures */
+		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
+			dq_margin = left_edge[i] - shift_dq + (-mid_min);
+
+		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
+			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
+	}
+
+	/* Move DQS */
+	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
+	sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+	/* Centre DM */
+	BFM_STAGE("dm_center");
+	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
+
+	/*
+	 * set the left and right edge of each bit to an illegal value,
+	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
+	 */
+	left_edge[0]  = IO_IO_OUT1_DELAY_MAX + 1;
+	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
+	int32_t win_best = 0;
+
+	/* Search for the window (or part of it) with the DM shift */
+	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
+		scc_mgr_apply_group_dm_out1_delay(write_group, d);
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
+						    PASS_ALL_BITS, &bit_chk,
+						    0)) {
+			/* USE Set current end of the window */
+			end_curr = -d;
+			/*
+			 * If a starting edge of our window has not been seen
+			 * this is our current start of the DM window.
+			 */
+			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
+				bgn_curr = -d;
+
+			/*
+			 * If the current window is bigger than the best seen,
+			 * set the best seen to be the current window.
+			 */
+			if ((end_curr - bgn_curr + 1) > win_best) {
+				win_best = end_curr - bgn_curr + 1;
+				bgn_best = bgn_curr;
+				end_best = end_curr;
+			}
+		} else {
+			/* We just saw a failing test. Reset temp edge */
+			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+		}
+	}
+
+	/* Reset DM delay chains to 0 */
+	scc_mgr_apply_group_dm_out1_delay(write_group, 0);
+
+	/*
+	 * Check to see if the current window nudges up against 0 delay.
+	 * If so, we need to continue the search by shifting DQS; otherwise
+	 * the DQS search begins as a new search.
+	 */
+	if (end_curr != 0) {
+		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+	}
+
+	/* Search for the window (or part of it) with DQS shifts */
+	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
+		/*
+		 * Note: This only shifts DQS, so are we limiting ourselves
+		 * to the width of DQ unnecessarily?
+		 */
+		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
+							d + new_dqs);
+
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
+						    PASS_ALL_BITS, &bit_chk,
+						    0)) {
+			/* USE Set current end of the window */
+			end_curr = d;
+			/*
+			 * If a beginning edge of our window has not been seen
+			 * this is our current begin of the DM window.
+			 */
+			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
+				bgn_curr = d;
+
+			/*
+			 * If the current window is bigger than the best seen,
+			 * set the best seen to be the current window.
+			 */
+			if ((end_curr - bgn_curr + 1) > win_best) {
+				win_best = end_curr - bgn_curr + 1;
+				bgn_best = bgn_curr;
+				end_best = end_curr;
+			}
+		} else {
+			/* We just saw a failing test. Reset temp edge */
+			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+
+			/*
+			 * Early exit optimization: if the remaining delay
+			 * chain space is less than the largest window already
+			 * seen, we can exit.
+			 */
+			if ((win_best - 1) >
+			    (IO_IO_OUT1_DELAY_MAX - new_dqs - d))
+				break;
+		}
+	}
+
+	/* Assign left and right edge for calibration and reporting */
+	left_edge[0] = -1 * bgn_best;
+	right_edge[0] = end_best;
+
+	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
+		   __LINE__, left_edge[0], right_edge[0]);
+	BFM_GBL_SET(dm_left_edge[write_group][0], left_edge[0]);
+	BFM_GBL_SET(dm_right_edge[write_group][0], right_edge[0]);
+
+	/* Move DQS (back to orig) */
+	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
+
+	/* Move DM */
+
+	/* Find middle of window for the DM bit */
+	mid = (left_edge[0] - right_edge[0]) / 2;
+
+	/* only move right, since we are not moving DQS/DQ */
+	if (mid < 0)
+		mid = 0;
+
+	/* dm_margin should fail if we never find a window */
+	if (win_best == 0)
+		dm_margin = -1;
+	else
+		dm_margin = left_edge[0] - mid;
+
+	scc_mgr_apply_group_dm_out1_delay(write_group, mid);
+	sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
+		   dm_margin=%d\n", __func__, __LINE__, left_edge[0],
+		   right_edge[0], mid, dm_margin);
+	/* Export values */
+	gbl->fom_out += dq_margin + dqs_margin;
+
+	debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
+		   dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
+		   dq_margin, dqs_margin, dm_margin);
+
+	/*
+	 * Do not remove this line as it makes sure all of our
+	 * decisions have been applied.
+	 */
+	sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
+}
+
+/* calibrate the write operations */
+static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
+	uint32_t test_bgn)
+{
+	/* update info for sims */
+	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);
+
+	reg_file_set_stage(CAL_STAGE_WRITES);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
+
+	reg_file_set_group(g);
+
+	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
+		set_failing_group_stage(g, CAL_STAGE_WRITES,
+					CAL_SUBSTAGE_WRITES_CENTER);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
+static void mem_precharge_and_activate(void)
+{
+	uint32_t r;
+
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
+		if (param->skip_ranks[r]) {
+			/* request to skip the rank */
+
+			continue;
+		}
+
+		/* set rank */
+		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
+
+		/* precharge all banks ... */
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+			RW_MGR_PRECHARGE_ALL, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr0, 0, 0x0F, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add0, 0,
+			RW_MGR_ACTIVATE_0_AND_1_WAIT1, SDR_WRITE);
+
+		sdr_ops(&sdr_rw_load_mgr_regs->load_cntr1, 0, 0x0F, SDR_WRITE);
+		sdr_ops(&sdr_rw_load_jump_mgr_regs->load_jump_add1, 0,
+			RW_MGR_ACTIVATE_0_AND_1_WAIT2, SDR_WRITE);
+
+		/* activate rows */
+		sdr_ops((u32 *)RW_MGR_RUN_SINGLE_GROUP, 0,
+			RW_MGR_ACTIVATE_0_AND_1, SDR_WRITE);
+	}
+}
+
+/* Configure various memory related parameters. */
+static void mem_config(void)
+{
+	uint32_t rlat, wlat;
+	uint32_t rw_wl_nop_cycles;
+	uint32_t max_latency;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	/* read in write and read latency */
+	wlat = sdr_ops(&data_mgr->t_wl_add, 0, 0, SDR_READ);
+	wlat += sdr_ops(&data_mgr->mem_t_add, 0, 0, SDR_READ);
+	/* WL for hard phy does not include additive latency */
+
+	/*
+	 * add additional write latency to offset the address/command extra
+	 * clock cycle. We change the AC mux setting causing AC to be delayed
+	 * by one mem clock cycle. Only do this for DDR3
+	 */
+	wlat = wlat + 1;
+
+	rlat = sdr_ops(&data_mgr->t_rl_add, 0, 0, SDR_READ);
+
+	rw_wl_nop_cycles = wlat - 2;
+	gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;
+
+	/*
+	 * For AV/CV, lfifo is hardened and always runs at full rate so
+	 * max latency in AFI clocks, used here, is correspondingly smaller.
+	 */
+	max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1;
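+	/*
+	 * E.g. a hypothetical MAX_LATENCY_COUNT_WIDTH of 5 caps
+	 * gbl->curr_read_lat at 31 AFI clocks below.
+	 */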
+	/* configure for a burst length of 8 */
+
+	/* write latency */
+	/* Adjust Write Latency for Hard PHY */
+	wlat = wlat + 1;
+	/* set a pretty high read latency initially */
+	gbl->curr_read_lat = rlat + 16;
+
+	if (gbl->curr_read_lat > max_latency)
+		gbl->curr_read_lat = max_latency;
+
+	sdr_ops(&phy_mgr_cfg->phy_rlat, 0, gbl->curr_read_lat, SDR_WRITE);
+
+	/* advertise write latency */
+	gbl->curr_write_lat = wlat;
+	sdr_ops(&phy_mgr_cfg->afi_wlat, 0, wlat - 2, SDR_WRITE);
+	/* initialize bit slips */
+	mem_precharge_and_activate();
+}
+
+/* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
+static void mem_skip_calibrate(void)
+{
+	uint32_t vfifo_offset;
+	uint32_t i, j, r;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	/* Need to update every shadow register set used by the interface */
+	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+		r += NUM_RANKS_PER_SHADOW_REG) {
+		/*
+		 * Set output phase alignment settings appropriate for
+		 * skip calibration.
+		 */
+		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+			scc_mgr_set_dqs_en_phase(i, 0);
+#if IO_DLL_CHAIN_LENGTH == 6
+			scc_mgr_set_dqdqs_output_phase(i, 6);
+#else
+			scc_mgr_set_dqdqs_output_phase(i, 7);
+#endif
+			/*
+			 * Case:33398
+			 *
+			 * Write data arrives to the I/O two cycles before write
+			 * latency is reached (720 deg).
+			 *   -> due to bit-slip in a/c bus
+			 *   -> to allow board skew where dqs is longer than ck
+			 *      -> how often can this happen!?
+			 *      -> can claim back some ptaps for high freq
+			 *       support if we can relax this, but i digress...
+			 *
+			 * The write_clk leads mem_ck by 90 deg
+			 * The minimum ptap of the OPA is 180 deg
+			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
+			 * The write_clk is always delayed by 2 ptaps
+			 *
+			 * Hence, to make DQS aligned to CK, we need to delay
+			 * DQS by:
+			 *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
+			 *
+			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
+			 * gives us the number of ptaps, which simplifies to:
+			 *
+			 *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
+			 */
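+			/*
+			 * For instance, with IO_DLL_CHAIN_LENGTH = 8 each
+			 * ptap spans 45 deg, so the required DQS delay works
+			 * out to (720 - 90 - 180 - 90) / 45 = 8 ptaps, which
+			 * matches 1.25 * 8 - 2 = 8.
+			 */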
+			scc_mgr_set_dqdqs_output_phase(i, (1.25 *
+				IO_DLL_CHAIN_LENGTH - 2));
+		}
+		sdr_ops(&sdr_scc_mgr->dqs_ena, 0, 0xff, SDR_WRITE);
+		sdr_ops(&sdr_scc_mgr->dqs_io_ena, 0, 0xff, SDR_WRITE);
+
+		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
+			sdr_ops((u32 *)SCC_MGR_GROUP_COUNTER, 0, i, SDR_WRITE);
+			sdr_ops(&sdr_scc_mgr->dq_ena, 0, 0xff, SDR_WRITE);
+			sdr_ops(&sdr_scc_mgr->dm_ena, 0, 0xff, SDR_WRITE);
+		}
+		sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	}
+
+	/* Compensate for simulation model behaviour */
+	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+		scc_mgr_set_dqs_bus_in_delay(i, 10);
+		scc_mgr_load_dqs(i);
+	}
+	sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+
+	/*
+	 * ArriaV has hard FIFOs that can only be initialized by incrementing
+	 * in sequencer.
+	 */
+	vfifo_offset = CALIB_VFIFO_OFFSET;
+	for (j = 0; j < vfifo_offset; j++) {
+		sdr_ops(&phy_mgr_cmd->inc_vfifo_hard_phy, 0, 0xff, SDR_WRITE);
+	}
+	sdr_ops(&phy_mgr_cmd->fifo_reset, 0, 0, SDR_WRITE);
+
+	/*
+	 * For ACV with hard lfifo, we get the skip-cal setting from
+	 * generation-time constant.
+	 */
+	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
+	sdr_ops(&phy_mgr_cfg->phy_rlat, 0, gbl->curr_read_lat, SDR_WRITE);
+}
+
+/* Memory calibration entry point */
+static uint32_t mem_calibrate(void)
+{
+	uint32_t i;
+	uint32_t rank_bgn, sr;
+	uint32_t write_group, write_test_bgn;
+	uint32_t read_group, read_test_bgn;
+	uint32_t run_groups, current_run;
+	uint32_t failing_groups = 0;
+	uint32_t group_failed = 0;
+	uint32_t sr_failed = 0;
+
+	debug("%s:%d\n", __func__, __LINE__);
+	/* Initialize the data settings */
+
+	gbl->error_substage = CAL_SUBSTAGE_NIL;
+	gbl->error_stage = CAL_STAGE_NIL;
+	gbl->error_group = 0xff;
+	gbl->fom_in = 0;
+	gbl->fom_out = 0;
+
+	mem_config();
+
+	uint32_t bypass_mode = 0x1;
+	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
+		sdr_ops((u32 *)SCC_MGR_GROUP_COUNTER, 0, i, SDR_WRITE);
+		scc_set_bypass_mode(i, bypass_mode);
+	}
+
+	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
+		/*
+		 * Set VFIFO and LFIFO to instant-on settings in skip
+		 * calibration mode.
+		 */
+		mem_skip_calibrate();
+	} else {
+		for (i = 0; i < NUM_CALIB_REPEAT; i++) {
+			/*
+			 * Zero all delay chain/phase settings for all
+			 * groups and all shadow register sets.
+			 */
+			scc_mgr_zero_all();
+
+			run_groups = ~param->skip_groups;
+
+			for (write_group = 0, write_test_bgn = 0; write_group
+				< RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
+				write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
+				/* Initialize the group failure flag */
+				group_failed = 0;
+
+				BFM_GBL_SET(vfifo_idx, 0);
+				current_run = run_groups & ((1 <<
+					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
+				run_groups = run_groups >>
+					RW_MGR_NUM_DQS_PER_WRITE_GROUP;
+
+				if (current_run == 0)
+					continue;
+
+				sdr_ops((u32 *)SCC_MGR_GROUP_COUNTER, 0,
+					write_group, SDR_WRITE);
+				scc_mgr_zero_group(write_group, write_test_bgn,
+						   0);
+
+				for (read_group = write_group *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
+					read_test_bgn = 0;
+					read_group < (write_group + 1) *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
+					group_failed == 0;
+					read_group++, read_test_bgn +=
+					RW_MGR_MEM_DQ_PER_READ_DQS) {
+					/* Calibrate the VFIFO */
+					if (!((STATIC_CALIB_STEPS) &
+						CALIB_SKIP_VFIFO)) {
+						if (!rw_mgr_mem_calibrate_vfifo
+							(read_group,
+							read_test_bgn)) {
+							group_failed = 1;
+
+							if (!(gbl->
+							phy_debug_mode_flags &
+						PHY_DEBUG_SWEEP_ALL_GROUPS)) {
+								return 0;
+							}
+						}
+					}
+				}
+
+				/* Calibrate the output side */
+				if (group_failed == 0)	{
+					for (rank_bgn = 0, sr = 0; rank_bgn
+						< RW_MGR_MEM_NUMBER_OF_RANKS;
+						rank_bgn +=
+						NUM_RANKS_PER_SHADOW_REG,
+						++sr) {
+						sr_failed = 0;
+						if (!((STATIC_CALIB_STEPS) &
+						CALIB_SKIP_WRITES)) {
+							if ((STATIC_CALIB_STEPS)
+						& CALIB_SKIP_DELAY_SWEEPS) {
+						/* not needed in quick mode! */
+							} else {
+						/*
+						 * Determine if this set of
+						 * ranks should be skipped
+						 * entirely.
+						 */
+					if (!param->skip_shadow_regs[sr]) {
+						if (!rw_mgr_mem_calibrate_writes
+						(rank_bgn, write_group,
+						write_test_bgn)) {
+							sr_failed = 1;
+							if (!(gbl->
+							phy_debug_mode_flags &
+						PHY_DEBUG_SWEEP_ALL_GROUPS)) {
+								return 0;
+									}
+									}
+								}
+							}
+						}
+						if (sr_failed != 0)
+							group_failed = 1;
+					}
+				}
+
+				if (group_failed == 0) {
+					for (read_group = write_group *
+					RW_MGR_MEM_IF_READ_DQS_WIDTH /
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
+					read_test_bgn = 0;
+						read_group < (write_group + 1)
+						* RW_MGR_MEM_IF_READ_DQS_WIDTH
+						/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
+						group_failed == 0;
+						read_group++, read_test_bgn +=
+						RW_MGR_MEM_DQ_PER_READ_DQS) {
+						if (!((STATIC_CALIB_STEPS) &
+							CALIB_SKIP_WRITES)) {
+					if (!rw_mgr_mem_calibrate_vfifo_end
+						(read_group, read_test_bgn)) {
+							group_failed = 1;
+
+						if (!(gbl->phy_debug_mode_flags
+						& PHY_DEBUG_SWEEP_ALL_GROUPS)) {
+								return 0;
+								}
+							}
+						}
+					}
+				}
+
+				if (group_failed != 0)
+					failing_groups++;
+			}
+
+			/*
+			 * USER If there are any failing groups then report
+			 * the failure.
+			 */
+			if (failing_groups != 0)
+				return 0;
+
+			/* Calibrate the LFIFO */
+			if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
+				/*
+				 * If we're skipping groups as part of debug,
+				 * don't calibrate LFIFO.
+				 */
+				if (param->skip_groups == 0) {
+					if (!rw_mgr_mem_calibrate_lfifo())
+						return 0;
+				}
+			}
+		}
+	}
+
+	/*
+	 * Do not remove this line as it makes sure all of our decisions
+	 * have been applied.
+	 */
+	sdr_ops(&sdr_scc_mgr->update, 0, 0, SDR_WRITE);
+	return 1;
+}
+
+static uint32_t run_mem_calibrate(void)
+{
+	uint32_t pass;
+	uint32_t debug_info;
+
+	/* Reset pass/fail status shown on afi_cal_success/fail */
+	sdr_ops(&phy_mgr_cfg->cal_status, 0, PHY_MGR_CAL_RESET, SDR_WRITE);
+
+	debug("%s:%d\n", __func__, __LINE__);
+	BFM_STAGE("calibrate");
+
+	/* stop the tracking manager */
+	uint32_t ctrlcfg = sdr_ops((u32 *)BASE_MMR, 0, 0, SDR_READ);
+
+	sdr_ops((u32 *)BASE_MMR, 0, ctrlcfg & 0xFFBFFFFF, SDR_WRITE);
+
+	initialize();
+	rw_mgr_mem_initialize();
+
+	pass = mem_calibrate();
+
+	mem_precharge_and_activate();
+	sdr_ops(&phy_mgr_cmd->fifo_reset, 0, 0, SDR_WRITE);
+
+	if (pass) {
+		BFM_STAGE("handoff");
+	}
+
+	/*
+	 * Handoff:
+	 * Don't return control of the PHY back to AFI when in debug mode.
+	 */
+	if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
+		rw_mgr_mem_handoff();
+		/*
+		 * In Hard PHY this is a 2-bit control:
+		 * 0: AFI Mux Select
+		 * 1: DDIO Mux Select
+		 */
+		sdr_ops(&phy_mgr_cfg->mux_sel, 0, 0x2, SDR_WRITE);
+	}
+
+	sdr_ops((u32 *)BASE_MMR, 0, ctrlcfg, SDR_WRITE);
+
+	if (pass) {
+		printf("%s: CALIBRATION PASSED\n", __FILE__);
+
+		gbl->fom_in /= 2;
+		gbl->fom_out /= 2;
+
+		if (gbl->fom_in > 0xff)
+			gbl->fom_in = 0xff;
+
+		if (gbl->fom_out > 0xff)
+			gbl->fom_out = 0xff;
+
+		/* Update the FOM in the register file */
+		debug_info = gbl->fom_in;
+		debug_info |= gbl->fom_out << 8;
+		sdr_ops(&sdr_reg_file->fom, 0, debug_info, SDR_WRITE);
+
+		sdr_ops(&phy_mgr_cfg->cal_debug_info, 0, debug_info, SDR_WRITE);
+		sdr_ops(&phy_mgr_cfg->cal_status, 0, PHY_MGR_CAL_SUCCESS,
+			SDR_WRITE);
+	} else {
+		printf("%s: CALIBRATION FAILED\n", __FILE__);
+
+		debug_info = gbl->error_stage;
+		debug_info |= gbl->error_substage << 8;
+		debug_info |= gbl->error_group << 16;
+
+		sdr_ops(&sdr_reg_file->failing_stage, 0, debug_info, SDR_WRITE);
+		sdr_ops(&phy_mgr_cfg->cal_debug_info, 0, debug_info, SDR_WRITE);
+		sdr_ops(&phy_mgr_cfg->cal_status, 0, PHY_MGR_CAL_FAIL, SDR_WRITE);
+
+		/* Update the failing group/stage in the register file */
+		debug_info = gbl->error_stage;
+		debug_info |= gbl->error_substage << 8;
+		debug_info |= gbl->error_group << 16;
+		sdr_ops(&sdr_reg_file->failing_stage, 0, debug_info, SDR_WRITE);
+	}
+
+	return pass;
+}
+
+static void hc_initialize_rom_data(void)
+{
+	uint32_t i;
+
+	for (i = 0; i < inst_rom_init_size; i++) {
+		uint32_t data = inst_rom_init[i];
+		sdr_ops((u32 *)(RW_MGR_INST_ROM_WRITE), (i << 2), data, SDR_WRITE);
+	}
+
+	for (i = 0; i < ac_rom_init_size; i++) {
+		uint32_t data = ac_rom_init[i];
+		sdr_ops((u32 *)(RW_MGR_AC_ROM_WRITE), (i << 2), data, SDR_WRITE);
+	}
+}
+
+static void initialize_reg_file(void)
+{
+	/* Initialize the register file with the correct data */
+	sdr_ops(&sdr_reg_file->signature, 0, REG_FILE_INIT_SEQ_SIGNATURE, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->debug_data_addr, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->cur_stage, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->fom, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->failing_stage, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->debug1, 0, 0, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->debug2, 0, 0, SDR_WRITE);
+}
+
+static void initialize_hps_phy(void)
+{
+	/* These may need to be included also: */
+	/* wrap_back_en (false) */
+	/* atpg_en (false) */
+	/* pipelineglobalenable (true) */
+
+	uint32_t reg;
+	/*
+	 * Tracking also gets configured here because it's in the
+	 * same register.
+	 */
+	uint32_t trk_sample_count = 7500;
+	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
+	/*
+	 * Format is number of outer loops in the 16 MSB, sample
+	 * count in 16 LSB.
+	 */
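+	/* Here: (10 << 16) | 100 = 10 outer loops of 100 samples each. */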
+
+	reg = 0;
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
+	/*
+	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
+	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
+	 */
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
+		trk_sample_count);
+	sdr_ops((u32 *)BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_OFFSET, reg,
+		SDR_WRITE);
+
+	reg = 0;
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
+		trk_sample_count >>
+		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
+		trk_long_idle_sample_count);
+	sdr_ops((u32 *)BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_OFFSET, reg,
+		SDR_WRITE);
+
+	reg = 0;
+	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
+		trk_long_idle_sample_count >>
+		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
+	sdr_ops((u32 *)BASE_MMR, SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_OFFSET, reg,
+		SDR_WRITE);
+}
+
+static void initialize_tracking(void)
+{
+	uint32_t concatenated_longidle = 0x0;
+	uint32_t concatenated_delays = 0x0;
+	uint32_t concatenated_rw_addr = 0x0;
+	uint32_t concatenated_refresh = 0x0;
+	uint32_t dtaps_per_ptap;
+	uint32_t tmp_delay;
+
+	/*
+	 * compute usable version of value in case we skip full
+	 * computation later
+	 */
+	dtaps_per_ptap = 0;
+	tmp_delay = 0;
+	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
+		dtaps_per_ptap++;
+		tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
+	}
+	dtaps_per_ptap--;
+
+	concatenated_longidle = concatenated_longidle ^ 10;
+		/* longidle outer loop */
+	concatenated_longidle = concatenated_longidle << 16;
+	concatenated_longidle = concatenated_longidle ^ 100;
+		/* longidle sample count */
+	concatenated_delays = concatenated_delays ^ 243;
+		/* trfc, worst case of 933MHz 4Gb */
+	concatenated_delays = concatenated_delays << 8;
+	concatenated_delays = concatenated_delays ^ 14;
+		/* trcd, worst case */
+	concatenated_delays = concatenated_delays << 8;
+	concatenated_delays = concatenated_delays ^ 10;
+		/* vfifo wait */
+	concatenated_delays = concatenated_delays << 8;
+	concatenated_delays = concatenated_delays ^ 4;
+		/* mux delay */
+
+	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
+	concatenated_rw_addr = concatenated_rw_addr << 8;
+	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
+	concatenated_rw_addr = concatenated_rw_addr << 8;
+	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
+	concatenated_rw_addr = concatenated_rw_addr << 8;
+	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;
+
+	concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
+	concatenated_refresh = concatenated_refresh << 24;
+	concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
+
+	/* Initialize the register file with the correct data */
+	sdr_ops(&sdr_reg_file->dtaps_per_ptap, 0, dtaps_per_ptap, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_sample_count, 0, 7500, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_longidle, 0, concatenated_longidle, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->delays, 0, concatenated_delays, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_rw_mgr_addr, 0, concatenated_rw_addr, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_read_dqs_width, 0,
+		RW_MGR_MEM_IF_READ_DQS_WIDTH, SDR_WRITE);
+	sdr_ops(&sdr_reg_file->trk_rfsh, 0, concatenated_refresh, SDR_WRITE);
+}
+
+int sdram_calibration_full(void)
+{
+	struct param_type my_param;
+	struct gbl_type my_gbl;
+	uint32_t pass;
+	uint32_t i;
+
+	param = &my_param;
+	gbl = &my_gbl;
+
+	/* Initialize the debug mode flags */
+	gbl->phy_debug_mode_flags = 0;
+	/* Set the calibration enabled by default */
+	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
+	/*
+	 * Only sweep all groups (regardless of fail state) by default
+	 * Set enabled read test by default.
+	 */
+#if DISABLE_GUARANTEED_READ
+	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
+#endif
+	/* Initialize the register file */
+	initialize_reg_file();
+
+	/* Initialize any PHY CSR */
+	initialize_hps_phy();
+
+	scc_mgr_initialize();
+
+	initialize_tracking();
+
+	/* USER Enable all ranks, groups */
+	for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
+		param->skip_ranks[i] = 0;
+	for (i = 0; i < NUM_SHADOW_REGS; ++i)
+		param->skip_shadow_regs[i] = 0;
+	param->skip_groups = 0;
+
+	printf("%s: Preparing to start memory calibration\n", __FILE__);
+
+	debug("%s:%d\n", __func__, __LINE__);
+	debug_cond(DLEVEL == 1, "DDR3 FULL_RATE ranks=%lu cs/dimm=%lu dq/dqs=%lu,%lu vg/dqs=%lu,%lu",
+	       (long unsigned int)RW_MGR_MEM_NUMBER_OF_RANKS,
+	       (long unsigned int)RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
+	       (long unsigned int)RW_MGR_MEM_DQ_PER_READ_DQS,
+	       (long unsigned int)RW_MGR_MEM_DQ_PER_WRITE_DQS,
+	       (long unsigned int)RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
+	       (long unsigned int)RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
+	debug_cond(DLEVEL == 1, "dqs=%lu,%lu dq=%lu dm=%lu ptap_delay=%lu dtap_delay=%lu",
+	       (long unsigned int)RW_MGR_MEM_IF_READ_DQS_WIDTH,
+	       (long unsigned int)RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
+	       (long unsigned int)RW_MGR_MEM_DATA_WIDTH,
+	       (long unsigned int)RW_MGR_MEM_DATA_MASK_WIDTH,
+	       (long unsigned int)IO_DELAY_PER_OPA_TAP,
+	       (long unsigned int)IO_DELAY_PER_DCHAIN_TAP);
+	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%lu, dll=%lu",
+	       (long unsigned int)IO_DELAY_PER_DQS_EN_DCHAIN_TAP,
+	       (long unsigned int)IO_DLL_CHAIN_LENGTH);
+	debug_cond(DLEVEL == 1, "max values: en_p=%lu dqdqs_p=%lu en_d=%lu dqs_in_d=%lu",
+	       (long unsigned int)IO_DQS_EN_PHASE_MAX,
+	       (long unsigned int)IO_DQDQS_OUT_PHASE_MAX,
+	       (long unsigned int)IO_DQS_EN_DELAY_MAX,
+	       (long unsigned int)IO_DQS_IN_DELAY_MAX);
+	debug_cond(DLEVEL == 1, "io_in_d=%lu io_out1_d=%lu io_out2_d=%lu",
+	       (long unsigned int)IO_IO_IN_DELAY_MAX,
+	       (long unsigned int)IO_IO_OUT1_DELAY_MAX,
+	       (long unsigned int)IO_IO_OUT2_DELAY_MAX);
+	debug_cond(DLEVEL == 1, "dqs_in_reserve=%lu dqs_out_reserve=%lu",
+	       (long unsigned int)IO_DQS_IN_RESERVE,
+	       (long unsigned int)IO_DQS_OUT_RESERVE);
+
+	hc_initialize_rom_data();
+
+	/* update info for sims */
+	reg_file_set_stage(CAL_STAGE_NIL);
+	reg_file_set_group(0);
+
+	/*
+	 * Load global needed for those actions that require
+	 * some dynamic calibration support.
+	 */
+	dyn_calib_steps = STATIC_CALIB_STEPS;
+	/*
+	 * Load global to allow dynamic selection of delay loop settings
+	 * based on calibration mode.
+	 */
+	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
+		skip_delay_mask = 0xff;
+	else
+		skip_delay_mask = 0x0;
+
+	pass = run_mem_calibrate();
+
+	printf("%s: Calibration complete\n", __FILE__);
+	return pass;
+}
diff --git a/drivers/ddr/altera/sequencer.h b/drivers/ddr/altera/sequencer.h
new file mode 100644
index 0000000..29f61a8
--- /dev/null
+++ b/drivers/ddr/altera/sequencer.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2015
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#ifndef _SEQUENCER_H_
+#define _SEQUENCER_H_
+
+#define RW_MGR_NUM_DM_PER_WRITE_GROUP (RW_MGR_MEM_DATA_MASK_WIDTH \
+	/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH)
+#define RW_MGR_NUM_TRUE_DM_PER_WRITE_GROUP (RW_MGR_TRUE_MEM_DATA_MASK_WIDTH \
+	/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH)
+
+#define RW_MGR_NUM_DQS_PER_WRITE_GROUP (RW_MGR_MEM_IF_READ_DQS_WIDTH \
+	/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH)
+#define NUM_RANKS_PER_SHADOW_REG (RW_MGR_MEM_NUMBER_OF_RANKS / NUM_SHADOW_REGS)
+
+#define RW_MGR_RUN_SINGLE_GROUP	(BASE_RW_MGR)
+#define RW_MGR_RUN_ALL_GROUPS	(BASE_RW_MGR + 0x0400)
+
+#define RW_MGR_DI_BASE		(BASE_RW_MGR + 0x0020)
+
+#define RW_MGR_MEM_NUMBER_OF_RANKS	1
+#define NUM_SHADOW_REGS			1
+
+#define RW_MGR_RESET_READ_DATAPATH	(BASE_RW_MGR + 0x1000)
+#define RW_MGR_SET_CS_AND_ODT_MASK	(BASE_RW_MGR + 0x1400)
+
+#define RW_MGR_RANK_NONE		0xFF
+#define RW_MGR_RANK_ALL			0x00
+
+#define RW_MGR_ODT_MODE_OFF		0
+#define RW_MGR_ODT_MODE_READ_WRITE	1
+
+#define NUM_CALIB_REPEAT		1
+
+#define NUM_READ_TESTS			7
+#define NUM_READ_PB_TESTS		7
+#define NUM_WRITE_TESTS			15
+#define NUM_WRITE_PB_TESTS		31
+
+#define PASS_ALL_BITS			1
+#define PASS_ONE_BIT			0
+
+/* calibration stages */
+#define CAL_STAGE_NIL			0
+#define CAL_STAGE_VFIFO			1
+#define CAL_STAGE_WLEVEL		2
+#define CAL_STAGE_LFIFO			3
+#define CAL_STAGE_WRITES		4
+#define CAL_STAGE_FULLTEST		5
+#define CAL_STAGE_REFRESH		6
+#define CAL_STAGE_CAL_SKIPPED		7
+#define CAL_STAGE_CAL_ABORTED		8
+#define CAL_STAGE_VFIFO_AFTER_WRITES	9
+
+/* calibration substages */
+#define CAL_SUBSTAGE_NIL		0
+#define CAL_SUBSTAGE_GUARANTEED_READ	1
+#define CAL_SUBSTAGE_DQS_EN_PHASE	2
+#define CAL_SUBSTAGE_VFIFO_CENTER	3
+#define CAL_SUBSTAGE_WORKING_DELAY	1
+#define CAL_SUBSTAGE_LAST_WORKING_DELAY	2
+#define CAL_SUBSTAGE_WLEVEL_COPY	3
+#define CAL_SUBSTAGE_WRITES_CENTER	1
+#define CAL_SUBSTAGE_READ_LATENCY	1
+#define CAL_SUBSTAGE_REFRESH		1
+
+#define MAX_RANKS			(RW_MGR_MEM_NUMBER_OF_RANKS)
+#define MAX_DQS				(RW_MGR_MEM_IF_WRITE_DQS_WIDTH > \
+					RW_MGR_MEM_IF_READ_DQS_WIDTH ? \
+					RW_MGR_MEM_IF_WRITE_DQS_WIDTH : \
+					RW_MGR_MEM_IF_READ_DQS_WIDTH)
+#define MAX_DQ				(RW_MGR_MEM_DATA_WIDTH)
+#define MAX_DM				(RW_MGR_MEM_DATA_MASK_WIDTH)
+
+/* length of VFIFO, from SW_MACROS */
+#define VFIFO_SIZE			(READ_VALID_FIFO_SIZE)
+
+/* MarkW: how should these base addresses be done for A-V? */
+#define BASE_PTR_MGR			(0x00040000)
+#define BASE_SCC_MGR			(0x00058000)
+#define BASE_REG_FILE			(0x00070000)
+#define BASE_TIMER			(0x00078000)
+#define BASE_PHY_MGR			(0x00088000)
+#define BASE_RW_MGR			(0x00090000)
+#define BASE_DATA_MGR			(0x00098000)
+#define BASE_MMR                        (0x000C0000)
+#define BASE_TRK_MGR			(0x000D0000)
+
+#define SCC_MGR_GROUP_COUNTER			(BASE_SCC_MGR + 0x0000)
+#define SCC_MGR_DQS_IN_DELAY			(BASE_SCC_MGR + 0x0100)
+#define SCC_MGR_DQS_EN_PHASE			(BASE_SCC_MGR + 0x0200)
+#define SCC_MGR_DQS_EN_DELAY			(BASE_SCC_MGR + 0x0300)
+#define SCC_MGR_DQDQS_OUT_PHASE			(BASE_SCC_MGR + 0x0400)
+#define SCC_MGR_OCT_OUT1_DELAY			(BASE_SCC_MGR + 0x0500)
+#define SCC_MGR_IO_OUT1_DELAY			(BASE_SCC_MGR + 0x0700)
+#define SCC_MGR_IO_IN_DELAY			(BASE_SCC_MGR + 0x0900)
+
+/* HHP-HPS-specific versions of some commands */
+#define SCC_MGR_DQS_EN_DELAY_GATE		(BASE_SCC_MGR + 0x0600)
+#define SCC_MGR_IO_OE_DELAY			(BASE_SCC_MGR + 0x0800)
+#define SCC_MGR_HHP_GLOBALS			(BASE_SCC_MGR + 0x0A00)
+#define SCC_MGR_HHP_RFILE			(BASE_SCC_MGR + 0x0B00)
+#define SCC_MGR_AFI_CAL_INIT			(BASE_SCC_MGR + 0x0D00)
+
+#define SDR_PHYGRP_SCCGRP_ADDRESS		0x0
+#define SDR_PHYGRP_PHYMGRGRP_ADDRESS		0x1000
+#define SDR_PHYGRP_RWMGRGRP_ADDRESS		0x2000
+#define SDR_PHYGRP_DATAMGRGRP_ADDRESS		0x4000
+#define SDR_PHYGRP_REGFILEGRP_ADDRESS		0x4800
+
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_OFFSET 0x150
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_OFFSET 0x154
+#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_OFFSET 0x158
+
+#define PHY_MGR_CAL_RESET		(0)
+#define PHY_MGR_CAL_SUCCESS		(1)
+#define PHY_MGR_CAL_FAIL		(2)
+
+#define CALIB_SKIP_DELAY_LOOPS		(1 << 0)
+#define CALIB_SKIP_ALL_BITS_CHK		(1 << 1)
+#define CALIB_SKIP_DELAY_SWEEPS		(1 << 2)
+#define CALIB_SKIP_VFIFO		(1 << 3)
+#define CALIB_SKIP_LFIFO		(1 << 4)
+#define CALIB_SKIP_WLEVEL		(1 << 5)
+#define CALIB_SKIP_WRITES		(1 << 6)
+#define CALIB_SKIP_FULL_TEST		(1 << 7)
+#define CALIB_SKIP_ALL			(CALIB_SKIP_VFIFO | \
+				CALIB_SKIP_LFIFO | CALIB_SKIP_WLEVEL | \
+				CALIB_SKIP_WRITES | CALIB_SKIP_FULL_TEST)
+#define CALIB_IN_RTL_SIM			(1 << 8)
+
+/* Scan chain manager command addresses */
+#define READ_SCC_OCT_OUT2_DELAY			0
+#define READ_SCC_DQ_OUT2_DELAY			0
+#define READ_SCC_DQS_IO_OUT2_DELAY		0
+#define READ_SCC_DM_IO_OUT2_DELAY		0
+
+/* HHP-HPS-specific values */
+#define SCC_MGR_HHP_EXTRAS_OFFSET			0
+#define SCC_MGR_HHP_DQSE_MAP_OFFSET			1
+
+/* PHY Debug mode flag constants */
+#define PHY_DEBUG_IN_DEBUG_MODE 0x00000001
+#define PHY_DEBUG_ENABLE_CAL_RPT 0x00000002
+#define PHY_DEBUG_ENABLE_MARGIN_RPT 0x00000004
+#define PHY_DEBUG_SWEEP_ALL_GROUPS 0x00000008
+#define PHY_DEBUG_DISABLE_GUARANTEED_READ 0x00000010
+#define PHY_DEBUG_ENABLE_NON_DESTRUCTIVE_CALIBRATION 0x00000020
+
+/* Init and Reset delay constants - Only use if defined by sequencer_defines.h,
+ * otherwise, revert to defaults
+ * Default for Tinit = (0+1) * ((202+1) * (2 * 131 + 1) + 1) = 53532 =
+ * 200.75us @ 266MHz
+ */
+#ifdef TINIT_CNTR0_VAL
+#define SEQ_TINIT_CNTR0_VAL TINIT_CNTR0_VAL
+#else
+#define SEQ_TINIT_CNTR0_VAL 0
+#endif
+
+#ifdef TINIT_CNTR1_VAL
+#define SEQ_TINIT_CNTR1_VAL TINIT_CNTR1_VAL
+#else
+#define SEQ_TINIT_CNTR1_VAL 202
+#endif
+
+#ifdef TINIT_CNTR2_VAL
+#define SEQ_TINIT_CNTR2_VAL TINIT_CNTR2_VAL
+#else
+#define SEQ_TINIT_CNTR2_VAL 131
+#endif
+
+
+/* Default for Treset = (2+1) * ((252+1) * (2 * 131 + 1) + 1) = 133563 =
+ * 500.86us @ 266MHz
+ */
+#ifdef TRESET_CNTR0_VAL
+#define SEQ_TRESET_CNTR0_VAL TRESET_CNTR0_VAL
+#else
+#define SEQ_TRESET_CNTR0_VAL 2
+#endif
+
+#ifdef TRESET_CNTR1_VAL
+#define SEQ_TRESET_CNTR1_VAL TRESET_CNTR1_VAL
+#else
+#define SEQ_TRESET_CNTR1_VAL 252
+#endif
+
+#ifdef TRESET_CNTR2_VAL
+#define SEQ_TRESET_CNTR2_VAL TRESET_CNTR2_VAL
+#else
+#define SEQ_TRESET_CNTR2_VAL 131
+#endif
+
+#define RW_MGR_INST_ROM_WRITE	(BASE_RW_MGR + 0x1800)
+#define RW_MGR_AC_ROM_WRITE	(BASE_RW_MGR + 0x1C00)
+
+extern int32_t inst_rom_init_size;
+extern uint32_t inst_rom_init[];
+extern uint32_t ac_rom_init_size;
+extern uint32_t ac_rom_init[];
+
+struct socfpga_sdr_rw_load_manager {
+	u32	load_cntr0;
+	u32	load_cntr1;
+	u32	load_cntr2;
+	u32	load_cntr3;
+};
+
+struct socfpga_sdr_rw_load_jump_manager {
+	u32	load_jump_add0;
+	u32	load_jump_add1;
+	u32	load_jump_add2;
+	u32	load_jump_add3;
+};
+
+struct socfpga_sdr_reg_file {
+	u32 signature;
+	u32 debug_data_addr;
+	u32 cur_stage;
+	u32 fom;
+	u32 failing_stage;
+	u32 debug1;
+	u32 debug2;
+	u32 dtaps_per_ptap;
+	u32 trk_sample_count;
+	u32 trk_longidle;
+	u32 delays;
+	u32 trk_rw_mgr_addr;
+	u32 trk_read_dqs_width;
+	u32 trk_rfsh;
+};
+
+/* parameter variable holder */
+struct param_type {
+	uint32_t dm_correct_mask;
+	uint32_t read_correct_mask;
+	uint32_t read_correct_mask_vg;
+	uint32_t write_correct_mask;
+	uint32_t write_correct_mask_vg;
+
+	/* set a particular entry to 1 if we need to skip a particular rank */
+
+	uint32_t skip_ranks[MAX_RANKS];
+
+	/* set a particular entry to 1 if we need to skip a particular group */
+
+	uint32_t skip_groups;
+
+	/*
+	 * set a particular entry to 1 if the shadow register
+	 * (which represents a set of ranks) needs to be skipped
+	 */
+
+	uint32_t skip_shadow_regs[NUM_SHADOW_REGS];
+
+};
+
+
+/* global variable holder */
+struct gbl_type {
+	uint32_t phy_debug_mode_flags;
+
+	/* current read latency */
+
+	uint32_t curr_read_lat;
+
+	/* current write latency */
+
+	uint32_t curr_write_lat;
+
+	/* error code */
+
+	uint32_t error_substage;
+	uint32_t error_stage;
+	uint32_t error_group;
+
+	/* figure-of-merit in, figure-of-merit out */
+
+	uint32_t fom_in;
+	uint32_t fom_out;
+
+	/*
+	 * Number of RW Mgr NOP cycles between
+	 * write command and write data
+	 */
+	uint32_t rw_wl_nop_cycles;
+};
+
+struct socfpga_sdr_scc_mgr {
+	u32	dqs_ena;
+	u32	dqs_io_ena;
+	u32	dq_ena;
+	u32	dm_ena;
+	u32	__padding1[4];
+	u32	update;
+	u32	__padding2[7];
+	u32	active_rank;
+};
+
+/* PHY manager configuration registers. */
+struct socfpga_phy_mgr_cfg {
+	u32	phy_rlat;
+	u32	reset_mem_stbl;
+	u32	mux_sel;
+	u32	cal_status;
+	u32	cal_debug_info;
+	u32	vfifo_rd_en_ovrd;
+	u32	afi_wlat;
+	u32	afi_rlat;
+};
+
+/* PHY manager command addresses. */
+struct socfpga_phy_mgr_cmd {
+	u32	inc_vfifo_fr;
+	u32	inc_vfifo_hard_phy;
+	u32	fifo_reset;
+	u32	inc_vfifo_fr_hr;
+	u32	inc_vfifo_qr;
+};
+
+struct socfpga_data_mgr {
+	u32	__padding1;
+	u32	t_wl_add;
+	u32	mem_t_add;
+	u32	t_rl_add;
+};
+
+#endif /* _SEQUENCER_H_ */
diff --git a/drivers/ddr/altera/sequencer_auto.h b/drivers/ddr/altera/sequencer_auto.h
new file mode 100644
index 0000000..0c5d83b
--- /dev/null
+++ b/drivers/ddr/altera/sequencer_auto.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2015
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+
+#define RW_MGR_READ_B2B_WAIT2 0x6A
+#define RW_MGR_LFSR_WR_RD_BANK_0_WAIT 0x31
+#define RW_MGR_REFRESH_ALL 0x14
+#define RW_MGR_ZQCL 0x06
+#define RW_MGR_LFSR_WR_RD_BANK_0_NOP 0x22
+#define RW_MGR_LFSR_WR_RD_BANK_0_DQS 0x23
+#define RW_MGR_ACTIVATE_0_AND_1 0x0D
+#define RW_MGR_MRS2_MIRR 0x0A
+#define RW_MGR_INIT_RESET_0_CKE_0 0x6E
+#define RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT 0x45
+#define RW_MGR_ACTIVATE_1 0x0F
+#define RW_MGR_MRS2 0x04
+#define RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1 0x34
+#define RW_MGR_MRS1 0x03
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define RW_MGR_IDLE_LOOP1 0x7A
+#else
+#define RW_MGR_IDLE_LOOP1 0x7C
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define RW_MGR_GUARANTEED_WRITE_WAIT2 0x18
+#define RW_MGR_MRS3 0x05
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define RW_MGR_IDLE_LOOP2 0x79
+#else
+#define RW_MGR_IDLE_LOOP2 0x7B
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define RW_MGR_GUARANTEED_WRITE_WAIT1 0x1E
+#define RW_MGR_LFSR_WR_RD_BANK_0_DATA 0x24
+#define RW_MGR_GUARANTEED_WRITE_WAIT3 0x1C
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define RW_MGR_RDIMM_CMD 0x78
+#else
+#define RW_MGR_RDIMM_CMD 0x7A
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP 0x36
+#define RW_MGR_GUARANTEED_WRITE_WAIT0 0x1A
+#define RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA 0x38
+#define RW_MGR_GUARANTEED_READ_CONT 0x53
+#define RW_MGR_MRS3_MIRR 0x0B
+#define RW_MGR_IDLE 0x00
+#define RW_MGR_READ_B2B 0x58
+#define RW_MGR_INIT_RESET_0_CKE_0_inloop 0x6F
+#define RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS 0x37
+#define RW_MGR_GUARANTEED_WRITE 0x17
+#define RW_MGR_PRECHARGE_ALL 0x12
+#define RW_MGR_INIT_RESET_1_CKE_0_inloop_1 0x74
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define RW_MGR_SGLE_READ 0x7C
+#else
+#define RW_MGR_SGLE_READ 0x7E
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define RW_MGR_MRS0_USER_MIRR 0x0C
+#define RW_MGR_RETURN 0x01
+#define RW_MGR_LFSR_WR_RD_DM_BANK_0 0x35
+#define RW_MGR_MRS0_USER 0x07
+#define RW_MGR_GUARANTEED_READ 0x4B
+#define RW_MGR_MRS0_DLL_RESET_MIRR 0x08
+#define RW_MGR_INIT_RESET_1_CKE_0 0x73
+#define RW_MGR_ACTIVATE_0_AND_1_WAIT2 0x10
+#define RW_MGR_LFSR_WR_RD_BANK_0_WL_1 0x20
+#define RW_MGR_MRS0_DLL_RESET 0x02
+#define RW_MGR_ACTIVATE_0_AND_1_WAIT1 0x0E
+#define RW_MGR_LFSR_WR_RD_BANK_0 0x21
+#define RW_MGR_CLEAR_DQS_ENABLE 0x48
+#define RW_MGR_MRS1_MIRR 0x09
+#define RW_MGR_READ_B2B_WAIT1 0x60
+#define RW_MGR_CONTENT_READ_B2B_WAIT2 0x00C680
+#define RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_WAIT 0x00A680
+#define RW_MGR_CONTENT_REFRESH_ALL 0x000980
+#define RW_MGR_CONTENT_ZQCL 0x008380
+#define RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_NOP 0x00E700
+#define RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_DQS 0x000C00
+#define RW_MGR_CONTENT_ACTIVATE_0_AND_1 0x000800
+#define RW_MGR_CONTENT_MRS2_MIRR 0x008580
+#define RW_MGR_CONTENT_INIT_RESET_0_CKE_0 0x000000
+#define RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_WAIT 0x00A680
+#define RW_MGR_CONTENT_ACTIVATE_1 0x000880
+#define RW_MGR_CONTENT_MRS2 0x008280
+#define RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_WL_1 0x00CE00
+#define RW_MGR_CONTENT_MRS1 0x008200
+#define RW_MGR_CONTENT_IDLE_LOOP1 0x00A680
+#define RW_MGR_CONTENT_GUARANTEED_WRITE_WAIT2 0x00CCE8
+#define RW_MGR_CONTENT_MRS3 0x008300
+#define RW_MGR_CONTENT_IDLE_LOOP2 0x008680
+#define RW_MGR_CONTENT_GUARANTEED_WRITE_WAIT1 0x00AC88
+#define RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_DATA 0x020CE0
+#define RW_MGR_CONTENT_GUARANTEED_WRITE_WAIT3 0x00EC88
+#define RW_MGR_CONTENT_RDIMM_CMD 0x009180
+#define RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_NOP 0x00E700
+#define RW_MGR_CONTENT_GUARANTEED_WRITE_WAIT0 0x008CE8
+#define RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_DATA 0x030CE0
+#define RW_MGR_CONTENT_GUARANTEED_READ_CONT 0x001168
+#define RW_MGR_CONTENT_MRS3_MIRR 0x008600
+#define RW_MGR_CONTENT_IDLE 0x080000
+#define RW_MGR_CONTENT_READ_B2B 0x040E88
+#define RW_MGR_CONTENT_INIT_RESET_0_CKE_0_inloop 0x000000
+#define RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0_DQS 0x000C00
+#define RW_MGR_CONTENT_GUARANTEED_WRITE 0x000B68
+#define RW_MGR_CONTENT_PRECHARGE_ALL 0x000900
+#define RW_MGR_CONTENT_INIT_RESET_1_CKE_0_inloop_1 0x000080
+#define RW_MGR_CONTENT_SGLE_READ 0x040F08
+#define RW_MGR_CONTENT_MRS0_USER_MIRR 0x008400
+#define RW_MGR_CONTENT_RETURN 0x080680
+#define RW_MGR_CONTENT_LFSR_WR_RD_DM_BANK_0 0x00CD80
+#define RW_MGR_CONTENT_MRS0_USER 0x008100
+#define RW_MGR_CONTENT_GUARANTEED_READ 0x001168
+#define RW_MGR_CONTENT_MRS0_DLL_RESET_MIRR 0x008480
+#define RW_MGR_CONTENT_INIT_RESET_1_CKE_0 0x000080
+#define RW_MGR_CONTENT_ACTIVATE_0_AND_1_WAIT2 0x00A680
+#define RW_MGR_CONTENT_LFSR_WR_RD_BANK_0_WL_1 0x00CE00
+#define RW_MGR_CONTENT_MRS0_DLL_RESET 0x008180
+#define RW_MGR_CONTENT_ACTIVATE_0_AND_1_WAIT1 0x008680
+#define RW_MGR_CONTENT_LFSR_WR_RD_BANK_0 0x00CD80
+#define RW_MGR_CONTENT_CLEAR_DQS_ENABLE 0x001158
+#define RW_MGR_CONTENT_MRS1_MIRR 0x008500
+#define RW_MGR_CONTENT_READ_B2B_WAIT1 0x00A680
+
diff --git a/drivers/ddr/altera/sequencer_auto_ac_init.c b/drivers/ddr/altera/sequencer_auto_ac_init.c
new file mode 100644
index 0000000..358a0ca
--- /dev/null
+++ b/drivers/ddr/altera/sequencer_auto_ac_init.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2015
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+const unsigned long ac_rom_init_size = 36;
+const unsigned long ac_rom_init[36] = {
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+	0x20700000,
+	0x20780000,
+	0x10080831,
+	0x10080930,
+	0x10090004,
+	0x100a0008,
+	0x100b0000,
+	0x10380400,
+	0x10080849,
+	0x100808c8,
+	0x100a0004,
+	0x10090010,
+	0x100b0000,
+	0x30780000,
+	0x38780000,
+	0x30780000,
+	0x10680000,
+	0x106b0000,
+	0x10280400,
+	0x10480000,
+	0x1c980000,
+	0x1c9b0000,
+	0x1c980008,
+	0x1c9b0008,
+	0x38f80000,
+	0x3cf80000,
+	0x38780000,
+	0x18180000,
+	0x18980000,
+	0x13580000,
+	0x135b0000,
+	0x13580008,
+	0x135b0008,
+	0x33780000,
+	0x10580008,
+	0x10780000
+#else
+	0x20700000,
+	0x20780000,
+	0x10080431,
+	0x10080530,
+	0x10090004,
+	0x100a0008,
+	0x100b0000,
+	0x10380400,
+	0x10080449,
+	0x100804c8,
+	0x100a0004,
+	0x10090010,
+	0x100b0000,
+	0x30780000,
+	0x38780000,
+	0x30780000,
+	0x10680000,
+	0x106b0000,
+	0x10280400,
+	0x10480000,
+	0x1c980000,
+	0x1c9b0000,
+	0x1c980008,
+	0x1c9b0008,
+	0x38f80000,
+	0x3cf80000,
+	0x38780000,
+	0x18180000,
+	0x18980000,
+	0x13580000,
+	0x135b0000,
+	0x13580008,
+	0x135b0008,
+	0x33780000,
+	0x10580008,
+	0x10780000
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+};
diff --git a/drivers/ddr/altera/sequencer_auto_inst_init.c b/drivers/ddr/altera/sequencer_auto_inst_init.c
new file mode 100644
index 0000000..9983c40
--- /dev/null
+++ b/drivers/ddr/altera/sequencer_auto_inst_init.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2015
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+const alt_u32 inst_rom_init_size = 126;
+const alt_u32 inst_rom_init[126] = {
+	0x80000,
+	0x80680,
+	0x8180,
+	0x8200,
+	0x8280,
+	0x8300,
+	0x8380,
+	0x8100,
+	0x8480,
+	0x8500,
+	0x8580,
+	0x8600,
+	0x8400,
+	0x800,
+	0x8680,
+	0x880,
+	0xa680,
+	0x80680,
+	0x900,
+	0x80680,
+	0x980,
+	0x8680,
+	0x80680,
+	0xb68,
+	0xcce8,
+	0xae8,
+	0x8ce8,
+	0xb88,
+	0xec88,
+	0xa08,
+	0xac88,
+	0x80680,
+	0xce00,
+	0xcd80,
+	0xe700,
+	0xc00,
+	0x20ce0,
+	0x20ce0,
+	0x20ce0,
+	0x20ce0,
+	0xd00,
+	0x680,
+	0x680,
+	0x680,
+	0x680,
+	0x60e80,
+	0x61080,
+	0x61080,
+	0x61080,
+	0xa680,
+	0x8680,
+	0x80680,
+	0xce00,
+	0xcd80,
+	0xe700,
+	0xc00,
+	0x30ce0,
+	0x30ce0,
+	0x30ce0,
+	0x30ce0,
+	0xd00,
+	0x680,
+	0x680,
+	0x680,
+	0x680,
+	0x70e80,
+	0x71080,
+	0x71080,
+	0x71080,
+	0xa680,
+	0x8680,
+	0x80680,
+	0x1158,
+	0x6d8,
+	0x80680,
+	0x1168,
+	0x7e8,
+	0x7e8,
+	0x87e8,
+	0x40fe8,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0x1168,
+	0x7e8,
+	0x7e8,
+	0xa7e8,
+	0x80680,
+	0x40e88,
+	0x41088,
+	0x41088,
+	0x41088,
+	0x40f68,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0xa680,
+	0x40fe8,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0x41008,
+	0x41088,
+	0x41088,
+	0x41088,
+	0x1100,
+	0xc680,
+	0x8680,
+	0xe680,
+	0x80680,
+	0x0,
+	0x8000,
+	0xa000,
+	0xc000,
+	0x80000,
+	0x80,
+	0x8080,
+	0xa080,
+	0xc080,
+	0x80080,
+	0x9180,
+	0x8680,
+	0xa680,
+	0x80680,
+	0x40f08,
+	0x80680
+};
+#else
+const unsigned long inst_rom_init_size = 128;
+const unsigned long inst_rom_init[128] = {
+	0x80000,
+	0x80680,
+	0x8180,
+	0x8200,
+	0x8280,
+	0x8300,
+	0x8380,
+	0x8100,
+	0x8480,
+	0x8500,
+	0x8580,
+	0x8600,
+	0x8400,
+	0x800,
+	0x8680,
+	0x880,
+	0xa680,
+	0x80680,
+	0x900,
+	0x80680,
+	0x980,
+	0x8680,
+	0x80680,
+	0xb68,
+	0xcce8,
+	0xae8,
+	0x8ce8,
+	0xb88,
+	0xec88,
+	0xa08,
+	0xac88,
+	0x80680,
+	0xce00,
+	0xcd80,
+	0xe700,
+	0xc00,
+	0x20ce0,
+	0x20ce0,
+	0x20ce0,
+	0x20ce0,
+	0xd00,
+	0x680,
+	0x680,
+	0x680,
+	0x680,
+	0x60e80,
+	0x61080,
+	0x61080,
+	0x61080,
+	0xa680,
+	0x8680,
+	0x80680,
+	0xce00,
+	0xcd80,
+	0xe700,
+	0xc00,
+	0x30ce0,
+	0x30ce0,
+	0x30ce0,
+	0x30ce0,
+	0xd00,
+	0x680,
+	0x680,
+	0x680,
+	0x680,
+	0x70e80,
+	0x71080,
+	0x71080,
+	0x71080,
+	0xa680,
+	0x8680,
+	0x80680,
+	0x1158,
+	0x6d8,
+	0x80680,
+	0x1168,
+	0x7e8,
+	0x7e8,
+	0x87e8,
+	0x40fe8,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0x1168,
+	0x7e8,
+	0x7e8,
+	0xa7e8,
+	0x80680,
+	0x40e88,
+	0x41088,
+	0x41088,
+	0x41088,
+	0x40f68,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0xa680,
+	0x40fe8,
+	0x410e8,
+	0x410e8,
+	0x410e8,
+	0x41008,
+	0x41088,
+	0x41088,
+	0x41088,
+	0x1100,
+	0xc680,
+	0x8680,
+	0xe680,
+	0x80680,
+	0x0,
+	0x0,
+	0xa000,
+	0x8000,
+	0x80000,
+	0x80,
+	0x80,
+	0x80,
+	0x80,
+	0xa080,
+	0x8080,
+	0x80080,
+	0x9180,
+	0x8680,
+	0xa680,
+	0x80680,
+	0x40f08,
+	0x80680
+};
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
diff --git a/drivers/ddr/altera/sequencer_defines.h b/drivers/ddr/altera/sequencer_defines.h
new file mode 100644
index 0000000..32f13ac
--- /dev/null
+++ b/drivers/ddr/altera/sequencer_defines.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright Altera Corporation (C) 2012-2015
+ *
+ * SPDX-License-Identifier:    BSD-3-Clause
+ */
+
+#ifndef _SEQUENCER_DEFINES_H_
+#define _SEQUENCER_DEFINES_H_
+
+#define AC_ROM_MR1_MIRR 0000000000100
+#define AC_ROM_MR1_OCD_ENABLE
+#define AC_ROM_MR2_MIRR 0000000010000
+#define AC_ROM_MR3_MIRR 0000000000000
+#define AC_ROM_MR0_CALIB
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define AC_ROM_MR0_DLL_RESET_MIRR 0100011001000
+#define AC_ROM_MR0_DLL_RESET 0100100110000
+#define AC_ROM_MR0_MIRR 0100001001001
+#define AC_ROM_MR0 0100000110001
+#else
+#define AC_ROM_MR0_DLL_RESET_MIRR 0010011001000
+#define AC_ROM_MR0_DLL_RESET 0010100110000
+#define AC_ROM_MR0_MIRR 0010001001001
+#define AC_ROM_MR0 0010000110001
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define AC_ROM_MR1 0000000000100
+#define AC_ROM_MR2 0000000001000
+#define AC_ROM_MR3 0000000000000
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define AFI_CLK_FREQ 534
+#else
+#define AFI_CLK_FREQ 401
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define AFI_RATE_RATIO 1
+#define AVL_CLK_FREQ 67
+#define BFM_MODE 0
+#define BURST2 0
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define CALIB_LFIFO_OFFSET 8
+#define CALIB_VFIFO_OFFSET 6
+#else
+#define CALIB_LFIFO_OFFSET 7
+#define CALIB_VFIFO_OFFSET 5
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define ENABLE_EXPORT_SEQ_DEBUG_BRIDGE 0
+#define ENABLE_SUPER_QUICK_CALIBRATION 0
+#define GUARANTEED_READ_BRINGUP_TEST 0
+#define HARD_PHY 1
+#define HARD_VFIFO 1
+#define HPS_HW 1
+#define HR_DDIO_OUT_HAS_THREE_REGS 0
+#define IO_DELAY_PER_DCHAIN_TAP 25
+#define IO_DELAY_PER_DQS_EN_DCHAIN_TAP 25
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define IO_DELAY_PER_OPA_TAP 234
+#else
+#define IO_DELAY_PER_OPA_TAP 312
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define IO_DLL_CHAIN_LENGTH 8
+#define IO_DM_OUT_RESERVE 0
+#define IO_DQDQS_OUT_PHASE_MAX 0
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define IO_DQS_EN_DELAY_MAX 15
+#define IO_DQS_EN_DELAY_OFFSET 16
+#else
+#define IO_DQS_EN_DELAY_MAX 31
+#define IO_DQS_EN_DELAY_OFFSET 0
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define IO_DQS_EN_PHASE_MAX 7
+#define IO_DQS_IN_DELAY_MAX 31
+#define IO_DQS_IN_RESERVE 4
+#define IO_DQS_OUT_RESERVE 6
+#define IO_DQ_OUT_RESERVE 0
+#define IO_IO_IN_DELAY_MAX 31
+#define IO_IO_OUT1_DELAY_MAX 31
+#define IO_IO_OUT2_DELAY_MAX 0
+#define IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS 0
+#define MARGIN_VARIATION_TEST 0
+#define MAX_LATENCY_COUNT_WIDTH 5
+#define MEM_ADDR_WIDTH 13
+#define READ_VALID_FIFO_SIZE 16
+#ifdef CONFIG_SOCFPGA_ARRIA5
+/* The if..else... is not required if generated by tools */
+#define REG_FILE_INIT_SEQ_SIGNATURE 0x5555048c
+#else
+#define REG_FILE_INIT_SEQ_SIGNATURE 0x55550483
+#endif /* CONFIG_SOCFPGA_ARRIA5 */
+#define RW_MGR_MEM_ADDRESS_MIRRORING 0
+#define RW_MGR_MEM_ADDRESS_WIDTH 15
+#define RW_MGR_MEM_BANK_WIDTH 3
+#define RW_MGR_MEM_CHIP_SELECT_WIDTH 1
+#define RW_MGR_MEM_CLK_EN_WIDTH 1
+#define RW_MGR_MEM_CONTROL_WIDTH 1
+#define RW_MGR_MEM_DATA_MASK_WIDTH 5
+#define RW_MGR_MEM_DATA_WIDTH 40
+#define RW_MGR_MEM_DQ_PER_READ_DQS 8
+#define RW_MGR_MEM_DQ_PER_WRITE_DQS 8
+#define RW_MGR_MEM_IF_READ_DQS_WIDTH 5
+#define RW_MGR_MEM_IF_WRITE_DQS_WIDTH 5
+#define RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM 1
+#define RW_MGR_MEM_ODT_WIDTH 1
+#define RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS 1
+#define RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS 1
+#define RW_MGR_MR0_BL 1
+#define RW_MGR_MR0_CAS_LATENCY 3
+#define RW_MGR_TRUE_MEM_DATA_MASK_WIDTH 5
+#define RW_MGR_WRITE_TO_DEBUG_READ 1.0
+#define SKEW_CALIBRATION 0
+#define TINIT_CNTR1_VAL 32
+#define TINIT_CNTR2_VAL 32
+#define TINIT_CNTR0_VAL 132
+#define TRESET_CNTR1_VAL 99
+#define TRESET_CNTR2_VAL 10
+#define TRESET_CNTR0_VAL 132
+
+#endif /* _SEQUENCER_DEFINES_H_ */
-- 
2.2.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [U-Boot] [PATCH RESEND 3/3] arm: socfpga: enable the Altera SDRAM controller driver
  2015-04-16 14:11 [U-Boot] [PATCH RESEND 0/3] drivers/ddr/altera: Add the DDR controller driver for SoCFPGA dinguyen at opensource.altera.com
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller dinguyen at opensource.altera.com
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 2/3] driver/ddr/altera/: Add the sdram calibration portion dinguyen at opensource.altera.com
@ 2015-04-16 14:11 ` dinguyen at opensource.altera.com
  2 siblings, 0 replies; 8+ messages in thread
From: dinguyen at opensource.altera.com @ 2015-04-16 14:11 UTC (permalink / raw)
  To: u-boot

From: Dinh Nguyen <dinguyen@opensource.altera.com>

Enable the Altera SDRAM driver for the SoCFPGA platform.

Signed-off-by: Dinh Nguyen <dinguyen@opensource.altera.com>
---
 include/configs/socfpga_common.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/include/configs/socfpga_common.h b/include/configs/socfpga_common.h
index cae744d..9b52050 100644
--- a/include/configs/socfpga_common.h
+++ b/include/configs/socfpga_common.h
@@ -77,6 +77,11 @@
 #define CONFIG_SYS_PL310_BASE		SOCFPGA_MPUL2_ADDRESS
 
 /*
+ * SDRAM controller
+ */
+#define CONFIG_ALTERA_SDRAM
+
+/*
  * EPCS/EPCQx1 Serial Flash Controller
  */
 #ifdef CONFIG_ALTERA_SPI
-- 
2.2.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller dinguyen at opensource.altera.com
@ 2015-04-17 12:31   ` Pavel Machek
  2015-04-17 15:20     ` Dinh Nguyen
  0 siblings, 1 reply; 8+ messages in thread
From: Pavel Machek @ 2015-04-17 12:31 UTC (permalink / raw)
  To: u-boot

Hi!

> +#ifndef	_SDRAM_H_
> +#define	_SDRAM_H_
> +
> +#ifndef __ASSEMBLY__
> +
> +/* function declaration */

You can delete this comment.

> +#define \
> +SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB 0
> +#define  \
> +SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK \
> +0xffffffff
> +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_1       */
> +#define \
> +SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB 0
> +#define \
> +SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK \
> +0xffffffff
> +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_2       */
> +#define \
> +SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB 0
> +#define \
> +SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK \
> +0x0000ffff

Can we get slightly shorter define names?

> +/* Register template: sdr::ctrlgrp::remappriority                          */
> +#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_LSB 0
> +#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_MASK 0x000000ff
> +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_0                     */
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_LSB 12
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH 20
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(x) \
> + (((x) << 12) & 0xfffff000)
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(x) \
> + (((x) << 10) & 0x00000c00)
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(x) \
> + (((x) << 6) & 0x000000c0)
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(x) \
> + (((x) << 8) & 0x00000100)
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(x) \
> + (((x) << 9) & 0x00000200)
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(x) \
> + (((x) << 4) & 0x00000030)
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(x) \
> + (((x) << 2) & 0x0000000c)
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(x) \
> + (((x) << 0) & 0x00000003)
> +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_1                     */
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH 20
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(x) \
> + (((x) << 12) & 0xfffff000)
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(x) \
> + (((x) << 0) & 0x00000fff)
> +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_2                     */
> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(x) \
> + (((x) << 0) & 0x00000fff)

Too long names here, too..

> --- /dev/null
> +++ b/arch/arm/include/asm/arch-socfpga/sdram_config.h
> @@ -0,0 +1,100 @@
> +/*
> + * Copyright Altera Corporation (C) 2012-2015
> + *
> + * SPDX-License-Identifier:    BSD-3-Clause
> + */
> +

If this file is autogenerated, you should mention it here.

> +#ifdef CONFIG_SOCFPGA_ARRIA5
> +/* The if..else... is not required if generated by tools */

What does this comment say?

> +#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0	0
> +#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4	0x41041041
> +#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36	0x410410
> +#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0 \
> +0x01010101
> +#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32 \
> +0x01010101
> +#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64 \
> +0x0101

Drop "HPS" and "CTRLCFG" from the config names... they should still be
unique and you'll not hit 80 column limits with just the name?
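
E.g. CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0
would become CONFIG_SDR_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0, which
already fits on one line comfortably (just an illustration, any
shortened spelling would do).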

> +#define COMPARE_FAIL_ACTION	return 1;

Macros that change control flow are nasty.
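
Something like this open-coded at the call sites would keep the early
return visible (untested sketch, the names here are made up, not from
the patch):

	if (data != expected) {
		debug("sdram: compare failed\n");
		return 1;
	}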

> +/* Below function only applicable for SPL */

"Function below"?

Add ifdef so that it is not available for u-boot proper?
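
I.e. wrap it roughly like this (the function name below is only a
placeholder, not the one from the patch):

	#ifdef CONFIG_SPL_BUILD
	static void spl_only_helper(void)
	{
		...
	}
	#endif	/* CONFIG_SPL_BUILD */

so the symbol is not built into u-boot proper at all.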

> +typedef struct _sdram_prot_rule {
> +	uint64_t	sdram_start;	/* SDRAM start address */
> +	uint64_t	sdram_end;	/* SDRAM end address */
> +	uint32_t	rule;		/* SDRAM protection rule number: 0-19 */
> +	int		valid;		/* Rule valid or not? 1 - valid, 0 not*/
> +
> +	uint32_t	security;
> +	uint32_t	portmask;
> +	uint32_t	result;
> +	uint32_t	lo_prot_id;
> +	uint32_t	hi_prot_id;
> +} sdram_prot_rule, *psdram_prot_rule;

Struct typedefs are nasty. Just use "struct sdram_prot_rule"?
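
Untested sketch of what I mean (same fields, no typedef):

	struct sdram_prot_rule {
		uint64_t	sdram_start;	/* SDRAM start address */
		uint64_t	sdram_end;	/* SDRAM end address */
		uint32_t	rule;		/* rule number: 0-19 */
		int		valid;		/* 1 - valid, 0 - not */
		uint32_t	security;
		uint32_t	portmask;
		uint32_t	result;
		uint32_t	lo_prot_id;
		uint32_t	hi_prot_id;
	};

	static void sdram_get_rule(struct sdram_prot_rule *prule);

and callers simply pass "struct sdram_prot_rule *" around.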

> +static void sdram_get_rule(psdram_prot_rule prule)
> +{
> +	uint32_t protruleaddr;
> +	uint32_t protruleid;
> +	uint32_t protruledata;

Remove "protrule" from local variables, as it is clear from context?

> +static void sdram_set_protection_config(uint64_t sdram_start, uint64_t sdram_end)
> +{
> +	sdram_prot_rule rule;
> +	int rules;
> +
> +	/* Start with accepting all SDRAM transaction */
> +	writel(0x0, &sdr_ctrl->protport_default);
> +
> +	/* Clear all protection rules for warm boot case */
> +
> +	rule.sdram_start = 0;

Kill last empty line. And actually... maybe memset?
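
Rough idea (untested), assuming an all-zero rule is a valid "cleared"
state:

	struct sdram_prot_rule rule;

	memset(&rule, 0, sizeof(rule));

and then only fill in the fields that need a non-zero value before
applying each rule.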

> +static void set_sdr_addr_rw(void)
> +{
> +	int cs = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS;
> +	int width = 8;
> +	int rows = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS;
> +	int banks = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS;
> +	int cols = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS;
> +	unsigned long long workaround_memsize = MEMSIZE_4G;
> +
> +	debug("Configuring DRAMADDRW\n");
> +	clrsetbits_le32(&sdr_ctrl->dram_addrw, SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK,
> +			CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS <<
> +			SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB);
> +	/* SDRAM Failure When Accessing Non-Existent Memory
> +	 * Update Preloader to artificially increase the number of rows so
> +	 * that the memory thinks it has 4GB of RAM.
> +	 */

Comment style, "rows, so"?


> +/* To calculate SDRAM device size based on SDRAM controller parameters.

Drop "To".

> + * Size is specified in bytes.
> + *
> + * NOTE!!!!
> + * This function is compiled and linked into the preloader and
> + * Uboot (there may be others). So if this function changes, the Preloader
> + * and UBoot must be updated simultaneously.
> + */

Is that worth big note and four exclamation marks? Compiler should
take care of recompilation...

Ok, this starts to look like something that we could actually merge.

									Pavel
-- 
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller
  2015-04-17 12:31   ` Pavel Machek
@ 2015-04-17 15:20     ` Dinh Nguyen
  2015-04-17 20:44       ` Pavel Machek
  0 siblings, 1 reply; 8+ messages in thread
From: Dinh Nguyen @ 2015-04-17 15:20 UTC (permalink / raw)
  To: u-boot

Hi Pavel,

On 04/17/2015 07:31 AM, Pavel Machek wrote:
> Hi!
> 
>> +#ifndef	_SDRAM_H_
>> +#define	_SDRAM_H_
>> +
>> +#ifndef __ASSEMBLY__
>> +
>> +/* function declaration */
> 
> You can delete this comment.
> 

Ok...

>> +#define \
>> +SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB 0
>> +#define  \
>> +SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK \
>> +0xffffffff
>> +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_1       */
>> +#define \
>> +SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB 0
>> +#define \
>> +SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK \
>> +0xffffffff
>> +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_2       */
>> +#define \
>> +SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB 0
>> +#define \
>> +SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK \
>> +0x0000ffff
> 
> Can we get slightly shorter define names?

I did think about shortening these defines a bit, but concluded that I
should leave them alone. These defines are generated by the tools,
AFAICT. I don't think any sane person would write defines this long by
hand. So I still want to preserve the use case where the driver can
still be used with the autogenerated header file from the tools in some
form.

> 
>> +/* Register template: sdr::ctrlgrp::remappriority                          */
>> +#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_LSB 0
>> +#define SDR_CTRLGRP_REMAPPRIORITY_PRIORITYREMAP_MASK 0x000000ff
>> +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_0                     */
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_LSB 12
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH 20
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(x) \
>> + (((x) << 12) & 0xfffff000)
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(x) \
>> + (((x) << 10) & 0x00000c00)
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(x) \
>> + (((x) << 6) & 0x000000c0)
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(x) \
>> + (((x) << 8) & 0x00000100)
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(x) \
>> + (((x) << 9) & 0x00000200)
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(x) \
>> + (((x) << 4) & 0x00000030)
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(x) \
>> + (((x) << 2) & 0x0000000c)
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(x) \
>> + (((x) << 0) & 0x00000003)
>> +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_1                     */
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH 20
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(x) \
>> + (((x) << 12) & 0xfffff000)
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(x) \
>> + (((x) << 0) & 0x00000fff)
>> +/* Register template: sdr::ctrlgrp::phyctrl::phyctrl_2                     */
>> +#define SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(x) \
>> + (((x) << 0) & 0x00000fff)
> 
> Too long names here, too..
> 
>> --- /dev/null
>> +++ b/arch/arm/include/asm/arch-socfpga/sdram_config.h
>> @@ -0,0 +1,100 @@
>> +/*
>> + * Copyright Altera Corporation (C) 2012-2015
>> + *
>> + * SPDX-License-Identifier:    BSD-3-Clause
>> + */
>> +
> 
> If this file is autogenerated, you should mention it here.
> 

Ok...

>> +#ifdef CONFIG_SOCFPGA_ARRIA5
>> +/* The if..else... is not required if generated by tools */
> 
> What does this comment say?
> 

I have no idea, but will clean up.

>> +#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_1_THRESHOLD2_3_0	0
>> +#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_2_THRESHOLD2_35_4	0x41041041
>> +#define CONFIG_HPS_SDR_CTRLCFG_MPPACING_3_THRESHOLD2_59_36	0x410410
>> +#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0 \
>> +0x01010101
>> +#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32 \
>> +0x01010101
>> +#define CONFIG_HPS_SDR_CTRLCFG_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64 \
>> +0x0101
> 
> Drop "HPS" and "CTRLCFG" from the config names... they should still be
> unique and you'll not hit 80 column limits with just the name?
> 

Same reasoning as I had for the previous comment regarding long define names.

>> +#define COMPARE_FAIL_ACTION	return 1;
> 
> Macros that change control flow are nasty.
> 

Will remove...

>> +/* Below function only applicable for SPL */
> 
> "Function below"?

Will clean..

> 
> Add ifdef so that it is not available for u-boot proper?
> 
>> +typedef struct _sdram_prot_rule {
>> +	uint64_t	sdram_start;	/* SDRAM start address */
>> +	uint64_t	sdram_end;	/* SDRAM end address */
>> +	uint32_t	rule;		/* SDRAM protection rule number: 0-19 */
>> +	int		valid;		/* Rule valid or not? 1 - valid, 0 not*/
>> +
>> +	uint32_t	security;
>> +	uint32_t	portmask;
>> +	uint32_t	result;
>> +	uint32_t	lo_prot_id;
>> +	uint32_t	hi_prot_id;
>> +} sdram_prot_rule, *psdram_prot_rule;
> 
> Struct typedefs are nasty. Just use "struct sdram_prot_rule"?

Yeah...will clean up...

> 
>> +static void sdram_get_rule(psdram_prot_rule prule)
>> +{
>> +	uint32_t protruleaddr;
>> +	uint32_t protruleid;
>> +	uint32_t protruledata;
> 
> Remove "protrule" from local variables, as it is clear from context?
> 

Ok...

>> +static void sdram_set_protection_config(uint64_t sdram_start, uint64_t sdram_end)
>> +{
>> +	sdram_prot_rule rule;
>> +	int rules;
>> +
>> +	/* Start with accepting all SDRAM transaction */
>> +	writel(0x0, &sdr_ctrl->protport_default);
>> +
>> +	/* Clear all protection rules for warm boot case */
>> +
>> +	rule.sdram_start = 0;
> 
> Kill last empty line. And actually... maybe memset?

Ok...

> 
>> +static void set_sdr_addr_rw(void)
>> +{
>> +	int cs = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_CSBITS;
>> +	int width = 8;
>> +	int rows = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_ROWBITS;
>> +	int banks = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_BANKBITS;
>> +	int cols = CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS;
>> +	unsigned long long workaround_memsize = MEMSIZE_4G;
>> +
>> +	debug("Configuring DRAMADDRW\n");
>> +	clrsetbits_le32(&sdr_ctrl->dram_addrw, SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK,
>> +			CONFIG_HPS_SDR_CTRLCFG_DRAMADDRW_COLBITS <<
>> +			SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB);
>> +	/* SDRAM Failure When Accessing Non-Existent Memory
>> +	 * Update Preloader to artificially increase the number of rows so
>> +	 * that the memory thinks it has 4GB of RAM.
>> +	 */
> 
> Comment style, "rows, so"?
> 

Will clean up...

> 
>> +/* To calculate SDRAM device size based on SDRAM controller parameters.
> 
> Drop "To".
> 

Ok...

>> + * Size is specified in bytes.
>> + *
>> + * NOTE!!!!
>> + * This function is compiled and linked into the preloader and
>> + * Uboot (there may be others). So if this function changes, the Preloader
>> + * and UBoot must be updated simultaneously.
>> + */
> 
> Is that worth big note and four exclamation marks? Compiler should
> take care of recompilation...

Yeah, I'll clean up...

> 
> Ok, this starts to look like something that we could actually merge.

Almost...

thanks,
Dinh

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller
  2015-04-17 15:20     ` Dinh Nguyen
@ 2015-04-17 20:44       ` Pavel Machek
  0 siblings, 0 replies; 8+ messages in thread
From: Pavel Machek @ 2015-04-17 20:44 UTC (permalink / raw)
  To: u-boot

Hi!

> >> +#define \
> >> +SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_LSB 0
> >> +#define  \
> >> +SDR_CTRLGRP_MPTHRESHOLDRST_0_THRESHOLDRSTCYCLES_31_0_MASK \
> >> +0xffffffff
> >> +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_1       */
> >> +#define \
> >> +SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_LSB 0
> >> +#define \
> >> +SDR_CTRLGRP_MPTHRESHOLDRST_1_THRESHOLDRSTCYCLES_63_32_MASK \
> >> +0xffffffff
> >> +/* Register template: sdr::ctrlgrp::mpthresholdrst::mpthresholdrst_2       */
> >> +#define \
> >> +SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_LSB 0
> >> +#define \
> >> +SDR_CTRLGRP_MPTHRESHOLDRST_2_THRESHOLDRSTCYCLES_79_64_MASK \
> >> +0x0000ffff
> > 
> > Can we get slightly shorter define names?
> 
> I did think about shortening these defines a bit, but concluded that I
> should leave them alone. These defines are generated by the tools,
> AFAICT. I don't think any sane person would write defines this long by
> hand. So I still want to preserve the use case where the driver can
> still be used with the autogenerated header file from the tools in some
> form.

Ok.

[I'd suggest placing the defines on single lines, ignoring 80 column
rule, but then checkpatch would scream, so I guess it is ok as it is.]

Best regards,
									Pavel
-- 
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [U-Boot] [PATCH RESEND 2/3] driver/ddr/altera/: Add the sdram calibration portion
  2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 2/3] driver/ddr/altera/: Add the sdram calibration portion dinguyen at opensource.altera.com
@ 2015-04-25 17:27   ` Pavel Machek
  0 siblings, 0 replies; 8+ messages in thread
From: Pavel Machek @ 2015-04-25 17:27 UTC (permalink / raw)
  To: u-boot

Hi!

> +++ b/drivers/ddr/altera/Makefile
> @@ -0,0 +1,12 @@
> +#
> +# (C) Copyright 2000-2003
> +# Wolfgang Denk, DENX Software Engineering, wd at denx.de.

Umm. Looks like nothing of Wolfgang's original code is remaining at this point,
you can delete this.

> +/******************************************************************************
> + ******************************************************************************
> + ** NOTE: Special Rules for Globale Variables                                **
> + **                                                                          **

They are global, not globale. 

> + ** All global variables that are explicitly initialized (including          **
> + ** explicitly initialized to zero), are only initialized once, during       **
> + ** configuration time, and not again on reset.  This means that they        **
> + ** preserve their current contents across resets, which is needed for some  **
> + ** special cases involving communication with external modules.  In         **
> + ** addition, this avoids paying the price to have the memory initialized,   **
> + ** even for zeroed data, provided it is explicitly set to zero in the code, **
> + ** and doesn't rely on implicit initialization.                             **

Is this true for variables in this .c file, or for all of them?

> +/*
> + * Temporary workaround to place the initial stack pointer at a safe
> + * offset from end
> + */
> +asm(".global __alt_stack_pointer");
> +asm("__alt_stack_pointer = " __stringify(STACK_POINTER));

How is this different from normal variable?

> +/* For HPS running on actual hardware */
> +
> +#define DLEVEL 0

DEBUG_LEVEL?


> +/*
> + * space around comma is required for varargs macro to remove comma if args
> + * is empty
> + */
> +#define BFM_GBL_SET(field, value)
> +#define BFM_GBL_GET(field)		((long unsigned int)0)
> +#define BFM_STAGE(stage)
> +#define BFM_INC_VFIFO
> +#define COV(label)

I don't get the comment, or the macros. Can you just remove them?

Thanks,
									Pavel

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2015-04-25 17:27 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-04-16 14:11 [U-Boot] [PATCH RESEND 0/3] drivers/ddr/altera: Add the DDR controller driver for SoCFPGA dinguyen at opensource.altera.com
2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 1/3] driver/ddr/altera: Add DDR driver for Altera's SDRAM controller dinguyen at opensource.altera.com
2015-04-17 12:31   ` Pavel Machek
2015-04-17 15:20     ` Dinh Nguyen
2015-04-17 20:44       ` Pavel Machek
2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 2/3] driver/ddr/altera/: Add the sdram calibration portion dinguyen at opensource.altera.com
2015-04-25 17:27   ` Pavel Machek
2015-04-16 14:11 ` [U-Boot] [PATCH RESEND 3/3] arm: socfpga: enable the Altera SDRAM controller driver dinguyen at opensource.altera.com
