From mboxrd@z Thu Jan 1 00:00:00 1970
From: Kazuaki Ichinohe
Date: Wed, 25 Mar 2009 20:32:28 +0900
Subject: [U-Boot] [PATCH] Canyonlands SATA harddisk driver
In-Reply-To: <200903241722.55723.sr@denx.de>
References: <49BFA0BE.7060103@fsi.co.jp> <49C86E1D.4050601@fsi.co.jp> <200903241722.55723.sr@denx.de>
Message-ID: <49CA164C.4080906@fsi.co.jp>
List-Id:
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: u-boot@lists.denx.de

Hi Stefan,

Thank you for the reply.
Here is the updated patch that addresses that point.

Signed-off-by: Kazuaki Ichinohe
---
[patch]
diff -purN u-boot-2009.03/common/cmd_scsi.c u-boot-2009.03-sata/common/cmd_scsi.c
--- u-boot-2009.03/common/cmd_scsi.c	2009-03-22 06:04:41.000000000 +0900
+++ u-boot-2009.03-sata/common/cmd_scsi.c	2009-03-25 18:53:29.000000000 +0900
@@ -47,8 +47,10 @@
 #define SCSI_DEV_ID 0x5288
 #else
+#ifndef CONFIG_SATA_DWC
 #error no scsi device defined
 #endif
+#endif
 static ccb tempccb; /* temporary scsi command buffer */
@@ -179,6 +181,7 @@ void scsi_init(void)
 {
 	int busdevfunc;
+#ifndef CONFIG_SATA_DWC
 	busdevfunc=pci_find_device(SCSI_VEND_ID,SCSI_DEV_ID,0); /* get PCI Device ID */
 	if(busdevfunc==-1) {
 		printf("Error SCSI Controller (%04X,%04X) not found\n",SCSI_VEND_ID,SCSI_DEV_ID);
@@ -189,7 +192,13 @@ void scsi_init(void)
 		printf("SCSI Controller (%04X,%04X) found (%d:%d:%d)\n",SCSI_VEND_ID,SCSI_DEV_ID,(busdevfunc>>16)&0xFF,(busdevfunc>>11)&0x1F,(busdevfunc>>8)&0x7);
 	}
 #endif
+#endif
+#ifdef CONFIG_SATA_DWC
+	sata_dwc_probe();
+#endif
+#ifndef CONFIG_SATA_DWC
 	scsi_low_level_init(busdevfunc);
+#endif
 	scsi_scan(1);
 }
@@ -444,8 +453,11 @@ int do_scsi (cmd_tbl_t *cmdtp, int flag,
 /****************************************************************************************
  * scsi_read
 */
-
+#if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
+#define SCSI_MAX_READ_BLK 0xFF
+#else
 #define SCSI_MAX_READ_BLK 0xFFFF /* almost the maximum amount of the scsi_ext command.. */
+#endif
 ulong scsi_read(int device, ulong blknr, ulong blkcnt, void *buffer)
 {
diff -purN u-boot-2009.03/drivers/block/Makefile u-boot-2009.03-sata/drivers/block/Makefile
--- u-boot-2009.03/drivers/block/Makefile	2009-03-22 06:04:41.000000000 +0900
+++ u-boot-2009.03-sata/drivers/block/Makefile	2009-03-23 19:22:31.000000000 +0900
@@ -34,6 +34,7 @@ COBJS-$(CONFIG_SATA_SIL3114) += sata_sil
 COBJS-$(CONFIG_IDE_SIL680) += sil680.o
 COBJS-$(CONFIG_SCSI_SYM53C8XX) += sym53c8xx.o
 COBJS-$(CONFIG_SYSTEMACE) += systemace.o
+COBJS-$(CONFIG_SATA_DWC) += sata_dwc.o
 COBJS := $(COBJS-y)
 SRCS := $(COBJS:.o=.c)
diff -purN u-boot-2009.03/drivers/block/sata_dwc.c u-boot-2009.03-sata/drivers/block/sata_dwc.c
--- u-boot-2009.03/drivers/block/sata_dwc.c	1970-01-01 09:00:00.000000000 +0900
+++ u-boot-2009.03-sata/drivers/block/sata_dwc.c	2009-03-25 18:02:07.000000000 +0900
@@ -0,0 +1,2175 @@
+/*
+ * sata_dwc.c
+ *
+ * Synopsys DesignWare Cores (DWC) SATA host driver
+ *
+ * Author: Mark Miesfeld
+ *
+ * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese
+ * Copyright 2008 DENX Software Engineering
+ *
+ * Based on versions provided by AMCC and Synopsys which are:
+ * Copyright 2006 Applied Micro Circuits Corporation
+ * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
+ *
+ * This program is free software; you can redistribute
+ * it and/or modify it under the terms of the GNU
+ * General Public License as published by the
+ * Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* sata_dwc.h */ +#include "sata_dwc.h" + +/* Base Address */ +#define SATA_BASE_ADDR 0xe20d1000 +#define SATA_DMA_REG_ADDR 0xe20d0800 + +/* SATA DMA driver Globals */ +#define DMA_NUM_CHANS 1 +#define DMA_NUM_CHAN_REGS 8 + +/* SATA DMA Register definitions */ +#define AHB_DMA_BRST_DFLT 16 /* 4 data items burst length */ + +struct dmareg { + u32 low; /* Low bits 0-31 */ + u32 high; /* High bits 32-63 */ +}; + +/* DMA Per Channel registers */ +struct dma_chan_regs { + struct dmareg sar; /* Source Address */ + struct dmareg dar; /* Destination address */ + struct dmareg llp; /* Linked List Pointer */ + struct dmareg ctl; /* Control */ + struct dmareg sstat; /* Source Status not implemented in core */ + struct dmareg dstat; /* Destination Status not implemented in core */ + struct dmareg sstatar; /* Source Status Address not impl in core */ + struct dmareg dstatar; /* Destination Status Address not implemented */ + struct dmareg cfg; /* Config */ + struct dmareg sgr; /* Source Gather */ + struct dmareg dsr; /* Destination Scatter */ +}; + +/* Generic Interrupt Registers */ +struct dma_interrupt_regs { + struct dmareg tfr; /* Transfer Interrupt */ + struct dmareg block; /* Block Interrupt */ + struct dmareg srctran; /* Source Transfer Interrupt */ + struct dmareg dsttran; /* Dest Transfer Interrupt */ + struct dmareg error; /* Error */ +}; + +struct ahb_dma_regs { + struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS]; + struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */ + struct dma_interrupt_regs interrupt_status; /* Interrupt Status */ + struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */ + struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */ + struct dmareg statusInt; /* Interrupt combined */ + struct dmareg rq_srcreg; /* Src Trans Req */ + struct dmareg rq_dstreg; /* Dst Trans Req */ + struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req */ + struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req */ + struct dmareg rq_lst_srcreg; /* Last Src Trans Req */ + struct dmareg rq_lst_dstreg; /* Last Dst Trans Req */ + struct dmareg dma_cfg; /* DMA Config */ + struct dmareg dma_chan_en; /* DMA Channel Enable */ + struct dmareg dma_id; /* DMA ID */ + struct dmareg dma_test; /* DMA Test */ + struct dmareg res1; /* reserved */ + struct dmareg res2; /* reserved */ + /* DMA Comp Params + * Param 6 = dma_param[0], Param 5 = dma_param[1], + * Param 4 = dma_param[2] ... 
+ */ + struct dmareg dma_params[6]; +}; + +/* DMA Register Operation Bits */ +#define DMA_EN 0x00000001 /* Enable AHB DMA */ +#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */ +#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \ + ((0x000000001 << (ch)) << 8)) /* Enable channel */ +#define DMA_DISABLE_CHAN(ch) (0x00000000 | \ + ((0x000000001 << (ch)) << 8)) /* Disable channel */ + +#define SATA_DWC_MAX_PORTS 1 +#define SATA_DWC_SCR_OFFSET 0x24 +#define SATA_DWC_REG_OFFSET 0x64 + +/* DWC SATA Registers */ +struct sata_dwc_regs { + u32 fptagr; /* 1st party DMA tag */ + u32 fpbor; /* 1st party DMA buffer offset */ + u32 fptcr; /* 1st party DMA Xfr count */ + u32 dmacr; /* DMA Control */ + u32 dbtsr; /* DMA Burst Transac size */ + u32 intpr; /* Interrupt Pending */ + u32 intmr; /* Interrupt Mask */ + u32 errmr; /* Error Mask */ + u32 llcr; /* Link Layer Control */ + u32 phycr; /* PHY Control */ + u32 physr; /* PHY Status */ + u32 rxbistpd; /* Recvd BIST pattern def register */ + u32 rxbistpd1; /* Recvd BIST data dword1 */ + u32 rxbistpd2; /* Recvd BIST pattern data dword2 */ + u32 txbistpd; /* Trans BIST pattern def register */ + u32 txbistpd1; /* Trans BIST data dword1 */ + u32 txbistpd2; /* Trans BIST data dword2 */ + u32 bistcr; /* BIST Control Register */ + u32 bistfctr; /* BIST FIS Count Register */ + u32 bistsr; /* BIST Status Register */ + u32 bistdecr; /* BIST Dword Error count register */ + u32 res[15]; /* Reserved locations */ + u32 testr; /* Test Register */ + u32 versionr; /* Version Register */ + u32 idr; /* ID Register */ + u32 unimpl[192]; /* Unimplemented */ + u32 dmadr[256]; /* FIFO Locations in DMA Mode */ +}; + +/* DWC SATA Register Operations */ +#define SATA_DWC_TXFIFO_DEPTH 0x01FF +#define SATA_DWC_RXFIFO_DEPTH 0x01FF + +#define SATA_DWC_DBTSR_MWR(size) ((size/4) & SATA_DWC_TXFIFO_DEPTH) +#define SATA_DWC_DBTSR_MRD(size) (((size/4) & \ + SATA_DWC_RXFIFO_DEPTH) << 16) +#define SATA_DWC_INTPR_DMAT 0x00000001 +#define SATA_DWC_INTPR_NEWFP 0x00000002 +#define SATA_DWC_INTPR_PMABRT 0x00000004 +#define SATA_DWC_INTPR_ERR 0x00000008 +#define SATA_DWC_INTPR_NEWBIST 0x00000010 +#define SATA_DWC_INTPR_IPF 0x10000000 +#define SATA_DWC_INTMR_DMATM 0x00000001 +#define SATA_DWC_INTMR_NEWFPM 0x00000002 +#define SATA_DWC_INTMR_PMABRTM 0x00000004 +#define SATA_DWC_INTMR_ERRM 0x00000008 +#define SATA_DWC_INTMR_NEWBISTM 0x00000010 + +#define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004 +#define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN + +#define SATA_DWC_QCMD_MAX 32 + +/* This is all error bits, zero's are reserved fields. 
*/ +#define SATA_DWC_SERROR_ERR_BITS 0x0FFF0F03 + +/* + * Commonly used DWC SATA driver Macros + */ +#define HSDEVP_FROM_AP(ap) (struct sata_dwc_device_port*) \ + (ap)->private_data + +struct sata_dwc_device { + struct device *dev; /* generic device struct */ + struct ata_probe_ent *pe; /* ptr to probe-ent */ + struct ata_host *host; + u8 *reg_base; + struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ + int irq_dma; +}; + +struct sata_dwc_device_port { + struct sata_dwc_device *hsdev; + int cmd_issued[SATA_DWC_QCMD_MAX]; + u32 dma_chan[SATA_DWC_QCMD_MAX]; + int dma_pending[SATA_DWC_QCMD_MAX]; +}; + +enum { + SATA_DWC_CMD_ISSUED_NOT = 0, + SATA_DWC_CMD_ISSUED_PEND = 1, + SATA_DWC_CMD_ISSUED_EXEC = 2, + SATA_DWC_CMD_ISSUED_NODATA = 3, + + SATA_DWC_DMA_PENDING_NONE = 0, + SATA_DWC_DMA_PENDING_TX = 1, + SATA_DWC_DMA_PENDING_RX = 2, +}; + +/* delay Macros */ +#define msleep(a) udelay(a * 1000) +#define ssleep(a) msleep(a * 1000) + +static int ata_probe_timeout = (ATA_TMOUT_INTERNAL / 100); + +enum sata_dev_state { + SATA_INIT = 0, + SATA_READY = 1, + SATA_NODEVICE = 2, + SATA_ERROR = 3, +}; +enum sata_dev_state dev_state = SATA_INIT; + +/* + * Globals + */ +static struct ahb_dma_regs *sata_dma_regs = 0; +static struct ata_host *phost; +static struct ata_port ap; +static struct ata_port *pap = ≈ +static struct ata_device ata_device; +static struct sata_dwc_device_port dwc_devp; + +static void *scr_addr_sstatus; +static char temp_data_buf[512]; +static u32 temp_n_block = 0; + +/* + * Prototype + */ +unsigned ata_exec_internal(struct ata_device *dev, + struct ata_taskfile *tf, const u8 *cdb, + int dma_dir, unsigned int buflen, + unsigned long timeout); +unsigned int ata_dev_set_feature(struct ata_device *dev, + u8 enable,u8 feature); +unsigned int ata_dev_init_params(struct ata_device *dev, + u16 heads, u16 sectors); +void clear_serror(void); +u8 ata_irq_on(struct ata_port *ap); +struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, + unsigned int tag); +int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + u8 status, int in_wq); +void ata_tf_to_host(struct ata_port *ap,const struct ata_taskfile *tf); +unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc); +void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); +unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc); +u8 ata_check_altstatus(struct ata_port *ap); +static u8 ata_check_status(struct ata_port *ap); +void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep); +void ata_qc_issue(struct ata_queued_cmd *qc); +void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); +int ata_dev_read_sectors(ccb *pccb); +void ata_std_dev_select(struct ata_port *ap, unsigned int device); +void ata_qc_complete(struct ata_queued_cmd *qc); +void __ata_qc_complete(struct ata_queued_cmd *qc); +void fill_result_tf(struct ata_queued_cmd *qc); +void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf); +void ata_mmio_data_xfer(struct ata_device *dev, + unsigned char *buf,unsigned int buflen); +void ata_pio_task(struct ata_port *arg_ap); +void __ata_port_freeze(struct ata_port *ap); +int ata_port_freeze(struct ata_port *ap); +void ata_qc_free(struct ata_queued_cmd *qc); +void ata_pio_sectors(struct ata_queued_cmd *qc); +void ata_pio_sector(struct ata_queued_cmd *qc); +void ata_pio_queue_task(struct ata_port *ap, + void *data,unsigned long delay); +void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq); +int 
sata_dwc_softreset(struct ata_port *ap); +void sata_dwc_probe(void); + +u8 ata_check_altstatus(struct ata_port *ap) +{ + u8 val = 0; + val = readb(ap->ioaddr.altstatus_addr); + return val; +} + +int check_sata_dev_state(void) +{ + static ccb tempccb; /* temporary scsi command buffer */ + ccb *pccb = (ccb *)&tempccb; + int ret = 0; + int i = 0; + + while(1){ + udelay (10000); /* 10 ms */ + + pccb->cmd[0] = SCSI_READ10; + pccb->cmd[1] = 0; + pccb->cmd[2] = 0; + pccb->cmd[3] = 0; + pccb->cmd[4] = 0; + pccb->cmd[5] = 0; + pccb->cmd[6] = 0; + pccb->cmd[7] = 0; + pccb->cmd[8] = 1; + pccb->cmdlen = 10; + pccb->pdata = &temp_data_buf[0]; /* dummy */ + pccb->datalen = 512; + + /* Send Read Command */ + ret = ata_dev_read_sectors(pccb); + + /* result TRUE => break */ + if(ret == 0){ + break; + } + + i++; + if (i > (ATA_RESET_TIME * 100)){ + printf("** TimeOUT **\n"); + dev_state = SATA_NODEVICE; /* set device status flag */ + return FALSE; + } + + if ((i >= 100) && ((i%100)==0)) { + printf("."); + } + } + + /* Set device status flag */ + dev_state = SATA_READY; + + return TRUE; +} + +void ata_id_string(const u16 *id, unsigned char *s, + unsigned int ofs, unsigned int len) +{ + unsigned int c; + + while (len > 0) { + c = id[ofs] >> 8; + *s = c; + s++; + + c = id[ofs] & 0xff; + *s = c; + s++; + + ofs++; + len -= 2; + } +} + +static int waiting_for_reg_state(volatile u8 *offset, + int timeout_msec, + u32 sign) +{ + int i; + u32 status; + + for (i = 0; i < timeout_msec; i++){ + status = readl(offset); + if ( ( status & sign ) != 0 ){ + break; + } + msleep(1); + } + + return (i < timeout_msec) ? 0 : -1; +} + +static inline u32 qcmd_tag_to_mask(u8 tag) +{ + return (0x00000001 << (tag & 0x1f)); +} + +static u8 ata_check_status(struct ata_port *ap) +{ + u8 val = 0; + val = readb(ap->ioaddr.status_addr); + return val; +} + +static u8 ata_busy_wait(struct ata_port *ap, + unsigned int bits,unsigned int max) +{ + u8 status; + + do { + udelay(10); + status = ata_check_status(ap); + max--; + } while (status != 0xff && (status & bits) && (max > 0)); + + return status; +} + +static u8 ata_wait_idle(struct ata_port *ap) +{ + u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + return status; +} + +static int sata_dwc_scsiop_inq(ccb *pccb) +{ + struct ata_device *ata_dev = &ata_device; + u16 *id; + + /* Set IDENTIFY Data */ + id = ata_dev->id; + + /* make INQUIRY header */ + u8 hdr[] = { + 0, + 0, + 0x5, /* claim SPC-3 version compatibility */ + 2, + 95 - 4 + }; + + /* set scsi removeable (RMB) bit per ata bit */ + if (ata_id_removeable(id)){ + hdr[1] |= (1 << 7); + } + + memcpy(pccb->pdata, hdr, sizeof(hdr)); + + if (pccb->datalen > 35) { + memcpy(&pccb->pdata[8], "ATA ", 8); + ata_id_string(id, &pccb->pdata[16], ATA_ID_PROD, 16); + ata_id_string(id, &pccb->pdata[32], ATA_ID_FW_REV, 4); + if (pccb->pdata[32] == 0 || pccb->pdata[32] == ' '){ + memcpy(&pccb->pdata[32], "n/a ", 4); + } + } + + if (pccb->datalen > 63) { + const u8 versions[] = { + 0x60, /* SAM-3 (no version claimed) */ + 0x03, + 0x20, /* SBC-2 (no version claimed) */ + 0x02, + 0x60 /* SPC-3 (no version claimed) */ + }; + memcpy(pccb->pdata + 59, versions, sizeof(versions)); + } + + return 0; +} + +#define ATA_SCSI_PDATA_SET(idx, val) do { \ + if ((idx) < pccb->datalen) pccb->pdata[(idx)] = (u8)(val); \ + } while (0) + +static int sata_dwc_scsiop_read_cap(ccb *pccb) +{ + struct ata_device *ata_dev = &ata_device; + u32 last_lba; + last_lba = ata_dev->n_sectors; /* LBA of the last block */ + + if (pccb->cmd[0] == SCSI_RD_CAPAC) { + if (last_lba >= 
0xffffffffULL){ + last_lba = 0xffffffff; + } + + /* sector count, 32-bit */ + ATA_SCSI_PDATA_SET(0, last_lba >> (8 * 3)); + ATA_SCSI_PDATA_SET(1, last_lba >> (8 * 2)); + ATA_SCSI_PDATA_SET(2, last_lba >> (8 * 1)); + ATA_SCSI_PDATA_SET(3, last_lba); + + /* buffer clear */ + ATA_SCSI_PDATA_SET(4, 0); + ATA_SCSI_PDATA_SET(5, 0); + + /* sector size */ + ATA_SCSI_PDATA_SET(6, ATA_SECT_SIZE >> 8); + ATA_SCSI_PDATA_SET(7, ATA_SECT_SIZE & 0xff); + + } else { + /* sector count, 64-bit */ + ATA_SCSI_PDATA_SET(0, last_lba >> (8 * 7)); + ATA_SCSI_PDATA_SET(1, last_lba >> (8 * 6)); + ATA_SCSI_PDATA_SET(2, last_lba >> (8 * 5)); + ATA_SCSI_PDATA_SET(3, last_lba >> (8 * 4)); + ATA_SCSI_PDATA_SET(4, last_lba >> (8 * 3)); + ATA_SCSI_PDATA_SET(5, last_lba >> (8 * 2)); + ATA_SCSI_PDATA_SET(6, last_lba >> (8 * 1)); + ATA_SCSI_PDATA_SET(7, last_lba); + + /* sector size */ + ATA_SCSI_PDATA_SET(10, ATA_SECT_SIZE >> 8); + ATA_SCSI_PDATA_SET(11, ATA_SECT_SIZE & 0xff); + } + + return 0; +} + +/* + * SCSI TEST UNIT READY command operation. + * No operation. Simply returns success to caller, to indicate + * that the caller should successfully complete this SCSI command. + */ +static int sata_dwc_scsiop_test_unit_ready(ccb *pccb) +{ + /* No operation */ + return 0; +} + +int scsi_exec(ccb *pccb) +{ + int ret; + + /* check device status */ + if(dev_state != SATA_READY){ + return FALSE; + } + + switch (pccb->cmd[0]) { + case SCSI_READ10: + ret = ata_dev_read_sectors(pccb); + break; + case SCSI_RD_CAPAC: + ret = sata_dwc_scsiop_read_cap(pccb); + break; + case SCSI_TST_U_RDY: + ret = sata_dwc_scsiop_test_unit_ready(pccb); + break; + case SCSI_INQUIRY: + ret = sata_dwc_scsiop_inq(pccb); + break; + default: + printf("Unsupport SCSI command 0x%02x\n", pccb->cmd[0]); + return FALSE; + } + + if (ret) { + debug("SCSI command 0x%02x ret errno %d\n", pccb->cmd[0], ret); + return FALSE; + } + return TRUE; +} + +static const struct ata_port_info sata_dwc_port_info[] = { + { + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING | + ATA_FLAG_SRST | ATA_FLAG_NCQ, + .pio_mask = 0x1f, /* pio 0-4 */ + .mwdma_mask = 0x07, + .udma_mask = 0x7f, + }, +}; + +int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, + unsigned int flags, u16 *id) +{ + struct ata_port *ap = pap; + unsigned int class = *p_class; + struct ata_taskfile tf; + unsigned int err_mask = 0; + const char *reason; + int may_fallback = 1, tried_spinup = 0; + u8 status; + int rc; + + /* cheack BSY = 0 */ + status = ata_busy_wait(ap, ATA_BUSY, 30000); + if (status & ATA_BUSY) { + printf("BSY = 0 check. timeout.\n"); + rc = FALSE; + return rc; + } + + ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ + +retry: + memset(&tf, 0, sizeof(tf)); + ap->print_id = 1; + ap->flags &= ~ATA_FLAG_DISABLED; + tf.ctl = ap->ctl; /* 0x08 */ + tf.device = ATA_DEVICE_OBS; + tf.command = ATA_CMD_ID_ATA; + tf.protocol = ATA_PROT_PIO; + + /* Some devices choke if TF registers contain garbage. Make + * sure those are properly initialized. + */ + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + + /* Device presence detection is unreliable on some + * controllers. Always poll IDENTIFY if available. 
+ */ + tf.flags |= ATA_TFLAG_POLLING; + + temp_n_block = 1; + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, + sizeof(id[0]) * ATA_ID_WORDS, 0); + + if (err_mask) { + if (err_mask & AC_ERR_NODEV_HINT) { + printf("NODEV after polling detection\n"); + return -ENOENT; + } + + if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { + /* Device or controller might have reported + * the wrong device class. Give a shot at the + * other IDENTIFY if the current one is + * aborted by the device. + */ + if (may_fallback) { + may_fallback = 0; + + if (class == ATA_DEV_ATA) { + class = ATA_DEV_ATAPI; + } else { + class = ATA_DEV_ATA; + } + goto retry; + } + /* Control reaches here iff the device aborted + * both flavors of IDENTIFYs which happens + * sometimes with phantom devices. + */ + printf("both IDENTIFYs aborted, assuming NODEV\n"); + return -ENOENT; + } + rc = -EIO; + reason = "I/O error"; + goto err_out; + } + + /* Falling back doesn't make sense if ID data was read + * successfully at least once. + */ + may_fallback = 0; + +#ifdef __BIG_ENDIAN + unsigned int id_cnt; + + for (id_cnt = 0; id_cnt < ATA_ID_WORDS; id_cnt++) + id[id_cnt] = le16_to_cpu(id[id_cnt]); + +#endif /* __BIG_ENDIAN */ + + /* sanity check */ + rc = -EINVAL; + reason = "device reports invalid type"; + + if (class == ATA_DEV_ATA) { + if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)){ + goto err_out; + } + } else { + if (ata_id_is_ata(id)){ + goto err_out; + } + } + if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { + tried_spinup = 1; + /* + * Drive powered-up in standby mode, and requires a specific + * SET_FEATURES spin-up subcommand before it will accept + * anything other than the original IDENTIFY command. + */ + err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); + if (err_mask && id[2] != 0x738c) { + rc = -EIO; + reason = "SPINUP failed"; + goto err_out; + } + /* + * If the drive initially returned incomplete IDENTIFY info, + * we now must reissue the IDENTIFY command. + */ + if (id[2] == 0x37c8) + goto retry; + } + + if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { + /* + * The exact sequence expected by certain pre-ATA4 drives is: + * SRST RESET + * IDENTIFY (optional in early ATA) + * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) + * anything else.. + * Some drives were very specific about that exact sequence. + * + * Note that ATA4 says lba is mandatory so the second check + * shoud never trigger. + */ + if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { + err_mask = ata_dev_init_params(dev, id[3], id[6]); + if (err_mask) { + rc = -EIO; + reason = "INIT_DEV_PARAMS failed"; + goto err_out; + } + + /* current CHS translation info (id[53-58]) might be + * changed. reread the identify device info. 
+ */ + flags &= ~ATA_READID_POSTRESET; + goto retry; + } + } + + *p_class = class; + return 0; + +err_out: + return rc; +} + +void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep) +{ + if (wait){ + ata_wait_idle(ap); + } + + ata_std_dev_select(ap, device); + + if (wait) { + ata_wait_idle(ap); + } +} + +void ata_std_dev_select(struct ata_port *ap, unsigned int device) +{ + u8 tmp; + + if (device == 0) { + tmp = ATA_DEVICE_OBS; + } else { + tmp = ATA_DEVICE_OBS | ATA_DEV1; + } + + writeb(tmp, ap->ioaddr.device_addr); + + /* needed; also flushes, for mmio */ + readb(ap->ioaddr.altstatus_addr); + + /* need delay 400nsec over */ + udelay(1); +} + +void ata_qc_reinit(struct ata_queued_cmd *qc) +{ + qc->dma_dir = DMA_NONE; + qc->flags = 0; + qc->nbytes = qc->extrabytes = qc->curbytes = 0; + qc->n_elem = 0; + qc->err_mask = 0; + qc->sect_size = ATA_SECT_SIZE; + qc->nbytes = ATA_SECT_SIZE * temp_n_block; + + memset(&qc->tf, 0, sizeof(qc->tf)); + qc->tf.ctl = 0; + qc->tf.device = ATA_DEVICE_OBS; + + /* init result_tf such that it indicates normal completion */ + qc->result_tf.command = ATA_DRDY; + qc->result_tf.feature = 0; +} + +unsigned ata_exec_internal(struct ata_device *dev, + struct ata_taskfile *tf, const u8 *cdb, + int dma_dir, unsigned int buflen, + unsigned long timeout) +{ + struct ata_link *link = dev->link; + struct ata_port *ap = pap; + struct ata_queued_cmd *qc; + unsigned int tag, preempted_tag; + u32 preempted_sactive, preempted_qc_active; + int preempted_nr_active_links; + unsigned int err_mask; + int rc = 0; + u8 status; + + /* cheack BSY = 0 */ + status = ata_busy_wait(ap, ATA_BUSY, 300000); + if (status & ATA_BUSY) { + printf("BSY = 0 check. timeout.\n"); + rc = FALSE; + return rc; + } + + /* no internal command while frozen */ + if (ap->pflags & ATA_PFLAG_FROZEN) { + return AC_ERR_SYSTEM; + } + + tag = ATA_TAG_INTERNAL; + + if (test_and_set_bit(tag, &ap->qc_allocated)){ + printf("test_and_set_bit failed\n"); + } + + qc = __ata_qc_from_tag(ap, tag); + qc->tag = tag; + qc->ap = ap; + qc->dev = dev; + + ata_qc_reinit(qc); + + preempted_tag = link->active_tag; + preempted_sactive = link->sactive; + preempted_qc_active = ap->qc_active; + preempted_nr_active_links = ap->nr_active_links; + link->active_tag = ATA_TAG_POISON; + link->sactive = 0; + ap->qc_active = 0; + ap->nr_active_links = 0; + + /* prepare & issue qc */ + qc->tf = *tf; + if (cdb) + memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); + qc->flags |= ATA_QCFLAG_RESULT_TF; + qc->dma_dir = dma_dir; + qc->private_data = 0; + + ata_qc_issue(qc); + + if (!timeout){ + timeout = ata_probe_timeout * 1000 / HZ; + } + + /* cheack BSY = 0 */ + status = ata_busy_wait(ap, ATA_BUSY, 30000); + if (status & ATA_BUSY) { + printf("BSY = 0 check. timeout.\n"); + printf("altstatus = 0x%x.\n",status); + qc->err_mask |= AC_ERR_OTHER; + return qc->err_mask; + } + + if(waiting_for_reg_state(ap->ioaddr.altstatus_addr,1000,0x8)){ + printf("DRQ = 1 check timeout 1000msec\n"); + u8 status = 0; + u8 errorStatus = 0; + + status = readb( ap->ioaddr.altstatus_addr); + if ((status&0x01) != 0) { + errorStatus = readb( ap->ioaddr.feature_addr); + printf("ERROR STATUS = 0x%x\n",errorStatus); + } + qc->err_mask |= AC_ERR_OTHER; + return qc->err_mask; + } + + status = ata_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + printf("BSY = 0 check. 
timeout.\n"); + qc->err_mask |= AC_ERR_OTHER; + return qc->err_mask; + } + + ata_pio_task(ap); + + if (!rc) { + if (qc->flags & ATA_QCFLAG_ACTIVE) { + qc->err_mask |= AC_ERR_TIMEOUT; + ata_port_freeze(ap); + } + } + + /* perform minimal error analysis */ + if (qc->flags & ATA_QCFLAG_FAILED) { + if (qc->result_tf.command & (ATA_ERR | ATA_DF)){ + qc->err_mask |= AC_ERR_DEV; + } + + if (!qc->err_mask){ + qc->err_mask |= AC_ERR_OTHER; + } + + if (qc->err_mask & ~AC_ERR_OTHER){ + qc->err_mask &= ~AC_ERR_OTHER; + } + } + + /* finish up */ + *tf = qc->result_tf; + err_mask = qc->err_mask; + ata_qc_free(qc); + link->active_tag = preempted_tag; + link->sactive = preempted_sactive; + ap->qc_active = preempted_qc_active; + ap->nr_active_links = preempted_nr_active_links; + + if (ap->flags & ATA_FLAG_DISABLED) { + err_mask |= AC_ERR_SYSTEM; + ap->flags &= ~ATA_FLAG_DISABLED; + } + + return err_mask; +} + +unsigned int ata_dev_set_feature(struct ata_device *dev, + u8 enable, u8 feature) +{ + struct ata_taskfile tf; + struct ata_port *ap; + ap = pap; + unsigned int err_mask; + + /* set up set-features taskfile */ + memset(&tf, 0, sizeof(tf)); + tf.ctl = ap->ctl; + + tf.device = ATA_DEVICE_OBS; + tf.command = ATA_CMD_SET_FEATURES; + tf.feature = enable; + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.protocol = ATA_PROT_NODATA; + tf.nsect = feature; + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0); + + return err_mask; +} + +unsigned int ata_dev_init_params(struct ata_device *dev, + u16 heads, u16 sectors) +{ + struct ata_taskfile tf; + struct ata_port *ap; + ap = pap; + unsigned int err_mask; + + /* Number of sectors per track 1-255. Number of heads 1-16 */ + if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) + return AC_ERR_INVALID; + + /* set up init dev params taskfile */ + memset(&tf, 0, sizeof(tf)); + tf.ctl = ap->ctl; + tf.device = ATA_DEVICE_OBS; + tf.command = ATA_CMD_INIT_DEV_PARAMS; + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.protocol = ATA_PROT_NODATA; + tf.nsect = sectors; + tf.device |= (heads - 1) & 0x0f; /* max head = num. 
of heads - 1 */ + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0); + + if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) + err_mask = 0; + + return err_mask; +} + +struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, + unsigned int tag) +{ + if (tag < ATA_MAX_QUEUE) + return &ap->qcmd[tag]; + return NULL; +} + +void ata_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; + u8 prot = qc->tf.protocol; + + if (ata_is_ncq(prot)) { + if (!link->sactive) + ap->nr_active_links++; + link->sactive |= 1 << qc->tag; + } else { + ap->nr_active_links++; + link->active_tag = qc->tag; + } + + qc->flags |= ATA_QCFLAG_ACTIVE; + ap->qc_active |= 1 << qc->tag; + + if (qc->dev->flags & ATA_DFLAG_SLEEPING) { + msleep(1); /* delay 1msec */ + return; + } + + qc->err_mask |= sata_dwc_qc_issue(qc); + if (qc->err_mask) + goto err; + + return; +err: + ata_qc_complete(qc); +} + +unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) +{ + ata_qc_issue_prot(qc); + return 0; +} + +void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) +{ + /* set command register */ + writeb(tf->command, ap->ioaddr.command_addr); + + /* read alternative status register */ + readb(ap->ioaddr.altstatus_addr); + + /* delay 400nsec over */ + udelay(1); +} + +void __ata_port_freeze(struct ata_port *ap) +{ + printf("set port freeze.\n"); + ap->pflags |= ATA_PFLAG_FROZEN; +} + +int ata_port_freeze(struct ata_port *ap) +{ + __ata_port_freeze(ap); + return 0; +} + +unsigned int ata_tag_internal(unsigned int tag) +{ + return tag == ATA_MAX_QUEUE - 1; +} + +void ata_qc_complete(struct ata_queued_cmd *qc) +{ + struct ata_device *dev = qc->dev; + if (qc->err_mask) + qc->flags |= ATA_QCFLAG_FAILED; + + if (qc->flags & ATA_QCFLAG_FAILED) { + if (!ata_tag_internal(qc->tag)) { + /* always fill result TF for failed qc */ + fill_result_tf(qc); + return; + } + } + /* read result TF if requested */ + if (qc->flags & ATA_QCFLAG_RESULT_TF) + fill_result_tf(qc); + + /* Some commands need post-processing after successful + * completion. 
+ */ + switch (qc->tf.command) { + case ATA_CMD_SET_FEATURES: + if (qc->tf.feature != SETFEATURES_WC_ON && + qc->tf.feature != SETFEATURES_WC_OFF) + break; + case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ + case ATA_CMD_SET_MULTI: /* multi_count changed */ + break; + + case ATA_CMD_SLEEP: + dev->flags |= ATA_DFLAG_SLEEPING; + break; + } + + __ata_qc_complete(qc); +} + +void __ata_qc_complete(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_link *link = qc->dev->link; + + /* command should be marked inactive atomically with qc completion */ + link->active_tag = ATA_TAG_POISON; + ap->nr_active_links--; + + /* clear exclusive status */ + if (qc->flags & ATA_QCFLAG_CLEAR_EXCL && ap->excl_link == link) + ap->excl_link = NULL; + + qc->flags &= ~ATA_QCFLAG_ACTIVE; + ap->qc_active &= ~(1 << qc->tag); +} + +void fill_result_tf(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + qc->result_tf.flags = qc->tf.flags; + ata_tf_read(ap, &qc->result_tf); +} + +void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->command = ata_check_status(ap); + tf->feature = readb(ioaddr->error_addr); + tf->nsect = readb(ioaddr->nsect_addr); + tf->lbal = readb(ioaddr->lbal_addr); + tf->lbam = readb(ioaddr->lbam_addr); + tf->lbah = readb(ioaddr->lbah_addr); + tf->device = readb(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + if (ioaddr->ctl_addr) { + writeb(tf->ctl | ATA_HOB, ioaddr->ctl_addr); + + tf->hob_feature = readb(ioaddr->error_addr); + tf->hob_nsect = readb(ioaddr->nsect_addr); + tf->hob_lbal = readb(ioaddr->lbal_addr); + tf->hob_lbam = readb(ioaddr->lbam_addr); + tf->hob_lbah = readb(ioaddr->lbah_addr); + + writeb(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; + } else { + printf("sata_dwc warnning register read.\n"); + } + } +} + +void ata_qc_free(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int tag; + qc->flags = 0; + tag = qc->tag; + if (tag < ATA_MAX_QUEUE){ + qc->tag = ATA_TAG_POISON; + clear_bit(tag, &ap->qc_allocated); + } +} + +void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + if (ioaddr->ctl_addr){ + writeb(tf->ctl, ioaddr->ctl_addr); + } + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + writeb(tf->hob_feature, ioaddr->feature_addr); + writeb(tf->hob_nsect, ioaddr->nsect_addr); + writeb(tf->hob_lbal, ioaddr->lbal_addr); + writeb(tf->hob_lbam, ioaddr->lbam_addr); + writeb(tf->hob_lbah, ioaddr->lbah_addr); + } + + if (is_addr) { + writeb(tf->feature, ioaddr->feature_addr); + writeb(tf->nsect, ioaddr->nsect_addr); + writeb(tf->lbal, ioaddr->lbal_addr); + writeb(tf->lbam, ioaddr->lbam_addr); + writeb(tf->lbah, ioaddr->lbah_addr); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + writeb(tf->device, ioaddr->device_addr); + } + + ata_wait_idle(ap); +} + +void sata_dwc_exec_command_by_tag(struct ata_port *ap, + struct ata_taskfile *tf, + u8 tag, u32 cmd_issued) +{ + struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); + hsdevp->cmd_issued[tag] = cmd_issued; + + /* Clear SError before executing a new command.*/ + clear_serror(); + ata_exec_command(ap, tf); +} + +void clear_serror(void) +{ + u32 temp; + temp = in_le32((void __iomem *)scr_addr_sstatus + 4); + + out_le32((void __iomem *)scr_addr_sstatus + 4, temp); +} + +void 
ata_tf_to_host(struct ata_port *ap, const struct ata_taskfile *tf) +{ + ata_tf_load(ap, tf); + ata_exec_command(ap, tf); +} + +unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + if (ap->flags & ATA_FLAG_PIO_POLLING) { + switch (qc->tf.protocol) { + case ATA_PROT_PIO: + case ATA_PROT_NODATA: + case ATAPI_PROT_PIO: + case ATAPI_PROT_NODATA: + qc->tf.flags |= ATA_TFLAG_POLLING; + break; + default: + break; + } + } + + /* select the device */ + ata_dev_select(ap, qc->dev->devno, 1, 0); + + /* start the command */ + switch (qc->tf.protocol) { + case ATA_PROT_PIO: + if (qc->tf.flags & ATA_TFLAG_POLLING){ + qc->tf.ctl |= ATA_NIEN; + } + + ata_tf_to_host(ap, &qc->tf); + + /* PIO data in protocol */ + ap->hsm_task_state = HSM_ST; + + if (qc->tf.flags & ATA_TFLAG_POLLING){ + ata_pio_queue_task(ap, qc, 0); + } + + break; + + default: + return AC_ERR_SYSTEM; + } + + return 0; +} + +void ata_pio_task(struct ata_port *arg_ap) +{ + struct ata_port *ap = arg_ap; + struct ata_queued_cmd *qc = ap->port_task_data; + u8 status; + int poll_next; + +fsm_start: + /* + * This is purely heuristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, queue delayed work. + */ + status = ata_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { + msleep(2); + status = ata_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE); + return; + } + } + + /* move the HSM */ + poll_next = ata_hsm_move(ap, qc, status, 1); + + /* another command or interrupt handler + * may be running at this point. + */ + if (poll_next) + goto fsm_start; +} + +void ata_pio_queue_task(struct ata_port *ap, void *data,unsigned long delay) +{ + ap->port_task_data = data; +} + +unsigned int ac_err_mask(u8 status) +{ + if (status & (ATA_BUSY | ATA_DRQ)) + return AC_ERR_HSM; + if (status & (ATA_ERR | ATA_DF)) + return AC_ERR_DEV; + return 0; +} + +unsigned int __ac_err_mask(u8 status) +{ + unsigned int mask = ac_err_mask(status); + if (mask == 0) + return AC_ERR_OTHER; + return mask; +} + +int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, + u8 status, int in_wq) +{ + int poll_next; + +fsm_start: + switch (ap->hsm_task_state) { + case HSM_ST_FIRST: + poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); + + /* check device status */ + if ((status & ATA_DRQ) == 0) { + /* handle BSY=0, DRQ=0 as error */ + if (status & (ATA_ERR | ATA_DF)) { + /* device stops HSM for abort/error */ + qc->err_mask |= AC_ERR_DEV; + } else { + /* HSM violation. Let EH handle this */ + qc->err_mask |= AC_ERR_HSM; + } + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + /* Device should not ask for data transfer (DRQ=1) + * when it finds something wrong. + * We ignore DRQ here and stop the HSM by + * changing hsm_task_state to HSM_ST_ERR and + * let the EH abort the command or reset the device. + */ + if (status & (ATA_ERR | ATA_DF)) { + if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { + printf("DRQ=1 with device error, " + "dev_stat 0x%X\n", status); + qc->err_mask |= AC_ERR_HSM; + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + } + + if (qc->tf.protocol == ATA_PROT_PIO) { + /* PIO data out protocol. + * send first data block. + */ + /* ata_pio_sectors() might change the state + * to HSM_ST_LAST. so, the state is changed here + * before ata_pio_sectors(). 
+ */ + ap->hsm_task_state = HSM_ST; + ata_pio_sectors(qc); + } else { + printf("protocol is not ATA_PROT_PIO \n"); + } + break; + + case HSM_ST: + /* complete command or read/write the data register */ + /* ATA PIO protocol */ + if ((status & ATA_DRQ) == 0) { + /* handle BSY=0, DRQ=0 as error */ + if (status & (ATA_ERR | ATA_DF)){ + /* device stops HSM for abort/error */ + qc->err_mask |= AC_ERR_DEV; + } else { + /* HSM violation. Let EH handle this. + * Phantom devices also trigger this + * condition. Mark hint. + */ + qc->err_mask |= AC_ERR_HSM | AC_ERR_NODEV_HINT; + } + + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + /* For PIO reads, some devices may ask for + * data transfer (DRQ=1) alone with ERR=1. + * We respect DRQ here and transfer one + * block of junk data before changing the + * hsm_task_state to HSM_ST_ERR. + * + * For PIO writes, ERR=1 DRQ=1 doesn't make + * sense since the data block has been + * transferred to the device. + */ + if (status & (ATA_ERR | ATA_DF)) { + /* data might be corrputed */ + qc->err_mask |= AC_ERR_DEV; + + if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { + ata_pio_sectors(qc); + status = ata_wait_idle(ap); + } + + if (status & (ATA_BUSY | ATA_DRQ)){ + qc->err_mask |= AC_ERR_HSM; + } + + /* ata_pio_sectors() might change the + * state to HSM_ST_LAST. so, the state + * is changed after ata_pio_sectors(). + */ + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + ata_pio_sectors(qc); + if (ap->hsm_task_state == HSM_ST_LAST && + (!(qc->tf.flags & ATA_TFLAG_WRITE))) { + /* all data read */ + status = ata_wait_idle(ap); + goto fsm_start; + } + + poll_next = 1; + break; + + case HSM_ST_LAST: + if (!ata_ok(status)) { + qc->err_mask |= __ac_err_mask(status); + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + /* no more data to transfer */ + ap->hsm_task_state = HSM_ST_IDLE; + + /* complete taskfile transaction */ + ata_hsm_qc_complete(qc, in_wq); + + poll_next = 0; + break; + + case HSM_ST_ERR: + /* make sure qc->err_mask is available to + * know what's wrong and recover + */ + ap->hsm_task_state = HSM_ST_IDLE; + + /* complete taskfile transaction */ + ata_hsm_qc_complete(qc, in_wq); + + poll_next = 0; + break; + default: + poll_next = 0; + } + + return poll_next; +} + +int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + if (qc->tf.flags & ATA_TFLAG_POLLING) + return 1; + + if (ap->hsm_task_state == HSM_ST_FIRST) { + if (ata_is_atapi(qc->tf.protocol) && + !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + return 1; + } + + return 0; +} + +void ata_pio_sectors(struct ata_queued_cmd *qc) +{ + struct ata_port *ap; + ap = pap; + qc->pdata = ap->pdata; + + if (is_multi_taskfile(&qc->tf)) { + /* READ/WRITE MULTIPLE */ + unsigned int nsect; + + nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, + qc->dev->multi_count); + while (nsect--) + ata_pio_sector(qc); + } else { + ata_pio_sector(qc); + } + + readb(qc->ap->ioaddr.altstatus_addr); + udelay(1); +} + +void ata_pio_sector(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int offset; + unsigned char *buf; + + if (qc->curbytes == qc->nbytes - qc->sect_size){ + ap->hsm_task_state = HSM_ST_LAST; + } + + offset = qc->curbytes; + + /* check cmd and set buffer */ + switch (qc->tf.command) { + case ATA_CMD_ID_ATA: /* IDENTIFY */ + buf = &ata_device.id[0]; + break; + case ATA_CMD_PIO_READ_EXT: /* READ SECTORS EX 0x24 */ + case ATA_CMD_PIO_READ: /* READ SECTORS 0x20 */ + buf = qc->pdata + offset; + break; + default: + buf = &temp_data_buf[0]; + } + + /* data xfer 
*/ + ata_mmio_data_xfer(qc->dev, buf, qc->sect_size); + + qc->curbytes += qc->sect_size; + +} + +void ata_mmio_data_xfer(struct ata_device *dev, unsigned char *buf, + unsigned int buflen) +{ + struct ata_port *ap = pap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned int words = buflen >> 1; + u16 *buf16 = (u16 *)buf; + unsigned int i = 0; + + udelay(100); + /* Transfer */ + for(i=0; i < words; i++) + buf16[i] = cpu_to_le16(readw(data_addr)); + + /* Transfer trailing 1 byte, if any. */ + if (buflen & 0x01) { + __le16 align_buf[1] = { 0 }; + unsigned char *trailing_buf = buf + buflen - 1; + + align_buf[0] = cpu_to_le16(readw(data_addr)); + memcpy(trailing_buf, align_buf, 1); + words++; + } +} + +void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) +{ + struct ata_port *ap = qc->ap; + + if (in_wq) { + /* EH might have kicked in while host lock is + * released. + */ + qc = &ap->qcmd[qc->tag]; + if (qc) { + if (!(qc->err_mask & AC_ERR_HSM)) { + ata_irq_on(ap); + ata_qc_complete(qc); + } else { + ata_port_freeze(ap); + } + } + } else { + if (!(qc->err_mask & AC_ERR_HSM)) { + ata_qc_complete(qc); + } else { + ata_port_freeze(ap); + } + } +} + +u8 ata_irq_on(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 tmp; + + ap->ctl &= ~ATA_NIEN; + ap->last_ctl = ap->ctl; + + if (ioaddr->ctl_addr) + writeb(ap->ctl, ioaddr->ctl_addr); + + tmp = ata_wait_idle(ap); + + return tmp; +} + +void sata_dwc_probe(void) +{ + struct sata_dwc_device hsdev; + struct ata_host host; + struct ata_port_info pi = sata_dwc_port_info[0]; + struct ata_device *ata_dev = &ata_device; + struct ata_link *link; + struct sata_dwc_device_port hsdevp = dwc_devp; + u8 *base = 0; + u8 *sata_dma_regs_addr = 0; + u32 idr, versionr; + u8 status; + unsigned long base_addr = 0; + int chan = 0; + int rc; + int i; + const u16 *id; + unsigned long xfer_mask; + char revbuf[7]; /* XYZ-99\0 */ + unsigned long pio_mask, mwdma_mask, udma_mask; + + phost = &host; /* set grobal */ + + /* Base address */ + base = (u8*)SATA_BASE_ADDR; + + /* SATA register init */ + hsdev.sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET); + + host.n_ports = SATA_DWC_MAX_PORTS; + + for(i = 0; i < SATA_DWC_MAX_PORTS; i++){ + ap.pflags |= ATA_PFLAG_INITIALIZING; + ap.flags = ATA_FLAG_DISABLED; + ap.print_id = -1; + ap.ctl = ATA_DEVCTL_OBS; + ap.host = &host; + ap.last_ctl = 0xFF; + ap.dev = &ata_device; + + link = &ap.link; + link->ap = ≈ + link->pmp = 0; + link->active_tag = ATA_TAG_POISON; + link->hw_sata_spd_limit = 0; + + ap.port_no = i; + host.ports[i] = ≈ + } + + ap.pio_mask = pi.pio_mask; + ap.mwdma_mask = pi.mwdma_mask; + ap.udma_mask = pi.udma_mask; + ap.flags |= pi.flags; + ap.link.flags |= pi.link_flags; + + /* Host port init */ + host.ports[0]->ioaddr.cmd_addr = base; + host.ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; + scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET; + + /* sata_dwc setup port */ + base_addr = (unsigned long)base; + + host.ports[0]->ioaddr.cmd_addr = (void *)base_addr + 0x00; + host.ports[0]->ioaddr.data_addr = (void *)base_addr + 0x00; + + host.ports[0]->ioaddr.error_addr = (void *)base_addr + 0x04; + host.ports[0]->ioaddr.feature_addr = (void *)base_addr + 0x04; + + host.ports[0]->ioaddr.nsect_addr = (void *)base_addr + 0x08; + + host.ports[0]->ioaddr.lbal_addr = (void *)base_addr + 0x0c; + host.ports[0]->ioaddr.lbam_addr = (void *)base_addr + 0x10; + host.ports[0]->ioaddr.lbah_addr = (void *)base_addr + 0x14; + + host.ports[0]->ioaddr.device_addr = (void *)base_addr + 0x18; + 
host.ports[0]->ioaddr.command_addr = (void *)base_addr + 0x1c; + host.ports[0]->ioaddr.status_addr = (void *)base_addr + 0x1c; + + host.ports[0]->ioaddr.altstatus_addr = (void *)base_addr + 0x20; + host.ports[0]->ioaddr.ctl_addr = (void *)base_addr + 0x20; + + /* Get Host ID / Version */ + idr = in_le32(&hsdev.sata_dwc_regs->idr); + versionr = in_le32(&hsdev.sata_dwc_regs->versionr); + + /* DMA register init */ + sata_dma_regs_addr = (u8*)SATA_DMA_REG_ADDR; + sata_dma_regs = (void *__iomem)sata_dma_regs_addr; + + /* DMA channel Interrupt is disable. DMA Enable setup is next. */ + for(chan = 0; chan < DMA_NUM_CHANS; chan++){ + /* DMA Interrupt register(error) mask. */ + out_le32(&(sata_dma_regs->interrupt_mask.error.low), + DMA_DISABLE_CHAN(chan)); + + /* DMA Interrupt register(transfer) mask. */ + out_le32(&(sata_dma_regs->interrupt_mask.tfr.low), + DMA_DISABLE_CHAN(chan)); + } + + /* DMA Enable by DMAC Configuration Register */ + out_le32(&(sata_dma_regs->dma_cfg.low), DMA_EN); + + /* Enable selective interrupts by setting the interrupt mask register */ + out_le32(&hsdev.sata_dwc_regs->intmr, + SATA_DWC_INTMR_ERRM | + SATA_DWC_INTMR_NEWFPM | + SATA_DWC_INTMR_PMABRTM | + SATA_DWC_INTMR_DMATM); + + /* Unmask the error bits that should trigger an error interrupt by + * setting the error mask register. + */ + out_le32(&hsdev.sata_dwc_regs->errmr,SATA_DWC_SERROR_ERR_BITS); + + hsdev.host = ap.host; + memset(&hsdevp, 0, sizeof(hsdevp)); + hsdevp.hsdev = &hsdev; + + for (i = 0; i < SATA_DWC_QCMD_MAX; i++) + hsdevp.cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; + + if (ap.port_no == 0) { + out_le32(&hsdev.sata_dwc_regs->dmacr,SATA_DWC_DMACR_TXRXCH_CLEAR); + + out_le32(&hsdev.sata_dwc_regs->dbtsr, + (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | + SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); + } + + /* Clear any error bits before libata starts issuing commands */ + out_le32((void __iomem *)scr_addr_sstatus + 4, + in_le32((void __iomem *)scr_addr_sstatus + 4)); + + /* check altstatus register (find device) */ + status = ata_check_altstatus(&ap); + + if(status == 0x7f){ + printf("Hard Disk not found.\n"); + dev_state = SATA_NODEVICE; + return; + } + + /* waiting for device ready. time out 30sec */ + printf("waitng for device ready."); + i = 0; + while(1){ + udelay (10000); /* 10 ms */ + + /* read altstatus */ + status = ata_check_altstatus(&ap); + + /* status is not busy => break */ + if((status & ATA_BUSY) == 0){ + printf("\n"); + break; + } + + i++; + if (i > (ATA_RESET_TIME * 100)) { + printf("** TimeOUT **\n"); + + /* bussy set devise state flag */ + dev_state = SATA_NODEVICE; + return; + } + if ((i >= 100) && ((i%100)==0)) { + printf("."); + } + } + + /* softreset */ + rc = sata_dwc_softreset(&ap); + + if(rc){ + printf("sata_dwc : error. soft reset failed\n"); + } + + /* waiting for device ready. time out 30sec */ + printf("waitng for device ready."); + i = 0; + while(1){ + udelay (10000); /* 10 ms */ + + /* read altstatus */ + status = ata_check_altstatus(&ap); + + /* status is not busy => break */ + if((status & ATA_BUSY) == 0){ + printf("\n"); + break; + } + + i++; + if (i > (ATA_RESET_TIME * 100)) { + printf("** TimeOUT **\n"); + + /* bussy set devise state flag */ + dev_state = SATA_NODEVICE; + return; + } + if ((i >= 100) && ((i%100)==0)) { + printf("."); + } + } + + udelay (1000); /* before IDENTIFY command safety delay 1 ms */ + + /* IDENTIFY command send */ + rc = ata_dev_read_id(ata_dev, &ata_dev->class, + ATA_READID_POSTRESET,ata_dev->id); + + if(rc){ + printf("sata_dwc : error. 
IDENTIFY Command failed\n"); + } + + /* SATA drives indicate we have a bridge. We don't know which + * end of the link the bridge is which is a problem + */ + if (ata_id_is_sata(ata_dev->id)) + ap.cbl = ATA_CBL_SATA; + + id = ata_dev->id; + + /* initialize to be configured parameters */ + ata_dev->flags &= ~ATA_DFLAG_CFG_MASK; + ata_dev->max_sectors = 0; + ata_dev->cdb_len = 0; + ata_dev->n_sectors = 0; + ata_dev->cylinders = 0; + ata_dev->heads = 0; + ata_dev->sectors = 0; + + /* Usual case. Word 53 indicates word 64 is valid */ + if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { + pio_mask = id[ATA_ID_PIO_MODES] & 0x03; + pio_mask <<= 3; + pio_mask |= 0x7; + } else { + /* If word 64 isn't valid then Word 51 high byte holds + * the PIO timing number for the maximum. Turn it into + * a mask. + */ + u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; + if (mode < 5){ /* Valid PIO range */ + pio_mask = (2 << mode) - 1; + } else { + pio_mask = 1; + } + } + + mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07; + + if (ata_id_is_cfa(id)) { + /* + * Process compact flash extended modes + */ + int pio = id[163] & 0x7; + int dma = (id[163] >> 3) & 7; + + if (pio) + pio_mask |= (1 << 5); + if (pio > 1) + pio_mask |= (1 << 6); + if (dma) + mwdma_mask |= (1 << 3); + if (dma > 1) + mwdma_mask |= (1 << 4); + } + + udma_mask = 0; + if (id[ATA_ID_FIELD_VALID] & (1 << 2)) + udma_mask = id[ATA_ID_UDMA_MODES] & 0xff; + + xfer_mask = (pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO | + (mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA | + (udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA; + + /* ATA-specific feature tests */ + if (ata_dev->class == ATA_DEV_ATA) { + if (ata_id_is_cfa(id)) { + if (id[162] & 1) /* CPRM may make this media unusable */ + printf("supports DRM functions and may " + "not be fully accessable.\n"); + sprintf(revbuf, "%s", "CFA"); + } else { + /* Warn the user if the device has TPM extensions */ + if (ata_id_has_tpm(id)) + printf("supports DRM functions and may " + "not be fully accessable.\n"); + } + + /* set n_secters */ + ata_dev->n_sectors = ata_id_n_sectors((u16*)id); + + if (ata_dev->id[59] & 0x100) + ata_dev->multi_count = ata_dev->id[59] & 0xff; + + if (ata_id_has_lba(id)) { + const char *lba_desc; + char ncq_desc[20]; + + lba_desc = "LBA"; + ata_dev->flags |= ATA_DFLAG_LBA; + if (ata_id_has_lba48(id)) { + ata_dev->flags |= ATA_DFLAG_LBA48; + lba_desc = "LBA48"; + + if (ata_dev->n_sectors >= (1UL << 28) && + ata_id_has_flush_ext(id)) + ata_dev->flags |= ATA_DFLAG_FLUSH_EXT; + } + /* config NCQ */ + if (!ata_id_has_ncq(ata_dev->id)) { + ncq_desc[0] = '\0'; + } + + if (ata_dev->horkage & ATA_HORKAGE_NONCQ) { + sprintf(ncq_desc, "%s", "NCQ (not used)"); + } + + if (ap.flags & ATA_FLAG_NCQ) { + ata_dev->flags |= ATA_DFLAG_NCQ; + } + } + ata_dev->cdb_len = 16; + } + /* determine max_sectors */ + ata_dev->max_sectors = ATA_MAX_SECTORS; + if (ata_dev->flags & ATA_DFLAG_LBA48) + ata_dev->max_sectors = ATA_MAX_SECTORS_LBA48; + + if (!(ata_dev->horkage & ATA_HORKAGE_IPM)) { + if (ata_id_has_hipm(ata_dev->id)) + ata_dev->flags |= ATA_DFLAG_HIPM; + if (ata_id_has_dipm(ata_dev->id)) + ata_dev->flags |= ATA_DFLAG_DIPM; + } + + if ((ap.cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ata_dev->id))) { + ata_dev->udma_mask &= ATA_UDMA5; + ata_dev->max_sectors = ATA_MAX_SECTORS; + } + + if (ata_dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { + printf("Drive reports diagnostics failure." + "This may indicate a drive\n"); + printf("fault or invalid emulation." 
+ "Contact drive vendor for information.\n"); + } + + /* Hard Disk status check : test send READ Command */ + rc = check_sata_dev_state(); +} + +int sata_dwc_softreset(struct ata_port *ap) +{ + u8 nsect,lbal = 0; + u8 tmp = 0; + u32 serror = 0; + u8 status = 0; + struct ata_ioports *ioaddr = &ap->ioaddr; + + /* read SCR ERROR */ + serror = in_le32((void *)ap->ioaddr.scr_addr + (SCR_ERROR * 4)); + + /* device check */ + writeb(0x55, ioaddr->nsect_addr); + writeb(0xaa, ioaddr->lbal_addr); + writeb(0xaa, ioaddr->nsect_addr); + writeb(0x55, ioaddr->lbal_addr); + writeb(0x55, ioaddr->nsect_addr); + writeb(0xaa, ioaddr->lbal_addr); + + nsect = readb(ioaddr->nsect_addr); + lbal = readb(ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)){ + /* we found a device */ + printf("we found a device\n"); + } else { + /* nothing found */ + printf("Not found a device.\n"); + dev_state = SATA_NODEVICE; + return FALSE; + } + + tmp = ATA_DEVICE_OBS; + writeb(tmp,ioaddr->device_addr); + writeb(ap->ctl,ioaddr->ctl_addr); + + udelay(200); + + /* set softreset */ + writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + + udelay(200); + writeb(ap->ctl,ioaddr->ctl_addr); + + /* wait a while before cheking status */ + msleep(150); + status = ata_check_status(ap); + + msleep(50); + ata_check_status(ap); + + while (1) { + u8 status = ata_check_status(ap); + + if (!(status & ATA_BUSY)){ + break; + } + + status = in_le32((void *)ap->ioaddr.scr_addr + (SCR_STATUS * 4)); + if(status == 0xff || (status & 0xf) != 0x3){ + } + + printf("Hard Disk status is BUSY.\n"); + msleep(50); + } + + tmp = ATA_DEVICE_OBS; + writeb(tmp,ioaddr->device_addr); + + nsect = readb(ioaddr->nsect_addr); + lbal = readb(ioaddr->lbal_addr); + + return 0; +} + +void scsi_bus_reset() +{ + struct ata_port *ap = pap; + + /* check device state */ + if(dev_state != SATA_READY){ + printf("no devices available\n"); + return; + } + + /*soft reset process*/ + sata_dwc_softreset(ap); +} + +void scsi_print_error(ccb * pccb) +{ + /*The ahci error info can be read in the ahci driver*/ +} + +int ata_dev_read_sectors(ccb *pccb) +{ + struct ata_port *ap = pap; + struct ata_device *dev = &ata_device; + struct ata_taskfile tf; + unsigned int class = ATA_DEV_ATA; + unsigned int err_mask = 0; + const char *reason; + int may_fallback = 1; + int rc; + + u32 block=0; + u32 n_block=0; + + ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ + +retry: + memset(&tf, 0, sizeof(tf)); + tf.ctl = ap->ctl; /*ap->ctl = 0x08 */ + ap->print_id = 1; + ap->flags &= ~ATA_FLAG_DISABLED; + + /* set pdata */ + ap->pdata = pccb->pdata; + + tf.device = ATA_DEVICE_OBS; + + block |= ((u32)pccb->cmd[2]) << 24; + block |= ((u32)pccb->cmd[3]) << 16; + block |= ((u32)pccb->cmd[4]) << 8; + block |= ((u32)pccb->cmd[5]); + + n_block |= ((u32)pccb->cmd[7]) << 8; + n_block |= ((u32)pccb->cmd[8]); + + /* trying temp to n_block */ + temp_n_block = n_block; + + +#ifdef CONFIG_LBA48 + tf.command = ATA_CMD_PIO_READ_EXT; /* READ SECTOR(S) EXT 0x24 */ + tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; + + tf.hob_feature = 31; + tf.feature = 31; + tf.hob_nsect = (n_block >> 8) & 0xff; + tf.nsect = n_block & 0xff; + + tf.hob_lbah = 0x0; + tf.hob_lbam = 0x0; + tf.hob_lbal = (block >> 24) & 0xff; + tf.lbah = (block >> 16) & 0xff; + tf.lbam = (block >> 8) & 0xff; + tf.lbal = block & 0xff; + + tf.device = 1 << 6; + if (tf.flags & ATA_TFLAG_FUA) + tf.device |= 1 << 7; +#else /* LBA24 */ + tf.command = ATA_CMD_PIO_READ; /* READ SECTOR(S) 0x20 */ + tf.flags |= ATA_TFLAG_LBA ; + + tf.feature = 31; + tf.nsect = 
n_block & 0xff; + + tf.lbah = (block >> 16) & 0xff; + tf.lbam = (block >> 8) & 0xff; + tf.lbal = block & 0xff; + + tf.device = (block >> 24) & 0xf; + + tf.device |= 1 << 6; + if (tf.flags & ATA_TFLAG_FUA) + tf.device |= 1 << 7; + +#endif /* CONFIG_LBA48 */ + + tf.protocol = ATA_PROT_PIO; + + /* Some devices choke if TF registers contain garbage. Make + * sure those are properly initialized. + */ + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.flags |= ATA_TFLAG_POLLING; + + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,0, 0); + + if (err_mask) { + if (err_mask & AC_ERR_NODEV_HINT) { + printf("READ_SECTORS NODEV after polling detection\n"); + return -ENOENT; + } + + if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { + /* Device or controller might have reported + * the wrong device class. Give a shot at the + * other IDENTIFY if the current one is + * aborted by the device. + */ + if (may_fallback) { + may_fallback = 0; + + if (class == ATA_DEV_ATA) { + class = ATA_DEV_ATAPI; + } else { + class = ATA_DEV_ATA; + } + goto retry; + } + /* Control reaches here iff the device aborted + * both flavors of IDENTIFYs which happens + * sometimes with phantom devices. + */ + printf("both IDENTIFYs aborted, assuming NODEV\n"); + return -ENOENT; + } + + rc = -EIO; + reason = "I/O error"; + goto err_out; + } + + /* Falling back doesn't make sense if ID data was read + * successfully at least once. + */ + may_fallback = 0; + + /* sanity check */ + rc = -EINVAL; + reason = "device reports invalid type"; + + return 0; + +err_out: + printf("failed to READ SECTORS (%s, err_mask=0x%x)\n", reason, err_mask); + return rc; +} diff -purN u-boot-2009.03/drivers/block/sata_dwc.h u-boot-2009.03-sata/drivers/block/sata_dwc.h --- u-boot-2009.03/drivers/block/sata_dwc.h 1970-01-01 09:00:00.000000000 +0900 +++ u-boot-2009.03-sata/drivers/block/sata_dwc.h 2009-03-25 17:27:01.000000000 +0900 @@ -0,0 +1,496 @@ +/* + * sata_dwc.h + * + * Synopsys DesignWare Cores (DWC) SATA host driver + * + * Author: Mark Miesfeld + * + * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese + * Copyright 2008 DENX Software Engineering + * + * Based on versions provided by AMCC and Synopsys which are: + * Copyright 2006 Applied Micro Circuits Corporation + * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED + * + * This program is free software; you can redistribute + * it and/or modify it under the terms of the GNU + * General Public License as published by the + * Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
+ * + */ + +#ifndef _SATA_DWC_H_ +#define _SATA_DWC_H_ + +#define HZ 100 + +#define READ 0 +#define WRITE 1 + +static int ata_id_has_hipm(const u16 *id) +{ + u16 val = id[76]; + + if (val == 0 || val == 0xffff) + return -1; + + return val & (1 << 9); +} + +static int ata_id_has_dipm(const u16 *id) +{ + u16 val = id[78]; + + if (val == 0 || val == 0xffff) + return -1; + + return val & (1 << 3); +} + +enum { + ATA_READID_POSTRESET = (1 << 0), /* reading ID after reset */ + + ATA_DNXFER_PIO = 0, /* speed down PIO */ + ATA_DNXFER_DMA = 1, /* speed down DMA */ + ATA_DNXFER_40C = 2, /* apply 40c cable limit */ + ATA_DNXFER_FORCE_PIO = 3, /* force PIO */ + ATA_DNXFER_FORCE_PIO0 = 4, /* force PIO0 */ + + ATA_DNXFER_QUIET = (1 << 31), +}; + +enum hsm_task_states { + HSM_ST_IDLE, /* no command on going */ + HSM_ST_FIRST, /* (waiting the device to) + * write CDB or first data block */ + HSM_ST, /* (waiting the device to) transfer data */ + HSM_ST_LAST, /* (waiting the device to) complete command */ + HSM_ST_ERR, /* error */ +}; + +#define ATA_SHORT_PAUSE ((HZ >> 6) + 1) + +typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + + struct ata_taskfile tf; + u8 cdb[ATAPI_CDB_LEN]; + + unsigned long flags; /* ATA_QCFLAG_xxx */ + unsigned int tag; + unsigned int n_elem; + + int dma_dir; + + unsigned int sect_size; + + unsigned int nbytes; + unsigned int extrabytes; + unsigned int curbytes; + + unsigned int err_mask; + struct ata_taskfile result_tf; + + void *private_data; + void *lldd_task; + unsigned char *pdata; +}; + +/* defines only for the constants which don't work cell as enums*/ +#define ATA_TAG_POISON 0xfafbfcfdU + +enum { + /* various global constants */ + LIBATA_MAX_PRD = ATA_MAX_PRD / 2, + LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */ + ATA_MAX_PORTS = 8, + ATA_DEF_QUEUE = 1, + /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */ + ATA_MAX_QUEUE = 32, + ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1, + ATA_MAX_BUS = 2, + ATA_DEF_BUSY_WAIT = 10000, + + ATAPI_MAX_DRAIN = 16 << 10, + + ATA_SHT_EMULATED = 1, + ATA_SHT_CMD_PER_LUN = 1, + ATA_SHT_THIS_ID = -1, + ATA_SHT_USE_CLUSTERING = 1, + + /* struct ata_device stuff */ + ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ + ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ + ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ */ + ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */ + ATA_DFLAG_FLUSH_EXT = (1 << 4), /* do FLUSH_EXT instead of FLUSH */ + ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ + ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ + ATA_DFLAG_AN = (1 << 7), /* AN configured */ + ATA_DFLAG_HIPM = (1 << 8), /* device supports HIPM */ + ATA_DFLAG_DIPM = (1 << 9), /* device supports DIPM */ + ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ + ATA_DFLAG_CFG_MASK = (1 << 12) - 1, + + ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ + ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ + ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ + ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ + ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ + ATA_DFLAG_INIT_MASK = (1 << 24) - 1, + + ATA_DFLAG_DETACH = (1 << 24), + ATA_DFLAG_DETACHED = (1 << 25), + + /* struct ata_link flags */ + ATA_LFLAG_HRST_TO_RESUME = (1 << 0), /* hardreset to resume link */ + ATA_LFLAG_SKIP_D2H_BSY = (1 << 1), /* can't wait for the first D2H + * Register FIS 
+	ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */
+	ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */
+	ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */
+	ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
+	ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */
+	ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */
+
+	/* struct ata_port flags */
+	ATA_FLAG_SLAVE_POSS = (1 << 0),
+	ATA_FLAG_SATA = (1 << 1),
+	ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
+	ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
+	ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
+	ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
+	ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
+	ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
+	ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
+	ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
+					  * doesn't handle PIO interrupts */
+	ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */
+	ATA_FLAG_DEBUGMSG = (1 << 13),
+	ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
+	ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */
+	ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
+	ATA_FLAG_AN = (1 << 18), /* controller supports AN */
+	ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
+	ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */
+
+	ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */
+
+	/* struct ata_port pflags */
+	ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
+	ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
+	ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
+	ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
+	ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
+	ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
+	ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
+	ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */
+	ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */
+	ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
+	ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
+
+	/* struct ata_queued_cmd flags */
+	ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
+	ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
+	ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
+	ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
+	ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
+	ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
+
+	ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
+	ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
+	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
+
+	/* host set flags */
+	ATA_HOST_SIMPLEX = (1 << 0),
+	ATA_HOST_STARTED = (1 << 1), /* Host started */
+
+	/* various lengths of time */
+	ATA_TMOUT_BOOT = 30 * 100, /* heuristic */
+	ATA_TMOUT_BOOT_QUICK = 7 * 100, /* heuristic */
+	ATA_TMOUT_INTERNAL = 30 * 100,
+	ATA_TMOUT_INTERNAL_QUICK = 5 * 100,
+
+	/* FIXME: GoVault needs 2s but we can't afford that without
+	 * parallel probing. 800ms is enough for iVDR disk
+	 * HHD424020F7SV00. Increase to 2secs when parallel probing
+	 * is in place.
+	 */
+	ATA_TMOUT_FF_WAIT = 4 * 100 / 5,
+
+	/* ATA bus states */
+	BUS_UNKNOWN = 0,
+	BUS_DMA = 1,
+	BUS_IDLE = 2,
+	BUS_NOINTR = 3,
+	BUS_NODATA = 4,
+	BUS_TIMER = 5,
+	BUS_PIO = 6,
+	BUS_EDD = 7,
+	BUS_IDENTIFY = 8,
+	BUS_PACKET = 9,
+
+	/* SATA port states */
+	PORT_UNKNOWN = 0,
+	PORT_ENABLED = 1,
+	PORT_DISABLED = 2,
+
+	/* encoding various smaller bitmaps into a single
+	 * unsigned long bitmap
+	 */
+	ATA_NR_PIO_MODES = 7,
+	ATA_NR_MWDMA_MODES = 5,
+	ATA_NR_UDMA_MODES = 8,
+
+	ATA_SHIFT_PIO = 0,
+	ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES,
+	ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES,
+
+	/* size of buffer to pad xfers ending on unaligned boundaries */
+	ATA_DMA_PAD_SZ = 4,
+
+	/* ering size */
+	ATA_ERING_SIZE = 32,
+
+	ATA_DEFER_LINK = 1,
+	ATA_DEFER_PORT = 2,
+
+	/* desc_len for ata_eh_info and context */
+	ATA_EH_DESC_LEN = 80,
+
+	/* reset / recovery action types */
+	ATA_EH_REVALIDATE = (1 << 0),
+	ATA_EH_SOFTRESET = (1 << 1),
+	ATA_EH_HARDRESET = (1 << 2),
+	ATA_EH_ENABLE_LINK = (1 << 3),
+	ATA_EH_LPM = (1 << 4), /* link power management action */
+
+	ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
+	ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
+
+	/* ata_eh_info->flags */
+	ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
+	ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
+	ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
+	ATA_EHI_QUIET = (1 << 3), /* be quiet */
+
+	ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
+	ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */
+	ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
+	ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
+	ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
+
+	ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
+	ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
+
+	/* max tries if error condition is still set after ->error_handler */
+	ATA_EH_MAX_TRIES = 5,
+
+	/* how hard are we gonna try to probe/recover devices */
+	ATA_PROBE_MAX_TRIES = 3,
+	ATA_EH_DEV_TRIES = 3,
+	ATA_EH_PMP_TRIES = 5,
+	ATA_EH_PMP_LINK_TRIES = 3,
+
+	SATA_PMP_SCR_TIMEOUT = 250,
+
+	/* Horkage types. May be set by libata or controller on drives
+	   (some horkage may be drive/controller pair dependent) */
+
+	ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
+	ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
+	ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
+	ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
+	ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
+	ATA_HORKAGE_SKIP_PM = (1 << 5), /* Skip PM operations */
+	ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
+	ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */
+	ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
+	ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
+
+	/* DMA mask for user DMA control: User visible values; DO NOT renumber */
+	ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
+	ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
+	ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
+
+	/* ATAPI command types */
+	ATAPI_READ = 0, /* READs */
+	ATAPI_WRITE = 1, /* WRITEs */
+	ATAPI_READ_CD = 2, /* READ CD [MSF] */
+	ATAPI_PASS_THRU = 3, /* SAT pass-thru */
+	ATAPI_MISC = 4, /* the rest */
+};
+
+enum ata_completion_errors {
+	AC_ERR_DEV = (1 << 0), /* device reported error */
+	AC_ERR_HSM = (1 << 1), /* host state machine violation */
+	AC_ERR_TIMEOUT = (1 << 2), /* timeout */
+	AC_ERR_MEDIA = (1 << 3), /* media error */
+	AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
+	AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
+	AC_ERR_SYSTEM = (1 << 6), /* system error */
+	AC_ERR_INVALID = (1 << 7), /* invalid argument */
+	AC_ERR_OTHER = (1 << 8), /* unknown */
+	AC_ERR_NODEV_HINT = (1 << 9), /* polling device detection hint */
+	AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */
+};
+
+enum ata_xfer_mask {
+	ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1) << ATA_SHIFT_PIO,
+	ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1) << ATA_SHIFT_MWDMA,
+	ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1) << ATA_SHIFT_UDMA,
+};
+
+struct ata_port_info {
+	struct scsi_host_template *sht;
+	unsigned long flags;
+	unsigned long link_flags;
+	unsigned long pio_mask;
+	unsigned long mwdma_mask;
+	unsigned long udma_mask;
+	const struct ata_port_operations *port_ops;
+	void *private_data;
+};
+
+struct ata_ioports {
+	void __iomem *cmd_addr;
+	void __iomem *data_addr;
+	void __iomem *error_addr;
+	void __iomem *feature_addr;
+	void __iomem *nsect_addr;
+	void __iomem *lbal_addr;
+	void __iomem *lbam_addr;
+	void __iomem *lbah_addr;
+	void __iomem *device_addr;
+	void __iomem *status_addr;
+	void __iomem *command_addr;
+	void __iomem *altstatus_addr;
+	void __iomem *ctl_addr;
+	void __iomem *bmdma_addr;
+	void __iomem *scr_addr;
+};
+
+struct ata_host {
+	void __iomem * const *iomap;
+	unsigned int n_ports;
+	void *private_data;
+	const struct ata_port_operations *ops;
+	unsigned long flags;
+	struct ata_port *simplex_claimed; /* channel owning the DMA */
+	struct ata_port *ports[0];
+};
+
+struct ata_port_stats {
+	unsigned long unhandled_irq;
+	unsigned long idle_irq;
+	unsigned long rw_reqbuf;
+};
+
+struct ata_device {
+	struct ata_link *link;
+	unsigned int devno; /* 0 or 1 */
+	unsigned long flags; /* ATA_DFLAG_xxx */
+	unsigned int horkage; /* List of broken features */
+	struct scsi_device *sdev; /* attached SCSI device */
+#ifdef CONFIG_ATA_ACPI
+	acpi_handle acpi_handle;
+	union acpi_object *gtf_cache;
+#endif
+	/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
+	u64 n_sectors; /* size of device, if ATA */
+	unsigned int class; /* ATA_DEV_xxx */
+
+	union {
+		u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+		u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
+	};
+
+	u8 pio_mode;
+	u8 dma_mode;
+	u8 xfer_mode;
+	unsigned int xfer_shift; /* ATA_SHIFT_xxx */
+
+	unsigned int multi_count; /* sectors count for
+				   * READ/WRITE MULTIPLE */
+	unsigned int max_sectors; /* per-device max sectors */
+	unsigned int cdb_len;
+
+	/* per-dev xfer mask */
+	unsigned long pio_mask;
+	unsigned long mwdma_mask;
+	unsigned long udma_mask;
+
+	/* for CHS addressing */
+	u16 cylinders; /* Number of cylinders */
+	u16 heads; /* Number of heads */
+	u16 sectors; /* Number of sectors per track */
+
+	/* error history */
+	int spdn_cnt;
+};
+
+enum dma_data_direction {
+	DMA_BIDIRECTIONAL = 0,
+	DMA_TO_DEVICE = 1,
+	DMA_FROM_DEVICE = 2,
+	DMA_NONE = 3,
+};
+
+struct ata_link {
+	struct ata_port *ap;
+	int pmp; /* port multiplier port # */
+	unsigned int active_tag; /* active tag on this link */
+	u32 sactive; /* active NCQ commands */
+
+	unsigned int flags; /* ATA_LFLAG_xxx */
+
+	unsigned int hw_sata_spd_limit;
+	unsigned int sata_spd_limit;
+	unsigned int sata_spd; /* current SATA PHY speed */
+
+	struct ata_device device[2];
+};
+
+struct ata_port {
+	unsigned long flags; /* ATA_FLAG_xxx */
+	unsigned int pflags; /* ATA_PFLAG_xxx */
+	unsigned int print_id; /* user visible unique port ID */
+	unsigned int port_no; /* 0 based port no. inside the host */
+
+	struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
+
+	u8 ctl; /* cache of ATA control register */
+	u8 last_ctl; /* Cache last written value */
+	unsigned int pio_mask;
+	unsigned int mwdma_mask;
+	unsigned int udma_mask;
+	unsigned int cbl; /* cable type; ATA_CBL_xxx */
+
+	struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
+	unsigned long qc_allocated;
+	unsigned int qc_active;
+	int nr_active_links; /* #links with active qcs */
+
+	struct ata_link link; /* host default link */
+
+	int nr_pmp_links; /* nr of available PMP links */
+	struct ata_link *pmp_link; /* array of PMP links */
+	struct ata_link *excl_link; /* for PMP qc exclusion */
+
+	struct ata_port_stats stats;
+	struct ata_host *host;
+
+	struct device *dev;
+	void *port_task_data;
+
+	unsigned int hsm_task_state;
+
+	u32 msg_enable;
+	void *private_data;
+	unsigned char *pdata;
+};
+#endif /* _SATA_DWC_H_ */
diff -purN u-boot-2009.03/include/configs/canyonlands.h u-boot-2009.03-sata/include/configs/canyonlands.h
--- u-boot-2009.03/include/configs/canyonlands.h	2009-03-22 06:04:41.000000000 +0900
+++ u-boot-2009.03-sata/include/configs/canyonlands.h	2009-03-25 17:31:40.000000000 +0900
@@ -34,6 +34,7 @@
 #ifdef CONFIG_CANYONLANDS
 #define CONFIG_460EX	1	/* Specific PPC460EX */
 #define CONFIG_HOSTNAME		canyonlands
+#define CONFIG_SATA_DWC		/* PPC460EX SATA support */
 #else
 #define CONFIG_460GT	1	/* Specific PPC460GT */
 #ifdef CONFIG_GLACIER
@@ -454,6 +455,9 @@
 #define CONFIG_CMD_SDRAM
 #define CONFIG_CMD_SNTP
 #define CONFIG_CMD_USB
+#if defined(CONFIG_SATA_DWC)
+#define CONFIG_CMD_SCSI
+#endif
 #elif defined(CONFIG_GLACIER)
 #define CONFIG_CMD_DATE
 #define CONFIG_CMD_DTT
@@ -517,6 +521,18 @@
 #endif /* CONFIG_460GT */

 /*-----------------------------------------------------------------------
+ * S-ATA driver setup
+ *----------------------------------------------------------------------*/
+#ifdef CONFIG_SATA_DWC
+#define CONFIG_LIBATA
+#define CONFIG_SYS_SCSI_MAX_SCSI_ID	1	/* SCSI ID */
+#define CONFIG_SYS_SCSI_MAX_LUN		1	/* SCSI LUN */
+#define CONFIG_SYS_SCSI_MAX_DEVICE	(CONFIG_SYS_SCSI_MAX_SCSI_ID * \
+					 CONFIG_SYS_SCSI_MAX_LUN)
+#define CONFIG_SYS_SCSI_MAXDEVICE	CONFIG_SYS_SCSI_MAX_DEVICE
+#endif
+
+/*-----------------------------------------------------------------------
  * External Bus Controller (EBC) Setup
  *----------------------------------------------------------------------*/

Regards,
Kazuaki Ichinohe