From: Zhikang Zhang <zhikang.zhang@nxp.com>
To: u-boot@lists.denx.de
Subject: [U-Boot] [RFC, 1/2] NVMe: add NVMe driver support
Date: Thu, 6 Apr 2017 16:40:33 +0800	[thread overview]
Message-ID: <1491468034-1627-1-git-send-email-zhikang.zhang@nxp.com> (raw)

Add support for devices that follow the NVM Express standard

 Basic functions:
	nvme init/scan
	nvme info - show basic information about the device
	nvme read/write

 Driver model:
	Use the block device (CONFIG_BLK) infrastructure to support
	NVMe in the driver model.
	Use UCLASS_PCI as the parent uclass.

The driver code is heavily based on the NVMe driver code in the Linux kernel.
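
For illustration, a session using the companion "nvme" commands (added in
patch 2/2) might look as follows; the exact argument syntax shown here is
an assumption, not copied from that patch:

	=> nvme init
	=> nvme scan 0
	=> nvme info 0
	=> nvme read a0000000 0 100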

Signed-off-by: Zhikang Zhang <zhikang.zhang@nxp.com>
Signed-off-by: Wenbin Song <wenbin.song@nxp.com>
---
 common/Makefile            |    1 +
 common/board_r.c           |   13 +
 common/nvme.c              |  127 ++++++
 doc/README.nvme            |   17 +
 drivers/block/Kconfig      |   16 +
 drivers/block/Makefile     |    1 +
 drivers/block/blk-uclass.c |    2 +
 drivers/block/nvme.c       | 1048 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/block/nvme.h       |  114 +++++
 drivers/block/nvme_uapi.h  |  570 ++++++++++++++++++++++++
 include/blk.h              |    1 +
 include/nvme.h             |   50 +++
 12 files changed, 1960 insertions(+)
 create mode 100644 common/nvme.c
 create mode 100644 doc/README.nvme
 create mode 100644 drivers/block/nvme.c
 create mode 100644 drivers/block/nvme.h
 create mode 100644 drivers/block/nvme_uapi.h
 create mode 100644 include/nvme.h

diff --git a/common/Makefile b/common/Makefile
index ecc23e6..70c539b 100644
--- a/common/Makefile
+++ b/common/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_LCD_DT_SIMPLEFB) += lcd_simplefb.o
 obj-$(CONFIG_LYNXKDI) += lynxkdi.o
 obj-$(CONFIG_MENU) += menu.o
 obj-$(CONFIG_CMD_SATA) += sata.o
+obj-$(CONFIG_NVME) += nvme.o
 obj-$(CONFIG_SCSI) += scsi.o
 obj-$(CONFIG_UPDATE_TFTP) += update.o
 obj-$(CONFIG_DFU_TFTP) += update.o
diff --git a/common/board_r.c b/common/board_r.c
index 48fa4ee..843f4e5 100644
--- a/common/board_r.c
+++ b/common/board_r.c
@@ -49,6 +49,7 @@
 #include <timer.h>
 #include <trace.h>
 #include <watchdog.h>
+#include <nvme.h>
 #ifdef CONFIG_CMD_AMBAPP
 #include <ambapp.h>
 #endif
@@ -458,6 +459,15 @@ static int initr_dataflash(void)
 }
 #endif
 
+#ifdef CONFIG_NVME
+static int initr_nvme(void)
+{
+	puts("NVMe:   ");
+	nvme_initialize();
+	return 0;
+}
+#endif
+
 /*
  * Tell if it's OK to load the environment early in boot.
  *
@@ -942,6 +952,9 @@ init_fnc_t init_sequence_r[] = {
 #if defined(CONFIG_SPARC)
 	prom_init,
 #endif
+#ifdef CONFIG_NVME
+	initr_nvme,
+#endif
 	run_main_loop,
 };
 
diff --git a/common/nvme.c b/common/nvme.c
new file mode 100644
index 0000000..ed3250f
--- /dev/null
+++ b/common/nvme.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2017 NXP Semiconductors
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <nvme.h>
+
+struct blk_desc nvme_dev_desc[CONFIG_SYS_NVME_MAX_DEVICE];
+
+struct pci_device_id nvme_supported[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0953) },
+		{}
+};
+#ifdef CONFIG_PARTITIONS
+struct blk_desc *nvme_get_dev(int dev)
+{
+	return (dev < CONFIG_SYS_NVME_MAX_DEVICE) ? &nvme_dev_desc[dev] : NULL;
+}
+#endif
+
+#ifdef CONFIG_BLK
+struct udevice *udev;
+static unsigned long nvme_bread(struct udevice *dev, lbaint_t start,
+				lbaint_t blkcnt, void *dst)
+{
+	return nvme_read(dev, start, blkcnt, dst);
+}
+
+static unsigned long nvme_bwrite(struct udevice *dev, lbaint_t start,
+				 lbaint_t blkcnt, const void *buffer)
+{
+	return nvme_write(dev, start, blkcnt, buffer);
+}
+#else
+static unsigned long nvme_bread(struct blk_desc *block_dev, lbaint_t start,
+				lbaint_t blkcnt, void *dst)
+{
+	return nvme_read(block_dev->devnum, start, blkcnt, dst);
+}
+
+static unsigned long nvme_bwrite(struct blk_desc *block_dev, lbaint_t start,
+				 lbaint_t blkcnt, const void *buffer)
+{
+	return nvme_write(block_dev->devnum, start, blkcnt, buffer);
+}
+#endif
+
+int __nvme_initialize(void)
+{
+	int rc, ret = -1;
+	int i;
+
+	for (i = 0; i < CONFIG_SYS_NVME_MAX_DEVICE; i++) {
+#ifdef CONFIG_BLK
+		rc = init_nvme(udev);
+		if (!rc)
+			rc = scan_nvme(udev);
+#else
+		rc = init_nvme(i);
+		if (!rc)
+			rc = scan_nvme(i);
+#endif
+
+		if (!rc && nvme_dev_desc[i].lba > 0) {
+			nvme_dev_desc[i].if_type = IF_TYPE_NVME;
+			nvme_dev_desc[i].devnum = i;
+			nvme_dev_desc[i].part_type = PART_TYPE_UNKNOWN;
+			nvme_dev_desc[i].type = DEV_TYPE_HARDDISK;
+#ifndef CONFIG_BLK
+			nvme_dev_desc[i].block_read = nvme_bread;
+			nvme_dev_desc[i].block_write = nvme_bwrite;
+#endif
+
+			part_init(&nvme_dev_desc[i]);
+			ret = i;
+		} else {
+			memset(&nvme_dev_desc[i], 0, sizeof(struct blk_desc));
+		}
+		printf("devnum: %d Vendor: %s Prod: %s\n",
+		       i,
+		       nvme_dev_desc[i].vendor,
+		       nvme_dev_desc[i].product);
+	}
+	return ret;
+}
+int nvme_initialize(void) __attribute__((weak, alias("__nvme_initialize")));
+
+#ifdef CONFIG_BLK
+static void nvme_name(char *str, int cardnum)
+{
+	sprintf(str, "nvme#%u", cardnum);
+}
+
+static int nvme_bind(struct udevice *dev)
+{
+	static int num_cards;	/* persists across binds for unique names */
+	char name[20];
+
+	udev = dev;
+	nvme_name(name, num_cards++);
+
+	return device_set_name(udev, name);
+}
+
+static const struct blk_ops nvme_ops = {
+	.read	= nvme_bread,
+	.write	= nvme_bwrite,
+};
+
+U_BOOT_DRIVER(nvme) = {
+	.name		= "nvme",
+	.id		= UCLASS_BLK,
+	.bind		= nvme_bind,
+	.ops		= &nvme_ops,
+	.priv_auto_alloc_size	= sizeof(struct nvme_ns),
+};
+U_BOOT_PCI_DEVICE(nvme, nvme_supported);
+#else
+U_BOOT_LEGACY_BLK(nvme) = {
+	.if_typename	= "nvme",
+	.if_type	= IF_TYPE_NVME,
+	.max_devs	= CONFIG_SYS_NVME_MAX_DEVICE,
+	.desc		= nvme_dev_desc,
+};
+#endif
diff --git a/doc/README.nvme b/doc/README.nvme
new file mode 100644
index 0000000..892e0fb
--- /dev/null
+++ b/doc/README.nvme
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2017 NXP Semiconductors
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+1. NVMe Config Switches:
+
+CONFIG_NVME		enables basic NVMe device support
+			NVMe devices are attached to the PCIe bus, so this
+			option depends on CONFIG_PCI.
+CONFIG_SYS_NVME_MAX_DEVICE	configures the maximum number of NVMe devices
+
+2. Driver Model Support:
+
+Use the block device (CONFIG_BLK) infrastructure to support NVMe in the
+driver model.
+Use UCLASS_PCI as the parent uclass.
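+
+Example configuration fragment (the values shown are illustrative):
+
+	CONFIG_NVME=y
+	CONFIG_SYS_NVME_MAX_DEVICE=2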
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 88e66e2..665d471 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -49,3 +49,19 @@ config SATA_CEVA
 	  AHCI 1.3 specifications with hot-plug detect feature.
 
 endmenu
+
+config NVME
+	bool "Support NVMe devices"
+	depends on PCI
+	help
+	  This option enables support for NVMe devices. It provides the
+	  basic functions needed to use an NVMe device, such as "nvme init",
+	  "nvme scan" and block read/write, along with further commands for
+	  querying the device.
+
+config SYS_NVME_MAX_DEVICE
+	int "Config the Max Num of NVMe Devices"
+	depends on NVME
+	default 1
+	help
+	  Set the maximum number of NVMe devices supported.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a72feec..65d4bde 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -31,3 +31,4 @@ obj-$(CONFIG_SANDBOX) += sandbox.o sandbox_scsi.o sata_sandbox.o
 obj-$(CONFIG_SCSI_SYM53C8XX) += sym53c8xx.o
 obj-$(CONFIG_SYSTEMACE) += systemace.o
 obj-$(CONFIG_BLOCK_CACHE) += blkcache.o
+obj-$(CONFIG_NVME) += nvme.o
diff --git a/drivers/block/blk-uclass.c b/drivers/block/blk-uclass.c
index 38cb938..25e3d90 100644
--- a/drivers/block/blk-uclass.c
+++ b/drivers/block/blk-uclass.c
@@ -22,6 +22,7 @@ static const char *if_typename_str[IF_TYPE_COUNT] = {
 	[IF_TYPE_SATA]		= "sata",
 	[IF_TYPE_HOST]		= "host",
 	[IF_TYPE_SYSTEMACE]	= "ace",
+	[IF_TYPE_NVME]		= "nvme",
 };
 
 static enum uclass_id if_type_uclass_id[IF_TYPE_COUNT] = {
@@ -34,6 +35,7 @@ static enum uclass_id if_type_uclass_id[IF_TYPE_COUNT] = {
 	[IF_TYPE_SD]		= UCLASS_INVALID,
 	[IF_TYPE_SATA]		= UCLASS_AHCI,
 	[IF_TYPE_HOST]		= UCLASS_ROOT,
+	[IF_TYPE_NVME]		= UCLASS_PCI,
 	[IF_TYPE_SYSTEMACE]	= UCLASS_INVALID,
 };
 
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644
index 0000000..2e06477
--- /dev/null
+++ b/drivers/block/nvme.c
@@ -0,0 +1,1048 @@
+/*
+ * Copyright (C) 2017 NXP Semiconductors
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include <common.h>
+#include <errno.h>
+#include <linux/compat.h>
+#include <dm.h>
+#include <pci.h>
+#include <memalign.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <console.h>
+#include <asm/armv8/mmu.h>
+#include "nvme.h"
+
+#define NVME_Q_DEPTH		2
+#define NVME_AQ_DEPTH		2
+#define SQ_SIZE(depth)		((depth) * sizeof(struct nvme_command))
+#define CQ_SIZE(depth)		((depth) * sizeof(struct nvme_completion))
+#define ADMIN_TIMEOUT		60
+#define MAX_PRP_POOL		512
+
+static struct nvme_info nvme_info = {.maxport = 0, .idx = 0,
+	.dev_list = LIST_HEAD_INIT(nvme_info.dev_list)};
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+	struct nvme_dev *dev;
+	struct nvme_command *sq_cmds;
+	struct nvme_completion *cqes;
+	wait_queue_head_t sq_full;
+	u32 __iomem *q_db;
+	u16 q_depth;
+	s16 cq_vector;
+	u16 sq_head;
+	u16 sq_tail;
+	u16 cq_head;
+	u16 qid;
+	u8 cq_phase;
+	u8 cqe_seen;
+	unsigned long cmdid_data[];
+};
+
+static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
+{
+	u32 bit = enabled ? NVME_CSTS_RDY : 0;
+	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit)
+		udelay(10000);
+
+	return 0;
+}
+
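+/*
+ * Compute PRP2 for a transfer whose first page is already described by
+ * PRP1. For transfers that fit in two pages PRP2 simply points at the
+ * second page; for larger transfers PRP2 points at a PRP list with one
+ * 64-bit entry per remaining page, chained across pool pages when needed.
+ */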
+static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
+		int total_len, u64 dma_addr)
+{
+	u32 page_size = dev->page_size;
+	int offset = dma_addr & (page_size - 1);
+	u64 *prp_pool;
+	int length = total_len;
+	int i, nprps;
+	length -= (page_size - offset);
+
+	if (length <= 0) {
+		*prp2 = 0;
+		return 0;
+	}
+
+	if (length)
+		dma_addr += (page_size - offset);
+
+	if (length <= page_size) {
+		*prp2 = dma_addr;
+		return 0;
+	}
+
+	nprps = DIV_ROUND_UP(length, page_size);
+
+	if (nprps > dev->prp_entry_num) {
+		free(dev->prp_pool);
+		dev->prp_pool = malloc(nprps << 3);
+		if (!dev->prp_pool) {
+			printf("Error: malloc prp_pool fail\n");
+			return -ENOMEM;
+		}
+		dev->prp_entry_num = nprps;
+	}
+	prp_pool = dev->prp_pool;
+	i = 0;
+	while (nprps) {
+		if (i == ((page_size >> 3) - 1)) {
+			*(prp_pool + i) = cpu_to_le64((u64)prp_pool +
+					page_size);
+			i = 0;
+			prp_pool += page_size;
+		}
+		*(prp_pool + i++) = cpu_to_le64(dma_addr);
+		dma_addr += page_size;
+		nprps--;
+	}
+	*prp2 = (u64)dev->prp_pool;
+	return 0;
+}
+
+static __le16 get_cmdid(void)
+{
+	static unsigned short cmdid;
+	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
+}
+
+static u16 read_completion_status(struct nvme_queue *nvmeq, u16 index)
+{
+	u64 start = (u64)&nvmeq->cqes[index];
+	u64 stop = start + sizeof(struct nvme_completion);
+	invalidate_dcache_range(start, stop);
+	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ *
+ * Safe to use from interrupt context
+ */
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+	u16 tail = nvmeq->sq_tail;
+	flush_dcache_all();
+	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
+			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));
+	if (++tail == nvmeq->q_depth)
+		tail = 0;
+	writel(tail, nvmeq->q_db);
+	nvmeq->sq_tail = tail;
+
+	return 0;
+}
+
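+/*
+ * Submit a command and poll the completion queue for the result. A new
+ * completion entry is recognised via its phase tag: the controller inverts
+ * the phase bit on every pass through the queue, so an entry is valid once
+ * the bit matches the phase the host expects.
+ */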
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+		struct nvme_command *cmd, u32 *result, unsigned timeout)
+{
+	u16 head = nvmeq->cq_head;
+	u16 phase = nvmeq->cq_phase;
+	u16 status;
+	ulong start_time;
+	ulong timeout_us = timeout * 100000;
+
+	cmd->common.command_id = get_cmdid();
+	nvme_submit_cmd(nvmeq, cmd);
+
+	start_time = timer_get_us();
+
+	for (;;) {
+		status = read_completion_status(nvmeq, head);
+		if ((status & 0x01) == phase)
+			break;
+		if (timeout_us > 0 && (timer_get_us() - start_time)
+		    >= timeout_us)
+			return -ETIMEDOUT;
+	}
+	status >>= 1;
+	if (status) {
+		printf("ERROR: status = %d, phase = %d, head = %d\n",
+		       status, phase, head);
+		status = 0;
+		if (++head == nvmeq->q_depth) {
+			head = 0;
+			phase = !phase;
+		}
+		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+		nvmeq->cq_head = head;
+		nvmeq->cq_phase = phase;
+		return -1;
+	}
+
+	if (result)
+		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));
+
+	if (++head == nvmeq->q_depth) {
+		head = 0;
+		phase = !phase;
+	}
+	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+	nvmeq->cq_head = head;
+	nvmeq->cq_phase = phase;
+
+	return status;
+}
+
+static int nvme_submit_admin_cmd(struct nvme_dev *dev,
+		struct nvme_command *cmd, u32 *result)
+{
+	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+}
+
+static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
+		int qid, int depth)
+{
+	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
+	if (!nvmeq)
+		return NULL;
+	memset(nvmeq, 0, sizeof(*nvmeq));
+
+	nvmeq->cqes = (void *)memalign(4096, CQ_SIZE(depth));
+	if (!nvmeq->cqes)
+		goto free_nvmeq;
+	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
+
+	nvmeq->sq_cmds = (void *)memalign(4096, SQ_SIZE(depth));
+	if (!nvmeq->sq_cmds)
+		goto free_queue;
+	memset((void *)nvmeq->sq_cmds, 0, SQ_SIZE(depth));
+
+	nvmeq->dev = dev;
+
+	nvmeq->cq_head = 0;
+	nvmeq->cq_phase = 1;
+	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
+	nvmeq->q_depth = depth;
+	nvmeq->qid = qid;
+	dev->queue_count++;
+	dev->queues[qid] = nvmeq;
+
+	return nvmeq;
+ free_queue:
+	free((void *)nvmeq->cqes);
+ free_nvmeq:
+	free(nvmeq);
+	return NULL;
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+	struct nvme_command c;
+	memset(&c, 0, sizeof(c));
+	c.delete_queue.opcode = opcode;
+	c.delete_queue.qid = cpu_to_le16(id);
+	return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
+	dev->ctrl_config |= NVME_CC_ENABLE;
+	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
+	return nvme_wait_ready(dev, cap, true);
+}
+
+static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
+	dev->ctrl_config &= ~NVME_CC_ENABLE;
+	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);
+	return nvme_wait_ready(dev, cap, false);
+}
+
+static void nvme_free_queue(struct nvme_queue *nvmeq)
+{
+	free((void *)nvmeq->cqes);
+	free(nvmeq->sq_cmds);
+	free(nvmeq);
+}
+
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
+{
+	int i;
+	for (i = dev->queue_count - 1; i >= lowest; i--) {
+		struct nvme_queue *nvmeq = dev->queues[i];
+		dev->queue_count--;
+		dev->queues[i] = NULL;
+		nvme_free_queue(nvmeq);
+	}
+}
+
+static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
+{
+	struct nvme_dev *dev = nvmeq->dev;
+	nvmeq->sq_tail = 0;
+	nvmeq->cq_head = 0;
+	nvmeq->cq_phase = 1;
+	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
+	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+	flush_dcache_range((u64)nvmeq->cqes,
+			   (u64)nvmeq->cqes + CQ_SIZE(nvmeq->q_depth));
+	dev->online_queues++;
+}
+
+static int nvme_configure_admin_queue(struct nvme_dev *dev)
+{
+	int result;
+	u32 aqa;
+	u64 cap = readq(&dev->bar->cap);
+	struct nvme_queue *nvmeq;
+	unsigned page_shift = PAGE_SHIFT;
+	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
+	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
+	if (page_shift < dev_page_min) {
+		dev_err(&dev->pci_dev->dev,
+			"Minimum device page size (%u) too large for host (%u)\n",
+			1 << dev_page_min,
+			1 << page_shift);
+		return -ENODEV;
+	}
+
+	if (page_shift > dev_page_max) {
+		dev_info(&dev->pci_dev->dev,
+			 "Device maximum page size (%u) smaller than host (%u); enabling work-around\n",
+			 1 << dev_page_max,
+			 1 << page_shift);
+		page_shift = dev_page_max;
+	}
+
+	result = nvme_disable_ctrl(dev, cap);
+	if (result < 0)
+		return result;
+
+	nvmeq = dev->queues[0];
+	if (!nvmeq) {
+		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
+		if (!nvmeq)
+			return -ENOMEM;
+	}
+
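+	/*
+	 * AQA encodes the admin submission queue size in bits 0-11 and the
+	 * admin completion queue size in bits 16-27, both as zero-based
+	 * values, so (depth - 1) is copied into both halves.
+	 */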
+	aqa = nvmeq->q_depth - 1;
+	aqa |= aqa << 16;
+
+	dev->page_size = 1 << page_shift;
+
+	dev->ctrl_config = NVME_CC_CSS_NVM;
+	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
+	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+
+	writel(aqa, &dev->bar->aqa);
+	writeq((u64)nvmeq->sq_cmds, &dev->bar->asq);
+	writeq((u64)nvmeq->cqes, &dev->bar->acq);
+
+	result = nvme_enable_ctrl(dev, cap);
+	if (result)
+		goto free_nvmeq;
+
+	nvmeq->cq_vector = 0;
+
+	nvme_init_queue(dev->queues[0], 0);
+	return result;
+
+ free_nvmeq:
+	nvme_free_queues(dev, 0);
+	return result;
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev,
+		u16 qid, struct nvme_queue *nvmeq)
+{
+	struct nvme_command c;
+	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+	memset(&c, 0, sizeof(c));
+	c.create_cq.opcode = nvme_admin_create_cq;
+	c.create_cq.prp1 = cpu_to_le64(nvmeq->cqes);
+	c.create_cq.cqid = cpu_to_le16(qid);
+	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+	c.create_cq.cq_flags = cpu_to_le16(flags);
+	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+
+	return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev,
+		u16 qid, struct nvme_queue *nvmeq)
+{
+	struct nvme_command c;
+	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
+	memset(&c, 0, sizeof(c));
+	c.create_sq.opcode = nvme_admin_create_sq;
+	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_cmds);
+	c.create_sq.sqid = cpu_to_le16(qid);
+	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+	c.create_sq.sq_flags = cpu_to_le16(flags);
+	c.create_sq.cqid = cpu_to_le16(qid);
+
+	return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
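+/*
+ * Issue an Identify admin command: cns == 1 returns the controller data
+ * structure, cns == 0 the data structure of the namespace given by nsid.
+ */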
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid,
+		unsigned cns, dma_addr_t dma_addr)
+{
+	struct nvme_command c;
+	memset(&c, 0, sizeof(c));
+	u32 page_size = dev->page_size;
+	int offset = dma_addr & (page_size - 1);
+	int length = sizeof(struct nvme_id_ctrl);
+
+	c.identify.opcode = nvme_admin_identify;
+	c.identify.nsid = cpu_to_le32(nsid);
+	c.identify.prp1 = cpu_to_le64(dma_addr);
+
+	length -= (page_size - offset);
+	if (length <= 0) {
+		c.identify.prp2 = 0;
+	} else {
+		dma_addr += (page_size - offset);
+		c.identify.prp2 = cpu_to_le64(dma_addr);
+	}
+
+	c.identify.cns = cpu_to_le32(cns);
+
+	return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static __maybe_unused int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+		unsigned nsid, dma_addr_t dma_addr, u32 *result)
+{
+	struct nvme_command c;
+	memset(&c, 0, sizeof(c));
+
+	c.features.opcode = nvme_admin_get_features;
+	c.features.nsid = cpu_to_le32(nsid);
+	c.features.prp1 = cpu_to_le64(dma_addr);
+	c.features.fid = cpu_to_le32(fid);
+
+	return nvme_submit_admin_cmd(dev, &c, result);
+}
+
+static __maybe_unused int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+		unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+	struct nvme_command c;
+	memset(&c, 0, sizeof(c));
+
+	c.features.opcode = nvme_admin_set_features;
+	c.features.prp1 = cpu_to_le64(dma_addr);
+	c.features.fid = cpu_to_le32(fid);
+	c.features.dword11 = cpu_to_le32(dword11);
+
+	return nvme_submit_admin_cmd(dev, &c, result);
+}
+
+static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
+{
+	struct nvme_dev *dev = nvmeq->dev;
+	int result;
+
+	nvmeq->cq_vector = qid - 1;
+	result = adapter_alloc_cq(dev, qid, nvmeq);
+	if (result < 0)
+		goto release_cq;
+
+	result = adapter_alloc_sq(dev, qid, nvmeq);
+	if (result < 0)
+		goto release_sq;
+
+	nvme_init_queue(nvmeq, qid);
+	return result;
+
+ release_sq:
+	adapter_delete_sq(dev, qid);
+ release_cq:
+	adapter_delete_cq(dev, qid);
+	return result;
+}
+
+static int set_queue_count(struct nvme_dev *dev, int count)
+{
+	int status;
+	u32 result;
+	u32 q_count = (count - 1) | ((count - 1) << 16);
+
+	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
+			q_count, 0, &result);
+
+	if (status < 0)
+		return status;
+	if (status > 1)
+		return 0;
+
+	return min(result & 0xffff, result >> 16) + 1;
+}
+
+static void nvme_create_io_queues(struct nvme_dev *dev)
+{
+	unsigned int i;
+
+	for (i = dev->queue_count; i <= dev->max_qid; i++)
+		if (!nvme_alloc_queue(dev, i, dev->q_depth))
+			break;
+
+	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
+		if (nvme_create_queue(dev->queues[i], i))
+			break;
+}
+
+static int nvme_setup_io_queues(struct nvme_dev *dev)
+{
+	int nr_io_queues;
+	int result;
+	nr_io_queues = 1;
+	result = set_queue_count(dev, nr_io_queues);
+	if (result <= 0)
+		return result;
+
+	if (result < nr_io_queues)
+		nr_io_queues = result;
+
+	dev->max_qid = nr_io_queues;
+
+	/* Free previously allocated queues */
+	nvme_free_queues(dev, nr_io_queues + 1);
+	nvme_create_io_queues(dev);
+
+	return 0;
+}
+
+static int nvme_get_info_from_identify(struct nvme_dev *dev)
+{
+	u16 vendor, device;
+	struct nvme_id_ctrl buf, *ctrl = &buf;
+	int ret;
+	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+
+	ret = nvme_identify(dev, 0, 1, (dma_addr_t)ctrl);
+	if (ret) {
+		printf("Error: nvme%d get identify error\n", dev->instance);
+		return -EIO;
+	}
+
+	dev->nn = le32_to_cpu(ctrl->nn);
+	dev->vwc = ctrl->vwc;
+	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+	if (ctrl->mdts)
+		dev->max_transfer_shift = (ctrl->mdts + shift);
+
+#ifdef CONFIG_BLK
+	dm_pci_read_config16(dev->pdev, PCI_VENDOR_ID, &vendor);
+	dm_pci_read_config16(dev->pdev, PCI_DEVICE_ID, &device);
+#else
+	pci_read_config_word(dev->pci_dev, PCI_VENDOR_ID, &vendor);
+	pci_read_config_word(dev->pci_dev, PCI_DEVICE_ID, &device);
+#endif
+	if ((vendor == PCI_VENDOR_ID_INTEL) &&
+	    (device == 0x0953) && ctrl->vs[3]) {
+		unsigned int max_transfer_shift;
+		dev->stripe_size = (ctrl->vs[3] + shift);
+		max_transfer_shift = (ctrl->vs[3] + 18);
+		if (dev->max_transfer_shift) {
+			dev->max_transfer_shift = min(max_transfer_shift,
+						       dev->max_transfer_shift);
+		} else {
+			dev->max_transfer_shift = max_transfer_shift;
+		}
+	}
+	return 0;
+}
+
+#ifdef CONFIG_BLK
+int init_nvme(struct udevice *udev)
+#else
+int init_nvme(int devnum)
+#endif
+{
+	static int init_done;
+	pci_dev_t pci_dev;
+	int ret;
+#ifdef CONFIG_BLK
+	struct nvme_ns *ns = dev_get_priv(udev);
+	struct nvme_dev *dev = ns->dev;
+	int devnum = trailing_strtol(udev->name);
+#else
+	struct nvme_dev *dev;
+#endif
+	u32 val;
+	u64 cap;
+
+	if (init_done == 1 && devnum < nvme_info.maxport) {
+		printf("devnum = %d\n", devnum);
+		return 0;
+	}
+
+	pci_dev  = pci_find_devices(nvme_supported, nvme_info.idx++);
+	if (pci_dev == -1) {
+		printf("Error: can't find pci device \"nvme%d\"\n",
+		       nvme_info.idx - 1);
+		return -ENODEV;
+	}
+
+	dev = malloc(sizeof(*dev));
+	if (dev == NULL) {
+		printf("Error: nvme%d: Out of Memory!\n", nvme_info.idx - 1);
+		return -ENOMEM;
+	}
+	memset(dev, 0, sizeof(*dev));
+
+	dev->instance = nvme_info.idx - 1;
+	INIT_LIST_HEAD(&dev->namespaces);
+#ifdef CONFIG_BLK
+	dev->pdev = udev;
+	dev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
+			PCI_REGION_MEM);
+#else
+	dev->pci_dev = pci_dev;
+	dev->bar = pci_map_bar(dev->pci_dev, PCI_BASE_ADDRESS_0,
+			PCI_REGION_MEM);
+#endif
+	if (readl(&dev->bar->csts) == -1) {
+		ret = -ENODEV;
+		printf("Error: nvme%d: Out of Memory!\n", nvme_info.idx - 1);
+		goto free_nvme;
+	}
+
+	dev->queues = malloc(2 * sizeof(struct nvme_queue *));
+	if (!dev->queues) {
+		ret = -ENOMEM;
+		printf("Error: nvme%d: Out of Memory!\n", dev->instance);
+		goto free_nvme;
+	}
+	memset(dev->queues, 0, 2 * sizeof(struct nvme_queue *));
+
+	dev->prp_pool = malloc(MAX_PRP_POOL);
+	if (!dev->prp_pool) {
+		ret = -ENOMEM;
+		printf("Error: nvme%d: Out of Memory!\n", dev->instance);
+		goto free_nvme;
+	}
+	dev->prp_entry_num = MAX_PRP_POOL >> 3;
+
+	/* Try to enable I/O accesses and bus-mastering */
+	val = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+#ifdef CONFIG_BLK
+	dm_pci_write_config32(udev, PCI_COMMAND, val);
+#else
+	pci_write_config_dword(dev->pci_dev, PCI_COMMAND, val);
+#endif
+
+	/* Print a debug message with the IO base address */
+#ifdef CONFIG_BLK
+	dm_pci_read_config32(udev, PCI_BASE_ADDRESS_0, &val);
+#else
+	pci_read_config_dword(dev->pci_dev, PCI_BASE_ADDRESS_0, &val);
+#endif
+
+	/* Make sure it worked */
+#ifdef CONFIG_BLK
+	dm_pci_read_config32(udev, PCI_COMMAND, &val);
+#else
+	pci_read_config_dword(dev->pci_dev, PCI_COMMAND, &val);
+#endif
+	if (!(val & PCI_COMMAND_MEMORY)) {
+		printf("Can't enable I/O memory\n");
+		ret = -ENOSPC;
+		goto free_queue;
+	}
+	if (!(val & PCI_COMMAND_MASTER)) {
+		printf("Can't enable bus-mastering\n");
+		ret = -EPERM;
+		goto free_queue;
+	}
+	if (readl(&dev->bar->csts) == -1) {
+		ret = -ENODEV;
+		goto free_queue;
+	}
+
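+	/*
+	 * Doorbell registers start at BAR offset 0x1000; each queue's
+	 * submission/completion doorbell pair is spaced (4 << CAP.DSTRD)
+	 * bytes apart, i.e. db_stride 32-bit registers.
+	 */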
+	cap = readq(&dev->bar->cap);
+	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
+	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
+	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+	ret = nvme_configure_admin_queue(dev);
+	if (ret)
+		goto free_queue;
+
+	ret = nvme_setup_io_queues(dev);
+	if (ret)
+		goto free_queue;
+
+	nvme_get_info_from_identify(dev);
+	dev->blk_dev_start = nvme_info.maxport;
+	list_add(&dev->node, &nvme_info.dev_list);
+
+	nvme_info.maxport += dev->nn;
+	init_done = 1;
+	return 0;
+ free_queue:
+	free((void *)dev->queues);
+ free_nvme:
+	free((void *)dev);
+	return ret;
+}
+
+static struct nvme_dev *find_dev_by_devnum(int devnum)
+{
+	struct nvme_dev *dev;
+	list_for_each_entry(dev, &nvme_info.dev_list, node) {
+		if ((devnum >= dev->blk_dev_start) &&
+		    (devnum < dev->blk_dev_start + dev->nn))
+			return dev;
+	}
+	return NULL;
+}
+
+static struct nvme_ns *find_ns_by_devnum(int devnum)
+{
+	struct nvme_dev *dev;
+	struct nvme_ns *ns;
+
+	dev = find_dev_by_devnum(devnum);
+	if (!dev)
+		return NULL;
+
+	list_for_each_entry(ns, &dev->namespaces, list) {
+		if (devnum == ns->devnum)
+			return ns;
+	}
+	return NULL;
+}
+
+#ifdef CONFIG_BLK
+int scan_nvme(struct udevice *udev)
+{
+	struct nvme_ns *ns = dev_get_priv(udev);
+	struct nvme_dev *dev = ns->dev;
+	int devnum = trailing_strtol(udev->name);
+#else
+int scan_nvme(int devnum)
+{
+	struct nvme_ns *ns;
+	struct nvme_dev *dev;
+#endif
+	int ret;
+	u8 flbas;
+	u16 vendor;
+	char vendor_c[8];
+	struct nvme_id_ns buf, *id = &buf;
+
+	if (devnum >= nvme_info.maxport) {
+		printf("Error: devnum %d is not initialized\n", devnum);
+		return -ENODEV;
+	}
+
+	dev = find_dev_by_devnum(devnum);
+	if (!dev)
+		return -ENODEV;
+
+	ns = find_ns_by_devnum(devnum);
+	if (ns)
+		return 0;
+
+	ns = malloc(sizeof(*ns));
+	if (!ns)
+		return -ENOMEM;
+	memset(ns, 0, sizeof(*ns));
+	ns->dev = dev;
+	ns->ns_id = devnum - dev->blk_dev_start + 1;
+	ns->devnum = devnum;
+	if (nvme_identify(dev, ns->ns_id, 0, (dma_addr_t)id)) {
+		ret = -EIO;
+		goto free_ns;
+	}
+	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
+	ns->flbas = flbas;
+	ns->lba_shift = id->lbaf[flbas].ds;
+	ns->mode_select_num_blocks = le64_to_cpu(id->nuse);
+	ns->mode_select_block_len = 1 << ns->lba_shift;
+	list_add(&ns->list, &dev->namespaces);
+	nvme_dev_desc[devnum].lba = ns->mode_select_num_blocks;
+	nvme_dev_desc[devnum].log2blksz = ns->lba_shift;
+	nvme_dev_desc[devnum].blksz = 1 << ns->lba_shift;
+#ifdef CONFIG_BLK
+	nvme_dev_desc[devnum].bdev = dev->pdev;
+	dm_pci_read_config16(dev->pdev, PCI_VENDOR_ID, &vendor);
+	udev->priv = ns;
+	udev->uclass_platdata = (void *)&nvme_dev_desc[devnum];
+#else
+	nvme_dev_desc[devnum].priv = (void *)ns;
+	pci_read_config_word(dev->pci_dev, PCI_VENDOR_ID, &vendor);
+#endif
+	sprintf(vendor_c, "0x%.4x", vendor);
+	memcpy(nvme_dev_desc[devnum].product,
+	       dev->serial, sizeof(dev->serial));
+	memcpy(nvme_dev_desc[devnum].vendor,
+	       vendor_c, sizeof(vendor_c));
+	memcpy(nvme_dev_desc[devnum].revision,
+	       dev->firmware_rev, sizeof(dev->firmware_rev));
+	return 0;
+ free_ns:
+	free(ns);
+	return ret;
+}
+
+static void print_optional_admin_cmd(u16 oacs, int devnum)
+{
+	printf("Blk device %d: Optional Admin Command Support:\n",
+	       devnum);
+	printf("\tNamespace Management/Attachment: %s\n",
+	       oacs & 0x08 ? "yes" : "no");
+	printf("\tFirmware Commit/Image download: %s\n",
+	       oacs & 0x04 ? "yes" : "no");
+	printf("\tFormat NVM: %s\n",
+	       oacs & 0x02 ? "yes" : "no");
+	printf("\tSecurity Send/Receive: %s\n",
+	       oacs & 0x01 ? "yes" : "no");
+}
+
+static void print_optional_nvm_cmd(u16 oncs, int devnum)
+{
+	printf("Blk device %d: Optional NVM Command Support:\n",
+	       devnum);
+	printf("\tReservation: %s\n",
+	       oncs & 0x10 ? "yes" : "no");
+	printf("\tSave/Select field in the Set/Get features: %s\n",
+	       oncs & 0x08 ? "yes" : "no");
+	printf("\tWrite Zeroes: %s\n",
+	       oncs & 0x04 ? "yes" : "no");
+	printf("\tDataset Management: %s\n",
+	       oncs & 0x02 ? "yes" : "no");
+	printf("\tWrite Uncorrectable: %s\n",
+	       oncs & 0x01 ? "yes" : "no");
+}
+
+static void print_format_nvme_attributes(u8 fna, int devnum)
+{
+	printf("Blk device %d: Format NVM Attributes:\n", devnum);
+	printf("\tSupport Cryptographic Erase: %s\n",
+	       fna & 0x04 ? "yes" : "No");
+	printf("\tSupport erase a particular namespace: %s\n",
+	       fna & 0x02 ? "No" : "Yes");
+	printf("\tSupport format a particular namespace: %s\n",
+	       fna & 0x01 ? "No" : "Yes");
+}
+
+static void print_format(struct nvme_lbaf *lbaf)
+{
+	u8 str[][10] = {"Best", "Better", "Good", "Degraded"};
+	printf("\t\tMetadata Size: %d\n", le16_to_cpu(lbaf->ms));
+	printf("\t\tLBA Data Size: %d\n", 1 << lbaf->ds);
+	printf("\t\tRelative Performance: %s\n", str[lbaf->rp & 0x03]);
+}
+
+static void print_formats(struct nvme_id_ns *id, struct nvme_ns *ns)
+{
+	int i;
+	printf("Blk device %d: LBA Format Support:\n", ns->devnum);
+	for (i = 0; i < id->nlbaf; i++) {
+		printf("\tLBA Foramt %d Support: ", i);
+		if (i == ns->flbas)
+			printf("(current)\n");
+		else
+			printf("\n");
+		print_format(id->lbaf + i);
+	}
+}
+
+static void print_data_protect_cap(u8 dpc, int devnum)
+{
+	printf("Blk device %d: End-to-End Data", devnum);
+	printf("Protect Capabilities:\n");
+	printf("\tAs last eight bytes: %s\n",
+	       dpc & 0x10 ? "yes" : "No");
+	printf("\tAs first eight bytes: %s\n",
+	       dpc & 0x08 ? "yes" : "No");
+	printf("\tSupport Type3: %s\n",
+	       dpc & 0x04 ? "yes" : "No");
+	printf("\tSupport Type2: %s\n",
+	       dpc & 0x02 ? "yes" : "No");
+	printf("\tSupport Type1: %s\n",
+	       dpc & 0x01 ? "yes" : "No");
+}
+
+static void print_metadata_cap(u8 mc, int devnum)
+{
+	printf("Blk device %d: Metadata capabilities:\n", devnum);
+	printf("\tAs part of a separate buffer: %s\n",
+	       mc & 0x02 ? "yes" : "No");
+	printf("\tAs part of an extended data LBA: %s\n",
+	       mc & 0x01 ? "yes" : "No");
+}
+
+int nvme_print_info(int devnum)
+{
+#ifdef CONFIG_BLK
+	struct udevice *udev;
+	blk_get_device(IF_TYPE_NVME, devnum, &udev);
+	struct nvme_ns *ns = dev_get_priv(udev);
+#else
+	struct nvme_ns *ns = nvme_dev_desc[devnum].priv;
+#endif
+	if (!ns) {
+		printf("Can not find device %d\n", devnum);
+		return -EINVAL;
+	}
+
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_id_ns buf_ns, *id = &buf_ns;
+	struct nvme_id_ctrl buf_ctrl, *ctrl = &buf_ctrl;
+
+	if (nvme_identify(dev, 0, 1, (dma_addr_t)ctrl))
+		return -EIO;
+
+	print_optional_admin_cmd(le16_to_cpu(ctrl->oacs), devnum);
+	print_optional_nvm_cmd(le16_to_cpu(ctrl->oncs), devnum);
+	print_format_nvme_attributes(ctrl->fna, devnum);
+
+	if (nvme_identify(dev, ns->ns_id, 0, (dma_addr_t)id))
+		return -EIO;
+
+	print_formats(id, ns);
+	print_data_protect_cap(id->dpc, devnum);
+	print_metadata_cap(id->mc, devnum);
+	return 0;
+}
+
+#ifdef CONFIG_BLK
+ulong nvme_write(struct udevice *udev, ulong blknr, lbaint_t blkcnt,
+		const void *buffer)
+{
+	struct nvme_ns *ns = dev_get_priv(udev);
+	struct nvme_dev *dev = ns->dev;
+	int devnum = ns->devnum;
+#else
+ulong nvme_write(int devnum, ulong blknr, lbaint_t blkcnt,
+		const void *buffer)
+{
+	struct nvme_ns *ns = nvme_dev_desc[devnum].priv;
+	struct nvme_dev *dev = ns->dev;
+#endif
+	struct nvme_command c;
+	int status;
+	u64 prp2;
+	u64 total_len = blkcnt << nvme_dev_desc[devnum].log2blksz;
+	u64 temp_len = total_len;
+
+	u64 slba = blknr;
+	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
+	u64 total_lbas = blkcnt;
+
+	c.rw.opcode = nvme_cmd_write;
+	c.rw.flags = 0;
+	c.rw.nsid = cpu_to_le32(ns->ns_id);
+	c.rw.control = 0;
+	c.rw.dsmgmt = 0;
+	c.rw.reftag = 0;
+	c.rw.apptag = 0;
+	c.rw.appmask = 0;
+	c.rw.metadata = 0;
+
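+	/*
+	 * Split the request into chunks of at most
+	 * 2 ^ (max_transfer_shift - lba_shift) blocks, the largest transfer
+	 * the controller advertises (MDTS), issuing one command per chunk.
+	 */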
+	while (total_lbas) {
+		if (total_lbas < lbas) {
+			lbas = (u16)total_lbas;
+			total_lbas = 0;
+		} else {
+			total_lbas -= lbas;
+		}
+
+		if (nvme_setup_prps
+		   (dev, &prp2, lbas << ns->lba_shift, (u64)buffer))
+			return -EIO;
+		c.rw.slba = cpu_to_le64(slba);
+		slba += lbas;
+		c.rw.length = cpu_to_le16(lbas - 1);
+		c.rw.prp1 = cpu_to_le64(buffer);
+		c.rw.prp2 = cpu_to_le64(prp2);
+		status = nvme_submit_sync_cmd(dev->queues[1],
+				&c, NULL, NVME_IO_TIMEOUT);
+		if (status)
+			break;
+		temp_len -= lbas << ns->lba_shift;
+		buffer += lbas << ns->lba_shift;
+	}
+	return (total_len - temp_len) >> nvme_dev_desc[devnum].log2blksz;
+}
+
+#ifdef CONFIG_BLK
+ulong nvme_read(struct udevice *udev, ulong blknr, lbaint_t blkcnt,
+		void *buffer)
+{
+	struct nvme_ns *ns = dev_get_priv(udev);
+	struct nvme_dev *dev = ns->dev;
+	int devnum = ns->devnum;
+#else
+ulong nvme_read(int devnum, ulong blknr, lbaint_t blkcnt,
+		void *buffer)
+{
+	struct nvme_ns *ns = nvme_dev_desc[devnum].priv;
+	struct nvme_dev *dev = ns->dev;
+#endif
+	struct nvme_command c;
+	int status;
+	u64 prp2;
+	u64 total_len = blkcnt << nvme_dev_desc[devnum].log2blksz;
+	u64 temp_len = total_len;
+
+	u64 slba = blknr;
+	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
+	u64 total_lbas = blkcnt;
+
+	c.rw.opcode = nvme_cmd_read;
+	c.rw.flags = 0;
+	c.rw.nsid = cpu_to_le32(ns->ns_id);
+	c.rw.control = 0;
+	c.rw.dsmgmt = 0;
+	c.rw.reftag = 0;
+	c.rw.apptag = 0;
+	c.rw.appmask = 0;
+	c.rw.metadata = 0;
+
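+	/* Same MDTS-bounded chunking as in nvme_write() above. */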
+	while (total_lbas) {
+		if (total_lbas < lbas) {
+			lbas = (u16)total_lbas;
+			total_lbas = 0;
+		} else {
+			total_lbas -= lbas;
+		}
+
+		if (nvme_setup_prps
+		   (dev, &prp2, lbas << ns->lba_shift, (u64)buffer))
+			return -EIO;
+		c.rw.slba = cpu_to_le64(slba);
+		slba += lbas;
+		c.rw.length = cpu_to_le16(lbas - 1);
+		c.rw.prp1 = cpu_to_le64(buffer);
+		c.rw.prp2 = cpu_to_le64(prp2);
+		status = nvme_submit_sync_cmd(dev->queues[1],
+				&c, NULL, NVME_IO_TIMEOUT);
+		if (status)
+			break;
+		temp_len -= lbas << ns->lba_shift;
+		buffer += lbas << ns->lba_shift;
+	}
+	return (total_len - temp_len) >> nvme_dev_desc[devnum].log2blksz;
+}
diff --git a/drivers/block/nvme.h b/drivers/block/nvme.h
new file mode 100644
index 0000000..9345f90
--- /dev/null
+++ b/drivers/block/nvme.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 NXP Semiconductors
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include <pci.h>
+#include <asm/io.h>
+#include "nvme_uapi.h"
+#include <linux/list.h>
+#include <nvme.h>
+
+struct nvme_bar {
+	__u64			cap;	/* Controller Capabilities */
+	__u32			vs;	/* Version */
+	__u32			intms;	/* Interrupt Mask Set */
+	__u32			intmc;	/* Interrupt Mask Clear */
+	__u32			cc;	/* Controller Configuration */
+	__u32			rsvd1;	/* Reserved */
+	__u32			csts;	/* Controller Status */
+	__u32			rsvd2;	/* Reserved */
+	__u32			aqa;	/* Admin Queue Attributes */
+	__u64			asq;	/* Admin SQ Base Address */
+	__u64			acq;	/* Admin CQ Base Address */
+};
+
+#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
+#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
+#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
+#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
+#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)
+
+enum {
+	NVME_CC_ENABLE		= 1 << 0,
+	NVME_CC_CSS_NVM		= 0 << 4,
+	NVME_CC_MPS_SHIFT	= 7,
+	NVME_CC_ARB_RR		= 0 << 11,
+	NVME_CC_ARB_WRRU	= 1 << 11,
+	NVME_CC_ARB_VS		= 7 << 11,
+	NVME_CC_SHN_NONE	= 0 << 14,
+	NVME_CC_SHN_NORMAL	= 1 << 14,
+	NVME_CC_SHN_ABRUPT	= 2 << 14,
+	NVME_CC_SHN_MASK	= 3 << 14,
+	NVME_CC_IOSQES		= 6 << 16,
+	NVME_CC_IOCQES		= 4 << 20,
+	NVME_CSTS_RDY		= 1 << 0,
+	NVME_CSTS_CFS		= 1 << 1,
+	NVME_CSTS_SHST_NORMAL	= 0 << 2,
+	NVME_CSTS_SHST_OCCUR	= 1 << 2,
+	NVME_CSTS_SHST_CMPLT	= 2 << 2,
+	NVME_CSTS_SHST_MASK	= 3 << 2,
+};
+
+#define NVME_IO_TIMEOUT  30
+
+/*
+ * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+	struct list_head node;
+	struct nvme_queue **queues;
+	u32 __iomem *dbs;
+	unsigned int cardnum;
+#ifdef CONFIG_BLK
+	struct udevice *pdev;
+#endif
+	pci_dev_t pci_dev;
+	int instance;
+	uint8_t *hw_addr;
+	unsigned queue_count;
+	unsigned online_queues;
+	unsigned max_qid;
+	int q_depth;
+	u32 db_stride;
+	u32 ctrl_config;
+	struct nvme_bar __iomem *bar;
+	struct list_head namespaces;
+	const char *name;
+	char serial[20];
+	char model[40];
+	char firmware_rev[8];
+	u32 max_transfer_shift;
+	u32 stripe_size;
+	u32 page_size;
+	u16 oncs;
+	u16 abort_limit;
+	u8 event_limit;
+	u8 vwc;
+	u64 *prp_pool;
+	u32 prp_entry_num;
+	u32 nn;
+	u32 blk_dev_start;
+};
+
+struct nvme_info {
+	int maxport;
+	int idx;
+	struct list_head dev_list;
+};
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries.  You can't see it in this data structure because C doesn't let
+ * me express that.  Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+	unsigned long private;	/* For the use of the submitter of the I/O */
+	int npages;		/* In the PRP list. 0 means small pool in use */
+	int offset;		/* Of PRP list */
+	int nents;		/* Used in scatterlist */
+	int length;		/* Of data, in bytes */
+	dma_addr_t first_dma;
+};
diff --git a/drivers/block/nvme_uapi.h b/drivers/block/nvme_uapi.h
new file mode 100644
index 0000000..c87d104
--- /dev/null
+++ b/drivers/block/nvme_uapi.h
@@ -0,0 +1,570 @@
+/*
+ * Copyright (C) 2017 NXP Semiconductors
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include <linux/types.h>
+
+struct nvme_id_power_state {
+	__le16			max_power;	/* centiwatts */
+	__u8			rsvd2;
+	__u8			flags;
+	__le32			entry_lat;	/* microseconds */
+	__le32			exit_lat;	/* microseconds */
+	__u8			read_tput;
+	__u8			read_lat;
+	__u8			write_tput;
+	__u8			write_lat;
+	__le16			idle_power;
+	__u8			idle_scale;
+	__u8			rsvd19;
+	__le16			active_power;
+	__u8			active_work_scale;
+	__u8			rsvd23[9];
+};
+
+enum {
+	NVME_PS_FLAGS_MAX_POWER_SCALE	= 1 << 0,
+	NVME_PS_FLAGS_NON_OP_STATE	= 1 << 1,
+};
+
+struct nvme_id_ctrl {
+	__le16			vid;
+	__le16			ssvid;
+	char			sn[20];
+	char			mn[40];
+	char			fr[8];
+	__u8			rab;
+	__u8			ieee[3];
+	__u8			mic;
+	__u8			mdts;
+	__u16			cntlid;
+	__u32			ver;
+	__u8			rsvd84[172];
+	__le16			oacs;
+	__u8			acl;
+	__u8			aerl;
+	__u8			frmw;
+	__u8			lpa;
+	__u8			elpe;
+	__u8			npss;
+	__u8			avscc;
+	__u8			apsta;
+	__le16			wctemp;
+	__le16			cctemp;
+	__u8			rsvd270[242];
+	__u8			sqes;
+	__u8			cqes;
+	__u8			rsvd514[2];
+	__le32			nn;
+	__le16			oncs;
+	__le16			fuses;
+	__u8			fna;
+	__u8			vwc;
+	__le16			awun;
+	__le16			awupf;
+	__u8			nvscc;
+	__u8			rsvd531;
+	__le16			acwu;
+	__u8			rsvd534[2];
+	__le32			sgls;
+	__u8			rsvd540[1508];
+	struct nvme_id_power_state	psd[32];
+	__u8			vs[1024];
+};
+
+enum {
+	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
+	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
+	NVME_CTRL_ONCS_DSM			= 1 << 2,
+	NVME_CTRL_VWC_PRESENT			= 1 << 0,
+};
+
+struct nvme_lbaf {
+	__le16			ms;
+	__u8			ds;
+	__u8			rp;
+};
+
+struct nvme_id_ns {
+	__le64			nsze;
+	__le64			ncap;
+	__le64			nuse;
+	__u8			nsfeat;
+	__u8			nlbaf;
+	__u8			flbas;
+	__u8			mc;
+	__u8			dpc;
+	__u8			dps;
+	__u8			nmic;
+	__u8			rescap;
+	__u8			fpi;
+	__u8			rsvd33;
+	__le16			nawun;
+	__le16			nawupf;
+	__le16			nacwu;
+	__le16			nabsn;
+	__le16			nabo;
+	__le16			nabspf;
+	__u16			rsvd46;
+	__le64			nvmcap[2];
+	__u8			rsvd64[40];
+	__u8			nguid[16];
+	__u8			eui64[8];
+	struct nvme_lbaf	lbaf[16];
+	__u8			rsvd192[192];
+	__u8			vs[3712];
+};
+
+enum {
+	NVME_NS_FEAT_THIN	= 1 << 0,
+	NVME_NS_FLBAS_LBA_MASK	= 0xf,
+	NVME_NS_FLBAS_META_EXT	= 0x10,
+	NVME_LBAF_RP_BEST	= 0,
+	NVME_LBAF_RP_BETTER	= 1,
+	NVME_LBAF_RP_GOOD	= 2,
+	NVME_LBAF_RP_DEGRADED	= 3,
+	NVME_NS_DPC_PI_LAST	= 1 << 4,
+	NVME_NS_DPC_PI_FIRST	= 1 << 3,
+	NVME_NS_DPC_PI_TYPE3	= 1 << 2,
+	NVME_NS_DPC_PI_TYPE2	= 1 << 1,
+	NVME_NS_DPC_PI_TYPE1	= 1 << 0,
+	NVME_NS_DPS_PI_FIRST	= 1 << 3,
+	NVME_NS_DPS_PI_MASK	= 0x7,
+	NVME_NS_DPS_PI_TYPE1	= 1,
+	NVME_NS_DPS_PI_TYPE2	= 2,
+	NVME_NS_DPS_PI_TYPE3	= 3,
+};
+
+struct nvme_smart_log {
+	__u8			critical_warning;
+	__u8			temperature[2];
+	__u8			avail_spare;
+	__u8			spare_thresh;
+	__u8			percent_used;
+	__u8			rsvd6[26];
+	__u8			data_units_read[16];
+	__u8			data_units_written[16];
+	__u8			host_reads[16];
+	__u8			host_writes[16];
+	__u8			ctrl_busy_time[16];
+	__u8			power_cycles[16];
+	__u8			power_on_hours[16];
+	__u8			unsafe_shutdowns[16];
+	__u8			media_errors[16];
+	__u8			num_err_log_entries[16];
+	__le32			warning_temp_time;
+	__le32			critical_comp_time;
+	__le16			temp_sensor[8];
+	__u8			rsvd216[296];
+};
+
+enum {
+	NVME_SMART_CRIT_SPARE		= 1 << 0,
+	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
+	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
+	NVME_SMART_CRIT_MEDIA		= 1 << 3,
+	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
+};
+
+struct nvme_lba_range_type {
+	__u8			type;
+	__u8			attributes;
+	__u8			rsvd2[14];
+	__u64			slba;
+	__u64			nlb;
+	__u8			guid[16];
+	__u8			rsvd48[16];
+};
+
+enum {
+	NVME_LBART_TYPE_FS	= 0x01,
+	NVME_LBART_TYPE_RAID	= 0x02,
+	NVME_LBART_TYPE_CACHE	= 0x03,
+	NVME_LBART_TYPE_SWAP	= 0x04,
+
+	NVME_LBART_ATTRIB_TEMP	= 1 << 0,
+	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
+};
+
+struct nvme_reservation_status {
+	__le32	gen;
+	__u8	rtype;
+	__u8	regctl[2];
+	__u8	resv5[2];
+	__u8	ptpls;
+	__u8	resv10[13];
+	struct {
+		__le16	cntlid;
+		__u8	rcsts;
+		__u8	resv3[5];
+		__le64	hostid;
+		__le64	rkey;
+	} regctl_ds[];
+};
+
+/* I/O commands */
+
+enum nvme_opcode {
+	nvme_cmd_flush		= 0x00,
+	nvme_cmd_write		= 0x01,
+	nvme_cmd_read		= 0x02,
+	nvme_cmd_write_uncor	= 0x04,
+	nvme_cmd_compare	= 0x05,
+	nvme_cmd_write_zeroes	= 0x08,
+	nvme_cmd_dsm		= 0x09,
+	nvme_cmd_resv_register	= 0x0d,
+	nvme_cmd_resv_report	= 0x0e,
+	nvme_cmd_resv_acquire	= 0x11,
+	nvme_cmd_resv_release	= 0x15,
+};
+
+struct nvme_common_command {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__le32			cdw2[2];
+	__le64			metadata;
+	__le64			prp1;
+	__le64			prp2;
+	__le32			cdw10[6];
+};
+
+struct nvme_rw_command {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2;
+	__le64			metadata;
+	__le64			prp1;
+	__le64			prp2;
+	__le64			slba;
+	__le16			length;
+	__le16			control;
+	__le32			dsmgmt;
+	__le32			reftag;
+	__le16			apptag;
+	__le16			appmask;
+};
+
+enum {
+	NVME_RW_LR			= 1 << 15,
+	NVME_RW_FUA			= 1 << 14,
+	NVME_RW_DSM_FREQ_UNSPEC		= 0,
+	NVME_RW_DSM_FREQ_TYPICAL	= 1,
+	NVME_RW_DSM_FREQ_RARE		= 2,
+	NVME_RW_DSM_FREQ_READS		= 3,
+	NVME_RW_DSM_FREQ_WRITES		= 4,
+	NVME_RW_DSM_FREQ_RW		= 5,
+	NVME_RW_DSM_FREQ_ONCE		= 6,
+	NVME_RW_DSM_FREQ_PREFETCH	= 7,
+	NVME_RW_DSM_FREQ_TEMP		= 8,
+	NVME_RW_DSM_LATENCY_NONE	= 0 << 4,
+	NVME_RW_DSM_LATENCY_IDLE	= 1 << 4,
+	NVME_RW_DSM_LATENCY_NORM	= 2 << 4,
+	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
+	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
+	NVME_RW_DSM_COMPRESSED		= 1 << 7,
+	NVME_RW_PRINFO_PRCHK_REF	= 1 << 10,
+	NVME_RW_PRINFO_PRCHK_APP	= 1 << 11,
+	NVME_RW_PRINFO_PRCHK_GUARD	= 1 << 12,
+	NVME_RW_PRINFO_PRACT		= 1 << 13,
+};
+
+struct nvme_dsm_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[2];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			nr;
+	__le32			attributes;
+	__u32			rsvd12[4];
+};
+
+enum {
+	NVME_DSMGMT_IDR		= 1 << 0,
+	NVME_DSMGMT_IDW		= 1 << 1,
+	NVME_DSMGMT_AD		= 1 << 2,
+};
+
+struct nvme_dsm_range {
+	__le32			cattr;
+	__le32			nlb;
+	__le64			slba;
+};
+
+/* Admin commands */
+
+enum nvme_admin_opcode {
+	nvme_admin_delete_sq		= 0x00,
+	nvme_admin_create_sq		= 0x01,
+	nvme_admin_get_log_page		= 0x02,
+	nvme_admin_delete_cq		= 0x04,
+	nvme_admin_create_cq		= 0x05,
+	nvme_admin_identify		= 0x06,
+	nvme_admin_abort_cmd		= 0x08,
+	nvme_admin_set_features		= 0x09,
+	nvme_admin_get_features		= 0x0a,
+	nvme_admin_async_event		= 0x0c,
+	nvme_admin_activate_fw		= 0x10,
+	nvme_admin_download_fw		= 0x11,
+	nvme_admin_format_nvm		= 0x80,
+	nvme_admin_security_send	= 0x81,
+	nvme_admin_security_recv	= 0x82,
+};
+
+enum {
+	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
+	NVME_CQ_IRQ_ENABLED	= (1 << 1),
+	NVME_SQ_PRIO_URGENT	= (0 << 1),
+	NVME_SQ_PRIO_HIGH	= (1 << 1),
+	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
+	NVME_SQ_PRIO_LOW	= (3 << 1),
+	NVME_FEAT_ARBITRATION	= 0x01,
+	NVME_FEAT_POWER_MGMT	= 0x02,
+	NVME_FEAT_LBA_RANGE	= 0x03,
+	NVME_FEAT_TEMP_THRESH	= 0x04,
+	NVME_FEAT_ERR_RECOVERY	= 0x05,
+	NVME_FEAT_VOLATILE_WC	= 0x06,
+	NVME_FEAT_NUM_QUEUES	= 0x07,
+	NVME_FEAT_IRQ_COALESCE	= 0x08,
+	NVME_FEAT_IRQ_CONFIG	= 0x09,
+	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
+	NVME_FEAT_ASYNC_EVENT	= 0x0b,
+	NVME_FEAT_AUTO_PST	= 0x0c,
+	NVME_FEAT_SW_PROGRESS	= 0x80,
+	NVME_FEAT_HOST_ID	= 0x81,
+	NVME_FEAT_RESV_MASK	= 0x82,
+	NVME_FEAT_RESV_PERSIST	= 0x83,
+	NVME_LOG_ERROR		= 0x01,
+	NVME_LOG_SMART		= 0x02,
+	NVME_LOG_FW_SLOT	= 0x03,
+	NVME_LOG_RESERVATION	= 0x80,
+	NVME_FWACT_REPL		= (0 << 3),
+	NVME_FWACT_REPL_ACTV	= (1 << 3),
+	NVME_FWACT_ACTV		= (2 << 3),
+};
+
+struct nvme_identify {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[2];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			cns;
+	__u32			rsvd11[5];
+};
+
+struct nvme_features {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[2];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			fid;
+	__le32			dword11;
+	__u32			rsvd12[4];
+};
+
+struct nvme_create_cq {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[5];
+	__le64			prp1;
+	__u64			rsvd8;
+	__le16			cqid;
+	__le16			qsize;
+	__le16			cq_flags;
+	__le16			irq_vector;
+	__u32			rsvd12[4];
+};
+
+struct nvme_create_sq {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[5];
+	__le64			prp1;
+	__u64			rsvd8;
+	__le16			sqid;
+	__le16			qsize;
+	__le16			sq_flags;
+	__le16			cqid;
+	__u32			rsvd12[4];
+};
+
+struct nvme_delete_queue {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[9];
+	__le16			qid;
+	__u16			rsvd10;
+	__u32			rsvd11[5];
+};
+
+struct nvme_abort_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[9];
+	__le16			sqid;
+	__u16			cid;
+	__u32			rsvd11[5];
+};
+
+struct nvme_download_firmware {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__u32			rsvd1[5];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			numd;
+	__le32			offset;
+	__u32			rsvd12[4];
+};
+
+struct nvme_format_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[4];
+	__le32			cdw10;
+	__u32			rsvd11[5];
+};
+
+struct nvme_command {
+	union {
+		struct nvme_common_command common;
+		struct nvme_rw_command rw;
+		struct nvme_identify identify;
+		struct nvme_features features;
+		struct nvme_create_cq create_cq;
+		struct nvme_create_sq create_sq;
+		struct nvme_delete_queue delete_queue;
+		struct nvme_download_firmware dlfw;
+		struct nvme_format_cmd format;
+		struct nvme_dsm_cmd dsm;
+		struct nvme_abort_cmd abort;
+	};
+};
+
+enum {
+	NVME_SC_SUCCESS			= 0x0,
+	NVME_SC_INVALID_OPCODE		= 0x1,
+	NVME_SC_INVALID_FIELD		= 0x2,
+	NVME_SC_CMDID_CONFLICT		= 0x3,
+	NVME_SC_DATA_XFER_ERROR		= 0x4,
+	NVME_SC_POWER_LOSS		= 0x5,
+	NVME_SC_INTERNAL		= 0x6,
+	NVME_SC_ABORT_REQ		= 0x7,
+	NVME_SC_ABORT_QUEUE		= 0x8,
+	NVME_SC_FUSED_FAIL		= 0x9,
+	NVME_SC_FUSED_MISSING		= 0xa,
+	NVME_SC_INVALID_NS		= 0xb,
+	NVME_SC_CMD_SEQ_ERROR		= 0xc,
+	NVME_SC_SGL_INVALID_LAST	= 0xd,
+	NVME_SC_SGL_INVALID_COUNT	= 0xe,
+	NVME_SC_SGL_INVALID_DATA	= 0xf,
+	NVME_SC_SGL_INVALID_METADATA	= 0x10,
+	NVME_SC_SGL_INVALID_TYPE	= 0x11,
+	NVME_SC_LBA_RANGE		= 0x80,
+	NVME_SC_CAP_EXCEEDED		= 0x81,
+	NVME_SC_NS_NOT_READY		= 0x82,
+	NVME_SC_RESERVATION_CONFLICT	= 0x83,
+	NVME_SC_CQ_INVALID		= 0x100,
+	NVME_SC_QID_INVALID		= 0x101,
+	NVME_SC_QUEUE_SIZE		= 0x102,
+	NVME_SC_ABORT_LIMIT		= 0x103,
+	NVME_SC_ABORT_MISSING		= 0x104,
+	NVME_SC_ASYNC_LIMIT		= 0x105,
+	NVME_SC_FIRMWARE_SLOT		= 0x106,
+	NVME_SC_FIRMWARE_IMAGE		= 0x107,
+	NVME_SC_INVALID_VECTOR		= 0x108,
+	NVME_SC_INVALID_LOG_PAGE	= 0x109,
+	NVME_SC_INVALID_FORMAT		= 0x10a,
+	NVME_SC_FIRMWARE_NEEDS_RESET	= 0x10b,
+	NVME_SC_INVALID_QUEUE		= 0x10c,
+	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
+	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
+	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
+	NVME_SC_FW_NEEDS_RESET_SUBSYS	= 0x110,
+	NVME_SC_BAD_ATTRIBUTES		= 0x180,
+	NVME_SC_INVALID_PI		= 0x181,
+	NVME_SC_READ_ONLY		= 0x182,
+	NVME_SC_WRITE_FAULT		= 0x280,
+	NVME_SC_READ_ERROR		= 0x281,
+	NVME_SC_GUARD_CHECK		= 0x282,
+	NVME_SC_APPTAG_CHECK		= 0x283,
+	NVME_SC_REFTAG_CHECK		= 0x284,
+	NVME_SC_COMPARE_FAILED		= 0x285,
+	NVME_SC_ACCESS_DENIED		= 0x286,
+	NVME_SC_DNR			= 0x4000,
+};
+
+struct nvme_completion {
+	__le32	result;		/* Used by admin commands to return data */
+	__u32	rsvd;
+	__le16	sq_head;	/* how much of this queue may be reclaimed */
+	__le16	sq_id;		/* submission queue that generated this entry */
+	__u16	command_id;	/* of the command which completed */
+	__le16	status;		/* did the command fail, and if so, why? */
+};
+
+struct nvme_user_io {
+	__u8	opcode;
+	__u8	flags;
+	__u16	control;
+	__u16	nblocks;
+	__u16	rsvd;
+	__u64	metadata;
+	__u64	addr;
+	__u64	slba;
+	__u32	dsmgmt;
+	__u32	reftag;
+	__u16	apptag;
+	__u16	appmask;
+};
+
+struct nvme_passthru_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__u16	rsvd1;
+	__u32	nsid;
+	__u32	cdw2;
+	__u32	cdw3;
+	__u64	metadata;
+	__u64	addr;
+	__u32	metadata_len;
+	__u32	data_len;
+	__u32	cdw10;
+	__u32	cdw11;
+	__u32	cdw12;
+	__u32	cdw13;
+	__u32	cdw14;
+	__u32	cdw15;
+	__u32	timeout_ms;
+	__u32	result;
+};
+
+#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+
+#define nvme_admin_cmd nvme_passthru_cmd
+
+#define NVME_IOCTL_ID		_IO('N', 0x40)
+#define NVME_IOCTL_ADMIN_CMD	_IOWR('N', 0x41, struct nvme_admin_cmd)
+#define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x42, struct nvme_user_io)
+#define NVME_IOCTL_IO_CMD	_IOWR('N', 0x43, struct nvme_passthru_cmd)
diff --git a/include/blk.h b/include/blk.h
index 66a1c55..7f1e573 100644
--- a/include/blk.h
+++ b/include/blk.h
@@ -31,6 +31,7 @@ enum if_type {
 	IF_TYPE_SATA,
 	IF_TYPE_HOST,
 	IF_TYPE_SYSTEMACE,
+	IF_TYPE_NVME,
 
 	IF_TYPE_COUNT,			/* Number of interface types */
 };
diff --git a/include/nvme.h b/include/nvme.h
new file mode 100644
index 0000000..3f8f94f
--- /dev/null
+++ b/include/nvme.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2017 NXP Semiconductors
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#ifndef __NVME_H__
+#define __NVME_H__
+#include <part.h>
+
+#ifdef CONFIG_BLK
+int init_nvme(struct udevice *udev);
+int scan_nvme(struct udevice *udev);
+ulong nvme_read(struct udevice *udev, ulong blknr, lbaint_t blkcnt,
+		void *buffer);
+ulong nvme_write(struct udevice *udev, ulong blknr, lbaint_t blkcnt,
+		const void *buffer);
+#else
+int init_nvme(int devnum);
+int scan_nvme(int devnum);
+ulong nvme_read(int devnum, ulong blknr, lbaint_t blkcnt,
+		void *buffer);
+ulong nvme_write(int devnum, ulong blknr, lbaint_t blkcnt,
+		const void *buffer);
+#endif
+int nvme_print_info(int devnum);
+int nvme_initialize(void);
+int __nvme_initialize(void);
+
+extern struct blk_desc nvme_dev_desc[];
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN.
+ * Each namespace is operated as an independent "device".
+ */
+struct nvme_ns {
+	struct list_head list;
+	struct nvme_dev *dev;
+	unsigned ns_id;
+	int devnum;
+	int lba_shift;
+	u16 ms;
+	u8 flbas;
+	u8 pi_type;
+	u64 mode_select_num_blocks;
+	u32 mode_select_block_len;
+};
+
+extern struct pci_device_id nvme_supported[];
+#endif
-- 
2.1.0.27.g96db324
