multiprotocol blkback drivers.

This is a patch for the block interface, frontend drivers, backend
drivers and tools to support multiple ring protocols. Right now there
are just two: the 32-bit one and the 64-bit one. It can be extended
if needed.

Interface changes (io/blkif.h):

 * Have both request structs there, named after the protocol they
   belong to ("x86_32" and "x86_64"). The old name is aliased to the
   native protocol of the architecture.
 * Add helper functions to convert x86_32/x86_64 requests to native
   (a sketch of the layout mismatch these converters handle follows
   after the patch).

Backend changes:

 * Look at the "protocol" node of the frontend and switch ring
   handling accordingly. If the protocol node isn't present, the
   native protocol is assumed (see the frontend-side sketch after the
   patch).
 * As the request struct is copied anyway before being processed (for
   security reasons), it is converted to native at that point, so most
   backend code doesn't need to know which protocol the frontend
   speaks.
 * In the blktap case this is completely transparent to userspace:
   the kernel/userspace ring is always native, no matter what the
   frontend speaks.

---
 linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c   |   71 ++++++++++----
 linux-2.6-xen-sparse/drivers/xen/blkback/common.h    |    6 -
 linux-2.6-xen-sparse/drivers/xen/blkback/interface.c |   25 ++++-
 linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c    |   19 +++
 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c     |   74 ++++++++++----
 linux-2.6-xen-sparse/drivers/xen/blktap/common.h     |    6 -
 linux-2.6-xen-sparse/drivers/xen/blktap/interface.c  |   25 ++++-
 linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c     |   19 +++
 linux-2.6-xen-sparse/include/xen/blkif.h             |   95 +++++++++++++++++++
 xen/include/public/io/blkif.h                        |   14 +-
 10 files changed, 290 insertions(+), 64 deletions(-)

Index: build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
===================================================================
--- build-32-unstable-13534.orig/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
+++ build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
@@ -298,17 +298,20 @@ irqreturn_t blkif_be_int(int irq, void *
 static int do_block_io_op(blkif_t *blkif)
 {
-	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
 	blkif_request_t req;
 	pending_req_t *pending_req;
 	RING_IDX rc, rp;
 	int more_to_do = 0;
 
-	rc = blk_ring->req_cons;
-	rp = blk_ring->sring->req_prod;
+	rc = blk_rings->common.req_cons;
+	rp = blk_rings->common.sring->req_prod;
 	rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-	while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+	while (rc != rp) {
+
+		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
+			break;
 
 		pending_req = alloc_req();
 		if (NULL == pending_req) {
@@ -317,8 +320,20 @@ static int do_block_io_op(blkif_t *blkif
 			break;
 		}
 
-		memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
-		blk_ring->req_cons = ++rc; /* before make_response() */
+		switch (blkif->blk_protocol) {
+		case BLKIF_PROTOCOL_NATIVE:
+			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
+			break;
+		case BLKIF_PROTOCOL_X86_32:
+			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+			break;
+		case BLKIF_PROTOCOL_X86_64:
+			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+			break;
+		default:
+			BUG();
+		}
+		blk_rings->common.req_cons = ++rc; /* before make_response() */
 
 		switch (req.operation) {
 		case BLKIF_OP_READ:
@@ -498,34 +513,48 @@ static void dispatch_rw_block_io(blkif_t
 static void make_response(blkif_t *blkif, unsigned long id,
			   unsigned short op, int st)
 {
-	blkif_response_t *resp;
+	blkif_response_t resp;
 	unsigned long flags;
-	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
 	int more_to_do = 0;
 	int notify;
 
-	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-
-	/* Place on the response ring for the relevant domain. */
-	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-	resp->id        = id;
-	resp->operation = op;
-	resp->status    = st;
-	blk_ring->rsp_prod_pvt++;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
+	resp.id        = id;
+	resp.operation = op;
+	resp.status    = st;
 
-	if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+	/* Place on the response ring for the relevant domain. */
+	switch (blkif->blk_protocol) {
+	case BLKIF_PROTOCOL_NATIVE:
+		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
+		       &resp, sizeof(resp));
+		break;
+	case BLKIF_PROTOCOL_X86_32:
+		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
+		       &resp, sizeof(resp));
+		break;
+	case BLKIF_PROTOCOL_X86_64:
+		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
+		       &resp, sizeof(resp));
+		break;
+	default:
+		BUG();
+	}
+	blk_rings->common.rsp_prod_pvt++;
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+
+	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
 		/*
 		 * Tail check for pending requests. Allows frontend to avoid
 		 * notifications if requests are already in flight (lower
 		 * overheads and promotes batching).
 		 */
-		RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
-	} else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
 		more_to_do = 1;
 	}
 
 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 
 	if (more_to_do)

Index: build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blkback/common.h
===================================================================
--- build-32-unstable-13534.orig/linux-2.6-xen-sparse/drivers/xen/blkback/common.h
+++ build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blkback/common.h
@@ -40,8 +40,7 @@
 #include
 #include
 #include
-#include <xen/interface/io/blkif.h>
-#include <xen/interface/io/ring.h>
+#include <xen/blkif.h>
 #include
 #include
 #include
@@ -67,7 +66,8 @@ typedef struct blkif_st {
 	/* Physical parameters of the comms window. */
 	unsigned int irq;
 	/* Comms information. */
-	blkif_back_ring_t blk_ring;
+	int blk_protocol;
+	blkif_back_rings_t blk_rings;
 	struct vm_struct *blk_ring_area;
 	/* The VBD attached to this interface. */
 	struct vbd vbd;

Index: build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
===================================================================
--- build-32-unstable-13534.orig/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
+++ build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
@@ -95,7 +95,6 @@ static void unmap_frontend_page(blkif_t
 int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
 {
-	blkif_sring_t *sring;
 	int err;
 
 	/* Already connected through? */
@@ -111,8 +110,24 @@ int blkif_map(blkif_t *blkif, unsigned l
 		return err;
 	}
 
-	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+	switch (blkif->blk_protocol) {
+	case BLKIF_PROTOCOL_NATIVE:
+	{
+		blkif_sring_t *sring;
+		sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+		break;
+	}
+	case BLKIF_PROTOCOL_X86_32:
+	{
+		blkif_x86_32_sring_t *sring_x86_32;
+		sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+		break;
+	}
+	case BLKIF_PROTOCOL_X86_64:
+	{
+		blkif_x86_64_sring_t *sring_x86_64;
+		sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+		break;
+	}
+	default:
+		BUG();
+	}
 
 	err = bind_interdomain_evtchn_to_irqhandler(
 		blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
@@ -143,10 +158,10 @@ void blkif_disconnect(blkif_t *blkif)
 		blkif->irq = 0;
 	}
 
-	if (blkif->blk_ring.sring) {
+	if (blkif->blk_rings.common.sring) {
 		unmap_frontend_page(blkif);
 		free_vm_area(blkif->blk_ring_area);
-		blkif->blk_ring.sring = NULL;
+		blkif->blk_rings.common.sring = NULL;
 	}
 }

Index: build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
===================================================================
--- build-32-unstable-13534.orig/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
+++ build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
@@ -459,6 +459,7 @@ static int connect_ring(struct backend_i
 	struct xenbus_device *dev = be->dev;
 	unsigned long ring_ref;
 	unsigned int evtchn;
+	char protocol[64] = "";
 	int err;
 
 	DPRINTK("%s", dev->otherend);
@@ -472,6 +473,24 @@ static int connect_ring(struct backend_i
 		return err;
 	}
 
+	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+			    "%63s", protocol, NULL);
+	if (err)
+		strcpy(protocol, "unspecified, assuming native");
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+	else {
+		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+		return -1;
+	}
+	printk("blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
 	/* Map the shared frame, irq etc. */
 	err = blkif_map(be->blkif, ring_ref, evtchn);
 	if (err) {

Index: build-32-unstable-13534/xen/include/public/io/blkif.h
===================================================================
--- build-32-unstable-13534.orig/xen/include/public/io/blkif.h
+++ build-32-unstable-13534/xen/include/public/io/blkif.h
@@ -71,18 +71,20 @@
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+struct blkif_request_segment {
+    grant_ref_t gref;        /* reference to I/O buffer frame        */
+    /* @first_sect: first sector in frame to transfer (inclusive).   */
+    /* @last_sect: last sector in frame to transfer (inclusive).     */
+    uint8_t     first_sect, last_sect;
+};
+
 struct blkif_request {
     uint8_t        operation;    /* BLKIF_OP_???                         */
    uint8_t        nr_segments;  /* number of segments                   */
     blkif_vdev_t   handle;       /* only for read/write requests         */
     uint64_t       id;           /* private guest value, echoed in resp  */
     blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-    struct blkif_request_segment {
-        grant_ref_t gref;        /* reference to I/O buffer frame        */
-        /* @first_sect: first sector in frame to transfer (inclusive).   */
-        /* @last_sect: last sector in frame to transfer (inclusive).     */
-        uint8_t     first_sect, last_sect;
-    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 typedef struct blkif_request blkif_request_t;

Index: build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
===================================================================
--- build-32-unstable-13534.orig/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
+++ build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
@@ -1091,15 +1091,15 @@ irqreturn_t tap_blkif_be_int(int irq, vo
 static int print_dbug = 1;
 static int do_block_io_op(blkif_t *blkif)
 {
-	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
 	blkif_request_t req;
 	pending_req_t *pending_req;
 	RING_IDX rc, rp;
 	int more_to_do = 0;
 	tap_blkif_t *info;
 
-	rc = blk_ring->req_cons;
-	rp = blk_ring->sring->req_prod;
+	rc = blk_rings->common.req_cons;
+	rp = blk_rings->common.sring->req_prod;
 	rmb(); /* Ensure we see queued requests up to 'rp'. */
 
 	/*Check blkif has corresponding UE ring*/
@@ -1130,8 +1130,8 @@ static int do_block_io_op(blkif_t *blkif
 			more_to_do = 1;
 			break;
 		}
 
-		if (RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
 			WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
 				" More to do\n");
 			more_to_do = 1;
@@ -1145,8 +1145,21 @@ static int do_block_io_op(blkif_t *blkif
 			break;
 		}
 
-		memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
-		blk_ring->req_cons = ++rc; /* before make_response() */
+		switch (blkif->blk_protocol) {
+		case BLKIF_PROTOCOL_NATIVE:
+			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
+			       sizeof(req));
+			break;
+		case BLKIF_PROTOCOL_X86_32:
+			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+			break;
+		case BLKIF_PROTOCOL_X86_64:
+			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+			break;
+		default:
+			BUG();
+		}
+		blk_rings->common.req_cons = ++rc; /* before make_response() */
 
 		switch (req.operation) {
 		case BLKIF_OP_READ:
@@ -1222,7 +1235,7 @@ static void dispatch_rw_block_io(blkif_t
 		WPRINTK("blktap: fe_ring is full, can't add "
 			"IO Request will be dropped. %d %d\n",
 			RING_SIZE(&info->ufe_ring),
-			RING_SIZE(&blkif->blk_ring));
+			RING_SIZE(&blkif->blk_rings.common));
 		goto fail_response;
 	}

@@ -1410,32 +1423,51 @@ static void dispatch_rw_block_io(blkif_t
 static void make_response(blkif_t *blkif, unsigned long id,
			   unsigned short op, int st)
 {
-	blkif_response_t *resp;
+	blkif_response_t resp;
 	unsigned long flags;
-	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
 	int more_to_do = 0;
 	int notify;
 
+	resp.id        = id;
+	resp.operation = op;
+	resp.status    = st;
+
 	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-	/* Place on the response ring for the relevant domain. */
-	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-	resp->id        = id;
-	resp->operation = op;
-	resp->status    = st;
-	blk_ring->rsp_prod_pvt++;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
+	/* Place on the response ring for the relevant domain. */
+	switch (blkif->blk_protocol) {
+	case BLKIF_PROTOCOL_NATIVE:
+		memcpy(RING_GET_RESPONSE(&blk_rings->native,
+					 blk_rings->native.rsp_prod_pvt),
+		       &resp, sizeof(resp));
+		break;
+	case BLKIF_PROTOCOL_X86_32:
+		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
+					 blk_rings->x86_32.rsp_prod_pvt),
+		       &resp, sizeof(resp));
+		break;
+	case BLKIF_PROTOCOL_X86_64:
+		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
+					 blk_rings->x86_64.rsp_prod_pvt),
+		       &resp, sizeof(resp));
+		break;
+	default:
+		BUG();
+	}
+	blk_rings->common.rsp_prod_pvt++;
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 
-	if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
 		/*
 		 * Tail check for pending requests. Allows frontend to avoid
 		 * notifications if requests are already in flight (lower
 		 * overheads and promotes batching).
 		 */
-		RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
-	} else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
 		more_to_do = 1;
 	}
 
 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 	if (more_to_do)
 		blkif_notify_work(blkif);

Index: build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blktap/common.h
===================================================================
--- build-32-unstable-13534.orig/linux-2.6-xen-sparse/drivers/xen/blktap/common.h
+++ build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blktap/common.h
@@ -39,8 +39,7 @@
 #include
 #include
 #include
-#include <xen/interface/io/blkif.h>
-#include <xen/interface/io/ring.h>
+#include <xen/blkif.h>
 #include
 #include
@@ -58,7 +57,8 @@ typedef struct blkif_st {
 	/* Physical parameters of the comms window. */
 	unsigned int irq;
 	/* Comms information. */
-	blkif_back_ring_t blk_ring;
+	int blk_protocol;
+	blkif_back_rings_t blk_rings;
 	struct vm_struct *blk_ring_area;
 	/* Back pointer to the backend_info. */
 	struct backend_info *be;

Index: build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
===================================================================
--- build-32-unstable-13534.orig/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
+++ build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
@@ -96,7 +96,6 @@ static void unmap_frontend_page(blkif_t
 int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
		  unsigned int evtchn)
 {
-	blkif_sring_t *sring;
 	int err;
 
 	/* Already connected through? */
@@ -112,8 +111,24 @@ int tap_blkif_map(blkif_t *blkif, unsign
 		return err;
 	}
 
-	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+	switch (blkif->blk_protocol) {
+	case BLKIF_PROTOCOL_NATIVE:
+	{
+		blkif_sring_t *sring;
+		sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+		break;
+	}
+	case BLKIF_PROTOCOL_X86_32:
+	{
+		blkif_x86_32_sring_t *sring_x86_32;
+		sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+		break;
+	}
+	case BLKIF_PROTOCOL_X86_64:
+	{
+		blkif_x86_64_sring_t *sring_x86_64;
+		sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+		break;
+	}
+	default:
+		BUG();
+	}
 
 	err = bind_interdomain_evtchn_to_irqhandler(
 		blkif->domid, evtchn, tap_blkif_be_int,
@@ -134,10 +149,10 @@
 		unbind_from_irqhandler(blkif->irq, blkif);
 		blkif->irq = 0;
 	}
-	if (blkif->blk_ring.sring) {
+	if (blkif->blk_rings.common.sring) {
 		unmap_frontend_page(blkif);
 		free_vm_area(blkif->blk_ring_area);
-		blkif->blk_ring.sring = NULL;
+		blkif->blk_rings.common.sring = NULL;
 	}
 }

Index: build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
===================================================================
--- build-32-unstable-13534.orig/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
+++ build-32-unstable-13534/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
@@ -340,6 +340,7 @@ static int connect_ring(struct backend_i
 	struct xenbus_device *dev = be->dev;
 	unsigned long ring_ref;
 	unsigned int evtchn;
+	char protocol[64] = "";
 	int err;
 
 	DPRINTK("%s\n", dev->otherend);
@@ -353,6 +354,24 @@ static int connect_ring(struct backend_i
 		return err;
 	}
 
+	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+			    "%63s", protocol, NULL);
+	if (err)
+		strcpy(protocol, "unspecified, assuming native");
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+	else {
+		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+		return -1;
+	}
+	printk("blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
 	/* Map the shared frame, irq etc. */
 	err = tap_blkif_map(be->blkif, ring_ref, evtchn);
 	if (err) {

Index: build-32-unstable-13534/linux-2.6-xen-sparse/include/xen/blkif.h
===================================================================
--- /dev/null
+++ build-32-unstable-13534/linux-2.6-xen-sparse/include/xen/blkif.h
@@ -0,0 +1,95 @@
+#ifndef __XEN_BLKIF_H__
+#define __XEN_BLKIF_H__
+
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/blkif.h>
+#include <xen/interface/io/protocols.h>
+
+/* Not a real protocol.  Used to generate ring structs which contain
+ * the elements common to all protocols only.  This way we get a
+ * compiler-checkable way to use common struct elements, so we can
+ * avoid using switch(protocol) in a number of places.  */
+struct blkif_common_request {
+	char dummy;
+};
+struct blkif_common_response {
+	char dummy;
+};
+
+/* i386 protocol version */
+#pragma pack(push, 4)
+struct blkif_x86_32_request {
+	uint8_t        operation;    /* BLKIF_OP_???                        */
+	uint8_t        nr_segments;  /* number of segments                  */
+	blkif_vdev_t   handle;       /* only for read/write requests        */
+	uint64_t       id;           /* private guest value, echoed in resp */
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+struct blkif_x86_32_response {
+	uint64_t       id;           /* copied from request                 */
+	uint8_t        operation;    /* copied from request                 */
+	int16_t        status;       /* BLKIF_RSP_???                       */
+};
+typedef struct blkif_x86_32_request blkif_x86_32_request_t;
+typedef struct blkif_x86_32_response blkif_x86_32_response_t;
+#pragma pack(pop)
+
+/* x86_64 protocol version */
+struct blkif_x86_64_request {
+	uint8_t        operation;    /* BLKIF_OP_???                        */
+	uint8_t        nr_segments;  /* number of segments                  */
+	blkif_vdev_t   handle;       /* only for read/write requests        */
+	uint64_t       __attribute__((__aligned__(8))) id;
+	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+struct blkif_x86_64_response {
+	uint64_t       __attribute__((__aligned__(8))) id;
+	uint8_t        operation;    /* copied from request                 */
+	int16_t        status;       /* BLKIF_RSP_???                       */
+};
+typedef struct blkif_x86_64_request blkif_x86_64_request_t;
+typedef struct blkif_x86_64_response blkif_x86_64_response_t;
+
+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
+
+union blkif_back_rings {
+	blkif_back_ring_t        native;
+	blkif_common_back_ring_t common;
+	blkif_x86_32_back_ring_t x86_32;
+	blkif_x86_64_back_ring_t x86_64;
+};
+typedef union blkif_back_rings blkif_back_rings_t;
+
+#define BLKIF_PROTOCOL_NATIVE 1
+#define BLKIF_PROTOCOL_X86_32 2
+#define BLKIF_PROTOCOL_X86_64 3
+
+static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
+{
+	int i, n = src->nr_segments;
+
+	dst->operation = src->operation;
+	dst->nr_segments = src->nr_segments;
+	dst->handle = src->handle;
+	dst->id = src->id;
+	dst->sector_number = src->sector_number;
+	/* nr_segments is frontend-controlled: bound the copy to seg[]
+	 * here; the bogus count itself is rejected by the caller. */
+	if (n > BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+	for (i = 0; i < n; i++)
+		dst->seg[i] = src->seg[i];
+}
+
+static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
+{
+	int i, n = src->nr_segments;
+
+	dst->operation = src->operation;
+	dst->nr_segments = src->nr_segments;
+	dst->handle = src->handle;
+	dst->id = src->id;
+	dst->sector_number = src->sector_number;
+	/* nr_segments is frontend-controlled: bound the copy to seg[]
+	 * here; the bogus count itself is rejected by the caller. */
+	if (n > BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+	for (i = 0; i < n; i++)
+		dst->seg[i] = src->seg[i];
+}
+
+#endif /* __XEN_BLKIF_H__ */
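
For reference, here is the frontend side of the negotiation sketched
above.  This is an illustration, not part of the patch:
blkfront_advertise_protocol is a made-up helper name, and the sketch
assumes the xenbus transaction the frontend already holds while
writing its ring-ref and event-channel nodes.  XEN_IO_PROTO_ABI_NATIVE
comes from io/protocols.h and expands to the ABI string matching the
frontend's build ("x86_32-abi" or "x86_64-abi").

/* Illustrative sketch: advertise the ring ABI in the frontend's own
 * xenstore directory so connect_ring() above can pick the matching
 * protocol.  A frontend that never writes the node simply gets the
 * native protocol, per the fallback in connect_ring(). */
static int blkfront_advertise_protocol(struct xenbus_device *dev,
				       struct xenbus_transaction xbt)
{
	return xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			     XEN_IO_PROTO_ABI_NATIVE);
}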
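And a self-contained userspace sketch of why the conversion helpers
exist at all.  The struct definitions below are stand-ins mirroring
the ones in the new include/xen/blkif.h (grant_ref_t and friends
replaced by fixed-width typedefs of the same size):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint32_t grant_ref_t;     /* stand-in, same size as the real one */
typedef uint16_t blkif_vdev_t;    /* stand-in */
typedef uint64_t blkif_sector_t;  /* stand-in */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

struct blkif_request_segment {
	grant_ref_t gref;
	uint8_t     first_sect, last_sect;
};

#pragma pack(push, 4)             /* i386 ABI: 64-bit fields 4-byte aligned */
struct blkif_x86_32_request {
	uint8_t        operation;
	uint8_t        nr_segments;
	blkif_vdev_t   handle;
	uint64_t       id;
	blkif_sector_t sector_number;
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
#pragma pack(pop)

struct blkif_x86_64_request {     /* x86_64 ABI: 64-bit fields 8-byte aligned */
	uint8_t        operation;
	uint8_t        nr_segments;
	blkif_vdev_t   handle;
	uint64_t       __attribute__((__aligned__(8))) id;
	blkif_sector_t sector_number;
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

int main(void)
{
	/* Print where "id" and "sector_number" land in each layout. */
	printf("x86_32: id@%zu sector@%zu size %zu\n",
	       offsetof(struct blkif_x86_32_request, id),
	       offsetof(struct blkif_x86_32_request, sector_number),
	       sizeof(struct blkif_x86_32_request));
	printf("x86_64: id@%zu sector@%zu size %zu\n",
	       offsetof(struct blkif_x86_64_request, id),
	       offsetof(struct blkif_x86_64_request, sector_number),
	       sizeof(struct blkif_x86_64_request));
	return 0;
}

On x86 this prints id at offset 4 versus 8 (108 versus 112 bytes
total), so a backend that memcpy()ed a foreign-protocol request would
read every field after "handle" from the wrong place.  That mismatch
is exactly what blkif_get_x86_32_req() and blkif_get_x86_64_req()
paper over by copying field by field.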