From: Halil Pasic <pasic@linux.ibm.com> To: kvm@vger.kernel.org, linux-s390@vger.kernel.org, Cornelia Huck <cohuck@redhat.com>, Martin Schwidefsky <schwidefsky@de.ibm.com>, Sebastian Ott <sebott@linux.ibm.com> Cc: Halil Pasic <pasic@linux.ibm.com>, virtualization@lists.linux-foundation.org, "Michael S. Tsirkin" <mst@redhat.com>, Christoph Hellwig <hch@infradead.org>, Thomas Huth <thuth@redhat.com>, Christian Borntraeger <borntraeger@de.ibm.com>, Viktor Mihajlovski <mihajlov@linux.ibm.com>, Vasily Gorbik <gor@linux.ibm.com>, Janosch Frank <frankja@linux.ibm.com>, Claudio Imbrenda <imbrenda@linux.ibm.com>, Farhan Ali <alifm@linux.ibm.com>, Eric Farman <farman@linux.ibm.com> Subject: [PATCH 05/10] s390/cio: introduce DMA pools to cio Date: Fri, 26 Apr 2019 20:32:40 +0200 [thread overview] Message-ID: <20190426183245.37939-6-pasic@linux.ibm.com> (raw) In-Reply-To: <20190426183245.37939-1-pasic@linux.ibm.com> To support protected virtualization cio will need to make sure the memory used for communication with the hypervisor is DMA memory. Let us introduce one global cio, and some tools for pools seated at individual devices. Our DMA pools are implemented as a gen_pool backed with DMA pages. The idea is to avoid each allocation effectively wasting a page, as we typically allocate much less than PAGE_SIZE. 
Signed-off-by: Halil Pasic <pasic@linux.ibm.com> --- arch/s390/Kconfig | 1 + arch/s390/include/asm/cio.h | 11 +++++ drivers/s390/cio/cio.h | 1 + drivers/s390/cio/css.c | 101 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 114 insertions(+) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 5500d05d4d53..5861311d95d9 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -195,6 +195,7 @@ config S390 select VIRT_TO_BUS select HAVE_NMI select SWIOTLB + select GENERIC_ALLOCATOR config SCHED_OMIT_FRAME_POINTER diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 1727180e8ca1..43c007d2775a 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -328,6 +328,17 @@ static inline u8 pathmask_to_pos(u8 mask) void channel_subsystem_reinit(void); extern void css_schedule_reprobe(void); +extern void *cio_dma_zalloc(size_t size); +extern void cio_dma_free(void *cpu_addr, size_t size); +extern struct device *cio_get_dma_css_dev(void); + +struct gen_pool; +void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, + size_t size); +void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size); +void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev); +struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages); + /* Function from drivers/s390/cio/chsc.c */ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta); int chsc_sstpi(void *page, void *result, size_t size); diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 92eabbb5f18d..f23f7e2c33f7 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -113,6 +113,7 @@ struct subchannel { enum sch_todo todo; struct work_struct todo_work; struct schib_config config; + u64 dma_mask; } __attribute__ ((aligned(8))); DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index aea502922646..7087cc314fe9 100644 --- 
a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -20,6 +20,8 @@ #include <linux/reboot.h> #include <linux/suspend.h> #include <linux/proc_fs.h> +#include <linux/genalloc.h> +#include <linux/dma-mapping.h> #include <asm/isc.h> #include <asm/crw.h> @@ -199,6 +201,8 @@ static int css_validate_subchannel(struct subchannel_id schid, return err; } +static u64 css_dev_dma_mask = DMA_BIT_MASK(31); + struct subchannel *css_alloc_subchannel(struct subchannel_id schid, struct schib *schib) { @@ -224,6 +228,9 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, INIT_WORK(&sch->todo_work, css_sch_todo); sch->dev.release = &css_subchannel_release; device_initialize(&sch->dev); + sch->dma_mask = css_dev_dma_mask; + sch->dev.dma_mask = &sch->dma_mask; + sch->dev.coherent_dma_mask = sch->dma_mask; return sch; err: @@ -899,6 +906,9 @@ static int __init setup_css(int nr) dev_set_name(&css->device, "css%x", nr); css->device.groups = cssdev_attr_groups; css->device.release = channel_subsystem_release; + /* some cio DMA memory needs to be 31 bit addressable */ + css->device.coherent_dma_mask = css_dev_dma_mask, + css->device.dma_mask = &css_dev_dma_mask; mutex_init(&css->mutex); css->cssid = chsc_get_cssid(nr); @@ -1018,6 +1028,96 @@ static struct notifier_block css_power_notifier = { .notifier_call = css_power_event, }; +#define POOL_INIT_PAGES 1 +static struct gen_pool *cio_dma_pool; +/* Currently cio supports only a single css */ +#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO) + + +struct device *cio_get_dma_css_dev(void) +{ + return &channel_subsystems[0]->device; +} + +struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages) +{ + struct gen_pool *gp_dma; + void *cpu_addr; + dma_addr_t dma_addr; + int i; + + gp_dma = gen_pool_create(3, -1); + if (!gp_dma) + return NULL; + for (i = 0; i < nr_pages; ++i) { + cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, + CIO_DMA_GFP); + if (!cpu_addr) + return gp_dma; + gen_pool_add_virt(gp_dma, 
(unsigned long) cpu_addr, + dma_addr, PAGE_SIZE, -1); + } + return gp_dma; +} + +static void __gp_dma_free_dma(struct gen_pool *pool, + struct gen_pool_chunk *chunk, void *data) +{ + dma_free_coherent((struct device *) data, PAGE_SIZE, + (void *) chunk->start_addr, + (dma_addr_t) chunk->phys_addr); +} + +void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev) +{ + if (!gp_dma) + return; + /* this is qite ugly but no better idea */ + gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev); + gen_pool_destroy(gp_dma); +} + +static void __init cio_dma_pool_init(void) +{ + /* No need to free up the resources: compiled in */ + cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1); +} + +void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, + size_t size) +{ + dma_addr_t dma_addr; + unsigned long addr = gen_pool_alloc(gp_dma, size); + + if (!addr) { + addr = (unsigned long) dma_alloc_coherent(dma_dev, + PAGE_SIZE, &dma_addr, CIO_DMA_GFP); + if (!addr) + return NULL; + gen_pool_add_virt(gp_dma, addr, dma_addr, PAGE_SIZE, -1); + addr = gen_pool_alloc(gp_dma, size); + } + return (void *) addr; +} + +void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size) +{ + if (!cpu_addr) + return; + memset(cpu_addr, 0, size); + gen_pool_free(gp_dma, (unsigned long) cpu_addr, size); +} + +void *cio_dma_zalloc(size_t size) +{ + return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size); +} + +void cio_dma_free(void *cpu_addr, size_t size) +{ + cio_gp_dma_free(cio_dma_pool, cpu_addr, size); +} + /* * Now that the driver core is running, we can setup our channel subsystem. * The struct subchannel's are created during probing. @@ -1063,6 +1163,7 @@ static int __init css_bus_init(void) unregister_reboot_notifier(&css_reboot_notifier); goto out_unregister; } + cio_dma_pool_init(); css_init_done = 1; /* Enable default isc for I/O subchannels. */ -- 2.16.4
WARNING: multiple messages have this Message-ID (diff)
From: Halil Pasic <pasic@linux.ibm.com> To: kvm@vger.kernel.org, linux-s390@vger.kernel.org, Cornelia Huck <cohuck@redhat.com>, Martin Schwidefsky <schwidefsky@de.ibm.com>, Sebastian Ott <sebott@linux.ibm.com> Cc: Christoph Hellwig <hch@infradead.org>, Thomas Huth <thuth@redhat.com>, Claudio Imbrenda <imbrenda@linux.ibm.com>, Janosch Frank <frankja@linux.ibm.com>, Vasily Gorbik <gor@linux.ibm.com>, "Michael S. Tsirkin" <mst@redhat.com>, Farhan Ali <alifm@linux.ibm.com>, Eric Farman <farman@linux.ibm.com>, virtualization@lists.linux-foundation.org, Halil Pasic <pasic@linux.ibm.com>, Viktor Mihajlovski <mihajlov@linux.ibm.com> Subject: [PATCH 05/10] s390/cio: introduce DMA pools to cio Date: Fri, 26 Apr 2019 20:32:40 +0200 [thread overview] Message-ID: <20190426183245.37939-6-pasic@linux.ibm.com> (raw) In-Reply-To: <20190426183245.37939-1-pasic@linux.ibm.com> To support protected virtualization cio will need to make sure the memory used for communication with the hypervisor is DMA memory. Let us introduce one global cio, and some tools for pools seated at individual devices. Our DMA pools are implemented as a gen_pool backed with DMA pages. The idea is to avoid each allocation effectively wasting a page, as we typically allocate much less than PAGE_SIZE. 
Signed-off-by: Halil Pasic <pasic@linux.ibm.com> --- arch/s390/Kconfig | 1 + arch/s390/include/asm/cio.h | 11 +++++ drivers/s390/cio/cio.h | 1 + drivers/s390/cio/css.c | 101 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 114 insertions(+) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 5500d05d4d53..5861311d95d9 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -195,6 +195,7 @@ config S390 select VIRT_TO_BUS select HAVE_NMI select SWIOTLB + select GENERIC_ALLOCATOR config SCHED_OMIT_FRAME_POINTER diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 1727180e8ca1..43c007d2775a 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -328,6 +328,17 @@ static inline u8 pathmask_to_pos(u8 mask) void channel_subsystem_reinit(void); extern void css_schedule_reprobe(void); +extern void *cio_dma_zalloc(size_t size); +extern void cio_dma_free(void *cpu_addr, size_t size); +extern struct device *cio_get_dma_css_dev(void); + +struct gen_pool; +void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, + size_t size); +void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size); +void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev); +struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages); + /* Function from drivers/s390/cio/chsc.c */ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta); int chsc_sstpi(void *page, void *result, size_t size); diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 92eabbb5f18d..f23f7e2c33f7 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -113,6 +113,7 @@ struct subchannel { enum sch_todo todo; struct work_struct todo_work; struct schib_config config; + u64 dma_mask; } __attribute__ ((aligned(8))); DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index aea502922646..7087cc314fe9 100644 --- 
a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -20,6 +20,8 @@ #include <linux/reboot.h> #include <linux/suspend.h> #include <linux/proc_fs.h> +#include <linux/genalloc.h> +#include <linux/dma-mapping.h> #include <asm/isc.h> #include <asm/crw.h> @@ -199,6 +201,8 @@ static int css_validate_subchannel(struct subchannel_id schid, return err; } +static u64 css_dev_dma_mask = DMA_BIT_MASK(31); + struct subchannel *css_alloc_subchannel(struct subchannel_id schid, struct schib *schib) { @@ -224,6 +228,9 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, INIT_WORK(&sch->todo_work, css_sch_todo); sch->dev.release = &css_subchannel_release; device_initialize(&sch->dev); + sch->dma_mask = css_dev_dma_mask; + sch->dev.dma_mask = &sch->dma_mask; + sch->dev.coherent_dma_mask = sch->dma_mask; return sch; err: @@ -899,6 +906,9 @@ static int __init setup_css(int nr) dev_set_name(&css->device, "css%x", nr); css->device.groups = cssdev_attr_groups; css->device.release = channel_subsystem_release; + /* some cio DMA memory needs to be 31 bit addressable */ + css->device.coherent_dma_mask = css_dev_dma_mask, + css->device.dma_mask = &css_dev_dma_mask; mutex_init(&css->mutex); css->cssid = chsc_get_cssid(nr); @@ -1018,6 +1028,96 @@ static struct notifier_block css_power_notifier = { .notifier_call = css_power_event, }; +#define POOL_INIT_PAGES 1 +static struct gen_pool *cio_dma_pool; +/* Currently cio supports only a single css */ +#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO) + + +struct device *cio_get_dma_css_dev(void) +{ + return &channel_subsystems[0]->device; +} + +struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages) +{ + struct gen_pool *gp_dma; + void *cpu_addr; + dma_addr_t dma_addr; + int i; + + gp_dma = gen_pool_create(3, -1); + if (!gp_dma) + return NULL; + for (i = 0; i < nr_pages; ++i) { + cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, + CIO_DMA_GFP); + if (!cpu_addr) + return gp_dma; + gen_pool_add_virt(gp_dma, 
(unsigned long) cpu_addr, + dma_addr, PAGE_SIZE, -1); + } + return gp_dma; +} + +static void __gp_dma_free_dma(struct gen_pool *pool, + struct gen_pool_chunk *chunk, void *data) +{ + dma_free_coherent((struct device *) data, PAGE_SIZE, + (void *) chunk->start_addr, + (dma_addr_t) chunk->phys_addr); +} + +void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev) +{ + if (!gp_dma) + return; + /* this is qite ugly but no better idea */ + gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev); + gen_pool_destroy(gp_dma); +} + +static void __init cio_dma_pool_init(void) +{ + /* No need to free up the resources: compiled in */ + cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1); +} + +void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, + size_t size) +{ + dma_addr_t dma_addr; + unsigned long addr = gen_pool_alloc(gp_dma, size); + + if (!addr) { + addr = (unsigned long) dma_alloc_coherent(dma_dev, + PAGE_SIZE, &dma_addr, CIO_DMA_GFP); + if (!addr) + return NULL; + gen_pool_add_virt(gp_dma, addr, dma_addr, PAGE_SIZE, -1); + addr = gen_pool_alloc(gp_dma, size); + } + return (void *) addr; +} + +void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size) +{ + if (!cpu_addr) + return; + memset(cpu_addr, 0, size); + gen_pool_free(gp_dma, (unsigned long) cpu_addr, size); +} + +void *cio_dma_zalloc(size_t size) +{ + return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size); +} + +void cio_dma_free(void *cpu_addr, size_t size) +{ + cio_gp_dma_free(cio_dma_pool, cpu_addr, size); +} + /* * Now that the driver core is running, we can setup our channel subsystem. * The struct subchannel's are created during probing. @@ -1063,6 +1163,7 @@ static int __init css_bus_init(void) unregister_reboot_notifier(&css_reboot_notifier); goto out_unregister; } + cio_dma_pool_init(); css_init_done = 1; /* Enable default isc for I/O subchannels. */ -- 2.16.4
next prev parent reply other threads:[~2019-04-26 18:32 UTC|newest] Thread overview: 182+ messages / expand[flat|nested] mbox.gz Atom feed top 2019-04-26 18:32 [PATCH 00/10] s390: virtio: support protected virtualization Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-04-26 18:32 ` [PATCH 01/10] virtio/s390: use vring_create_virtqueue Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-05-03 9:17 ` Cornelia Huck 2019-05-03 20:04 ` Michael S. Tsirkin 2019-05-03 20:04 ` Michael S. Tsirkin 2019-05-04 14:03 ` Halil Pasic 2019-05-04 14:03 ` Halil Pasic 2019-05-05 11:15 ` Cornelia Huck 2019-05-05 11:15 ` Cornelia Huck 2019-05-07 13:58 ` Christian Borntraeger 2019-05-07 13:58 ` Christian Borntraeger 2019-05-08 20:12 ` Halil Pasic 2019-05-08 20:12 ` Halil Pasic 2019-05-10 14:07 ` Cornelia Huck 2019-05-10 14:07 ` Cornelia Huck 2019-05-12 16:47 ` Michael S. Tsirkin 2019-05-12 16:47 ` Michael S. Tsirkin 2019-05-13 9:52 ` Cornelia Huck 2019-05-13 9:52 ` Cornelia Huck 2019-05-13 12:27 ` Michael Mueller 2019-05-13 12:27 ` Michael Mueller 2019-05-13 12:29 ` Cornelia Huck 2019-05-13 12:29 ` Cornelia Huck 2019-04-26 18:32 ` [PATCH 02/10] virtio/s390: DMA support for virtio-ccw Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-05-03 9:31 ` Cornelia Huck 2019-04-26 18:32 ` [PATCH 03/10] virtio/s390: enable packed ring Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-05-03 9:44 ` Cornelia Huck 2019-05-05 15:13 ` Thomas Huth 2019-05-05 15:13 ` Thomas Huth 2019-04-26 18:32 ` [PATCH 04/10] s390/mm: force swiotlb for protected virtualization Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-04-26 19:27 ` Christoph Hellwig 2019-04-26 19:27 ` Christoph Hellwig 2019-04-29 13:59 ` Halil Pasic 2019-04-29 13:59 ` Halil Pasic 2019-04-29 14:05 ` Christian Borntraeger 2019-04-29 14:05 ` Christian Borntraeger 2019-05-13 12:50 ` Michael Mueller 2019-05-13 12:50 ` Michael Mueller 2019-05-08 13:15 ` Claudio Imbrenda 2019-05-08 13:15 ` Claudio Imbrenda 2019-05-09 22:34 ` Halil Pasic 2019-05-09 22:34 ` Halil 
Pasic 2019-05-15 14:15 ` Michael Mueller 2019-05-15 14:15 ` Michael Mueller [not found] ` <ad23f5e7-dc78-04af-c892-47bbc65134c6@linux.ibm.com> 2019-05-09 18:05 ` Jason J. Herne 2019-05-09 18:05 ` Jason J. Herne 2019-05-09 18:05 ` Jason J. Herne 2019-05-10 7:49 ` Claudio Imbrenda 2019-05-10 7:49 ` Claudio Imbrenda 2019-04-26 18:32 ` Halil Pasic [this message] 2019-04-26 18:32 ` [PATCH 05/10] s390/cio: introduce DMA pools to cio Halil Pasic 2019-05-08 13:18 ` Sebastian Ott 2019-05-08 13:18 ` Sebastian Ott 2019-05-08 21:22 ` Halil Pasic 2019-05-08 21:22 ` Halil Pasic 2019-05-09 8:40 ` Sebastian Ott 2019-05-09 8:40 ` Sebastian Ott 2019-05-09 10:11 ` Cornelia Huck 2019-05-09 10:11 ` Cornelia Huck 2019-05-09 22:11 ` Halil Pasic 2019-05-09 22:11 ` Halil Pasic 2019-05-10 14:10 ` Cornelia Huck 2019-05-10 14:10 ` Cornelia Huck 2019-05-12 18:22 ` Halil Pasic 2019-05-12 18:22 ` Halil Pasic 2019-05-13 13:29 ` Cornelia Huck 2019-05-13 13:29 ` Cornelia Huck 2019-05-15 17:12 ` Halil Pasic 2019-05-15 17:12 ` Halil Pasic 2019-05-16 6:13 ` Cornelia Huck 2019-05-16 6:13 ` Cornelia Huck 2019-05-16 13:59 ` Sebastian Ott 2019-05-16 13:59 ` Sebastian Ott 2019-05-20 12:13 ` Halil Pasic 2019-05-20 12:13 ` Halil Pasic 2019-05-21 8:46 ` Michael Mueller 2019-05-21 8:46 ` Michael Mueller 2019-05-22 12:07 ` Sebastian Ott 2019-05-22 12:07 ` Sebastian Ott 2019-05-22 22:12 ` Halil Pasic 2019-05-22 22:12 ` Halil Pasic 2019-05-23 15:17 ` Halil Pasic 2019-05-23 15:17 ` Halil Pasic 2019-04-26 18:32 ` [PATCH 06/10] s390/cio: add basic protected virtualization support Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-05-08 13:46 ` Sebastian Ott 2019-05-08 13:46 ` Sebastian Ott 2019-05-08 13:54 ` Christoph Hellwig 2019-05-08 13:54 ` Christoph Hellwig 2019-05-08 21:08 ` Halil Pasic 2019-05-08 21:08 ` Halil Pasic 2019-05-09 8:52 ` Sebastian Ott 2019-05-09 8:52 ` Sebastian Ott 2019-05-08 14:23 ` Pierre Morel 2019-05-08 14:23 ` Pierre Morel 2019-05-13 9:41 ` Cornelia Huck 2019-05-13 9:41 ` Cornelia Huck 
2019-05-14 14:47 ` Jason J. Herne 2019-05-14 14:47 ` Jason J. Herne 2019-05-15 21:08 ` Halil Pasic 2019-05-15 21:08 ` Halil Pasic 2019-05-16 6:32 ` Cornelia Huck 2019-05-16 6:32 ` Cornelia Huck 2019-05-16 13:42 ` Halil Pasic 2019-05-16 13:42 ` Halil Pasic 2019-05-16 13:54 ` Cornelia Huck 2019-05-16 13:54 ` Cornelia Huck 2019-05-15 20:51 ` Halil Pasic 2019-05-15 20:51 ` Halil Pasic 2019-05-16 6:29 ` Cornelia Huck 2019-05-16 6:29 ` Cornelia Huck 2019-05-18 18:11 ` Halil Pasic 2019-05-18 18:11 ` Halil Pasic 2019-05-20 10:21 ` Cornelia Huck 2019-05-20 10:21 ` Cornelia Huck 2019-05-20 12:34 ` Halil Pasic 2019-05-20 12:34 ` Halil Pasic 2019-05-20 13:43 ` Cornelia Huck 2019-05-20 13:43 ` Cornelia Huck 2019-04-26 18:32 ` [PATCH 07/10] s390/airq: use DMA memory for adapter interrupts Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-05-08 13:58 ` Sebastian Ott 2019-05-08 13:58 ` Sebastian Ott 2019-05-09 11:37 ` Cornelia Huck 2019-05-09 11:37 ` Cornelia Huck 2019-05-13 12:59 ` Cornelia Huck 2019-05-13 12:59 ` Cornelia Huck 2019-04-26 18:32 ` [PATCH 08/10] virtio/s390: add indirection to indicators access Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-05-08 14:31 ` Pierre Morel 2019-05-08 14:31 ` Pierre Morel 2019-05-09 12:01 ` Pierre Morel 2019-05-09 12:01 ` Pierre Morel 2019-05-09 18:26 ` Halil Pasic 2019-05-09 18:26 ` Halil Pasic 2019-05-10 7:43 ` Pierre Morel 2019-05-10 7:43 ` Pierre Morel 2019-05-10 11:54 ` Halil Pasic 2019-05-10 11:54 ` Halil Pasic 2019-05-10 15:36 ` Pierre Morel 2019-05-10 15:36 ` Pierre Morel 2019-05-13 10:15 ` Cornelia Huck 2019-05-13 10:15 ` Cornelia Huck 2019-05-16 15:24 ` Pierre Morel 2019-05-16 15:24 ` Pierre Morel 2019-04-26 18:32 ` [PATCH 09/10] virtio/s390: use DMA memory for ccw I/O and classic notifiers Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-05-08 14:46 ` Pierre Morel 2019-05-08 14:46 ` Pierre Morel 2019-05-09 13:30 ` Pierre Morel 2019-05-09 13:30 ` Pierre Morel 2019-05-09 18:30 ` Halil Pasic 2019-05-09 18:30 ` Halil Pasic 2019-05-13 
13:54 ` Cornelia Huck 2019-05-13 13:54 ` Cornelia Huck 2019-04-26 18:32 ` [PATCH 10/10] virtio/s390: make airq summary indicators DMA Halil Pasic 2019-04-26 18:32 ` Halil Pasic 2019-05-08 15:11 ` Pierre Morel 2019-05-08 15:11 ` Pierre Morel 2019-05-15 13:33 ` Michael Mueller 2019-05-15 13:33 ` Michael Mueller 2019-05-15 17:23 ` Halil Pasic 2019-05-15 17:23 ` Halil Pasic 2019-05-13 12:20 ` Cornelia Huck 2019-05-13 12:20 ` Cornelia Huck 2019-05-15 13:43 ` Michael Mueller 2019-05-15 13:43 ` Michael Mueller 2019-05-15 13:50 ` Cornelia Huck 2019-05-15 13:50 ` Cornelia Huck 2019-05-15 17:18 ` Halil Pasic 2019-05-15 17:18 ` Halil Pasic 2019-05-03 9:55 ` [PATCH 00/10] s390: virtio: support protected virtualization Cornelia Huck 2019-05-03 10:03 ` Juergen Gross 2019-05-03 13:33 ` Cornelia Huck 2019-05-03 13:33 ` Cornelia Huck 2019-05-04 13:58 ` Halil Pasic 2019-05-04 13:58 ` Halil Pasic
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20190426183245.37939-6-pasic@linux.ibm.com \ --to=pasic@linux.ibm.com \ --cc=alifm@linux.ibm.com \ --cc=borntraeger@de.ibm.com \ --cc=cohuck@redhat.com \ --cc=farman@linux.ibm.com \ --cc=frankja@linux.ibm.com \ --cc=gor@linux.ibm.com \ --cc=hch@infradead.org \ --cc=imbrenda@linux.ibm.com \ --cc=kvm@vger.kernel.org \ --cc=linux-s390@vger.kernel.org \ --cc=mihajlov@linux.ibm.com \ --cc=mst@redhat.com \ --cc=schwidefsky@de.ibm.com \ --cc=sebott@linux.ibm.com \ --cc=thuth@redhat.com \ --cc=virtualization@lists.linux-foundation.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.