From: Kishon Vijay Abraham I <kishon@ti.com>
To: Jason Wang <jasowang@redhat.com>, Cornelia Huck <cohuck@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>,
	Ohad Ben-Cohen <ohad@wizery.com>,
	Bjorn Andersson <bjorn.andersson@linaro.org>,
	Jon Mason <jdmason@kudzu.us>, Dave Jiang <dave.jiang@intel.com>,
	Allen Hubbe <allenbh@gmail.com>,
	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>,
	Bjorn Helgaas <bhelgaas@google.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Stefano Garzarella <sgarzare@redhat.com>,
	<linux-doc@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<linux-remoteproc@vger.kernel.org>, <linux-ntb@googlegroups.com>,
	<linux-pci@vger.kernel.org>, <kvm@vger.kernel.org>,
	<virtualization@lists.linux-foundation.org>,
	<netdev@vger.kernel.org>
Subject: Re: [RFC PATCH 00/22] Enhance VHOST to enable SoC-to-SoC communication
Date: Wed, 16 Sep 2020 17:17:32 +0530
Message-ID: <67924594-c70e-390e-ce2e-dda41a94ada1@ti.com>
In-Reply-To: <ee0aa81d-064b-d7a7-86bb-79a3f4d3dd11@redhat.com>

Hi Jason,

On 16/09/20 8:40 am, Jason Wang wrote:
> 
> On 2020/9/15 11:47 PM, Kishon Vijay Abraham I wrote:
>> Hi Jason,
>>
>> On 15/09/20 1:48 pm, Jason Wang wrote:
>>> Hi Kishon:
>>>
>>> On 2020/9/14 3:23 PM, Kishon Vijay Abraham I wrote:
>>>>> Then you need something that is functionally equivalent to virtio PCI,
>>>>> which is actually the concept of vDPA (e.g. vDPA provides
>>>>> alternatives if
>>>>> queue_sel is hard in the EP implementation).
>>>> Okay, I just tried to compare the 'struct vdpa_config_ops' and 'struct
>>>> vhost_config_ops' (introduced in [RFC PATCH 03/22] vhost: Add ops for
>>>> the VHOST driver to configure VHOST device).
>>>>
>>>> struct vdpa_config_ops {
>>>>      /* Virtqueue ops */
>>>>      int (*set_vq_address)(struct vdpa_device *vdev,
>>>>                    u16 idx, u64 desc_area, u64 driver_area,
>>>>                    u64 device_area);
>>>>      void (*set_vq_num)(struct vdpa_device *vdev, u16 idx, u32 num);
>>>>      void (*kick_vq)(struct vdpa_device *vdev, u16 idx);
>>>>      void (*set_vq_cb)(struct vdpa_device *vdev, u16 idx,
>>>>                struct vdpa_callback *cb);
>>>>      void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
>>>>      bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
>>>>      int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
>>>>                  const struct vdpa_vq_state *state);
>>>>      int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
>>>>                  struct vdpa_vq_state *state);
>>>>      struct vdpa_notification_area
>>>>      (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
>>>>      /* vq irq is not expected to be changed once DRIVER_OK is set */
>>>>      int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx);
>>>>
>>>>      /* Device ops */
>>>>      u32 (*get_vq_align)(struct vdpa_device *vdev);
>>>>      u64 (*get_features)(struct vdpa_device *vdev);
>>>>      int (*set_features)(struct vdpa_device *vdev, u64 features);
>>>>      void (*set_config_cb)(struct vdpa_device *vdev,
>>>>                    struct vdpa_callback *cb);
>>>>      u16 (*get_vq_num_max)(struct vdpa_device *vdev);
>>>>      u32 (*get_device_id)(struct vdpa_device *vdev);
>>>>      u32 (*get_vendor_id)(struct vdpa_device *vdev);
>>>>      u8 (*get_status)(struct vdpa_device *vdev);
>>>>      void (*set_status)(struct vdpa_device *vdev, u8 status);
>>>>      void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
>>>>                 void *buf, unsigned int len);
>>>>      void (*set_config)(struct vdpa_device *vdev, unsigned int offset,
>>>>                 const void *buf, unsigned int len);
>>>>      u32 (*get_generation)(struct vdpa_device *vdev);
>>>>
>>>>      /* DMA ops */
>>>>      int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
>>>>      int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
>>>>                 u64 pa, u32 perm);
>>>>      int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);
>>>>
>>>>      /* Free device resources */
>>>>      void (*free)(struct vdpa_device *vdev);
>>>> };
>>>>
>>>> +struct vhost_config_ops {
>>>> +    int (*create_vqs)(struct vhost_dev *vdev, unsigned int nvqs,
>>>> +              unsigned int num_bufs, struct vhost_virtqueue *vqs[],
>>>> +              vhost_vq_callback_t *callbacks[],
>>>> +              const char * const names[]);
>>>> +    void (*del_vqs)(struct vhost_dev *vdev);
>>>> +    int (*write)(struct vhost_dev *vdev, u64 vhost_dst, void *src, int len);
>>>> +    int (*read)(struct vhost_dev *vdev, void *dst, u64 vhost_src, int len);
>>>> +    int (*set_features)(struct vhost_dev *vdev, u64 device_features);
>>>> +    int (*set_status)(struct vhost_dev *vdev, u8 status);
>>>> +    u8 (*get_status)(struct vhost_dev *vdev);
>>>> +};
>>>> +
>>>> struct virtio_config_ops
>>>> I think there's some overlap here, and some of the ops try to do the
>>>> same thing.
>>>>
>>>> I think it differs in (*set_vq_address)() and (*create_vqs)().
>>>> [create_vqs() introduced in struct vhost_config_ops provides
>>>> complementary functionality to (*find_vqs)() in struct
>>>> virtio_config_ops. It seemingly encapsulates the functionality of
>>>> (*set_vq_address)(), (*set_vq_num)(), (*set_vq_cb)(),..].
>>>>
>>>> Back to the difference between (*set_vq_address)() and (*create_vqs)(),
>>>> set_vq_address() directly provides the virtqueue address to the vdpa
>>>> device but create_vqs() only provides the parameters of the virtqueue
>>>> (like the number of virtqueues, number of buffers) but does not
>>>> directly
>>>> provide the address. IMO the backend client drivers (like net or vhost)
>>>> shouldn't/cannot by themselves know how to access the vring created on
>>>> the virtio front-end. The vdpa device/vhost device should have logic for
>>>> that. That will help the client drivers work with different types of
>>>> vdpa/vhost devices and access the vring created by virtio
>>>> irrespective of whether the vring is accessed via MMIO, kernel
>>>> space, or user space.
>>>>
>>>> I think vDPA always works with client drivers in userspace,
>>>> providing a
>>>> userspace address for the vring.
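
To make that split concrete, here is a rough sketch of how a client would
use the proposed API (the call site below is illustrative, not copied
from the patches):

    /*
     * The client only describes the queues it wants; how the vring set
     * up by the virtio front-end is reached (MMIO, kernel or user
     * space) stays hidden behind the transport's create_vqs()
     * implementation.
     */
    ret = ops->create_vqs(vdev, nvqs, num_bufs, vqs, callbacks, names);

    /*
     * By contrast, with (*set_vq_address)() the vDPA device is handed
     * the vring addresses directly and is expected to know how to
     * access that memory itself.
     */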
>>>
>>> Sorry for being unclear. What I meant is not replacing vDPA with the
>>> vhost(bus) you proposed but the possibility of replacing virtio-pci-epf
>>> with vDPA in:
>> Okay, so the virtio back-end still uses vhost and the front-end should use
>> vDPA. I see. So the host-side PCI driver for EPF should populate
>> vdpa_config_ops and invoke vdpa_register_device().
> 
> 
> Yes.
> 
> 
>>> My question is basically for the part of virtio_pci_epf_send_command(),
>>> so it looks to me like you have a vendor-specific API to replace the
>>> virtio-pci layout of the BAR:
>> Even when we use vDPA, we have to use some sort of
>> virtio_pci_epf_send_command() to communicate with the virtio backend, right?
> 
> 
> Right.
> 
> 
>>
>> Right, the layout is slightly different from the standard layout.
>>
>> This is the layout
>> struct epf_vhost_reg_queue {
>>          u8 cmd;
>>          u8 cmd_status;
>>          u16 status;
>>          u16 num_buffers;
>>          u16 msix_vector;
>>          u64 queue_addr;
> 
> 
> What's the meaning of queue_addr here?

Using queue_addr, the virtio front-end communicates the address of the
memory allocated for the virtqueue to the virtio back-end.
> 
> Doesn't that mean the device expects contiguous memory for the
> avail/desc/used rings?

It's contiguous memory. Isn't this similar to other virtio transports
(both the PCI legacy and modern interfaces)?
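
As a rough illustration (the __iomem access below and the vq/index
variables are assumptions, not taken from the patches), all three rings
come from one contiguous allocation and only its base address is
published to the back-end:

    /* Hypothetical front-end sketch: publish the vring base address */
    struct epf_vhost_reg __iomem *reg = vp_dev->ioaddr;    /* BAR mapping */
    dma_addr_t base = virtqueue_get_desc_addr(vq);  /* desc/avail/used share it */

    lo_hi_writeq(base, &reg->vq[index].queue_addr);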
> 
> 
>> } __packed;
>>
>> struct epf_vhost_reg {
>>          u64 host_features;
>>          u64 guest_features;
>>          u16 msix_config;
>>          u16 num_queues;
>>          u8 device_status;
>>          u8 config_generation;
>>          u32 isr;
>>          u8 cmd;
>>          u8 cmd_status;
>>          struct epf_vhost_reg_queue vq[MAX_VQS];
>> } __packed;
>>>
>>> +static int virtio_pci_epf_send_command(struct virtio_pci_device *vp_dev,
>>> +                       u32 command)
>>> +{
>>> +    struct virtio_pci_epf *pci_epf;
>>> +    void __iomem *ioaddr;
>>> +    ktime_t timeout;
>>> +    bool timedout;
>>> +    int ret = 0;
>>> +    u8 status;
>>> +
>>> +    pci_epf = to_virtio_pci_epf(vp_dev);
>>> +    ioaddr = vp_dev->ioaddr;
>>> +
>>> +    mutex_lock(&pci_epf->lock);
>>> +    writeb(command, ioaddr + HOST_CMD);
>>> +    timeout = ktime_add_ms(ktime_get(), COMMAND_TIMEOUT);
>>> +    while (1) {
>>> +        timedout = ktime_after(ktime_get(), timeout);
>>> +        status = readb(ioaddr + HOST_CMD_STATUS);
>>> +
>>>
>>> Several questions:
>>>
>>> - It's not clear to me how the synchronization is done between the RC
>>> and EP. E.g., how and when the value of HOST_CMD_STATUS can be changed.
>> The HOST_CMD (commands sent to the EP) is serialized using a mutex.
>> Once the EP reads the command, it resets the value in HOST_CMD, so
>> HOST_CMD is unlikely to be an issue.
> 
> 
> Here's my understanding of the protocol:
> 
> 1) RC write to HOST_CMD
> 2) RC wait for HOST_CMD_STATUS to be HOST_CMD_STATUS_OKAY

That's right!
> 
> It looks to me that what the EP should do is
> 
> 1) reset HOST_CMD after reading a new command

That's right! It does.
> 
> And it looks to me that the EP should also reset HOST_CMD_STATUS here?

Yeah, that would require the RC to send another command to reset the status.
I didn't see it being required in the normal scenario, but it would be good
to add this.
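
One way this could look on the RC side, where the RC clears the stale
status before issuing the next command (a minimal sketch using the names
above and a HOST_CMD_STATUS_NONE value; not the code from the series):

    /* RC side: clear stale status, issue the command, then poll */
    mutex_lock(&pci_epf->lock);
    writeb(HOST_CMD_STATUS_NONE, ioaddr + HOST_CMD_STATUS);
    writeb(command, ioaddr + HOST_CMD);

    timeout = ktime_add_ms(ktime_get(), COMMAND_TIMEOUT);
    while (1) {
        status = readb(ioaddr + HOST_CMD_STATUS);
        if (status == HOST_CMD_STATUS_OKAY)
            break;
        if (ktime_after(ktime_get(), timeout)) {
            ret = -ETIMEDOUT;
            break;
        }
        usleep_range(100, 200);
    }
    mutex_unlock(&pci_epf->lock);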
> 
> (I thought there should be a patch to handle stuff like this, but I didn't
> find it in this series)

This is added in [RFC PATCH 19/22] PCI: endpoint: Add EP function driver
to provide VHOST interface.

pci_epf_vhost_cmd_handler() gets commands from the RC by reading
"reg->cmd". On the EP side this is a local memory access (the region is
mapped to the BAR memory exposed to the host), and hence it is accessed
using plain structure member access.
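
For reference, a stripped-down sketch of how that handler could look on
the EP side (the epf_vhost fields and the re-queue cadence below are my
assumptions based on the description above, not the patch itself):

    static void pci_epf_vhost_cmd_handler(struct work_struct *work)
    {
        struct epf_vhost *vhost = container_of(work, struct epf_vhost,
                                               cmd_handler.work);
        struct epf_vhost_reg *reg = vhost->reg; /* local memory behind the BAR */
        u8 cmd;

        cmd = reg->cmd;
        if (cmd) {
            reg->cmd = 0;       /* reset HOST_CMD once it is consumed */
            /* ... act on the command ... */
            reg->cmd_status = HOST_CMD_STATUS_OKAY;
        }

        queue_delayed_work(system_wq, &vhost->cmd_handler,
                           msecs_to_jiffies(1));
    }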
> 
> 
>>
>> A sufficiently long time (1 sec) is given for the EP to complete its
>> operation and provide the status in HOST_CMD_STATUS. After it
>> expires, HOST_CMD_STATUS_NONE is written to HOST_CMD_STATUS. There could
>> be a case where the EP updates HOST_CMD_STATUS after the RC writes
>> HOST_CMD_STATUS_NONE, but by then the host has already detected this as a
>> failure and errored out.
>>  
>>> If you still want to introduce a new transport, a virtio spec patch
>>> would be helpful for us to understand the device API.
>> Okay, that should be on https://github.com/oasis-tcs/virtio-spec.git?
> 
> 
> Yes.
> 
> 
>>> - You have your vendor-specific layout (according to
>>> virtio_pci_epb_table()), so I guess it's better to have a vendor-specific
>>> vDPA driver instead
>> Okay, with vDPA, we are free to define our own layouts.
> 
> 
> Right, but vDPA has other requirements. E.g. it requires the device to have
> the ability to save/restore state (e.g. the last_avail_idx).
> 
> So it actually depends on what you want. If you don't care about
> userspace drivers and want to have a standard transport, you can still
> go virtio.

okay.
> 
> 
>>> - The advantage of a vendor-specific vDPA driver is that it can 1) have
>>> less code and 2) support userspace drivers through vhost-vDPA (instead of
>>> inventing new APIs, since we can't use vfio-pci here).
>> I see there's an additional level of indirection from virtio to vDPA and
>> probably no need for a spec update, but I don't exactly see how it'll
>> reduce code.
> 
> 
> AFAIK you don't need to implement your own setup_vq and del_vq.
> 
There should still be some entity that allocates memory for the virtqueues
and then communicates this address to the backend.

Maybe I have to look at this further.
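
That said, my current reading of the generic bus driver
(drivers/virtio/virtio_vdpa.c) is that it already is that entity;
roughly (paraphrased and condensed, not verbatim):

    /* virtio_vdpa_setup_vq(), heavily condensed */
    vq = vring_create_virtqueue(index, max_num, align, vdev, true, true,
                                ctx, notify, callback, name);
    ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
    ops->set_vq_address(vdpa, index,
                        virtqueue_get_desc_addr(vq),
                        virtqueue_get_avail_addr(vq),
                        virtqueue_get_used_addr(vq));
    ops->set_vq_cb(vdpa, index, &cb);
    ops->set_vq_ready(vdpa, index, 1);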
> 
>>
>> For 2, isn't vhost-vdpa supposed to run on the virtio backend?
> 
> 
> Not currently; vDPA is a superset of virtio (e.g. it supports virtqueue
> state save/restore). But it should probably be possible in the future.
> 
> 
>>
>>  From a high level, I think I should be able to use vDPA for
>> virtio_pci_epf.c. Would you also suggest using vDPA for ntb_virtio.c?
>> ([RFC PATCH 20/22] NTB: Add a new NTB client driver to implement VIRTIO
>> functionality).
> 
> 
> I think it's your call. If you
> 
> 1) want a well-defined standard virtio transport,
> 2) are willing to finalize and maintain the spec, and
> 3) don't care about userspace drivers,

IIUC, we can use vDPA (virtio_vdpa.c) but still not need userspace
drivers, right?
> 
> You can go with virtio, otherwise vDPA.

Okay, let me see. Thanks for your inputs.

Best Regards,
Kishon
