From: Jonathan Lemon <jonathan.lemon@gmail.com>
To: <netdev@vger.kernel.org>
Cc: <kernel-team@fb.com>
Subject: [RFC PATCH v2 06/21] include: add netgpu UAPI and kernel definitions
Date: Mon, 27 Jul 2020 15:44:29 -0700
Message-ID: <20200727224444.2987641-7-jonathan.lemon@gmail.com>
In-Reply-To: <20200727224444.2987641-1-jonathan.lemon@gmail.com>

From: Jonathan Lemon <bsd@fb.com>

Add the interface definitions for the netgpu module: include/net/netgpu.h
declares the context, fill-queue (ifq) and socket-queue structures together
with the page-provisioning helpers used by the driver and core networking
patches later in this series, while include/uapi/misc/netgpu.h defines the
ioctl parameter structures and the mmap offsets for the shared fill, rx and
completion queues.
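
The expected userspace flow is roughly: open a context, attach it to a
netdev, bind a fill queue and mmap it, then attach individual sockets.
A minimal sketch of the first steps follows; the "/dev/netgpu" node name
and the argument conventions are assumptions taken at face value from the
defines below, not something this patch spells out:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <misc/netgpu.h>

    static int netgpu_setup(unsigned int ifindex)
    {
        struct netgpu_ifq_param ifq = { .queue_id = (unsigned int)-1 };
        void *fill;
        int ctx_fd;

        ctx_fd = open("/dev/netgpu", O_RDWR);   /* assumed device node */
        if (ctx_fd < 0)
            return -1;

        /* ATTACH_DEV is declared _IOR(0, 1, int): pass the ifindex */
        if (ioctl(ctx_fd, NETGPU_CTX_IOCTL_ATTACH_DEV, &ifindex) < 0)
            return -1;

        /* bind any available queue (-1 = don't care), get back an ifq fd */
        if (ioctl(ctx_fd, NETGPU_CTX_IOCTL_BIND_QUEUE, &ifq) < 0)
            return -1;

        /* map the fill queue so buffers can be posted for the NIC */
        fill = mmap(NULL, ifq.fill.map_sz, PROT_READ | PROT_WRITE,
                    MAP_SHARED, ifq.ifq_fd, NETGPU_OFF_FILL_ID);
        if (fill == MAP_FAILED)
            return -1;

        return ifq.ifq_fd;
    }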

Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
---
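
For reviewers, a rough sketch of how a driver is expected to consume the
page-provisioning helpers when refilling an RX ring. The descriptor layout
and the return-value convention of netgpu_get_page() are illustrative
assumptions, not something this header fixes:

    #include <linux/mm.h>
    #include <linux/dma-mapping.h>
    #include <net/netgpu.h>

    /* hypothetical driver-private RX descriptor */
    struct rx_desc {
        dma_addr_t addr;
    };

    static int refill_rx_ring(struct netgpu_ifq *ifq, struct rx_desc *ring,
                              int count)
    {
        struct page *page;
        dma_addr_t dma;
        int i;

        for (i = 0; i < count; i++) {
            /* assumed: nonzero return means no buffer is available */
            if (netgpu_get_page(ifq, &page, &dma))
                break;
            ring[i].addr = dma;
        }
        return i;   /* number of descriptors actually filled */
    }
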
 include/net/netgpu.h       | 66 ++++++++++++++++++++++++++++++++++++
 include/uapi/misc/netgpu.h | 69 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 135 insertions(+)
 create mode 100644 include/net/netgpu.h
 create mode 100644 include/uapi/misc/netgpu.h
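
And the per-socket attach, issued on the socket itself via the
SIOCPROTOPRIVATE-based ioctl at the bottom of the UAPI header (callers
have to pull in <linux/sockios.h> for SIOCPROTOPRIVATE themselves); again
only a sketch of the assumed usage:

    #include <sys/ioctl.h>
    #include <linux/sockios.h>
    #include <misc/netgpu.h>

    static int attach_socket_queues(int sock_fd, int ctx_fd)
    {
        struct netgpu_socket_param p = { .ctx_fd = ctx_fd };

        /* assumed: on success the kernel fills in p.rx and p.cq, which
         * are then mmap()ed like the fill queue above */
        return ioctl(sock_fd, NETGPU_SOCK_IOCTL_ATTACH_QUEUES, &p);
    }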

diff --git a/include/net/netgpu.h b/include/net/netgpu.h
new file mode 100644
index 000000000000..14bd19412c38
--- /dev/null
+++ b/include/net/netgpu.h
@@ -0,0 +1,66 @@
+#ifndef _NET_NETGPU_H
+#define _NET_NETGPU_H
+
+#include <uapi/misc/netgpu.h>		/* IOCTL defines */
+#include <uapi/misc/shqueue.h>
+
+enum {
+	NETGPU_MEMTYPE_HOST,
+	NETGPU_MEMTYPE_CUDA,
+
+	NETGPU_MEMTYPE_MAX,
+};
+
+struct netgpu_pgcache {
+	struct netgpu_pgcache *next;
+	struct page *page[];
+};
+
+struct netgpu_ifq {
+	struct shared_queue fill;
+	struct wait_queue_head fill_wait;
+	struct netgpu_ctx *ctx;
+	int queue_id;
+	spinlock_t pgcache_lock;
+	struct netgpu_pgcache *napi_cache;
+	struct netgpu_pgcache *spare_cache;
+	struct netgpu_pgcache *any_cache;
+	int napi_cache_count;
+	int any_cache_count;
+	struct list_head ifq_node;
+};
+
+struct netgpu_skq {
+	struct shared_queue rx;
+	struct shared_queue cq;		/* for requested completions */
+	struct netgpu_ctx *ctx;
+	void (*sk_destruct)(struct sock *sk);
+	void (*sk_data_ready)(struct sock *sk);
+};
+
+struct netgpu_ctx {
+	struct xarray xa;		/* contains dmamaps */
+	refcount_t ref;
+	struct net_device *dev;
+	struct list_head ifq_list;
+};
+
+struct net_device;
+struct netgpu_ops;
+struct socket;
+
+dma_addr_t netgpu_get_dma(struct netgpu_ctx *ctx, struct page *page);
+int netgpu_get_page(struct netgpu_ifq *ifq, struct page **page,
+		    dma_addr_t *dma);
+void netgpu_put_page(struct netgpu_ifq *ifq, struct page *page, bool napi);
+int netgpu_get_pages(struct sock *sk, struct page **pages, unsigned long addr,
+		     int count);
+
+int netgpu_socket_mmap(struct file *file, struct socket *sock,
+		       struct vm_area_struct *vma);
+int netgpu_attach_socket(struct sock *sk, void __user *arg);
+
+int netgpu_register(struct netgpu_ops *ops);
+void netgpu_unregister(int memtype);
+
+#endif /* _NET_NETGPU_H */
diff --git a/include/uapi/misc/netgpu.h b/include/uapi/misc/netgpu.h
new file mode 100644
index 000000000000..1fa8a1d719ee
--- /dev/null
+++ b/include/uapi/misc/netgpu.h
@@ -0,0 +1,69 @@
+#ifndef _UAPI_MISC_NETGPU_H
+#define _UAPI_MISC_NETGPU_H
+
+#include <linux/ioctl.h>
+
+#define NETGPU_OFF_FILL_ID	(0ULL << 12)
+#define NETGPU_OFF_RX_ID	(1ULL << 12)
+#define NETGPU_OFF_CQ_ID	(2ULL << 12)
+
+struct netgpu_queue_offsets {
+	unsigned prod;
+	unsigned cons;
+	unsigned data;
+	unsigned resv;
+};
+
+struct netgpu_user_queue {
+	unsigned elt_sz;
+	unsigned entries;
+	unsigned mask;
+	unsigned map_sz;
+	unsigned map_off;
+	struct netgpu_queue_offsets off;
+};
+
+enum netgpu_memtype {
+	MEMTYPE_HOST,
+	MEMTYPE_CUDA,
+
+	MEMTYPE_MAX,
+};
+
+/* VA memory provided by a specific PCI device. */
+struct netgpu_region_param {
+	struct iovec iov;
+	enum netgpu_memtype memtype;
+};
+
+struct netgpu_attach_param {
+	int mem_fd;
+	int mem_idx;
+};
+
+struct netgpu_socket_param {
+	unsigned resv;
+	int ctx_fd;
+	struct netgpu_user_queue rx;
+	struct netgpu_user_queue cq;
+};
+
+struct netgpu_ifq_param {
+	unsigned resv;
+	unsigned ifq_fd;		/* OUT parameter */
+	unsigned queue_id;		/* IN/OUT, IN: -1 if don't care */
+	struct netgpu_user_queue fill;
+};
+
+struct netgpu_ctx_param {
+	unsigned resv;
+	unsigned ifindex;
+};
+
+#define NETGPU_CTX_IOCTL_ATTACH_DEV	_IOR( 0, 1, int)
+#define NETGPU_CTX_IOCTL_BIND_QUEUE	_IOWR(0, 2, struct netgpu_ifq_param)
+#define NETGPU_CTX_IOCTL_ATTACH_REGION	_IOW( 0, 3, struct netgpu_attach_param)
+#define NETGPU_MEM_IOCTL_ADD_REGION	_IOR( 0, 4, struct netgpu_region_param)
+#define NETGPU_SOCK_IOCTL_ATTACH_QUEUES	(SIOCPROTOPRIVATE + 0)
+
+#endif /* _UAPI_MISC_NETGPU_H */
-- 
2.24.1



Thread overview: 35+ messages
2020-07-27 22:44 [RFC PATCH v2 00/21] netgpu: networking between NIC and GPU/CPU Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 01/21] linux/log2.h: enclose macro arg in parens Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 02/21] mm/memory_hotplug: add {add|release}_memory_pages Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 03/21] mm: Allow DMA mapping of pages which are not online Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 04/21] kernel/user: export free_uid Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 05/21] uapi/misc: add shqueue.h for shared queues Jonathan Lemon
2020-07-27 22:44 ` Jonathan Lemon [this message]
2020-07-27 22:44 ` [RFC PATCH v2 07/21] netdevice: add SETUP_NETGPU to the netdev_bpf structure Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 08/21] skbuff: add a zc_netgpu bitflag Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 09/21] core/skbuff: use skb_zdata for testing whether skb is zerocopy Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 10/21] netgpu: add network/gpu/host dma module Jonathan Lemon
2020-07-28 16:26   ` Greg KH
2020-07-28 17:41     ` Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 11/21] core/skbuff: add page recycling logic for netgpu pages Jonathan Lemon
2020-07-28 16:28   ` Greg KH
2020-07-28 18:00     ` Jonathan Lemon
2020-07-28 18:26       ` Greg KH
2020-07-27 22:44 ` [RFC PATCH v2 12/21] lib: have __zerocopy_sg_from_iter get netgpu pages for a sk Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 13/21] net/tcp: Pad TCP options out to a fixed size for netgpu Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 14/21] net/tcp: add netgpu ioctl setting up zero copy RX queues Jonathan Lemon
2020-07-28  2:16   ` Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 15/21] net/tcp: add MSG_NETDMA flag for sendmsg() Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 16/21] mlx5: remove the umem parameter from mlx5e_open_channel Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 17/21] mlx5e: add header split ability Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 18/21] mlx5e: add netgpu entries to mlx5 structures Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 19/21] mlx5e: add the netgpu driver functions Jonathan Lemon
2020-07-28 16:27   ` Greg KH
2020-07-27 22:44 ` [RFC PATCH v2 20/21] mlx5e: hook up the netgpu functions Jonathan Lemon
2020-07-27 22:44 ` [RFC PATCH v2 21/21] netgpu/nvidia: add Nvidia plugin for netgpu Jonathan Lemon
2020-07-28 16:31   ` Greg KH
2020-07-28 17:18     ` Chris Mason
2020-07-28 17:27       ` Christoph Hellwig
2020-07-28 18:47         ` Chris Mason
2020-07-28 19:55 ` [RFC PATCH v2 00/21] netgpu: networking between NIC and GPU/CPU Stephen Hemminger
2020-07-28 20:43   ` Jonathan Lemon
