From: James Simmons <jsimmons@infradead.org>
To: Andreas Dilger <adilger@whamcloud.com>,
Oleg Drokin <green@whamcloud.com>, NeilBrown <neilb@suse.de>
Cc: Amir Shehata <ashehata@whamcloud.com>,
Lustre Development List <lustre-devel@lists.lustre.org>
Subject: [lustre-devel] [PATCH 14/41] lnet: Add the kernel level Marshalling API
Date: Sun, 4 Apr 2021 20:50:43 -0400 [thread overview]
Message-ID: <1617583870-32029-15-git-send-email-jsimmons@infradead.org> (raw)
In-Reply-To: <1617583870-32029-1-git-send-email-jsimmons@infradead.org>
From: Amir Shehata <ashehata@whamcloud.com>
Given a UDSP, marshal the UDSP pointed to by udsp
into the memory block allocated from userspace.
WC-bug-id: https://jira.whamcloud.com/browse/LU-9121
Lustre-commit: cd0ef3165e1d1b5f ("LU-9121 lnet: Add the kernel level Marshalling API")
Signed-off-by: Sonia Sharma <sharmaso@whamcloud.com>
Signed-off-by: Amir Shehata <ashehata@whamcloud.com>
Reviewed-on: https://review.whamcloud.com/34403
Reviewed-by: Serguei Smirnov <ssmirnov@whamcloud.com>
Reviewed-by: Chris Horn <chris.horn@hpe.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
---
include/linux/lnet/udsp.h | 13 +++
net/lnet/lnet/udsp.c | 214 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 227 insertions(+)
diff --git a/include/linux/lnet/udsp.h b/include/linux/lnet/udsp.h
index 265cb42..0cf630f 100644
--- a/include/linux/lnet/udsp.h
+++ b/include/linux/lnet/udsp.h
@@ -114,4 +114,17 @@
*/
void lnet_udsp_destroy(bool shutdown);
+/**
+ * lnet_get_udsp_size
+ *	Return the number of bytes needed to store the given UDSP in
+ *	its marshalled (flattened ioctl) form.
+ *
+ *	@udsp: the policy to size
+ */
+size_t lnet_get_udsp_size(struct lnet_udsp *udsp);
+
+/**
+ * lnet_udsp_marshal
+ *	Marshal @udsp into the user-space bulk buffer described by
+ *	@ioc_udsp. The buffer must be exactly lnet_get_udsp_size()
+ *	bytes.
+ *
+ *	@udsp: the policy to marshal
+ *	@ioc_udsp: ioctl structure describing the destination buffer
+ *
+ *	Return: 0 on success, negative errno on failure.
+ */
+int lnet_udsp_marshal(struct lnet_udsp *udsp,
+ struct lnet_ioctl_udsp *ioc_udsp);
#endif /* UDSP_H */
diff --git a/net/lnet/lnet/udsp.c b/net/lnet/lnet/udsp.c
index 85e31fe..499035d 100644
--- a/net/lnet/lnet/udsp.c
+++ b/net/lnet/lnet/udsp.c
@@ -1049,3 +1049,217 @@ struct lnet_udsp *
lnet_udsp_free(udsp);
}
}
+
+/* Compute the number of bytes @descr occupies once flattened into the
+ * ioctl wire format: a fixed struct lnet_ioctl_udsp_descr header, plus
+ * one struct lnet_expressions per address expression and one
+ * struct lnet_range_expr per range in the net/address expressions.
+ * A descriptor with no criteria marshals as the bare header.
+ */
+static size_t
+lnet_size_marshaled_nid_descr(struct lnet_ud_nid_descr *descr)
+{
+ struct cfs_expr_list *expr;
+ int expr_count = 0;
+ int range_count = 0;
+ size_t size = sizeof(struct lnet_ioctl_udsp_descr);
+
+ if (!lnet_udsp_criteria_present(descr))
+ return size;
+
+ /* we always have one net expression */
+ if (!list_empty(&descr->ud_net_id.udn_net_num_range)) {
+ expr = list_first_entry(&descr->ud_net_id.udn_net_num_range,
+ struct cfs_expr_list, el_link);
+
+ /* count the number of cfs_range_expr in the net expression */
+ range_count = lnet_get_list_len(&expr->el_exprs);
+ }
+
+ /* count the number of cfs_range_expr in the address expressions */
+ list_for_each_entry(expr, &descr->ud_addr_range, el_link) {
+ expr_count++;
+ range_count += lnet_get_list_len(&expr->el_exprs);
+ }
+
+ size += (sizeof(struct lnet_expressions) * expr_count);
+ size += (sizeof(struct lnet_range_expr) * range_count);
+
+ return size;
+}
+
+/* Total marshalled size of @udsp: the fixed struct lnet_ioctl_udsp
+ * plus the flattened size of the SRC, DST and RTE NID descriptors.
+ */
+size_t
+lnet_get_udsp_size(struct lnet_udsp *udsp)
+{
+	size_t size = sizeof(struct lnet_ioctl_udsp);
+
+	size += lnet_size_marshaled_nid_descr(&udsp->udsp_src);
+	size += lnet_size_marshaled_nid_descr(&udsp->udsp_dst);
+	size += lnet_size_marshaled_nid_descr(&udsp->udsp_rte);
+
+	/* %zu is the printk specifier for size_t; no lossy (int) cast */
+	CDEBUG(D_NET, "get udsp (%p) size: %zu\n", udsp, size);
+
+	return size;
+}
+
+/* Copy every cfs_range_expr on @expr's el_exprs list to user space as
+ * struct lnet_range_expr records. On success *bulk is advanced and
+ * *bulk_size decremented by the bytes consumed; the caller is expected
+ * to have verified the buffer is large enough (see lnet_udsp_marshal).
+ * Returns 0 on success, -EFAULT if copy_to_user() fails.
+ * NOTE: arithmetic on void __user * relies on the kernel/GCC extension
+ * that sizeof(void) == 1.
+ */
+static int
+copy_exprs(struct cfs_expr_list *expr, void __user **bulk,
+ u32 *bulk_size)
+{
+ struct cfs_range_expr *range;
+ struct lnet_range_expr range_expr;
+
+ /* copy over the net range expressions to the bulk */
+ list_for_each_entry(range, &expr->el_exprs, re_link) {
+ range_expr.re_lo = range->re_lo;
+ range_expr.re_hi = range->re_hi;
+ range_expr.re_stride = range->re_stride;
+ CDEBUG(D_NET, "Copy Range %u:%u:%u\n",
+ range_expr.re_lo, range_expr.re_hi,
+ range_expr.re_stride);
+ if (copy_to_user(*bulk, &range_expr, sizeof(range_expr))) {
+ CDEBUG(D_NET, "Failed to copy range_expr\n");
+ return -EFAULT;
+ }
+ *bulk += sizeof(range_expr);
+ *bulk_size -= sizeof(range_expr);
+ }
+
+ return 0;
+}
+
+/* Flatten one NID descriptor into the user-space bulk buffer: a
+ * struct lnet_ioctl_udsp_descr header, then the net number ranges (if
+ * any), then for each address expression a struct lnet_expressions
+ * header followed by its ranges. On success *bulk is advanced and
+ * *bulk_size decremented by the bytes consumed.
+ *
+ * @type must point to at least four bytes ("SRC", "DST" or "RTE"
+ * including the NUL terminator) since it is read as a u32 tag.
+ *
+ * Fixed vs. original patch: @bulk is a user-space pointer (it is
+ * handed to copy_to_user() and the caller passes a void __user *), so
+ * it must carry the __user annotation, matching copy_exprs().
+ */
+static int
+copy_nid_range(struct lnet_ud_nid_descr *nid_descr, char *type,
+	       void __user **bulk, u32 *bulk_size)
+{
+	struct lnet_ioctl_udsp_descr ioc_udsp_descr;
+	struct cfs_expr_list *expr;
+	struct lnet_expressions ioc_expr;
+	int expr_count;
+	int net_expr_count;
+	int rc;
+
+	memset(&ioc_udsp_descr, 0, sizeof(ioc_udsp_descr));
+	/* tag the descriptor with the 4-byte type string */
+	ioc_udsp_descr.iud_src_hdr.ud_descr_type = *(u32 *)type;
+
+	/* if criteria not present, copy over the static part of the NID
+	 * descriptor
+	 */
+	if (!lnet_udsp_criteria_present(nid_descr)) {
+		CDEBUG(D_NET, "Descriptor %u:%u:%u:%u\n",
+		       ioc_udsp_descr.iud_src_hdr.ud_descr_type,
+		       ioc_udsp_descr.iud_src_hdr.ud_descr_count,
+		       ioc_udsp_descr.iud_net.ud_net_type,
+		       ioc_udsp_descr.iud_net.ud_net_num_expr.le_count);
+		if (copy_to_user(*bulk, &ioc_udsp_descr,
+				 sizeof(ioc_udsp_descr))) {
+			CDEBUG(D_NET, "failed to copy ioc_udsp_descr\n");
+			return -EFAULT;
+		}
+		*bulk += sizeof(ioc_udsp_descr);
+		*bulk_size -= sizeof(ioc_udsp_descr);
+		return 0;
+	}
+
+	expr_count = lnet_get_list_len(&nid_descr->ud_addr_range);
+
+	/* copy the net information */
+	if (!list_empty(&nid_descr->ud_net_id.udn_net_num_range)) {
+		expr = list_first_entry(&nid_descr->ud_net_id.udn_net_num_range,
+					struct cfs_expr_list, el_link);
+		net_expr_count = lnet_get_list_len(&expr->el_exprs);
+	} else {
+		net_expr_count = 0;
+	}
+
+	/* set the total expression count */
+	ioc_udsp_descr.iud_src_hdr.ud_descr_count = expr_count;
+	ioc_udsp_descr.iud_net.ud_net_type =
+		nid_descr->ud_net_id.udn_net_type;
+	ioc_udsp_descr.iud_net.ud_net_num_expr.le_count = net_expr_count;
+
+	CDEBUG(D_NET, "Descriptor %u:%u:%u:%u\n",
+	       ioc_udsp_descr.iud_src_hdr.ud_descr_type,
+	       ioc_udsp_descr.iud_src_hdr.ud_descr_count,
+	       ioc_udsp_descr.iud_net.ud_net_type,
+	       ioc_udsp_descr.iud_net.ud_net_num_expr.le_count);
+
+	/* copy over the header info to the bulk */
+	if (copy_to_user(*bulk, &ioc_udsp_descr, sizeof(ioc_udsp_descr))) {
+		CDEBUG(D_NET, "Failed to copy data\n");
+		return -EFAULT;
+	}
+	*bulk += sizeof(ioc_udsp_descr);
+	*bulk_size -= sizeof(ioc_udsp_descr);
+
+	/* copy over the net num expression if it exists */
+	if (net_expr_count) {
+		rc = copy_exprs(expr, bulk, bulk_size);
+		if (rc)
+			return rc;
+	}
+
+	/* copy the address range */
+	list_for_each_entry(expr, &nid_descr->ud_addr_range, el_link) {
+		ioc_expr.le_count = lnet_get_list_len(&expr->el_exprs);
+		if (copy_to_user(*bulk, &ioc_expr, sizeof(ioc_expr))) {
+			CDEBUG(D_NET, "failed to copy ioc_expr\n");
+			return -EFAULT;
+		}
+		*bulk += sizeof(ioc_expr);
+		*bulk_size -= sizeof(ioc_expr);
+
+		rc = copy_exprs(expr, bulk, bulk_size);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+/* Marshal @udsp into the user-space bulk buffer described by
+ * @ioc_udsp. The user buffer must be exactly lnet_get_udsp_size()
+ * bytes: the fixed ioctl header fields are written directly into
+ * @ioc_udsp and the three NID descriptors (SRC, DST, RTE) are copied
+ * into the bulk area in that order.
+ *
+ * Return: 0 on success; -EINVAL on NULL @ioc_udsp, -ENOSPC on a
+ * buffer-size mismatch, -EFAULT if a user copy fails.
+ *
+ * Fixed vs. original patch: rc was initialized to -ENOMEM, but every
+ * path reaching the fail label assigns rc first, so the initializer
+ * was dead and misleading (no allocation happens here).
+ */
+int
+lnet_udsp_marshal(struct lnet_udsp *udsp, struct lnet_ioctl_udsp *ioc_udsp)
+{
+	void __user *bulk;
+	u32 bulk_size;
+	int rc;
+
+	if (!ioc_udsp)
+		return -EINVAL;
+
+	bulk = ioc_udsp->iou_bulk;
+	bulk_size = ioc_udsp->iou_hdr.ioc_len +
+		ioc_udsp->iou_bulk_size;
+
+	CDEBUG(D_NET, "marshal udsp (%p)\n", udsp);
+	CDEBUG(D_NET, "MEM -----> bulk: %p:0x%x\n", bulk, bulk_size);
+	/* make sure user space allocated enough buffer to marshal the
+	 * udsp
+	 */
+	if (bulk_size != lnet_get_udsp_size(udsp)) {
+		rc = -ENOSPC;
+		goto fail;
+	}
+
+	ioc_udsp->iou_idx = udsp->udsp_idx;
+	ioc_udsp->iou_action_type = udsp->udsp_action_type;
+	ioc_udsp->iou_action.priority = udsp->udsp_action.udsp_priority;
+
+	/* the fixed header was just filled in above; only the NID
+	 * descriptors remain to be copied into the bulk area
+	 */
+	bulk_size -= sizeof(*ioc_udsp);
+
+	rc = copy_nid_range(&udsp->udsp_src, "SRC", &bulk, &bulk_size);
+	if (rc)
+		goto fail;
+
+	rc = copy_nid_range(&udsp->udsp_dst, "DST", &bulk, &bulk_size);
+	if (rc)
+		goto fail;
+
+	rc = copy_nid_range(&udsp->udsp_rte, "RTE", &bulk, &bulk_size);
+	if (rc)
+		goto fail;
+
+	CDEBUG(D_NET, "MEM <----- bulk: %p\n", bulk);
+
+	/* we should've consumed the entire buffer */
+	LASSERT(bulk_size == 0);
+	return 0;
+
+fail:
+	CERROR("Failed to marshal udsp: %d\n", rc);
+	return rc;
+}
--
1.8.3.1
_______________________________________________
lustre-devel mailing list
lustre-devel@lists.lustre.org
http://lists.lustre.org/listinfo.cgi/lustre-devel-lustre.org
next prev parent reply other threads:[~2021-04-05 0:52 UTC|newest]
Thread overview: 42+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-04-05 0:50 [lustre-devel] [PATCH 00/41] lustre: sync to OpenSFS branch as of March 1 James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 01/41] lustre: llite: data corruption due to RPC reordering James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 02/41] lustre: llite: make readahead aware of hints James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 03/41] lustre: lov: avoid NULL dereference in cleanup James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 04/41] lustre: llite: quiet spurious ioctl warning James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 05/41] lustre: ptlrpc: do not output error when imp_sec is freed James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 06/41] lustre: update version to 2.14.0 James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 07/41] lnet: UDSP storage and marshalled structs James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 08/41] lnet: foundation patch for selection mod James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 09/41] lnet: Preferred gateway selection James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 10/41] lnet: Select NI/peer NI with highest prio James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 11/41] lnet: select best peer and local net James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 12/41] lnet: UDSP handling James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 13/41] lnet: Apply UDSP on local and remote NIs James Simmons
2021-04-05 0:50 ` James Simmons [this message]
2021-04-05 0:50 ` [lustre-devel] [PATCH 15/41] lnet: Add the kernel level De-Marshalling API James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 16/41] lnet: Add the ioctl handler for "add policy" James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 17/41] lnet: ioctl handler for "delete policy" James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 18/41] lnet: ioctl handler for get policy info James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 19/41] lustre: update version to 2.14.50 James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 20/41] lustre: gss: handle empty reqmsg in sptlrpc_req_ctx_switch James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 21/41] lustre: sec: file ioctls to handle encryption policies James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 22/41] lustre: obdclass: try to skip corrupted llog records James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 23/41] lustre: lov: fix layout generation inc for mirror split James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 24/41] lnet: modify assertion in lnet_post_send_locked James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 25/41] lustre: lov: fixes bitfield in lod qos code James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 26/41] lustre: lov: grant deadlock if same OSC in two components James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 27/41] lustre: change EWOULDBLOCK to EAGAIN James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 28/41] lsutre: ldlm: return error from ldlm_namespace_new() James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 29/41] lustre: llite: remove unused ll_teardown_mmaps() James Simmons
2021-04-05 0:50 ` [lustre-devel] [PATCH 30/41] lustre: lov: style cleanups in lov_set_osc_active() James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 31/41] lustre: change various operations structs to const James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 32/41] lustre: mark strings in char arrays as const James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 33/41] lustre: convert snprintf to scnprintf as appropriate James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 34/41] lustre: remove non-static 'inline' markings James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 35/41] lustre: llite: use is_root_inode() James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 36/41] lnet: libcfs: discard cfs_firststr James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 37/41] lnet: place wire protocol data int own headers James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 38/41] lnet: libcfs: use wait_event_timeout() in tracefiled() James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 39/41] lnet: use init_wait() rather than init_waitqueue_entry() James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 40/41] lnet: discard LNET_MD_PHYS James Simmons
2021-04-05 0:51 ` [lustre-devel] [PATCH 41/41] lnet: o2iblnd: convert peers hash table to hashtable.h James Simmons
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1617583870-32029-15-git-send-email-jsimmons@infradead.org \
--to=jsimmons@infradead.org \
--cc=adilger@whamcloud.com \
--cc=ashehata@whamcloud.com \
--cc=green@whamcloud.com \
--cc=lustre-devel@lists.lustre.org \
--cc=neilb@suse.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).