From: xiubli@redhat.com
To: idryomov@gmail.com, ceph-devel@vger.kernel.org
Cc: vshankar@redhat.com, mchangir@redhat.com, Xiubo Li <xiubli@redhat.com>
Subject: [PATCH v5 1/6] ceph: save the cap_auths in the client when the session is opened
Date: Thu, 18 Apr 2024 22:20:14 +0800
Message-ID: <20240418142019.133191-2-xiubli@redhat.com>
In-Reply-To: <20240418142019.133191-1-xiubli@redhat.com>
From: Xiubo Li <xiubli@redhat.com>

Save the cap_auths, which have been parsed by the MDS, in the client
when the session is opened. They will later be used to check the
cephx MDS auth access on the client side.

URL: https://tracker.ceph.com/issues/61333
Signed-off-by: Xiubo Li <xiubli@redhat.com>
---
fs/ceph/mds_client.c | 108 ++++++++++++++++++++++++++++++++++++++++++-
fs/ceph/mds_client.h | 21 +++++++++
2 files changed, 128 insertions(+), 1 deletion(-)
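
Note for reviewers: the decode loop below strips trailing '/'s from
match.path so that a plain prefix comparison can later decide whether a
request path is the authorized path itself or a child of it. As a
minimal userspace sketch of that child-of check (the helper name is
hypothetical, not part of this patch):

#include <stdbool.h>
#include <string.h>

/*
 * Return true when @want is @auth itself or a child of it.  @auth must
 * already be normalized (no trailing '/'), which is exactly what the
 * decode loop below guarantees.
 */
static bool path_is_covered(const char *auth, const char *want)
{
	size_t len = strlen(auth);

	if (len == 0)		/* "" (or "/" after stripping) covers any path */
		return true;
	if (strncmp(want, auth, len) != 0)
		return false;
	/* "/foo" must not cover "/foobar", only "/foo" or "/foo/..." */
	return want[len] == '\0' || want[len] == '/';
}

With this normalization, "path=/foo/" and "path=/foo" behave the same,
which is why the slashes are stripped once at decode time instead of on
every later check.
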
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 8651d5a63dbb..0e26bc90457c 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -4140,10 +4140,13 @@ static void handle_session(struct ceph_mds_session *session,
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
struct ceph_mds_session_head *h;
- u32 op;
+ struct ceph_mds_cap_auth *cap_auths = NULL;
+ u32 op, cap_auths_num = 0;
u64 seq, features = 0;
int wake = 0;
bool blocklisted = false;
+ u32 i;
+
/* decode */
ceph_decode_need(&p, end, sizeof(*h), bad);
@@ -4188,7 +4191,103 @@ static void handle_session(struct ceph_mds_session *session,
}
}
+ if (msg_version >= 6) {
+		ceph_decode_32_safe(&p, end, cap_auths_num, bad);
+		doutc(cl, "cap_auths_num %u\n", cap_auths_num);
+
+		/* cap_auths are only expected in a session OPEN message */
+		if (WARN_ON_ONCE(cap_auths_num && op != CEPH_SESSION_OPEN))
+			goto skip_cap_auths;
+
+ cap_auths = kcalloc(cap_auths_num,
+ sizeof(struct ceph_mds_cap_auth),
+ GFP_KERNEL);
+ if (!cap_auths) {
+ pr_err_client(cl, "No memory for cap_auths\n");
+ return;
+ }
+
+ for (i = 0; i < cap_auths_num; i++) {
+ u32 _len, j;
+
+ /* struct_v, struct_compat, and struct_len in MDSCapAuth */
+ ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
+
+ /* struct_v, struct_compat, and struct_len in MDSCapMatch */
+ ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
+ ceph_decode_64_safe(&p, end, cap_auths[i].match.uid, bad);
+ ceph_decode_32_safe(&p, end, _len, bad);
+ if (_len) {
+				cap_auths[i].match.gids = kcalloc(_len, sizeof(u32),
+								  GFP_KERNEL);
+ if (!cap_auths[i].match.gids) {
+ pr_err_client(cl, "No memory for gids\n");
+ goto fail;
+ }
+
+ cap_auths[i].match.num_gids = _len;
+ for (j = 0; j < _len; j++)
+ ceph_decode_32_safe(&p, end,
+ cap_auths[i].match.gids[j],
+ bad);
+ }
+
+ ceph_decode_32_safe(&p, end, _len, bad);
+ if (_len) {
+				cap_auths[i].match.path = kzalloc(_len + 1, GFP_KERNEL);
+ if (!cap_auths[i].match.path) {
+ pr_err_client(cl, "No memory for path\n");
+ goto fail;
+ }
+				ceph_decode_copy_safe(&p, end, cap_auths[i].match.path,
+						      _len, bad);
+
+				/*
+				 * Strip the trailing '/'s so that the path
+				 * can later be matched as a clean prefix.
+				 */
+ while (_len && cap_auths[i].match.path[_len - 1] == '/') {
+ cap_auths[i].match.path[_len - 1] = '\0';
+ _len -= 1;
+ }
+ }
+
+ ceph_decode_32_safe(&p, end, _len, bad);
+ if (_len) {
+				cap_auths[i].match.fs_name = kzalloc(_len + 1, GFP_KERNEL);
+ if (!cap_auths[i].match.fs_name) {
+ pr_err_client(cl, "No memory for fs_name\n");
+ goto fail;
+ }
+				ceph_decode_copy_safe(&p, end, cap_auths[i].match.fs_name,
+						      _len, bad);
+ }
+
+ ceph_decode_8_safe(&p, end, cap_auths[i].match.root_squash, bad);
+ ceph_decode_8_safe(&p, end, cap_auths[i].readable, bad);
+ ceph_decode_8_safe(&p, end, cap_auths[i].writeable, bad);
+			doutc(cl, "uid %lld, num_gids %u, path %s, fs_name %s, root_squash %d, readable %d, writeable %d\n",
+			      cap_auths[i].match.uid, cap_auths[i].match.num_gids,
+			      cap_auths[i].match.path, cap_auths[i].match.fs_name,
+			      cap_auths[i].match.root_squash, cap_auths[i].readable,
+			      cap_auths[i].writeable);
+		}
+	}
+
+skip_cap_auths:
mutex_lock(&mdsc->mutex);
+	if (op == CEPH_SESSION_OPEN) {
+		/* free the cap_auths saved by a previous session open */
+		if (mdsc->s_cap_auths) {
+ for (i = 0; i < mdsc->s_cap_auths_num; i++) {
+ kfree(mdsc->s_cap_auths[i].match.gids);
+ kfree(mdsc->s_cap_auths[i].match.path);
+ kfree(mdsc->s_cap_auths[i].match.fs_name);
+ }
+ kfree(mdsc->s_cap_auths);
+ }
+ mdsc->s_cap_auths_num = cap_auths_num;
+ mdsc->s_cap_auths = cap_auths;
+ }
if (op == CEPH_SESSION_CLOSE) {
ceph_get_mds_session(session);
__unregister_session(mdsc, session);
@@ -4318,6 +4417,13 @@ static void handle_session(struct ceph_mds_session *session,
pr_err_client(cl, "corrupt message mds%d len %d\n", mds,
(int)msg->front.iov_len);
ceph_msg_dump(msg);
+fail:
+	/* free whatever was decoded before the failure; kfree(NULL) is a no-op */
+	for (i = 0; i < cap_auths_num; i++) {
+ kfree(cap_auths[i].match.gids);
+ kfree(cap_auths[i].match.path);
+ kfree(cap_auths[i].match.fs_name);
+ }
+ kfree(cap_auths);
return;
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index b88e80415224..9e2fe310d0b4 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -71,6 +71,24 @@ enum ceph_feature_type {
struct ceph_fs_client;
struct ceph_cap;
+#define MDS_AUTH_UID_ANY -1
+
+struct ceph_mds_cap_match {
+	s64 uid;	/* defaults to MDS_AUTH_UID_ANY (any UID matches) */
+	u32 num_gids;
+	u32 *gids;	/* use these GIDs */
+	char *path;	/* require path to be child of this (may be "" or "/" for any) */
+	char *fs_name;	/* restrict to this fs name; "" matches any */
+	u8 root_squash;	/* defaults to false */
+};
+
+struct ceph_mds_cap_auth {
+	struct ceph_mds_cap_match match;
+	u8 readable;	/* read access is granted when the match applies */
+	u8 writeable;	/* write access is granted when the match applies */
+};
+
/*
* parsed info about a single inode. pointers are into the encoded
* on-wire structures within the mds reply message payload.
@@ -513,6 +531,9 @@ struct ceph_mds_client {
struct rw_semaphore pool_perm_rwsem;
struct rb_root pool_perm_tree;
+ u32 s_cap_auths_num;
+ struct ceph_mds_cap_auth *s_cap_auths;
+
char nodename[__NEW_UTS_LEN + 1];
};
--
2.43.0
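
Editorial note (not part of the patch): patch 2/6 of this series adds a
ceph_mds_check_access() helper that consults the s_cap_auths array
saved above. A rough sketch of that intended use, assuming the
structures added by this patch and the MAY_READ/MAY_WRITE masks from
linux/fs.h; the helper name is hypothetical and the gid/path/fs_name
matching is elided:

/* Sketch only: deny early when an applicable cap lacks the needed access. */
static int cap_auths_allow(struct ceph_mds_client *mdsc, s64 uid, int mask)
{
	u32 i;

	for (i = 0; i < mdsc->s_cap_auths_num; i++) {
		struct ceph_mds_cap_auth *a = &mdsc->s_cap_auths[i];

		if (a->match.uid != MDS_AUTH_UID_ANY && a->match.uid != uid)
			continue;	/* this entry does not apply to the caller */

		if ((mask & MAY_READ) && !a->readable)
			return -EACCES;
		if ((mask & MAY_WRITE) && !a->writeable)
			return -EACCES;
		return 0;	/* the first applicable entry decides */
	}
	return 0;	/* no applicable entry: the MDS will enforce access */
}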