From: Chuck Lever III <chuck.lever@oracle.com>
To: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Linux NFS Mailing List <linux-nfs@vger.kernel.org>
Subject: Re: [bug report] NFSD: Convert the filecache to use rhashtable
Date: Wed, 6 Jul 2022 14:36:32 +0000
Message-ID: <CEE385C8-21BD-47CE-8F9C-50D6E2B02DAF@oracle.com>
In-Reply-To: <YsWdMVpiHhDjfEPg@kili>



> On Jul 6, 2022, at 10:33 AM, Dan Carpenter <dan.carpenter@oracle.com> wrote:
> 
> Hello Chuck Lever,
> 
> The patch 125b58c13f71: "NFSD: Convert the filecache to use
> rhashtable" from Jun 28, 2022, leads to the following Smatch static
> checker warning:
> 
> 	fs/nfsd/filecache.c:1117 nfsd_file_do_acquire()
> 	warn: 'new' was already freed.
> 
> fs/nfsd/filecache.c
>    1066 static __be32
>    1067 nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
>    1068                      unsigned int may_flags, struct nfsd_file **pnf, bool open)
>    1069 {
>    1070         struct nfsd_file_lookup_key key = {
>    1071                 .type        = NFSD_FILE_KEY_FULL,
>    1072                 .need        = may_flags & NFSD_FILE_MAY_MASK,
>    1073                 .net        = SVC_NET(rqstp),
>    1074                 .cred        = current_cred(),
>    1075         };
>    1076         struct nfsd_file *nf, *new;
>    1077         bool retry = true;
>    1078         __be32 status;
>    1079 
>    1080         status = fh_verify(rqstp, fhp, S_IFREG,
>    1081                                 may_flags|NFSD_MAY_OWNER_OVERRIDE);
>    1082         if (status != nfs_ok)
>    1083                 return status;
>    1084         key.inode = d_inode(fhp->fh_dentry);
>    1085 
>    1086 retry:
>    1087         /* Avoid allocation if the item is already in cache */
>    1088         nf = rhashtable_lookup_fast(&nfsd_file_rhash_tbl, &key,
>    1089                                     nfsd_file_rhash_params);
>    1090         if (nf)
>    1091                 nf = nfsd_file_get(nf);
>    1092         if (nf)
>    1093                 goto wait_for_construction;
>    1094 
>    1095         new = nfsd_file_alloc(&key, may_flags);
>    1096         if (!new) {
>    1097                 status = nfserr_jukebox;
>    1098                 goto out_status;
>    1099         }
>    1100 
>    1101         nf = rhashtable_lookup_get_insert_key(&nfsd_file_rhash_tbl,
>    1102                                               &key, &new->nf_rhash,
>    1103                                               nfsd_file_rhash_params);
>    1104         if (!nf) {
>    1105                 nf = new;
>    1106                 goto open_file;
>    1107         }
>    1108         nfsd_file_slab_free(&new->nf_rcu);
>                                      ^^^^^^^^^^^
> This frees "new".
> 
>    1109         if (IS_ERR(nf)) {
>    1110                 trace_nfsd_file_insert_err(rqstp, key.inode, may_flags, PTR_ERR(nf));
>    1111                 nf = NULL;
>    1112                 status = nfserr_jukebox;
>    1113                 goto out_status;
>    1114         }
>    1115         nf = nfsd_file_get(nf);
>    1116         if (nf == NULL) {
> --> 1117                 nf = new;
>                              ^^^
> Use after free

Ugh. I wonder why I haven't hit this already.

Thanks, Dan! One rough idea for closing this is sketched below, after the quoted code.


>    1118                 goto open_file;
>    1119         }
>    1120 
>    1121 wait_for_construction:
>    1122         wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
>    1123 
>    1124         /* Did construction of this file fail? */
>    1125         if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
>    1126                 trace_nfsd_file_cons_err(rqstp, key.inode, may_flags, nf);
>    1127                 if (!retry) {
>    1128                         status = nfserr_jukebox;
>    1129                         goto out;
>    1130                 }
>    1131                 retry = false;
>    1132                 nfsd_file_put_noref(nf);
>    1133                 goto retry;
>    1134         }
>    1135 
>    1136         nfsd_file_lru_remove(nf);
>    1137         this_cpu_inc(nfsd_file_cache_hits);
>    1138 
>    1139         if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
>    1140                 bool write = (may_flags & NFSD_MAY_WRITE);
>    1141 
>    1142                 if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
>    1143                     (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
>    1144                         status = nfserrno(nfsd_open_break_lease(
>    1145                                         file_inode(nf->nf_file), may_flags));
>    1146                         if (status == nfs_ok) {
>    1147                                 clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
>    1148                                 if (write)
>    1149                                         clear_bit(NFSD_FILE_BREAK_WRITE,
>    1150                                                   &nf->nf_flags);
>    1151                         }
>    1152                 }
>    1153         }
>    1154 out:
>    1155         if (status == nfs_ok) {
>    1156                 if (open)
>    1157                         this_cpu_inc(nfsd_file_acquisitions);
>    1158                 *pnf = nf;
>    1159         } else {
>    1160                 nfsd_file_put(nf);
>    1161                 nf = NULL;
>    1162         }
>    1163 
>    1164 out_status:
>    1165         if (open)
>    1166                 trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
>    1167         return status;
>    1168 
>    1169 open_file:
>    1170         trace_nfsd_file_alloc(nf);
>    1171         nf->nf_mark = nfsd_file_mark_find_or_create(nf);
>    1172         if (nf->nf_mark) {
>    1173                 if (open) {
>    1174                         status = nfsd_open_verified(rqstp, fhp, may_flags,
>    1175                                                     &nf->nf_file);
>    1176                         trace_nfsd_file_open(nf, status);
>    1177                 } else
>    1178                         status = nfs_ok;
>    1179         } else
>    1180                 status = nfserr_jukebox;
>    1181         /*
>    1182          * If construction failed, or we raced with a call to unlink()
>    1183          * then unhash.
>    1184          */
>    1185         if (status != nfs_ok || key.inode->i_nlink == 0)
>    1186                 if (nfsd_file_unhash(nf))
>    1187                         nfsd_file_put_noref(nf);
>    1188         clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
>    1189         smp_mb__after_atomic();
>    1190         wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
>    1191         goto out;
>    1192 }
> 
> regards,
> dan carpenter
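
For discussion, here is a rough, untested sketch of one way the insertion
path could avoid touching "new" after it has been freed, using only the
helpers already in the function: when nfsd_file_get() fails on the entry
that won the insertion race, retry the lookup (as the construction-failure
path already does) instead of falling back to "new". The eventual fix may
well look different.

	nf = rhashtable_lookup_get_insert_key(&nfsd_file_rhash_tbl,
					      &key, &new->nf_rhash,
					      nfsd_file_rhash_params);
	if (!nf) {
		nf = new;
		goto open_file;
	}
	/* "new" lost the race; it was not inserted and is no longer needed */
	nfsd_file_slab_free(&new->nf_rcu);
	if (IS_ERR(nf)) {
		trace_nfsd_file_insert_err(rqstp, key.inode, may_flags, PTR_ERR(nf));
		nf = NULL;
		status = nfserr_jukebox;
		goto out_status;
	}
	nf = nfsd_file_get(nf);
	if (nf == NULL) {
		/*
		 * Raced with a teardown of the entry we found. "new" has
		 * already been freed, so look up (or allocate) again
		 * rather than reusing it.
		 */
		goto retry;
	}

That leaves open whether this retry needs to be bounded the way the
construction-failure path bounds its retry, but it keeps the freed object
out of the picture entirely.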

--
Chuck Lever



