From: kernel test robot <lkp@intel.com>
To: Mike Kravetz <mike.kravetz@oracle.com>
Cc: kbuild-all@lists.01.org,
	Linux Memory Management List <linux-mm@kvack.org>,
	Andrew Morton <akpm@linux-foundation.org>
Subject: [linux-next:master 7735/7934] mm/hugetlb.c:5565:14: warning: variable 'reserve_alloc' set but not used
Date: Tue, 20 Sep 2022 06:30:46 +0800
Message-ID: <202209200603.Hpvoa8Ii-lkp@intel.com>

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   4c9ca5b1597e3222177ba2a94658f78fa5ef4f58
commit: 449f4bb2cb94b16a2014eeef41f4a50c8a3ecbdc [7735/7934] hugetlb: clean up code checking for fault/truncation races
config: i386-defconfig (https://download.01.org/0day-ci/archive/20220920/202209200603.Hpvoa8Ii-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-5) 11.3.0
reproduce (this is a W=1 build):
        # https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/?id=449f4bb2cb94b16a2014eeef41f4a50c8a3ecbdc
        git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
        git fetch --no-tags linux-next master
        git checkout 449f4bb2cb94b16a2014eeef41f4a50c8a3ecbdc
        # save the config file (the URL appears in the "config:" line above)
        wget -O config https://download.01.org/0day-ci/archive/20220920/202209200603.Hpvoa8Ii-lkp@intel.com/config
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   mm/hugetlb.c: In function 'hugetlb_no_page':
>> mm/hugetlb.c:5565:14: warning: variable 'reserve_alloc' set but not used [-Wunused-but-set-variable]
    5565 |         bool reserve_alloc = false;
         |              ^~~~~~~~~~~~~
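
For context, -Wunused-but-set-variable fires when a local variable is
assigned but its value is never read afterwards. A minimal standalone
illustration (the function and variable names below are made up for this
note, not taken from the kernel tree):

        /* gcc -Wunused-but-set-variable -c example.c */
        void example(int cond)
        {
                int flag = 0;           /* assigned here ... */

                if (cond)
                        flag = 1;       /* ... written again here, but its
                                         * value is never read back */
        }
        /* => warning: variable 'flag' set but not used */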


vim +/reserve_alloc +5565 mm/hugetlb.c

  5549	
  5550	static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
  5551				struct vm_area_struct *vma,
  5552				struct address_space *mapping, pgoff_t idx,
  5553				unsigned long address, pte_t *ptep,
  5554				pte_t old_pte, unsigned int flags)
  5555	{
  5556		struct hstate *h = hstate_vma(vma);
  5557		vm_fault_t ret = VM_FAULT_SIGBUS;
  5558		int anon_rmap = 0;
  5559		unsigned long size;
  5560		struct page *page;
  5561		pte_t new_pte;
  5562		spinlock_t *ptl;
  5563		unsigned long haddr = address & huge_page_mask(h);
  5564		bool new_page, new_pagecache_page = false;
> 5565		bool reserve_alloc = false;
  5566	
  5567		/*
  5568		 * Currently, we are forced to kill the process in the event the
  5569		 * original mapper has unmapped pages from the child due to a failed
  5570		 * COW/unsharing. Warn that such a situation has occurred as it may not
  5571		 * be obvious.
  5572		 */
  5573		if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
  5574			pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
  5575				   current->pid);
  5576			return ret;
  5577		}
  5578	
  5579		/*
  5580		 * Use page lock to guard against racing truncation
  5581		 * before we get page_table_lock.
  5582		 */
  5583		new_page = false;
  5584		page = find_lock_page(mapping, idx);
  5585		if (!page) {
  5586			size = i_size_read(mapping->host) >> huge_page_shift(h);
  5587			if (idx >= size)
  5588				goto out;
  5589			/* Check for page in userfault range */
  5590			if (userfaultfd_missing(vma)) {
  5591				ret = hugetlb_handle_userfault(vma, mapping, idx,
  5592							       flags, haddr, address,
  5593							       VM_UFFD_MISSING);
  5594				goto out;
  5595			}
  5596	
  5597			page = alloc_huge_page(vma, haddr, 0);
  5598			if (IS_ERR(page)) {
  5599				/*
  5600				 * Returning error will result in faulting task being
  5601				 * sent SIGBUS.  The hugetlb fault mutex prevents two
  5602				 * tasks from racing to fault in the same page which
  5603				 * could result in false "unable to allocate" errors.
  5604				 * Page migration does not take the fault mutex, but
  5605				 * does a clear then write of pte's under page table
  5606				 * lock.  Page fault code could race with migration,
  5607				 * notice the clear pte and try to allocate a page
  5608				 * here.  Before returning error, get ptl and make
  5609				 * sure there really is no pte entry.
  5610				 */
  5611				ptl = huge_pte_lock(h, mm, ptep);
  5612				ret = 0;
  5613				if (huge_pte_none(huge_ptep_get(ptep)))
  5614					ret = vmf_error(PTR_ERR(page));
  5615				spin_unlock(ptl);
  5616				goto out;
  5617			}
  5618			clear_huge_page(page, address, pages_per_huge_page(h));
  5619			__SetPageUptodate(page);
  5620			new_page = true;
  5621			if (HPageRestoreReserve(page))
  5622				reserve_alloc = true;
  5623	
  5624			if (vma->vm_flags & VM_MAYSHARE) {
  5625				int err = hugetlb_add_to_page_cache(page, mapping, idx);
  5626				if (err) {
  5627					/*
  5628				 * err can't be -EEXIST, which would imply someone
  5629				 * else consumed the reservation, since the hugetlb
  5630				 * fault mutex is held when adding a hugetlb page
  5631					 * to the page cache. So it's safe to call
  5632					 * restore_reserve_on_error() here.
  5633					 */
  5634					restore_reserve_on_error(h, vma, haddr, page);
  5635					put_page(page);
  5636					goto out;
  5637				}
  5638				new_pagecache_page = true;
  5639			} else {
  5640				lock_page(page);
  5641				if (unlikely(anon_vma_prepare(vma))) {
  5642					ret = VM_FAULT_OOM;
  5643					goto backout_unlocked;
  5644				}
  5645				anon_rmap = 1;
  5646			}
  5647		} else {
  5648			/*
  5649		 * If a memory error occurs between mmap() and fault, some process
  5650		 * may not have a hwpoisoned swap entry for the errored virtual
  5651		 * address, so block the hugepage fault with a PG_hwpoison check.
  5652			 */
  5653			if (unlikely(PageHWPoison(page))) {
  5654				ret = VM_FAULT_HWPOISON_LARGE |
  5655					VM_FAULT_SET_HINDEX(hstate_index(h));
  5656				goto backout_unlocked;
  5657			}
  5658	
  5659			/* Check for page in userfault range. */
  5660			if (userfaultfd_minor(vma)) {
  5661				unlock_page(page);
  5662				put_page(page);
  5663				ret = hugetlb_handle_userfault(vma, mapping, idx,
  5664							       flags, haddr, address,
  5665							       VM_UFFD_MINOR);
  5666				goto out;
  5667			}
  5668		}
  5669	
  5670		/*
  5671		 * If we are going to COW a private mapping later, we examine the
  5672		 * pending reservations for this page now. This will ensure that
  5673		 * any allocations necessary to record that reservation occur outside
  5674		 * the spinlock.
  5675		 */
  5676		if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  5677			if (vma_needs_reservation(h, vma, haddr) < 0) {
  5678				ret = VM_FAULT_OOM;
  5679				goto backout_unlocked;
  5680			}
  5681			/* Just decrements count, does not deallocate */
  5682			vma_end_reservation(h, vma, haddr);
  5683		}
  5684	
  5685		ptl = huge_pte_lock(h, mm, ptep);
  5686		ret = 0;
  5687		/* If pte changed from under us, retry */
  5688		if (!pte_same(huge_ptep_get(ptep), old_pte))
  5689			goto backout;
  5690	
  5691		if (anon_rmap) {
  5692			ClearHPageRestoreReserve(page);
  5693			hugepage_add_new_anon_rmap(page, vma, haddr);
  5694		} else
  5695			page_dup_file_rmap(page, true);
  5696		new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
  5697					&& (vma->vm_flags & VM_SHARED)));
  5698		/*
  5699		 * If this pte was previously wr-protected, keep it wr-protected even
  5700		 * if populated.
  5701		 */
  5702		if (unlikely(pte_marker_uffd_wp(old_pte)))
  5703			new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte));
  5704		set_huge_pte_at(mm, haddr, ptep, new_pte);
  5705	
  5706		hugetlb_count_add(pages_per_huge_page(h), mm);
  5707		if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  5708			/* Optimization, do the COW without a second fault */
  5709			ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl);
  5710		}
  5711	
  5712		spin_unlock(ptl);
  5713	
  5714		/*
  5715		 * Only set HPageMigratable in newly allocated pages.  Existing pages
  5716		 * found in the pagecache may not have HPageMigratable set if they have
  5717		 * been isolated for migration.
  5718		 */
  5719		if (new_page)
  5720			SetHPageMigratable(page);
  5721	
  5722		unlock_page(page);
  5723	out:
  5724		return ret;
  5725	
  5726	backout:
  5727		spin_unlock(ptl);
  5728	backout_unlocked:
  5729		if (new_page && !new_pagecache_page)
  5730			restore_reserve_on_error(h, vma, haddr, page);
  5731	
  5732		unlock_page(page);
  5733		put_page(page);
  5734		goto out;
  5735	}
  5736	
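As the listing shows, 'reserve_alloc' is assigned at line 5565 and again at
line 5622 (when HPageRestoreReserve(page) is true), but nothing in the
function reads it back, presumably a leftover from the cleanup in commit
449f4bb2cb94 ("hugetlb: clean up code checking for fault/truncation races").
One possible fix, sketched here as a guess rather than as the patch the
maintainer will actually apply, is simply to drop the now-dead variable and
its lone assignment:

        --- a/mm/hugetlb.c
        +++ b/mm/hugetlb.c
        @@ hugetlb_no_page @@
                bool new_page, new_pagecache_page = false;
        -       bool reserve_alloc = false;
        @@ hugetlb_no_page @@
                clear_huge_page(page, address, pages_per_huge_page(h));
                __SetPageUptodate(page);
                new_page = true;
        -       if (HPageRestoreReserve(page))
        -               reserve_alloc = true;

If instead the value is meant to be consumed by a follow-up change, marking
the declaration __maybe_unused would keep W=1 builds quiet in the meantime.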

-- 
0-DAY CI Kernel Test Service
https://01.org/lkp

