linux-kernel.vger.kernel.org archive mirror
* [norov:demotion2 3/6] mm/mempolicy.c:2155:11: error: 'PG_demote' undeclared
@ 2021-08-08 22:03 kernel test robot
From: kernel test robot @ 2021-08-08 22:03 UTC
  To: Yury Norov; +Cc: kbuild-all, linux-kernel

[-- Attachment #1: Type: text/plain, Size: 5216 bytes --]

tree:   https://github.com/norov/linux demotion2
head:   f4bbcd30f7078209f786570ed763c18925916bfe
commit: dd58d1928e4d4b54398185ad21f39843d4138e14 [3/6] mm/demotion: introduce MPOL_F_DEMOTE for mbind() and set_mempolicy()
config: powerpc64-buildonly-randconfig-r006-20210809 (attached as .config)
compiler: powerpc-linux-gcc (GCC) 10.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/norov/linux/commit/dd58d1928e4d4b54398185ad21f39843d4138e14
        git remote add norov https://github.com/norov/linux
        git fetch --no-tags norov demotion2
        git checkout dd58d1928e4d4b54398185ad21f39843d4138e14
        # save the attached .config to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-10.3.0 make.cross O=build_dir ARCH=powerpc SHELL=/bin/bash

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   mm/mempolicy.c: In function 'alloc_pages_vma':
>> mm/mempolicy.c:2155:11: error: 'PG_demote' undeclared (first use in this function)
    2155 |   set_bit(PG_demote, &page->flags);
         |           ^~~~~~~~~
   mm/mempolicy.c:2155:11: note: each undeclared identifier is reported only once for each function it appears in
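
The failure is straightforward: mm/mempolicy.c passes PG_demote to set_bit(), but no page flag of that name is declared anywhere in the tree at this commit. A minimal sketch of the kind of declaration that would be needed is shown below, assuming the series intends PG_demote to live in enum pageflags in include/linux/page-flags.h; the placement, comments and the PAGEFLAG() accessor line are illustrative assumptions, not part of the patch:

        /*
         * Sketch only, not from this series: PG_demote would have to be added
         * to enum pageflags before mm/mempolicy.c can use it with set_bit().
         */
        enum pageflags {
                PG_locked,      /* existing flags elided for brevity */
                /* ... */
                PG_demote,      /* hypothetical: set on pages allocated under MPOL_F_DEMOTE */
                __NR_PAGEFLAGS,
        };

        /* An accessor in the usual style could then follow, e.g.: */
        PAGEFLAG(Demote, demote, PF_ANY)

With an accessor like the above, the quoted hunk at line 2155 could also use SetPageDemote(page) rather than calling set_bit() on page->flags directly.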


vim +/PG_demote +2155 mm/mempolicy.c

  2065	
  2066	/**
  2067	 * alloc_pages_vma - Allocate a page for a VMA.
  2068	 * @gfp: GFP flags.
  2069	 * @order: Order of the GFP allocation.
  2070	 * @vma: Pointer to VMA or NULL if not available.
  2071	 * @addr: Virtual address of the allocation.  Must be inside @vma.
  2072	 * @node: Which node to prefer for allocation (modulo policy).
  2073	 * @hugepage: For hugepages try only the preferred node if possible.
  2074	 *
  2075	 * Allocate a page for a specific address in @vma, using the appropriate
  2076	 * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
  2077	 * of the mm_struct of the VMA to prevent it from going away.  Should be
  2078	 * used for all allocations for pages that will be mapped into user space.
  2079	 *
  2080	 * Return: The page on success or NULL if allocation fails.
  2081	 */
  2082	struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
  2083			unsigned long addr, int node, bool hugepage)
  2084	{
  2085		unsigned short mode_flags;
  2086		struct mempolicy *pol;
  2087		struct page *page;
  2088		int preferred_nid;
  2089		nodemask_t *nmask;
  2090	
  2091		pol = get_vma_policy(vma, addr);
  2092		mode_flags = pol->flags;
  2093	
  2094		if (pol->mode == MPOL_INTERLEAVE) {
  2095			unsigned nid;
  2096	
  2097			nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
  2098			mpol_cond_put(pol);
  2099			page = alloc_page_interleave(gfp, order, nid);
  2100			goto out;
  2101		}
  2102	
  2103		if (pol->mode == MPOL_PREFERRED_MANY) {
  2104			page = alloc_pages_preferred_many(gfp, order, node, pol);
  2105			mpol_cond_put(pol);
  2106			goto out;
  2107		}
  2108	
  2109		if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
  2110			int hpage_node = node;
  2111	
  2112			/*
  2113			 * For hugepage allocation and non-interleave policy which
  2114			 * allows the current node (or other explicitly preferred
  2115			 * node) we only try to allocate from the current/preferred
  2116			 * node and don't fall back to other nodes, as the cost of
  2117			 * remote accesses would likely offset THP benefits.
  2118			 *
  2119			 * If the policy is interleave or does not allow the current
  2120			 * node in its nodemask, we allocate the standard way.
  2121			 */
  2122			if (pol->mode == MPOL_PREFERRED)
  2123				hpage_node = first_node(pol->nodes);
  2124	
  2125			nmask = policy_nodemask(gfp, pol);
  2126			if (!nmask || node_isset(hpage_node, *nmask)) {
  2127				mpol_cond_put(pol);
  2128				/*
  2129				 * First, try to allocate THP only on local node, but
  2130				 * don't reclaim unnecessarily, just compact.
  2131				 */
  2132				page = __alloc_pages_node(hpage_node,
  2133					gfp | __GFP_THISNODE | __GFP_NORETRY, order);
  2134	
  2135				/*
  2136				 * If hugepage allocations are configured to always
  2137				 * synchronous compact or the vma has been madvised
  2138				 * to prefer hugepage backing, retry allowing remote
  2139				 * memory with both reclaim and compact as well.
  2140				 */
  2141				if (!page && (gfp & __GFP_DIRECT_RECLAIM))
  2142					page = __alloc_pages_node(hpage_node,
  2143									gfp, order);
  2144	
  2145				goto out;
  2146			}
  2147		}
  2148	
  2149		nmask = policy_nodemask(gfp, pol);
  2150		preferred_nid = policy_node(gfp, pol, node);
  2151		page = __alloc_pages(gfp, order, preferred_nid, nmask);
  2152		mpol_cond_put(pol);
  2153	out:
  2154		if (page && (mode_flags & MPOL_F_DEMOTE))
> 2155			set_bit(PG_demote, &page->flags);
  2156	
  2157		return page;
  2158	}
  2159	EXPORT_SYMBOL(alloc_pages_vma);
  2160	

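For context, the out: label above tags every page allocated under a policy whose mode flags include MPOL_F_DEMOTE. A rough userspace sketch of how the flag introduced by this commit might be exercised through mbind() follows; the MPOL_F_DEMOTE name comes from the commit subject, but its value and uapi availability are assumptions here, so the placeholder definition below is for illustration only:

        #include <numaif.h>        /* mbind(), MPOL_BIND (libnuma headers) */
        #include <sys/mman.h>
        #include <stdio.h>

        #ifndef MPOL_F_DEMOTE
        #define MPOL_F_DEMOTE (1 << 12)   /* placeholder value, illustration only */
        #endif

        int main(void)
        {
                unsigned long nodemask = 1UL << 0;   /* node 0, illustrative */
                size_t len = 1UL << 20;
                void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (buf == MAP_FAILED)
                        return 1;

                /*
                 * Hypothetical usage: OR the mode flag into the mode argument,
                 * as is done today for MPOL_F_STATIC_NODES and friends.
                 */
                if (mbind(buf, len, MPOL_BIND | MPOL_F_DEMOTE, &nodemask,
                          sizeof(nodemask) * 8, 0))
                        perror("mbind");

                return 0;
        }

Build against libnuma (-lnuma); whether the kernel actually accepts the flag depends on the kernel side compiling in the first place, which is what this report is about.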
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 29957 bytes --]
