* [ardb:arm64-ro-page-tables-pkvm 3/18] mm/huge_memory.c:2191:48: error: passing argument 1 of 'page_to_phys' from incompatible pointer type
@ 2021-02-27  3:39 kernel test robot
From: kernel test robot @ 2021-02-27  3:39 UTC (permalink / raw)
  To: kbuild-all


tree:   git://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git arm64-ro-page-tables-pkvm
head:   c5266cc8ebfbdcc7e1ea856a82beb017d0a4f611
commit: 59ebe17c5fd61622da88cab6da5079414a207885 [3/18] mm: HACK provide target address when migrating a PMD entry
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git/commit/?id=59ebe17c5fd61622da88cab6da5079414a207885
        git remote add ardb git://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git
        git fetch --no-tags ardb arm64-ro-page-tables-pkvm
        git checkout 59ebe17c5fd61622da88cab6da5079414a207885
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from include/linux/byteorder/big_endian.h:5,
                    from arch/powerpc/include/uapi/asm/byteorder.h:14,
                    from include/asm-generic/bitops/le.h:6,
                    from arch/powerpc/include/asm/bitops.h:265,
                    from include/linux/bitops.h:32,
                    from include/linux/kernel.h:11,
                    from include/asm-generic/bug.h:20,
                    from arch/powerpc/include/asm/bug.h:109,
                    from include/linux/bug.h:5,
                    from include/linux/mmdebug.h:5,
                    from include/linux/mm.h:9,
                    from mm/huge_memory.c:8:
   mm/huge_memory.c: In function '__split_huge_pmd_locked':
>> mm/huge_memory.c:2191:48: error: passing argument 1 of 'page_to_phys' from incompatible pointer type [-Werror=incompatible-pointer-types]
    2191 |  set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
         |                                                ^~~~~~~
         |                                                |
         |                                                pgtable_t {aka struct <anonymous> *}
   include/uapi/linux/byteorder/big_endian.h:37:51: note: in definition of macro '__cpu_to_be64'
      37 | #define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
         |                                                   ^
   mm/huge_memory.c:2191:29: note: in expansion of macro '__pmd'
    2191 |  set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
         |                             ^~~~~
   In file included from include/linux/io.h:13,
                    from include/linux/irq.h:20,
                    from arch/powerpc/include/asm/hardirq.h:6,
                    from include/linux/hardirq.h:10,
                    from include/linux/highmem.h:10,
                    from mm/huge_memory.c:12:
   arch/powerpc/include/asm/io.h:938:53: note: expected 'struct page *' but argument is of type 'pgtable_t' {aka 'struct <anonymous> *'}
     938 | static inline phys_addr_t page_to_phys(struct page *page)
         |                                        ~~~~~~~~~~~~~^~~~
   In file included from include/linux/byteorder/big_endian.h:5,
                    from arch/powerpc/include/uapi/asm/byteorder.h:14,
                    from include/asm-generic/bitops/le.h:6,
                    from arch/powerpc/include/asm/bitops.h:265,
                    from include/linux/bitops.h:32,
                    from include/linux/kernel.h:11,
                    from include/asm-generic/bug.h:20,
                    from arch/powerpc/include/asm/bug.h:109,
                    from include/linux/bug.h:5,
                    from include/linux/mmdebug.h:5,
                    from include/linux/mm.h:9,
                    from mm/huge_memory.c:8:
>> mm/huge_memory.c:2191:59: error: 'PMD_TYPE_TABLE' undeclared (first use in this function)
    2191 |  set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
         |                                                           ^~~~~~~~~~~~~~
   include/uapi/linux/byteorder/big_endian.h:37:51: note: in definition of macro '__cpu_to_be64'
      37 | #define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
         |                                                   ^
   mm/huge_memory.c:2191:29: note: in expansion of macro '__pmd'
    2191 |  set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
         |                             ^~~~~
   mm/huge_memory.c:2191:59: note: each undeclared identifier is reported only once for each function it appears in
    2191 |  set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
         |                                                           ^~~~~~~~~~~~~~
   include/uapi/linux/byteorder/big_endian.h:37:51: note: in definition of macro '__cpu_to_be64'
      37 | #define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
         |                                                   ^
   mm/huge_memory.c:2191:29: note: in expansion of macro '__pmd'
    2191 |  set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
         |                             ^~~~~
   cc1: some warnings being treated as errors
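
Both diagnostics come from the same statement: on arm64, where this
branch is meant to run, pgtable_t is struct page *, so
page_to_phys(pgtable) type-checks, and PMD_TYPE_TABLE is a page-table
descriptor bit that only arm64 defines. On powerpc, pgtable_t is a
pointer to an anonymous pte structure, hence both errors. A minimal,
self-contained sketch of the type clash (the typedefs below are
simplified stand-ins, not the real kernel headers):

        /* Build with -DARM64_LIKE to mimic arm64; without it, this
         * reproduces the incompatible-pointer-type error above. */
        typedef unsigned long long phys_addr_t;

        struct page { unsigned long flags; };
        typedef struct { unsigned long pte; } pte_t;

        #ifdef ARM64_LIKE
        typedef struct page *pgtable_t;  /* arm64-style definition */
        #else
        typedef pte_t *pgtable_t;        /* powerpc-style: "struct <anonymous> *" */
        #endif

        static phys_addr_t page_to_phys(struct page *page)
        {
                return (phys_addr_t)(unsigned long)page; /* placeholder body */
        }

        int main(void)
        {
                pgtable_t pgtable = (pgtable_t)0;
                return (int)page_to_phys(pgtable);
        }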


vim +/page_to_phys +2191 mm/huge_memory.c

  2027	
  2028	static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
  2029			unsigned long haddr, bool freeze)
  2030	{
  2031		struct mm_struct *mm = vma->vm_mm;
  2032		struct page *page;
  2033		pgtable_t pgtable;
  2034		pmd_t old_pmd, _pmd;
  2035		bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
  2036		unsigned long addr;
  2037		int i;
  2038	
  2039		VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
  2040		VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
  2041		VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
  2042		VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
  2043					&& !pmd_devmap(*pmd));
  2044	
  2045		count_vm_event(THP_SPLIT_PMD);
  2046	
  2047		if (!vma_is_anonymous(vma)) {
  2048			_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
  2049			/*
  2050			 * We are going to unmap this huge page. So
  2051			 * just go ahead and zap it
  2052			 */
  2053			if (arch_needs_pgtable_deposit())
  2054				zap_deposited_table(mm, pmd);
  2055			if (vma_is_special_huge(vma))
  2056				return;
  2057			page = pmd_page(_pmd);
  2058			if (!PageDirty(page) && pmd_dirty(_pmd))
  2059				set_page_dirty(page);
  2060			if (!PageReferenced(page) && pmd_young(_pmd))
  2061				SetPageReferenced(page);
  2062			page_remove_rmap(page, true);
  2063			put_page(page);
  2064			add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
  2065			return;
  2066		} else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
  2067			/*
  2068			 * FIXME: Do we want to invalidate secondary mmu by calling
  2069			 * mmu_notifier_invalidate_range() see comments below inside
  2070			 * __split_huge_pmd() ?
  2071			 *
  2072			 * We are going from a zero huge page write protected to zero
  2073		 * small page also write protected so it does not seem useful
  2074			 * to invalidate secondary mmu at this time.
  2075			 */
  2076			return __split_huge_zero_page_pmd(vma, haddr, pmd);
  2077		}
  2078	
  2079		/*
  2080		 * Up to this point the pmd is present and huge and userland has the
  2081		 * whole access to the hugepage during the split (which happens in
  2082		 * place). If we overwrite the pmd with the not-huge version pointing
  2083		 * to the pte here (which of course we could if all CPUs were bug
  2084		 * free), userland could trigger a small page size TLB miss on the
  2085		 * small sized TLB while the hugepage TLB entry is still established in
  2086		 * the huge TLB. Some CPUs don't like that.
  2087		 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
  2088		 * 383 on page 105. Intel should be safe but also warns that it's
  2089		 * only safe if the permission and cache attributes of the two entries
  2090		 * loaded in the two TLBs are identical (which should be the case here).
  2091		 * But it is generally safer to never allow small and huge TLB entries
  2092		 * for the same virtual address to be loaded simultaneously. So instead
  2093		 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
  2094		 * current pmd notpresent (atomically because here the pmd_trans_huge
  2095		 * must remain set at all times on the pmd until the split is complete
  2096		 * for this pmd), then we flush the SMP TLB and finally we write the
  2097		 * non-huge version of the pmd entry with pmd_populate.
  2098		 */
  2099		old_pmd = pmdp_invalidate(vma, haddr, pmd);
  2100	
  2101		pmd_migration = is_pmd_migration_entry(old_pmd);
  2102		if (unlikely(pmd_migration)) {
  2103			swp_entry_t entry;
  2104	
  2105			entry = pmd_to_swp_entry(old_pmd);
  2106			page = pfn_to_page(swp_offset(entry));
  2107			write = is_write_migration_entry(entry);
  2108			young = false;
  2109			soft_dirty = pmd_swp_soft_dirty(old_pmd);
  2110			uffd_wp = pmd_swp_uffd_wp(old_pmd);
  2111		} else {
  2112			page = pmd_page(old_pmd);
  2113			if (pmd_dirty(old_pmd))
  2114				SetPageDirty(page);
  2115			write = pmd_write(old_pmd);
  2116			young = pmd_young(old_pmd);
  2117			soft_dirty = pmd_soft_dirty(old_pmd);
  2118			uffd_wp = pmd_uffd_wp(old_pmd);
  2119		}
  2120		VM_BUG_ON_PAGE(!page_count(page), page);
  2121		page_ref_add(page, HPAGE_PMD_NR - 1);
  2122	
  2123		/*
  2124		 * Withdraw the table only after we mark the pmd entry invalid.
  2125		 * This is critical for some architectures (Power).
  2126		 */
  2127		pgtable = pgtable_trans_huge_withdraw(mm, pmd);
  2128		pmd_populate(mm, &_pmd, pgtable);
  2129	
  2130		for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
  2131			pte_t entry, *pte;
  2132			/*
  2133			 * Note that NUMA hinting access restrictions are not
  2134			 * transferred to avoid any possibility of altering
  2135			 * permissions across VMAs.
  2136			 */
  2137			if (freeze || pmd_migration) {
  2138				swp_entry_t swp_entry;
  2139				swp_entry = make_migration_entry(page + i, write);
  2140				entry = swp_entry_to_pte(swp_entry);
  2141				if (soft_dirty)
  2142					entry = pte_swp_mksoft_dirty(entry);
  2143				if (uffd_wp)
  2144					entry = pte_swp_mkuffd_wp(entry);
  2145			} else {
  2146				entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
  2147				entry = maybe_mkwrite(entry, vma);
  2148				if (!write)
  2149					entry = pte_wrprotect(entry);
  2150				if (!young)
  2151					entry = pte_mkold(entry);
  2152				if (soft_dirty)
  2153					entry = pte_mksoft_dirty(entry);
  2154				if (uffd_wp)
  2155					entry = pte_mkuffd_wp(entry);
  2156			}
  2157			pte = pte_offset_map(&_pmd, addr);
  2158			BUG_ON(!pte_none(*pte));
  2159			set_pte_at(mm, addr, pte, entry);
  2160			if (!pmd_migration)
  2161				atomic_inc(&page[i]._mapcount);
  2162			pte_unmap(pte);
  2163		}
  2164	
  2165		if (!pmd_migration) {
  2166			/*
  2167			 * Set PG_double_map before dropping compound_mapcount to avoid
  2168			 * false-negative page_mapped().
  2169			 */
  2170			if (compound_mapcount(page) > 1 &&
  2171			    !TestSetPageDoubleMap(page)) {
  2172				for (i = 0; i < HPAGE_PMD_NR; i++)
  2173					atomic_inc(&page[i]._mapcount);
  2174			}
  2175	
  2176			lock_page_memcg(page);
  2177			if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
  2178				/* Last compound_mapcount is gone. */
  2179				__dec_lruvec_page_state(page, NR_ANON_THPS);
  2180				if (TestClearPageDoubleMap(page)) {
  2181					/* No need in mapcount reference anymore */
  2182					for (i = 0; i < HPAGE_PMD_NR; i++)
  2183						atomic_dec(&page[i]._mapcount);
  2184				}
  2185			}
  2186			unlock_page_memcg(page);
  2187		}
  2188	
  2189		smp_wmb(); /* make pte visible before pmd */
  2190		//pmd_populate(mm, pmd, pgtable);
> 2191		set_pmd_at(mm, haddr, pmd, __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
  2192	
  2193		if (freeze) {
  2194			for (i = 0; i < HPAGE_PMD_NR; i++) {
  2195				page_remove_rmap(page + i, false);
  2196				put_page(page + i);
  2197			}
  2198		}
  2199	}
  2200	
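
Line 2190 shows the generic path the HACK replaced: pmd_populate()
installs the withdrawn table in the architecture's own format, which is
what every non-arm64 build still needs. One way to keep the hack from
breaking other architectures would be to confine it to arm64; a sketch
against the listing above, not a tested patch:

        smp_wmb(); /* make pte visible before pmd */
        #ifdef CONFIG_ARM64
        /*
         * Branch-local HACK: build the table descriptor by hand so the
         * target address is visible in set_pmd_at(). Only valid on
         * arm64, where pgtable_t is struct page * and PMD_TYPE_TABLE
         * is defined.
         */
        set_pmd_at(mm, haddr, pmd,
                   __pmd(page_to_phys(pgtable) | PMD_TYPE_TABLE));
        #else
        /* All other architectures keep the generic population path. */
        pmd_populate(mm, pmd, pgtable);
        #endif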

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

