Hi Muchun,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on hnaz-mm/master v5.17-rc2 next-20220202]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Muchun-Song/Fix-some-bugs-related-to-ramp-and-dax/20220202-223615
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 9f7fb8de5d9bac17b6392a14af40baf555d9129b
config: um-i386_defconfig (https://download.01.org/0day-ci/archive/20220203/202202030043.3NXtgGau-lkp@intel.com/config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/64a64c01138fd43f8d9ac17a47c813b55231c325
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Muchun-Song/Fix-some-bugs-related-to-ramp-and-dax/20220202-223615
        git checkout 64a64c01138fd43f8d9ac17a47c813b55231c325
        # save the config file to linux build tree
        mkdir build_dir
        make W=1 O=build_dir ARCH=um SUBARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot

All warnings (new ones prefixed by >>):

   mm/page_vma_mapped.c: In function 'page_vma_mapped_walk':
>> mm/page_vma_mapped.c:165:16: warning: variable 'pfn' set but not used [-Wunused-but-set-variable]
     165 |  unsigned long pfn;
         |                ^~~
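The warning is real on this config, not just noise: both reads of 'pfn'
(the pmd_pfn() and pfn_swap_entry_to_pfn() comparisons) sit inside the
#ifdef CONFIG_TRANSPARENT_HUGEPAGE block in the listing below, and
um-i386_defconfig does not enable THP, so the assignment near the top of
page_vma_mapped_walk() becomes a dead store at W=1.

One possible shape for a fix, sketched against the quoted code and not
necessarily what the author will choose, is to compute the pfn only
inside the block that consumes it:

 	struct page *page = NULL;
 	unsigned long end;
-	unsigned long pfn;
 	pgd_t *pgd;
...
 	if (!(pvmw->flags & PVMW_PFN_WALK))
 		page = pvmw->page;
-	pfn = page ? page_to_pfn(page) : pvmw->pfn;
...
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (pmd_leaf(pmde) || is_pmd_migration_entry(pmde)) {
+		/* Both uses of pfn sit inside this THP-only block. */
+		unsigned long pfn = page ? page_to_pfn(page) : pvmw->pfn;
+
 		pvmw->ptl = pmd_lock(mm, pvmw->pmd);

Alternatively, declaring it as 'unsigned long __maybe_unused pfn;' would
silence the warning without moving any code, at the cost of keeping the
dead store when THP is disabled.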
vim +/pfn +165 mm/page_vma_mapped.c

   135	
   136	/**
   137	 * page_vma_mapped_walk - check if @pvmw->page or @pvmw->pfn is mapped in
   138	 * @pvmw->vma at @pvmw->address
   139	 * @pvmw: pointer to struct page_vma_mapped_walk. page (or pfn and nr and
   140	 * index), vma, address and flags must be set. pmd, pte and ptl must be NULL.
   141	 *
   142	 * Returns true if the page or pfn is mapped in the vma. @pvmw->pmd and
   143	 * @pvmw->pte point to relevant page table entries. @pvmw->ptl is locked.
   144	 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
   145	 *
   146	 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
   147	 * (usually THP or Huge DEVMAP). For PMD-mapped page, you should run
   148	 * page_vma_mapped_walk() in a loop to find all PTEs that map the huge page.
   149	 *
   150	 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
   151	 * regardless of which page table level the page is mapped at. @pvmw->pmd is
   152	 * NULL.
   153	 *
   154	 * Returns false if there are no more page table entries for the page or pfn in
   155	 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
   156	 *
   157	 * If you need to stop the walk before page_vma_mapped_walk() returned false,
   158	 * use page_vma_mapped_walk_done(). It will do the housekeeping.
   159	 */
   160	bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
   161	{
   162		struct mm_struct *mm = pvmw->vma->vm_mm;
   163		struct page *page = NULL;
   164		unsigned long end;
 > 165		unsigned long pfn;
   166		pgd_t *pgd;
   167		p4d_t *p4d;
   168		pud_t *pud;
   169		pmd_t pmde;
   170	
   171		/* The only possible pmd mapping has been handled on last iteration */
   172		if (pvmw->pmd && !pvmw->pte)
   173			return not_found(pvmw);
   174	
   175		if (!(pvmw->flags & PVMW_PFN_WALK))
   176			page = pvmw->page;
   177		pfn = page ? page_to_pfn(page) : pvmw->pfn;
   178	
   179		if (unlikely(page && PageHuge(page))) {
   180			/* The only possible mapping was handled on last iteration */
   181			if (pvmw->pte)
   182				return not_found(pvmw);
   183	
   184			/* when pud is not present, pte will be NULL */
   185			pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
   186			if (!pvmw->pte)
   187				return false;
   188	
   189			pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
   190			spin_lock(pvmw->ptl);
   191			if (!check_pte(pvmw))
   192				return not_found(pvmw);
   193			return true;
   194		}
   195	
   196		/*
   197		 * Seek to next pte only makes sense for THP.
   198		 * But more important than that optimization, is to filter out
   199		 * any PageKsm page: whose page->index misleads vma_address()
   200		 * and vma_address_end() to disaster.
   201		 */
   202		if (page)
   203			end = PageTransCompound(page) ?
   204			      vma_address_end(page, pvmw->vma) :
   205			      pvmw->address + PAGE_SIZE;
   206		else
   207			end = vma_pgoff_address_end(pvmw->index, pvmw->nr, pvmw->vma);
   208	
   209		if (pvmw->pte)
   210			goto next_pte;
   211	restart:
   212		do {
   213			pgd = pgd_offset(mm, pvmw->address);
   214			if (!pgd_present(*pgd)) {
   215				step_forward(pvmw, PGDIR_SIZE);
   216				continue;
   217			}
   218			p4d = p4d_offset(pgd, pvmw->address);
   219			if (!p4d_present(*p4d)) {
   220				step_forward(pvmw, P4D_SIZE);
   221				continue;
   222			}
   223			pud = pud_offset(p4d, pvmw->address);
   224			if (!pud_present(*pud)) {
   225				step_forward(pvmw, PUD_SIZE);
   226				continue;
   227			}
   228	
   229			pvmw->pmd = pmd_offset(pud, pvmw->address);
   230			/*
   231			 * Make sure the pmd value isn't cached in a register by the
   232			 * compiler and used as a stale value after we've observed a
   233			 * subsequent update.
   234			 */
   235			pmde = READ_ONCE(*pvmw->pmd);
   236	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
   237			if (pmd_leaf(pmde) || is_pmd_migration_entry(pmde)) {
   238				pvmw->ptl = pmd_lock(mm, pvmw->pmd);
   239				pmde = *pvmw->pmd;
   240				if (likely(pmd_leaf(pmde))) {
   241					if (pvmw->flags & PVMW_MIGRATION)
   242						return not_found(pvmw);
   243					if (pmd_pfn(pmde) != pfn)
   244						return not_found(pvmw);
   245					return true;
   246				}
   247				if (!pmd_present(pmde)) {
   248					swp_entry_t entry;
   249	
   250					if (!thp_migration_supported() ||
   251					    !(pvmw->flags & PVMW_MIGRATION))
   252						return not_found(pvmw);
   253					entry = pmd_to_swp_entry(pmde);
   254					if (!is_migration_entry(entry) ||
   255					    pfn_swap_entry_to_pfn(entry) != pfn)
   256						return not_found(pvmw);
   257					return true;
   258				}
   259				/* THP pmd was split under us: handle on pte level */
   260				spin_unlock(pvmw->ptl);
   261				pvmw->ptl = NULL;
   262			} else
   263	#endif
   264			if (!pmd_present(pmde)) {
   265				/*
   266				 * If PVMW_SYNC, take and drop THP pmd lock so that we
   267				 * cannot return prematurely, while zap_huge_pmd() has
   268				 * cleared *pmd but not decremented compound_mapcount().
   269				 */
   270				if ((pvmw->flags & PVMW_SYNC) && page &&
   271				    PageTransCompound(page)) {
   272					spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
   273	
   274					spin_unlock(ptl);
   275				}
   276				step_forward(pvmw, PMD_SIZE);
   277				continue;
   278			}
   279			if (!map_pte(pvmw))
   280				goto next_pte;
   281	this_pte:
   282			if (check_pte(pvmw))
   283				return true;
   284	next_pte:
   285			do {
   286				pvmw->address += PAGE_SIZE;
   287				if (pvmw->address >= end)
   288					return not_found(pvmw);
   289				/* Did we cross page table boundary? */
   290				if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
   291					if (pvmw->ptl) {
   292						spin_unlock(pvmw->ptl);
   293						pvmw->ptl = NULL;
   294					}
   295					pte_unmap(pvmw->pte);
   296					pvmw->pte = NULL;
   297					goto restart;
   298				}
   299				pvmw->pte++;
   300				if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
   301					pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
   302					spin_lock(pvmw->ptl);
   303				}
   304			} while (pte_none(*pvmw->pte));
   305	
   306			if (!pvmw->ptl) {
   307				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
   308				spin_lock(pvmw->ptl);
   309			}
   310			goto this_pte;
   311		} while (pvmw->address < end);
   312	
   313		return false;
   314	}
   315	
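As an aside for reviewers who do not touch rmap often, the calling
convention the kernel-doc above describes looks roughly like this. This
is a sketch modeled on existing users such as page_mkclean_one();
'page', 'vma', 'address' and the stop condition are placeholders, and
with this patch a pfn-based walk would set .pfn, .nr, .index and
PVMW_PFN_WALK instead of .page:

	struct page_vma_mapped_walk pvmw = {
		.page = page,	/* or .pfn/.nr/.index + PVMW_PFN_WALK */
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * pvmw.ptl is held here; pvmw.pte points at a mapping
		 * PTE, or pvmw.pmd is set with pvmw.pte == NULL for a
		 * PMD-mapped THP.
		 */
		if (stop_early) {	/* placeholder condition */
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}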
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org