Code:
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pte_t *pte;
        u64 pfn;

        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                BUG_ON(!pte_none(*pte)); /* fires: the fresh PTE table is not zeroed */
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}
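For reference, pte_alloc_kernel() only allocates a new PTE table when the PMD entry is still empty; the generic helper looks roughly like this (include/linux/mm.h, quoted from memory, so treat it as a sketch):
Code:
#define pte_alloc_kernel(pmd, address)                          \
        ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address)) ? \
                NULL : pte_offset_kernel(pmd, address))
__pte_alloc_kernel() gets the table from the arch's pte_alloc_one_kernel(), which on ARM is: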
Code:
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte;

        /* PGALLOC_GFP includes __GFP_ZERO, so the table is expected
         * to come back already zeroed from the page allocator */
        pte = (pte_t *)__get_free_page(PGALLOC_GFP);
        if (pte)
                clean_pte_table(pte);

        return pte;
}
Code:
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
PGALLOC_GFP includes __GFP_ZERO, so the ARM PTE allocator relies on the page allocator to hand back a zeroed page. In page_alloc.c, prep_new_page() is where __GFP_ZERO would be honoured when the page is allocated, but with PAX_MEMORY_SANITIZE enabled that zeroing is compiled out (sanitization zeroes pages at free time instead). With DEBUG_PAGEALLOC on top, the page comes back poisoned rather than zeroed (the poisoning presumably runs after the sanitizer's zeroing on the free path, and unpoisoning at allocation only verifies the pattern), so the PTE entries seen in the ioremap call are non-zero and the BUG_ON triggers.
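To make the ordering concrete, here is a minimal userspace sketch (plain C, nothing here is kernel code; only the PAGE_POISON value 0xaa is taken from include/linux/poison.h) of the sequence I believe the page goes through between free and re-allocation:
Code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE   4096
#define PAGE_POISON 0xaa    /* include/linux/poison.h */

int main(void)
{
        unsigned char *page = malloc(PAGE_SIZE);
        unsigned long first_entry;

        if (!page)
                return 1;

        /* free path: PAX_MEMORY_SANITIZE zeroes the page ... */
        memset(page, 0, PAGE_SIZE);

        /* ... then DEBUG_PAGEALLOC's poisoning fallback overwrites
         * the zeros with the poison pattern */
        memset(page, PAGE_POISON, PAGE_SIZE);

        /* alloc path: the unpoison step only verifies the pattern,
         * and prep_new_page()'s __GFP_ZERO zeroing is compiled out
         * under PAX_MEMORY_SANITIZE, so nothing rewrites zeros */

        memcpy(&first_entry, page, sizeof(first_entry));
        printf("first PTE slot: %#lx -> pte_none() is false\n",
               first_entry);

        free(page);
        return 0;
}
Compiled and run, the first slot comes out as the poison pattern (0xaa bytes) rather than zero, which is exactly the kind of non-zero entry that trips the BUG_ON.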
Code:
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;
                if (unlikely(check_new_page(p)))
                        return 1;
        }

        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);

#ifndef CONFIG_PAX_MEMORY_SANITIZE
        /* with PAX_MEMORY_SANITIZE enabled this zeroing is compiled
         * out: pages are expected to be zeroed at free time instead */
        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);
#endif

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        return 0;
}
Changing the guard
#ifndef CONFIG_PAX_MEMORY_SANITIZE
to
#if !defined(CONFIG_PAX_MEMORY_SANITIZE) || defined(CONFIG_DEBUG_PAGEALLOC)
fixes the issue for me: that way __GFP_ZERO is honoured at allocation time again whenever DEBUG_PAGEALLOC is enabled.
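Spelled out, the resulting hunk in prep_new_page() would read:
Code:
#if !defined(CONFIG_PAX_MEMORY_SANITIZE) || defined(CONFIG_DEBUG_PAGEALLOC)
        /* honour __GFP_ZERO at allocation time whenever DEBUG_PAGEALLOC
         * is on, even with PAX_MEMORY_SANITIZE enabled */
        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);
#endif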
Let me know what you think.