Re: Kernel 5.8
Posted: Thu Jun 04, 2020 10:34 pm
I also must confirm that Kernel 5.8 Alpha1 does not boot on my AmigaOne X5000
Support Forum
https://forum.hyperion-entertainment.com/
https://forum.hyperion-entertainment.com/viewtopic.php?t=4477
Thanks for testing! I will bisect today.

Skateman wrote (Thu Jun 04, 2020 10:34 pm): I also must confirm that Kernel 5.8 Alpha1 does not boot on my AmigaOne X5000
Code: Select all
git bisect start
Code: Select all
git bisect good 3d77e6a8804abcc0504c904bd6e5cdf3a5cf8162
Code: Select all
git bisect bad 6929f71e46bdddbf1c4d67c2728648176c67c555
Code: Select all
[ff45fc3ca0f3c38e752d75f71b8d8efcf409e42d] mm: simplify calling a compound page destructor
Code: Select all
[cfa3b8068b09f25037146bfd5eed041b78878bee] Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Code: Select all
[17e0a7cb6a254c6d086562e7adf8b7ac24d267f3] Merge tag 'x86-cleanups-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Code: Select all
[17839856fd588f4ab6b789f482ed3ffd7c403e1f] gup: document and work around "COW can break either way" issue
Code: Select all
[c5d6c13843880ad0112f0513f3eb041b258be66e] Merge tag 'mmc-v5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
Code: Select all
[94709049fb8442fb2f7b91fbec3c2897a75e18df] Merge branch 'akpm' (patches from Andrew)
Code: Select all
[08d3090fc8dadd7b726dbda99d1baa39382c3f2c] mm/swapfile.c: simplify the calculation of n_goal
Code: Select all
[4926627793c0a7e7db2bc674e1d06777e86d8dab] mm: remove __get_vm_area
Code: Select all
[2b9059489c839e67ca9254913325e18cea11a980] mm: remove __vmalloc_node_flags_caller
Code: Select all
[6c0c7d2b365b21a413f6d75772a8a4a2c7d36916] mm/ioremap: track which page-table levels were modified
Code: Select all
[cb0849a990b2928760bc38561e8d33b554196e25] powerpc: use __vmalloc_node in alloc_vm_stack
Code: Select all
[d8626138009ba58ae2c22356966c2edaa1f1c3b5] mm: add functions to track page directory modifications
Code: Select all
[2ba3e6947aed9bb9575eb1603c0ac6e39185d32a] mm/vmalloc: track which page-table levels were modified
Code: Select all
2ba3e6947aed9bb9575eb1603c0ac6e39185d32a is the first bad commit
commit 2ba3e6947aed9bb9575eb1603c0ac6e39185d32a
Author: Joerg Roedel <[email protected]>
Date: Mon Jun 1 21:52:22 2020 -0700
mm/vmalloc: track which page-table levels were modified
Track at which levels in the page-table entries were modified by
vmap/vunmap.
After the page-table has been modified, use that information do decide
whether the new arch_sync_kernel_mappings() needs to be called.
[[email protected]: map_kernel_range_noflush() needs the arch_sync_kernel_mappings() call]
Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H . Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "Rafael J. Wysocki" <[email protected]>
Cc: Steven Rostedt (VMware) <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
:040000 040000 f6d8e1fc7b44c9e23aa419ac35911cc211cbf3cf 940c6a816e17a731da190c7173c88191e46b935f M include
:040000 040000 053ad50b6860da11900c014ce3723149cfdef6d5 56f75109266239b6c8704693105ab2b9222a4304 M mm
Code: Select all
git revert 2ba3e6947aed9bb9575eb1603c0ac6e39185d32a
Code: Select all
lib/ioremap.c: In function ‘ioremap_page_range’:
lib/ioremap.c:245:13: error: ‘ARCH_PAGE_TABLE_SYNC_MASK’ undeclared (first use in this function); did you mean ‘PROC_TABLE_TYPE_MASK’?
if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
^~~~~~~~~~~~~~~~~~~~~~~~~
PROC_TABLE_TYPE_MASK
lib/ioremap.c:245:13: note: each undeclared identifier is reported only once for each function it appears in
lib/ioremap.c:246:3: error: implicit declaration of function ‘arch_sync_kernel_mappings’ [-Werror=implicit-function-declaration]
arch_sync_kernel_mappings(start, end);
^~~~~~~~~~~~~~~~~~~~~~~~~
cc1: some warnings being treated as errors
scripts/Makefile.build:266: recipe for target 'lib/ioremap.o' failed
make[1]: *** [lib/ioremap.o] Error 1
Makefile:1735: recipe for target 'lib' failed
make: *** [lib] Error 2
make: *** Auf noch nicht beendete Prozesse wird gewartet …
AR virt/lib/built-in.a
AR virt/built-in.a
Code: Select all
//if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
// arch_sync_kernel_mappings(start, end);
//
// return err;
Soon ...
Code: Select all
diff -rupN a/include/linux/vmalloc.h b/include/linux/vmalloc.h
--- a/include/linux/vmalloc.h 2020-06-07 07:33:07.709630790 +0200
+++ b/include/linux/vmalloc.h 2020-06-07 07:31:03.565460991 +0200
@@ -132,22 +132,6 @@ extern int remap_vmalloc_range(struct vm
unsigned long pgoff);
/*
- * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
- * needs to be called.
- */
-#ifndef ARCH_PAGE_TABLE_SYNC_MASK
-#define ARCH_PAGE_TABLE_SYNC_MASK 0
-#endif
-
-/*
- * There is no default implementation for arch_sync_kernel_mappings(). It is
- * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
- * is 0.
- */
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
-
-/*
* Lowlevel-APIs (not for driver use!)
*/
diff -rupN a/lib/ioremap.c b/lib/ioremap.c
--- a/lib/ioremap.c 2020-06-07 07:33:07.781630995 +0200
+++ b/lib/ioremap.c 2020-06-07 07:32:28.425463956 +0200
@@ -242,10 +242,6 @@ int ioremap_page_range(unsigned long add
flush_cache_vmap(start, end);
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
-
- return err;
}
#ifdef CONFIG_GENERIC_IOREMAP
diff -rupN a/mm/vmalloc.c b/mm/vmalloc.c
--- a/mm/vmalloc.c 2020-06-07 07:33:07.793631029 +0200
+++ b/mm/vmalloc.c 2020-06-07 07:31:03.565460991 +0200
@@ -69,8 +69,7 @@ static void free_work(struct work_struct
/*** Page table manipulation functions ***/
-static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- pgtbl_mod_mask *mask)
+static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
pte_t *pte;
@@ -79,81 +78,59 @@ static void vunmap_pte_range(pmd_t *pmd,
pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
WARN_ON(!pte_none(ptent) && !pte_present(ptent));
} while (pte++, addr += PAGE_SIZE, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
}
-static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
- pgtbl_mod_mask *mask)
+static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
pmd_t *pmd;
unsigned long next;
- int cleared;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
-
- cleared = pmd_clear_huge(pmd);
- if (cleared || pmd_bad(*pmd))
- *mask |= PGTBL_PMD_MODIFIED;
-
- if (cleared)
+ if (pmd_clear_huge(pmd))
continue;
if (pmd_none_or_clear_bad(pmd))
continue;
- vunmap_pte_range(pmd, addr, next, mask);
+ vunmap_pte_range(pmd, addr, next);
} while (pmd++, addr = next, addr != end);
}
-static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
- pgtbl_mod_mask *mask)
+static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
pud_t *pud;
unsigned long next;
- int cleared;
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
-
- cleared = pud_clear_huge(pud);
- if (cleared || pud_bad(*pud))
- *mask |= PGTBL_PUD_MODIFIED;
-
- if (cleared)
+ if (pud_clear_huge(pud))
continue;
if (pud_none_or_clear_bad(pud))
continue;
- vunmap_pmd_range(pud, addr, next, mask);
+ vunmap_pmd_range(pud, addr, next);
} while (pud++, addr = next, addr != end);
}
-static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
- pgtbl_mod_mask *mask)
+static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
p4d_t *p4d;
unsigned long next;
- int cleared;
p4d = p4d_offset(pgd, addr);
do {
next = p4d_addr_end(addr, end);
-
- cleared = p4d_clear_huge(p4d);
- if (cleared || p4d_bad(*p4d))
- *mask |= PGTBL_P4D_MODIFIED;
-
- if (cleared)
+ if (p4d_clear_huge(p4d))
continue;
if (p4d_none_or_clear_bad(p4d))
continue;
- vunmap_pud_range(p4d, addr, next, mask);
+ vunmap_pud_range(p4d, addr, next);
} while (p4d++, addr = next, addr != end);
}
/**
* unmap_kernel_range_noflush - unmap kernel VM area
- * @start: start of the VM area to unmap
+ * @addr: start of the VM area to unmap
* @size: size of the VM area to unmap
*
* Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size specify
@@ -164,33 +141,24 @@ static void vunmap_p4d_range(pgd_t *pgd,
* for calling flush_cache_vunmap() on to-be-mapped areas before calling this
* function and flush_tlb_kernel_range() after.
*/
-void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
- unsigned long end = start + size;
+ unsigned long end = addr + size;
unsigned long next;
pgd_t *pgd;
- unsigned long addr = start;
- pgtbl_mod_mask mask = 0;
BUG_ON(addr >= end);
- start = addr;
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- if (pgd_bad(*pgd))
- mask |= PGTBL_PGD_MODIFIED;
if (pgd_none_or_clear_bad(pgd))
continue;
- vunmap_p4d_range(pgd, addr, next, &mask);
+ vunmap_p4d_range(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
-
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
+ unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
pte_t *pte;
@@ -199,7 +167,7 @@ static int vmap_pte_range(pmd_t *pmd, un
* callers keep track of where we're up to.
*/
- pte = pte_alloc_kernel_track(pmd, addr, mask);
+ pte = pte_alloc_kernel(pmd, addr);
if (!pte)
return -ENOMEM;
do {
@@ -212,59 +180,55 @@ static int vmap_pte_range(pmd_t *pmd, un
set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
(*nr)++;
} while (pte++, addr += PAGE_SIZE, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
return 0;
}
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
+ unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
pmd_t *pmd;
unsigned long next;
- pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
+ pmd = pmd_alloc(&init_mm, pud, addr);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
- if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
+ if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
+ unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
pud_t *pud;
unsigned long next;
- pud = pud_alloc_track(&init_mm, p4d, addr, mask);
+ pud = pud_alloc(&init_mm, p4d, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
- if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
+ if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
+ unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
p4d_t *p4d;
unsigned long next;
- p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
+ p4d = p4d_alloc(&init_mm, pgd, addr);
if (!p4d)
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
- if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
+ if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
return -ENOMEM;
} while (p4d++, addr = next, addr != end);
return 0;
@@ -291,28 +255,21 @@ static int vmap_p4d_range(pgd_t *pgd, un
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
pgprot_t prot, struct page **pages)
{
- unsigned long start = addr;
unsigned long end = addr + size;
unsigned long next;
pgd_t *pgd;
int err = 0;
int nr = 0;
- pgtbl_mod_mask mask = 0;
BUG_ON(addr >= end);
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- if (pgd_bad(*pgd))
- mask |= PGTBL_PGD_MODIFIED;
- err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
+ err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
if (err)
return err;
} while (pgd++, addr = next, addr != end);
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
-
return 0;
}