mm: remove vmalloc_sync_(un)mappings()
These functions are not needed anymore because the vmalloc and ioremap
mappings are now synchronized when they are created or torn down.

Remove all callers and function definitions.

Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Tested-by: Steven Rostedt (VMware) <[email protected]>
Acked-by: Andy Lutomirski <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H . Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "Rafael J. Wysocki" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
joergroedel authored and torvalds committed Jun 2, 2020
1 parent 86cf69f commit 73f693c
Showing 7 changed files with 0 additions and 91 deletions.
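
As the commit message notes, kernel page-table synchronization now happens when vmalloc/ioremap mappings are created or torn down, via arch_sync_kernel_mappings() and the ARCH_PAGE_TABLE_SYNC_MASK / PGTBL_*_MODIFIED machinery from the preceding patches. The sketch below is a simplified illustration of that pattern, not the kernel's actual code; example_map_kernel_range(), example_unmap_kernel_range() and the example_*_kernel_range() walkers are hypothetical stand-ins.

/*
 * Simplified sketch: the page-table walkers record which levels they
 * modified in a PGTBL_*_MODIFIED bit mask, and the map/unmap paths call
 * arch_sync_kernel_mappings() right away when a level covered by
 * ARCH_PAGE_TABLE_SYNC_MASK changed.  The example_* helpers are
 * hypothetical stand-ins for the real walkers.
 */
static int example_map_kernel_range(unsigned long addr, unsigned long size,
				    pgprot_t prot, struct page **pages)
{
	unsigned int mask = 0;	/* accumulates PGTBL_*_MODIFIED bits */
	int ret;

	ret = example_populate_kernel_range(addr, addr + size, prot, pages, &mask);

	/* Make new top-level entries visible in every page-table immediately. */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(addr, addr + size);

	return ret;
}

static void example_unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned int mask = 0;

	example_clear_kernel_range(addr, addr + size, &mask);

	/* Same on teardown, before the page-table pages can be freed. */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(addr, addr + size);
}

With mapping and unmapping synchronizing themselves, the explicit vmalloc_sync_mappings()/vmalloc_sync_unmappings() calls in GHES, the die notifier chain, ftrace and __purge_vmap_area_lazy() (removed below) become redundant.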
37 changes: 0 additions & 37 deletions arch/x86/mm/fault.c
@@ -214,26 +214,6 @@ void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
	}
}

static void vmalloc_sync(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	arch_sync_kernel_mappings(VMALLOC_START, VMALLOC_END);
}

void vmalloc_sync_mappings(void)
{
	vmalloc_sync();
}

void vmalloc_sync_unmappings(void)
{
	vmalloc_sync();
}

/*
 * 32-bit:
 *
@@ -336,23 +316,6 @@ static void dump_pagetable(unsigned long address)

#else /* CONFIG_X86_64: */

void vmalloc_sync_mappings(void)
{
	/*
	 * 64-bit mappings might allocate new p4d/pud pages
	 * that need to be propagated to all tasks' PGDs.
	 */
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

void vmalloc_sync_unmappings(void)
{
	/*
	 * Unmappings never allocate or free p4d/pud pages.
	 * No work is required here.
	 */
}

/*
 * 64-bit:
 *
6 changes: 0 additions & 6 deletions drivers/acpi/apei/ghes.c
@@ -167,12 +167,6 @@ int ghes_estatus_pool_init(int num_ghes)
	if (!addr)
		goto err_pool_alloc;

	/*
	 * New allocation must be visible in all pgd before it can be found by
	 * an NMI allocating from the pool.
	 */
	vmalloc_sync_mappings();

	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
	if (rc)
		goto err_pool_add;
2 changes: 0 additions & 2 deletions include/linux/vmalloc.h
@@ -130,8 +130,6 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
void vmalloc_sync_mappings(void);
void vmalloc_sync_unmappings(void);

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
1 change: 0 additions & 1 deletion kernel/notifier.c
@@ -519,7 +519,6 @@ NOKPROBE_SYMBOL(notify_die);

int register_die_notifier(struct notifier_block *nb)
{
	vmalloc_sync_mappings();
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);
12 changes: 0 additions & 12 deletions kernel/trace/trace.c
@@ -8527,18 +8527,6 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
	allocate_snapshot = false;
#endif

	/*
	 * Because of some magic with the way alloc_percpu() works on
	 * x86_64, we need to synchronize the pgd of all the tables,
	 * otherwise the trace events that happen in x86_64 page fault
	 * handlers can't cope with accessing the chance that a
	 * alloc_percpu()'d memory might be touched in the page fault trace
	 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
	 * calls in tracing, because something might get triggered within a
	 * page fault trace event!
	 */
	vmalloc_sync_mappings();

	return 0;
}

12 changes: 0 additions & 12 deletions mm/nommu.c
@@ -371,18 +371,6 @@ void vm_unmap_aliases(void)
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_[un]mapping() if the architecture
 * chose not to have one.
 */
void __weak vmalloc_sync_mappings(void)
{
}

void __weak vmalloc_sync_unmappings(void)
{
}

struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
21 changes: 0 additions & 21 deletions mm/vmalloc.c
@@ -1353,12 +1353,6 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
	if (unlikely(valist == NULL))
		return false;

	/*
	 * First make sure the mappings are removed from all page-tables
	 * before they are freed.
	 */
	vmalloc_sync_unmappings();

	/*
	 * TODO: to calculate a flush range without looping.
	 * The list can be up to lazy_max_pages() elements.
@@ -3089,21 +3083,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
}
EXPORT_SYMBOL(remap_vmalloc_range);

/*
 * Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose
 * not to have one.
 *
 * The purpose of this function is to make sure the vmalloc area
 * mappings are identical in all page-tables in the system.
 */
void __weak vmalloc_sync_mappings(void)
{
}

void __weak vmalloc_sync_unmappings(void)
{
}

static int f(pte_t *pte, unsigned long addr, void *data)
{
	pte_t ***p = data;
