diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 81eb96ec13b2..5402fb6b3aae 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -464,7 +464,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		 * we invalidate the TLB here, thus avoiding dcbst
 		 * misbehaviour.
 		 */
-		_tlbie(address);
+		_tlbie(address, 0 /* 8xx doesn't care about PID */);
 #endif
 	if (!PageReserved(page)
 	    && !test_bit(PG_arch_1, &page->flags)) {
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index eb3a732e91db..ebfd13dc9d19 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -56,7 +56,7 @@ extern unsigned long total_lowmem;
  * architectures. -- Dan
  */
 #if defined(CONFIG_8xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(X, va, pg)	_tlbie(va, 0 /* 8xx doesn't care about PID */)
 #define MMU_init_hw()		do { } while(0)
 #define mmu_mapin_ram()		(0UL)
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 390dd1995c2a..dd898d32480e 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -561,7 +561,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		 * That means the zeroed TLB has to be invalidated
 		 * whenever a page miss occurs.
 		 */
-		_tlbie(address);
+		_tlbie(address, 0 /* 8xx doesn't care about PID */);
 #endif
 	if (!PageReserved(page)
 	    && !test_bit(PG_arch_1, &page->flags)) {
diff --git a/arch/ppc/mm/mmu_decl.h b/arch/ppc/mm/mmu_decl.h
index f1d4f2109a99..b298b60c202f 100644
--- a/arch/ppc/mm/mmu_decl.h
+++ b/arch/ppc/mm/mmu_decl.h
@@ -49,7 +49,7 @@ extern unsigned int num_tlbcam_entries;
  * architectures. -- Dan
  */
 #if defined(CONFIG_8xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(X, va, pg)	_tlbie(va, 0 /* 8xx doesn't care about PID */)
 #define MMU_init_hw()		do { } while(0)
 #define mmu_mapin_ram()		(0UL)
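
Note: all four hunks make the same mechanical change: the 8xx call sites of _tlbie() now pass an explicit PID as a second argument, with 0 used because the 8xx invalidate path ignores the PID. The updated prototype is not part of these hunks; the standalone sketch below only illustrates the new call shape, assuming a signature along the lines of _tlbie(unsigned long va, unsigned int pid).

/* Standalone sketch, not kernel code: a stub _tlbie() with the assumed
 * two-argument signature, plus the updated CONFIG_8xx flush_HPTE macro,
 * showing how callers now spell out the PID explicitly. */
#include <stdio.h>

static void _tlbie(unsigned long va, unsigned int pid)
{
	/* Stand-in for the real tlbie sequence; on 8xx the PID is ignored. */
	printf("invalidate TLB entry: va=0x%lx pid=%u\n", va, pid);
}

/* Mirrors the post-patch CONFIG_8xx definition from mmu_decl.h. */
#define flush_HPTE(X, va, pg)	_tlbie(va, 0 /* 8xx doesn't care about PID */)

int main(void)
{
	/* X and pg are unused by the 8xx macro; dummy values suffice here. */
	flush_HPTE(0, 0xc0001000UL, 0);
	return 0;
}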