[SPARC64]: Simplify TSB insert checks.

Don't try to avoid putting non-base-page-sized entries
into the user TSB.  The check actually costs us more
than it helps.

Eventually we'll have a multiple-TSB scheme for user
processes.  Once a process starts using larger pages,
we'll allocate and use such a TSB.

Signed-off-by: David S. Miller <davem@davemloft.net>
Author: David S. Miller  2006-03-05 18:26:24 -08:00
commit 74ae998772
parent 3cab0c3e86

2 changed files with 6 additions and 23 deletions

arch/sparc64/kernel/tsb.S

@@ -55,20 +55,6 @@ tsb_reload:
 	brgez,a,pn	%g5, tsb_do_fault
 	 TSB_STORE(%g1, %g7)
 
-	/* If it is larger than the base page size, don't
-	 * bother putting it into the TSB.
-	 */
-	sethi		%hi(_PAGE_ALL_SZ_BITS), %g7
-	ldx		[%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
-	and		%g5, %g7, %g2
-	sethi		%hi(_PAGE_SZBITS), %g7
-	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
-	cmp		%g2, %g7
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	bne,a,pn	%xcc, tsb_tlb_reload
-	 TSB_STORE(%g1, %g7)
-
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap. */

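For readers who don't speak SPARC assembly, the deleted block is roughly the
following C logic: %g5 holds the PTE, %g1 points at the TSB entry, and storing
an invalid tag parks the entry before branching to the TLB reload.  A minimal
compilable sketch, assuming LP64 -- the constant values and the tsb_store()
helper are stand-ins for illustration, not the kernel's real definitions:

```c
#include <stdio.h>

/* Hypothetical values -- the real masks live in the sparc64 headers. */
#define _PAGE_ALL_SZ_BITS	0x6001UL
#define _PAGE_SZBITS		0x0000UL	/* base (8K) page size bits */
#define TSB_TAG_INVALID_BIT	46		/* needs 64-bit unsigned long */

struct tsb { unsigned long tag, pte; };

/* Stand-in for the TSB_STORE macro: write the tag word of an entry. */
static void tsb_store(struct tsb *ent, unsigned long tag)
{
	ent->tag = tag;
}

/* Equivalent of the deleted check: a non-base-size page got an invalid
 * tag written into its TSB slot, then skipped straight to the TLB reload
 * instead of falling through to TSB_WRITE. */
static int maybe_skip_tsb(struct tsb *ent, unsigned long pte)
{
	if ((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS) {
		tsb_store(ent, 1UL << TSB_TAG_INVALID_BIT);
		return 1;	/* branch to tsb_tlb_reload */
	}
	return 0;		/* fall through to TSB_WRITE */
}

int main(void)
{
	struct tsb ent = { 0, 0 };

	printf("8K pte skipped? %d\n", maybe_skip_tsb(&ent, 0x0UL));
	printf("4M pte skipped? %d\n", maybe_skip_tsb(&ent, 0x6000UL));
	return 0;
}
```

After this patch the fast path drops the compare-and-branch entirely and
always executes the TSB_WRITE, which is the whole point of the change.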
arch/sparc64/mm/init.c

@@ -280,6 +280,8 @@ unsigned long _PAGE_SZBITS __read_mostly;
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct mm_struct *mm;
+	struct tsb *tsb;
+	unsigned long tag;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -308,15 +310,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	}
 
 	mm = vma->vm_mm;
-	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
-		struct tsb *tsb;
-		unsigned long tag;
-
-		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-				       (mm->context.tsb_nentries - 1UL)];
-		tag = (address >> 22UL);
-		tsb_insert(tsb, tag, pte_val(pte));
-	}
+	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
+			       (mm->context.tsb_nentries - 1UL)];
+	tag = (address >> 22UL);
+	tsb_insert(tsb, tag, pte_val(pte));
 }
 
 void flush_dcache_page(struct page *page)
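
With the size check gone, update_mmu_cache() drops every translation into the
single direct-mapped TSB.  The indexing arithmetic from the hunk above, as a
standalone sketch: it assumes the 8K sparc64 base page (PAGE_SHIFT of 13), a
guessed two-word entry layout, and it omits the locking and physical-address
handling the real tsb_insert() does.

```c
#include <stdio.h>

#define PAGE_SHIFT	13UL		/* sparc64 base page is 8K */

/* Two-word TSB entry; assumed layout (tag word, then PTE word). */
struct tsb {
	unsigned long tag;
	unsigned long pte;
};

/* Direct-mapped insert, as in update_mmu_cache() after this patch:
 * the index is the VA bits above the page offset, masked down to the
 * table size (nentries must be a power of two); the tag is VA >> 22. */
static void tsb_insert_demo(struct tsb *table, unsigned long nentries,
			    unsigned long address, unsigned long pte)
{
	struct tsb *ent = &table[(address >> PAGE_SHIFT) & (nentries - 1UL)];

	ent->tag = address >> 22UL;
	ent->pte = pte;
}

int main(void)
{
	struct tsb table[512] = { { 0, 0 } };
	unsigned long va = 0x70001234000UL;	/* arbitrary demo VA */

	tsb_insert_demo(table, 512, va, 0xdeadbeefUL);
	printf("slot %lu, tag %#lx\n",
	       (va >> PAGE_SHIFT) & 511UL, va >> 22UL);
	return 0;
}
```

Because the table is direct-mapped, a large-page PTE inserted here simply
occupies one slot like any other entry; the planned multiple-TSB scheme is
what will eventually give huge pages their own table.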