
[PATCH 1/6] MIPS: Use 64-bit stores to c0_entrylo on 64-bit kernels.

To: linux-mips@linux-mips.org, ralf@linux-mips.org
Subject: [PATCH 1/6] MIPS: Use 64-bit stores to c0_entrylo on 64-bit kernels.
From: David Daney <ddaney@caviumnetworks.com>
Date: Wed, 10 Feb 2010 15:12:44 -0800
Cc: David Daney <ddaney@caviumnetworks.com>
In-reply-to: <4B733C71.8030304@caviumnetworks.com>
Original-recipient: rfc822;linux-mips@linux-mips.org
References: <4B733C71.8030304@caviumnetworks.com>
Sender: linux-mips-bounce@linux-mips.org
64-bit CPUs have 64-bit c0_entrylo{0,1} registers.  We should use the
64-bit dmtc0 instruction to set them.  This becomes important if we
want to set the RI and XI bits present in some processors.
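
For reference, the UASM_i_MTC0() macro in arch/mips/include/asm/uasm.h
already picks the right instruction for the kernel's bitness, so the
call sites below need no #ifdefs of their own.  A minimal sketch of
that selection pattern (the exact argument lists in a given tree may
differ):

/* Sketch of the uasm.h convention, not the verbatim definition: */
#ifdef CONFIG_64BIT
# define UASM_i_MTC0(buf, val, reg...) uasm_i_dmtc0(buf, val, reg)
#else
# define UASM_i_MTC0(buf, val, reg...) uasm_i_mtc0(buf, val, reg)
#endif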

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
---
 arch/mips/mm/tlbex.c |   20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 2c68849..35431e1 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -460,14 +460,14 @@ static __cpuinit void build_huge_update_entries(u32 **p,
                uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
        UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
-       uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+       UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
        /* convert to entrylo1 */
        if (small_sequence)
                UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
        else
                UASM_i_ADDU(p, pte, pte, tmp);
 
-       uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+       UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 }
 
 static __cpuinit void build_huge_handler_tail(u32 **p,
@@ -686,18 +686,18 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
                uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
                uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
-               uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
                uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
-               uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+               UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
        } else {
                int pte_off_even = sizeof(pte_t) / 2;
                int pte_off_odd = pte_off_even + sizeof(pte_t);
 
                /* The pte entries are pre-shifted */
                uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
-               uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+               UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
                uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
-               uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+               UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
        }
 #else
        UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
@@ -706,14 +706,14 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                build_tlb_probe_entry(p);
        UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
        if (r4k_250MHZhwbug())
-               uasm_i_mtc0(p, 0, C0_ENTRYLO0);
-       uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
+               UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+       UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
        UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
        if (r45k_bvahwbug())
                uasm_i_mfc0(p, tmp, C0_INDEX);
        if (r4k_250MHZhwbug())
-               uasm_i_mtc0(p, 0, C0_ENTRYLO1);
-       uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
+               UASM_i_MTC0(p, 0, C0_ENTRYLO1);
+       UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 #endif
 }
 
-- 
1.6.2.5
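
A note on why the 32-bit stores are not enough: on CPUs that implement
RI/XI, those bits live in the top two bits of the 64-bit EntryLo
registers, and mtc0 only writes the low 32 bits (the upper half is
filled by sign-extending bit 31).  A rough illustration, assuming the
common MIPS64 EntryLo layout (exact bit positions are CPU-specific):

/* Sketch, not taken from the kernel headers: */
#define ENTRYLO_RI (1ULL << 63) /* Read Inhibit */
#define ENTRYLO_XI (1ULL << 62) /* eXecute Inhibit */
/*
 * With mtc0, bits 62-63 end up as copies of bit 31, so any RI/XI
 * value computed into the pte is silently lost; dmtc0 writes the
 * full 64-bit value and preserves them.
 */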

