linux-cvs-patches
[Top] [All Lists]

CVS Update@linux-mips.org: linux

To: linux-cvs-patches@linux-mips.org
Subject: CVS Update@linux-mips.org: linux
From: ralf@linux-mips.org
Date: Mon, 21 Mar 2005 18:59:45 +0000
Reply-to: linux-mips@linux-mips.org
Sender: linux-cvs-patches-bounce@linux-mips.org
CVSROOT:        /home/cvs
Module name:    linux
Changes by:     ralf@ftp.linux-mips.org 05/03/21 18:59:38

Modified files:
        arch/mips/sgi-ip27: ip27-init.c ip27-irq.c 

Log message:
        HUB interrupts are allocated per node, not per slice.  Make
        manipulation of the interrupt mask register atomic by disabling
        interrupts.

diff -urN linux/arch/mips/sgi-ip27/ip27-init.c linux/arch/mips/sgi-ip27/ip27-init.c
--- linux/arch/mips/sgi-ip27/ip27-init.c        2005/01/13 14:05:30     1.67
+++ linux/arch/mips/sgi-ip27/ip27-init.c        2005/03/21 18:59:38     1.68
@@ -56,12 +56,12 @@
 {
        struct hub_data *hub = hub_data(cnode);
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+       int i;
 
        cpu_set(smp_processor_id(), hub->h_cpus);
 
        if (test_and_set_bit(cnode, hub_init_mask))
                return;
-
        /*
         * Set CRB timeout at 5ms, (< PI timeout of 10ms)
         */
@@ -88,6 +88,24 @@
                __flush_cache_all();
        }
 #endif
+
+       /*
+        * Some interrupts are reserved by hardware or by software convention.
+        * Mark these as reserved right away so they won't be used accidently
+        * later.
+        */
+       for (i = 0; i <= BASE_PCI_IRQ; i++) {
+               __set_bit(i, hub->irq_alloc_mask);
+               LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i);
+       }
+
+       __set_bit(IP_PEND0_6_63, hub->irq_alloc_mask);
+       LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
+
+       for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
+               __set_bit(i, hub->irq_alloc_mask);
+               LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i);
+       }
 }
 
 void __init per_cpu_init(void)
@@ -104,30 +122,12 @@
 
        clear_c0_status(ST0_IM);
 
+       per_hub_init(cnode);
+
        for (i = 0; i < LEVELS_PER_SLICE; i++)
                si->level_to_irq[i] = -1;
 
        /*
-        * Some interrupts are reserved by hardware or by software convention.
-        * Mark these as reserved right away so they won't be used accidently
-        * later.
-        */
-       for (i = 0; i <= BASE_PCI_IRQ; i++) {
-               __set_bit(i, si->irq_alloc_mask);
-               LOCAL_HUB_S(PI_INT_PEND_MOD, i);
-       }
-
-       __set_bit(IP_PEND0_6_63, si->irq_alloc_mask);
-       LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
-
-       for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
-               __set_bit(i, si->irq_alloc_mask + 1);
-               LOCAL_HUB_S(PI_INT_PEND_MOD, i);
-       }
-
-       LOCAL_HUB_L(PI_INT_PEND0);
-
-       /*
         * We use this so we can find the local hub's data as fast as only
         * possible.
         */
@@ -140,8 +140,6 @@
        install_cpu_nmi_handler(cputoslice(cpu));
 
        set_c0_status(SRB_DEV0 | SRB_DEV1);
-
-       per_hub_init(cnode);
 }
 
 /*
diff -urN linux/arch/mips/sgi-ip27/ip27-irq.c linux/arch/mips/sgi-ip27/ip27-irq.c
--- linux/arch/mips/sgi-ip27/ip27-irq.c 2004/10/26 01:37:16     1.34
+++ linux/arch/mips/sgi-ip27/ip27-irq.c 2005/03/21 18:59:38     1.35
@@ -74,14 +74,15 @@
 
 static inline int alloc_level(int cpu, int irq)
 {
+       struct hub_data *hub = hub_data(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
-       int level;                              /* pre-allocated entries */
+       int level;
 
-       level = find_first_zero_bit(si->irq_alloc_mask, LEVELS_PER_SLICE);
+       level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
        if (level >= LEVELS_PER_SLICE)
                panic("Cpu %d flooded with devices\n", cpu);
 
-       __set_bit(level, si->irq_alloc_mask);
+       __set_bit(level, hub->irq_alloc_mask);
        si->level_to_irq[level] = irq;
 
        return level;
@@ -216,9 +217,11 @@
 {
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
+       unsigned long flags;
 
-       __set_bit(bit, si->irq_enable_mask);
+       set_bit(bit, si->irq_enable_mask);
 
+       local_irq_save(flags);
        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
@@ -226,6 +229,7 @@
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
+       local_irq_restore(flags);
 
        return 0;
 }
@@ -235,7 +239,7 @@
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
 
-       __clear_bit(bit, si->irq_enable_mask);
+       clear_bit(bit, si->irq_enable_mask);
 
        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
@@ -298,6 +302,7 @@
 static void shutdown_bridge_irq(unsigned int irq)
 {
        struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
+       struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
        bridge_t *bridge = bc->base;
        struct slice_data *si = cpu_data[bc->irq_cpu].data;
        int pin, swlevel;
@@ -313,7 +318,7 @@
        swlevel = find_level(&cpu, irq);
        intr_disconnect_level(cpu, swlevel);
 
-       __clear_bit(swlevel, si->irq_alloc_mask);
+       __clear_bit(swlevel, hub->irq_alloc_mask);
        si->level_to_irq[swlevel] = -1;
 
        bridge->b_int_enable &= ~(1 << pin);
@@ -433,25 +438,24 @@
        int slice = LOCAL_HUB_L(PI_CPU_NUM);
        int cpu = smp_processor_id();
        struct slice_data *si = cpu_data[cpu].data;
-       hubreg_t mask, set;
+       struct hub_data *hub = hub_data(cpu_to_node(cpu));
+       int resched, call;
+
+       resched = CPU_RESCHED_A_IRQ + slice;
+       __set_bit(resched, hub->irq_alloc_mask);
+       __set_bit(resched, si->irq_enable_mask);
+       LOCAL_HUB_CLR_INTR(resched);
+
+       call = CPU_CALL_A_IRQ + slice;
+       __set_bit(call, hub->irq_alloc_mask);
+       __set_bit(call, si->irq_enable_mask);
+       LOCAL_HUB_CLR_INTR(call);
 
        if (slice == 0) {
-               LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
-               LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-               mask = LOCAL_HUB_L(PI_INT_MASK0_A);     /* Slice A */
-               set = (1UL << CPU_RESCHED_A_IRQ) | (1UL << CPU_CALL_A_IRQ);
-               mask |= set;
-               si->irq_enable_mask[0] |= set;
-               si->irq_alloc_mask[0] |= set;
-               LOCAL_HUB_S(PI_INT_MASK0_A, mask);
+               LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
+               LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
-               LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
-               LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-               mask = LOCAL_HUB_L(PI_INT_MASK0_B);     /* Slice B */
-               set = (1UL << CPU_RESCHED_B_IRQ) | (1UL << CPU_CALL_B_IRQ);
-               mask |= set;
-               si->irq_enable_mask[1] |= set;
-               si->irq_alloc_mask[1] |= set;
-               LOCAL_HUB_S(PI_INT_MASK0_B, mask);
+               LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
+               LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
 }

[Prev in Thread] [Current Thread] [Next in Thread]