linux-cvs-patches
[Top] [All Lists]

CVS Update@linux-mips.org: linux

To: linux-cvs-patches@linux-mips.org
Subject: CVS Update@linux-mips.org: linux
From: ralf@linux-mips.org
Date: Thu, 05 May 2005 14:43:48 +0100
Reply-to: linux-mips@linux-mips.org
Sender: linux-cvs-patches-bounce@linux-mips.org
CVSROOT:        /home/cvs
Module name:    linux
Changes by:     ralf@ftp.linux-mips.org 05/05/05 14:43:42

Modified files:
        .              : Tag: linux_2_4 MAINTAINERS Makefile 
        arch/i386/mm   : Tag: linux_2_4 pageattr.c 
        arch/x86_64/kernel: Tag: linux_2_4 e820.c mtrr.c process.c 
                            setup64.c smp.c 
        arch/x86_64/mm : Tag: linux_2_4 pageattr.c 
        drivers/usb/serial: Tag: linux_2_4 visor.c visor.h 
        include/asm-x86_64: Tag: linux_2_4 hw_irq.h mmu_context.h 
                            unistd.h 
        lib            : Tag: linux_2_4 rwsem-spinlock.c rwsem.c 
        mm             : Tag: linux_2_4 filemap.c 
        net/netlink    : Tag: linux_2_4 af_netlink.c 

Log message:
        Merge with Linux 2.4.31-pre1.

diff -urN linux/MAINTAINERS linux/MAINTAINERS
--- linux/MAINTAINERS   2005/04/05 19:09:54     1.76.2.33
+++ linux/MAINTAINERS   2005/05/05 13:43:41     1.76.2.34
@@ -152,14 +152,6 @@
 W:     http://www.uni-karlsruhe.de/~Robert.Siemer/Private/
 S:     Maintained
 
-ACP/MWAVE MODEM
-P:     Paul B Schroeder
-M:     paulsch@us.ibm.com
-P:     Mike Sullivan
-M:     sullivam@us.ibm.com
-W:     http://www.ibm.com/linux/ltc/
-S:     Supported
-
 AACRAID SCSI RAID DRIVER
 P:     Adaptec OEM Raid Solutions
 M:     linux-aacraid-devel@dell.com
diff -urN linux/Makefile linux/Makefile
--- linux/Makefile      2005/04/05 19:09:54     1.119.2.35
+++ linux/Makefile      2005/05/05 13:43:41     1.119.2.36
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 4
-SUBLEVEL = 30
-EXTRAVERSION =
+SUBLEVEL = 31
+EXTRAVERSION = -pre1
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
diff -urN linux/arch/i386/mm/pageattr.c linux/arch/i386/mm/pageattr.c
--- linux/arch/i386/mm/pageattr.c       2004/08/14 18:38:45     1.1.2.3
+++ linux/arch/i386/mm/pageattr.c       2005/05/05 13:43:41     1.1.2.4
@@ -119,19 +119,15 @@
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
-                       pte_t old = *kpte;
-                       pte_t standard = mk_pte(page, PAGE_KERNEL); 
-
                        set_pte_atomic(kpte, mk_pte(page, prot)); 
-                       if (pte_same(old,standard))
-                               atomic_inc(&kpte_page->count);
                } else {
                        struct page *split = split_large_page(address, prot); 
                        if (!split)
                                return -ENOMEM;
-                       atomic_inc(&kpte_page->count);  
                        set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
+                       kpte_page = split;
                }       
+               atomic_inc(&kpte_page->count);
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                atomic_dec(&kpte_page->count); 
diff -urN linux/arch/x86_64/kernel/e820.c linux/arch/x86_64/kernel/e820.c
--- linux/arch/x86_64/kernel/e820.c     2004/08/14 18:38:48     1.2.2.9
+++ linux/arch/x86_64/kernel/e820.c     2005/05/05 13:43:41     1.2.2.10
@@ -594,7 +594,10 @@
                        ioapic_force = 1;
                        skip_ioapic_setup = 0;
                }
-               
+               else if (!memcmp(from, "noexec=", 7)) { 
+                       extern int nonx_setup(char *);
+                       nonx_setup(from + 7);
+               }                                       
        next:
                c = *(from++);
                if (!c)
diff -urN linux/arch/x86_64/kernel/mtrr.c linux/arch/x86_64/kernel/mtrr.c
--- linux/arch/x86_64/kernel/Attic/mtrr.c       2004/08/14 18:38:48     1.3.2.5
+++ linux/arch/x86_64/kernel/Attic/mtrr.c       2005/05/05 13:43:41     1.3.2.6
@@ -198,8 +198,7 @@
 
 static void get_mtrr (unsigned int reg, u64 *base, u32 *size, mtrr_type * type)
 {
-       u32 count, tmp, mask_lo, mask_hi;
-       int i;
+       u32 mask_lo, mask_hi;
        u32 base_lo, base_hi;
 
        rdmsr (MSR_MTRRphysMask(reg), mask_lo, mask_hi);
@@ -213,22 +212,17 @@
 
        rdmsr (MSR_MTRRphysBase(reg), base_lo, base_hi);
 
-       count = 0;
-       tmp = mask_lo >> MTRR_BEG_BIT;
-       for (i = MTRR_BEG_BIT; i <= 31; i++, tmp = tmp >> 1)
-               count = (count << (~tmp & 1)) | (~tmp & 1);
-       
-       tmp = mask_hi;
-       for (i = 0; i <= MTRR_END_BIT; i++, tmp = tmp >> 1)
-               count = (count << (~tmp & 1)) | (~tmp & 1);
-       
-       *size = (count+1); 
-       *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
-       *type = base_lo & 0xff;
+       /* Work out the shifted address mask */
+       mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT) | 
+                 mask_lo >> PAGE_SHIFT; 
+
+       /* This works correctly if size is a power of two, i.e. a
+          continguous range. */
+       *size = -mask_lo;
+       *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; 
+       *type = base_lo & 0xff; 
 }
 
-
-
 /*
  * Set variable MTRR register on the local CPU.
  *  <reg> The register to set.
@@ -242,8 +236,6 @@
                   u32 size, mtrr_type type, int do_safe)
 {
        struct set_mtrr_context ctxt;
-       u64 base64;
-       u64 size64;
 
        if (do_safe) { 
                set_mtrr_prepare (&ctxt);
@@ -255,12 +247,10 @@
                   relevant mask register to disable a range. */
                wrmsr (MSR_MTRRphysMask(reg), 0, 0);
        } else {
-               base64 = (base << PAGE_SHIFT) & size_and_mask;
-               wrmsr (MSR_MTRRphysBase(reg), base64 | type, base64 >> 32);
-
-               size64 = ~(((u64)size << PAGE_SHIFT) - 1);
-               size64 = size64 & size_and_mask;
-               wrmsr (MSR_MTRRphysMask(reg), (u32) (size64 | 0x800), (u32) (size64 >> 32));
+               wrmsr (MSR_MTRRphysBase(reg), base << PAGE_SHIFT | type, 
+                       (base & size_and_mask) >> (32 - PAGE_SHIFT));
+               wrmsr(MSR_MTRRphysMask(reg), -size << PAGE_SHIFT | 0x800,
+                       (-size & size_and_mask) >> (32 - PAGE_SHIFT));
        }
        if (do_safe)
                set_mtrr_done (&ctxt);
@@ -691,13 +681,13 @@
                return -ENOSYS;
        }
 
-       if (base & (size_or_mask>>PAGE_SHIFT)) {
+       if (base & size_or_mask) {
                printk (KERN_WARNING "mtrr: base(%Lx) exceeds the MTRR width(%Lx)\n",
-                               base, (size_or_mask>>PAGE_SHIFT));
+                               base, size_or_mask);
                return -EINVAL;
        }
 
-       if (size & (size_or_mask>>PAGE_SHIFT)) {
+       if (size & size_or_mask) {
                printk (KERN_WARNING "mtrr: size exceeds the MTRR width\n");
                return -EINVAL;
        }
@@ -1281,16 +1271,22 @@
 
        if (test_bit (X86_FEATURE_MTRR, boot_cpu_data.x86_capability)) {
                /* Query the width (in bits) of the physical
-                  addressable memory on the Hammer family. */
-               if ((cpuid_eax (0x80000000) >= 0x80000008)) {
+                  addressable memory. This is an AMD specific MSR,
+                  but we assume(hope?) Intel will implement it too
+                  when they extend the width of the Xeon address bus. */
+               if (cpuid_eax (0x80000000) >= 0x80000008) {
                        u32 phys_addr;
                        phys_addr = cpuid_eax (0x80000008) & 0xff;
-                       size_or_mask = ~((1L << phys_addr) - 1);
+                       size_or_mask = ~((1L << (phys_addr - PAGE_SHIFT)) - 1);
                        /*
                         * top bits MBZ as its beyond the addressable range.
                         * bottom bits MBZ as we don't care about lower 12 bits of addr.
                         */
-                       size_and_mask = (~size_or_mask) & 0x000ffffffffff000L;
+                       size_and_mask = ~size_or_mask &  0xfff00000;
+               } else {
+                       /* 36bit fallback */
+                       size_or_mask = 0xff000000;
+                       size_and_mask = 0x00f00000;
                }
        }
 }
diff -urN linux/arch/x86_64/kernel/process.c linux/arch/x86_64/kernel/process.c
--- linux/arch/x86_64/kernel/process.c  2004/04/16 03:14:13     1.4.2.9
+++ linux/arch/x86_64/kernel/process.c  2005/05/05 13:43:41     1.4.2.10
@@ -185,7 +185,6 @@
                }
                return 1;
        }
-       pm_idle = default_idle;
        return 1;
 }
 
diff -urN linux/arch/x86_64/kernel/setup64.c linux/arch/x86_64/kernel/setup64.c
--- linux/arch/x86_64/kernel/setup64.c  2004/04/16 03:14:13     1.2.2.7
+++ linux/arch/x86_64/kernel/setup64.c  2005/05/05 13:43:41     1.2.2.8
@@ -53,7 +53,7 @@
 
 */ 
 
-static int __init nonx_setup(char *str)
+int __init nonx_setup(char *str)
 {
        if (!strncmp(str, "on",3)) { 
                __supported_pte_mask |= _PAGE_NX; 
diff -urN linux/arch/x86_64/kernel/smp.c linux/arch/x86_64/kernel/smp.c
--- linux/arch/x86_64/kernel/smp.c      2003/11/17 01:07:34     1.3.2.4
+++ linux/arch/x86_64/kernel/smp.c      2005/05/05 13:43:41     1.3.2.5
@@ -228,7 +228,8 @@
                BUG();
        clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
        /* flush TLB before it goes away. this stops speculative prefetches */
-       __flush_tlb(); 
+       *read_pda(level4_pgt) = __pa(init_mm.pgd) | _PAGE_TABLE;
+       __flush_tlb();
 }
 
 /*
diff -urN linux/arch/x86_64/mm/pageattr.c linux/arch/x86_64/mm/pageattr.c
--- linux/arch/x86_64/mm/pageattr.c     2004/04/16 03:14:13     1.1.2.3
+++ linux/arch/x86_64/mm/pageattr.c     2005/05/05 13:43:41     1.1.2.4
@@ -55,7 +55,7 @@
                        asm volatile("clflush (%0)" :: "r" (address + i)); 
        } else
                asm volatile("wbinvd":::"memory"); 
-       __flush_tlb_one(address);
+       __flush_tlb_all();
 }
 
 /* no more special protections in this 2MB area - revert to a
@@ -101,18 +101,15 @@
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
-                       pte_t old = *kpte;
-                       pte_t standard = mk_pte(page, PAGE_KERNEL); 
-
                        set_pte(kpte, mk_pte(page, prot)); 
-                       if (pte_same(old,standard))
-                               atomic_inc(&kpte_page->count);
                } else {
                        struct page *split = split_large_page(address, prot); 
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte,mk_pte(split, PAGE_KERNEL));
-               }       
+                       kpte_page = split;
+               }
+               atomic_inc(&kpte_page->count);  
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
                set_pte(kpte, mk_pte(page, PAGE_KERNEL));
                atomic_dec(&kpte_page->count); 
diff -urN linux/drivers/usb/serial/visor.c linux/drivers/usb/serial/visor.c
--- linux/drivers/usb/serial/visor.c    2004/08/14 18:38:57     1.24.2.13
+++ linux/drivers/usb/serial/visor.c    2005/05/05 13:43:41     1.24.2.14
@@ -206,6 +206,7 @@
        { USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) },
+       { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE31_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) },
        { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) },
        { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID) },
@@ -243,6 +244,7 @@
        { USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) },
+       { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE31_ID) },
        { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) },
        { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) },
        { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) },
diff -urN linux/drivers/usb/serial/visor.h linux/drivers/usb/serial/visor.h
--- linux/drivers/usb/serial/visor.h    2004/04/16 03:14:18     1.6.2.8
+++ linux/drivers/usb/serial/visor.h    2005/05/05 13:43:41     1.6.2.9
@@ -31,6 +31,7 @@
 #define PALM_M130_ID                   0x0050
 #define PALM_TUNGSTEN_T_ID             0x0060
 #define PALM_TUNGSTEN_Z_ID             0x0031
+#define PALM_ZIRE31_ID                 0x0061
 #define PALM_ZIRE_ID                   0x0070
 #define PALM_M100_ID                   0x0080
 
diff -urN linux/include/asm-x86_64/hw_irq.h linux/include/asm-x86_64/hw_irq.h
--- linux/include/asm-x86_64/hw_irq.h   2003/07/05 03:23:47     1.2.2.3
+++ linux/include/asm-x86_64/hw_irq.h   2005/05/05 13:43:41     1.2.2.4
@@ -156,7 +156,7 @@
        atomic_inc((atomic_t *)&prof_buffer[eip]);
 }
 
-#ifdef CONFIG_SMP /*more of this file should probably be ifdefed SMP */
+#ifdef CONFIG_X86_IO_APIC
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
        if (IO_APIC_IRQ(i))
                send_IPI_self(IO_APIC_VECTOR(i));
diff -urN linux/include/asm-x86_64/mmu_context.h linux/include/asm-x86_64/mmu_context.h
--- linux/include/asm-x86_64/mmu_context.h      2002/09/11 12:45:37     1.3.2.1
+++ linux/include/asm-x86_64/mmu_context.h      2005/05/05 13:43:41     1.3.2.2
@@ -60,9 +60,11 @@
                        out_of_line_bug();
                if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled 
-                        * tlb flush IPI delivery. We must flush our tlb.
+                        * tlb flush IPI delivery. We must reload the page 
+                        * table.
                         */
-                       local_flush_tlb();
+                       *read_pda(level4_pgt) = __pa(next->pgd) | _PAGE_TABLE;
+                       __flush_tlb();
                }
                if (!test_and_set_bit(cpu, &next->context.cpuvalid))
                        load_LDT(next);
diff -urN linux/include/asm-x86_64/unistd.h linux/include/asm-x86_64/unistd.h
--- linux/include/asm-x86_64/unistd.h   2004/11/19 00:28:51     1.4.2.6
+++ linux/include/asm-x86_64/unistd.h   2005/05/05 13:43:41     1.4.2.7
@@ -673,7 +673,7 @@
 }
 
 extern long sys_exit(int) __attribute__((noreturn));
-static inline void exit(int error_code)
+extern inline void exit(int error_code)
 {
        sys_exit(error_code);
 }
diff -urN linux/lib/rwsem-spinlock.c linux/lib/rwsem-spinlock.c
--- linux/lib/rwsem-spinlock.c  2004/11/29 17:47:18     1.1.2.3
+++ linux/lib/rwsem-spinlock.c  2005/05/05 13:43:42     1.1.2.4
@@ -127,12 +127,12 @@
 
        rwsemtrace(sem,"Entering __down_read");
 
-       spin_lock(&sem->wait_lock);
+       spin_lock_irq(&sem->wait_lock);
 
        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
-               spin_unlock(&sem->wait_lock);
+               spin_unlock_irq(&sem->wait_lock);
                goto out;
        }
 
@@ -147,7 +147,7 @@
        list_add_tail(&waiter.list,&sem->wait_list);
 
        /* we don't need to touch the semaphore struct anymore */
-       spin_unlock(&sem->wait_lock);
+       spin_unlock_irq(&sem->wait_lock);
 
        /* wait to be given the lock */
        for (;;) {
@@ -169,9 +169,10 @@
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
        int ret = 0;
+       unsigned long flags;
        rwsemtrace(sem,"Entering __down_read_trylock");
 
-       spin_lock(&sem->wait_lock);
+       spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
                /* granted */
@@ -179,7 +180,7 @@
                ret = 1;
        }
 
-       spin_unlock(&sem->wait_lock);
+       spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        rwsemtrace(sem,"Leaving __down_read_trylock");
        return ret;
@@ -196,12 +197,12 @@
 
        rwsemtrace(sem,"Entering __down_write");
 
-       spin_lock(&sem->wait_lock);
+       spin_lock_irq(&sem->wait_lock);
 
        if (sem->activity==0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
-               spin_unlock(&sem->wait_lock);
+               spin_unlock_irq(&sem->wait_lock);
                goto out;
        }
 
@@ -216,7 +217,7 @@
        list_add_tail(&waiter.list,&sem->wait_list);
 
        /* we don't need to touch the semaphore struct anymore */
-       spin_unlock(&sem->wait_lock);
+       spin_unlock_irq(&sem->wait_lock);
 
        /* wait to be given the lock */
        for (;;) {
@@ -238,9 +239,10 @@
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
        int ret = 0;
+       unsigned long flags;
        rwsemtrace(sem,"Entering __down_write_trylock");
 
-       spin_lock(&sem->wait_lock);
+       spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (sem->activity==0 && list_empty(&sem->wait_list)) {
                /* granted */
@@ -248,7 +250,7 @@
                ret = 1;
        }
 
-       spin_unlock(&sem->wait_lock);
+       spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        rwsemtrace(sem,"Leaving __down_write_trylock");
        return ret;
@@ -259,14 +261,15 @@
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
+       unsigned long flags;
        rwsemtrace(sem,"Entering __up_read");
 
-       spin_lock(&sem->wait_lock);
+       spin_lock_irqsave(&sem->wait_lock, flags);
 
        if (--sem->activity==0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);
 
-       spin_unlock(&sem->wait_lock);
+       spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        rwsemtrace(sem,"Leaving __up_read");
 }
@@ -276,15 +279,16 @@
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
+       unsigned long flags;
        rwsemtrace(sem,"Entering __up_write");
 
-       spin_lock(&sem->wait_lock);
+       spin_lock_irqsave(&sem->wait_lock, flags);
 
        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem);
 
-       spin_unlock(&sem->wait_lock);
+       spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        rwsemtrace(sem,"Leaving __up_write");
 }
diff -urN linux/lib/rwsem.c linux/lib/rwsem.c
--- linux/lib/rwsem.c   2004/11/19 00:28:52     1.2.2.2
+++ linux/lib/rwsem.c   2005/05/05 13:43:42     1.2.2.3
@@ -127,7 +127,7 @@
        set_task_state(tsk,TASK_UNINTERRUPTIBLE);
 
        /* set up my own style of waitqueue */
-       spin_lock(&sem->wait_lock);
+       spin_lock_irq(&sem->wait_lock);
        waiter->task = tsk;
        get_task_struct(tsk);
 
@@ -142,7 +142,7 @@
        if (!(count & RWSEM_ACTIVE_MASK))
                sem = __rwsem_do_wake(sem);
 
-       spin_unlock(&sem->wait_lock);
+       spin_unlock_irq(&sem->wait_lock);
 
        /* wait to be given the lock */
        for (;;) {
@@ -195,15 +195,16 @@
  */
 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
+       unsigned long flags;
        rwsemtrace(sem,"Entering rwsem_wake");
 
-       spin_lock(&sem->wait_lock);
+       spin_lock_irqsave(&sem->wait_lock, flags);
 
        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem);
 
-       spin_unlock(&sem->wait_lock);
+       spin_unlock_irqrestore(&sem->wait_lock, flags);
 
        rwsemtrace(sem,"Leaving rwsem_wake");
 
diff -urN linux/mm/filemap.c linux/mm/filemap.c
--- linux/mm/filemap.c  2005/04/05 19:09:58     1.74.2.18
+++ linux/mm/filemap.c  2005/05/05 13:43:42     1.74.2.19
@@ -2605,6 +2605,8 @@
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
+       error = -EIO;
+
        /* round to cluster boundaries if this isn't a "random" area. */
        if (!VM_RandomReadHint(vma)) {
                start = CLUSTER_OFFSET(start);
diff -urN linux/net/netlink/af_netlink.c linux/net/netlink/af_netlink.c
--- linux/net/netlink/af_netlink.c      2005/04/05 19:09:59     1.19.2.9
+++ linux/net/netlink/af_netlink.c      2005/05/05 13:43:42     1.19.2.10
@@ -980,9 +980,11 @@
        len = cb->dump(skb, cb);
 
        if (len > 0) {
+               sock_hold(sk);
                spin_unlock(&sk->protinfo.af_netlink->cb_lock);
                skb_queue_tail(&sk->receive_queue, skb);
                sk->data_ready(sk, len);
+               sock_put(sk);
                return 0;
        }
 

[Prev in Thread] Current Thread [Next in Thread]