linux-mips
[Top] [All Lists]

Re: is remap_pfn_range should align to 2(n) * (page size) ?

To: "Ralf Baechle" <ralf@linux-mips.org>
Subject: Re: is remap_pfn_range should align to 2(n) * (page size) ?
From: zhuzhenhua <zzh.hust@gmail.com>
Date: Tue, 13 May 2008 19:44:06 +0800
Cc: linux-mips <linux-mips@linux-mips.org>
Dkim-signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=gamma; h=domainkey-signature:received:received:message-id:date:from:to:subject:cc:in-reply-to:mime-version:content-type:references; bh=dn2BE7P+H+N4GeUsJqM2MsHuEmIVhBC5qPsXYyAjcxQ=; b=aNe6uNC04CCYwVvIX+ukd+x1AjH742A0h9Ec7cwU0NGOE6lwOOIiTGIqLGCKfcQL/tFuhKiNRMh5iEyxiuvMK4tFbirzgSCYYK3V0zlWbti6bfeei4yHAA/mOQmYhEd6vzWI8zfj8XJirYTpjrt5VfcYVSt8M5anwNLQlPhtoeo=
Domainkey-signature: a=rsa-sha1; c=nofws; d=gmail.com; s=gamma; h=message-id:date:from:to:subject:cc:in-reply-to:mime-version:content-type:references; b=glIMajaW4mFdyasP1yxs1Vtyjur8A+kZUQwYy/Yyy4IeI2rcPg4YJneDQJcXl23EPX+a9HFqYIaGi8DuFxaiDhYf9CB86q4HoX3RcNXaJPj6eiXyahLC3qI2ltSXPXp+Z7YAkKLKTSIiclkwazZyxQBjisqFOHjD8NIyQjg1Onk=
In-reply-to: <20080512112233.GA8843@linux-mips.org>
Original-recipient: rfc822;linux-mips@linux-mips.org
References: <50c9a2250805082354x1edc1ecar89dcc3378b3bbe75@mail.gmail.com> <20080509095605.GB14450@linux-mips.org> <50c9a2250805111918r16913139obfc2982220636b3@mail.gmail.com> <20080512112233.GA8843@linux-mips.org>
Sender: linux-mips-bounce@linux-mips.org


On 5/12/08, Ralf Baechle <ralf@linux-mips.org> wrote:
On Mon, May 12, 2008 at 10:18:27AM +0800, zhuzhenhua wrote:

> > This has nothing to do with remap_pfn_range but with the power of two
> > sized buckets used by the global free page pool.  Any allocation with
> > get_free_pages will be rounded up to the next power of two.  If that's a
> > real concern for you you could allocate a 4MB page then split the page
> > into a 2MB and two 1MB pages and free the 1MB page again.


> thanks for your reply, i see in get_free_pages and free_pages there is a
> get_order(size).
> but i don't understand  " allocate a 4MB page then split the page
> into a 2MB and two 1MB pages and free the 1MB page again."
> is there any function to split it?


No, you'd have to code that yourself.  Take a look at split_page() which
splits an order n page into order 0 pages.  You'd want something similar
but splitting for some non-zero order.


  Ralf

thanks for your advice. i found that in the newest kernel version, on some architectures, dma_alloc_coherent will call split_page.
because my kernel version is 2.6.14, i first applied the split_page patch, as follows:
http://www.kernel.org/pub/linux/kernel/people/npiggin/patches/lockless/2.6.16-rc5/broken-out/mm-split-highorder.patch

but it seems that there is still no split_page in dma_alloc_coherent/dma_alloc_noncoherent,
so i copied code from another arch into arch/mips/mm/dma-noncoherent.c (attached at the end of the mail),
and now my driver just uses dma_alloc_coherent to allocate 3 MB directly, and it seems OK.
i just wonder why the mips arch dma_alloc_coherent/dma_alloc_noncoherent does not call split_page while other arches do.

thanks for any hints.

Best Regards

zzh






modify on dma-noncoherent.c
--- dma-noncoherent.c    2008-05-13 19:31:58.131375500 +0800
+++ dma-noncoherent-split.c    2008-05-13 19:31:51.039745500 +0800
@@ -16,27 +16,59 @@
 
 #include <asm/cache.h>
 #include <asm/io.h>
+
+static struct page *__dma_alloc(struct device *dev, size_t size,
+                dma_addr_t *handle, gfp_t gfp)
+{
+    struct page *page, *free, *end;
+    int order;
+
+    size = PAGE_ALIGN(size);
+    order = get_order(size);
+    page = alloc_pages(gfp, order);
+    if (!page)
+        return NULL;
+    split_page(page, order);
+
+    *handle = page_to_phys(page);
+    free = page + (size >> PAGE_SHIFT);
+    end = page + (1 << order);
 
-/*
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- */
+    /*
+     * Free any unused pages
+     */
+    while (free < end) {
+        __free_page(free);
+        free++;
+    }
+
+    return page;
+}
+
+static void __dma_free(struct device *dev, size_t size,
+               struct page *page, dma_addr_t handle)
+{
+    struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
 
+    while (page < end)
+        __free_page(page++);
+}                          
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
     dma_addr_t * dma_handle, unsigned int __nocast gfp)
 {
+    struct page *page;     
     void *ret;
     /* ignore region specifiers */
     gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
     if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
         gfp |= GFP_DMA;
-    ret = (void *) __get_free_pages(gfp, get_order(size));
-
+    page = __dma_alloc(dev, size, dma_handle, gfp);
+    if (page)
+        ret = KSEG1ADDR(page_to_phys(page));
+    printk("ret in dma_alloc_noncoherent = 0x%x\n",ret);
     if (ret != NULL) {
         memset(ret, 0, size);
-        *dma_handle = virt_to_phys(ret);
     }
 
     return ret;
@@ -63,19 +95,17 @@
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
     dma_addr_t dma_handle)
 {
-    free_pages((unsigned long) vaddr, get_order(size));
+    void *addr = KSEG0ADDR(vaddr);
+    struct page *page;
+    BUG_ON(!virt_addr_valid(addr));
+    page = virt_to_page(addr);
+    __dma_free(dev, size, page, dma_handle); 
 }
 
 EXPORT_SYMBOL(dma_free_noncoherent);
 
 void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-    dma_addr_t dma_handle)
-{
-    unsigned long addr = (unsigned long) vaddr;
-
-    addr = CAC_ADDR(addr);
-    free_pages(addr, get_order(size));
-}
+    dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));
 
 EXPORT_SYMBOL(dma_free_coherent);
 

 


<Prev in Thread] Current Thread [Next in Thread>