From: Konrad Rzeszutek Wilk
We allocate the requested pages from anywhere (ignoring the DMA32 region
specifiers) and, after allocation, ask the kernel and hypervisor to replace
the memory backing those virtual addresses with physical memory that lies
under the 4GB mark.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
---
lib/swiotlb-xen.c | 55 ++++++++++++++++++++++------------------------------
1 files changed, 23 insertions(+), 32 deletions(-)

diff --git a/lib/swiotlb-xen.c b/lib/swiotlb-xen.c
index c177f32..89443e4 100644
--- a/lib/swiotlb-xen.c
+++ b/lib/swiotlb-xen.c
@@ -35,6 +35,7 @@

#include <linux/dma-mapping.h>
#include <xen/page.h>
+#include <xen/xen-ops.h>

/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
@@ -129,47 +130,37 @@ void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
- dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
+ unsigned long vstart;

- if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = hwdev->coherent_dma_mask;
+ /*
+ * Ignore region specifiers - the kernel's ideas of
+ * pseudo-phys memory layout has nothing to do with the
+ * machine physical layout. We can't allocate highmem
+ * because we can't return a pointer to it.
+ */
+ flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

- ret = (void *)__get_free_pages(flags, order);
- if (ret && xen_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
- /*
- * The allocated memory isn't reachable by the device.
- */
- free_pages((unsigned long) ret, order);
- ret = NULL;
- }
- if (!ret) {
- /*
- * We are either out of memory or the device can't DMA to
- * GFP_DMA memory; fall back on map_single(), which
- * will grab memory from the lowest available address range.
- */
- ret = xen_map_single(hwdev, 0, size, DMA_FROM_DEVICE);
- if (!ret)
- return NULL;
- }
+ if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
+ return ret;

- memset(ret, 0, size);
- dev_addr = xen_virt_to_bus(hwdev, ret);
+ vstart = __get_free_pages(flags, order);
+ ret = (void *)vstart;

- /* Confirm address can be DMA'd by device */
- if (dev_addr + size - 1 > dma_mask) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)dma_mask,
- (unsigned long long)dev_addr);
+ if (hwdev && hwdev->coherent_dma_mask)
+ dma_mask = hwdev->coherent_dma_mask;

- /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
- return NULL;
+ if (ret) {
+ if (xen_create_contiguous_region(vstart, order,
+ fls64(dma_mask)) != 0) {
+ free_pages(vstart, order);
+ return NULL;
+ }
+ memset(ret, 0, size);
+ *dma_handle = virt_to_machine(ret).maddr;
}
- *dma_handle = dev_addr;
return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
--
1.7.0.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo(a)vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/