From 1e74f3000b86969de421ca0da08f42e7d21cbd99 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Mon, 17 Nov 2008 16:24:34 +0900
Subject: swiotlb: use coherent_dma_mask in alloc_coherent

Impact: fix DMA buffer allocation coherency bug in certain configs

This patch fixes swiotlb to use dev->coherent_dma_mask in
swiotlb_alloc_coherent().

coherent_dma_mask is a subset of dma_mask (equal to it most of the time),
enumerating the address range that a given device is able to DMA to/from
in a cache-coherent way.

Currently, swiotlb uses dev->dma_mask in alloc_coherent() implicitly via
address_needs_mapping(), but alloc_coherent is really supposed to use
coherent_dma_mask.

This bug could break drivers that use a smaller coherent_dma_mask than
dma_mask (though the current code works for the majority that use the same
mask for coherent_dma_mask and dma_mask).

Signed-off-by: FUJITA Tomonori
Cc: tony.luck@intel.com
Signed-off-by: Ingo Molnar
---
 lib/swiotlb.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

(limited to 'lib')

diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 78330c37a61..5f6c629a924 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
--
cgit v1.2.3
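
To make the dma_mask/coherent_dma_mask distinction concrete, consider a device
that can do streaming DMA anywhere in a 64-bit address space but can only use
coherent buffers below 4 GB. The probe function below is an illustrative
sketch, not code from the patch: the function and device names are made up,
and it uses the 2008-era PCI DMA API (pci_set_dma_mask(),
pci_set_consistent_dma_mask(), DMA_64BIT_MASK, DMA_32BIT_MASK). Before the fix
above, swiotlb_alloc_coherent() would have validated its buffer against the
64-bit dma_mask of such a device; after it, the 32-bit coherent_dma_mask is
checked instead.

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical probe routine for a device with asymmetric DMA masks. */
	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int err;

		/* Streaming mappings (dma_map_single/sg) may target any 64-bit address. */
		err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
		if (err)
			return err;

		/* Coherent allocations (dma_alloc_coherent) must stay below 4 GB. */
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (err)
			return err;

		return 0;
	}
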
From f652c521e0bec2e70cf123f47e80117a7e6ed139 Mon Sep 17 00:00:00 2001
From: Arjan van de Ven
Date: Wed, 19 Nov 2008 15:36:19 -0800
Subject: lib/scatterlist.c: fix kunmap() argument in sg_miter_stop()

kunmap() takes as argument the struct page that originally got kmap()'d,
however the sg_miter_stop() function passed it the kernel virtual address
instead, resulting in weird stuff.

Somehow I ended up fixing this bug by accident while looking for a bug in
the same area.

Reported-by: kerneloops.org
Acked-by: Tejun Heo
Signed-off-by: Arjan van de Ven
Cc: Hugh Dickins
Cc: [2.6.27.x]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/scatterlist.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff135..b7b449dafbe 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 		miter->page = NULL;
 		miter->addr = NULL;
--
cgit v1.2.3

From 6ff2d39b91aec3dcae951afa982059e3dd9b49dc Mon Sep 17 00:00:00 2001
From: Manfred Spraul
Date: Mon, 1 Dec 2008 13:14:02 -0800
Subject: lib/idr.c: fix rcu related race with idr_find

2nd part of the fixes needed for
http://bugzilla.kernel.org/show_bug.cgi?id=11796.

When the idr tree is either grown or shrunk, the updates to the number of
layers and to the top pointer were not atomic.  This race caused crashes.

The attached patch fixes that by replicating the layers counter in each
layer, thus idr_find doesn't need idp->layers anymore.

Signed-off-by: Manfred Spraul
Cc: Clement Calmels
Cc: Nadia Derbey
Cc: Pierre Peiffer
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/idr.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

(limited to 'lib')

diff --git a/lib/idr.c b/lib/idr.c
index e728c7fccc4..7a785a0c2ea 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -237,6 +239,7 @@ build_up:
 		}
 		new->ary[0] = p;
 		new->count = 1;
+		new->layer = layers-1;
 		if (p->bitmap == IDR_FULL)
 			__set_bit(0, &new->bitmap);
 		p = new;
@@ -493,17 +496,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +589,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
--
cgit v1.2.3
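
The point of replicating the level in each idr_layer is that an RCU reader no
longer has to combine two values that can change independently (idp->layers
and idp->top): everything the lookup needs is carried by the top node it just
dereferenced. Below is a condensed sketch of the lookup shape that results,
simplified from the patched idr_find() above with the BUG_ON() sanity checks
dropped; it assumes the post-patch struct idr_layer (with its ->layer field)
and the IDR_BITS/IDR_MASK/MAX_ID_MASK constants from <linux/idr.h>.

	#include <linux/idr.h>
	#include <linux/rcupdate.h>

	static void *idr_find_sketch(struct idr *idp, int id)
	{
		struct idr_layer *p;
		int n;

		p = rcu_dereference(idp->top);	/* one consistent snapshot of the root */
		if (!p)
			return NULL;

		/*
		 * Derive the starting shift from the snapshot itself rather
		 * than from idp->layers, so it can never disagree with the
		 * tree that is about to be walked.
		 */
		n = (p->layer + 1) * IDR_BITS;

		id &= MAX_ID_MASK;
		if (id >= (1 << n))
			return NULL;		/* id lies outside this tree */

		while (n > 0 && p) {
			n -= IDR_BITS;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}
		return (void *)p;
	}
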