Commit 29a9b000 authored by Linus Torvalds

Merge tag 'staging-4.15-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging

Pull staging fixes from Greg KH:
 "Here are three staging driver fixes for 4.15-rc6

  The first resolves a bug in the lustre driver that was introduced by a
  broken cleanup patch, which tripped over the crazy list usage in that
  codebase.

  The remaining two are ion driver fixes, finally getting the CMA
  interaction to work properly, resolving two regressions in that area
  of the code.

  All have been in linux-next with no reported issues for a while"

* tag 'staging-4.15-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging:
  staging: android: ion: Fix dma direction for dma_sync_sg_for_cpu/device
  staging: ion: Fix ion_cma_heap allocations
  staging: lustre: lnet: Fix recent breakage from list_for_each conversion
@@ -37,7 +37,7 @@ config ION_CHUNK_HEAP

 config ION_CMA_HEAP
        bool "Ion CMA heap support"
-       depends on ION && CMA
+       depends on ION && DMA_CMA
        help
          Choose this option to enable CMA heaps with Ion. This heap is backed
          by the Contiguous Memory Allocator (CMA). If your system has these
@@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
-                                   DMA_BIDIRECTIONAL);
+                                   direction);
        }
        mutex_unlock(&buffer->lock);
@@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
        mutex_lock(&buffer->lock);
        list_for_each_entry(a, &buffer->attachments, list) {
                dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
-                                      DMA_BIDIRECTIONAL);
+                                      direction);
        }
        mutex_unlock(&buffer->lock);
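The two hunks above stop ion's begin/end_cpu_access dma-buf callbacks from always syncing with DMA_BIDIRECTIONAL and make them forward the direction the importer asked for. A minimal importer-side sketch of why that argument matters (the function below is hypothetical and not part of this commit): a caller that only reads back device output passes DMA_FROM_DEVICE, and the exporter's sync calls should honor exactly that.

/*
 * Hypothetical importer-side sketch (not part of this commit): the direction
 * passed here is what ion's begin/end_cpu_access callbacks now forward to
 * dma_sync_sg_for_cpu()/dma_sync_sg_for_device() instead of hard-coding
 * DMA_BIDIRECTIONAL.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

static int cpu_read_device_output(struct dma_buf *dmabuf)
{
        int ret;

        /* Make the device's writes visible to the CPU before reading. */
        ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
        if (ret)
                return ret;

        /* ... CPU reads the buffer through a kernel or user mapping ... */

        /* Hand the buffer back to the device side when done. */
        return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}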
@@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
        struct ion_cma_heap *cma_heap = to_cma_heap(heap);
        struct sg_table *table;
        struct page *pages;
+       unsigned long size = PAGE_ALIGN(len);
+       unsigned long nr_pages = size >> PAGE_SHIFT;
+       unsigned long align = get_order(size);
        int ret;

-       pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
+       if (align > CONFIG_CMA_ALIGNMENT)
+               align = CONFIG_CMA_ALIGNMENT;
+
+       pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
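This is the core unit fix: cma_alloc() counts in pages and takes its alignment as a page order, while the old ion code handed it a byte length and no alignment. A small sketch of the conversion, under the same assumptions as the hunk above (the wrapper name cma_alloc_bytes() is hypothetical, and the 4.15-era cma_alloc(cma, count, align, gfp) signature is assumed):

/*
 * Hypothetical wrapper showing the unit conversion the fix introduces:
 * cma_alloc() counts in pages and aligns by page order, not bytes.
 */
#include <linux/cma.h>
#include <linux/mm.h>

static struct page *cma_alloc_bytes(struct cma *cma, unsigned long len)
{
        unsigned long size = PAGE_ALIGN(len);        /* round up to full pages */
        unsigned long nr_pages = size >> PAGE_SHIFT; /* bytes -> pages */
        unsigned long align = get_order(size);       /* requested order */

        /* CMA only tracks alignment up to CONFIG_CMA_ALIGNMENT (order 8 by default). */
        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        /* e.g. len = 1 MiB with 4 KiB pages: nr_pages = 256, align = order 8 */
        return cma_alloc(cma, nr_pages, align, GFP_KERNEL);
}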
@@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
        if (ret)
                goto free_mem;

-       sg_set_page(table->sgl, pages, len, 0);
+       sg_set_page(table->sgl, pages, size, 0);

        buffer->priv_virt = pages;
        buffer->sg_table = table;
@@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 free_mem:
        kfree(table);
 err:
-       cma_release(cma_heap->cma, pages, buffer->size);
+       cma_release(cma_heap->cma, pages, nr_pages);
        return -ENOMEM;
 }
@@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer)
 {
        struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
        struct page *pages = buffer->priv_virt;
+       unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;

        /* release memory */
-       cma_release(cma_heap->cma, pages, buffer->size);
+       cma_release(cma_heap->cma, pages, nr_pages);
        /* release sg table */
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
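The free path must hand the same page count back to cma_release(); passing buffer->size in bytes, as the old code did, asked CMA to release far more pages than were ever allocated. A sketch of the paired release helper (again a hypothetical name, mirroring cma_alloc_bytes() above and reusing its includes):

/*
 * Hypothetical mirror of cma_alloc_bytes(): convert the original byte
 * length back to the page count CMA expects.
 */
static bool cma_release_bytes(struct cma *cma, struct page *pages,
                              unsigned long len)
{
        unsigned long nr_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;

        /* cma_release() returns false if "pages" is not from this CMA area. */
        return cma_release(cma, pages, nr_pages);
}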
@@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
                              ksocknal_nid2peerlist(id.nid));
        }

-       route2 = NULL;
        list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
-               if (route2->ksnr_ipaddr == ipaddr)
-                       break;
-
-               route2 = NULL;
-       }
-       if (!route2) {
-               ksocknal_add_route_locked(peer, route);
-               route->ksnr_share_count++;
-       } else {
-               ksocknal_route_decref(route);
-               route2->ksnr_share_count++;
+               if (route2->ksnr_ipaddr == ipaddr) {
+                       /* Route already exists, use the old one */
+                       ksocknal_route_decref(route);
+                       route2->ksnr_share_count++;
+                       goto out;
+               }
        }
-
+       /* Route doesn't already exist, add the new one */
+       ksocknal_add_route_locked(peer, route);
+       route->ksnr_share_count++;
+out:
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return 0;
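The lustre hunk restores the "found / not found" handling that an earlier list_for_each() to list_for_each_entry() cleanup broke in ksocknal_add_peer(). The underlying pitfall: the loop cursor is never NULL once the loop finishes, and it must not be overwritten inside the loop body, because the macro advances by dereferencing it. A stand-alone sketch of that pitfall with hypothetical types (not lnet code); the patch itself resolves it with a goto, while the version below uses a separate result pointer:

/*
 * Stand-alone sketch of the list_for_each_entry() pitfall (hypothetical
 * types, not lustre code): "not found" needs its own signal, because the
 * cursor is never NULL after the loop.
 */
#include <linux/list.h>
#include <linux/types.h>

struct item {
        u32 key;
        struct list_head node;
};

static struct item *find_item(struct list_head *head, u32 key)
{
        struct item *it, *found = NULL;

        list_for_each_entry(it, head, node) {
                if (it->key == key) {
                        found = it;     /* remember the match... */
                        break;          /* ...and stop walking the list */
                }
                /*
                 * Setting "it = NULL;" here, as the broken cleanup
                 * effectively did with route2, would crash on the next
                 * iteration when the macro dereferences the cursor.
                 */
        }

        /*
         * If nothing matched, "it" now points at the address computed from
         * the list head, not NULL, so only "found" is meaningful here.
         */
        return found;
}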