authorJon Medhurst <tixy@linaro.org>2015-08-04 16:13:39 +0100
committerJon Medhurst <tixy@linaro.org>2015-08-04 16:13:39 +0100
commitb9b686593e37b53957c5ff7baef3922ee39abc4d (patch)
treec3f2be13f5a8554b7d0d5ba15564784b211d653b /drivers
parentb4fab389d412397db9d8a3432a9348d4dd88c637 (diff)
parent30cfabe08a19657d75e7882c4ae94d60e8f3fe22 (diff)
Merge branch 'lsk-3.10-armlt-android-fixes' into integration-lsk-3.10-juno-android
Diffstat (limited to 'drivers')
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c21
2 files changed, 4 insertions, 21 deletions
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 698e363be551..bfee3e08071e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
allocation via dma_map_sg. The implicit contract here is that
memory comming from the heaps is ready for dma, ie if it has a
cached mapping that mapping has been invalidated */
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
sg_dma_address(sg) = sg_phys(sg);
+ sg_dma_len(sg) = sg->length;
+ }
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
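For reference, the loop in ion_buffer_create() after this change reads roughly as below (a simplified sketch; the surrounding allocation and error handling are omitted). Since, per the comment above, ion avoids running every allocation through dma_map_sg(), both the DMA address and the DMA length of each entry have to be filled in by hand:

	struct scatterlist *sg;
	int i;

	/* Buffers from the heaps are treated as already DMA-ready, so the
	 * physical address doubles as the DMA address and the DMA length
	 * simply mirrors the CPU-side length of each entry. */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}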
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 4418bda76474..978bc9ef99a3 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,24 +39,6 @@ struct ion_cma_buffer_info {
struct sg_table *table;
};
-/*
- * Create scatter-list for the already allocated DMA buffer.
- * This function could be replaced by dma_common_get_sgtable
- * as soon as it will avalaible.
- */
-static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
-{
- struct page *page = virt_to_page(cpu_addr);
- int ret;
-
- ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (unlikely(ret))
- return ret;
-
- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- return 0;
-}
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
@@ -95,8 +77,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
goto free_mem;
}
- if (ion_cma_get_sgtable
- (dev, info->table, info->cpu_addr, info->handle, len))
+ if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle, len))
goto free_table;
/* keep this for memory release */
buffer->priv_virt = info;
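The second change drops the open-coded single-entry table builder in favour of the generic DMA API helper. A minimal usage sketch of dma_get_sgtable() follows; the helper name, device pointer and error paths are illustrative only and not taken from the driver:

	/* Hypothetical helper, for illustration only: allocate a DMA buffer
	 * and describe it with an sg_table via the generic DMA API.
	 * Needs <linux/dma-mapping.h>. */
	static int example_alloc_and_map(struct device *dev, size_t len,
					 struct sg_table *sgt)
	{
		dma_addr_t handle;
		void *cpu_addr;

		cpu_addr = dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		/* Build a (typically single-entry) table describing the
		 * buffer; this is what the removed ion_cma_get_sgtable()
		 * open-coded. */
		if (dma_get_sgtable(dev, sgt, cpu_addr, handle, len)) {
			dma_free_coherent(dev, len, cpu_addr, handle);
			return -ENOMEM;
		}

		return 0;
	}

Unlike the removed ion_cma_get_sgtable(), which relied on virt_to_page() and so assumed a lowmem virtual address, dma_get_sgtable() defers to the architecture's DMA ops, which is what the deleted comment anticipated when it mentioned dma_common_get_sgtable.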