summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
authorRebecca Schultz Zavin <rebecca@android.com>2011-12-13 01:21:13 +0800
committerAndy Green <andy.green@linaro.org>2011-12-26 16:41:43 +0800
commit86f2ce18bcdd45fb668babe30b44f380f79a12cf (patch)
tree6680725f8ec7177c6b7862d5fa220b03823d88eb /drivers
parent27eb0666f65b828552fea92780ffe69d34dab13c (diff)
gpu: ion: Several fixes
Fix some cases where locks were not released on error paths. Change heap->prio to heap->id to make the meaning clearer. Fix kernel doc to match sources.
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/ion/ion.c55
-rw-r--r--drivers/gpu/ion/ion_carveout_heap.c7
-rw-r--r--drivers/gpu/ion/ion_heap.c4
-rw-r--r--drivers/gpu/ion/ion_priv.h34
-rw-r--r--drivers/gpu/ion/tegra/tegra_ion.c4
5 files changed, 66 insertions, 38 deletions
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 688582b7d40..108469b8478 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -82,14 +82,14 @@ struct ion_client {
};
/**
- * ion_handle - a client local reference to an buffer
+ * ion_handle - a client local reference to a buffer
* @ref: reference count
* @client: back pointer to the client the buffer resides in
* @buffer: pointer to the buffer
* @node: node in the client's handle rbtree
- * @map_cnt: count of times this client has mapped this buffer
- * @addr: return from map
- * @vaddr: return from map_kernel
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @dmap_cnt: count of times this client has mapped for dma
+ * @usermap_cnt: count of times this client has mapped for userspace
*
* Modifications to node, map_cnt or mapping should be protected by the
* lock in the client. Other fields are never changed after initialization.
@@ -121,7 +121,8 @@ static void ion_buffer_add(struct ion_device *dev,
else if (buffer > entry)
p = &(*p)->rb_right;
else
- WARN(1, "%s: buffer already found.", __func__);
+ pr_err("%s: buffer already found.", __func__);
+ BUG();
}
rb_link_node(&buffer->node, parent, p);
@@ -146,8 +147,10 @@ struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
- if (ret)
+ if (ret) {
+ kfree(buffer);
return ERR_PTR(ret);
+ }
buffer->dev = dev;
buffer->size = len;
mutex_init(&buffer->lock);
@@ -295,7 +298,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
if (!((1 << heap->type) & client->heap_mask))
continue;
/* if the caller didn't specify this heap type */
- if (!((1 << heap->prio) & flags))
+ if (!((1 << heap->id) & flags))
continue;
buffer = ion_buffer_create(heap, dev, len, align, flags);
if (!IS_ERR_OR_NULL(buffer))
@@ -374,6 +377,7 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
+ mutex_unlock(&client->lock);
return -EINVAL;
}
@@ -382,6 +386,7 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
if (!buffer->heap->ops->phys) {
pr_err("%s: ion_phys is not implemented by this heap.\n",
__func__);
+ mutex_unlock(&client->lock);
return -ENODEV;
}
mutex_unlock(&client->lock);
@@ -398,6 +403,7 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_kernel.\n",
__func__);
+ mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
@@ -407,6 +413,8 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
if (!handle->buffer->heap->ops->map_kernel) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
@@ -433,6 +441,7 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
if (!ion_handle_validate(client, handle)) {
pr_err("%s: invalid handle passed to map_dma.\n",
__func__);
+ mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
@@ -441,6 +450,8 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
if (!handle->buffer->heap->ops->map_dma) {
pr_err("%s: map_kernel is not implemented by this heap.\n",
__func__);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
return ERR_PTR(-ENODEV);
}
if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
@@ -778,21 +789,6 @@ static void ion_vma_close(struct vm_area_struct *vma)
atomic_read(&buffer->ref.refcount));
}
-#if 0
-static int ion_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ion_handle *handle = vma->vm_private_data;
- struct ion_buffer *buffer = vma->vm_file->private_data;
- struct ion_client *client;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- /* this indicates the client is gone, nothing to do here */
- if (!handle)
- return;
- client = handle->client;
-}
-#endif
-
static struct vm_operations_struct ion_vm_ops = {
.open = ion_vma_open,
.close = ion_vma_close,
@@ -842,12 +838,12 @@ static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&buffer->lock);
/* now map it to userspace */
ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
if (ret) {
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
goto err1;
}
- mutex_unlock(&buffer->lock);
vma->vm_ops = &ion_vm_ops;
/* move the handle into the vm_private_data so we can access it from
@@ -918,14 +914,16 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case ION_IOC_FREE:
{
struct ion_handle_data data;
+ bool valid;
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_handle_data)))
return -EFAULT;
mutex_lock(&client->lock);
- if (!ion_handle_validate(client, data.handle))
- return -EINVAL;
+ valid = ion_handle_validate(client, data.handle);
mutex_unlock(&client->lock);
+ if (!valid)
+ return -EINVAL;
ion_free(client, data.handle);
break;
}
@@ -940,6 +938,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!ion_handle_validate(client, data.handle)) {
pr_err("%s: invalid handle passed to share ioctl.\n",
__func__);
+ mutex_unlock(&client->lock);
return -EINVAL;
}
data.fd = ion_ioctl_share(filp, client, data.handle);
@@ -1090,13 +1089,13 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
parent = *p;
entry = rb_entry(parent, struct ion_heap, node);
- if (heap->prio < entry->prio) {
+ if (heap->id < entry->id) {
p = &(*p)->rb_left;
- } else if (heap->prio > entry->prio) {
+ } else if (heap->id > entry->id ) {
p = &(*p)->rb_right;
} else {
pr_err("%s: can not insert multiple heaps with "
- "priority %d\n", __func__, heap->prio);
+ "id %d\n", __func__, heap->id);
goto end;
}
}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index eeb5c5354cd..245d81362d5 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -34,8 +34,8 @@ struct ion_carveout_heap {
};
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+ unsigned long size,
+ unsigned long align)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -53,6 +53,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
gen_pool_free(carveout_heap->pool, addr, size);
}
@@ -130,6 +132,7 @@ static struct ion_heap_ops carveout_heap_ops = {
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_carveout_heap *carveout_heap;
+
carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
if (!carveout_heap)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 7db9537e6f0..7c93577c9b3 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -38,12 +38,12 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
return ERR_PTR(-EINVAL);
}
if (IS_ERR_OR_NULL(heap))
- pr_err("%s: error creating heap %s type %d base %llu size %u\n",
+ pr_err("%s: error creating heap %s type %d base %lu size %u\n",
__func__, heap_data->name, heap_data->type,
heap_data->base, heap_data->size);
heap->name = heap_data->name;
- heap->prio = heap_data->prio;
+ heap->id = heap_data->id;
return heap;
}
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index fdbd2bef5c5..3323954c03a 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -45,6 +45,15 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
* @heap: back pointer to the heap the buffer came from
* @flags: buffer specific flags
* @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock: protects the buffers cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt: number of times the buffer is mapped for dma
+ * @sglist: the scatterlist for the buffer if dmap_cnt is not zero
*/
struct ion_buffer {
struct kref ref;
@@ -71,7 +80,9 @@ struct ion_buffer {
* @phys get physical address of a buffer (only define on
* physically contiguous heaps)
* @map_dma map the memory for dma to a scatterlist
+ * @unmap_dma unmap the memory for dma
* @map_kernel map memory to the kernel
+ * @unmap_kernel unmap memory to the kernel
* @map_user map memory to userspace
*/
struct ion_heap_ops {
@@ -96,10 +107,10 @@ struct ion_heap_ops {
* @dev: back pointer to the ion_device
* @type: type of heap
* @ops: ops struct as above
- * @prio: priority (lower numbers first) of this heap when
+ * @id: id of heap, also indicates priority of this heap when
* allocating. These are specified by platform data and
* MUST be unique
- * @priv: private data used by the heap implementation
+ * @name: used for debugging
*
* Represents a pool of memory from which buffers can be made. In some
* systems the only heap is regular system memory allocated via vmalloc.
@@ -111,12 +122,13 @@ struct ion_heap {
struct ion_device *dev;
enum ion_heap_type type;
struct ion_heap_ops *ops;
- int prio;
+ int id;
const char *name;
};
/**
* ion_device_create - allocates and returns an ion device
+ * @custom_ioctl: arch specific ioctl function if applicable
*
* returns a valid device or -PTR_ERR
*/
@@ -138,7 +150,12 @@ void ion_device_destroy(struct ion_device *dev);
*/
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
-/* CREATE HEAPS */
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
@@ -150,11 +167,18 @@ void ion_system_contig_heap_destroy(struct ion_heap *);
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size);
-
+/**
+ * The carveout heap returns physical addresses, since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
#define ION_CARVEOUT_ALLOCATE_FAIL -1
#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
index 4239320c328..7af6e168ff4 100644
--- a/drivers/gpu/ion/tegra/tegra_ion.c
+++ b/drivers/gpu/ion/tegra/tegra_ion.c
@@ -36,8 +36,10 @@ int tegra_ion_probe(struct platform_device *pdev)
heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
idev = ion_device_create(NULL);
- if (IS_ERR_OR_NULL(idev))
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
return PTR_ERR(idev);
+ }
/* create the heaps as specified in the board file */
for (i = 0; i < num_heaps; i++) {