/*
 * Allocator helper framework for constraints-aware dma-buf backing storage
 * allocation.
 * This allows constraint-sharing devices to defer allocation of buffers
 * shared via dma-buf.
 *
 * Copyright(C) 2014 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal
 *
 * Structure for management of clients, buffers etc heavily derived from
 * Android's ION framework.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "cenalloc.h"
#include "cenalloc_priv.h"

/*
 * The constraints-aware allocator framework helper is meant to facilitate
 * deferred allocation of backing storage for dma-buf buffers.
 * It works for devices that can share their constraints via dma_params.
 * These dma_params are then used by dma_buf_attach() to create a mask of
 * common constraints. The cenalloc constraint helpers then allocate from
 * the preferred allocator according to the constraint mask.
 * Allocators and their corresponding constraint masks are pre-populated
 * for a given system - likely at the time of platform initialization.
 */

/**
 * struct cenalloc_device - the metadata of the cenalloc device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @buffer_lock: lock protecting the tree of buffers
 * @lock: rwsem protecting the list of allocators
 * @allocators: list of all the allocators in the system
 */
struct cenalloc_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head allocators;
};

/* this function should only be called while dev->buffer_lock is held */
static void cenalloc_buffer_add(struct cenalloc_device *dev,
				struct cenalloc_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct cenalloc_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct cenalloc_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static struct dma_buf_ops ca_dma_buf_ops;

static bool is_cenalloc_buffer(struct dma_buf *dmabuf);
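/*
 * A minimal usage sketch of the deferred-allocation flow described above.
 * dev_a, dev_b and the size/align values are hypothetical and only for
 * illustration; the API calls are the ones implemented in this file and in
 * the dma-buf core:
 *
 *	struct cenalloc_device *cdev = cenalloc_get_device();
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *at_a, *at_b;
 *	struct sg_table *sgt;
 *
 *	// No backing storage is allocated yet at creation time.
 *	dmabuf = cenalloc_buffer_create(cdev, SZ_1M, PAGE_SIZE, 0);
 *
 *	// Each interested device attaches first, so that its constraints
 *	// are accumulated into the dma_buf's access_constraints_mask.
 *	at_a = dma_buf_attach(dmabuf, dev_a);
 *	at_b = dma_buf_attach(dmabuf, dev_b);
 *
 *	// The first map_attachment picks an allocator that satisfies the
 *	// accumulated mask and allocates the backing storage.
 *	sgt = dma_buf_map_attachment(at_a, DMA_BIDIRECTIONAL);
 */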
/*
 * cenalloc_buffer_create creates a buffer and exports a dma-buf handle
 * associated with it.
 * Returns:
 * on success, pointer to the associated dma_buf;
 * error if the dma-buf cannot be exported or if it is out of memory.
 */
struct dma_buf *cenalloc_buffer_create(struct cenalloc_device *dev,
					unsigned long len,
					unsigned long align,
					unsigned long flags)
{
	struct cenalloc_buffer *buffer;
	struct dma_buf *dmabuf;

	buffer = kzalloc(sizeof(struct cenalloc_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->flags = flags;
	kref_init(&buffer->ref);
	buffer->dev = dev;
	buffer->size = len;
	buffer->align = align;

	dmabuf = dma_buf_export(buffer, &ca_dma_buf_ops, buffer->size,
				O_RDWR, NULL);
	if (IS_ERR(dmabuf))
		goto err;

	buffer->dmabuf = dmabuf;
	dmabuf->priv = buffer;

	mutex_init(&buffer->lock);

	mutex_lock(&dev->buffer_lock);
	cenalloc_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);

	return dmabuf;

err:
	kfree(buffer);
	return ERR_CAST(dmabuf);
}
EXPORT_SYMBOL_GPL(cenalloc_buffer_create);

static void cenalloc_buffer_destroy(struct cenalloc_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->allocator->ops->unmap_kernel(buffer->allocator, buffer);
	buffer->allocator->ops->unmap_dma(buffer->allocator, buffer);
	buffer->allocator->ops->free(buffer);
	kfree(buffer);
}

static void _cenalloc_buffer_destroy(struct cenalloc_buffer *buffer)
{
	struct cenalloc_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	cenalloc_buffer_destroy(buffer);
}

void cenalloc_buffer_free(struct dma_buf *dmabuf)
{
	if (is_cenalloc_buffer(dmabuf))
		dma_buf_put(dmabuf);
}
EXPORT_SYMBOL_GPL(cenalloc_buffer_free);

int cenalloc_phys(struct dma_buf *dmabuf, phys_addr_t *addr, size_t *len)
{
	struct cenalloc_buffer *buffer;
	int ret;

	if (is_cenalloc_buffer(dmabuf))
		buffer = (struct cenalloc_buffer *)dmabuf->priv;
	else
		return -EINVAL;

	if (!buffer->allocator->ops->phys) {
		pr_err("%s: cenalloc_phys is not implemented by this allocator.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	ret = buffer->allocator->ops->phys(buffer->allocator, buffer, addr,
					   len);
	mutex_unlock(&buffer->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cenalloc_phys);

static void *cenalloc_buffer_kmap_get(struct cenalloc_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}

	vaddr = buffer->allocator->ops->map_kernel(buffer->allocator, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "allocator->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;

	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void cenalloc_buffer_kmap_put(struct cenalloc_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->allocator->ops->unmap_kernel(buffer->allocator, buffer);
		buffer->vaddr = NULL;
	}
}

struct sg_table *cenalloc_sg_table(struct dma_buf *dmabuf)
{
	struct sg_table *table;
	struct cenalloc_buffer *buffer;

	if (is_cenalloc_buffer(dmabuf)) {
		buffer = (struct cenalloc_buffer *)dmabuf->priv;
	} else {
		pr_err("%s: invalid buffer passed to sg_table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&buffer->lock);
	table = buffer->sg_table;
	mutex_unlock(&buffer->lock);
	return table;
}
EXPORT_SYMBOL_GPL(cenalloc_sg_table);
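/*
 * A hypothetical sketch of how a driver might use the accessors above once
 * the buffer has been backed by an allocator. "dmabuf" is assumed to have
 * been created by cenalloc_buffer_create() and mapped at least once; error
 * handling is elided:
 *
 *	phys_addr_t paddr;
 *	size_t len;
 *	struct sg_table *sgt;
 *
 *	// Only works if the chosen allocator implements ops->phys, i.e.
 *	// for physically contiguous backing storage.
 *	if (!cenalloc_phys(dmabuf, &paddr, &len))
 *		pr_debug("buffer at %pa, %zu bytes\n", &paddr, len);
 *
 *	// The scatterlist view is available for any allocator.
 *	sgt = cenalloc_sg_table(dmabuf);
 */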
/*
 * dma_buf ops implementation
 */

static void cenalloc_buffer_sync_for_device(struct cenalloc_buffer *buffer,
					    struct device *dev,
					    enum dma_data_direction direction);

/*
 * This will find the right allocator for the buffer passed;
 * the assumption is that all the interested importers have called
 * dma_buf_attach() with their right constraint masks before the first call
 * to dma_buf_map_attachment().
 * At present, this uses the same priority-based mechanism as ION.
 */
static int cenalloc_find_allocator(struct cenalloc_device *dev,
				   struct cenalloc_buffer *buf)
{
	struct cenalloc_allocator *allocator;
	unsigned long constraints_mask = buf->dmabuf->access_constraints_mask;
	size_t len = buf->size;

	/*
	 * Traverse the list of allocators available in this system in
	 * priority order. If an allocator's id is set in the buffer's
	 * constraint mask, pick it; otherwise keep going until a match is
	 * found or all allocators have been tried.
	 */
	len = PAGE_ALIGN(len);
	if (!len)
		return -EINVAL;

	plist_for_each_entry(allocator, &dev->allocators, node) {
		/* if the caller didn't specify this allocator id, skip it */
		if (!((1 << allocator->id) & constraints_mask))
			continue;
		/* take the first (highest priority) matching allocator */
		buf->allocator = allocator;
		break;
	}

	if (buf->allocator == NULL)
		return -ENODEV;

	return 0;
}

static struct sg_table *cenalloc_buffer_first_alloc(
					struct cenalloc_buffer *buffer)
{
	struct cenalloc_allocator *allocator;
	struct sg_table *table;
	int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct scatterlist *sg;
	int ret, i, j, k = 0;

	mutex_lock(&buffer->lock);
	ret = cenalloc_find_allocator(buffer->dev, buffer);
	if (ret) {
		mutex_unlock(&buffer->lock);
		return ERR_PTR(-ENODEV);
	}
	/* buffer->allocator is only valid once find_allocator() succeeds */
	allocator = buffer->allocator;

	ret = allocator->ops->allocate(allocator, buffer, buffer->size,
				       buffer->align, buffer->flags);
	if (ret)
		goto cannot_allocate;

	table = allocator->ops->map_dma(allocator, buffer);
	if (WARN_ONCE(table == NULL,
		      "allocator->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		/* TODO: find the right way to handle an error here? */
		allocator->ops->free(buffer);
		mutex_unlock(&buffer->lock);
		return ERR_CAST(table);
	}

	buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
	if (!buffer->pages) {
		ret = -ENOMEM;
		allocator->ops->unmap_dma(allocator, buffer);
		allocator->ops->free(buffer);
		goto cannot_allocate;
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);

		for (j = 0; j < sg->length / PAGE_SIZE; j++)
			buffer->pages[k++] = page++;
	}

	mutex_unlock(&buffer->lock);
	return table;

cannot_allocate:
	mutex_unlock(&buffer->lock);
	return ERR_PTR(ret);
}

static void cenalloc_buffer_sync_for_device(struct cenalloc_buffer *buffer,
					    struct device *dev,
					    enum dma_data_direction dir)
{
	if (!buffer->allocator->ops->sync_for_device) {
		pr_err("%s: this allocator does not define a method for sync-to-device\n",
		       __func__);
		return;
	}
	buffer->allocator->ops->sync_for_device(buffer->allocator, buffer,
						dev, dir);
}
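/*
 * Worked example of the mask check in cenalloc_find_allocator(), with
 * hypothetical allocator ids (ids are platform-defined and not part of this
 * file): assume a system-heap allocator registered with id 0 and a CMA
 * allocator with id 2. If the devices attached to a dma_buf can all work
 * with either one, the accumulated mask is
 *
 *	access_constraints_mask = (1 << 0) | (1 << 2) = 0x5
 *
 * and the plist walk (higher id numbers first, see
 * cenalloc_device_add_allocator() below) selects the CMA allocator because
 * (1 << 2) & 0x5 is non-zero. A buffer whose mask is 0 matches nothing and
 * cenalloc_find_allocator() returns -ENODEV.
 */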
/*
 * cenalloc_map_dma_buf() models delayed allocation; if the buffer is not yet
 * backed by storage, the allocation happens here for the first time, based
 * on the constraint mask of the dma_buf, which is set based on the devices
 * currently attached to it.
 *
 * IMP: The assumption is that all participating devices call dma_buf_attach()
 * before the first dma_buf_map_attachment() is called.
 *
 * Migration is not supported at this time.
 */
static struct sg_table *cenalloc_map_dma_buf(struct dma_buf_attachment *attach,
					     enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	struct cenalloc_buffer *buffer = dmabuf->priv;
	struct sg_table *table = NULL;

	if (!buffer->sg_table) {
		down_read(&(buffer->dev->lock));
		table = cenalloc_buffer_first_alloc(buffer);
		up_read(&(buffer->dev->lock));
		if (IS_ERR(table))
			return table;
	}

	mutex_lock(&buffer->lock);
	/* only the first successful allocation installs the sg_table */
	if (table)
		buffer->sg_table = table;
	cenalloc_buffer_sync_for_device(buffer, attach->dev, direction);
	mutex_unlock(&buffer->lock);

	return buffer->sg_table;
}

static void cenalloc_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct cenalloc_buffer *buffer = dmabuf->priv;

	buffer->allocator->ops->unmap_dma(buffer->allocator, buffer);
}

static int cenalloc_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cenalloc_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->sg_table) {
		pr_err("%s: buffer needs to be mapped first before userspace mapping\n",
		       __func__);
		return -EINVAL;
	}

	if (!buffer->allocator->ops->map_user) {
		pr_err("%s: allocator does not define a method for userspace mapping\n",
		       __func__);
		return -EINVAL;
	}

	/*
	 * NOTE: For now, assume that the allocators will take care of any VMA
	 * management for the buffer - this includes providing vma_ops and/or
	 * managing mmap faults.
	 * struct cenalloc_buffer will still provide struct **pages,
	 * and also the vma area list for the buffer.
	 * This also allows for device-specific vma operations.
	 */
	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->allocator->ops->map_user(buffer->allocator, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n", __func__);

	return ret;
}

static void cenalloc_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cenalloc_buffer *buffer = dmabuf->priv;

	_cenalloc_buffer_destroy(buffer);
}

static void *cenalloc_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct cenalloc_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void cenalloc_dma_buf_kunmap(struct dma_buf *dmabuf,
				    unsigned long offset, void *ptr)
{
	/* TODO */
}

static int cenalloc_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     size_t start, size_t len,
					     enum dma_data_direction dir)
{
	struct cenalloc_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->allocator->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this allocator.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = cenalloc_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);

	return PTR_ERR_OR_ZERO(vaddr);
}

static void cenalloc_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					    size_t start, size_t len,
					    enum dma_data_direction dir)
{
	struct cenalloc_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	cenalloc_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops ca_dma_buf_ops = {
	.map_dma_buf = cenalloc_map_dma_buf,
	.unmap_dma_buf = cenalloc_unmap_dma_buf,
	.mmap = cenalloc_mmap,
	.release = cenalloc_dma_buf_release,
	.begin_cpu_access = cenalloc_dma_buf_begin_cpu_access,
	.end_cpu_access = cenalloc_dma_buf_end_cpu_access,
	.kmap = cenalloc_dma_buf_kmap,
	.kunmap = cenalloc_dma_buf_kunmap,
};

static bool is_cenalloc_buffer(struct dma_buf *dmabuf)
{
	return dmabuf->ops == &ca_dma_buf_ops;
}
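/*
 * A minimal sketch of CPU access through the ops above, as an importer would
 * perform it via the standard dma-buf interface of this kernel version; the
 * variables and the DMA direction are illustrative only:
 *
 *	void *vaddr;
 *
 *	if (!dma_buf_begin_cpu_access(dmabuf, 0, dmabuf->size,
 *				      DMA_FROM_DEVICE)) {
 *		// the kmap offset is in pages; page 0 maps to buffer->vaddr
 *		vaddr = dma_buf_kmap(dmabuf, 0);
 *		// ... CPU reads/writes through vaddr ...
 *		dma_buf_kunmap(dmabuf, 0, vaddr);
 *		dma_buf_end_cpu_access(dmabuf, 0, dmabuf->size,
 *				       DMA_FROM_DEVICE);
 *	}
 */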
/**
 * cenalloc_device_add_allocator - adds an allocator to the cenalloc_device;
 * this should be called at platform initialization time, for all allocators
 * of the platform.
 */
void cenalloc_device_add_allocator(struct cenalloc_device *dev,
				   struct cenalloc_allocator *allocator)
{
	if (!allocator->ops->allocate || !allocator->ops->free ||
	    !allocator->ops->map_dma || !allocator->ops->unmap_dma) {
		pr_err("%s: can not add allocator with invalid ops struct.\n",
		       __func__);
		return;
	}

	allocator->dev = dev;
	down_write(&dev->lock);
	/*
	 * Use negative allocator->id to reverse the priority -- when
	 * traversing the list later, attempt higher id numbers first.
	 */
	plist_node_init(&allocator->node, -allocator->id);
	plist_add(&allocator->node, &dev->allocators);
	up_write(&dev->lock);
}
EXPORT_SYMBOL_GPL(cenalloc_device_add_allocator);

/*
 * Device Init and Remove
 */

static struct cenalloc_device cenalloc_dev = {
	.dev.minor = MISC_DYNAMIC_MINOR,
	.dev.name = "cenalloc",
	.dev.parent = NULL,
};

/*
 * TODO: this mechanism of getting a cenalloc device isn't the best;
 * we need a better way of getting a handle to the device.
 */
struct cenalloc_device *cenalloc_get_device(void)
{
	return &cenalloc_dev;
}
EXPORT_SYMBOL_GPL(cenalloc_get_device);

static int __init cenalloc_device_init(void)
{
	int ret;

	ret = misc_register(&cenalloc_dev.dev);
	if (ret) {
		pr_err("cenalloc: failed to register misc device.\n");
		return ret;
	}

	cenalloc_dev.buffers = RB_ROOT;
	mutex_init(&cenalloc_dev.buffer_lock);
	init_rwsem(&cenalloc_dev.lock);
	plist_head_init(&cenalloc_dev.allocators);

	return ret;
}

static void __exit cenalloc_device_remove(void)
{
	misc_deregister(&cenalloc_dev.dev);
	/* XXX need to free the allocators? */
}

module_init(cenalloc_device_init);
module_exit(cenalloc_device_remove);

MODULE_AUTHOR("Sumit Semwal");
MODULE_DESCRIPTION("Constraint Aware Central Allocation Helper");
MODULE_LICENSE("GPL");
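/*
 * A hypothetical sketch of how a platform might register an allocator with
 * this helper at init time. The ops structure type name, the my_* callbacks
 * and the id value are assumptions for illustration; the real types and ids
 * come from the platform's allocator implementations (see cenalloc_priv.h).
 * Only allocate, free, map_dma and unmap_dma are checked as mandatory by
 * cenalloc_device_add_allocator() above:
 *
 *	static struct cenalloc_allocator_ops my_allocator_ops = {
 *		.allocate	= my_allocate,		// mandatory
 *		.free		= my_free,		// mandatory
 *		.map_dma	= my_map_dma,		// mandatory
 *		.unmap_dma	= my_unmap_dma,		// mandatory
 *		.phys		= my_phys,		// optional
 *		.map_kernel	= my_map_kernel,	// needed for CPU access
 *		.unmap_kernel	= my_unmap_kernel,	// needed for CPU access
 *		.map_user	= my_map_user,		// needed for mmap
 *		.sync_for_device = my_sync_for_device,	// optional
 *	};
 *
 *	static struct cenalloc_allocator my_allocator = {
 *		.id	= 2,	// higher id means higher selection priority
 *		.ops	= &my_allocator_ops,
 *	};
 *
 *	static int __init my_platform_cenalloc_init(void)
 *	{
 *		cenalloc_device_add_allocator(cenalloc_get_device(),
 *					      &my_allocator);
 *		return 0;
 *	}
 */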