// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */

#include <linux/kernel.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/poll.h>

#define MHI_DEVICE_NAME		"mhi"
#define MHI_UCI_DRIVER_NAME	"mhi_uci"
#define MHI_MAX_UCI_MINORS	128

static DEFINE_IDR(uci_idr);
static DEFINE_MUTEX(uci_drv_lock);
static struct class *uci_dev_class;
static int uci_dev_major;

struct uci_buf {
	struct list_head node;
	void *data;
	size_t len;
	size_t consumed;
};

struct uci_dev {
	unsigned int minor;
	size_t mtu;
	struct mhi_device *mhi_dev;
	struct mutex mhi_dev_lock;
	wait_queue_head_t ul_wq;
	wait_queue_head_t dl_wq;
	spinlock_t dl_queue_lock;
	struct list_head dl_queue;
	struct mutex write_lock;
	unsigned long flags;
#define UCI_DEV_DL_CAP		0
#define UCI_DEV_UL_CAP		1
#define UCI_DEV_CONNECTED	2
	struct kref ref_count;
};

static void mhi_uci_dev_release(struct kref *ref)
{
	struct uci_dev *udev = container_of(ref, struct uci_dev, ref_count);
	struct uci_buf *buf_itr, *tmp;

	/* Release unconsumed buffers */
	list_for_each_entry_safe(buf_itr, tmp, &udev->dl_queue, node) {
		list_del(&buf_itr->node);
		kfree(buf_itr->data);
	}

	mutex_destroy(&udev->mhi_dev_lock);
	mutex_destroy(&udev->write_lock);

	kfree(udev);
}

static int mhi_uci_queue_inbound(struct uci_dev *udev)
{
	struct mhi_device *mhi_dev = udev->mhi_dev;
	struct device *dev = &mhi_dev->dev;
	int nr_desc, i, ret = -EIO;
	struct uci_buf *ubuf;
	void *buf;

	/*
	 * Skip queuing without error if the DL channel is not supported.
	 * This allows open() to succeed on a UL-only channel.
	 */
	if (!udev->mhi_dev->dl_chan)
		return 0;

	nr_desc = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	for (i = 0; i < nr_desc; i++) {
		buf = kmalloc(udev->mtu + sizeof(*ubuf), GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Save the uci_buf info at the end of buf */
		ubuf = buf + udev->mtu;
		ubuf->data = buf;

		dev_dbg(dev, "Allocated buf %d of %d size %zu\n",
			i + 1, nr_desc, udev->mtu);

		ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, ubuf->data,
				    udev->mtu, MHI_EOT);
		if (ret) {
			kfree(buf);
			dev_err(dev, "Failed to queue buffer %d\n", i);
			return ret;
		}
	}

	return ret;
}

static int mhi_uci_open(struct inode *inode, struct file *filp)
{
	unsigned int minor = iminor(inode);
	struct uci_dev *udev = NULL;
	int ret;

	/* Retrieve the corresponding uci_dev and take a reference */
	mutex_lock(&uci_drv_lock);
	udev = idr_find(&uci_idr, minor);
	if (!udev) {
		mutex_unlock(&uci_drv_lock);
		return -ENODEV;
	}
	kref_get(&udev->ref_count);
	mutex_unlock(&uci_drv_lock);

	/* Start the MHI channel(s) and fill the RX queue */
	mutex_lock(&udev->mhi_dev_lock);
	if (!udev->mhi_dev) {
		/* Device removed between the idr lookup and here */
		ret = -ENODEV;
	} else {
		ret = mhi_prepare_for_transfer(udev->mhi_dev);
		if (!ret)
			ret = mhi_uci_queue_inbound(udev);
	}
	mutex_unlock(&udev->mhi_dev_lock);
	if (ret) {
		/* Drop the reference taken above */
		kref_put(&udev->ref_count, mhi_uci_dev_release);
		return ret;
	}

	filp->private_data = udev;

	/* Stream-like, non-seekable file descriptor */
	stream_open(inode, filp);

	return 0;
}

static int mhi_uci_release(struct inode *inode, struct file *file)
{
	struct uci_dev *udev = file->private_data;

	/* Stop the channels, unless the device has already been removed */
	mutex_lock(&udev->mhi_dev_lock);
	if (udev->mhi_dev)
		mhi_unprepare_from_transfer(udev->mhi_dev);
	mutex_unlock(&udev->mhi_dev_lock);

	file->private_data = NULL;
	kref_put(&udev->ref_count, mhi_uci_dev_release);

	return 0;
}

static __poll_t mhi_uci_poll(struct file *file, poll_table *wait)
{
	struct uci_dev *udev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &udev->ul_wq, wait);
	poll_wait(file, &udev->dl_wq, wait);

	/* Any buffer in the DL queue? */
	spin_lock_bh(&udev->dl_queue_lock);
	if (!list_empty(&udev->dl_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_bh(&udev->dl_queue_lock);

	/* If the MHI queue is not full, write is possible */
	mutex_lock(&udev->mhi_dev_lock);
	if (!udev->mhi_dev)
		mask = EPOLLERR;
	else if (!mhi_queue_is_full(udev->mhi_dev, DMA_TO_DEVICE))
		mask |= EPOLLOUT | EPOLLWRNORM;
	mutex_unlock(&udev->mhi_dev_lock);

	return mask;
}
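
/*
 * Write path: user data is copied into kernel buffers of at most @mtu
 * bytes and queued on the UL channel, one MHI transfer per chunk. Each
 * queued buffer is freed by mhi_ul_xfer_cb() once the device has
 * consumed it, so the write side never recycles buffers.
 */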
static ssize_t mhi_uci_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *offp)
{
	struct uci_dev *udev = file->private_data;
	size_t bytes_xfered = 0;
	void *kbuf = NULL;
	int ret;

	if (!test_bit(UCI_DEV_UL_CAP, &udev->flags))
		return -EOPNOTSUPP;

	if (!buf || !count)
		return -EINVAL;

	/* Serialize MHI queueing */
	if (mutex_lock_interruptible(&udev->write_lock))
		return -EINTR;

	while (count) {
		size_t xfer_size;

		/* Wait for an available transfer descriptor */
		ret = wait_event_interruptible(udev->ul_wq,
				!test_bit(UCI_DEV_CONNECTED, &udev->flags) ||
				!mhi_queue_is_full(udev->mhi_dev, DMA_TO_DEVICE));
		if (ret)
			break;

		if (!test_bit(UCI_DEV_CONNECTED, &udev->flags)) {
			ret = -EPIPE;
			break;
		}

		xfer_size = min_t(size_t, count, udev->mtu);
		kbuf = kmalloc(xfer_size, GFP_KERNEL);
		if (!kbuf) {
			ret = -ENOMEM;
			break;
		}

		/* copy_from_user() returns the number of bytes left */
		if (copy_from_user(kbuf, buf, xfer_size)) {
			ret = -EFAULT;
			break;
		}

		/* Add the buffer to the MHI queue */
		ret = mhi_queue_buf(udev->mhi_dev, DMA_TO_DEVICE, kbuf,
				    xfer_size, MHI_EOT);
		if (ret)
			break;

		bytes_xfered += xfer_size;
		count -= xfer_size;
		buf += xfer_size;
		kbuf = NULL;
	}

	mutex_unlock(&udev->write_lock);

	if (kbuf) /* aborted buffer queueing */
		kfree(kbuf);

	return ret ? ret : bytes_xfered;
}

static int mhi_uci_recycle_ubuf(struct uci_dev *udev, struct uci_buf *ubuf)
{
	int ret;

	mutex_lock(&udev->mhi_dev_lock);
	if (!udev->mhi_dev) {
		ret = -ENODEV;
		goto exit_unlock;
	}

	ret = mhi_queue_buf(udev->mhi_dev, DMA_FROM_DEVICE, ubuf->data,
			    udev->mtu, MHI_EOT);

exit_unlock:
	mutex_unlock(&udev->mhi_dev_lock);

	return ret;
}

static ssize_t mhi_uci_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct uci_dev *udev = file->private_data;
	bool recycle_buf = false;
	struct uci_buf *ubuf;
	size_t copy_len;
	char *copy_ptr;
	int ret = 0;

	if (!test_bit(UCI_DEV_DL_CAP, &udev->flags))
		return -EOPNOTSUPP;

	if (!buf)
		return -EINVAL;

	spin_lock_irq(&udev->dl_queue_lock);

	/* Wait for a downlink buffer or for the device to disappear */
	ret = wait_event_interruptible_lock_irq(udev->dl_wq,
				!list_empty(&udev->dl_queue) ||
				!test_bit(UCI_DEV_CONNECTED, &udev->flags),
				udev->dl_queue_lock);
	if (ret) {
		goto err_unlock;
	} else if (!test_bit(UCI_DEV_CONNECTED, &udev->flags)) {
		ret = -EPIPE;
		goto err_unlock;
	}

	ubuf = list_first_entry_or_null(&udev->dl_queue, struct uci_buf, node);
	if (!ubuf) {
		ret = -EIO;
		goto err_unlock;
	}

	/* Consume the buffer */
	copy_len = min_t(size_t, count, ubuf->len - ubuf->consumed);
	copy_ptr = ubuf->data + ubuf->consumed;
	ubuf->consumed += copy_len;

	/* Remove the buffer from the DL queue if entirely consumed */
	if (ubuf->consumed == ubuf->len) {
		list_del(&ubuf->node);
		recycle_buf = true;
	}

	spin_unlock_irq(&udev->dl_queue_lock);

	ret = copy_to_user(buf, copy_ptr, copy_len);
	if (ret)
		return -EFAULT;

	if (recycle_buf) {
		/* Give the buffer back to the MHI queue */
		ret = mhi_uci_recycle_ubuf(udev, ubuf);
		if (ret) /* unable to recycle, release */
			kfree(ubuf->data);
	}

	return copy_len;

err_unlock:
	spin_unlock_irq(&udev->dl_queue_lock);

	return ret;
}

static const struct file_operations mhidev_fops = {
	.owner = THIS_MODULE,
	.open = mhi_uci_open,
	.release = mhi_uci_release,
	.read = mhi_uci_read,
	.write = mhi_uci_write,
	.poll = mhi_uci_poll,
};
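
/*
 * MHI transfer callbacks, invoked by the MHI core once a queued buffer
 * has been consumed by the device (UL) or filled with incoming data
 * (DL). The DL callback links the buffer metadata (struct uci_buf,
 * stored at the tail of the allocation) onto dl_queue for readers.
 */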
static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
			   struct mhi_result *mhi_result)
{
	struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev);
	struct device *dev = &mhi_dev->dev;

	dev_dbg(dev, "%s: status: %d xfer_len: %zu\n", __func__,
		mhi_result->transaction_status, mhi_result->bytes_xferd);

	kfree(mhi_result->buf_addr);

	if (!mhi_result->transaction_status)
		wake_up_interruptible(&udev->ul_wq);
}

static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
			   struct mhi_result *mhi_result)
{
	struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev);
	struct uci_buf *ubuf;

	dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
		mhi_result->transaction_status, mhi_result->bytes_xferd);

	if (mhi_result->transaction_status &&
	    mhi_result->transaction_status != -EOVERFLOW) {
		kfree(mhi_result->buf_addr);
		return;
	}

	/* ubuf is placed at the end of the buffer (cf. mhi_uci_queue_inbound) */
	ubuf = mhi_result->buf_addr + udev->mtu;

	/* Paranoia, should never happen */
	if (WARN_ON(mhi_result->buf_addr != ubuf->data)) {
		kfree(mhi_result->buf_addr);
		return;
	}

	ubuf->data = mhi_result->buf_addr;
	ubuf->len = mhi_result->bytes_xferd;
	ubuf->consumed = 0;

	spin_lock_bh(&udev->dl_queue_lock);
	list_add_tail(&ubuf->node, &udev->dl_queue);
	spin_unlock_bh(&udev->dl_queue_lock);

	wake_up_interruptible(&udev->dl_wq);
}

static int mhi_uci_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	struct uci_dev *udev;
	struct device *dev;
	int index, err;

	/* Create the UCI data context */
	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/* Retrieve an index */
	mutex_lock(&uci_drv_lock);
	index = idr_alloc(&uci_idr, udev, 0, MHI_MAX_UCI_MINORS, GFP_KERNEL);
	mutex_unlock(&uci_drv_lock);
	if (index < 0) {
		err = index;
		goto err_free_udev;
	}

	/* Init UCI data */
	kref_init(&udev->ref_count);
	mutex_init(&udev->mhi_dev_lock);
	mutex_init(&udev->write_lock);
	init_waitqueue_head(&udev->ul_wq);
	init_waitqueue_head(&udev->dl_wq);
	spin_lock_init(&udev->dl_queue_lock);
	INIT_LIST_HEAD(&udev->dl_queue);
	udev->mhi_dev = mhi_dev;
	udev->minor = index;
	udev->mtu = min_t(size_t, id->driver_data, MHI_MAX_MTU);
	set_bit(UCI_DEV_CONNECTED, &udev->flags);
	if (mhi_dev->dl_chan)
		set_bit(UCI_DEV_DL_CAP, &udev->flags);
	if (mhi_dev->ul_chan)
		set_bit(UCI_DEV_UL_CAP, &udev->flags);

	dev_set_drvdata(&mhi_dev->dev, udev);

	/* Create a new device and register it with sysfs */
	dev = device_create(uci_dev_class, &mhi_dev->dev,
			    MKDEV(uci_dev_major, index), udev,
			    "%s", dev_name(&mhi_dev->dev));
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto err_free_idr;
	}

	dev_dbg(&mhi_dev->dev, "probed uci dev: %s\n", id->chan);

	return 0;

err_free_idr:
	mutex_lock(&uci_drv_lock);
	idr_remove(&uci_idr, udev->minor);
	mutex_unlock(&uci_drv_lock);
err_free_udev:
	dev_set_drvdata(&mhi_dev->dev, NULL);
	kfree(udev);

	return err;
}
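
/*
 * Device removal can race with open file descriptors: the uci_dev is
 * therefore reference counted, and @mhi_dev is cleared under
 * @mhi_dev_lock so that blocked readers and writers fail gracefully
 * instead of dereferencing a dead device.
 */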
static void mhi_uci_remove(struct mhi_device *mhi_dev)
{
	struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev);

	dev_set_drvdata(&mhi_dev->dev, NULL);

	mutex_lock(&uci_drv_lock);
	idr_remove(&uci_idr, udev->minor);
	mutex_unlock(&uci_drv_lock);

	clear_bit(UCI_DEV_CONNECTED, &udev->flags);

	device_destroy(uci_dev_class, MKDEV(uci_dev_major, udev->minor));

	/* Unlink mhi_dev from uci_dev */
	mutex_lock(&udev->mhi_dev_lock);
	udev->mhi_dev = NULL;
	mutex_unlock(&udev->mhi_dev_lock);

	/* Wake up any blocked user */
	wake_up_interruptible(&udev->dl_wq);
	wake_up_interruptible(&udev->ul_wq);

	kref_put(&udev->ref_count, mhi_uci_dev_release);
}

/* .driver_data stores the max MTU */
static const struct mhi_device_id mhi_uci_match_table[] = {
	{ .chan = "QMI", .driver_data = 0x1000 },
	{ .chan = "MBIM", .driver_data = 0x1000 },
	{ .chan = "DUN", .driver_data = 0x1000 },
	{},
};
MODULE_DEVICE_TABLE(mhi, mhi_uci_match_table);

static struct mhi_driver mhi_uci_driver = {
	.id_table = mhi_uci_match_table,
	.remove = mhi_uci_remove,
	.probe = mhi_uci_probe,
	.ul_xfer_cb = mhi_ul_xfer_cb,
	.dl_xfer_cb = mhi_dl_xfer_cb,
	.driver = {
		.name = MHI_UCI_DRIVER_NAME,
	},
};

static int __init mhi_uci_init(void)
{
	int ret;

	ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
	if (ret < 0)
		return ret;

	uci_dev_major = ret;
	uci_dev_class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
	if (IS_ERR(uci_dev_class)) {
		unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME);
		return PTR_ERR(uci_dev_class);
	}

	ret = mhi_driver_register(&mhi_uci_driver);
	if (ret) {
		class_destroy(uci_dev_class);
		unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME);
	}

	return ret;
}

static void __exit mhi_uci_exit(void)
{
	mhi_driver_unregister(&mhi_uci_driver);
	class_destroy(uci_dev_class);
	unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME);
	idr_destroy(&uci_idr);
}

module_init(mhi_uci_init);
module_exit(mhi_uci_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI UCI Driver");
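
/*
 * Userspace usage sketch (the device node name below is hypothetical;
 * actual names follow the parent MHI device, e.g. "mhi0_QMI"):
 *
 *	int fd = open("/dev/mhi0_QMI", O_RDWR);
 *	write(fd, req, req_len);   // chunked into MTU-sized MHI transfers
 *	read(fd, resp, resp_len);  // blocks until a DL buffer is queued
 *	close(fd);
 */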